pyworkflow_engine-0.1.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dashboard/backend/app/__init__.py +1 -0
- dashboard/backend/app/config.py +32 -0
- dashboard/backend/app/controllers/__init__.py +6 -0
- dashboard/backend/app/controllers/run_controller.py +86 -0
- dashboard/backend/app/controllers/workflow_controller.py +33 -0
- dashboard/backend/app/dependencies/__init__.py +5 -0
- dashboard/backend/app/dependencies/storage.py +50 -0
- dashboard/backend/app/repositories/__init__.py +6 -0
- dashboard/backend/app/repositories/run_repository.py +80 -0
- dashboard/backend/app/repositories/workflow_repository.py +27 -0
- dashboard/backend/app/rest/__init__.py +8 -0
- dashboard/backend/app/rest/v1/__init__.py +12 -0
- dashboard/backend/app/rest/v1/health.py +33 -0
- dashboard/backend/app/rest/v1/runs.py +133 -0
- dashboard/backend/app/rest/v1/workflows.py +41 -0
- dashboard/backend/app/schemas/__init__.py +23 -0
- dashboard/backend/app/schemas/common.py +16 -0
- dashboard/backend/app/schemas/event.py +24 -0
- dashboard/backend/app/schemas/hook.py +25 -0
- dashboard/backend/app/schemas/run.py +54 -0
- dashboard/backend/app/schemas/step.py +28 -0
- dashboard/backend/app/schemas/workflow.py +31 -0
- dashboard/backend/app/server.py +87 -0
- dashboard/backend/app/services/__init__.py +6 -0
- dashboard/backend/app/services/run_service.py +240 -0
- dashboard/backend/app/services/workflow_service.py +155 -0
- dashboard/backend/main.py +18 -0
- docs/concepts/cancellation.mdx +362 -0
- docs/concepts/continue-as-new.mdx +434 -0
- docs/concepts/events.mdx +266 -0
- docs/concepts/fault-tolerance.mdx +370 -0
- docs/concepts/hooks.mdx +552 -0
- docs/concepts/limitations.mdx +167 -0
- docs/concepts/schedules.mdx +775 -0
- docs/concepts/sleep.mdx +312 -0
- docs/concepts/steps.mdx +301 -0
- docs/concepts/workflows.mdx +255 -0
- docs/guides/cli.mdx +942 -0
- docs/guides/configuration.mdx +560 -0
- docs/introduction.mdx +155 -0
- docs/quickstart.mdx +279 -0
- examples/__init__.py +1 -0
- examples/celery/__init__.py +1 -0
- examples/celery/durable/docker-compose.yml +55 -0
- examples/celery/durable/pyworkflow.config.yaml +12 -0
- examples/celery/durable/workflows/__init__.py +122 -0
- examples/celery/durable/workflows/basic.py +87 -0
- examples/celery/durable/workflows/batch_processing.py +102 -0
- examples/celery/durable/workflows/cancellation.py +273 -0
- examples/celery/durable/workflows/child_workflow_patterns.py +240 -0
- examples/celery/durable/workflows/child_workflows.py +202 -0
- examples/celery/durable/workflows/continue_as_new.py +260 -0
- examples/celery/durable/workflows/fault_tolerance.py +210 -0
- examples/celery/durable/workflows/hooks.py +211 -0
- examples/celery/durable/workflows/idempotency.py +112 -0
- examples/celery/durable/workflows/long_running.py +99 -0
- examples/celery/durable/workflows/retries.py +101 -0
- examples/celery/durable/workflows/schedules.py +209 -0
- examples/celery/transient/01_basic_workflow.py +91 -0
- examples/celery/transient/02_fault_tolerance.py +257 -0
- examples/celery/transient/__init__.py +20 -0
- examples/celery/transient/pyworkflow.config.yaml +25 -0
- examples/local/__init__.py +1 -0
- examples/local/durable/01_basic_workflow.py +94 -0
- examples/local/durable/02_file_storage.py +132 -0
- examples/local/durable/03_retries.py +169 -0
- examples/local/durable/04_long_running.py +119 -0
- examples/local/durable/05_event_log.py +145 -0
- examples/local/durable/06_idempotency.py +148 -0
- examples/local/durable/07_hooks.py +334 -0
- examples/local/durable/08_cancellation.py +233 -0
- examples/local/durable/09_child_workflows.py +198 -0
- examples/local/durable/10_child_workflow_patterns.py +265 -0
- examples/local/durable/11_continue_as_new.py +249 -0
- examples/local/durable/12_schedules.py +198 -0
- examples/local/durable/__init__.py +1 -0
- examples/local/transient/01_quick_tasks.py +87 -0
- examples/local/transient/02_retries.py +130 -0
- examples/local/transient/03_sleep.py +141 -0
- examples/local/transient/__init__.py +1 -0
- pyworkflow/__init__.py +256 -0
- pyworkflow/aws/__init__.py +68 -0
- pyworkflow/aws/context.py +234 -0
- pyworkflow/aws/handler.py +184 -0
- pyworkflow/aws/testing.py +310 -0
- pyworkflow/celery/__init__.py +41 -0
- pyworkflow/celery/app.py +198 -0
- pyworkflow/celery/scheduler.py +315 -0
- pyworkflow/celery/tasks.py +1746 -0
- pyworkflow/cli/__init__.py +132 -0
- pyworkflow/cli/__main__.py +6 -0
- pyworkflow/cli/commands/__init__.py +1 -0
- pyworkflow/cli/commands/hooks.py +640 -0
- pyworkflow/cli/commands/quickstart.py +495 -0
- pyworkflow/cli/commands/runs.py +773 -0
- pyworkflow/cli/commands/scheduler.py +130 -0
- pyworkflow/cli/commands/schedules.py +794 -0
- pyworkflow/cli/commands/setup.py +703 -0
- pyworkflow/cli/commands/worker.py +413 -0
- pyworkflow/cli/commands/workflows.py +1257 -0
- pyworkflow/cli/output/__init__.py +1 -0
- pyworkflow/cli/output/formatters.py +321 -0
- pyworkflow/cli/output/styles.py +121 -0
- pyworkflow/cli/utils/__init__.py +1 -0
- pyworkflow/cli/utils/async_helpers.py +30 -0
- pyworkflow/cli/utils/config.py +130 -0
- pyworkflow/cli/utils/config_generator.py +344 -0
- pyworkflow/cli/utils/discovery.py +53 -0
- pyworkflow/cli/utils/docker_manager.py +651 -0
- pyworkflow/cli/utils/interactive.py +364 -0
- pyworkflow/cli/utils/storage.py +115 -0
- pyworkflow/config.py +329 -0
- pyworkflow/context/__init__.py +63 -0
- pyworkflow/context/aws.py +230 -0
- pyworkflow/context/base.py +416 -0
- pyworkflow/context/local.py +930 -0
- pyworkflow/context/mock.py +381 -0
- pyworkflow/core/__init__.py +0 -0
- pyworkflow/core/exceptions.py +353 -0
- pyworkflow/core/registry.py +313 -0
- pyworkflow/core/scheduled.py +328 -0
- pyworkflow/core/step.py +494 -0
- pyworkflow/core/workflow.py +294 -0
- pyworkflow/discovery.py +248 -0
- pyworkflow/engine/__init__.py +0 -0
- pyworkflow/engine/events.py +879 -0
- pyworkflow/engine/executor.py +682 -0
- pyworkflow/engine/replay.py +273 -0
- pyworkflow/observability/__init__.py +19 -0
- pyworkflow/observability/logging.py +234 -0
- pyworkflow/primitives/__init__.py +33 -0
- pyworkflow/primitives/child_handle.py +174 -0
- pyworkflow/primitives/child_workflow.py +372 -0
- pyworkflow/primitives/continue_as_new.py +101 -0
- pyworkflow/primitives/define_hook.py +150 -0
- pyworkflow/primitives/hooks.py +97 -0
- pyworkflow/primitives/resume_hook.py +210 -0
- pyworkflow/primitives/schedule.py +545 -0
- pyworkflow/primitives/shield.py +96 -0
- pyworkflow/primitives/sleep.py +100 -0
- pyworkflow/runtime/__init__.py +21 -0
- pyworkflow/runtime/base.py +179 -0
- pyworkflow/runtime/celery.py +310 -0
- pyworkflow/runtime/factory.py +101 -0
- pyworkflow/runtime/local.py +706 -0
- pyworkflow/scheduler/__init__.py +9 -0
- pyworkflow/scheduler/local.py +248 -0
- pyworkflow/serialization/__init__.py +0 -0
- pyworkflow/serialization/decoder.py +146 -0
- pyworkflow/serialization/encoder.py +162 -0
- pyworkflow/storage/__init__.py +54 -0
- pyworkflow/storage/base.py +612 -0
- pyworkflow/storage/config.py +185 -0
- pyworkflow/storage/dynamodb.py +1315 -0
- pyworkflow/storage/file.py +827 -0
- pyworkflow/storage/memory.py +549 -0
- pyworkflow/storage/postgres.py +1161 -0
- pyworkflow/storage/schemas.py +486 -0
- pyworkflow/storage/sqlite.py +1136 -0
- pyworkflow/utils/__init__.py +0 -0
- pyworkflow/utils/duration.py +177 -0
- pyworkflow/utils/schedule.py +391 -0
- pyworkflow_engine-0.1.7.dist-info/METADATA +687 -0
- pyworkflow_engine-0.1.7.dist-info/RECORD +196 -0
- pyworkflow_engine-0.1.7.dist-info/WHEEL +5 -0
- pyworkflow_engine-0.1.7.dist-info/entry_points.txt +2 -0
- pyworkflow_engine-0.1.7.dist-info/licenses/LICENSE +21 -0
- pyworkflow_engine-0.1.7.dist-info/top_level.txt +5 -0
- tests/examples/__init__.py +0 -0
- tests/integration/__init__.py +0 -0
- tests/integration/test_cancellation.py +330 -0
- tests/integration/test_child_workflows.py +439 -0
- tests/integration/test_continue_as_new.py +428 -0
- tests/integration/test_dynamodb_storage.py +1146 -0
- tests/integration/test_fault_tolerance.py +369 -0
- tests/integration/test_schedule_storage.py +484 -0
- tests/unit/__init__.py +0 -0
- tests/unit/backends/__init__.py +1 -0
- tests/unit/backends/test_dynamodb_storage.py +1554 -0
- tests/unit/backends/test_postgres_storage.py +1281 -0
- tests/unit/backends/test_sqlite_storage.py +1460 -0
- tests/unit/conftest.py +41 -0
- tests/unit/test_cancellation.py +364 -0
- tests/unit/test_child_workflows.py +680 -0
- tests/unit/test_continue_as_new.py +441 -0
- tests/unit/test_event_limits.py +316 -0
- tests/unit/test_executor.py +320 -0
- tests/unit/test_fault_tolerance.py +334 -0
- tests/unit/test_hooks.py +495 -0
- tests/unit/test_registry.py +261 -0
- tests/unit/test_replay.py +420 -0
- tests/unit/test_schedule_schemas.py +285 -0
- tests/unit/test_schedule_utils.py +286 -0
- tests/unit/test_scheduled_workflow.py +274 -0
- tests/unit/test_step.py +353 -0
- tests/unit/test_workflow.py +243 -0
`examples/local/durable/01_basic_workflow.py` (new file, +94 lines)

```python
"""
Durable Workflow - Basic Example

This example demonstrates a simple event-sourced workflow using InMemoryStorageBackend.
- 3-step order processing workflow
- Events recorded for each step
- Event log inspection after completion
- Basic @workflow and @step decorators

Run: python examples/local/durable/01_basic_workflow.py 2>/dev/null
"""

import asyncio

from pyworkflow import (
    configure,
    get_workflow_events,
    get_workflow_run,
    reset_config,
    start,
    step,
    workflow,
)
from pyworkflow.storage import InMemoryStorageBackend


# --- Steps ---
@step()
async def process_order(order_id: str) -> dict:
    """Process the order and validate it."""
    print(f" Processing order {order_id}...")
    return {"order_id": order_id, "status": "processed"}


@step()
async def charge_payment(order: dict, amount: float) -> dict:
    """Charge the payment for the order."""
    print(f" Charging payment: ${amount:.2f}...")
    return {**order, "charged": amount}


@step()
async def send_notification(order: dict) -> dict:
    """Send order confirmation notification."""
    print(f" Sending notification for order {order['order_id']}...")
    return {**order, "notified": True}


# --- Workflow ---
@workflow(durable=True, tags=["local", "durable"])
async def order_workflow(order_id: str, amount: float) -> dict:
    """Complete order processing workflow."""
    order = await process_order(order_id)
    order = await charge_payment(order, amount)
    order = await send_notification(order)
    return order


async def main():
    # Configure with InMemoryStorageBackend
    reset_config()
    storage = InMemoryStorageBackend()
    configure(storage=storage, default_durable=True)

    print("=== Durable Workflow - Basic Example ===\n")
    print("Running order workflow...")

    # Start workflow
    run_id = await start(order_workflow, "order-123", 99.99)
    print(f"\nWorkflow completed: {run_id}\n")

    # Check workflow status
    run = await get_workflow_run(run_id)
    print(f"Status: {run.status.value}")
    print(f"Result: {run.result}")

    # Inspect event log
    events = await get_workflow_events(run_id)
    print(f"\n=== Event Log ({len(events)} events) ===")
    for event in events:
        print(f" {event.sequence}: {event.type.value}")
        if event.type.value == "step_completed":
            step_name = event.data.get("step_name", "unknown")
            print(f"   Step: {step_name}")

    print("\n=== Key Takeaways ===")
    print("✓ Workflow executed with event sourcing")
    print("✓ Each step recorded as an event")
    print("✓ InMemoryStorageBackend used (data lost on exit)")
    print("✓ Try 02_file_storage.py for persistence!")


if __name__ == "__main__":
    asyncio.run(main())
```
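The event-sourcing behavior the takeaways describe is small enough to sketch in plain Python. The following is an illustrative toy, not pyworkflow's implementation: a completed step appends an event, and a re-run consults the log before executing anything.

```python
# Toy event-sourced step cache (illustration only, not pyworkflow internals).
import asyncio

event_log: list[dict] = []  # append-only, standing in for the engine's event log

def durable_step(fn):
    async def wrapper(*args, **kwargs):
        # Replay: if this step already completed, return the recorded result.
        for event in event_log:
            if event["type"] == "step_completed" and event["step_name"] == fn.__name__:
                return event["result"]
        result = await fn(*args, **kwargs)
        event_log.append(
            {"type": "step_completed", "step_name": fn.__name__, "result": result}
        )
        return result
    return wrapper

@durable_step
async def process_order(order_id: str) -> dict:
    print(f"executing process_order({order_id})")  # printed only on the first call
    return {"order_id": order_id, "status": "processed"}

async def main():
    await process_order("order-123")  # executes and records an event
    await process_order("order-123")  # served from the log, not re-executed

asyncio.run(main())
```

A real engine keys the cache on sequence numbers rather than step names, so the same step can run more than once per workflow; the name-based lookup here is only for brevity.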
`examples/local/durable/02_file_storage.py` (new file, +132 lines)

```python
"""
Durable Workflow - File Storage

This example demonstrates persistent workflow storage using FileStorageBackend.
- Same 3-step workflow as 01_basic_workflow.py
- Data persists to filesystem in workflow_data/ directory
- Human-readable JSON files
- Inspect stored file structure and JSONL event log

Run: python examples/local/durable/02_file_storage.py 2>/dev/null
"""

import asyncio
import json
import os
from pathlib import Path

from pyworkflow import (
    configure,
    get_workflow_events,
    get_workflow_run,
    reset_config,
    start,
    step,
    workflow,
)
from pyworkflow.storage import FileStorageBackend


# --- Steps ---
@step()
async def process_order(order_id: str) -> dict:
    """Process the order and validate it."""
    print(f" Processing order {order_id}...")
    return {"order_id": order_id, "status": "processed"}


@step()
async def charge_payment(order: dict, amount: float) -> dict:
    """Charge the payment for the order."""
    print(f" Charging payment: ${amount:.2f}...")
    return {**order, "charged": amount}


@step()
async def send_notification(order: dict) -> dict:
    """Send order confirmation notification."""
    print(f" Sending notification for order {order['order_id']}...")
    return {**order, "notified": True}


# --- Workflow ---
@workflow(durable=True, tags=["local", "durable"])
async def order_workflow(order_id: str, amount: float) -> dict:
    """Complete order processing workflow."""
    order = await process_order(order_id)
    order = await charge_payment(order, amount)
    order = await send_notification(order)
    return order


async def main():
    # Use local directory for persistence (added to .gitignore)
    data_dir = Path(__file__).parent / "workflow_data"
    data_dir.mkdir(exist_ok=True)

    print("=== Durable Workflow - File Storage ===\n")
    print(f"Storage directory: {data_dir}\n")

    # Configure with FileStorageBackend
    reset_config()
    storage = FileStorageBackend(base_path=str(data_dir))
    configure(storage=storage, default_durable=True)

    print("Running order workflow...")

    # Start workflow
    run_id = await start(order_workflow, "order-456", 149.99)
    print(f"\nWorkflow completed: {run_id}\n")

    # Check workflow status
    run = await get_workflow_run(run_id)
    print(f"Status: {run.status.value}")
    print(f"Result: {run.result}")

    # Show events
    events = await get_workflow_events(run_id)
    print(f"\n=== Event Log ({len(events)} events) ===")
    for event in events:
        print(f" {event.sequence}: {event.type.value}")

    # Show stored files
    print("\n=== Stored Files ===")
    for root, dirs, files in os.walk(data_dir):
        # Skip hidden directories (.locks)
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for f in files:
            path = os.path.join(root, f)
            rel_path = os.path.relpath(path, data_dir)
            size = os.path.getsize(path)
            print(f" {rel_path} ({size} bytes)")

    # Show JSONL event log contents
    event_log_path = data_dir / "events" / f"{run_id}.jsonl"
    if event_log_path.exists():
        print(f"\n=== Event Log File Contents ({event_log_path.name}) ===")
        with open(event_log_path) as f:
            for i, line in enumerate(f, 1):
                event_data = json.loads(line.strip())
                event_type = event_data.get("type", "unknown")
                print(f" Line {i}: {event_type}")
                # Show full data for first event
                if i == 1:
                    print(f" Full data: {json.dumps(event_data, indent=6)}")

    print("\n=== Directory Structure ===")
    print(" runs/   - Workflow run metadata (JSON)")
    print(" events/ - Event log (JSONL, append-only)")
    print(" steps/  - Step execution records (JSON)")
    print(" .locks/ - Internal file locks")

    print("\n=== Key Takeaways ===")
    print("✓ Data persists to filesystem in workflow_data/")
    print("✓ Human-readable JSON format")
    print("✓ JSONL (JSON Lines) for event log (one event per line)")
    print("✓ Survives process restarts")
    print("✓ Good for development and single-machine deployments")
    print(f"\nℹ Storage persisted at: {data_dir.absolute()}")


if __name__ == "__main__":
    asyncio.run(main())
```
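Because the event log is plain JSONL, any JSON-aware tool can read it. Below is a minimal stdlib sketch, assuming only the `events/<run_id>.jsonl` layout this example prints; field names beyond `type` are whatever the backend actually writes.

```python
# Minimal JSONL event-log reader (assumes the events/<run_id>.jsonl layout above).
import json
from collections import Counter
from pathlib import Path

def summarize_event_log(path: Path) -> Counter:
    """Count event types in a JSONL file: one JSON object per line."""
    counts: Counter = Counter()
    with path.open() as f:
        for line in f:
            if line.strip():  # skip blank lines defensively
                counts[json.loads(line).get("type", "unknown")] += 1
    return counts

# Usage: summarize_event_log(Path("workflow_data/events/<run_id>.jsonl"))
```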
`examples/local/durable/03_retries.py` (new file, +169 lines)

```python
"""
Durable Workflow - Automatic Retries with Suspension

This example demonstrates automatic retry behavior with workflow suspension/resumption.
- Simulates flaky external API (fails 2x, succeeds on 3rd try)
- Workflow suspends between retry attempts
- Retry events recorded in event log for audit trail
- Demonstrates manual resume after each retry delay

IMPORTANT: Manual resumption (await resume(run_id)) is ONLY for local development/CI.
In production, use Celery runtime for automatic scheduled resumption.
See examples/celery/ for production-ready distributed execution.

Run: python examples/local/durable/03_retries.py 2>/dev/null
"""

import asyncio

from pyworkflow import (
    configure,
    get_workflow_events,
    get_workflow_run,
    reset_config,
    resume,
    start,
    step,
    workflow,
)
from pyworkflow.storage import InMemoryStorageBackend

# Simulate API call counter
attempt_count = 0


# --- Steps ---
@step()
async def validate_order(order_id: str) -> dict:
    """Validate the order."""
    print(f" Validating order {order_id}...")
    return {"order_id": order_id, "valid": True}


@step(max_retries=3, retry_delay=1)
async def call_flaky_api(order: dict) -> dict:
    """Simulate unreliable external API - fails twice then succeeds."""
    global attempt_count
    attempt_count += 1

    print(f" Calling external API (attempt {attempt_count})...")

    if attempt_count < 3:
        # Simulate temporary failure
        raise Exception(f"API timeout - connection refused (attempt {attempt_count})")

    # Third attempt succeeds
    print(f" ✓ API call successful on attempt {attempt_count}!")
    return {**order, "api_response": "payment_approved", "attempts": attempt_count}


@step()
async def finalize_order(order: dict) -> dict:
    """Finalize the order after successful API call."""
    print(f" Finalizing order {order['order_id']}...")
    return {**order, "finalized": True}


# --- Workflow ---
@workflow(durable=True, tags=["local", "durable"])
async def order_workflow(order_id: str) -> dict:
    """Complete order processing with retry logic."""
    order = await validate_order(order_id)
    order = await call_flaky_api(order)  # Will retry on failure
    order = await finalize_order(order)
    return order


async def main():
    global attempt_count

    # Configure with InMemoryStorageBackend
    reset_config()
    storage = InMemoryStorageBackend()
    configure(storage=storage, default_durable=True)

    print("=== Durable Workflow - Automatic Retries with Suspension ===\n")
    print("Simulating flaky API (fails 2x, succeeds on 3rd try)...\n")

    # Reset counter
    attempt_count = 0

    # Start workflow
    print("Starting workflow...")
    run_id = await start(order_workflow, "order-789")

    # Check status after first attempt
    run = await get_workflow_run(run_id)
    print(f"\nStatus after attempt 1: {run.status.value}")

    if run.status.value == "suspended":
        print("→ Workflow suspended for retry (waiting 1 second...)")

        # Show events so far
        events = await get_workflow_events(run_id)
        print(f"\n=== Event Log (After Attempt 1) - {len(events)} events ===")
        for event in events:
            event_type = event.type.value
            attempt = event.data.get("attempt", "?")
            print(f" {event.sequence}: {event_type} (attempt={attempt})")
            if event_type == "step_retrying":
                next_attempt = event.data.get("attempt")
                print(f"   → Will retry as attempt {next_attempt}")

        # Wait for retry delay and resume
        await asyncio.sleep(1.5)
        print("\nResuming workflow for attempt 2...")
        await resume(run_id)

        # Check status again
        run = await get_workflow_run(run_id)
        print(f"Status after attempt 2: {run.status.value}")

        if run.status.value == "suspended":
            print("→ Workflow suspended for retry again (waiting 1 second...)")

            # Wait and resume for attempt 3
            await asyncio.sleep(1.5)
            print("\nResuming workflow for attempt 3...")
            result = await resume(run_id)

            print("\n✓ Workflow completed successfully!")
            print(f"Result: {result}")

    # Final status
    run = await get_workflow_run(run_id)
    print(f"\nFinal status: {run.status.value}")

    # Show complete event log
    events = await get_workflow_events(run_id)
    print(f"\n=== Complete Event Log ({len(events)} events) ===")

    for event in events:
        event_type = event.type.value
        attempt = event.data.get("attempt", "")
        step_name = event.data.get("step_name", "")

        if attempt:
            print(f" {event.sequence}: {event_type} (attempt={attempt}, step={step_name})")
        else:
            print(f" {event.sequence}: {event_type}")

        if event_type == "step_failed":
            error = event.data.get("error", "")[:50]
            print(f"   Error: {error}...")
        elif event_type == "step_retrying":
            retry_after = event.data.get("retry_after")
            resume_at = event.data.get("resume_at", "")[:19]
            print(f"   Retry after: {retry_after}s, resume at: {resume_at}")

    print("\n=== Key Takeaways ===")
    print("✓ Workflow suspends between retry attempts (releases resources)")
    print("✓ Each retry requires manual resume() or automatic Celery scheduling")
    print("✓ Event log shows STEP_FAILED + STEP_RETRYING for each retry")
    print("✓ Resume restores state via event replay and continues from retry")
    print("✓ max_retries=3, retry_delay=1 (1 initial + 3 retries = 4 total attempts)")
    print(f"✓ Total attempts in this run: {attempt_count}")


if __name__ == "__main__":
    asyncio.run(main())
```
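The two wait-and-resume blocks above generalize to a small polling loop. Here is a local-development sketch built only from the `start`/`resume`/`get_workflow_run` calls this example already uses; the `"suspended"` status string and the fixed poll interval are read off this example, and production deployments should rely on the Celery runtime instead.

```python
# Hypothetical dev/CI helper: resume a run until it leaves the suspended state.
import asyncio

from pyworkflow import get_workflow_run, resume

async def run_to_completion(run_id: str, poll_interval: float = 1.5):
    """Poll a run and resume it after each suspension (local development only)."""
    while True:
        run = await get_workflow_run(run_id)
        if run.status.value != "suspended":
            return run  # terminal status: completed, failed, or cancelled
        await asyncio.sleep(poll_interval)  # wait out the retry/sleep delay
        await resume(run_id)
```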
`examples/local/durable/04_long_running.py` (new file, +119 lines)

```python
"""
Durable Workflow - Long Running with Sleep

This example demonstrates workflow suspension and resumption with sleep().
- Workflow suspends during sleep (releases resources)
- Can be resumed after sleep completes
- Uses FileStorageBackend for persistence across process restarts
- Demonstrates manual resumption pattern

IMPORTANT: Manual resumption (await resume(run_id)) is ONLY for local development/CI.
In production, use Celery runtime for automatic scheduled resumption.
See examples/celery/ for production-ready distributed execution.

Run: python examples/local/durable/04_long_running.py 2>/dev/null
"""

import asyncio
import tempfile

from pyworkflow import (
    configure,
    get_workflow_run,
    reset_config,
    resume,
    sleep,
    start,
    step,
    workflow,
)
from pyworkflow.storage import FileStorageBackend


# --- Steps ---
@step()
async def prepare_batch(batch_id: str) -> dict:
    """Prepare the batch for processing."""
    print(f" Preparing batch {batch_id}...")
    return {"batch_id": batch_id, "status": "prepared"}


@step()
async def process_batch(batch: dict) -> dict:
    """Process the batch after sleep completes."""
    print(f" Processing batch {batch['batch_id']}...")
    return {**batch, "status": "processed", "items": 1000}


@step()
async def finalize_batch(batch: dict) -> dict:
    """Finalize the batch."""
    print(f" Finalizing batch {batch['batch_id']}...")
    return {**batch, "status": "completed"}


# --- Workflow ---
@workflow(durable=True, tags=["local", "durable"])
async def batch_workflow(batch_id: str) -> dict:
    """Long-running batch processing workflow with sleep."""
    batch = await prepare_batch(batch_id)

    print(" Sleeping for 5 seconds (workflow will suspend)...")
    await sleep("5s")  # Suspends workflow here

    print(" Resuming after sleep...")
    batch = await process_batch(batch)
    batch = await finalize_batch(batch)
    return batch


async def main():
    # Use temp directory (use real path like "./workflow_data" for production)
    with tempfile.TemporaryDirectory() as tmpdir:
        print("=== Durable Workflow - Long Running ===\n")

        # Configure with FileStorageBackend (for persistence)
        reset_config()
        storage = FileStorageBackend(base_path=tmpdir)
        configure(storage=storage, default_durable=True)

        print("Starting batch workflow...\n")

        # Start workflow
        run_id = await start(batch_workflow, "batch-001")

        # Check status after start
        run = await get_workflow_run(run_id)
        print(f"\nWorkflow status after sleep: {run.status.value}")

        if run.status.value == "suspended":
            print("Workflow is suspended (waiting for sleep to complete)")

            # Wait for sleep duration, then resume
            print("\nWaiting 5 seconds for sleep to complete...")
            await asyncio.sleep(5)

            print(f"Resuming workflow {run_id}...\n")
            result = await resume(run_id)

            print("\nWorkflow completed!")
            print(f"Result: {result}")
        else:
            # Workflow already completed (sleep was short enough)
            print("Workflow completed without suspension")
            print(f"Result: {run.result}")

        # Final status check
        run = await get_workflow_run(run_id)
        print(f"\nFinal status: {run.status.value}")

        print("\n=== Key Takeaways ===")
        print("✓ Workflow suspends during sleep() (releases resources)")
        print("✓ FileStorageBackend persists state during suspension")
        print("✓ Can resume after sleep completes (even after process restart)")
        print("✓ Perfect for rate limiting, scheduled tasks, waiting for events")
        print("\nℹ For production: use real storage path, implement auto-resume logic")


if __name__ == "__main__":
    asyncio.run(main())
```
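`sleep("5s")` takes a human-readable duration string; the package ships its own parser in `pyworkflow/utils/duration.py`, whose exact rules are not shown in this diff. As a rough sketch of how suffix-based duration parsing typically works (units and grammar are assumptions, not confirmed against the real parser):

```python
# Hypothetical duration parser; units and grammar are assumptions, not
# pyworkflow/utils/duration.py's actual rules.
import re

_UNITS = {"s": 1, "m": 60, "h": 3600, "d": 86400}

def parse_duration(spec: str) -> float:
    """Convert strings like '5s', '2m', or '1h30m' into seconds."""
    parts = re.findall(r"(\d+(?:\.\d+)?)([smhd])", spec)
    # Reject anything the pattern did not fully consume.
    if not parts or "".join(n + u for n, u in parts) != spec:
        raise ValueError(f"invalid duration: {spec!r}")
    return sum(float(n) * _UNITS[u] for n, u in parts)

assert parse_duration("5s") == 5.0
assert parse_duration("1h30m") == 5400.0
```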
`examples/local/durable/05_event_log.py` (new file, +145 lines)

```python
"""
Durable Workflow - Event Log Deep Dive

This example demonstrates detailed event sourcing inspection.
- Multiple workflows to show different event sequences
- Deep dive into event structure (sequence, type, timestamp, data)
- Understanding event types and their meaning
- Event replay concepts

Run: python examples/local/durable/05_event_log.py 2>/dev/null
"""

import asyncio

from pyworkflow import (
    configure,
    get_workflow_events,
    get_workflow_run,
    reset_config,
    start,
    step,
    workflow,
)
from pyworkflow.storage import InMemoryStorageBackend


# --- Steps ---
@step()
async def step_a(value: int) -> int:
    """Simple step that doubles the value."""
    return value * 2


@step()
async def step_b(value: int) -> int:
    """Simple step that adds 10."""
    return value + 10


@step()
async def step_c(value: int) -> int:
    """Simple step that subtracts 5."""
    return value - 5


# --- Workflows ---
@workflow(durable=True, tags=["local", "durable"])
async def simple_workflow(value: int) -> int:
    """Simple 2-step workflow."""
    result = await step_a(value)
    result = await step_b(result)
    return result


@workflow(durable=True, tags=["local", "durable"])
async def complex_workflow(value: int) -> int:
    """More complex 3-step workflow."""
    result = await step_a(value)
    result = await step_b(result)
    result = await step_c(result)
    return result


def print_event_details(event, index: int):
    """Pretty print event details."""
    print(f"\nEvent #{index + 1}:")
    print(f" Sequence: {event.sequence}")
    print(f" Type: {event.type.value}")
    print(f" Timestamp: {event.timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}")

    if event.data:
        print(" Data:")
        for key, value in event.data.items():
            # Format value nicely
            if isinstance(value, str) and len(value) > 50:
                value = value[:50] + "..."
            print(f"   {key}: {value}")


async def main():
    # Configure with InMemoryStorageBackend
    reset_config()
    storage = InMemoryStorageBackend()
    configure(storage=storage, default_durable=True)

    print("=== Durable Workflow - Event Log Deep Dive ===\n")

    # Run first workflow
    print("Running simple_workflow(5)...\n")
    run_id_1 = await start(simple_workflow, 5)

    run = await get_workflow_run(run_id_1)
    print(f"Result: {run.result}")
    print(f"Status: {run.status.value}")

    # Inspect events
    events = await get_workflow_events(run_id_1)
    print(f"\n=== Event Log for simple_workflow ({len(events)} events) ===")

    for i, event in enumerate(events):
        print_event_details(event, i)

    # Run second workflow
    print("\n" + "=" * 60)
    print("\nRunning complex_workflow(10)...\n")
    run_id_2 = await start(complex_workflow, 10)

    run = await get_workflow_run(run_id_2)
    print(f"Result: {run.result}")
    print(f"Status: {run.status.value}")

    # Inspect events
    events = await get_workflow_events(run_id_2)
    print(f"\n=== Event Log for complex_workflow ({len(events)} events) ===")

    for i, event in enumerate(events):
        print_event_details(event, i)

    # Event type summary
    print("\n" + "=" * 60)
    print("\n=== Event Types Explained ===")
    print("workflow_started   - Workflow execution begins")
    print("step_completed     - Step successfully executed (result cached)")
    print("step_failed        - Step failed (will retry if configured)")
    print("sleep_started      - Workflow suspended (sleep begins)")
    print("sleep_completed    - Workflow resumed (sleep ends)")
    print("workflow_completed - Workflow finished successfully")
    print("workflow_failed    - Workflow failed permanently")

    print("\n=== Event Replay Concepts ===")
    print("✓ Events are immutable - never modified, only appended")
    print("✓ On crash/restart, events replayed to restore state")
    print("✓ step_completed events: result cached, step not re-executed")
    print("✓ Sequence numbers ensure deterministic ordering")
    print("✓ Timestamps enable time-travel debugging")

    print("\n=== Key Takeaways ===")
    print("✓ Every state change recorded as an event")
    print("✓ Events contain sequence, type, timestamp, and data")
    print("✓ Event log enables crash recovery via replay")
    print("✓ Complete audit trail for compliance and debugging")


if __name__ == "__main__":
    asyncio.run(main())
```
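Since each event carries a sequence, type, timestamp, and data payload (the four fields `print_event_details` displays), the audit trail mentioned in the takeaways can be derived by folding over the log. Below is a sketch over plain dicts standing in for event objects; the field names follow what this example prints, not a documented schema.

```python
# Sketch: derive per-step wall-clock durations from an event list. Plain dicts
# stand in for event objects; field names follow what this example prints.
from datetime import datetime, timedelta

def step_durations(events: list[dict]) -> dict[str, timedelta]:
    """Map each completed step to the time elapsed since the previous event."""
    durations: dict[str, timedelta] = {}
    prev_ts = None
    for event in sorted(events, key=lambda e: e["sequence"]):
        ts: datetime = event["timestamp"]
        if event["type"] == "step_completed" and prev_ts is not None:
            durations[event["data"].get("step_name", "unknown")] = ts - prev_ts
        prev_ts = ts
    return durations
```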