edda-framework 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
edda/__init__.py ADDED
@@ -0,0 +1,56 @@
+ """
+ Edda Framework - CloudEvents-native Durable Execution framework.
+
+ Example:
+     >>> import asyncio
+     >>> import sys
+     >>> import uvloop
+     >>> from edda import EddaApp, workflow, activity, wait_event, wait_timer
+     >>>
+     >>> # Python 3.12+ uses asyncio.set_event_loop_policy()
+     >>> if sys.version_info >= (3, 12):
+     ...     asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+     ... else:
+     ...     uvloop.install()
+     >>>
+     >>> app = EddaApp(
+     ...     service_name="order-service",
+     ...     db_url="sqlite:///workflow.db",
+     ...     outbox_enabled=True
+     ... )
+ """
+
+ from edda.activity import activity
+ from edda.app import EddaApp
+ from edda.compensation import compensation, on_failure, register_compensation
+ from edda.context import WorkflowContext
+ from edda.events import ReceivedEvent, send_event, wait_event, wait_timer, wait_until
+ from edda.exceptions import RetryExhaustedError, TerminalError
+ from edda.hooks import HooksBase, WorkflowHooks
+ from edda.outbox import OutboxRelayer, send_event_transactional
+ from edda.retry import RetryPolicy
+ from edda.workflow import workflow
+
+ __version__ = "0.1.0"
+
+ __all__ = [
+     "EddaApp",
+     "workflow",
+     "activity",
+     "WorkflowContext",
+     "ReceivedEvent",
+     "wait_event",
+     "wait_timer",
+     "wait_until",
+     "send_event",
+     "compensation",
+     "register_compensation",
+     "on_failure",
+     "OutboxRelayer",
+     "send_event_transactional",
+     "WorkflowHooks",
+     "HooksBase",
+     "RetryPolicy",
+     "RetryExhaustedError",
+     "TerminalError",
+ ]
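
For orientation, the sketch below shows how these exports are typically wired together. The @activity usage and the EddaApp arguments follow the docstrings in this package; the @workflow signature, the wait_event call, and the event name "payment.confirmed" are assumptions inferred from the ctx-first convention of @activity and are not confirmed by this diff.

    from edda import EddaApp, activity, workflow, wait_event, TerminalError

    @activity
    async def reserve_inventory(ctx, order_id: str) -> dict:
        # Activity results are recorded and replayed deterministically.
        if not order_id:
            raise TerminalError("order_id is required")  # non-retryable
        return {"reservation_id": f"res-{order_id}"}

    @workflow  # assumption: @workflow mirrors the ctx-first convention of @activity
    async def order_workflow(ctx, order_id: str) -> dict:
        reservation = await reserve_inventory(ctx, order_id)
        # assumption: wait_event(ctx, <event type>) suspends until the event arrives
        payment = await wait_event(ctx, "payment.confirmed")
        return {"reservation": reservation, "payment": payment}

    app = EddaApp(
        service_name="order-service",
        db_url="sqlite:///workflow.db",
        outbox_enabled=True,
    )

How workflows are registered with and started by the app is not shown in this file, so that step is omitted here.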
edda/activity.py ADDED
@@ -0,0 +1,505 @@
+ """
+ Activity module for Edda framework.
+
+ This module provides the @activity decorator for defining atomic units of work
+ within workflows. Activities are the building blocks of Sagas and support
+ deterministic replay through result caching.
+ """
+
+ import asyncio
+ import functools
+ import inspect
+ import time
+ from collections.abc import Callable
+ from typing import Any, TypeVar, cast
+
+ from edda.context import WorkflowContext
+ from edda.exceptions import RetryExhaustedError, TerminalError, WorkflowCancelledException
+ from edda.pydantic_utils import (
+     enum_value_to_enum,
+     extract_enum_from_annotation,
+     extract_pydantic_model_from_annotation,
+     from_json_dict,
+     to_json_dict,
+ )
+ from edda.retry import RetryMetadata, RetryPolicy
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+
+ class Activity:
+     """
+     Wrapper class for activity functions.
+
+     Handles execution, result caching during replay, and history recording.
+     Supports automatic retry with exponential backoff.
+     """
+
+     def __init__(self, func: Callable[..., Any], retry_policy: "RetryPolicy | None" = None):
+         """
+         Initialize activity wrapper.
+
+         Args:
+             func: The async function to wrap
+             retry_policy: Optional retry policy for this activity.
+                 If None, uses the default policy from EddaApp.
+         """
+         self.func = func
+         self.name = func.__name__
+         self.retry_policy = retry_policy
+         functools.update_wrapper(self, func)
+
+     async def __call__(self, ctx: WorkflowContext, *args: Any, **kwargs: Any) -> Any:
+         """
+         Execute the activity with automatic retry.
+
+         During replay, returns cached result. During normal execution,
+         executes the function with retry logic and records the result.
+
+         Args:
+             ctx: Workflow context
+             *args: Positional arguments for the activity
+             **kwargs: Keyword arguments for the activity
+                 Optional: activity_id (str) - Explicit activity ID
+                     - Auto-generated by default (format: "{function_name}:{counter}")
+                     - Manual specification required ONLY for concurrent execution
+                       (asyncio.gather, async for, etc.)
+                     - For sequential execution, rely on auto-generation
+
+         Returns:
+             Activity result
+
+         Raises:
+             RetryExhaustedError: When all retry attempts are exhausted
+             TerminalError: For non-retryable errors
+             WorkflowCancelledException: When workflow is cancelled
+             Any exception raised by the activity function (if retry policy allows)
+
+         Example:
+             Sequential execution (auto-generated IDs - recommended)::
+
+                 result1 = await my_activity(ctx, arg1)  # Auto: "my_activity:1"
+                 result2 = await my_activity(ctx, arg2)  # Auto: "my_activity:2"
+
+             Concurrent execution (manual IDs - required)::
+
+                 results = await asyncio.gather(
+                     my_activity(ctx, arg1, activity_id="my_activity:1"),
+                     my_activity(ctx, arg2, activity_id="my_activity:2"),
+                 )
+         """
+         # Resolve activity ID (explicit or auto-generated)
+         activity_id = self._resolve_id(ctx, kwargs.pop("activity_id", None))
+
+         # Record activity ID execution
+         ctx._record_activity_id(activity_id)
+
+         # Call hook: activity start
+         if ctx.hooks and hasattr(ctx.hooks, "on_activity_start"):
+             await ctx.hooks.on_activity_start(
+                 ctx.instance_id, activity_id, self.name, ctx.is_replaying
+             )
+
+         # Check if workflow has been cancelled
+         instance = await ctx._get_instance()
+         if instance and instance.get("status") == "cancelled":
+             raise WorkflowCancelledException(f"Workflow {ctx.instance_id} has been cancelled")
+
+         # Check if we're replaying and have a cached result
+         if ctx.is_replaying:
+             found, cached_result = ctx._get_cached_result(activity_id)
+             if found:
+                 # Check if this was an error
+                 if isinstance(cached_result, dict) and cached_result.get("_error"):
+                     # Reconstruct and raise the error
+                     error_type = cached_result.get("error_type", "Exception")
+                     error_message = cached_result.get("error_message", "Unknown error")
+
+                     # Call hook: activity failed (from cache)
+                     error_obj = Exception(f"{error_type}: {error_message}")
+                     if ctx.hooks and hasattr(ctx.hooks, "on_activity_failed"):
+                         await ctx.hooks.on_activity_failed(
+                             ctx.instance_id, activity_id, self.name, error_obj
+                         )
+
+                     raise error_obj
+
+                 # Restore Pydantic model or Enum from cached result based on return type
+                 sig = inspect.signature(self.func)
+                 restored_result = cached_result
+
+                 # Check if return type is Pydantic model
+                 model = extract_pydantic_model_from_annotation(sig.return_annotation)
+                 if model is not None and isinstance(cached_result, dict):
+                     restored_result = from_json_dict(cached_result, model)
+                 # Check if return type is Enum
+                 elif (
+                     enum_class := extract_enum_from_annotation(sig.return_annotation)
+                 ) is not None:
+                     from enum import Enum
+
+                     if not isinstance(cached_result, Enum):
+                         restored_result = enum_value_to_enum(cached_result, enum_class)
+
+                 # Call hook: activity complete (cache hit)
+                 if ctx.hooks and hasattr(ctx.hooks, "on_activity_complete"):
+                     await ctx.hooks.on_activity_complete(
+                         ctx.instance_id, activity_id, self.name, restored_result, cache_hit=True
+                     )
+
+                 # Return cached successful result
+                 return restored_result
+
+         # Resolve retry policy (activity-level > app-level > default)
+         retry_policy = self._resolve_retry_policy(ctx)
+
+         # Retry loop (OUTSIDE transaction - each attempt is independent)
+         attempt = 0
+         start_time = time.time()
+         retry_metadata = RetryMetadata()
+         last_error: Exception | None = None
+
+         while True:
+             attempt += 1
+
+             try:
+                 # Execute activity in transaction (one attempt)
+                 async with ctx.transaction():
+                     # Calculate retry metadata if there were retries
+                     retry_meta = None
+                     if attempt > 1:
+                         retry_metadata.total_duration_ms = int((time.time() - start_time) * 1000)
+                         retry_metadata.total_attempts = attempt  # Update attempt count on success
+                         retry_meta = retry_metadata
+
+                     result = await self._execute_and_record(
+                         ctx, activity_id, args, kwargs, retry_meta
+                     )
+                     return result
+
+             except WorkflowCancelledException:
+                 # Never retry cancellation
+                 raise
+
+             except TerminalError as error:
+                 # Never retry terminal errors, but record the failure
+                 input_data = {
+                     "args": [to_json_dict(arg) for arg in args],
+                     "kwargs": {k: to_json_dict(v) for k, v in kwargs.items()},
+                 }
+                 # Record failure (no retry metadata for terminal errors)
+                 await ctx._record_activity_failed(
+                     activity_id, self.name, error, input_data, retry_metadata=None
+                 )
+
+                 # Call hook: activity failed
+                 if ctx.hooks and hasattr(ctx.hooks, "on_activity_failed"):
+                     await ctx.hooks.on_activity_failed(
+                         ctx.instance_id, activity_id, self.name, error
+                     )
+
+                 # Re-raise immediately (no retry)
+                 raise
+
+             except Exception as error:
+                 last_error = error
+
+                 # Record this attempt in metadata
+                 retry_metadata.add_attempt(attempt, error)
+
+                 # Check if should retry
+                 should_retry, reason = self._should_retry(retry_policy, error, attempt, start_time)
+
+                 if not should_retry:
+                     # Exhausted retries - mark metadata as exhausted
+                     retry_metadata.exhausted = True
+                     retry_metadata.total_duration_ms = int((time.time() - start_time) * 1000)
+
+                     # Record failure with retry metadata outside transaction
+                     input_data = {
+                         "args": [to_json_dict(arg) for arg in args],
+                         "kwargs": {k: to_json_dict(v) for k, v in kwargs.items()},
+                     }
+                     # Include retry metadata in the failure record
+                     await ctx._record_activity_failed(
+                         activity_id, self.name, error, input_data, retry_metadata
+                     )
+
+                     # Call hook: activity failed
+                     if ctx.hooks and hasattr(ctx.hooks, "on_activity_failed"):
+                         await ctx.hooks.on_activity_failed(
+                             ctx.instance_id, activity_id, self.name, error
+                         )
+
+                     # Raise RetryExhaustedError with original exception as cause
+                     raise RetryExhaustedError(
+                         f"Activity {self.name} failed after {attempt} attempts: {reason}"
+                     ) from last_error
+
+                 # Calculate backoff delay
+                 delay = retry_policy.calculate_delay(attempt)
+
+                 # Call hook: activity retry
+                 if ctx.hooks and hasattr(ctx.hooks, "on_activity_retry"):
+                     await ctx.hooks.on_activity_retry(
+                         ctx.instance_id, activity_id, self.name, error, attempt, delay
+                     )
+
+                 # Wait before next retry
+                 await asyncio.sleep(delay)
+
+     async def _execute_and_record(
+         self,
+         ctx: WorkflowContext,
+         activity_id: str,
+         args: tuple[Any, ...],
+         kwargs: dict[str, Any],
+         retry_metadata: Any = None,
+     ) -> Any:
+         """
+         Execute activity function and record the result.
+
+         This helper method contains the core execution logic and is called
+         within a transaction. If the activity fails, the exception is propagated
+         and the transaction will be rolled back by the caller.
+
+         Args:
+             ctx: Workflow context
+             activity_id: Activity ID
+             args: Positional arguments for the activity
+             kwargs: Keyword arguments for the activity
+             retry_metadata: Optional retry metadata (RetryMetadata instance)
+
+         Returns:
+             Activity result
+
+         Raises:
+             Any exception raised by the activity function
+         """
+         # Capture input parameters for recording.
+         # Convert Pydantic models to JSON dicts for storage.
+         # args and kwargs contain the actual activity arguments (ctx is already passed separately).
+         input_data = {
+             "args": [to_json_dict(arg) for arg in args],
+             "kwargs": {k: to_json_dict(v) for k, v in kwargs.items()},
+         }
+
+         # Execute the activity function
+         result = await self.func(ctx, *args, **kwargs)
+
+         # Convert Pydantic model result to JSON dict for storage
+         result_for_storage = to_json_dict(result)
+
+         # Record successful completion with input data.
+         # Always record when we actually execute, even during replay
+         # (if we're here, it means there was no cached result).
+         await ctx._record_activity_completed(
+             activity_id, self.name, result_for_storage, input_data, retry_metadata
+         )
+
+         # Auto-register compensation if @on_failure decorator is present
+         if hasattr(self.func, "_compensation_func") and hasattr(self.func, "_has_compensation"):
+             compensation_func = self.func._compensation_func
+
+             # Merge activity result and input kwargs for compensation parameters.
+             # Convention: compensation function receives both input params and result values.
+             comp_kwargs = {**kwargs}  # Start with input kwargs
+
+             # Add result values if result is a dict
+             if isinstance(result, dict):
+                 comp_kwargs.update(result)
+
+             # Register the compensation
+             from edda.compensation import register_compensation
+
+             await register_compensation(
+                 ctx, compensation_func, activity_id=activity_id, **comp_kwargs
+             )
+
+             print(f"[Activity] Auto-registered compensation: {compensation_func.__name__}")
+
+         # Check if workflow was cancelled during activity execution
+         instance = await ctx._get_instance()
+         if instance and instance.get("status") == "cancelled":
+             from edda.exceptions import WorkflowCancelledException
+
+             raise WorkflowCancelledException(
+                 f"Workflow {ctx.instance_id} was cancelled during {self.name} execution"
+             )
+
+         # Call hook: activity complete (cache miss)
+         if ctx.hooks and hasattr(ctx.hooks, "on_activity_complete"):
+             await ctx.hooks.on_activity_complete(
+                 ctx.instance_id, activity_id, self.name, result, cache_hit=False
+             )
+
+         return result
+
+     def _resolve_id(self, ctx: WorkflowContext, explicit_id: str | None) -> str:
+         """
+         Resolve activity ID (explicit or auto-generated).
+
+         Args:
+             ctx: Workflow context
+             explicit_id: Explicitly provided activity ID (from kwargs)
+
+         Returns:
+             Resolved activity ID
+         """
+         if explicit_id is not None:
+             return explicit_id
+
+         # Auto-generate ID using context's generator
+         return ctx._generate_activity_id(self.name)
+
+     def _resolve_retry_policy(self, ctx: WorkflowContext) -> "RetryPolicy":
+         """
+         Resolve retry policy with priority: activity-level > app-level > default.
+
+         Args:
+             ctx: Workflow context
+
+         Returns:
+             Resolved retry policy
+         """
+         from edda.retry import DEFAULT_RETRY_POLICY
+
+         # Priority 1: Activity-level policy (specified in @activity decorator)
+         if self.retry_policy is not None:
+             return self.retry_policy
+
+         # Priority 2: App-level policy (EddaApp default_retry_policy)
+         if hasattr(ctx, "_app_retry_policy") and ctx._app_retry_policy is not None:
+             return cast(RetryPolicy, ctx._app_retry_policy)
+
+         # Priority 3: Framework default
+         return DEFAULT_RETRY_POLICY
+
+     def _should_retry(
+         self,
+         retry_policy: "RetryPolicy",
+         error: Exception,
+         attempt: int,
+         start_time: float,
+     ) -> tuple[bool, str]:
+         """
+         Determine if the activity should be retried.
+
+         Args:
+             retry_policy: Retry policy configuration
+             error: Exception that caused the failure
+             attempt: Current attempt number (1-indexed)
+             start_time: Start time of the first attempt (Unix timestamp)
+
+         Returns:
+             Tuple of (should_retry: bool, reason: str)
+                 - should_retry: True if retry should be attempted, False otherwise
+                 - reason: Human-readable reason for the decision
+         """
+         import time
+
+         # Check if error is retryable according to policy
+         if not retry_policy.is_retryable(error):
+             return False, f"Error type {type(error).__name__} is not retryable"
+
+         # Check max_attempts limit
+         if retry_policy.max_attempts is not None and attempt >= retry_policy.max_attempts:
+             return False, f"Max attempts ({retry_policy.max_attempts}) reached"
+
+         # Check max_duration limit
+         if retry_policy.max_duration is not None:
+             elapsed = time.time() - start_time
+             if elapsed >= retry_policy.max_duration:
+                 return (
+                     False,
+                     f"Max duration ({retry_policy.max_duration}s) exceeded (elapsed: {elapsed:.1f}s)",
+                 )
+
+         # Should retry
+         return True, f"Will retry (attempt {attempt + 1})"
+
+
+ def activity(
+     func: F | None = None, *, retry_policy: "RetryPolicy | None" = None
+ ) -> F | Callable[[F], F]:
+     """
+     Decorator for defining activities (atomic units of work) with automatic retry.
+
+     Activities are async functions that take a WorkflowContext as the first
+     parameter, followed by any other parameters.
+
+     Activities are automatically wrapped in a transaction, ensuring that
+     activity execution, history recording, and event sending are atomic.
+     Each retry attempt is executed in an independent transaction.
+
+     When using ctx.session to access the Edda-managed session, all operations
+     (activity execution, history recording, event sending) use that shared session,
+     ensuring atomicity within a single transaction.
+
+     For non-idempotent operations (e.g., external API calls), place them in
+     Activities to leverage result caching during replay. For operations that
+     can be safely re-executed during replay, place them directly in the
+     Workflow function.
+
+     Example:
+         >>> @activity  # Uses default retry policy (5 attempts, exponential backoff)
+         ... async def reserve_inventory(ctx: WorkflowContext, order_id: str) -> dict:
+         ...     # Your business logic here
+         ...     return {"reservation_id": "123"}
+
+         >>> from edda.retry import RetryPolicy, AGGRESSIVE_RETRY
+         >>> @activity(retry_policy=AGGRESSIVE_RETRY)  # Custom retry policy
+         ... async def process_payment(ctx: WorkflowContext, amount: float) -> dict:
+         ...     # Fast retries for low-latency services
+         ...     return {"status": "completed"}
+
+         >>> @activity  # Non-idempotent operations cached during replay
+         ... async def charge_credit_card(ctx: WorkflowContext, amount: float) -> dict:
+         ...     # External API call - result is cached, won't be called again on replay
+         ...     # If this fails, automatic retry with exponential backoff
+         ...     return {"transaction_id": "txn_123"}
+
+         >>> from edda.exceptions import TerminalError
+         >>> @activity
+         ... async def validate_user(ctx: WorkflowContext, user_id: str) -> dict:
+         ...     user = await fetch_user(user_id)
+         ...     if not user:
+         ...         # Don't retry - user doesn't exist
+         ...         raise TerminalError(f"User {user_id} not found")
+         ...     return {"user_id": user_id, "name": user.name}
+
+     Args:
+         func: Async function to wrap as an activity
+         retry_policy: Optional retry policy for this activity.
+             If None, uses the default policy from EddaApp.
+
+     Returns:
+         Decorated function that can be called within a workflow
+
+     Raises:
+         RetryExhaustedError: When all retry attempts are exhausted
+         TerminalError: For non-retryable errors (no retry attempted)
+     """
+
+     def decorator(f: F) -> F:
+         # Verify the function is async
+         if not inspect.iscoroutinefunction(f):
+             raise TypeError(f"Activity {f.__name__} must be an async function")
+
+         # Create the Activity wrapper with retry policy
+         activity_wrapper = Activity(f, retry_policy=retry_policy)
+
+         # Mark as activity for introspection
+         activity_wrapper._is_activity = True  # type: ignore[attr-defined]
+
+         # Return the wrapper cast to the original type
+         return cast(F, activity_wrapper)
+
+     # Support both @activity and @activity(retry_policy=...)
+     if func is None:
+         # Called with arguments: @activity(retry_policy=...)
+         return decorator
+     else:
+         # Called without arguments: @activity
+         return decorator(func)
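
Read as a whole, Activity.__call__ gives callers a small error contract: exceptions the resolved RetryPolicy considers retryable are retried with backoff, each attempt in its own transaction; TerminalError and cancellation short-circuit; and exhaustion surfaces as RetryExhaustedError chained to the last underlying error. The sketch below illustrates that contract using the AGGRESSIVE_RETRY policy named in the docstring above; gateway_charge is a hypothetical stand-in for an external API, not part of this package.

    from edda import RetryExhaustedError, TerminalError, WorkflowContext, activity
    from edda.retry import AGGRESSIVE_RETRY

    async def gateway_charge(amount: float) -> dict:
        # Hypothetical stand-in for an external payment API call.
        return {"status": "ok", "transaction_id": "txn_123", "amount": amount}

    @activity(retry_policy=AGGRESSIVE_RETRY)  # activity-level policy wins over the app default
    async def charge_card(ctx: WorkflowContext, amount: float) -> dict:
        response = await gateway_charge(amount)
        if response["status"] == "card_declined":
            # Business failure: retrying cannot help, so fail without retry.
            raise TerminalError("card declined")
        # Any other exception raised here is retried per AGGRESSIVE_RETRY.
        return response

    # From the calling workflow, exhausted retries surface as RetryExhaustedError,
    # with the final underlying exception attached as __cause__:
    #
    #     try:
    #         await charge_card(ctx, 42.0)
    #     except RetryExhaustedError as exc:
    #         root_cause = exc.__cause__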