pyworkflow-engine 0.1.11__tar.gz → 0.1.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/PKG-INFO +1 -1
  2. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/step-context.mdx +4 -6
  3. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/13_step_context.py +0 -6
  4. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyproject.toml +1 -1
  5. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/__init__.py +1 -1
  6. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/celery/app.py +97 -3
  7. pyworkflow_engine-0.1.13/pyworkflow/celery/loop.py +108 -0
  8. pyworkflow_engine-0.1.13/pyworkflow/celery/singleton.py +368 -0
  9. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/celery/tasks.py +553 -111
  10. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/worker.py +13 -16
  11. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/config.py +5 -0
  12. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/context/base.py +4 -0
  13. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/context/local.py +27 -1
  14. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/context/step_context.py +1 -11
  15. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/core/step.py +43 -15
  16. pyworkflow_engine-0.1.13/pyworkflow/core/validation.py +112 -0
  17. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/engine/events.py +44 -30
  18. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/engine/executor.py +21 -1
  19. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/engine/replay.py +0 -39
  20. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/observability/logging.py +43 -1
  21. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/runtime/celery.py +1 -1
  22. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/runtime/local.py +41 -1
  23. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/config.py +81 -2
  24. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/postgres.py +103 -34
  25. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow_engine.egg-info/SOURCES.txt +8 -1
  26. pyworkflow_engine-0.1.13/tests/integration/test_singleton.py +449 -0
  27. pyworkflow_engine-0.1.13/tests/integration/test_workflow_suspended.py +438 -0
  28. pyworkflow_engine-0.1.13/tests/unit/test_singleton.py +686 -0
  29. pyworkflow_engine-0.1.13/tests/unit/test_validation.py +322 -0
  30. pyworkflow_engine-0.1.13/tests/unit/test_workflow_suspended.py +405 -0
  31. pyworkflow_engine-0.1.11/tests/unit/__init__.py +0 -0
  32. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/CLAUDE.md +0 -0
  33. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/DISTRIBUTED.md +0 -0
  34. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/LICENSE +0 -0
  35. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/MANIFEST.in +0 -0
  36. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/README.md +0 -0
  37. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/RELEASING.md +0 -0
  38. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/cancellation.mdx +0 -0
  39. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/continue-as-new.mdx +0 -0
  40. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/events.mdx +0 -0
  41. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/fault-tolerance.mdx +0 -0
  42. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/hooks.mdx +0 -0
  43. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/limitations.mdx +0 -0
  44. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/schedules.mdx +0 -0
  45. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/sleep.mdx +0 -0
  46. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/steps.mdx +0 -0
  47. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/workflows.mdx +0 -0
  48. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/guides/cli.mdx +0 -0
  49. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/guides/configuration.mdx +0 -0
  50. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/introduction.mdx +0 -0
  51. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/quickstart.mdx +0 -0
  52. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/__init__.py +0 -0
  53. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/__init__.py +0 -0
  54. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/docker-compose.yml +0 -0
  55. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/pyworkflow.config.yaml +0 -0
  56. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/__init__.py +0 -0
  57. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/basic.py +0 -0
  58. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/batch_processing.py +0 -0
  59. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/cancellation.py +0 -0
  60. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/child_workflow_patterns.py +0 -0
  61. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/child_workflows.py +0 -0
  62. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/continue_as_new.py +0 -0
  63. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/fault_tolerance.py +0 -0
  64. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/hooks.py +0 -0
  65. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/idempotency.py +0 -0
  66. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/long_running.py +0 -0
  67. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/retries.py +0 -0
  68. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/schedules.py +0 -0
  69. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/durable/workflows/step_context.py +0 -0
  70. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/transient/01_basic_workflow.py +0 -0
  71. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/transient/02_fault_tolerance.py +0 -0
  72. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/transient/__init__.py +0 -0
  73. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/celery/transient/pyworkflow.config.yaml +0 -0
  74. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/__init__.py +0 -0
  75. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/01_basic_workflow.py +0 -0
  76. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/02_file_storage.py +0 -0
  77. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/03_retries.py +0 -0
  78. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/04_long_running.py +0 -0
  79. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/05_event_log.py +0 -0
  80. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/06_idempotency.py +0 -0
  81. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/07_hooks.py +0 -0
  82. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/08_cancellation.py +0 -0
  83. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/09_child_workflows.py +0 -0
  84. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/10_child_workflow_patterns.py +0 -0
  85. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/11_continue_as_new.py +0 -0
  86. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/12_schedules.py +0 -0
  87. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/__init__.py +0 -0
  88. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/transient/01_quick_tasks.py +0 -0
  89. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/transient/02_retries.py +0 -0
  90. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/transient/03_sleep.py +0 -0
  91. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/transient/__init__.py +0 -0
  92. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/aws/__init__.py +0 -0
  93. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/aws/context.py +0 -0
  94. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/aws/handler.py +0 -0
  95. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/aws/testing.py +0 -0
  96. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/celery/__init__.py +0 -0
  97. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/celery/scheduler.py +0 -0
  98. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/__init__.py +0 -0
  99. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/__main__.py +0 -0
  100. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/__init__.py +0 -0
  101. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/hooks.py +0 -0
  102. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/quickstart.py +0 -0
  103. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/runs.py +0 -0
  104. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/scheduler.py +0 -0
  105. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/schedules.py +0 -0
  106. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/setup.py +0 -0
  107. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/commands/workflows.py +0 -0
  108. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/output/__init__.py +0 -0
  109. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/output/formatters.py +0 -0
  110. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/output/styles.py +0 -0
  111. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/__init__.py +0 -0
  112. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/async_helpers.py +0 -0
  113. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/config.py +0 -0
  114. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/config_generator.py +0 -0
  115. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/discovery.py +0 -0
  116. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/docker_manager.py +0 -0
  117. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/interactive.py +0 -0
  118. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/cli/utils/storage.py +0 -0
  119. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/context/__init__.py +0 -0
  120. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/context/aws.py +0 -0
  121. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/context/mock.py +0 -0
  122. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/core/__init__.py +0 -0
  123. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/core/exceptions.py +0 -0
  124. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/core/registry.py +0 -0
  125. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/core/scheduled.py +0 -0
  126. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/core/workflow.py +0 -0
  127. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/discovery.py +0 -0
  128. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/engine/__init__.py +0 -0
  129. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/observability/__init__.py +0 -0
  130. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/__init__.py +0 -0
  131. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/child_handle.py +0 -0
  132. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/child_workflow.py +0 -0
  133. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/continue_as_new.py +0 -0
  134. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/define_hook.py +0 -0
  135. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/hooks.py +0 -0
  136. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/resume_hook.py +0 -0
  137. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/schedule.py +0 -0
  138. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/shield.py +0 -0
  139. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/primitives/sleep.py +0 -0
  140. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/runtime/__init__.py +0 -0
  141. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/runtime/base.py +0 -0
  142. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/runtime/factory.py +0 -0
  143. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/scheduler/__init__.py +0 -0
  144. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/scheduler/local.py +0 -0
  145. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/serialization/__init__.py +0 -0
  146. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/serialization/decoder.py +0 -0
  147. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/serialization/encoder.py +0 -0
  148. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/__init__.py +0 -0
  149. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/base.py +0 -0
  150. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/cassandra.py +0 -0
  151. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/dynamodb.py +0 -0
  152. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/file.py +0 -0
  153. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/memory.py +0 -0
  154. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/mysql.py +0 -0
  155. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/schemas.py +0 -0
  156. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/storage/sqlite.py +0 -0
  157. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/utils/__init__.py +0 -0
  158. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/utils/duration.py +0 -0
  159. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/utils/schedule.py +0 -0
  160. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/setup.cfg +0 -0
  161. {pyworkflow_engine-0.1.11/tests/examples → pyworkflow_engine-0.1.13/tests/integration}/__init__.py +0 -0
  162. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_cancellation.py +0 -0
  163. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_cassandra_storage.py +0 -0
  164. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_child_workflows.py +0 -0
  165. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_continue_as_new.py +0 -0
  166. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_dynamodb_storage.py +0 -0
  167. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_fault_tolerance.py +0 -0
  168. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/integration/test_schedule_storage.py +0 -0
  169. {pyworkflow_engine-0.1.11/tests/integration → pyworkflow_engine-0.1.13/tests/unit}/__init__.py +0 -0
  170. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/backends/__init__.py +0 -0
  171. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/backends/test_cassandra_storage.py +0 -0
  172. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/backends/test_dynamodb_storage.py +0 -0
  173. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/backends/test_postgres_storage.py +0 -0
  174. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/backends/test_sqlite_storage.py +0 -0
  175. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/conftest.py +0 -0
  176. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_cancellation.py +0 -0
  177. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_child_workflows.py +0 -0
  178. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_continue_as_new.py +0 -0
  179. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_event_limits.py +0 -0
  180. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_executor.py +0 -0
  181. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_fault_tolerance.py +0 -0
  182. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_hooks.py +0 -0
  183. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_registry.py +0 -0
  184. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_replay.py +0 -0
  185. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_schedule_schemas.py +0 -0
  186. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_schedule_utils.py +0 -0
  187. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_scheduled_workflow.py +0 -0
  188. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_step.py +0 -0
  189. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_step_context.py +0 -0
  190. {pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/tests/unit/test_workflow.py +0 -0
{pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyworkflow-engine
-Version: 0.1.11
+Version: 0.1.13
 Summary: A Python implementation of durable, event-sourced workflows inspired by Vercel Workflow
 Author: PyWorkflow Contributors
 License: MIT
{pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/docs/concepts/step-context.mdx

@@ -129,7 +129,7 @@ async def my_workflow(user_id: str):
 ```
 
 <Note>
-`set_step_context()` is an async function because it persists the context to storage and records a `CONTEXT_UPDATED` event.
+`set_step_context()` is an async function because it persists the context to storage.
 </Note>
 
 ### Reading Context (Workflow and Steps)
@@ -201,14 +201,12 @@ async def process_order(order_id: str):
 
 ## Context Persistence and Replay
 
-Step Context is event-sourced for durability:
+Step Context is persisted for durability:
 
-1. **Persistence**: When you call `set_step_context()`, the context is:
-   - Stored in the `WorkflowRun.context` field
-   - Recorded as a `CONTEXT_UPDATED` event
+1. **Persistence**: When you call `set_step_context()`, the context is stored in the `WorkflowRun.context` field.
 
 2. **Replay**: When a workflow resumes after suspension:
-   - Context is restored from the event log
+   - Context is restored from `WorkflowRun.context`
    - Steps receive the same context they had during original execution
 
 ```python
{pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/examples/local/durable/13_step_context.py

@@ -20,7 +20,6 @@ from pyworkflow import (
     StepContext,
     configure,
     get_step_context,
-    get_workflow_events,
     get_workflow_run,
     has_step_context,
     reset_config,
@@ -190,11 +189,6 @@ async def main():
     # Check stored context
     print(f"Stored context: {run.context}")
 
-    # Inspect events for CONTEXT_UPDATED
-    events = await get_workflow_events(run_id)
-    context_events = [e for e in events if e.type.value == "context.updated"]
-    print(f"Context update events: {len(context_events)}")
-
     # Example 2: Context with sleep (persistence test)
     print("\n--- Example 2: Context Persistence Across Sleep ---")
     run_id_2 = await start(
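Taken together, the doc and example changes above remove the `CONTEXT_UPDATED` event entirely: context durability now rides on `WorkflowRun.context` alone. A minimal sketch of the resulting usage, assuming `workflow`/`step` decorators and a dict-shaped context (the decorator names and exact signatures are assumptions, not confirmed by this diff):

```python
# Hypothetical sketch; only set_step_context/get_step_context and the
# WorkflowRun.context persistence model come from the diff above.
from pyworkflow import get_step_context, set_step_context, step, workflow


@step()
async def report_tenant() -> str:
    ctx = get_step_context()  # on replay, steps see the same context as the original run
    return f"tenant={ctx.get('tenant_id')}"


@workflow()
async def my_workflow(tenant_id: str) -> str:
    # Persists to WorkflowRun.context; as of 0.1.13 this no longer
    # records a CONTEXT_UPDATED event in the event log.
    await set_step_context({"tenant_id": tenant_id})
    return await report_tenant()
```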
{pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyproject.toml

@@ -7,7 +7,7 @@ packages = [{include = "pyworkflow"}]
 
 [project]
 name = "pyworkflow-engine"
-version = "0.1.11"
+version = "0.1.13"
 description = "A Python implementation of durable, event-sourced workflows inspired by Vercel Workflow"
 readme = "README.md"
 requires-python = ">=3.11"
{pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/__init__.py

@@ -29,7 +29,7 @@ Quick Start:
     >>> run_id = await start(my_workflow, "Alice")
 """
 
-__version__ = "0.1.11"
+__version__ = "0.1.13"
 
 # Configuration
 from pyworkflow.config import (
{pyworkflow_engine-0.1.11 → pyworkflow_engine-0.1.13}/pyworkflow/celery/app.py

@@ -15,10 +15,28 @@ garbage collector and Celery's saferepr module. It does not affect functionality
 import os
 
 from celery import Celery
+from celery.signals import worker_init, worker_process_init, worker_shutdown
 from kombu import Exchange, Queue
 
 from pyworkflow.observability.logging import configure_logging
 
+# Track if logging has been configured in this process
+_logging_configured = False
+
+
+def _configure_worker_logging() -> None:
+    """Configure logging for the current worker process."""
+    global _logging_configured
+    if not _logging_configured:
+        from loguru import logger as loguru_logger
+
+        # Enable pyworkflow logging (may have been disabled by CLI)
+        loguru_logger.enable("pyworkflow")
+
+        log_level = os.getenv("PYWORKFLOW_LOG_LEVEL", "INFO").upper()
+        configure_logging(level=log_level)
+        _logging_configured = True
+
 
 def discover_workflows(modules: list[str] | None = None) -> None:
     """
@@ -118,6 +136,14 @@ def create_celery_app(
         accept_content=["json"],
         timezone="UTC",
         enable_utc=True,
+        # Broker transport options - prevent task redelivery
+        # See: https://github.com/celery/celery/issues/5935
+        broker_transport_options={
+            "visibility_timeout": 3600,  # 1 hour - prevent Redis from re-queueing tasks
+        },
+        result_backend_transport_options={
+            "visibility_timeout": 3600,
+        },
         # Task routing
         task_default_queue="pyworkflow.default",
         task_default_exchange="pyworkflow",
@@ -154,7 +180,7 @@ def create_celery_app(
         task_reject_on_worker_lost=True,
         worker_prefetch_multiplier=1,  # Fair task distribution
         # Retry settings
-        task_autoretry_for=(Exception,),
+        task_autoretry_for=(),
         task_retry_backoff=True,
         task_retry_backoff_max=600,  # 10 minutes max
         task_retry_jitter=True,
@@ -168,8 +194,19 @@ def create_celery_app(
         worker_task_log_format="[%(asctime)s: %(levelname)s/%(processName)s] [%(task_name)s(%(task_id)s)] %(message)s",
     )
 
-    # Configure logging
-    configure_logging(level="INFO")
+    # Configure singleton locking for Redis brokers
+    # This enables distributed locking to prevent duplicate task execution
+    is_redis_broker = broker_url.startswith("redis://") or broker_url.startswith("rediss://")
+    if is_redis_broker:
+        app.conf.update(
+            singleton_backend_url=broker_url,
+            singleton_key_prefix="pyworkflow:lock:",
+            singleton_lock_expiry=3600,  # 1 hour TTL (safety net)
+        )
+
+    # Note: Logging is configured via Celery signals (worker_init, worker_process_init)
+    # to ensure proper initialization AFTER process forking.
+    # See on_worker_init() and on_worker_process_init() below.
 
     # Auto-discover workflows from environment variable or configured modules
     discover_workflows()
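These `singleton_*` settings are consumed by `SingletonWorkflowTask` in the new `pyworkflow/celery/singleton.py` (shown later in this diff). Because the lock key derivation is deterministic, it can be reproduced outside a worker, e.g. to inspect a stuck lock in Redis; a sketch using `generate_lock_key` as defined in that module (the task name here is hypothetical):

```python
from pyworkflow.celery.singleton import generate_lock_key

# Same task name + args + kwargs always hash to the same Redis key,
# which is what makes cross-worker deduplication possible.
key = generate_lock_key(
    "pyworkflow.execute_step",      # hypothetical task name
    task_args=["run-123", "step-7"],
    task_kwargs={},
    key_prefix="pyworkflow:lock:",  # matches singleton_key_prefix above
)
print(key)  # pyworkflow:lock:<32-char md5 hex digest>
```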
@@ -182,6 +219,63 @@ def create_celery_app(
 celery_app = create_celery_app()
 
 
+# ========== Celery Worker Signals ==========
+# These signals ensure proper initialization in forked worker processes
+
+
+@worker_init.connect
+def on_worker_init(**kwargs):
+    """
+    Called when the main worker process starts (before forking).
+
+    For prefork pool, this runs in the parent process.
+    For solo/threads pool, this is the main initialization point.
+    """
+    _configure_worker_logging()
+
+
+@worker_process_init.connect
+def on_worker_process_init(**kwargs):
+    """
+    Called when a worker child process is initialized (after forking).
+
+    This is critical for prefork pool:
+    - loguru's background thread doesn't survive fork()
+    - We need a persistent event loop for connection pool reuse
+    """
+    _configure_worker_logging()
+
+    # Initialize persistent event loop for this worker
+    from pyworkflow.celery.loop import init_worker_loop
+
+    init_worker_loop()
+
+
+@worker_shutdown.connect
+def on_worker_shutdown(**kwargs):
+    """
+    Called when the worker is shutting down.
+
+    Cleans up:
+    - Storage backend connections (PostgreSQL connection pools, etc.)
+    - The persistent event loop
+    """
+    from loguru import logger
+
+    from pyworkflow.celery.loop import close_worker_loop, run_async
+    from pyworkflow.storage.config import disconnect_all_cached
+
+    try:
+        # Clean up storage connections using the persistent loop
+        run_async(disconnect_all_cached())
+    except Exception as e:
+        # Log but don't fail shutdown
+        logger.warning(f"Error during storage cleanup on shutdown: {e}")
+    finally:
+        # Close the persistent event loop
+        close_worker_loop()
+
+
 def get_celery_app() -> Celery:
     """
     Get the global Celery application instance.
pyworkflow_engine-0.1.13/pyworkflow/celery/loop.py

@@ -0,0 +1,108 @@
+"""
+Persistent event loop management for Celery workers.
+
+This module provides a single, persistent event loop per worker process.
+Using a persistent loop allows asyncpg connection pools to be reused across
+tasks, avoiding the overhead of creating/destroying pools for each task.
+
+Usage:
+    from pyworkflow.celery.loop import run_async
+
+    # Instead of: result = asyncio.run(some_coroutine())
+    # Use: result = run_async(some_coroutine())
+"""
+
+import asyncio
+import threading
+from collections.abc import Coroutine
+from typing import Any, TypeVar
+
+T = TypeVar("T")
+
+# Per-worker persistent event loop
+# Created in worker_process_init, closed in worker_shutdown
+_worker_loop: asyncio.AbstractEventLoop | None = None
+_loop_lock = threading.Lock()
+
+
+def init_worker_loop() -> None:
+    """
+    Initialize the persistent event loop for this worker process.
+
+    Called from worker_process_init signal handler.
+    """
+    global _worker_loop
+
+    with _loop_lock:
+        if _worker_loop is None or _worker_loop.is_closed():
+            _worker_loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(_worker_loop)
+
+
+def close_worker_loop() -> None:
+    """
+    Close the persistent event loop for this worker process.
+
+    Called from worker_shutdown signal handler.
+    """
+    global _worker_loop
+
+    with _loop_lock:
+        if _worker_loop is not None and not _worker_loop.is_closed():
+            try:
+                # Run any pending cleanup
+                _worker_loop.run_until_complete(_worker_loop.shutdown_asyncgens())
+            except Exception:
+                pass
+            finally:
+                _worker_loop.close()
+                _worker_loop = None
+
+
+def get_worker_loop() -> asyncio.AbstractEventLoop:
+    """
+    Get the persistent event loop for this worker process.
+
+    If no loop exists (e.g., running outside Celery worker), creates one.
+
+    Returns:
+        The worker's event loop
+    """
+    global _worker_loop
+
+    with _loop_lock:
+        if _worker_loop is None or _worker_loop.is_closed():
+            # Not in a Celery worker or loop was closed - create a new one
+            _worker_loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(_worker_loop)
+        return _worker_loop
+
+
+def run_async(coro: Coroutine[Any, Any, T]) -> T:
+    """
+    Run a coroutine on the persistent worker event loop.
+
+    This is a drop-in replacement for asyncio.run() that reuses
+    the same event loop across tasks, allowing connection pools
+    to be shared.
+
+    Args:
+        coro: The coroutine to run
+
+    Returns:
+        The result of the coroutine
+
+    Example:
+        # Instead of:
+        result = asyncio.run(storage.get_run(run_id))
+
+        # Use:
+        result = run_async(storage.get_run(run_id))
+    """
+    loop = get_worker_loop()
+    return loop.run_until_complete(coro)
+
+
+def is_loop_running() -> bool:
+    """Check if the worker loop exists and is not closed."""
+    return _worker_loop is not None and not _worker_loop.is_closed()
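The module docstring above shows the intended substitution. For concreteness, a sketch of a task body making the swap (the task itself and the storage accessor are hypothetical; `run_async` and `celery_app` are from this diff):

```python
from pyworkflow.celery.app import celery_app
from pyworkflow.celery.loop import run_async


@celery_app.task(name="pyworkflow.debug.get_run_status")  # hypothetical task
def get_run_status(run_id: str) -> str:
    from pyworkflow.storage.config import get_storage  # hypothetical accessor

    storage = get_storage()
    # Runs on the persistent per-process loop created in worker_process_init,
    # so an asyncpg pool opened on that loop is reused across invocations
    # instead of being torn down by asyncio.run() after every task.
    run = run_async(storage.get_run(run_id))
    return run.status
```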
pyworkflow_engine-0.1.13/pyworkflow/celery/singleton.py

@@ -0,0 +1,368 @@
+"""
+Singleton task implementation for PyWorkflow.
+
+Provides Redis-based distributed locking to prevent duplicate task execution.
+Self-contained implementation (no external dependencies beyond redis).
+
+Based on:
+- steinitzu/celery-singleton library concepts
+- FlowHunt's battle-tested refinements for retry-safe lock management
+"""
+
+import inspect
+import json
+from hashlib import md5
+from typing import Any
+from uuid import uuid4
+
+from celery import Task
+from celery.exceptions import WorkerLostError
+from loguru import logger
+
+
+def generate_lock_key(
+    task_name: str,
+    task_args: list[Any] | tuple[Any, ...] | None = None,
+    task_kwargs: dict[str, Any] | None = None,
+    key_prefix: str = "pyworkflow:lock:",
+) -> str:
+    """
+    Generate a unique lock key for a task based on its name and arguments.
+
+    Uses MD5 hash to keep key length reasonable while ensuring uniqueness.
+    """
+    str_args = json.dumps(task_args or [], sort_keys=True, default=str)
+    str_kwargs = json.dumps(task_kwargs or {}, sort_keys=True, default=str)
+    task_hash = md5((task_name + str_args + str_kwargs).encode()).hexdigest()
+    return key_prefix + task_hash
+
+
+class SingletonConfig:
+    """Configuration for singleton task behavior."""
+
+    def __init__(self, app: Any):
+        self.app = app
+
+    @property
+    def backend_url(self) -> str | None:
+        return self.app.conf.get("singleton_backend_url")
+
+    @property
+    def key_prefix(self) -> str:
+        return self.app.conf.get("singleton_key_prefix", "pyworkflow:lock:")
+
+    @property
+    def lock_expiry(self) -> int:
+        return self.app.conf.get("singleton_lock_expiry", 3600)
+
+    @property
+    def raise_on_duplicate(self) -> bool:
+        return self.app.conf.get("singleton_raise_on_duplicate", False)
+
+
+class RedisLockBackend:
+    """Redis backend for distributed locking."""
+
+    def __init__(self, url: str):
+        import redis
+
+        self.redis = redis.from_url(url, decode_responses=True)
+
+    def lock(self, lock_key: str, task_id: str, expiry: int | None = None) -> bool:
+        """Acquire lock atomically. Returns True if acquired."""
+        return bool(self.redis.set(lock_key, task_id, nx=True, ex=expiry))
+
+    def unlock(self, lock_key: str) -> None:
+        """Release the lock."""
+        self.redis.delete(lock_key)
+
+    def get(self, lock_key: str) -> str | None:
+        """Get the task ID holding the lock."""
+        return self.redis.get(lock_key)
+
+
+class DuplicateTaskError(Exception):
+    """Raised when attempting to queue a duplicate singleton task."""
+
+    def __init__(self, message: str, task_id: str):
+        self.task_id = task_id
+        super().__init__(message)
+
+
+class SingletonWorkflowTask(Task):
+    """
+    Base class for singleton workflow tasks with distributed locking.
+
+    Features:
+    - Redis-based lock prevents duplicate execution
+    - Support for unique_on with nested dict/list access (e.g., "data.run_id")
+    - Retry-safe: locks NOT released on failure (prevents duplicate during retries)
+    - Lock released on success or when max retries exceeded
+    - Time-based lock expiry as safety net
+
+    Configuration:
+        unique_on: List of argument names to use for uniqueness (e.g., ["run_id", "step_id"])
+            Supports nested access with dot notation (e.g., ["data.run_id"])
+        raise_on_duplicate: If True, raise DuplicateTaskError instead of returning existing result
+        lock_expiry: Lock TTL in seconds (default: 3600 = 1 hour)
+
+    Example:
+        @celery_app.task(
+            base=SingletonWorkflowTask,
+            unique_on=["run_id", "step_id"],
+        )
+        def my_task(run_id: str, step_id: str, data: dict):
+            ...
+    """
+
+    abstract = True
+
+    # Singleton configuration (can be overridden per-task)
+    unique_on: list[str] | str | None = None
+    raise_on_duplicate: bool | None = None
+    lock_expiry: int | None = None
+
+    # Lock behavior
+    release_lock_on_success: bool = True
+    release_lock_on_failure: bool = False  # Keep lock during retries
+
+    # Celery task settings
+    max_retries: int | None = None
+    acks_on_failure_or_timeout: bool = True
+
+    # Cached instances (class-level, shared across task instances)
+    _singleton_backend: RedisLockBackend | None = None
+    _singleton_config: SingletonConfig | None = None
+
+    @property
+    def singleton_config(self) -> SingletonConfig:
+        if self._singleton_config is None:
+            self._singleton_config = SingletonConfig(self.app)
+        return self._singleton_config
+
+    @property
+    def singleton_backend(self) -> RedisLockBackend | None:
+        if self._singleton_backend is None:
+            url = self.singleton_config.backend_url
+            if not url:
+                # Try broker URL if it's Redis
+                broker = self.app.conf.broker_url or ""
+                if broker.startswith("redis://") or broker.startswith("rediss://"):
+                    url = broker
+            if url:
+                self._singleton_backend = RedisLockBackend(url)
+        return self._singleton_backend
+
+    @property
+    def _lock_expiry(self) -> int:
+        if self.lock_expiry is not None:
+            return self.lock_expiry
+        return self.singleton_config.lock_expiry
+
+    @property
+    def _raise_on_duplicate(self) -> bool:
+        if self.raise_on_duplicate is not None:
+            return self.raise_on_duplicate
+        return self.singleton_config.raise_on_duplicate
+
+    def generate_lock(
+        self,
+        task_name: str,
+        task_args: list[Any] | tuple[Any, ...] | None = None,
+        task_kwargs: dict[str, Any] | None = None,
+    ) -> str:
+        """Generate lock key, supporting nested attribute access via unique_on."""
+        unique_on = self.unique_on
+        task_args = task_args or []
+        task_kwargs = task_kwargs or {}
+
+        if unique_on:
+            if isinstance(unique_on, str):
+                unique_on = [unique_on]
+
+            # Bind arguments to function signature
+            sig = inspect.signature(self.run)
+            bound = sig.bind(*task_args, **task_kwargs).arguments
+
+            unique_args: list[Any] = []
+            for key in unique_on:
+                keys = key.split(".")
+                if keys[0] not in bound:
+                    raise ValueError(f"Key '{keys[0]}' not found in task arguments")
+
+                value = bound[keys[0]]
+                # Navigate nested structure (supports one level of nesting)
+                if len(keys) == 2:
+                    nested_key = keys[1]
+                    if isinstance(value, dict):
+                        if nested_key not in value:
+                            raise ValueError(f"Key '{nested_key}' not found in dict")
+                        unique_args.append(value[nested_key])
+                    elif isinstance(value, (list, tuple)):
+                        unique_args.append(value[int(nested_key)])
+                    elif hasattr(value, nested_key):
+                        unique_args.append(getattr(value, nested_key))
+                    else:
+                        raise ValueError(f"Key '{key}' has unsupported type")
+                elif len(keys) == 1:
+                    unique_args.append(value)
+                else:
+                    raise ValueError(f"Key '{key}' has too many levels (max 2)")
+
+            return generate_lock_key(
+                task_name,
+                unique_args,
+                {},
+                key_prefix=self.singleton_config.key_prefix,
+            )
+        else:
+            return generate_lock_key(
+                task_name,
+                list(task_args),
+                task_kwargs,
+                key_prefix=self.singleton_config.key_prefix,
+            )
+
+    def acquire_lock(self, lock_key: str, task_id: str) -> bool:
+        """Attempt to acquire lock. Returns True if successful."""
+        backend = self.singleton_backend
+        if backend is None:
+            return True  # No Redis = no locking
+        return backend.lock(lock_key, task_id, expiry=self._lock_expiry)
+
+    def release_lock(
+        self,
+        task_args: list[Any] | tuple[Any, ...] | None = None,
+        task_kwargs: dict[str, Any] | None = None,
+    ) -> None:
+        """Release the lock for this task."""
+        backend = self.singleton_backend
+        if backend is None:
+            return
+        lock_key = self.generate_lock(self.name, task_args, task_kwargs)
+        backend.unlock(lock_key)
+
+    def get_existing_task_id(self, lock_key: str) -> str | None:
+        """Get task ID holding the lock, if any."""
+        backend = self.singleton_backend
+        if backend is None:
+            return None
+        return backend.get(lock_key)
+
+    def apply_async(
+        self,
+        args: list[Any] | tuple[Any, ...] | None = None,
+        kwargs: dict[str, Any] | None = None,
+        task_id: str | None = None,
+        **options: Any,
+    ) -> Any:
+        """Override apply_async to implement singleton behavior."""
+        args = args or []
+        kwargs = kwargs or {}
+        task_id = task_id or str(uuid4())
+
+        backend = self.singleton_backend
+        if backend is None:
+            # No Redis = normal behavior
+            return super().apply_async(args, kwargs, task_id=task_id, **options)
+
+        lock_key = self.generate_lock(self.name, args, kwargs)
+
+        # Try to acquire lock and run
+        if self.acquire_lock(lock_key, task_id):
+            try:
+                return super().apply_async(args, kwargs, task_id=task_id, **options)
+            except Exception:
+                # Release lock if apply_async fails
+                backend.unlock(lock_key)
+                raise
+
+        # Lock not acquired - check for existing task
+        existing_task_id = self.get_existing_task_id(lock_key)
+        if existing_task_id:
+            logger.debug(
+                "Singleton: duplicate task blocked",
+                task=self.name,
+                existing_task_id=existing_task_id,
+            )
+            if self._raise_on_duplicate:
+                raise DuplicateTaskError(f"Duplicate of task {existing_task_id}", existing_task_id)
+            return self.AsyncResult(existing_task_id)
+
+        # Race condition: lock disappeared, retry
+        if self.acquire_lock(lock_key, task_id):
+            try:
+                return super().apply_async(args, kwargs, task_id=task_id, **options)
+            except Exception:
+                backend.unlock(lock_key)
+                raise
+
+        # Still can't acquire - return existing or submit anyway
+        existing_task_id = self.get_existing_task_id(lock_key)
+        if existing_task_id:
+            return self.AsyncResult(existing_task_id)
+
+        # Fallback: submit anyway (rare edge case)
+        logger.warning(f"Singleton lock unstable, submitting anyway: {self.name}")
+        return super().apply_async(args, kwargs, task_id=task_id, **options)
+
+    def on_success(
+        self, retval: Any, task_id: str, args: tuple[Any, ...], kwargs: dict[str, Any]
+    ) -> None:
+        """Release lock on successful task completion."""
+        if self.release_lock_on_success:
+            self.release_lock(task_args=args, task_kwargs=kwargs)
+
+    def on_failure(
+        self,
+        exc: Exception,
+        task_id: str,
+        args: tuple[Any, ...],
+        kwargs: dict[str, Any],
+        einfo: Any,
+    ) -> None:
+        """
+        Retry-aware lock management on failure.
+
+        - If task will retry: Keep lock
+        - If max retries exceeded: Release lock
+        """
+        max_retries_exceeded = False
+        if hasattr(self, "request") and self.request:
+            current_retries = getattr(self.request, "retries", 0)
+            max_retries = self.max_retries if self.max_retries is not None else 3
+            max_retries_exceeded = current_retries >= max_retries
+
+        if self.release_lock_on_failure or max_retries_exceeded:
+            self.release_lock(task_args=args, task_kwargs=kwargs)
+            if max_retries_exceeded:
+                logger.warning(
+                    f"Task {self.name} failed after {current_retries} retries. Lock released.",
+                    task_id=task_id,
+                    error=str(exc),
+                )
+
+        # Log appropriately
+        if isinstance(exc, WorkerLostError):
+            logger.warning("Task interrupted due to worker loss", task_id=task_id)
+        else:
+            logger.error(
+                f"Task {self.name} failed: {exc}",
+                task_id=task_id,
+                traceback=einfo.traceback if einfo else None,
+            )
+
+    def on_retry(
+        self,
+        exc: Exception,
+        task_id: str,
+        args: tuple[Any, ...],
+        kwargs: dict[str, Any],
+        einfo: Any,
+    ) -> None:
+        """Lock is retained during retry."""
+        logger.warning(
+            f"Task {self.name} retrying (lock retained)",
+            task_id=task_id,
+            retry_count=self.request.retries,
+        )
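To close the loop on the singleton machinery, a usage sketch adapted from the class docstring above (the task body is illustrative; the deduplication behavior follows from `apply_async` as implemented):

```python
from pyworkflow.celery.app import celery_app
from pyworkflow.celery.singleton import SingletonWorkflowTask


@celery_app.task(
    base=SingletonWorkflowTask,
    unique_on=["run_id", "step_id"],  # dot notation also works, e.g. "data.run_id"
)
def execute_step(run_id: str, step_id: str, data: dict):
    ...


# While the first submission holds the Redis lock, an identical submission
# returns the existing AsyncResult instead of queueing a duplicate:
first = execute_step.apply_async(kwargs={"run_id": "r1", "step_id": "s1", "data": {}})
dup = execute_step.apply_async(kwargs={"run_id": "r1", "step_id": "s1", "data": {}})
assert dup.id == first.id
```

Note the deliberate asymmetry in lock handling: the lock is released on success, but on failure it is retained until retries are exhausted, so a retrying task cannot race a duplicate submission.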