pyworkflow-engine 0.1.13__tar.gz → 0.1.15__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/PKG-INFO +1 -1
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyproject.toml +1 -1
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/__init__.py +1 -1
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/celery/singleton.py +6 -4
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/celery/tasks.py +93 -2
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/context/local.py +46 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/resume_hook.py +2 -1
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/runtime/base.py +4 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/runtime/celery.py +12 -1
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/runtime/local.py +8 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/base.py +4 -1
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/cassandra.py +30 -25
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/dynamodb.py +32 -16
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/file.py +39 -13
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/memory.py +28 -11
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/mysql.py +27 -11
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/postgres.py +29 -12
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/sqlite.py +29 -12
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_singleton.py +44 -3
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/CLAUDE.md +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/DISTRIBUTED.md +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/LICENSE +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/MANIFEST.in +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/README.md +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/RELEASING.md +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/cancellation.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/continue-as-new.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/events.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/fault-tolerance.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/hooks.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/limitations.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/schedules.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/sleep.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/step-context.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/steps.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/concepts/workflows.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/guides/cli.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/guides/configuration.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/introduction.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/docs/quickstart.mdx +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/docker-compose.yml +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/pyworkflow.config.yaml +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/basic.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/batch_processing.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/cancellation.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/child_workflow_patterns.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/child_workflows.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/continue_as_new.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/fault_tolerance.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/hooks.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/idempotency.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/long_running.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/retries.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/schedules.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/durable/workflows/step_context.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/transient/01_basic_workflow.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/transient/02_fault_tolerance.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/transient/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/celery/transient/pyworkflow.config.yaml +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/01_basic_workflow.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/02_file_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/03_retries.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/04_long_running.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/05_event_log.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/06_idempotency.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/07_hooks.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/08_cancellation.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/09_child_workflows.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/10_child_workflow_patterns.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/11_continue_as_new.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/12_schedules.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/13_step_context.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/durable/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/transient/01_quick_tasks.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/transient/02_retries.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/transient/03_sleep.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/examples/local/transient/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/aws/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/aws/context.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/aws/handler.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/aws/testing.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/celery/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/celery/app.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/celery/loop.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/celery/scheduler.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/__main__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/hooks.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/quickstart.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/runs.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/scheduler.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/schedules.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/setup.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/worker.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/commands/workflows.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/output/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/output/formatters.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/output/styles.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/async_helpers.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/config.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/config_generator.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/discovery.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/docker_manager.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/interactive.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/cli/utils/storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/config.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/context/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/context/aws.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/context/base.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/context/mock.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/context/step_context.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/exceptions.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/registry.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/scheduled.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/step.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/validation.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/core/workflow.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/discovery.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/engine/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/engine/events.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/engine/executor.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/engine/replay.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/observability/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/observability/logging.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/child_handle.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/child_workflow.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/continue_as_new.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/define_hook.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/hooks.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/schedule.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/shield.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/primitives/sleep.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/runtime/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/runtime/factory.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/scheduler/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/scheduler/local.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/serialization/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/serialization/decoder.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/serialization/encoder.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/config.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/storage/schemas.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/utils/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/utils/duration.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow/utils/schedule.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/pyworkflow_engine.egg-info/SOURCES.txt +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/setup.cfg +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_cancellation.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_cassandra_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_child_workflows.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_continue_as_new.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_dynamodb_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_fault_tolerance.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_schedule_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/integration/test_workflow_suspended.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/backends/__init__.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/backends/test_cassandra_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/backends/test_dynamodb_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/backends/test_postgres_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/backends/test_sqlite_storage.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/conftest.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_cancellation.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_child_workflows.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_continue_as_new.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_event_limits.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_executor.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_fault_tolerance.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_hooks.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_registry.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_replay.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_schedule_schemas.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_schedule_utils.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_scheduled_workflow.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_singleton.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_step.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_step_context.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_validation.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_workflow.py +0 -0
- {pyworkflow_engine-0.1.13 → pyworkflow_engine-0.1.15}/tests/unit/test_workflow_suspended.py +0 -0

pyproject.toml:

@@ -7,7 +7,7 @@ packages = [{include = "pyworkflow"}]
 
 [project]
 name = "pyworkflow-engine"
-version = "0.1.13"
+version = "0.1.15"
 description = "A Python implementation of durable, event-sourced workflows inspired by Vercel Workflow"
 readme = "README.md"
 requires-python = ">=3.11"
pyworkflow/celery/singleton.py:

@@ -96,7 +96,7 @@ class SingletonWorkflowTask(Task):
     Features:
     - Redis-based lock prevents duplicate execution
     - Support for unique_on with nested dict/list access (e.g., "data.run_id")
-    - Retry-safe:
+    - Retry-safe: lock released in on_retry callback to allow retry to acquire it
     - Lock released on success or when max retries exceeded
     - Time-based lock expiry as safety net

@@ -124,7 +124,7 @@ class SingletonWorkflowTask(Task):
 
     # Lock behavior
     release_lock_on_success: bool = True
-    release_lock_on_failure: bool = False  #
+    release_lock_on_failure: bool = False  # Only release on max retries exceeded
 
     # Celery task settings
     max_retries: int | None = None

@@ -360,9 +360,11 @@ class SingletonWorkflowTask(Task):
         kwargs: dict[str, Any],
         einfo: Any,
     ) -> None:
-        """
+        """Release lock during retry to allow retry task to acquire it."""
+        # Release lock so retry can acquire it via apply_async()
+        self.release_lock(task_args=args, task_kwargs=kwargs)
         logger.warning(
-            f"Task {self.name} retrying (lock
+            f"Task {self.name} retrying (lock released for retry)",
             task_id=task_id,
             retry_count=self.request.retries,
         )
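
For context, the retry-safety pattern this change completes: Celery fires `on_retry` between attempts, so releasing the lock there lets the re-queued delivery reacquire it. A minimal standalone sketch of the idea; the Redis client, lock-key scheme, and task names below are illustrative assumptions, not pyworkflow's actual implementation:

```python
import redis
from celery import Celery, Task

app = Celery("demo", broker="redis://localhost:6379/0")
r = redis.Redis()


class SingletonTask(Task):
    """Holds a Redis lock for a task's logical identity across retries."""

    def lock_key(self, args, kwargs) -> str:
        # Illustrative scheme: task name plus first positional argument
        return f"lock:{self.name}:{args[0] if args else ''}"

    def release_lock(self, task_args, task_kwargs) -> None:
        r.delete(self.lock_key(task_args, task_kwargs))

    def on_retry(self, exc, task_id, args, kwargs, einfo) -> None:
        # Without this, the re-queued delivery would see the lock still
        # held by its own previous attempt and refuse to run.
        self.release_lock(task_args=args, task_kwargs=kwargs)

    def on_success(self, retval, task_id, args, kwargs) -> None:
        self.release_lock(task_args=args, task_kwargs=kwargs)


@app.task(base=SingletonTask, bind=True, max_retries=3)
def run_workflow(self, run_id: str) -> None:
    # NX + TTL: the expiry is the "safety net" the docstring mentions.
    if not r.set(self.lock_key([run_id], {}), "1", nx=True, ex=300):
        return  # duplicate dispatch; another delivery holds the lock
    ...  # workflow body; self.retry(...) routes through on_retry above
```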
pyworkflow/celery/tasks.py:

@@ -321,7 +321,7 @@ def execute_step_task(
         # Use exponential backoff for unexpected errors
         countdown = _calculate_exponential_backoff(self.request.retries)
         logger.warning(
-            f"Step failed (unexpected): {step_name}, retrying in {countdown:.1f}s
+            f"Step failed (unexpected): {step_name}, retrying in {countdown:.1f}s...: {str(e)}",
             run_id=run_id,
             step_id=step_id,
             error=str(e),
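
The hunk calls `_calculate_exponential_backoff`, whose body is not part of this diff. A plausible shape for such a helper, with the base delay, cap, and full-jitter strategy all assumed rather than taken from the package:

```python
import random


def _calculate_exponential_backoff(
    retries: int,
    base: float = 1.0,
    cap: float = 300.0,
) -> float:
    """Return a countdown in seconds: doubles per retry, capped, with jitter.

    The base/cap constants and the full-jitter strategy are assumptions;
    the packaged helper may differ.
    """
    delay = min(cap, base * (2 ** retries))
    return random.uniform(0, delay)  # full jitter avoids thundering herds
```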
@@ -1704,6 +1704,7 @@ async def _start_workflow_on_worker(
 def resume_workflow_task(
     run_id: str,
     storage_config: dict[str, Any] | None = None,
+    triggered_by_hook_id: str | None = None,
 ) -> Any | None:
     """
     Resume a suspended workflow.

@@ -1714,6 +1715,9 @@ def resume_workflow_task(
     Args:
         run_id: Workflow run ID to resume
         storage_config: Storage backend configuration
+        triggered_by_hook_id: Optional hook ID that triggered this resume.
+                              Used to prevent spurious resumes when a workflow
+                              has already moved past the triggering hook.
 
     Returns:
         Workflow result if completed, None if suspended again

@@ -1727,13 +1731,18 @@ def resume_workflow_task(
         f"RESUME_WORKFLOW_TASK ENTRY: {run_id}",
         run_id=run_id,
         celery_task_id=resume_workflow_task.request.id,
+        triggered_by_hook_id=triggered_by_hook_id,
     )
 
     # Get storage backend
     storage = _get_storage_backend(storage_config)
 
     # Resume workflow directly on worker
-    result = run_async(
+    result = run_async(
+        _resume_workflow_on_worker(
+            run_id, storage, storage_config, triggered_by_hook_id=triggered_by_hook_id
+        )
+    )
 
     if result is not None:
         logger.info(f"Workflow completed on worker: {run_id}")
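
`run_async` bridges Celery's synchronous task functions to the async internals; its body is also not part of this diff. A minimal stand-in consistent with how it is called above, assuming no event loop is already running in the worker process:

```python
import asyncio
from collections.abc import Coroutine
from typing import Any


def run_async(coro: Coroutine[Any, Any, Any]) -> Any:
    """Drive one coroutine to completion from synchronous task code."""
    return asyncio.run(coro)


# Mirrors the restructured call site above:
# result = run_async(
#     _resume_workflow_on_worker(
#         run_id, storage, storage_config, triggered_by_hook_id=hook_id
#     )
# )
```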
@@ -1940,15 +1949,81 @@ async def _complete_pending_sleeps(
     return updated_events
 
 
+def _is_hook_still_relevant(hook_id: str, events: list[Any]) -> bool:
+    """
+    Check if a hook is still relevant for resuming the workflow.
+
+    A hook is "still relevant" if there are no newer hooks created after
+    this hook was received. This prevents spurious resumes when:
+    1. resume_hook() is called multiple times for the same hook
+    2. The workflow moved past the first resume and created a new hook
+    3. The duplicate resume task runs but the workflow is now waiting on a different hook
+
+    Args:
+        hook_id: The hook ID that triggered the resume
+        events: List of workflow events
+
+    Returns:
+        True if the hook is still relevant, False if workflow has moved past it
+    """
+    from pyworkflow.engine.events import EventType
+
+    # Sort events by sequence to process in order
+    sorted_events = sorted(events, key=lambda e: e.sequence or 0)
+
+    # Find the sequence number of HOOK_RECEIVED for this hook
+    hook_received_sequence = None
+    for event in sorted_events:
+        if event.type == EventType.HOOK_RECEIVED and event.data.get("hook_id") == hook_id:
+            hook_received_sequence = event.sequence
+            break
+
+    if hook_received_sequence is None:
+        # Hook was never received - shouldn't happen, but allow resume
+        logger.warning(
+            f"Hook {hook_id} was not found in HOOK_RECEIVED events, allowing resume",
+            hook_id=hook_id,
+        )
+        return True
+
+    # Check if there's a HOOK_CREATED event after this hook was received
+    # (indicating the workflow has moved past this hook and created a new one)
+    for event in sorted_events:
+        if event.type == EventType.HOOK_CREATED:
+            event_sequence = event.sequence or 0
+            if event_sequence > hook_received_sequence:
+                # There's a newer hook - this resume is stale
+                newer_hook_id = event.data.get("hook_id")
+                logger.debug(
+                    f"Found newer hook {newer_hook_id} (seq {event_sequence}) "
+                    f"after triggered hook {hook_id} (received at seq {hook_received_sequence})",
+                    hook_id=hook_id,
+                    newer_hook_id=newer_hook_id,
+                )
+                return False
+
+    # No newer hooks created - this resume is still relevant
+    return True
+
+
 async def _resume_workflow_on_worker(
     run_id: str,
     storage: StorageBackend,
     storage_config: dict[str, Any] | None = None,
+    triggered_by_hook_id: str | None = None,
 ) -> Any | None:
     """
     Internal function to resume workflow on Celery worker.
 
     This mirrors the logic from testing.py but runs on workers.
+
+    Args:
+        run_id: Workflow run ID to resume
+        storage: Storage backend
+        storage_config: Storage configuration for task dispatch
+        triggered_by_hook_id: Optional hook ID that triggered this resume.
+                              If provided, we verify the hook is still relevant
+                              before resuming to prevent spurious resumes.
     """
     from pyworkflow.core.exceptions import WorkflowNotFoundError
 
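
The staleness rule in `_is_hook_still_relevant` is easiest to see against a concrete event sequence. A self-contained re-implementation with stand-in event objects (the real records and `EventType` enum live in `pyworkflow.engine.events`):

```python
from dataclasses import dataclass, field
from typing import Any


@dataclass
class Event:  # stand-in for pyworkflow's stored event records
    type: str
    sequence: int
    data: dict[str, Any] = field(default_factory=dict)


def is_hook_still_relevant(hook_id: str, events: list[Event]) -> bool:
    ordered = sorted(events, key=lambda e: e.sequence)
    received = next(
        (e.sequence for e in ordered
         if e.type == "HOOK_RECEIVED" and e.data.get("hook_id") == hook_id),
        None,
    )
    if received is None:
        return True  # never received: allow the resume, as above
    return not any(
        e.type == "HOOK_CREATED" and e.sequence > received for e in ordered
    )


# hook_a_0 is received at seq 2, then the workflow creates hook_b_1 at seq 3
events = [
    Event("HOOK_CREATED", 1, {"hook_id": "hook_a_0"}),
    Event("HOOK_RECEIVED", 2, {"hook_id": "hook_a_0"}),
    Event("HOOK_CREATED", 3, {"hook_id": "hook_b_1"}),
]
assert not is_hook_still_relevant("hook_a_0", events)  # stale duplicate resume
assert is_hook_still_relevant("hook_b_1", events)      # still being waited on
```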
@@ -1983,6 +2058,22 @@ async def _resume_workflow_on_worker(
         )
         return None
 
+    # If this resume was triggered by a specific hook, verify the hook is still relevant.
+    # A hook is "stale" if the workflow has already moved past it (created a newer hook).
+    # This prevents spurious resumes from duplicate resume_hook() calls.
+    if triggered_by_hook_id:
+        events = await storage.get_events(run_id)
+        hook_still_relevant = _is_hook_still_relevant(triggered_by_hook_id, events)
+        if not hook_still_relevant:
+            logger.info(
+                f"Hook {triggered_by_hook_id} is no longer relevant (workflow moved past it), "
+                "skipping spurious resume",
+                run_id=run_id,
+                workflow_name=run.workflow_name,
+                triggered_by_hook_id=triggered_by_hook_id,
+            )
+            return None
+
     # Check for cancellation flag
     cancellation_requested = await storage.check_cancellation_flag(run_id)
pyworkflow/context/local.py:

@@ -114,6 +114,31 @@ class LocalContext(WorkflowContext):
         self._replay_events(event_log)
         self._is_replaying = False
 
+    def _extract_counter_from_id(self, id_string: str) -> int:
+        """Extract counter value from hook_id or sleep_id.
+
+        Formats:
+        - hook_{name}_{counter}
+        - sleep_{counter}_{duration}s
+
+        Args:
+            id_string: The hook_id or sleep_id string
+
+        Returns:
+            The counter value, or 0 if parsing fails
+        """
+        try:
+            parts = id_string.split("_")
+            if id_string.startswith("hook_"):
+                # hook_{name}_{counter} - counter is last part
+                return int(parts[-1])
+            elif id_string.startswith("sleep_"):
+                # sleep_{counter}_{duration}s - counter is second part
+                return int(parts[1])
+        except (ValueError, IndexError):
+            pass
+        return 0
+
     def _replay_events(self, events: list[Any]) -> None:
         """Replay events to restore state."""
         from pyworkflow.engine.events import EventType
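
The two ID formats parsed above can be checked directly; the example IDs here are illustrative, not taken from the package:

```python
def extract_counter(id_string: str) -> int:
    parts = id_string.split("_")
    try:
        if id_string.startswith("hook_"):
            return int(parts[-1])   # hook_{name}_{counter}
        if id_string.startswith("sleep_"):
            return int(parts[1])    # sleep_{counter}_{duration}s
    except (ValueError, IndexError):
        pass
    return 0


assert extract_counter("hook_approval_3") == 3  # third "approval" hook
assert extract_counter("sleep_2_60s") == 2      # second sleep, 60s duration
assert extract_counter("hook_approval") == 0    # unparsable -> safe default
```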
@@ -142,6 +167,12 @@ class LocalContext(WorkflowContext):
                 payload = deserialize(event.data.get("payload"))
                 self._hook_results[hook_id] = payload
 
+            elif event.type == EventType.HOOK_CREATED:
+                # Track pending hooks for re-suspension
+                hook_id = event.data.get("hook_id")
+                if hook_id:
+                    self._pending_hooks[hook_id] = event.data
+
             elif event.type == EventType.STEP_RETRYING:
                 step_id = event.data.get("step_id")
                 self._retry_states[step_id] = {

@@ -893,6 +924,21 @@ class LocalContext(WorkflowContext):
             logger.debug(f"[replay] Hook {hook_id} already received")
             return self._hook_results[hook_id]
 
+        # Check if already pending (created but not yet received - replay mode)
+        # This prevents duplicate hook creation when workflow resumes
+        if hook_id in self._pending_hooks:
+            logger.debug(f"[replay] Hook {hook_id} already pending, re-suspending")
+            pending_data = self._pending_hooks[hook_id]
+            actual_token = pending_data.get("token")
+            # Call on_created callback if provided
+            if on_created is not None:
+                await on_created(actual_token)
+            raise SuspensionSignal(
+                reason=f"hook:{hook_id}",
+                hook_id=hook_id,
+                token=actual_token,
+            )
+
         # Generate composite token: run_id:hook_id
         from pyworkflow.primitives.resume_hook import create_hook_token
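
The re-suspension branch raises `SuspensionSignal` carrying the token recorded when the hook was first created, so a replayed workflow parks on the same token instead of minting a new hook. A schematic of that control flow, with a stand-in signal class (the real one is pyworkflow's):

```python
class SuspensionSignal(Exception):
    """Stand-in for pyworkflow's suspension signal (reason/hook_id/token)."""

    def __init__(self, reason: str, hook_id: str, token: str) -> None:
        super().__init__(reason)
        self.reason, self.hook_id, self.token = reason, hook_id, token


def drive(workflow_fn) -> None:
    """Schematic executor: a raised signal means 'persist state and park'."""
    try:
        workflow_fn()
    except SuspensionSignal as sig:
        # The token matches the original HOOK_CREATED event, so a
        # resume_hook() call made against the old token still lands.
        print(f"suspended on {sig.hook_id}, token={sig.token}")


def waiting_workflow() -> None:
    raise SuspensionSignal(
        reason="hook:hook_approval_0",
        hook_id="hook_approval_0",
        token="run-1:hook_approval_0",  # composite run_id:hook_id token
    )


drive(waiting_workflow)
```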
pyworkflow/primitives/resume_hook.py:

@@ -185,6 +185,7 @@ async def resume_hook(
         hook_id=hook_id,
         status=HookStatus.RECEIVED,
         payload=serialized_payload,
+        run_id=run_id,
     )
 
     # Schedule workflow resumption via configured runtime

@@ -195,7 +196,7 @@ async def resume_hook(
     runtime = get_runtime(config.default_runtime)
 
     try:
-        await runtime.schedule_resume(run_id, storage)
+        await runtime.schedule_resume(run_id, storage, triggered_by_hook_id=hook_id)
     except Exception as e:
         logger.warning(
             f"Failed to schedule workflow resumption: {e}",
pyworkflow/runtime/base.py:

@@ -97,6 +97,7 @@ class Runtime(ABC):
         self,
         run_id: str,
         storage: "StorageBackend",
+        triggered_by_hook_id: str | None = None,
     ) -> None:
         """
         Schedule a workflow to be resumed immediately.

@@ -109,6 +110,9 @@ class Runtime(ABC):
         Args:
             run_id: The run_id of the workflow to resume
             storage: Storage backend
+            triggered_by_hook_id: Optional hook ID that triggered this resume.
+                                  Used by distributed runtimes to prevent
+                                  spurious resumes from duplicate calls.
         """
         # Default implementation: no-op
         # Subclasses override if they support async scheduling
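
Because the base implementation is a no-op, third-party runtimes only need to accept the new keyword to stay compatible. A sketch of an override (`ThreadRuntime` is hypothetical, not part of the package):

```python
from pyworkflow.runtime.base import Runtime


class ThreadRuntime(Runtime):  # hypothetical custom runtime
    async def schedule_resume(
        self,
        run_id: str,
        storage,  # StorageBackend
        triggered_by_hook_id: str | None = None,
    ) -> None:
        # Forwarding the hook ID (rather than dropping it) is what keeps
        # duplicate-resume suppression intact further down the stack.
        ...
```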
pyworkflow/runtime/celery.py:

@@ -202,25 +202,36 @@ class CeleryRuntime(Runtime):
         self,
         run_id: str,
         storage: "StorageBackend",
+        triggered_by_hook_id: str | None = None,
     ) -> None:
         """
         Schedule immediate workflow resumption via Celery task.
 
         This is called by resume_hook() to trigger workflow resumption
         after a hook event is received.
+
+        Args:
+            run_id: The workflow run ID to resume
+            storage: Storage backend for configuration
+            triggered_by_hook_id: Optional hook ID that triggered this resume.
+                                  Used to prevent spurious resumes from duplicate calls.
         """
         from pyworkflow.celery.tasks import resume_workflow_task
 
         logger.info(
             f"Scheduling workflow resume via Celery: {run_id}",
             run_id=run_id,
+            triggered_by_hook_id=triggered_by_hook_id,
         )
 
         storage_config = self._get_storage_config(storage)
 
         resume_workflow_task.apply_async(
             args=[run_id],
-            kwargs={
+            kwargs={
+                "storage_config": storage_config,
+                "triggered_by_hook_id": triggered_by_hook_id,
+            },
         )
 
         logger.info(
pyworkflow/runtime/local.py:

@@ -507,16 +507,24 @@ class LocalRuntime(Runtime):
         self,
         run_id: str,
         storage: "StorageBackend",
+        triggered_by_hook_id: str | None = None,
     ) -> None:
         """
         Schedule immediate workflow resumption.
 
         For local runtime, this directly calls resume_workflow since
         execution happens in-process.
+
+        Args:
+            run_id: The workflow run ID to resume
+            storage: Storage backend
+            triggered_by_hook_id: Optional hook ID that triggered this resume.
+                                  Not used in local runtime (no queueing).
         """
         logger.info(
             f"Scheduling immediate workflow resume: {run_id}",
             run_id=run_id,
+            triggered_by_hook_id=triggered_by_hook_id,
         )
 
         try:
pyworkflow/storage/base.py:

@@ -291,12 +291,13 @@ class StorageBackend(ABC):
         pass
 
     @abstractmethod
-    async def get_hook(self, hook_id: str) -> Hook | None:
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
         """
         Retrieve a hook by ID.
 
         Args:
             hook_id: Hook identifier
+            run_id: Run ID (required for composite key lookup in SQL backends)
 
         Returns:
             Hook if found, None otherwise

@@ -322,6 +323,7 @@ class StorageBackend(ABC):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """
         Update hook status and optionally payload.

@@ -330,6 +332,7 @@ class StorageBackend(ABC):
             hook_id: Hook identifier
             status: New status
             payload: JSON serialized payload (if received)
+            run_id: Run ID (required for composite key lookup in SQL backends)
         """
         pass
 
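
What the widened signatures suggest for a backend: key hooks by `(run_id, hook_id)` and fall back to a scan when only `hook_id` is known. An illustrative in-memory store (not the package's memory backend, which this release also changes):

```python
class InMemoryHooks:
    """Illustrative store keyed the way the new API encourages."""

    def __init__(self) -> None:
        self._hooks: dict[tuple[str, str], dict] = {}  # (run_id, hook_id) -> hook

    async def get_hook(self, hook_id: str, run_id: str | None = None):
        if run_id is not None:
            return self._hooks.get((run_id, hook_id))  # O(1) composite lookup
        # Legacy callers that only know hook_id pay for a scan instead.
        return next(
            (h for (rid, hid), h in self._hooks.items() if hid == hook_id),
            None,
        )

    async def update_hook_status(self, hook_id, status, payload=None, run_id=None):
        hook = await self.get_hook(hook_id, run_id)
        if hook is not None:
            hook["status"] = status
            if payload is not None:
                hook["payload"] = payload
```

Keeping `run_id` optional (with the scan or lookup-table fallback) is what lets existing callers upgrade without breaking, at the cost of a slower path.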
pyworkflow/storage/cassandra.py:

@@ -1072,29 +1072,31 @@ class CassandraStorageBackend(StorageBackend):
 
         session.execute(batch)
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (run_id allows skipping lookup table)."""
         session = self._ensure_connected()
 
-
-
-
-
-
-
-
-
+        if not run_id:
+            # First lookup run_id from lookup table
+            lookup = session.execute(
+                SimpleStatement(
+                    "SELECT run_id FROM hooks_by_id WHERE hook_id = %s",
+                    consistency_level=self.read_consistency,
+                ),
+                (hook_id,),
+            ).one()
 
-
-
+            if not lookup:
+                return None
+            run_id = lookup.run_id
 
-        #
+        # Get full hook
         row = session.execute(
             SimpleStatement(
                 "SELECT * FROM hooks WHERE run_id = %s AND hook_id = %s",
                 consistency_level=self.read_consistency,
             ),
-            (
+            (run_id, hook_id),
         ).one()
 
         if not row:

@@ -1137,21 +1139,24 @@ class CassandraStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         session = self._ensure_connected()
 
-
-
-
-
-
-
-
-
+        if not run_id:
+            # First lookup run_id from lookup table
+            lookup = session.execute(
+                SimpleStatement(
+                    "SELECT run_id FROM hooks_by_id WHERE hook_id = %s",
+                    consistency_level=self.read_consistency,
+                ),
+                (hook_id,),
+            ).one()
 
-
-
+            if not lookup:
+                return
+            run_id = lookup.run_id
 
         received_at = datetime.now(UTC) if status == HookStatus.RECEIVED else None
 

@@ -1164,7 +1169,7 @@ class CassandraStorageBackend(StorageBackend):
             """,
                 consistency_level=self.write_consistency,
             ),
-            (status.value, payload, received_at,
+            (status.value, payload, received_at, run_id, hook_id),
         )
 
     async def list_hooks(
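
The two-step read depends on a hook_id → run_id lookup table. Its DDL is not part of this diff; CQL consistent with the queries above might look like the following (the column set is assumed):

```python
# CQL consistent with the SELECT/UPDATE statements above; the package's
# actual schema may differ.
HOOKS_BY_ID_DDL = """
CREATE TABLE IF NOT EXISTS hooks_by_id (
    hook_id text PRIMARY KEY,
    run_id  text
)
"""

# Partitioning the main table by run_id makes
# "SELECT * FROM hooks WHERE run_id = %s AND hook_id = %s"
# a single-partition read, which is why passing run_id skips the lookup.
HOOKS_DDL = """
CREATE TABLE IF NOT EXISTS hooks (
    run_id      text,
    hook_id     text,
    status      text,
    payload     text,
    received_at timestamp,
    PRIMARY KEY ((run_id), hook_id)
)
"""
```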
pyworkflow/storage/dynamodb.py:

@@ -722,9 +722,9 @@ class DynamoDBStorageBackend(StorageBackend):
     async def create_hook(self, hook: Hook) -> None:
         """Create a hook record."""
         async with self._get_client() as client:
-            # Main hook item
+            # Main hook item (composite key: run_id + hook_id)
             item = {
-                "PK": f"HOOK#{hook.hook_id}",
+                "PK": f"HOOK#{hook.run_id}#{hook.hook_id}",
                 "SK": "#METADATA",
                 "entity_type": "hook",
                 "hook_id": hook.hook_id,

@@ -741,12 +741,13 @@ class DynamoDBStorageBackend(StorageBackend):
                 "GSI1SK": f"{hook.status.value}#{hook.created_at.isoformat()}",
             }
 
-            # Token lookup item
+            # Token lookup item (stores run_id and hook_id for lookup)
             token_item = {
                 "PK": f"TOKEN#{hook.token}",
-                "SK": f"HOOK#{hook.hook_id}",
+                "SK": f"HOOK#{hook.run_id}#{hook.hook_id}",
                 "entity_type": "hook_token",
                 "hook_id": hook.hook_id,
+                "run_id": hook.run_id,
             }
 
             # Write both items

@@ -759,16 +760,26 @@ class DynamoDBStorageBackend(StorageBackend):
                 Item=self._dict_to_item(token_item),
             )
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite key)."""
         async with self._get_client() as client:
-
-
-
-
-
-
-
+            if run_id:
+                response = await client.get_item(
+                    TableName=self.table_name,
+                    Key={
+                        "PK": {"S": f"HOOK#{run_id}#{hook_id}"},
+                        "SK": {"S": "#METADATA"},
+                    },
+                )
+            else:
+                # Fallback: try old format without run_id
+                response = await client.get_item(
+                    TableName=self.table_name,
+                    Key={
+                        "PK": {"S": f"HOOK#{hook_id}"},
+                        "SK": {"S": "#METADATA"},
+                    },
+                )
 
             item = response.get("Item")
             if not item:

@@ -779,7 +790,7 @@ class DynamoDBStorageBackend(StorageBackend):
     async def get_hook_by_token(self, token: str) -> Hook | None:
         """Retrieve a hook by its token."""
         async with self._get_client() as client:
-            # First get the hook_id from the token lookup item
+            # First get the hook_id and run_id from the token lookup item
             response = await client.query(
                 TableName=self.table_name,
                 KeyConditionExpression="PK = :pk",

@@ -792,13 +803,16 @@ class DynamoDBStorageBackend(StorageBackend):
                 return None
 
             hook_id = self._deserialize_value(items[0]["hook_id"])
-
+            run_id_attr = items[0].get("run_id")
+            run_id = self._deserialize_value(run_id_attr) if run_id_attr else None
+            return await self.get_hook(hook_id, run_id)
 
     async def update_hook_status(
         self,
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         async with self._get_client() as client:

@@ -814,10 +828,12 @@ class DynamoDBStorageBackend(StorageBackend):
                 update_expr += ", received_at = :received_at"
                 expr_values[":received_at"] = {"S": datetime.now(UTC).isoformat()}
 
+            pk = f"HOOK#{run_id}#{hook_id}" if run_id else f"HOOK#{hook_id}"
+
             await client.update_item(
                 TableName=self.table_name,
                 Key={
-                    "PK": {"S":
+                    "PK": {"S": pk},
                     "SK": {"S": "#METADATA"},
                 },
                 UpdateExpression=update_expr,