pyworkflow-engine 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
pyworkflow/__init__.py CHANGED
@@ -29,7 +29,7 @@ Quick Start:
     >>> run_id = await start(my_workflow, "Alice")
 """
 
-__version__ = "0.1.13"
+__version__ = "0.1.15"
 
 # Configuration
 from pyworkflow.config import (
pyworkflow/celery/singleton.py CHANGED
@@ -96,7 +96,7 @@ class SingletonWorkflowTask(Task):
     Features:
     - Redis-based lock prevents duplicate execution
     - Support for unique_on with nested dict/list access (e.g., "data.run_id")
-    - Retry-safe: locks NOT released on failure (prevents duplicate during retries)
+    - Retry-safe: lock released in on_retry callback to allow retry to acquire it
     - Lock released on success or when max retries exceeded
    - Time-based lock expiry as safety net
 
@@ -124,7 +124,7 @@ class SingletonWorkflowTask(Task):
 
     # Lock behavior
     release_lock_on_success: bool = True
-    release_lock_on_failure: bool = False  # Keep lock during retries
+    release_lock_on_failure: bool = False  # Only release on max retries exceeded
 
     # Celery task settings
     max_retries: int | None = None
@@ -360,9 +360,11 @@ class SingletonWorkflowTask(Task):
         kwargs: dict[str, Any],
         einfo: Any,
     ) -> None:
-        """Lock is retained during retry."""
+        """Release lock during retry to allow retry task to acquire it."""
+        # Release lock so retry can acquire it via apply_async()
+        self.release_lock(task_args=args, task_kwargs=kwargs)
         logger.warning(
-            f"Task {self.name} retrying (lock retained)",
+            f"Task {self.name} retrying (lock released for retry)",
             task_id=task_id,
             retry_count=self.request.retries,
         )
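Taken together, the singleton.py hunks change the lock handoff across retries: instead of holding the lock until max retries are exhausted, on_retry now releases it so the re-dispatched task (a fresh apply_async delivery) can acquire it again. A minimal sketch of the intended lifecycle, assuming Redis-style acquire_lock/release_lock helpers like those on SingletonWorkflowTask (the wrapper function here is illustrative, not the package's internals):

    # Sketch only: approximate lock lifecycle around a retried singleton task.
    def run_once(task, args, kwargs):
        if not task.acquire_lock(task_args=args, task_kwargs=kwargs):
            return  # a duplicate is already running; skip this delivery
        try:
            task.run(*args, **kwargs)
            task.release_lock(task_args=args, task_kwargs=kwargs)  # release_lock_on_success
        except Exception:
            # on_retry releases the lock here so the retry delivery can
            # re-acquire it; time-based expiry remains the safety net.
            task.release_lock(task_args=args, task_kwargs=kwargs)
            raise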
pyworkflow/celery/tasks.py CHANGED
@@ -321,7 +321,7 @@ def execute_step_task(
         # Use exponential backoff for unexpected errors
         countdown = _calculate_exponential_backoff(self.request.retries)
         logger.warning(
-            f"Step failed (unexpected): {step_name}, retrying in {countdown:.1f}s...",
+            f"Step failed (unexpected): {step_name}, retrying in {countdown:.1f}s...: {str(e)}",
             run_id=run_id,
             step_id=step_id,
             error=str(e),
@@ -1704,6 +1704,7 @@ async def _start_workflow_on_worker(
 def resume_workflow_task(
     run_id: str,
     storage_config: dict[str, Any] | None = None,
+    triggered_by_hook_id: str | None = None,
 ) -> Any | None:
     """
     Resume a suspended workflow.
@@ -1714,6 +1715,9 @@ def resume_workflow_task(
     Args:
         run_id: Workflow run ID to resume
         storage_config: Storage backend configuration
+        triggered_by_hook_id: Optional hook ID that triggered this resume.
+            Used to prevent spurious resumes when a workflow
+            has already moved past the triggering hook.
 
     Returns:
         Workflow result if completed, None if suspended again
@@ -1727,13 +1731,18 @@
         f"RESUME_WORKFLOW_TASK ENTRY: {run_id}",
         run_id=run_id,
         celery_task_id=resume_workflow_task.request.id,
+        triggered_by_hook_id=triggered_by_hook_id,
     )
 
     # Get storage backend
     storage = _get_storage_backend(storage_config)
 
     # Resume workflow directly on worker
-    result = run_async(_resume_workflow_on_worker(run_id, storage, storage_config))
+    result = run_async(
+        _resume_workflow_on_worker(
+            run_id, storage, storage_config, triggered_by_hook_id=triggered_by_hook_id
+        )
+    )
 
     if result is not None:
         logger.info(f"Workflow completed on worker: {run_id}")
@@ -1940,15 +1949,81 @@ async def _complete_pending_sleeps(
     return updated_events
 
 
+def _is_hook_still_relevant(hook_id: str, events: list[Any]) -> bool:
+    """
+    Check if a hook is still relevant for resuming the workflow.
+
+    A hook is "still relevant" if there are no newer hooks created after
+    this hook was received. This prevents spurious resumes when:
+    1. resume_hook() is called multiple times for the same hook
+    2. The workflow moved past the first resume and created a new hook
+    3. The duplicate resume task runs but the workflow is now waiting on a different hook
+
+    Args:
+        hook_id: The hook ID that triggered the resume
+        events: List of workflow events
+
+    Returns:
+        True if the hook is still relevant, False if workflow has moved past it
+    """
+    from pyworkflow.engine.events import EventType
+
+    # Sort events by sequence to process in order
+    sorted_events = sorted(events, key=lambda e: e.sequence or 0)
+
+    # Find the sequence number of HOOK_RECEIVED for this hook
+    hook_received_sequence = None
+    for event in sorted_events:
+        if event.type == EventType.HOOK_RECEIVED and event.data.get("hook_id") == hook_id:
+            hook_received_sequence = event.sequence
+            break
+
+    if hook_received_sequence is None:
+        # Hook was never received - shouldn't happen, but allow resume
+        logger.warning(
+            f"Hook {hook_id} was not found in HOOK_RECEIVED events, allowing resume",
+            hook_id=hook_id,
+        )
+        return True
+
+    # Check if there's a HOOK_CREATED event after this hook was received
+    # (indicating the workflow has moved past this hook and created a new one)
+    for event in sorted_events:
+        if event.type == EventType.HOOK_CREATED:
+            event_sequence = event.sequence or 0
+            if event_sequence > hook_received_sequence:
+                # There's a newer hook - this resume is stale
+                newer_hook_id = event.data.get("hook_id")
+                logger.debug(
+                    f"Found newer hook {newer_hook_id} (seq {event_sequence}) "
+                    f"after triggered hook {hook_id} (received at seq {hook_received_sequence})",
+                    hook_id=hook_id,
+                    newer_hook_id=newer_hook_id,
+                )
+                return False
+
+    # No newer hooks created - this resume is still relevant
+    return True
+
+
 async def _resume_workflow_on_worker(
     run_id: str,
     storage: StorageBackend,
     storage_config: dict[str, Any] | None = None,
+    triggered_by_hook_id: str | None = None,
 ) -> Any | None:
     """
     Internal function to resume workflow on Celery worker.
 
     This mirrors the logic from testing.py but runs on workers.
+
+    Args:
+        run_id: Workflow run ID to resume
+        storage: Storage backend
+        storage_config: Storage configuration for task dispatch
+        triggered_by_hook_id: Optional hook ID that triggered this resume.
+            If provided, we verify the hook is still relevant
+            before resuming to prevent spurious resumes.
     """
     from pyworkflow.core.exceptions import WorkflowNotFoundError
 
@@ -1983,6 +2058,22 @@ async def _resume_workflow_on_worker(
         )
         return None
 
+    # If this resume was triggered by a specific hook, verify the hook is still relevant.
+    # A hook is "stale" if the workflow has already moved past it (created a newer hook).
+    # This prevents spurious resumes from duplicate resume_hook() calls.
+    if triggered_by_hook_id:
+        events = await storage.get_events(run_id)
+        hook_still_relevant = _is_hook_still_relevant(triggered_by_hook_id, events)
+        if not hook_still_relevant:
+            logger.info(
+                f"Hook {triggered_by_hook_id} is no longer relevant (workflow moved past it), "
+                "skipping spurious resume",
+                run_id=run_id,
+                workflow_name=run.workflow_name,
+                triggered_by_hook_id=triggered_by_hook_id,
+            )
+            return None
+
     # Check for cancellation flag
     cancellation_requested = await storage.check_cancellation_flag(run_id)
 
pyworkflow/context/local.py CHANGED
@@ -114,6 +114,31 @@ class LocalContext(WorkflowContext):
         self._replay_events(event_log)
         self._is_replaying = False
 
+    def _extract_counter_from_id(self, id_string: str) -> int:
+        """Extract counter value from hook_id or sleep_id.
+
+        Formats:
+        - hook_{name}_{counter}
+        - sleep_{counter}_{duration}s
+
+        Args:
+            id_string: The hook_id or sleep_id string
+
+        Returns:
+            The counter value, or 0 if parsing fails
+        """
+        try:
+            parts = id_string.split("_")
+            if id_string.startswith("hook_"):
+                # hook_{name}_{counter} - counter is last part
+                return int(parts[-1])
+            elif id_string.startswith("sleep_"):
+                # sleep_{counter}_{duration}s - counter is second part
+                return int(parts[1])
+        except (ValueError, IndexError):
+            pass
+        return 0
+
     def _replay_events(self, events: list[Any]) -> None:
         """Replay events to restore state."""
         from pyworkflow.engine.events import EventType
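The ID formats that _extract_counter_from_id parses make its behavior easy to check. For example, assuming ctx is a LocalContext instance (return values follow directly from the parsing rules above):

    ctx._extract_counter_from_id("hook_approval_3")  # -> 3 (last "_" part)
    ctx._extract_counter_from_id("sleep_2_30s")      # -> 2 (second "_" part)
    ctx._extract_counter_from_id("garbage")          # -> 0 (unparseable fallback)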
@@ -142,6 +167,12 @@ class LocalContext(WorkflowContext):
             payload = deserialize(event.data.get("payload"))
             self._hook_results[hook_id] = payload
 
+        elif event.type == EventType.HOOK_CREATED:
+            # Track pending hooks for re-suspension
+            hook_id = event.data.get("hook_id")
+            if hook_id:
+                self._pending_hooks[hook_id] = event.data
+
         elif event.type == EventType.STEP_RETRYING:
             step_id = event.data.get("step_id")
             self._retry_states[step_id] = {
@@ -893,6 +924,21 @@ class LocalContext(WorkflowContext):
             logger.debug(f"[replay] Hook {hook_id} already received")
             return self._hook_results[hook_id]
 
+        # Check if already pending (created but not yet received - replay mode)
+        # This prevents duplicate hook creation when workflow resumes
+        if hook_id in self._pending_hooks:
+            logger.debug(f"[replay] Hook {hook_id} already pending, re-suspending")
+            pending_data = self._pending_hooks[hook_id]
+            actual_token = pending_data.get("token")
+            # Call on_created callback if provided
+            if on_created is not None:
+                await on_created(actual_token)
+            raise SuspensionSignal(
+                reason=f"hook:{hook_id}",
+                hook_id=hook_id,
+                token=actual_token,
+            )
+
         # Generate composite token: run_id:hook_id
         from pyworkflow.primitives.resume_hook import create_hook_token
 
pyworkflow/primitives/resume_hook.py CHANGED
@@ -185,6 +185,7 @@ async def resume_hook(
         hook_id=hook_id,
         status=HookStatus.RECEIVED,
         payload=serialized_payload,
+        run_id=run_id,
     )
 
     # Schedule workflow resumption via configured runtime
@@ -195,7 +196,7 @@ async def resume_hook(
     runtime = get_runtime(config.default_runtime)
 
     try:
-        await runtime.schedule_resume(run_id, storage)
+        await runtime.schedule_resume(run_id, storage, triggered_by_hook_id=hook_id)
     except Exception as e:
         logger.warning(
             f"Failed to schedule workflow resumption: {e}",
pyworkflow/runtime/base.py CHANGED
@@ -97,6 +97,7 @@ class Runtime(ABC):
         self,
         run_id: str,
         storage: "StorageBackend",
+        triggered_by_hook_id: str | None = None,
     ) -> None:
         """
         Schedule a workflow to be resumed immediately.
@@ -109,6 +110,9 @@ class Runtime(ABC):
         Args:
             run_id: The run_id of the workflow to resume
             storage: Storage backend
+            triggered_by_hook_id: Optional hook ID that triggered this resume.
+                Used by distributed runtimes to prevent
+                spurious resumes from duplicate calls.
         """
         # Default implementation: no-op
         # Subclasses override if they support async scheduling
pyworkflow/runtime/celery.py CHANGED
@@ -202,25 +202,36 @@ class CeleryRuntime(Runtime):
         self,
         run_id: str,
         storage: "StorageBackend",
+        triggered_by_hook_id: str | None = None,
     ) -> None:
         """
         Schedule immediate workflow resumption via Celery task.
 
         This is called by resume_hook() to trigger workflow resumption
         after a hook event is received.
+
+        Args:
+            run_id: The workflow run ID to resume
+            storage: Storage backend for configuration
+            triggered_by_hook_id: Optional hook ID that triggered this resume.
+                Used to prevent spurious resumes from duplicate calls.
         """
         from pyworkflow.celery.tasks import resume_workflow_task
 
         logger.info(
             f"Scheduling workflow resume via Celery: {run_id}",
             run_id=run_id,
+            triggered_by_hook_id=triggered_by_hook_id,
         )
 
         storage_config = self._get_storage_config(storage)
 
         resume_workflow_task.apply_async(
             args=[run_id],
-            kwargs={"storage_config": storage_config},
+            kwargs={
+                "storage_config": storage_config,
+                "triggered_by_hook_id": triggered_by_hook_id,
+            },
         )
 
         logger.info(
pyworkflow/runtime/local.py CHANGED
@@ -507,16 +507,24 @@ class LocalRuntime(Runtime):
         self,
         run_id: str,
         storage: "StorageBackend",
+        triggered_by_hook_id: str | None = None,
     ) -> None:
         """
         Schedule immediate workflow resumption.
 
         For local runtime, this directly calls resume_workflow since
         execution happens in-process.
+
+        Args:
+            run_id: The workflow run ID to resume
+            storage: Storage backend
+            triggered_by_hook_id: Optional hook ID that triggered this resume.
+                Not used in local runtime (no queueing).
         """
         logger.info(
             f"Scheduling immediate workflow resume: {run_id}",
             run_id=run_id,
+            triggered_by_hook_id=triggered_by_hook_id,
         )
 
         try:
pyworkflow/storage/base.py CHANGED
@@ -291,12 +291,13 @@ class StorageBackend(ABC):
         pass
 
     @abstractmethod
-    async def get_hook(self, hook_id: str) -> Hook | None:
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
         """
         Retrieve a hook by ID.
 
         Args:
             hook_id: Hook identifier
+            run_id: Run ID (required for composite key lookup in SQL backends)
 
         Returns:
             Hook if found, None otherwise
@@ -322,6 +323,7 @@ class StorageBackend(ABC):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """
         Update hook status and optionally payload.
@@ -330,6 +332,7 @@ class StorageBackend(ABC):
             hook_id: Hook identifier
             status: New status
             payload: JSON serialized payload (if received)
+            run_id: Run ID (required for composite key lookup in SQL backends)
         """
         pass
 
pyworkflow/storage/cassandra.py CHANGED
@@ -1072,29 +1072,31 @@ class CassandraStorageBackend(StorageBackend):
 
         session.execute(batch)
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (run_id allows skipping lookup table)."""
         session = self._ensure_connected()
 
-        # First lookup run_id
-        lookup = session.execute(
-            SimpleStatement(
-                "SELECT run_id FROM hooks_by_id WHERE hook_id = %s",
-                consistency_level=self.read_consistency,
-            ),
-            (hook_id,),
-        ).one()
+        if not run_id:
+            # First lookup run_id from lookup table
+            lookup = session.execute(
+                SimpleStatement(
+                    "SELECT run_id FROM hooks_by_id WHERE hook_id = %s",
+                    consistency_level=self.read_consistency,
+                ),
+                (hook_id,),
+            ).one()
 
-        if not lookup:
-            return None
+            if not lookup:
+                return None
+            run_id = lookup.run_id
 
-        # Then get full hook
+        # Get full hook
         row = session.execute(
             SimpleStatement(
                 "SELECT * FROM hooks WHERE run_id = %s AND hook_id = %s",
                 consistency_level=self.read_consistency,
             ),
-            (lookup.run_id, hook_id),
+            (run_id, hook_id),
         ).one()
 
         if not row:
@@ -1137,21 +1139,24 @@ class CassandraStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         session = self._ensure_connected()
 
-        # First lookup run_id
-        lookup = session.execute(
-            SimpleStatement(
-                "SELECT run_id FROM hooks_by_id WHERE hook_id = %s",
-                consistency_level=self.read_consistency,
-            ),
-            (hook_id,),
-        ).one()
+        if not run_id:
+            # First lookup run_id from lookup table
+            lookup = session.execute(
+                SimpleStatement(
+                    "SELECT run_id FROM hooks_by_id WHERE hook_id = %s",
+                    consistency_level=self.read_consistency,
+                ),
+                (hook_id,),
+            ).one()
 
-            if not lookup:
-                return
+            if not lookup:
+                return
+            run_id = lookup.run_id
 
         received_at = datetime.now(UTC) if status == HookStatus.RECEIVED else None
 
@@ -1164,7 +1169,7 @@ class CassandraStorageBackend(StorageBackend):
                 """,
                 consistency_level=self.write_consistency,
             ),
-            (status.value, payload, received_at, lookup.run_id, hook_id),
+            (status.value, payload, received_at, run_id, hook_id),
         )
 
     async def list_hooks(
pyworkflow/storage/dynamodb.py CHANGED
@@ -722,9 +722,9 @@ class DynamoDBStorageBackend(StorageBackend):
     async def create_hook(self, hook: Hook) -> None:
         """Create a hook record."""
         async with self._get_client() as client:
-            # Main hook item
+            # Main hook item (composite key: run_id + hook_id)
             item = {
-                "PK": f"HOOK#{hook.hook_id}",
+                "PK": f"HOOK#{hook.run_id}#{hook.hook_id}",
                 "SK": "#METADATA",
                 "entity_type": "hook",
                 "hook_id": hook.hook_id,
@@ -741,12 +741,13 @@ class DynamoDBStorageBackend(StorageBackend):
                 "GSI1SK": f"{hook.status.value}#{hook.created_at.isoformat()}",
             }
 
-            # Token lookup item
+            # Token lookup item (stores run_id and hook_id for lookup)
             token_item = {
                 "PK": f"TOKEN#{hook.token}",
-                "SK": f"HOOK#{hook.hook_id}",
+                "SK": f"HOOK#{hook.run_id}#{hook.hook_id}",
                 "entity_type": "hook_token",
                 "hook_id": hook.hook_id,
+                "run_id": hook.run_id,
             }
 
             # Write both items
@@ -759,16 +760,26 @@ class DynamoDBStorageBackend(StorageBackend):
                 Item=self._dict_to_item(token_item),
             )
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite key)."""
         async with self._get_client() as client:
-            response = await client.get_item(
-                TableName=self.table_name,
-                Key={
-                    "PK": {"S": f"HOOK#{hook_id}"},
-                    "SK": {"S": "#METADATA"},
-                },
-            )
+            if run_id:
+                response = await client.get_item(
+                    TableName=self.table_name,
+                    Key={
+                        "PK": {"S": f"HOOK#{run_id}#{hook_id}"},
+                        "SK": {"S": "#METADATA"},
+                    },
+                )
+            else:
+                # Fallback: try old format without run_id
+                response = await client.get_item(
+                    TableName=self.table_name,
+                    Key={
+                        "PK": {"S": f"HOOK#{hook_id}"},
+                        "SK": {"S": "#METADATA"},
+                    },
+                )
 
             item = response.get("Item")
             if not item:
@@ -779,7 +790,7 @@ class DynamoDBStorageBackend(StorageBackend):
     async def get_hook_by_token(self, token: str) -> Hook | None:
         """Retrieve a hook by its token."""
         async with self._get_client() as client:
-            # First get the hook_id from the token lookup item
+            # First get the hook_id and run_id from the token lookup item
             response = await client.query(
                 TableName=self.table_name,
                 KeyConditionExpression="PK = :pk",
@@ -792,13 +803,16 @@ class DynamoDBStorageBackend(StorageBackend):
                 return None
 
             hook_id = self._deserialize_value(items[0]["hook_id"])
-            return await self.get_hook(hook_id)
+            run_id_attr = items[0].get("run_id")
+            run_id = self._deserialize_value(run_id_attr) if run_id_attr else None
+            return await self.get_hook(hook_id, run_id)
 
     async def update_hook_status(
         self,
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         async with self._get_client() as client:
@@ -814,10 +828,12 @@ class DynamoDBStorageBackend(StorageBackend):
                 update_expr += ", received_at = :received_at"
                 expr_values[":received_at"] = {"S": datetime.now(UTC).isoformat()}
 
+            pk = f"HOOK#{run_id}#{hook_id}" if run_id else f"HOOK#{hook_id}"
+
             await client.update_item(
                 TableName=self.table_name,
                 Key={
-                    "PK": {"S": f"HOOK#{hook_id}"},
+                    "PK": {"S": pk},
                    "SK": {"S": "#METADATA"},
                },
                UpdateExpression=update_expr,
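After this change the hook item's partition key embeds both identifiers, and the token item carries run_id so a token lookup can reconstruct the composite key. Illustrative key shapes (a sketch, with hypothetical run-abc / hook_approve_0 values):

    hook_key = {"PK": "HOOK#run-abc#hook_approve_0", "SK": "#METADATA"}
    token_key = {"PK": f"TOKEN#{token}", "SK": "HOOK#run-abc#hook_approve_0"}
    # The token item also stores hook_id and run_id attributes for the reverse
    # lookup; get_hook() without run_id falls back to the old "HOOK#<hook_id>"
    # key so items written by earlier versions remain readable.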
pyworkflow/storage/file.py CHANGED
@@ -464,7 +464,8 @@ class FileStorageBackend(StorageBackend):
 
     async def create_hook(self, hook: Hook) -> None:
         """Create a hook record."""
-        hook_file = self.hooks_dir / f"{hook.hook_id}.json"
+        # Use composite filename: run_id__hook_id.json (double underscore separator)
+        hook_file = self.hooks_dir / f"{hook.run_id}__{hook.hook_id}.json"
         lock_file = self.locks_dir / "token_index.lock"
         lock = FileLock(str(lock_file))
 
@@ -473,16 +474,25 @@ class FileStorageBackend(StorageBackend):
         def _write() -> None:
             with lock:
                 hook_file.write_text(json.dumps(data, indent=2))
-                # Update token index
+                # Update token index (stores run_id:hook_id as value)
                 index = self._load_token_index()
-                index[hook.token] = hook.hook_id
+                index[hook.token] = f"{hook.run_id}:{hook.hook_id}"
                 self._save_token_index(index)
 
         await asyncio.to_thread(_write)
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
-        hook_file = self.hooks_dir / f"{hook_id}.json"
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite filename)."""
+        if run_id:
+            hook_file = self.hooks_dir / f"{run_id}__{hook_id}.json"
+        else:
+            # Fallback: try old format for backwards compat
+            hook_file = self.hooks_dir / f"{hook_id}.json"
+            if not hook_file.exists():
+                # Search for any file with this hook_id
+                for f in self.hooks_dir.glob(f"*__{hook_id}.json"):
+                    hook_file = f
+                    break
 
         if not hook_file.exists():
             return None
@@ -496,13 +506,18 @@ class FileStorageBackend(StorageBackend):
     async def get_hook_by_token(self, token: str) -> Hook | None:
         """Retrieve a hook by its token."""
 
-        def _lookup() -> str | None:
+        def _lookup() -> tuple[str, str] | None:
             index = self._load_token_index()
-            return index.get(token)
+            value = index.get(token)
+            if value and ":" in value:
+                parts = value.split(":", 1)
+                return (parts[0], parts[1])
+            return None
 
-        hook_id = await asyncio.to_thread(_lookup)
-        if hook_id:
-            return await self.get_hook(hook_id)
+        result = await asyncio.to_thread(_lookup)
+        if result:
+            run_id, hook_id = result
+            return await self.get_hook(hook_id, run_id)
         return None
 
     async def update_hook_status(
@@ -510,14 +525,25 @@ class FileStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
-        hook_file = self.hooks_dir / f"{hook_id}.json"
+        if run_id:
+            hook_file = self.hooks_dir / f"{run_id}__{hook_id}.json"
+        else:
+            # Fallback: try old format
+            hook_file = self.hooks_dir / f"{hook_id}.json"
+            if not hook_file.exists():
+                # Search for any file with this hook_id
+                for f in self.hooks_dir.glob(f"*__{hook_id}.json"):
+                    hook_file = f
+                    break
 
         if not hook_file.exists():
             raise ValueError(f"Hook {hook_id} not found")
 
-        lock_file = self.locks_dir / f"hook_{hook_id}.lock"
+        safe_hook_id = hook_id.replace("/", "_").replace(":", "_")
+        lock_file = self.locks_dir / f"hook_{safe_hook_id}.lock"
         lock = FileLock(str(lock_file))
 
         def _update() -> None:
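On disk this yields one JSON file per (run, hook) pair plus a token index. Illustrative layout, with hypothetical IDs:

    hooks/run-abc__hook_approve_0.json    # composite filename: run_id__hook_id.json
    locks/hook_hook_approve_0.lock        # lock name sanitizes ":" and "/" to "_"
    token_index entry: {"<token>": "run-abc:hook_approve_0"}
    # get_hook() without run_id first tries the old <hook_id>.json name, then
    # globs *__<hook_id>.json for backwards compatibility.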
pyworkflow/storage/memory.py CHANGED
@@ -43,10 +43,10 @@ class InMemoryStorageBackend(StorageBackend):
         self._runs: dict[str, WorkflowRun] = {}
         self._events: dict[str, list[Event]] = {}
         self._steps: dict[str, StepExecution] = {}
-        self._hooks: dict[str, Hook] = {}
+        self._hooks: dict[tuple[str, str], Hook] = {}  # (run_id, hook_id) -> Hook
         self._schedules: dict[str, Schedule] = {}
         self._idempotency_index: dict[str, str] = {}  # key -> run_id
-        self._token_index: dict[str, str] = {}  # token -> hook_id
+        self._token_index: dict[str, tuple[str, str]] = {}  # token -> (run_id, hook_id)
         self._cancellation_flags: dict[str, bool] = {}  # run_id -> cancelled
         self._lock = threading.RLock()
         self._event_sequences: dict[str, int] = {}  # run_id -> next sequence
@@ -292,20 +292,28 @@ class InMemoryStorageBackend(StorageBackend):
     async def create_hook(self, hook: Hook) -> None:
         """Create a hook record."""
         with self._lock:
-            self._hooks[hook.hook_id] = hook
-            self._token_index[hook.token] = hook.hook_id
+            key = (hook.run_id, hook.hook_id)
+            self._hooks[key] = hook
+            self._token_index[hook.token] = key
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite key lookup)."""
         with self._lock:
-            return self._hooks.get(hook_id)
+            if run_id:
+                return self._hooks.get((run_id, hook_id))
+            else:
+                # Fallback: find any hook with this ID (may return wrong one if duplicates)
+                for (_r_id, h_id), hook in self._hooks.items():
+                    if h_id == hook_id:
+                        return hook
+                return None
 
     async def get_hook_by_token(self, token: str) -> Hook | None:
         """Retrieve a hook by its token."""
         with self._lock:
-            hook_id = self._token_index.get(token)
-            if hook_id:
-                return self._hooks.get(hook_id)
+            key = self._token_index.get(token)
+            if key:
+                return self._hooks.get(key)
         return None
 
     async def update_hook_status(
@@ -313,10 +321,19 @@ class InMemoryStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         with self._lock:
-            hook = self._hooks.get(hook_id)
+            if run_id:
+                hook = self._hooks.get((run_id, hook_id))
+            else:
+                # Fallback: find any hook with this ID
+                hook = None
+                for (_r_id, h_id), h in self._hooks.items():
+                    if h_id == hook_id:
+                        hook = h
+                        break
             if hook:
                 hook.status = status
                 if payload is not None:
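The in-memory backend shows the new semantics most directly: the same hook_id under two runs no longer collides. A minimal check (a sketch; Hook construction is abbreviated and field names follow pyworkflow.storage.schemas):

    storage = InMemoryStorageBackend()
    await storage.create_hook(Hook(run_id="run-a", hook_id="hook_approve_0", token="t1", ...))
    await storage.create_hook(Hook(run_id="run-b", hook_id="hook_approve_0", token="t2", ...))
    assert (await storage.get_hook("hook_approve_0", run_id="run-a")).token == "t1"
    assert (await storage.get_hook("hook_approve_0", run_id="run-b")).token == "t2"
    # Without run_id, get_hook falls back to scanning and may return either.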
pyworkflow/storage/mysql.py CHANGED
@@ -175,11 +175,11 @@ class MySQLStorageBackend(StorageBackend):
             ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
         """)
 
-        # Hooks table
+        # Hooks table (composite PK: run_id + hook_id since hook_id is only unique per run)
         await cur.execute("""
             CREATE TABLE IF NOT EXISTS hooks (
-                hook_id VARCHAR(255) PRIMARY KEY,
                 run_id VARCHAR(255) NOT NULL,
+                hook_id VARCHAR(255) NOT NULL,
                 token VARCHAR(255) UNIQUE NOT NULL,
                 created_at DATETIME(6) NOT NULL,
                 received_at DATETIME(6),
@@ -187,6 +187,7 @@ class MySQLStorageBackend(StorageBackend):
                 status VARCHAR(50) NOT NULL,
                 payload LONGTEXT,
                 metadata LONGTEXT DEFAULT '{}',
+                PRIMARY KEY (run_id, hook_id),
                 UNIQUE INDEX idx_hooks_token (token),
                 INDEX idx_hooks_run_id (run_id),
                 INDEX idx_hooks_status (status),
@@ -660,12 +661,19 @@ class MySQLStorageBackend(StorageBackend):
             ),
         )
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite key lookup)."""
         pool = self._ensure_connected()
 
         async with pool.acquire() as conn, conn.cursor(aiomysql.DictCursor) as cur:
-            await cur.execute("SELECT * FROM hooks WHERE hook_id = %s", (hook_id,))
+            if run_id:
+                await cur.execute(
+                    "SELECT * FROM hooks WHERE run_id = %s AND hook_id = %s",
+                    (run_id, hook_id),
+                )
+            else:
+                # Fallback: find any hook with this ID (may return wrong one if duplicates)
+                await cur.execute("SELECT * FROM hooks WHERE hook_id = %s", (hook_id,))
             row = await cur.fetchone()
 
             if not row:
@@ -691,6 +699,7 @@ class MySQLStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         pool = self._ensure_connected()
@@ -706,13 +715,20 @@ class MySQLStorageBackend(StorageBackend):
             updates.append("received_at = %s")
             params.append(datetime.now(UTC))
 
-        params.append(hook_id)
-
         async with pool.acquire() as conn, conn.cursor() as cur:
-            await cur.execute(
-                f"UPDATE hooks SET {', '.join(updates)} WHERE hook_id = %s",
-                tuple(params),
-            )
+            if run_id:
+                params.append(run_id)
+                params.append(hook_id)
+                await cur.execute(
+                    f"UPDATE hooks SET {', '.join(updates)} WHERE run_id = %s AND hook_id = %s",
+                    tuple(params),
+                )
+            else:
+                params.append(hook_id)
+                await cur.execute(
+                    f"UPDATE hooks SET {', '.join(updates)} WHERE hook_id = %s",
+                    tuple(params),
+                )
 
     async def list_hooks(
         self,
pyworkflow/storage/postgres.py CHANGED
@@ -216,18 +216,19 @@ class PostgresStorageBackend(StorageBackend):
         # Indexes for steps
         await conn.execute("CREATE INDEX IF NOT EXISTS idx_steps_run_id ON steps(run_id)")
 
-        # Hooks table
+        # Hooks table (composite PK: run_id + hook_id since hook_id is only unique per run)
         await conn.execute("""
             CREATE TABLE IF NOT EXISTS hooks (
-                hook_id TEXT PRIMARY KEY,
                 run_id TEXT NOT NULL REFERENCES workflow_runs(run_id) ON DELETE CASCADE,
+                hook_id TEXT NOT NULL,
                 token TEXT UNIQUE NOT NULL,
                 created_at TIMESTAMPTZ NOT NULL,
                 received_at TIMESTAMPTZ,
                 expires_at TIMESTAMPTZ,
                 status TEXT NOT NULL,
                 payload TEXT,
-                metadata TEXT DEFAULT '{}'
+                metadata TEXT DEFAULT '{}',
+                PRIMARY KEY (run_id, hook_id)
             )
         """)
 
@@ -751,12 +752,20 @@ class PostgresStorageBackend(StorageBackend):
             json.dumps(hook.metadata),
         )
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite key lookup)."""
         pool = await self._get_pool()
 
         async with pool.acquire() as conn:
-            row = await conn.fetchrow("SELECT * FROM hooks WHERE hook_id = $1", hook_id)
+            if run_id:
+                row = await conn.fetchrow(
+                    "SELECT * FROM hooks WHERE run_id = $1 AND hook_id = $2",
+                    run_id,
+                    hook_id,
+                )
+            else:
+                # Fallback: find any hook with this ID (may return wrong one if duplicates)
+                row = await conn.fetchrow("SELECT * FROM hooks WHERE hook_id = $1", hook_id)
 
             if not row:
                 return None
@@ -780,6 +789,7 @@ class PostgresStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
     ) -> None:
         """Update hook status and optionally payload."""
         pool = await self._get_pool()
@@ -798,13 +808,20 @@ class PostgresStorageBackend(StorageBackend):
             params.append(datetime.now(UTC))
             param_idx += 1
 
-        params.append(hook_id)
-
         async with pool.acquire() as conn:
-            await conn.execute(
-                f"UPDATE hooks SET {', '.join(updates)} WHERE hook_id = ${param_idx}",
-                *params,
-            )
+            if run_id:
+                params.append(run_id)
+                params.append(hook_id)
+                await conn.execute(
+                    f"UPDATE hooks SET {', '.join(updates)} WHERE run_id = ${param_idx} AND hook_id = ${param_idx + 1}",
+                    *params,
+                )
+            else:
+                params.append(hook_id)
+                await conn.execute(
+                    f"UPDATE hooks SET {', '.join(updates)} WHERE hook_id = ${param_idx}",
+                    *params,
+                )
 
     async def list_hooks(
         self,
pyworkflow/storage/sqlite.py CHANGED
@@ -164,11 +164,11 @@ class SQLiteStorageBackend(StorageBackend):
         # Indexes for steps
         await db.execute("CREATE INDEX IF NOT EXISTS idx_steps_run_id ON steps(run_id)")
 
-        # Hooks table
+        # Hooks table (composite PK: run_id + hook_id since hook_id is only unique per run)
         await db.execute("""
             CREATE TABLE IF NOT EXISTS hooks (
-                hook_id TEXT PRIMARY KEY,
                 run_id TEXT NOT NULL,
+                hook_id TEXT NOT NULL,
                 token TEXT UNIQUE NOT NULL,
                 created_at TIMESTAMP NOT NULL,
                 received_at TIMESTAMP,
@@ -176,6 +176,7 @@ class SQLiteStorageBackend(StorageBackend):
                 status TEXT NOT NULL,
                 payload TEXT,
                 metadata TEXT DEFAULT '{}',
+                PRIMARY KEY (run_id, hook_id),
                 FOREIGN KEY (run_id) REFERENCES workflow_runs(run_id) ON DELETE CASCADE
             )
         """)
@@ -645,12 +646,20 @@ class SQLiteStorageBackend(StorageBackend):
             )
             await db.commit()
 
-    async def get_hook(self, hook_id: str) -> Hook | None:
-        """Retrieve a hook by ID."""
+    async def get_hook(self, hook_id: str, run_id: str | None = None) -> Hook | None:
+        """Retrieve a hook by ID (requires run_id for composite key lookup)."""
         db = self._ensure_connected()
 
-        async with db.execute("SELECT * FROM hooks WHERE hook_id = ?", (hook_id,)) as cursor:
-            row = await cursor.fetchone()
+        if run_id:
+            async with db.execute(
+                "SELECT * FROM hooks WHERE run_id = ? AND hook_id = ?",
+                (run_id, hook_id),
+            ) as cursor:
+                row = await cursor.fetchone()
+        else:
+            # Fallback: find any hook with this ID (may return wrong one if duplicates)
+            async with db.execute("SELECT * FROM hooks WHERE hook_id = ?", (hook_id,)) as cursor:
+                row = await cursor.fetchone()
 
         if not row:
             return None
@@ -674,6 +683,7 @@ class SQLiteStorageBackend(StorageBackend):
         hook_id: str,
         status: HookStatus,
         payload: str | None = None,
+        run_id: str | None = None,
    ) -> None:
         """Update hook status and optionally payload."""
         db = self._ensure_connected()
@@ -689,12 +699,19 @@ class SQLiteStorageBackend(StorageBackend):
             updates.append("received_at = ?")
             params.append(datetime.now(UTC).isoformat())
 
-        params.append(hook_id)
-
-        await db.execute(
-            f"UPDATE hooks SET {', '.join(updates)} WHERE hook_id = ?",
-            tuple(params),
-        )
+        if run_id:
+            params.append(run_id)
+            params.append(hook_id)
+            await db.execute(
+                f"UPDATE hooks SET {', '.join(updates)} WHERE run_id = ? AND hook_id = ?",
+                tuple(params),
+            )
+        else:
+            params.append(hook_id)
+            await db.execute(
+                f"UPDATE hooks SET {', '.join(updates)} WHERE hook_id = ?",
+                tuple(params),
+            )
         await db.commit()
 
     async def list_hooks(
{pyworkflow_engine-0.1.13.dist-info → pyworkflow_engine-0.1.15.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyworkflow-engine
-Version: 0.1.13
+Version: 0.1.15
 Summary: A Python implementation of durable, event-sourced workflows inspired by Vercel Workflow
 Author: PyWorkflow Contributors
 License: MIT
{pyworkflow_engine-0.1.13.dist-info → pyworkflow_engine-0.1.15.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
-pyworkflow/__init__.py,sha256=C7t5zLS32QNzI2BXNdGpWXwzgNyUVchs6L7dcx0p84c,6281
+pyworkflow/__init__.py,sha256=y7ZDE1MHyoDPMBh_mnmHhmtYxFeZV0aSWeKpUn6JXuA,6281
 pyworkflow/config.py,sha256=pKwPrpCwBJiDpB-MIjM0U7GW1TFmQFO341pihL5-vTM,14455
 pyworkflow/discovery.py,sha256=snW3l4nvY3Nc067TGlwtn_qdzTU9ybN7YPr8FbvY8iM,8066
 pyworkflow/aws/__init__.py,sha256=Ak_xHcR9LTRX-CwcS0XecYmzrXZw4EM3V9aKBBDEmIk,1741
@@ -9,8 +9,8 @@ pyworkflow/celery/__init__.py,sha256=FywVyqnT8AYz9cXkr-wel7_-N7dHFsPNASEPMFESf4Q
 pyworkflow/celery/app.py,sha256=UwZauZjVzOxMPX3WmPilRi8Emg5_VbMjHjNn7uz7R14,9670
 pyworkflow/celery/loop.py,sha256=mu8cIfMJYgHAoGCN_DdDoNoXK3QHzHpLmrPCyFDQYIY,3016
 pyworkflow/celery/scheduler.py,sha256=Ms4rqRpdpMiLM8l4y3DK-Divunj9afYuUaGGoNQe7P4,11288
-pyworkflow/celery/singleton.py,sha256=BykAovBVP0XUo2ZbTQzD4fANa6C_lHSqqaWnhfNfQGw,12978
-pyworkflow/celery/tasks.py,sha256=uHpOoHvZd72CYxCG4yhjgyT7j12fOoyf2380pJgMACs,82083
+pyworkflow/celery/singleton.py,sha256=J4a5LY5GsSFbO2evkql4Pw7h38tA2rQbR3J2cXkJRZg,13155
+pyworkflow/celery/tasks.py,sha256=BNHZwWTSRc3q8EgAy4tEmXAm6O0vtVLgrG7MrO0ZZXA,86049
 pyworkflow/cli/__init__.py,sha256=tcbe-fcZmyeEKUy_aEo8bsEF40HsNKOwvyMBZIJZPwc,3844
 pyworkflow/cli/__main__.py,sha256=LxLLS4FEEPXa5rWpLTtKuivn6Xp9pGia-QKGoxt9SS0,148
 pyworkflow/cli/commands/__init__.py,sha256=IXvnTgukALckkO8fTlZhVRq80ojSqpnIIgboAg_-yZU,39
@@ -36,7 +36,7 @@ pyworkflow/cli/utils/storage.py,sha256=a5Iu2Xe1_mPgBVYc8B6I63MFfW12ko7wURqcpq3RB
 pyworkflow/context/__init__.py,sha256=dI5zW1lAFGw68jI2UpKUqyADozDboGNl-RmhEvSTuCI,2150
 pyworkflow/context/aws.py,sha256=MYxrFsRzCgaZ0YQAyE26UOT_ryxuag5DwiDSodclQIg,7571
 pyworkflow/context/base.py,sha256=Hlfm5MNHh_BVbRCgEcILmHiqsn81iYFqt0GSLkFGo00,13772
-pyworkflow/context/local.py,sha256=H9UTuIWjelP4Nsc16guDf_fSMxVsIyfzbymJclvADhw,37118
+pyworkflow/context/local.py,sha256=eKBF-e_WSkVIqbynVysQy6rH02rXmPts29KtjY41IQI,38853
 pyworkflow/context/mock.py,sha256=TJzQ3P3_ZHm1lCJZJACIFFvz2ydFxz2cT9eEGOQS5I0,12061
 pyworkflow/context/step_context.py,sha256=6P2jn1v7MTlYaWCTt6DBq7Nkmxm7nvna4oGpTZJeMbg,8862
 pyworkflow/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -58,37 +58,37 @@ pyworkflow/primitives/child_workflow.py,sha256=_T7PCqiH0tjIm_lpJ6NmfUPWCFx-MjH6t
 pyworkflow/primitives/continue_as_new.py,sha256=NKcimHsgr5ExkvRvfO28hxgPw_I7Q74Vz9WL8r0PhPc,3329
 pyworkflow/primitives/define_hook.py,sha256=gNzk7DuObfWG1T9AdHnDnGLHNKjnApiVRlCKPObugfY,4443
 pyworkflow/primitives/hooks.py,sha256=ws9U81ymsY8M4FFTvJ2X4EMGmIrilb3vCKZ0V_EGZdE,3085
-pyworkflow/primitives/resume_hook.py,sha256=q6gb0qsAhOkFRKMs-PkbLSFLnLerx0VGMkPp9CbkXZQ,6192
+pyworkflow/primitives/resume_hook.py,sha256=vwa0znU1DuasStzQVTES-jp9XUzbVq4vimmbpiZH6yg,6245
 pyworkflow/primitives/schedule.py,sha256=2hVM2Swl9dRx3RHd5nblJLaU8HaSy-NHYue2Cf9TOcU,14961
 pyworkflow/primitives/shield.py,sha256=MUYakU0euZoYNb6MbFyRfJN8GEXsRFkIbZEo84vRN9c,2924
 pyworkflow/primitives/sleep.py,sha256=iH1e5CoWY-jZbYNAU3GRW1xR_8EtCuPIcIohzU4jWJo,3097
 pyworkflow/runtime/__init__.py,sha256=DkwTgFCMRGyyW8NGcW7Nyy9beOg5kO1TXhqhysj1-aY,649
-pyworkflow/runtime/base.py,sha256=-X2pct03XuA3o1P6yD5ywTDgegN6_a450gG8MBVeKRE,5190
-pyworkflow/runtime/celery.py,sha256=FMxiLiRf1pLWD7itEyd6klrH8PjSUOLTxWd7E8TyOG4,9476
+pyworkflow/runtime/base.py,sha256=ATlPeheYzUMlk_v-9abLyUQf4y4iYB53VpaoQ73pYsc,5465
+pyworkflow/runtime/celery.py,sha256=JadRjmpL1rsp-zNAHq3vL8XdAhKucu1vEsxGvshaLjk,9961
 pyworkflow/runtime/factory.py,sha256=TRbqWPfyZ0tPFKb0faI9SkBRXxE5AEVTwGW4pS2diM8,2684
-pyworkflow/runtime/local.py,sha256=8mhUyMJDseXFsO0XvwJN6QkXd_4tjPEHG1_N_BmJgsE,25868
+pyworkflow/runtime/local.py,sha256=UPOdJsejrWzdz5p1XpTfMgWC0EK85qT7HHBUjmJ4xOQ,26228
 pyworkflow/scheduler/__init__.py,sha256=lQQo0Cia_ULIg-KPIrqILV30rUIzybxj1k_ZZTQNZyg,222
 pyworkflow/scheduler/local.py,sha256=CnK4UC6ofD3_AZJUlO9iUAdgAnbMmJvPaL_VucNKs5Q,8154
 pyworkflow/serialization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pyworkflow/serialization/decoder.py,sha256=F7Ofuw1Yzo82iSFFXiK2yoW_v2YRbLMpX3CQbKjm0Ls,3860
 pyworkflow/serialization/encoder.py,sha256=ZBwAxe5Bb4MCfFJePHw7ArJlIbBieSwUgsysGCI2iPU,4108
 pyworkflow/storage/__init__.py,sha256=LhVjLNZdo4Mi5dEC75hjSPnbQr9jBoIsTOrC8vzTGOM,1924
-pyworkflow/storage/base.py,sha256=DxgOB9kr3i1uaitY_E9PzhnNWxaq1U5EvbbSjKyoH8M,16104
-pyworkflow/storage/cassandra.py,sha256=Nig0SUlTyxuNgPjOXnVBlzDq3PAGci4jIT1JI0i-GOk,61428
+pyworkflow/storage/base.py,sha256=ZS0jTdBRmT4nhADIPpr_MmOvqQvhse3LSb6CDt5rr-4,16324
+pyworkflow/storage/cassandra.py,sha256=DTJJr4yqAV-941YjQQdoO1-5SsFoqke82sXpBrXPyKg,61741
 pyworkflow/storage/config.py,sha256=45UMPxRoqgK4ZwE7HIK9ctxE_eoK3eAE_1tRhn3Psd4,12410
-pyworkflow/storage/dynamodb.py,sha256=tGNQQqESxhZzOP5NJULCZKcQf9UuSQNL17TJo6R1jlw,53301
-pyworkflow/storage/file.py,sha256=lKilavXn_CRiIVL5XeV7tY9lm2vJADH-h9Teg0gA84A,28842
-pyworkflow/storage/memory.py,sha256=r2z6LiRw8J2AbO9Qw2wtYjzGfX-VJlRX_RVI2U8c-hs,19753
-pyworkflow/storage/mysql.py,sha256=f1aGyAL8fGsLnmHkpEwP4MFSwvYTpQxOBECHKCnetGI,42904
-pyworkflow/storage/postgres.py,sha256=KrDVF715YSEFouNVQOG6g-ekNvkTtrNxqWSvJMYueeM,44450
+pyworkflow/storage/dynamodb.py,sha256=8ClKZeP3TURDVJN2SJMAoLWWSXAgqysQXnqUXoRhaos,54159
+pyworkflow/storage/file.py,sha256=g2eO-MOaQKliM97_gTVGPS27GmH5mahi5GBZ99RP8Rs,30133
+pyworkflow/storage/memory.py,sha256=J5ZA2DsXE5tEQDn5dmtJ3mNEWy9okR6sTYZ-xp3slWY,20554
+pyworkflow/storage/mysql.py,sha256=lA2TRBvYlWEq5mQcnfyRivFO1n0kviq_yH3mEzGp8rs,43764
+pyworkflow/storage/postgres.py,sha256=GUW2OjV6CHWi9fUkUPswmGwvaY7c76QxIQ9kRL8BM6o,45351
 pyworkflow/storage/schemas.py,sha256=o1ntTYNgQQ5YVuXtPCShtENEsndVjdrXclWrkCgkitg,18002
-pyworkflow/storage/sqlite.py,sha256=oBzJnnOp2uk0-U7hMTQk9QgJq3RBwXPQfrmYpivjdgE,39529
+pyworkflow/storage/sqlite.py,sha256=qDhFjyFAenwYq6MF_66FFhDaBG7CEr7ni9Uy72X7MvQ,40377
 pyworkflow/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pyworkflow/utils/duration.py,sha256=C-itmiSQQlplw7j6XB679hLF9xYGnyCwm7twO88OF8U,3978
 pyworkflow/utils/schedule.py,sha256=dO_MkGFyfwZpb0LDlW6BGyZzlPuQIA6dc6j9nk9lc4Y,10691
-pyworkflow_engine-0.1.13.dist-info/licenses/LICENSE,sha256=Y49RCTZ5ayn_yzBcRxnyIFdcMCyuYm150aty_FIznfY,1080
-pyworkflow_engine-0.1.13.dist-info/METADATA,sha256=zqczKyklwOmbUKz9hfYrmhxP_ZteNp49g7RoB8zPaSM,19628
-pyworkflow_engine-0.1.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-pyworkflow_engine-0.1.13.dist-info/entry_points.txt,sha256=3IGAfuylnS39U0YX0pxnjrj54kB4iT_bNYrmsiDB-dE,51
-pyworkflow_engine-0.1.13.dist-info/top_level.txt,sha256=FLTv9pQmLDBXrQdLOhTMIS3njFibliMsQEfumqmdzBE,11
-pyworkflow_engine-0.1.13.dist-info/RECORD,,
+pyworkflow_engine-0.1.15.dist-info/licenses/LICENSE,sha256=Y49RCTZ5ayn_yzBcRxnyIFdcMCyuYm150aty_FIznfY,1080
+pyworkflow_engine-0.1.15.dist-info/METADATA,sha256=U7Dxscf56DPD5TOPCvuyeR75XF5CnftWN78i2yboZBU,19628
+pyworkflow_engine-0.1.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pyworkflow_engine-0.1.15.dist-info/entry_points.txt,sha256=3IGAfuylnS39U0YX0pxnjrj54kB4iT_bNYrmsiDB-dE,51
+pyworkflow_engine-0.1.15.dist-info/top_level.txt,sha256=FLTv9pQmLDBXrQdLOhTMIS3njFibliMsQEfumqmdzBE,11
+pyworkflow_engine-0.1.15.dist-info/RECORD,,