dbos 0.25.0a16__py3-none-any.whl → 0.26.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_context.py CHANGED
@@ -93,6 +93,11 @@ class DBOSContext:
          self.assumed_role: Optional[str] = None
          self.step_status: Optional[StepStatus] = None

+         # A user-specified workflow timeout. Takes priority over a propagated deadline.
+         self.workflow_timeout_ms: Optional[int] = None
+         # A propagated workflow deadline.
+         self.workflow_deadline_epoch_ms: Optional[int] = None
+
      def create_child(self) -> DBOSContext:
          rv = DBOSContext()
          rv.logger = self.logger
@@ -195,8 +200,10 @@ class DBOSContext:
      def end_handler(self, exc_value: Optional[BaseException]) -> None:
          self._end_span(exc_value)

-     def get_current_span(self) -> Span:
-         return self.spans[-1]
+     def get_current_span(self) -> Optional[Span]:
+         if len(self.spans):
+             return self.spans[-1]
+         return None

      def _start_span(self, attributes: TracedAttributes) -> None:
          attributes["operationUUID"] = (
@@ -358,11 +365,60 @@ class SetWorkflowID:
          return False  # Did not handle


+ class SetWorkflowTimeout:
+     """
+     Set the workflow timeout (in seconds) to be used for the enclosed workflow invocations.
+
+     Typical Usage
+     ```
+     with SetWorkflowTimeout(<timeout in seconds>):
+         result = workflow_function(...)
+     ```
+     """
+
+     def __init__(self, workflow_timeout_sec: Optional[float]) -> None:
+         if workflow_timeout_sec and not workflow_timeout_sec > 0:
+             raise Exception(
+                 f"Invalid workflow timeout {workflow_timeout_sec}. Timeouts must be positive."
+             )
+         self.created_ctx = False
+         self.workflow_timeout_ms = (
+             int(workflow_timeout_sec * 1000)
+             if workflow_timeout_sec is not None
+             else None
+         )
+         self.saved_workflow_timeout: Optional[int] = None
+
+     def __enter__(self) -> SetWorkflowTimeout:
+         # Code to create a basic context
+         ctx = get_local_dbos_context()
+         if ctx is None:
+             self.created_ctx = True
+             _set_local_dbos_context(DBOSContext())
+         ctx = assert_current_dbos_context()
+         self.saved_workflow_timeout = ctx.workflow_timeout_ms
+         ctx.workflow_timeout_ms = self.workflow_timeout_ms
+         return self
+
+     def __exit__(
+         self,
+         exc_type: Optional[Type[BaseException]],
+         exc_value: Optional[BaseException],
+         traceback: Optional[TracebackType],
+     ) -> Literal[False]:
+         assert_current_dbos_context().workflow_timeout_ms = self.saved_workflow_timeout
+         # Code to clean up the basic context if we created it
+         if self.created_ctx:
+             _clear_local_dbos_context()
+         return False  # Did not handle
+
+
  class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
      def __init__(self, attributes: TracedAttributes) -> None:
          self.created_ctx = False
          self.attributes = attributes
          self.is_temp_workflow = attributes["name"] == "temp_wf"
+         self.saved_workflow_timeout: Optional[int] = None

      def __enter__(self) -> DBOSContext:
          # Code to create a basic context
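For reference, a minimal usage sketch of the new `SetWorkflowTimeout` context manager, assuming it is re-exported from the `dbos` package alongside `SetWorkflowID`; `example_workflow` and its body are hypothetical:

```
from dbos import DBOS, SetWorkflowTimeout

@DBOS.workflow()
def example_workflow(x: int) -> int:
    return x * 2

# Any workflow started inside this block gets a 10-second timeout; if it is
# still running when the deadline passes, it is cancelled.
with SetWorkflowTimeout(10):
    result = example_workflow(5)
```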
@@ -372,6 +428,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
              ctx = DBOSContext()
              _set_local_dbos_context(ctx)
          assert not ctx.is_within_workflow()
+         # Unset the workflow_timeout_ms context var so it is not applied to this
+         # workflow's children (instead we propagate the deadline)
+         self.saved_workflow_timeout = ctx.workflow_timeout_ms
+         ctx.workflow_timeout_ms = None
          ctx.start_workflow(
              None, self.attributes, self.is_temp_workflow
          )  # Will get from the context's next workflow ID
@@ -386,6 +446,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
          ctx = assert_current_dbos_context()
          assert ctx.is_within_workflow()
          ctx.end_workflow(exc_value, self.is_temp_workflow)
+         # Restore the saved workflow timeout
+         ctx.workflow_timeout_ms = self.saved_workflow_timeout
+         # Clear any propagating timeout
+         ctx.workflow_deadline_epoch_ms = None
          # Code to clean up the basic context if we created it
          if self.created_ctx:
              _clear_local_dbos_context()
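Taken together, the context changes follow a save/unset/restore discipline: the caller's timeout is saved and cleared on workflow entry so that child workflows inherit the computed deadline rather than a fresh timeout, and both fields are reset on exit. A condensed, illustrative sketch of that life cycle (`run_workflow` is a hypothetical stand-in for the workflow body):

```
saved_timeout = ctx.workflow_timeout_ms      # save the caller's timeout
ctx.workflow_timeout_ms = None               # children inherit the deadline, not the timeout
try:
    run_workflow()                           # hypothetical workflow body
finally:
    ctx.workflow_timeout_ms = saved_timeout  # restore the timeout on exit
    ctx.workflow_deadline_epoch_ms = None    # stop propagating the deadline
```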
dbos/_core.py CHANGED
@@ -3,6 +3,7 @@ import functools
  import inspect
  import json
  import sys
+ import threading
  import time
  import traceback
  from concurrent.futures import Future
@@ -14,11 +15,9 @@ from typing import (
      Coroutine,
      Generic,
      Optional,
-     Tuple,
      TypeVar,
      Union,
      cast,
-     overload,
  )

  from dbos._outcome import Immediate, NoResult, Outcome, Pending
@@ -52,13 +51,13 @@ from ._error import (
      DBOSMaxStepRetriesExceeded,
      DBOSNonExistentWorkflowError,
      DBOSRecoveryError,
+     DBOSUnexpectedStepError,
      DBOSWorkflowCancelledError,
      DBOSWorkflowConflictIDError,
      DBOSWorkflowFunctionNotFoundError,
  )
  from ._registrations import (
      DEFAULT_MAX_RECOVERY_ATTEMPTS,
-     DBOSFuncInfo,
      get_config_name,
      get_dbos_class_name,
      get_dbos_func_name,
@@ -74,6 +73,7 @@ from ._serialization import WorkflowInputs
  from ._sys_db import (
      GetEventWorkflowContext,
      OperationResultInternal,
+     WorkflowStatus,
      WorkflowStatusInternal,
      WorkflowStatusString,
  )
@@ -86,7 +86,6 @@ if TYPE_CHECKING:
          DBOSRegistry,
          IsolationLevel,
      )
-     from ._workflow_commands import WorkflowStatus

  from sqlalchemy.exc import DBAPIError, InvalidRequestError

@@ -118,7 +117,7 @@ class WorkflowHandleFuture(Generic[R]):
          self.dbos._sys_db.record_get_result(self.workflow_id, serialized_r, None)
          return r

-     def get_status(self) -> "WorkflowStatus":
+     def get_status(self) -> WorkflowStatus:
          stat = self.dbos.get_workflow_status(self.workflow_id)
          if stat is None:
              raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -145,7 +144,7 @@ class WorkflowHandlePolling(Generic[R]):
          self.dbos._sys_db.record_get_result(self.workflow_id, serialized_r, None)
          return r

-     def get_status(self) -> "WorkflowStatus":
+     def get_status(self) -> WorkflowStatus:
          stat = self.dbos.get_workflow_status(self.workflow_id)
          if stat is None:
              raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -180,7 +179,7 @@ class WorkflowHandleAsyncTask(Generic[R]):
          )
          return r

-     async def get_status(self) -> "WorkflowStatus":
+     async def get_status(self) -> WorkflowStatus:
          stat = await asyncio.to_thread(self.dbos.get_workflow_status, self.workflow_id)
          if stat is None:
              raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -216,7 +215,7 @@ class WorkflowHandleAsyncPolling(Generic[R]):
          )
          return r

-     async def get_status(self) -> "WorkflowStatus":
+     async def get_status(self) -> WorkflowStatus:
          stat = await asyncio.to_thread(self.dbos.get_workflow_status, self.workflow_id)
          if stat is None:
              raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -226,19 +225,30 @@ class WorkflowHandleAsyncPolling(Generic[R]):
  def _init_workflow(
      dbos: "DBOS",
      ctx: DBOSContext,
+     *,
      inputs: WorkflowInputs,
      wf_name: str,
      class_name: Optional[str],
      config_name: Optional[str],
-     temp_wf_type: Optional[str],
-     queue: Optional[str] = None,
-     max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+     queue: Optional[str],
+     workflow_timeout_ms: Optional[int],
+     workflow_deadline_epoch_ms: Optional[int],
+     max_recovery_attempts: Optional[int],
  ) -> WorkflowStatusInternal:
      wfid = (
          ctx.workflow_id
          if len(ctx.workflow_id) > 0
          else ctx.id_assigned_for_next_workflow
      )
+
+     # In debug mode, just return the existing status
+     if dbos.debug_mode:
+         get_status_result = dbos._sys_db.get_workflow_status(wfid)
+         if get_status_result is None:
+             raise DBOSNonExistentWorkflowError(wfid)
+         return get_status_result
+
+     # Initialize a workflow status object from the context
      status: WorkflowStatusInternal = {
          "workflow_uuid": wfid,
          "status": (
@@ -266,31 +276,47 @@ def _init_workflow(
          "queue_name": queue,
          "created_at": None,
          "updated_at": None,
+         "workflow_timeout_ms": workflow_timeout_ms,
+         "workflow_deadline_epoch_ms": workflow_deadline_epoch_ms,
      }

      # If we have a class name, the first arg is the instance and do not serialize
      if class_name is not None:
          inputs = {"args": inputs["args"][1:], "kwargs": inputs["kwargs"]}

-     wf_status = status["status"]
-     if dbos.debug_mode:
-         get_status_result = dbos._sys_db.get_workflow_status(wfid)
-         if get_status_result is None:
-             raise DBOSNonExistentWorkflowError(wfid)
-         wf_status = get_status_result["status"]
-     else:
-         # Synchronously record the status and inputs for workflows
-         # TODO: Make this transactional (and with the queue step below)
-         wf_status = dbos._sys_db.insert_workflow_status(
-             status, max_recovery_attempts=max_recovery_attempts
-         )
+     # Synchronously record the status and inputs for workflows
+     wf_status, workflow_deadline_epoch_ms = dbos._sys_db.init_workflow(
+         status,
+         _serialization.serialize_args(inputs),
+         max_recovery_attempts=max_recovery_attempts,
+     )

-     # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
-     dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
+     if workflow_deadline_epoch_ms is not None:
+         evt = threading.Event()
+         dbos.stop_events.append(evt)
+
+         def timeout_func() -> None:
+             try:
+                 assert workflow_deadline_epoch_ms is not None
+                 time_to_wait_sec = (
+                     workflow_deadline_epoch_ms - (time.time() * 1000)
+                 ) / 1000
+                 if time_to_wait_sec > 0:
+                     was_stopped = evt.wait(time_to_wait_sec)
+                     if was_stopped:
+                         return
+                 dbos._sys_db.cancel_workflow(wfid)
+             except Exception as e:
+                 dbos.logger.warning(
+                     f"Exception in timeout thread for workflow {wfid}: {e}"
+                 )

-     if queue is not None and wf_status == WorkflowStatusString.ENQUEUED.value:
-         dbos._sys_db.enqueue(wfid, queue)
+         timeout_thread = threading.Thread(target=timeout_func, daemon=True)
+         timeout_thread.start()
+         dbos._background_threads.append(timeout_thread)

+     ctx.workflow_deadline_epoch_ms = workflow_deadline_epoch_ms
+     status["workflow_deadline_epoch_ms"] = workflow_deadline_epoch_ms
      status["status"] = wf_status
      return status
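The deadline enforcement above follows a watchdog pattern: a daemon thread sleeps on a `threading.Event` until the deadline and cancels the workflow unless the event is set first (e.g. at shutdown). A standalone sketch of the same pattern, with `cancel` standing in for `dbos._sys_db.cancel_workflow(wfid)` and all names illustrative:

```
import threading
import time
from typing import Callable


def start_deadline_watchdog(
    deadline_epoch_ms: int, cancel: Callable[[], None], stop_event: threading.Event
) -> threading.Thread:
    def watchdog() -> None:
        # Wait until the deadline, or until stop_event is set, whichever comes first.
        time_to_wait_sec = (deadline_epoch_ms - time.time() * 1000) / 1000
        if time_to_wait_sec > 0 and stop_event.wait(time_to_wait_sec):
            return  # stopped before the deadline fired
        cancel()

    thread = threading.Thread(target=watchdog, daemon=True)
    thread.start()
    return thread
```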
 
@@ -364,7 +390,9 @@ def _execute_workflow_wthread(
                  if isinstance(result, Immediate):
                      return cast(Immediate[R], result)()
                  else:
-                     return asyncio.run(cast(Pending[R], result)())
+                     return dbos._background_event_loop.submit_coroutine(
+                         cast(Pending[R], result)()
+                     )
              except Exception:
                  dbos.logger.error(
                      f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
@@ -398,9 +426,7 @@ async def _execute_workflow_async(
          raise


- def execute_workflow_by_id(
-     dbos: "DBOS", workflow_id: str, startNew: bool = False
- ) -> "WorkflowHandle[Any]":
+ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
      status = dbos._sys_db.get_workflow_status(workflow_id)
      if not status:
          raise DBOSRecoveryError(workflow_id, "Workflow status not found")
@@ -441,7 +467,7 @@ def execute_workflow_by_id(
          class_object = dbos._registry.class_info_map[class_name]
          inputs["args"] = (class_object,) + inputs["args"]

-     if startNew:
+     with SetWorkflowID(workflow_id):
          return start_workflow(
              dbos,
              wf_func,
@@ -450,16 +476,6 @@ def execute_workflow_by_id(
              *inputs["args"],
              **inputs["kwargs"],
          )
-     else:
-         with SetWorkflowID(workflow_id):
-             return start_workflow(
-                 dbos,
-                 wf_func,
-                 status["queue_name"],
-                 True,
-                 *inputs["args"],
-                 **inputs["kwargs"],
-             )


  def _get_new_wf() -> tuple[str, DBOSContext]:
@@ -516,6 +532,13 @@ def start_workflow(
          "kwargs": kwargs,
      }

+     local_ctx = get_local_dbos_context()
+     workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
+         local_ctx, queue_name
+     )
+     workflow_timeout_ms = (
+         local_ctx.workflow_timeout_ms if local_ctx is not None else None
+     )
      new_wf_id, new_wf_ctx = _get_new_wf()

      ctx = new_wf_ctx
@@ -534,8 +557,9 @@ def start_workflow(
          wf_name=get_dbos_func_name(func),
          class_name=get_dbos_class_name(fi, func, args),
          config_name=get_config_name(fi, func, args),
-         temp_wf_type=get_temp_workflow_type(func),
          queue=queue_name,
+         workflow_timeout_ms=workflow_timeout_ms,
+         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
          max_recovery_attempts=fi.max_recovery_attempts,
      )
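A hedged sketch of how these parameters surface through the public API: starting a workflow in the background inside a `SetWorkflowTimeout` block attaches the timeout (and, for non-queued invocations, a concrete deadline) to the recorded status. This assumes `DBOS.start_workflow` and `SetWorkflowTimeout` are exported as in the DBOS Python API; `example_workflow` is the hypothetical workflow from the earlier sketch, and DBOS must be configured and launched for this to run:

```
from dbos import DBOS, SetWorkflowTimeout

with SetWorkflowTimeout(60):
    handle = DBOS.start_workflow(example_workflow, 42)
result = handle.get_result()
```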
 
@@ -598,6 +622,10 @@ async def start_workflow_async(
          "kwargs": kwargs,
      }

+     local_ctx = get_local_dbos_context()
+     workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
+         local_ctx, queue_name
+     )
      new_wf_id, new_wf_ctx = _get_new_wf()

      ctx = new_wf_ctx
@@ -619,8 +647,9 @@ async def start_workflow_async(
          wf_name=get_dbos_func_name(func),
          class_name=get_dbos_class_name(fi, func, args),
          config_name=get_config_name(fi, func, args),
-         temp_wf_type=get_temp_workflow_type(func),
          queue=queue_name,
+         workflow_timeout_ms=workflow_timeout_ms,
+         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
          max_recovery_attempts=fi.max_recovery_attempts,
      )

@@ -668,7 +697,7 @@ else:
  def workflow_wrapper(
      dbosreg: "DBOSRegistry",
      func: Callable[P, R],
-     max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+     max_recovery_attempts: Optional[int] = DEFAULT_MAX_RECOVERY_ATTEMPTS,
  ) -> Callable[P, R]:
      func.__orig_func = func  # type: ignore

@@ -695,6 +724,9 @@ def workflow_wrapper(
              "kwargs": kwargs,
          }
          ctx = get_local_dbos_context()
+         workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
+             ctx, queue=None
+         )
          enterWorkflowCtxMgr = (
              EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
          )
@@ -732,7 +764,9 @@ def workflow_wrapper(
              wf_name=get_dbos_func_name(func),
              class_name=get_dbos_class_name(fi, func, args),
              config_name=get_config_name(fi, func, args),
-             temp_wf_type=get_temp_workflow_type(func),
+             queue=None,
+             workflow_timeout_ms=workflow_timeout_ms,
+             workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
              max_recovery_attempts=max_recovery_attempts,
          )

@@ -780,7 +814,7 @@ def workflow_wrapper(


  def decorate_workflow(
-     reg: "DBOSRegistry", max_recovery_attempts: int
+     reg: "DBOSRegistry", max_recovery_attempts: Optional[int]
  ) -> Callable[[Callable[P, R]], Callable[P, R]]:
      def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
          wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
@@ -794,19 +828,23 @@ def decorate_transaction(
      dbosreg: "DBOSRegistry", isolation_level: "IsolationLevel" = "SERIALIZABLE"
  ) -> Callable[[F], F]:
      def decorator(func: F) -> F:
+
+         transaction_name = func.__qualname__
+
          def invoke_tx(*args: Any, **kwargs: Any) -> Any:
              if dbosreg.dbos is None:
                  raise DBOSException(
                      f"Function {func.__name__} invoked before DBOS initialized"
                  )

+             dbos = dbosreg.dbos
              ctx = assert_current_dbos_context()
-             if dbosreg.is_workflow_cancelled(ctx.workflow_id):
+             status = dbos._sys_db.get_workflow_status(ctx.workflow_id)
+             if status and status["status"] == WorkflowStatusString.CANCELLED.value:
                  raise DBOSWorkflowCancelledError(
                      f"Workflow {ctx.workflow_id} is cancelled. Aborting transaction {func.__name__}."
                  )

-             dbos = dbosreg.dbos
              with dbos._app_db.sessionmaker() as session:
                  attributes: TracedAttributes = {
                      "name": func.__name__,
@@ -822,17 +860,12 @@ def decorate_transaction(
                      "txn_snapshot": "",  # TODO: add actual snapshot
                      "executor_id": None,
                      "txn_id": None,
+                     "function_name": transaction_name,
                  }
                  retry_wait_seconds = 0.001
                  backoff_factor = 1.5
                  max_retry_wait_seconds = 2.0
                  while True:
-
-                     if dbosreg.is_workflow_cancelled(ctx.workflow_id):
-                         raise DBOSWorkflowCancelledError(
-                             f"Workflow {ctx.workflow_id} is cancelled. Aborting transaction {func.__name__}."
-                         )
-
                      has_recorded_error = False
                      txn_error: Optional[Exception] = None
                      try:
@@ -849,6 +882,7 @@ def decorate_transaction(
                                  session,
                                  ctx.workflow_id,
                                  ctx.function_id,
+                                 transaction_name,
                              )
                          )
                          if dbos.debug_mode and recorded_output is None:
@@ -892,10 +926,12 @@ def decorate_transaction(
                      except DBAPIError as dbapi_error:
                          if dbapi_error.orig.sqlstate == "40001":  # type: ignore
                              # Retry on serialization failure
-                             ctx.get_current_span().add_event(
-                                 "Transaction Serialization Failure",
-                                 {"retry_wait_seconds": retry_wait_seconds},
-                             )
+                             span = ctx.get_current_span()
+                             if span:
+                                 span.add_event(
+                                     "Transaction Serialization Failure",
+                                     {"retry_wait_seconds": retry_wait_seconds},
+                                 )
                              time.sleep(retry_wait_seconds)
                              retry_wait_seconds = min(
                                  retry_wait_seconds * backoff_factor,
@@ -910,6 +946,8 @@ def decorate_transaction(
                          )
                          txn_error = invalid_request_error
                          raise
+                     except DBOSUnexpectedStepError:
+                         raise
                      except Exception as error:
                          txn_error = error
                          raise
@@ -975,7 +1013,7 @@ def decorate_step(
  ) -> Callable[[Callable[P, R]], Callable[P, R]]:
      def decorator(func: Callable[P, R]) -> Callable[P, R]:

-         stepName = func.__qualname__
+         step_name = func.__qualname__

          def invoke_step(*args: Any, **kwargs: Any) -> Any:
              if dbosreg.dbos is None:
@@ -989,13 +1027,6 @@ def decorate_step(
                  "operationType": OperationType.STEP.value,
              }

-             # Check if the workflow is cancelled
-             ctx = assert_current_dbos_context()
-             if dbosreg.is_workflow_cancelled(ctx.workflow_id):
-                 raise DBOSWorkflowCancelledError(
-                     f"Workflow {ctx.workflow_id} is cancelled. Aborting step {func.__name__}."
-                 )
-
              attempts = max_attempts if retries_allowed else 1
              max_retry_interval_seconds: float = 3600  # 1 Hour

@@ -1004,13 +1035,15 @@ def decorate_step(
                      f"Step being automatically retried. (attempt {attempt + 1} of {attempts}). {traceback.format_exc()}"
                  )
                  ctx = assert_current_dbos_context()
-                 ctx.get_current_span().add_event(
-                     f"Step attempt {attempt} failed",
-                     {
-                         "error": str(error),
-                         "retryIntervalSeconds": interval_seconds,
-                     },
-                 )
+                 span = ctx.get_current_span()
+                 if span:
+                     span.add_event(
+                         f"Step attempt {attempt} failed",
+                         {
+                             "error": str(error),
+                             "retryIntervalSeconds": interval_seconds,
+                         },
+                     )
                  return min(
                      interval_seconds * (backoff_rate**attempt),
                      max_retry_interval_seconds,
@@ -1021,7 +1054,7 @@ def decorate_step(
              step_output: OperationResultInternal = {
                  "workflow_uuid": ctx.workflow_id,
                  "function_id": ctx.function_id,
-                 "function_name": stepName,
+                 "function_name": step_name,
                  "output": None,
                  "error": None,
              }
@@ -1039,7 +1072,7 @@ def decorate_step(
          def check_existing_result() -> Union[NoResult, R]:
              ctx = assert_current_dbos_context()
              recorded_output = dbos._sys_db.check_operation_execution(
-                 ctx.workflow_id, ctx.function_id
+                 ctx.workflow_id, ctx.function_id, step_name
              )
              if dbos.debug_mode and recorded_output is None:
                  raise DBOSException("Step output not found in debug mode")
@@ -1227,3 +1260,24 @@ def get_event(
      else:
          # Directly call it outside of a workflow
          return dbos._sys_db.get_event(workflow_id, key, timeout_seconds)
+
+
+ def _get_timeout_deadline(
+     ctx: Optional[DBOSContext], queue: Optional[str]
+ ) -> tuple[Optional[int], Optional[int]]:
+     if ctx is None:
+         return None, None
+     # If a timeout is explicitly specified, use it over any propagated deadline
+     if ctx.workflow_timeout_ms:
+         if queue:
+             # Queued workflows are assigned a deadline on dequeue
+             return ctx.workflow_timeout_ms, None
+         else:
+             # Otherwise, compute the deadline immediately
+             return (
+                 ctx.workflow_timeout_ms,
+                 int(time.time() * 1000) + ctx.workflow_timeout_ms,
+             )
+     # Otherwise, return the propagated deadline, if any
+     else:
+         return None, ctx.workflow_deadline_epoch_ms
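To illustrate the resolution rules above with concrete numbers (hypothetical values, not API calls): a 30-second timeout on a direct invocation yields both a timeout and an immediate deadline, the same timeout on a queued invocation defers the deadline until dequeue, and with no timeout only a propagated deadline (if any) flows through.

```
import time

timeout_ms = 30_000
now_ms = int(time.time() * 1000)

direct_invocation = (timeout_ms, now_ms + timeout_ms)  # timeout set, no queue
queued_invocation = (timeout_ms, None)                 # deadline assigned on dequeue
no_timeout = (None, None)                              # unless a deadline was propagated
```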