dbos 1.1.0a2__tar.gz → 1.1.0a4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. {dbos-1.1.0a2 → dbos-1.1.0a4}/PKG-INFO +1 -1
  2. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_context.py +6 -0
  3. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_error.py +14 -0
  4. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_sys_db.py +87 -18
  5. {dbos-1.1.0a2 → dbos-1.1.0a4}/pyproject.toml +1 -1
  6. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_concurrency.py +65 -1
  7. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_dbos.py +10 -4
  8. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_failures.py +6 -0
  9. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_queue.py +116 -11
  10. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_workflow_introspection.py +4 -0
  11. {dbos-1.1.0a2 → dbos-1.1.0a4}/LICENSE +0 -0
  12. {dbos-1.1.0a2 → dbos-1.1.0a4}/README.md +0 -0
  13. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/__init__.py +0 -0
  14. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/__main__.py +0 -0
  15. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_admin_server.py +0 -0
  16. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_app_db.py +0 -0
  17. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_classproperty.py +0 -0
  18. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_client.py +0 -0
  19. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_conductor/conductor.py +0 -0
  20. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_conductor/protocol.py +0 -0
  21. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_core.py +0 -0
  22. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_croniter.py +0 -0
  23. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_dbos.py +0 -0
  24. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_dbos_config.py +0 -0
  25. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_debug.py +0 -0
  26. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_docker_pg_helper.py +0 -0
  27. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_event_loop.py +0 -0
  28. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_fastapi.py +0 -0
  29. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_flask.py +0 -0
  30. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_kafka.py +0 -0
  31. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_kafka_message.py +0 -0
  32. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_logger.py +0 -0
  33. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/env.py +0 -0
  34. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/script.py.mako +0 -0
  35. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  36. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
  37. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  38. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  39. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -0
  40. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py +0 -0
  41. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  42. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  43. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  44. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  45. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
  46. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_outcome.py +0 -0
  47. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_queue.py +0 -0
  48. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_recovery.py +0 -0
  49. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_registrations.py +0 -0
  50. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_roles.py +0 -0
  51. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_scheduler.py +0 -0
  52. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_schemas/__init__.py +0 -0
  53. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_schemas/application_database.py +0 -0
  54. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_schemas/system_database.py +0 -0
  55. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_serialization.py +0 -0
  56. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/README.md +0 -0
  57. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  58. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
  59. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  60. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
  61. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  62. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
  63. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
  64. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
  65. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  66. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_tracer.py +0 -0
  67. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_utils.py +0 -0
  68. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/_workflow_commands.py +0 -0
  69. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/cli/_github_init.py +0 -0
  70. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/cli/_template_init.py +0 -0
  71. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/cli/cli.py +0 -0
  72. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/dbos-config.schema.json +0 -0
  73. {dbos-1.1.0a2 → dbos-1.1.0a4}/dbos/py.typed +0 -0
  74. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/__init__.py +0 -0
  75. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/atexit_no_ctor.py +0 -0
  76. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/atexit_no_launch.py +0 -0
  77. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/classdefs.py +0 -0
  78. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/client_collateral.py +0 -0
  79. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/client_worker.py +0 -0
  80. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/conftest.py +0 -0
  81. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/dupname_classdefs1.py +0 -0
  82. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/dupname_classdefsa.py +0 -0
  83. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/more_classdefs.py +0 -0
  84. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/queuedworkflow.py +0 -0
  85. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_admin_server.py +0 -0
  86. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_async.py +0 -0
  87. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_classdecorators.py +0 -0
  88. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_cli.py +0 -0
  89. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_client.py +0 -0
  90. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_config.py +0 -0
  91. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_croniter.py +0 -0
  92. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_debug.py +0 -0
  93. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_docker_secrets.py +0 -0
  94. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_fastapi.py +0 -0
  95. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_fastapi_roles.py +0 -0
  96. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_flask.py +0 -0
  97. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_kafka.py +0 -0
  98. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_outcome.py +0 -0
  99. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_package.py +0 -0
  100. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_scheduler.py +0 -0
  101. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_schema_migration.py +0 -0
  102. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_singleton.py +0 -0
  103. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_spans.py +0 -0
  104. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_sqlalchemy.py +0 -0
  105. {dbos-1.1.0a2 → dbos-1.1.0a4}/tests/test_workflow_management.py +0 -0
  106. {dbos-1.1.0a2 → dbos-1.1.0a4}/version/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dbos
3
- Version: 1.1.0a2
3
+ Version: 1.1.0a4
4
4
  Summary: Ultra-lightweight durable execution in Python
5
5
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
6
6
  License: MIT
@@ -392,6 +392,7 @@ class SetWorkflowTimeout:
392
392
  else None
393
393
  )
394
394
  self.saved_workflow_timeout: Optional[int] = None
395
+ self.saved_workflow_deadline_epoch_ms: Optional[int] = None
395
396
 
396
397
  def __enter__(self) -> SetWorkflowTimeout:
397
398
  # Code to create a basic context
@@ -402,6 +403,8 @@ class SetWorkflowTimeout:
402
403
  ctx = assert_current_dbos_context()
403
404
  self.saved_workflow_timeout = ctx.workflow_timeout_ms
404
405
  ctx.workflow_timeout_ms = self.workflow_timeout_ms
406
+ self.saved_workflow_deadline_epoch_ms = ctx.workflow_deadline_epoch_ms
407
+ ctx.workflow_deadline_epoch_ms = None
405
408
  return self
406
409
 
407
410
  def __exit__(
@@ -411,6 +414,9 @@ class SetWorkflowTimeout:
411
414
  traceback: Optional[TracebackType],
412
415
  ) -> Literal[False]:
413
416
  assert_current_dbos_context().workflow_timeout_ms = self.saved_workflow_timeout
417
+ assert_current_dbos_context().workflow_deadline_epoch_ms = (
418
+ self.saved_workflow_deadline_epoch_ms
419
+ )
414
420
  # Code to clean up the basic context if we created it
415
421
  if self.created_ctx:
416
422
  _clear_local_dbos_context()
@@ -62,6 +62,7 @@ class DBOSErrorCode(Enum):
62
62
  WorkflowCancelled = 10
63
63
  UnexpectedStep = 11
64
64
  QueueDeduplicated = 12
65
+ AwaitedWorkflowCancelled = 13
65
66
  ConflictingRegistrationError = 25
66
67
 
67
68
 
@@ -206,6 +207,19 @@ class DBOSQueueDeduplicatedError(DBOSException):
206
207
  )
207
208
 
208
209
 
210
+ class DBOSAwaitedWorkflowCancelledError(DBOSException):
211
+ def __init__(self, workflow_id: str):
212
+ self.workflow_id = workflow_id
213
+ super().__init__(
214
+ f"Awaited workflow {workflow_id} was cancelled",
215
+ dbos_error_code=DBOSErrorCode.AwaitedWorkflowCancelled.value,
216
+ )
217
+
218
+ def __reduce__(self) -> Any:
219
+ # Tell jsonpickle how to reconstruct this object
220
+ return (self.__class__, (self.workflow_id,))
221
+
222
+
209
223
  #######################################
210
224
  ## BaseException
211
225
  #######################################
@@ -32,6 +32,7 @@ from dbos._utils import INTERNAL_QUEUE_NAME
32
32
  from . import _serialization
33
33
  from ._context import get_local_dbos_context
34
34
  from ._error import (
35
+ DBOSAwaitedWorkflowCancelledError,
35
36
  DBOSConflictingWorkflowError,
36
37
  DBOSDeadLetterQueueError,
37
38
  DBOSNonExistentWorkflowError,
@@ -96,6 +97,10 @@ class WorkflowStatus:
96
97
  executor_id: Optional[str]
97
98
  # The application version on which this workflow was started
98
99
  app_version: Optional[str]
100
+ # The start-to-close timeout of the workflow in ms
101
+ workflow_timeout_ms: Optional[int]
102
+ # The deadline of a workflow, computed by adding its timeout to its start time.
103
+ workflow_deadline_epoch_ms: Optional[int]
99
104
 
100
105
  # INTERNAL FIELDS
101
106
 
@@ -222,6 +227,47 @@ class StepInfo(TypedDict):
222
227
  _dbos_null_topic = "__null__topic__"
223
228
 
224
229
 
230
+ class ConditionCount(TypedDict):
231
+ condition: threading.Condition
232
+ count: int
233
+
234
+
235
+ class ThreadSafeConditionDict:
236
+ def __init__(self) -> None:
237
+ self._dict: Dict[str, ConditionCount] = {}
238
+ self._lock = threading.Lock()
239
+
240
+ def get(self, key: str) -> Optional[threading.Condition]:
241
+ with self._lock:
242
+ if key not in self._dict:
243
+ # Key does not exist, return None
244
+ return None
245
+ return self._dict[key]["condition"]
246
+
247
+ def set(
248
+ self, key: str, value: threading.Condition
249
+ ) -> tuple[bool, threading.Condition]:
250
+ with self._lock:
251
+ if key in self._dict:
252
+ # Key already exists, do not overwrite. Increment the wait count.
253
+ cc = self._dict[key]
254
+ cc["count"] += 1
255
+ return False, cc["condition"]
256
+ self._dict[key] = ConditionCount(condition=value, count=1)
257
+ return True, value
258
+
259
+ def pop(self, key: str) -> None:
260
+ with self._lock:
261
+ if key in self._dict:
262
+ cc = self._dict[key]
263
+ cc["count"] -= 1
264
+ if cc["count"] == 0:
265
+ # No more threads waiting on this condition, remove it
266
+ del self._dict[key]
267
+ else:
268
+ dbos_logger.warning(f"Key {key} not found in condition dictionary.")
269
+
270
+
225
271
  class SystemDatabase:
226
272
 
227
273
  def __init__(
@@ -248,8 +294,8 @@ class SystemDatabase:
248
294
  self._engine_kwargs = engine_kwargs
249
295
 
250
296
  self.notification_conn: Optional[psycopg.connection.Connection] = None
251
- self.notifications_map: Dict[str, threading.Condition] = {}
252
- self.workflow_events_map: Dict[str, threading.Condition] = {}
297
+ self.notifications_map = ThreadSafeConditionDict()
298
+ self.workflow_events_map = ThreadSafeConditionDict()
253
299
 
254
300
  # Now we can run background processes
255
301
  self._run_background_processes = True
@@ -720,9 +766,9 @@ class SystemDatabase:
720
766
  error = row[2]
721
767
  raise _serialization.deserialize_exception(error)
722
768
  elif status == WorkflowStatusString.CANCELLED.value:
723
- # Raise a normal exception here, not the cancellation exception
769
+ # Raise AwaitedWorkflowCancelledError here, not the cancellation exception
724
770
  # because the awaiting workflow is not being cancelled.
725
- raise Exception(f"Awaited workflow {workflow_id} was cancelled")
771
+ raise DBOSAwaitedWorkflowCancelledError(workflow_id)
726
772
  else:
727
773
  pass # CB: I guess we're assuming the WF will show up eventually.
728
774
  time.sleep(1)
@@ -796,6 +842,8 @@ class SystemDatabase:
796
842
  SystemSchema.workflow_inputs.c.inputs,
797
843
  SystemSchema.workflow_status.c.output,
798
844
  SystemSchema.workflow_status.c.error,
845
+ SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
846
+ SystemSchema.workflow_status.c.workflow_timeout_ms,
799
847
  ).join(
800
848
  SystemSchema.workflow_inputs,
801
849
  SystemSchema.workflow_status.c.workflow_uuid
@@ -877,6 +925,8 @@ class SystemDatabase:
877
925
  info.input = inputs
878
926
  info.output = output
879
927
  info.error = exception
928
+ info.workflow_deadline_epoch_ms = row[18]
929
+ info.workflow_timeout_ms = row[19]
880
930
 
881
931
  infos.append(info)
882
932
  return infos
@@ -906,6 +956,8 @@ class SystemDatabase:
906
956
  SystemSchema.workflow_inputs.c.inputs,
907
957
  SystemSchema.workflow_status.c.output,
908
958
  SystemSchema.workflow_status.c.error,
959
+ SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
960
+ SystemSchema.workflow_status.c.workflow_timeout_ms,
909
961
  ).select_from(
910
962
  SystemSchema.workflow_queue.join(
911
963
  SystemSchema.workflow_status,
@@ -983,6 +1035,8 @@ class SystemDatabase:
983
1035
  info.input = inputs
984
1036
  info.output = output
985
1037
  info.error = exception
1038
+ info.workflow_deadline_epoch_ms = row[18]
1039
+ info.workflow_timeout_ms = row[19]
986
1040
 
987
1041
  infos.append(info)
988
1042
 
@@ -1288,7 +1342,12 @@ class SystemDatabase:
1288
1342
  condition = threading.Condition()
1289
1343
  # Must acquire first before adding to the map. Otherwise, the notification listener may notify it before the condition is acquired and waited.
1290
1344
  condition.acquire()
1291
- self.notifications_map[payload] = condition
1345
+ success, _ = self.notifications_map.set(payload, condition)
1346
+ if not success:
1347
+ # This should not happen, but if it does, it means the workflow is executed concurrently.
1348
+ condition.release()
1349
+ self.notifications_map.pop(payload)
1350
+ raise DBOSWorkflowConflictIDError(workflow_uuid)
1292
1351
 
1293
1352
  # Check if the key is already in the database. If not, wait for the notification.
1294
1353
  init_recv: Sequence[Any]
@@ -1381,11 +1440,11 @@ class SystemDatabase:
1381
1440
  f"Received notification on channel: {channel}, payload: {notify.payload}"
1382
1441
  )
1383
1442
  if channel == "dbos_notifications_channel":
1384
- if (
1385
- notify.payload
1386
- and notify.payload in self.notifications_map
1387
- ):
1388
- condition = self.notifications_map[notify.payload]
1443
+ if notify.payload:
1444
+ condition = self.notifications_map.get(notify.payload)
1445
+ if condition is None:
1446
+ # No condition found for this payload
1447
+ continue
1389
1448
  condition.acquire()
1390
1449
  condition.notify_all()
1391
1450
  condition.release()
@@ -1393,11 +1452,11 @@ class SystemDatabase:
1393
1452
  f"Signaled notifications condition for {notify.payload}"
1394
1453
  )
1395
1454
  elif channel == "dbos_workflow_events_channel":
1396
- if (
1397
- notify.payload
1398
- and notify.payload in self.workflow_events_map
1399
- ):
1400
- condition = self.workflow_events_map[notify.payload]
1455
+ if notify.payload:
1456
+ condition = self.workflow_events_map.get(notify.payload)
1457
+ if condition is None:
1458
+ # No condition found for this payload
1459
+ continue
1401
1460
  condition.acquire()
1402
1461
  condition.notify_all()
1403
1462
  condition.release()
@@ -1535,8 +1594,13 @@ class SystemDatabase:
1535
1594
 
1536
1595
  payload = f"{target_uuid}::{key}"
1537
1596
  condition = threading.Condition()
1538
- self.workflow_events_map[payload] = condition
1539
1597
  condition.acquire()
1598
+ success, existing_condition = self.workflow_events_map.set(payload, condition)
1599
+ if not success:
1600
+ # Wait on the existing condition
1601
+ condition.release()
1602
+ condition = existing_condition
1603
+ condition.acquire()
1540
1604
 
1541
1605
  # Check if the key is already in the database. If not, wait for the notification.
1542
1606
  init_recv: Sequence[Any]
@@ -1776,8 +1840,13 @@ class SystemDatabase:
1776
1840
  # If a timeout is set, set the deadline on dequeue
1777
1841
  workflow_deadline_epoch_ms=sa.case(
1778
1842
  (
1779
- SystemSchema.workflow_status.c.workflow_timeout_ms.isnot(
1780
- None
1843
+ sa.and_(
1844
+ SystemSchema.workflow_status.c.workflow_timeout_ms.isnot(
1845
+ None
1846
+ ),
1847
+ SystemSchema.workflow_status.c.workflow_deadline_epoch_ms.is_(
1848
+ None
1849
+ ),
1781
1850
  ),
1782
1851
  sa.func.extract("epoch", sa.func.now()) * 1000
1783
1852
  + SystemSchema.workflow_status.c.workflow_timeout_ms,
@@ -27,7 +27,7 @@ dependencies = [
27
27
  ]
28
28
  requires-python = ">=3.9"
29
29
  readme = "README.md"
30
- version = "1.1.0a2"
30
+ version = "1.1.0a4"
31
31
 
32
32
  [project.license]
33
33
  text = "MIT"
@@ -2,7 +2,7 @@ import threading
2
2
  import time
3
3
  import uuid
4
4
  from concurrent.futures import Future, ThreadPoolExecutor
5
- from typing import Tuple
5
+ from typing import Tuple, cast
6
6
 
7
7
  from sqlalchemy import text
8
8
 
@@ -108,3 +108,67 @@ def test_concurrent_conflict_uuid(dbos: DBOS) -> None:
108
108
 
109
109
  assert future1.result() == wfuuid
110
110
  assert future2.result() == wfuuid
111
+
112
+
113
+ def test_concurrent_recv(dbos: DBOS) -> None:
114
+ condition = threading.Condition()
115
+ counter = 0
116
+
117
+ @DBOS.workflow()
118
+ def test_workflow(topic: str) -> str:
119
+ nonlocal counter
120
+ condition.acquire()
121
+ counter += 1
122
+ if counter % 2 == 1:
123
+ # Wait for the other one to notify
124
+ condition.wait()
125
+ else:
126
+ # Notify the other one
127
+ condition.notify()
128
+ condition.release()
129
+ m = cast(str, DBOS.recv(topic, 5))
130
+ return m
131
+
132
+ def test_thread(id: str, topic: str) -> str:
133
+ with SetWorkflowID(id):
134
+ return test_workflow(topic)
135
+
136
+ wfuuid = str(uuid.uuid4())
137
+ topic = "test_topic"
138
+ with ThreadPoolExecutor(max_workers=2) as executor:
139
+ future1 = executor.submit(test_thread, wfuuid, topic)
140
+ future2 = executor.submit(test_thread, wfuuid, topic)
141
+
142
+ expected_message = "test message"
143
+ DBOS.send(wfuuid, expected_message, topic)
144
+ # Both should return the same message
145
+ assert future1.result() == future2.result()
146
+ assert future1.result() == expected_message
147
+ # Make sure the notification map is empty
148
+ assert not dbos._sys_db.notifications_map._dict
149
+
150
+
151
+ def test_concurrent_getevent(dbos: DBOS) -> None:
152
+ @DBOS.workflow()
153
+ def test_workflow(event_name: str, value: str) -> str:
154
+ DBOS.set_event(event_name, value)
155
+ return value
156
+
157
+ def test_thread(id: str, event_name: str) -> str:
158
+ return cast(str, DBOS.get_event(id, event_name, 5))
159
+
160
+ wfuuid = str(uuid.uuid4())
161
+ event_name = "test_event"
162
+ with ThreadPoolExecutor(max_workers=2) as executor:
163
+ future1 = executor.submit(test_thread, wfuuid, event_name)
164
+ future2 = executor.submit(test_thread, wfuuid, event_name)
165
+
166
+ expected_message = "test message"
167
+ with SetWorkflowID(wfuuid):
168
+ test_workflow(event_name, expected_message)
169
+
170
+ # Both should return the same message
171
+ assert future1.result() == future2.result()
172
+ assert future1.result() == expected_message
173
+ # Make sure the event map is empty
174
+ assert not dbos._sys_db.workflow_events_map._dict
@@ -24,6 +24,7 @@ from dbos import (
24
24
  # Private API because this is a test
25
25
  from dbos._context import assert_current_dbos_context, get_local_dbos_context
26
26
  from dbos._error import (
27
+ DBOSAwaitedWorkflowCancelledError,
27
28
  DBOSConflictingRegistrationError,
28
29
  DBOSMaxStepRetriesExceeded,
29
30
  DBOSWorkflowCancelledError,
@@ -1507,7 +1508,14 @@ def test_workflow_timeout(dbos: DBOS) -> None:
1507
1508
  with SetWorkflowID(wfid):
1508
1509
  blocked_workflow()
1509
1510
  assert assert_current_dbos_context().workflow_deadline_epoch_ms is None
1511
+ start_time = time.time() * 1000
1510
1512
  handle = DBOS.start_workflow(blocked_workflow)
1513
+ status = handle.get_status()
1514
+ assert status.workflow_timeout_ms == 100
1515
+ assert (
1516
+ status.workflow_deadline_epoch_ms is not None
1517
+ and status.workflow_deadline_epoch_ms > start_time
1518
+ )
1511
1519
  with pytest.raises(DBOSWorkflowCancelledError):
1512
1520
  handle.get_result()
1513
1521
 
@@ -1555,13 +1563,11 @@ def test_workflow_timeout(dbos: DBOS) -> None:
1555
1563
  with pytest.raises(DBOSWorkflowCancelledError):
1556
1564
  parent_workflow()
1557
1565
 
1558
- with pytest.raises(Exception) as exc_info:
1566
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
1559
1567
  DBOS.retrieve_workflow(start_child).get_result()
1560
- assert "was cancelled" in str(exc_info.value)
1561
1568
 
1562
- with pytest.raises(Exception) as exc_info:
1569
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
1563
1570
  DBOS.retrieve_workflow(direct_child).get_result()
1564
- assert "was cancelled" in str(exc_info.value)
1565
1571
 
1566
1572
  # Verify the context variables are set correctly
1567
1573
  with SetWorkflowTimeout(1.0):
@@ -9,6 +9,7 @@ from sqlalchemy.exc import InvalidRequestError, OperationalError
9
9
 
10
10
  from dbos import DBOS, Queue, SetWorkflowID
11
11
  from dbos._error import (
12
+ DBOSAwaitedWorkflowCancelledError,
12
13
  DBOSDeadLetterQueueError,
13
14
  DBOSMaxStepRetriesExceeded,
14
15
  DBOSNotAuthorizedError,
@@ -461,6 +462,11 @@ def test_error_serialization() -> None:
461
462
  d = deserialize_exception(serialize_exception(e))
462
463
  assert isinstance(d, DBOSQueueDeduplicatedError)
463
464
  assert str(d) == str(e)
465
+ # AwaitedWorkflowCancelledError
466
+ e = DBOSAwaitedWorkflowCancelledError("id")
467
+ d = deserialize_exception(serialize_exception(e))
468
+ assert isinstance(d, DBOSAwaitedWorkflowCancelledError)
469
+ assert str(d) == str(e)
464
470
 
465
471
  # Test safe_deserialize
466
472
  class BadException(Exception):
@@ -26,6 +26,7 @@ from dbos import (
26
26
  )
27
27
  from dbos._context import assert_current_dbos_context
28
28
  from dbos._dbos import WorkflowHandleAsync
29
+ from dbos._error import DBOSAwaitedWorkflowCancelledError, DBOSWorkflowCancelledError
29
30
  from dbos._schemas.system_database import SystemSchema
30
31
  from dbos._sys_db import WorkflowStatusString
31
32
  from dbos._utils import GlobalParams
@@ -853,9 +854,8 @@ def test_cancelling_queued_workflows(dbos: DBOS) -> None:
853
854
 
854
855
  # Complete the blocked workflow
855
856
  blocking_event.set()
856
- with pytest.raises(Exception) as exc_info:
857
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
857
858
  blocked_handle.get_result()
858
- assert "was cancelled" in str(exc_info.value)
859
859
 
860
860
  # Verify all queue entries eventually get cleaned up.
861
861
  assert queue_entries_are_cleaned_up(dbos)
@@ -891,9 +891,8 @@ def test_timeout_queue(dbos: DBOS) -> None:
891
891
 
892
892
  # Verify the blocked workflows are cancelled
893
893
  for handle in handles:
894
- with pytest.raises(Exception) as exc_info:
894
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
895
895
  handle.get_result()
896
- assert "was cancelled" in str(exc_info.value)
897
896
 
898
897
  # Verify the normal workflow succeeds
899
898
  normal_handle.get_result()
@@ -911,17 +910,14 @@ def test_timeout_queue(dbos: DBOS) -> None:
911
910
 
912
911
  with SetWorkflowTimeout(1.0):
913
912
  handle = queue.enqueue(parent_workflow)
914
- with pytest.raises(Exception) as exc_info:
913
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
915
914
  handle.get_result()
916
- assert "was cancelled" in str(exc_info.value)
917
915
 
918
- with pytest.raises(Exception) as exc_info:
916
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
919
917
  DBOS.retrieve_workflow(child_id).get_result()
920
- assert "was cancelled" in str(exc_info.value)
921
918
 
922
919
  # Verify if a parent called with a timeout enqueues a blocked child
923
920
  # then exits the deadline propagates and the child is cancelled.
924
- child_id = str(uuid.uuid4())
925
921
  queue = Queue("regular_queue")
926
922
 
927
923
  @DBOS.workflow()
@@ -931,9 +927,41 @@ def test_timeout_queue(dbos: DBOS) -> None:
931
927
 
932
928
  with SetWorkflowTimeout(1.0):
933
929
  child_id = exiting_parent_workflow()
934
- with pytest.raises(Exception) as exc_info:
930
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
931
+ DBOS.retrieve_workflow(child_id).get_result()
932
+
933
+ # Verify if a parent called with a timeout enqueues a child that
934
+ # never starts because the queue is blocked, the deadline propagates
935
+ # and both parent and child are cancelled.
936
+ child_id = str(uuid.uuid4())
937
+ queue = Queue("stuck_queue", concurrency=1)
938
+
939
+ start_event = threading.Event()
940
+ blocking_event = threading.Event()
941
+
942
+ @DBOS.workflow()
943
+ def stuck_workflow() -> None:
944
+ start_event.set()
945
+ blocking_event.wait()
946
+
947
+ stuck_handle = queue.enqueue(stuck_workflow)
948
+ start_event.wait()
949
+
950
+ @DBOS.workflow()
951
+ def blocked_parent_workflow() -> None:
952
+ with SetWorkflowID(child_id):
953
+ queue.enqueue(blocking_workflow)
954
+ while True:
955
+ DBOS.sleep(0.1)
956
+
957
+ with SetWorkflowTimeout(1.0):
958
+ handle = DBOS.start_workflow(blocked_parent_workflow)
959
+ with pytest.raises(DBOSWorkflowCancelledError):
960
+ handle.get_result()
961
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
935
962
  DBOS.retrieve_workflow(child_id).get_result()
936
- assert "was cancelled" in str(exc_info.value)
963
+ blocking_event.set()
964
+ stuck_handle.get_result()
937
965
 
938
966
  # Verify all queue entries eventually get cleaned up.
939
967
  assert queue_entries_are_cleaned_up(dbos)
@@ -1341,3 +1369,80 @@ def test_worker_concurrency_across_versions(dbos: DBOS, client: DBOSClient) -> N
1341
1369
  # Change the version, verify the other version complets
1342
1370
  GlobalParams.app_version = other_version
1343
1371
  assert other_version_handle.get_result()
1372
+
1373
+
1374
+ def test_timeout_queue_recovery(dbos: DBOS) -> None:
1375
+ queue = Queue("test_queue")
1376
+ evt = threading.Event()
1377
+
1378
+ @DBOS.workflow()
1379
+ def blocking_workflow() -> None:
1380
+ evt.set()
1381
+ while True:
1382
+ DBOS.sleep(0.1)
1383
+
1384
+ timeout = 3.0
1385
+ enqueue_time = time.time()
1386
+ with SetWorkflowTimeout(timeout):
1387
+ original_handle = queue.enqueue(blocking_workflow)
1388
+
1389
+ # Verify the workflow's timeout is properly configured
1390
+ evt.wait()
1391
+ original_status = original_handle.get_status()
1392
+ assert original_status.workflow_timeout_ms == timeout * 1000
1393
+ assert (
1394
+ original_status.workflow_deadline_epoch_ms is not None
1395
+ and original_status.workflow_deadline_epoch_ms > enqueue_time * 1000
1396
+ )
1397
+
1398
+ # Recover the workflow. Verify its deadline remains the same
1399
+ evt.clear()
1400
+ handles = DBOS._recover_pending_workflows()
1401
+ assert len(handles) == 1
1402
+ evt.wait()
1403
+ recovered_handle = handles[0]
1404
+ recovered_status = recovered_handle.get_status()
1405
+ assert recovered_status.workflow_timeout_ms == timeout * 1000
1406
+ assert (
1407
+ recovered_status.workflow_deadline_epoch_ms
1408
+ == original_status.workflow_deadline_epoch_ms
1409
+ )
1410
+
1411
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
1412
+ original_handle.get_result()
1413
+
1414
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
1415
+ recovered_handle.get_result()
1416
+
1417
+
1418
+ def test_unsetting_timeout(dbos: DBOS) -> None:
1419
+
1420
+ queue = Queue("test_queue")
1421
+
1422
+ @DBOS.workflow()
1423
+ def child() -> str:
1424
+ for _ in range(5):
1425
+ DBOS.sleep(1)
1426
+ return DBOS.workflow_id
1427
+
1428
+ @DBOS.workflow()
1429
+ def parent(child_one: str, child_two: str) -> None:
1430
+ with SetWorkflowID(child_two):
1431
+ with SetWorkflowTimeout(None):
1432
+ queue.enqueue(child)
1433
+
1434
+ with SetWorkflowID(child_one):
1435
+ queue.enqueue(child)
1436
+
1437
+ child_one, child_two = str(uuid.uuid4()), str(uuid.uuid4())
1438
+ with SetWorkflowTimeout(1.0):
1439
+ queue.enqueue(parent, child_one, child_two).get_result()
1440
+
1441
+ # Verify child one, which has a propagated timeout, is cancelled
1442
+ handle: WorkflowHandle[str] = DBOS.retrieve_workflow(child_one)
1443
+ with pytest.raises(DBOSAwaitedWorkflowCancelledError):
1444
+ handle.get_result()
1445
+
1446
+ # Verify child two, which doesn't have a timeout, succeeds
1447
+ handle = DBOS.retrieve_workflow(child_two)
1448
+ assert handle.get_result() == child_two
@@ -41,6 +41,8 @@ def test_list_workflow(dbos: DBOS) -> None:
41
41
  assert output.app_version == GlobalParams.app_version
42
42
  assert output.app_id == ""
43
43
  assert output.recovery_attempts == 1
44
+ assert output.workflow_timeout_ms is None
45
+ assert output.workflow_deadline_epoch_ms is None
44
46
 
45
47
  # Test searching by status
46
48
  outputs = DBOS.list_workflows(status="PENDING")
@@ -222,6 +224,8 @@ def test_queued_workflows(dbos: DBOS) -> None:
222
224
  assert workflow.created_at is not None and workflow.created_at > 0
223
225
  assert workflow.updated_at is not None and workflow.updated_at > 0
224
226
  assert workflow.recovery_attempts == 1
227
+ assert workflow.workflow_timeout_ms is None
228
+ assert workflow.workflow_deadline_epoch_ms is None
225
229
 
226
230
  # Test sort_desc inverts the order
227
231
  workflows = DBOS.list_queued_workflows(sort_desc=True)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes