dbos 1.1.0a3__tar.gz → 1.2.0a2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. {dbos-1.1.0a3 → dbos-1.2.0a2}/PKG-INFO +1 -1
  2. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_admin_server.py +24 -4
  3. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_app_db.py +0 -15
  4. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_context.py +6 -0
  5. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_core.py +0 -1
  6. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_dbos.py +22 -0
  7. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_error.py +14 -0
  8. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_event_loop.py +10 -7
  9. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_sys_db.py +22 -16
  10. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_workflow_commands.py +1 -10
  11. {dbos-1.1.0a3 → dbos-1.2.0a2}/pyproject.toml +1 -1
  12. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_admin_server.py +28 -1
  13. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_async.py +42 -1
  14. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_dbos.py +10 -4
  15. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_failures.py +6 -0
  16. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_queue.py +116 -11
  17. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_workflow_introspection.py +4 -0
  18. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_workflow_management.py +0 -11
  19. {dbos-1.1.0a3 → dbos-1.2.0a2}/LICENSE +0 -0
  20. {dbos-1.1.0a3 → dbos-1.2.0a2}/README.md +0 -0
  21. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/__init__.py +0 -0
  22. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/__main__.py +0 -0
  23. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_classproperty.py +0 -0
  24. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_client.py +0 -0
  25. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_conductor/conductor.py +0 -0
  26. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_conductor/protocol.py +0 -0
  27. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_croniter.py +0 -0
  28. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_dbos_config.py +0 -0
  29. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_debug.py +0 -0
  30. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_docker_pg_helper.py +0 -0
  31. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_fastapi.py +0 -0
  32. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_flask.py +0 -0
  33. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_kafka.py +0 -0
  34. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_kafka_message.py +0 -0
  35. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_logger.py +0 -0
  36. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/env.py +0 -0
  37. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/script.py.mako +0 -0
  38. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  39. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
  40. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  41. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  42. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -0
  43. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py +0 -0
  44. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  45. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  46. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  47. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  48. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
  49. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_outcome.py +0 -0
  50. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_queue.py +0 -0
  51. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_recovery.py +0 -0
  52. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_registrations.py +0 -0
  53. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_roles.py +0 -0
  54. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_scheduler.py +0 -0
  55. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_schemas/__init__.py +0 -0
  56. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_schemas/application_database.py +0 -0
  57. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_schemas/system_database.py +0 -0
  58. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_serialization.py +0 -0
  59. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/README.md +0 -0
  60. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  61. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
  62. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  63. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
  64. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  65. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
  66. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
  67. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
  68. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  69. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_tracer.py +0 -0
  70. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_utils.py +0 -0
  71. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/cli/_github_init.py +0 -0
  72. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/cli/_template_init.py +0 -0
  73. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/cli/cli.py +0 -0
  74. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/dbos-config.schema.json +0 -0
  75. {dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/py.typed +0 -0
  76. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/__init__.py +0 -0
  77. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/atexit_no_ctor.py +0 -0
  78. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/atexit_no_launch.py +0 -0
  79. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/classdefs.py +0 -0
  80. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/client_collateral.py +0 -0
  81. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/client_worker.py +0 -0
  82. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/conftest.py +0 -0
  83. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/dupname_classdefs1.py +0 -0
  84. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/dupname_classdefsa.py +0 -0
  85. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/more_classdefs.py +0 -0
  86. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/queuedworkflow.py +0 -0
  87. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_classdecorators.py +0 -0
  88. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_cli.py +0 -0
  89. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_client.py +0 -0
  90. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_concurrency.py +0 -0
  91. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_config.py +0 -0
  92. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_croniter.py +0 -0
  93. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_debug.py +0 -0
  94. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_docker_secrets.py +0 -0
  95. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_fastapi.py +0 -0
  96. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_fastapi_roles.py +0 -0
  97. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_flask.py +0 -0
  98. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_kafka.py +0 -0
  99. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_outcome.py +0 -0
  100. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_package.py +0 -0
  101. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_scheduler.py +0 -0
  102. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_schema_migration.py +0 -0
  103. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_singleton.py +0 -0
  104. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_spans.py +0 -0
  105. {dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_sqlalchemy.py +0 -0
  106. {dbos-1.1.0a3 → dbos-1.2.0a2}/version/__init__.py +0 -0
{dbos-1.1.0a3 → dbos-1.2.0a2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: dbos
- Version: 1.1.0a3
+ Version: 1.2.0a2
  Summary: Ultra-lightweight durable execution in Python
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
  License: MIT
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_admin_server.py
@@ -5,8 +5,9 @@ import re
  import threading
  from functools import partial
  from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
- from typing import TYPE_CHECKING, Any, List, TypedDict
+ from typing import TYPE_CHECKING, Any, List, Optional, TypedDict

+ from ._context import SetWorkflowID
  from ._error import DBOSException
  from ._logger import dbos_logger
  from ._recovery import recover_pending_workflows
@@ -141,7 +142,11 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
          try:
              data = json.loads(post_data.decode("utf-8"))
              start_step: int = data.get("start_step", 1)
-             self._handle_fork(workflow_id, start_step)
+             new_workflow_id: Optional[str] = data.get("new_workflow_id")
+             application_version: Optional[str] = data.get("application_version")
+             self._handle_fork(
+                 workflow_id, start_step, new_workflow_id, application_version
+             )
          except (json.JSONDecodeError, AttributeError) as e:
              self.send_response(500)
              self.send_header("Content-Type", "application/json")
@@ -191,9 +196,24 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
          self.end_headers()
          self.wfile.write(response_body)

-     def _handle_fork(self, workflow_id: str, start_step: int) -> None:
+     def _handle_fork(
+         self,
+         workflow_id: str,
+         start_step: int,
+         new_workflow_id: Optional[str],
+         application_version: Optional[str],
+     ) -> None:
          try:
-             handle = self.dbos.fork_workflow(workflow_id, start_step)
+             print(f"Forking workflow {workflow_id} from step {start_step}")
+             if new_workflow_id is not None:
+                 with SetWorkflowID(new_workflow_id):
+                     handle = self.dbos.fork_workflow(
+                         workflow_id, start_step, application_version=application_version
+                     )
+             else:
+                 handle = self.dbos.fork_workflow(
+                     workflow_id, start_step, application_version=application_version
+                 )
              response_body = json.dumps(
                  {
                      "workflow_id": handle.workflow_id,
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_app_db.py
@@ -216,21 +216,6 @@ class ApplicationDatabase:
              for row in rows
          ]

-     def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
-         with self.engine.begin() as conn:
-             max_function_id_row = conn.execute(
-                 sa.select(
-                     sa.func.max(ApplicationSchema.transaction_outputs.c.function_id)
-                 ).where(
-                     ApplicationSchema.transaction_outputs.c.workflow_uuid
-                     == workflow_uuid
-                 )
-             ).fetchone()
-
-         max_function_id = max_function_id_row[0] if max_function_id_row else None
-
-         return max_function_id
-
      def clone_workflow_transactions(
          self, src_workflow_id: str, forked_workflow_id: str, start_step: int
      ) -> None:
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_context.py
@@ -392,6 +392,7 @@ class SetWorkflowTimeout:
              else None
          )
          self.saved_workflow_timeout: Optional[int] = None
+         self.saved_workflow_deadline_epoch_ms: Optional[int] = None

      def __enter__(self) -> SetWorkflowTimeout:
          # Code to create a basic context
@@ -402,6 +403,8 @@ class SetWorkflowTimeout:
          ctx = assert_current_dbos_context()
          self.saved_workflow_timeout = ctx.workflow_timeout_ms
          ctx.workflow_timeout_ms = self.workflow_timeout_ms
+         self.saved_workflow_deadline_epoch_ms = ctx.workflow_deadline_epoch_ms
+         ctx.workflow_deadline_epoch_ms = None
          return self

      def __exit__(
@@ -411,6 +414,9 @@ class SetWorkflowTimeout:
          traceback: Optional[TracebackType],
      ) -> Literal[False]:
          assert_current_dbos_context().workflow_timeout_ms = self.saved_workflow_timeout
+         assert_current_dbos_context().workflow_deadline_epoch_ms = (
+             self.saved_workflow_deadline_epoch_ms
+         )
          # Code to clean up the basic context if we created it
          if self.created_ctx:
              _clear_local_dbos_context()
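
`SetWorkflowTimeout` now saves and clears the inherited deadline as well as the timeout, so a nested `SetWorkflowTimeout(None)` detaches a child from its parent's deadline instead of inheriting it. A sketch of the pattern, modeled on the `test_unsetting_timeout` test added below; the workflow names are hypothetical:

```python
from dbos import DBOS, SetWorkflowTimeout

@DBOS.workflow()
def child() -> str:
    DBOS.sleep(5)  # longer than the parent's 1-second timeout
    return DBOS.workflow_id

@DBOS.workflow()
def parent() -> None:
    # Without this block the child would inherit the parent's deadline
    # and be cancelled along with it.
    with SetWorkflowTimeout(None):
        DBOS.start_workflow(child)

with SetWorkflowTimeout(1.0):
    DBOS.start_workflow(parent)
```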
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_core.py
@@ -602,7 +602,6 @@ async def start_workflow_async(
      *args: P.args,
      **kwargs: P.kwargs,
  ) -> "WorkflowHandleAsync[R]":
-
      # If the function has a class, add the class object as its first argument
      fself: Optional[object] = None
      if hasattr(func, "__self__"):
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_dbos.py
@@ -299,6 +299,7 @@ class DBOS:

          self._launched: bool = False
          self._debug_mode: bool = False
+         self._configured_threadpool: bool = False
          self._sys_db_field: Optional[SystemDatabase] = None
          self._app_db_field: Optional[ApplicationDatabase] = None
          self._registry: DBOSRegistry = _get_or_create_dbos_registry()
@@ -719,6 +720,7 @@ class DBOS:
          **kwargs: P.kwargs,
      ) -> WorkflowHandleAsync[R]:
          """Invoke a workflow function on the event loop, returning a handle to the ongoing execution."""
+         await cls._configure_asyncio_thread_pool()
          return await start_workflow_async(
              _get_dbos_instance(), func, None, True, *args, **kwargs
          )
@@ -736,6 +738,7 @@ class DBOS:
      async def get_workflow_status_async(
          cls, workflow_id: str
      ) -> Optional[WorkflowStatus]:
+         await cls._configure_asyncio_thread_pool()
          """Return the status of a workflow execution."""
          return await asyncio.to_thread(cls.get_workflow_status, workflow_id)

@@ -757,6 +760,7 @@ class DBOS:
      ) -> WorkflowHandleAsync[R]:
          """Return a `WorkflowHandle` for a workflow execution."""
          dbos = _get_dbos_instance()
+         await cls._configure_asyncio_thread_pool()
          if existing_workflow:
              stat = await dbos.get_workflow_status_async(workflow_id)
              if stat is None:
@@ -775,6 +779,7 @@ class DBOS:
          cls, destination_id: str, message: Any, topic: Optional[str] = None
      ) -> None:
          """Send a message to a workflow execution."""
+         await cls._configure_asyncio_thread_pool()
          await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))

      @classmethod
@@ -797,6 +802,7 @@ class DBOS:
          This function is to be called from within a workflow.
          `recv_async` will return the message sent on `topic`, asynchronously waiting if necessary.
          """
+         await cls._configure_asyncio_thread_pool()
          return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))

      @classmethod
@@ -835,6 +841,7 @@ class DBOS:
          It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
          as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
          """
+         await cls._configure_asyncio_thread_pool()
          await asyncio.to_thread(lambda: DBOS.sleep(seconds))

      @classmethod
@@ -869,6 +876,7 @@ class DBOS:
          value(Any): A serializable value to associate with the key

          """
+         await cls._configure_asyncio_thread_pool()
          await asyncio.to_thread(lambda: DBOS.set_event(key, value))

      @classmethod
@@ -901,6 +909,7 @@ class DBOS:
          timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called by the workflow

          """
+         await cls._configure_asyncio_thread_pool()
          return await asyncio.to_thread(
              lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
          )
@@ -929,6 +938,19 @@ class DBOS:
              fn, "DBOS.cancelWorkflow"
          )

+     @classmethod
+     async def _configure_asyncio_thread_pool(cls) -> None:
+         """
+         Configure the thread pool for asyncio.to_thread.
+
+         This function is called before the first call to asyncio.to_thread.
+         """
+         if _get_dbos_instance()._configured_threadpool:
+             return
+         loop = asyncio.get_running_loop()
+         loop.set_default_executor(_get_dbos_instance()._executor)
+         _get_dbos_instance()._configured_threadpool = True
+
      @classmethod
      def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
          """Resume a workflow by ID."""
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_error.py
@@ -62,6 +62,7 @@ class DBOSErrorCode(Enum):
      WorkflowCancelled = 10
      UnexpectedStep = 11
      QueueDeduplicated = 12
+     AwaitedWorkflowCancelled = 13
      ConflictingRegistrationError = 25


@@ -206,6 +207,19 @@ class DBOSQueueDeduplicatedError(DBOSException):
          )


+ class DBOSAwaitedWorkflowCancelledError(DBOSException):
+     def __init__(self, workflow_id: str):
+         self.workflow_id = workflow_id
+         super().__init__(
+             f"Awaited workflow {workflow_id} was cancelled",
+             dbos_error_code=DBOSErrorCode.AwaitedWorkflowCancelled.value,
+         )
+
+     def __reduce__(self) -> Any:
+         # Tell jsonpickle how to reconstruct this object
+         return (self.__class__, (self.workflow_id,))
+
+
  #######################################
  ## BaseException
  #######################################
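
With this class in place, code that awaits another workflow's result can catch a precise exception type instead of matching on the message string. A short usage sketch; the workflow ID is hypothetical, and the import path follows the tests below:

```python
from dbos import DBOS
from dbos._error import DBOSAwaitedWorkflowCancelledError

handle = DBOS.retrieve_workflow("some-workflow-id")
try:
    result = handle.get_result()
except DBOSAwaitedWorkflowCancelledError as e:
    # The awaited workflow was cancelled; the awaiting code keeps running.
    print(f"Workflow {e.workflow_id} was cancelled before finishing")
```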
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_event_loop.py
@@ -1,5 +1,6 @@
  import asyncio
  import threading
+ from concurrent.futures import ThreadPoolExecutor
  from typing import Any, Coroutine, Optional, TypeVar


@@ -33,15 +34,17 @@ class BackgroundEventLoop:

      def _run_event_loop(self) -> None:
          self._loop = asyncio.new_event_loop()
-         asyncio.set_event_loop(self._loop)
+         with ThreadPoolExecutor(max_workers=64) as thread_pool:
+             self._loop.set_default_executor(thread_pool)
+             asyncio.set_event_loop(self._loop)

-         self._running = True
-         self._ready.set()  # Signal that the loop is ready
+             self._running = True
+             self._ready.set()  # Signal that the loop is ready

-         try:
-             self._loop.run_forever()
-         finally:
-             self._loop.close()
+             try:
+                 self._loop.run_forever()
+             finally:
+                 self._loop.close()

      async def _shutdown(self) -> None:
          if self._loop is None:
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_sys_db.py
@@ -32,6 +32,7 @@ from dbos._utils import INTERNAL_QUEUE_NAME
  from . import _serialization
  from ._context import get_local_dbos_context
  from ._error import (
+     DBOSAwaitedWorkflowCancelledError,
      DBOSConflictingWorkflowError,
      DBOSDeadLetterQueueError,
      DBOSNonExistentWorkflowError,
@@ -96,6 +97,10 @@ class WorkflowStatus:
      executor_id: Optional[str]
      # The application version on which this workflow was started
      app_version: Optional[str]
+     # The start-to-close timeout of the workflow in ms
+     workflow_timeout_ms: Optional[int]
+     # The deadline of a workflow, computed by adding its timeout to its start time.
+     workflow_deadline_epoch_ms: Optional[int]

      # INTERNAL FIELDS

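The two new fields surface a workflow's timeout configuration on every status object, so callers can inspect deadlines without querying the system database directly. A sketch of reading them off a handle; the workflow is hypothetical, and note that `SetWorkflowTimeout` takes seconds while the status fields are reported in milliseconds:

```python
from dbos import DBOS, SetWorkflowTimeout

@DBOS.workflow()
def my_workflow() -> None:
    DBOS.sleep(1)

with SetWorkflowTimeout(10.0):
    handle = DBOS.start_workflow(my_workflow)

status = handle.get_status()
print(status.workflow_timeout_ms)         # 10000
print(status.workflow_deadline_epoch_ms)  # start time + timeout, in epoch ms
```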
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_sys_db.py (continued)
@@ -596,18 +601,6 @@ class SystemDatabase:
              )
          )

-     def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
-         with self.engine.begin() as conn:
-             max_function_id_row = conn.execute(
-                 sa.select(
-                     sa.func.max(SystemSchema.operation_outputs.c.function_id)
-                 ).where(SystemSchema.operation_outputs.c.workflow_uuid == workflow_uuid)
-             ).fetchone()
-
-         max_function_id = max_function_id_row[0] if max_function_id_row else None
-
-         return max_function_id
-
      def fork_workflow(
          self,
          original_workflow_id: str,
@@ -761,9 +754,9 @@ class SystemDatabase:
              error = row[2]
              raise _serialization.deserialize_exception(error)
          elif status == WorkflowStatusString.CANCELLED.value:
-             # Raise a normal exception here, not the cancellation exception
+             # Raise AwaitedWorkflowCancelledError here, not the cancellation exception
              # because the awaiting workflow is not being cancelled.
-             raise Exception(f"Awaited workflow {workflow_id} was cancelled")
+             raise DBOSAwaitedWorkflowCancelledError(workflow_id)
          else:
              pass  # CB: I guess we're assuming the WF will show up eventually.
          time.sleep(1)
@@ -837,6 +830,8 @@ class SystemDatabase:
              SystemSchema.workflow_inputs.c.inputs,
              SystemSchema.workflow_status.c.output,
              SystemSchema.workflow_status.c.error,
+             SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
+             SystemSchema.workflow_status.c.workflow_timeout_ms,
          ).join(
              SystemSchema.workflow_inputs,
              SystemSchema.workflow_status.c.workflow_uuid
@@ -918,6 +913,8 @@ class SystemDatabase:
              info.input = inputs
              info.output = output
              info.error = exception
+             info.workflow_deadline_epoch_ms = row[18]
+             info.workflow_timeout_ms = row[19]

              infos.append(info)
          return infos
@@ -947,6 +944,8 @@ class SystemDatabase:
              SystemSchema.workflow_inputs.c.inputs,
              SystemSchema.workflow_status.c.output,
              SystemSchema.workflow_status.c.error,
+             SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
+             SystemSchema.workflow_status.c.workflow_timeout_ms,
          ).select_from(
              SystemSchema.workflow_queue.join(
                  SystemSchema.workflow_status,
@@ -1024,6 +1023,8 @@ class SystemDatabase:
              info.input = inputs
              info.output = output
              info.error = exception
+             info.workflow_deadline_epoch_ms = row[18]
+             info.workflow_timeout_ms = row[19]

              infos.append(info)

@@ -1827,8 +1828,13 @@ class SystemDatabase:
              # If a timeout is set, set the deadline on dequeue
              workflow_deadline_epoch_ms=sa.case(
                  (
-                     SystemSchema.workflow_status.c.workflow_timeout_ms.isnot(
-                         None
+                     sa.and_(
+                         SystemSchema.workflow_status.c.workflow_timeout_ms.isnot(
+                             None
+                         ),
+                         SystemSchema.workflow_status.c.workflow_deadline_epoch_ms.is_(
+                             None
+                         ),
                      ),
                      sa.func.extract("epoch", sa.func.now()) * 1000
                      + SystemSchema.workflow_status.c.workflow_timeout_ms,
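
The added `workflow_deadline_epoch_ms IS NULL` guard means the deadline is computed only on first dequeue; a workflow re-dequeued during recovery keeps its original deadline rather than receiving a fresh one (which is what `test_timeout_queue_recovery` below verifies). A standalone sketch of the expression; the `else_` branch preserving the existing value is an assumption, since the hunk ends before it:

```python
import sqlalchemy as sa

from dbos._schemas.system_database import SystemSchema

status = SystemSchema.workflow_status.c

# Compute a deadline only when a timeout is configured AND no deadline was
# already assigned, so recovery leaves existing deadlines untouched.
deadline_expr = sa.case(
    (
        sa.and_(status.workflow_timeout_ms.isnot(None),
                status.workflow_deadline_epoch_ms.is_(None)),
        sa.func.extract("epoch", sa.func.now()) * 1000
        + status.workflow_timeout_ms,
    ),
    # Assumed fallback: keep whatever deadline is already stored.
    else_=status.workflow_deadline_epoch_ms,
)
```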
{dbos-1.1.0a3 → dbos-1.2.0a2}/dbos/_workflow_commands.py
@@ -103,16 +103,7 @@ def fork_workflow(
      *,
      application_version: Optional[str],
  ) -> str:
-     def get_max_function_id(workflow_uuid: str) -> int:
-         max_transactions = app_db.get_max_function_id(workflow_uuid) or 0
-         max_operations = sys_db.get_max_function_id(workflow_uuid) or 0
-         return max(max_transactions, max_operations)
-
-     max_function_id = get_max_function_id(workflow_id)
-     if max_function_id > 0 and start_step > max_function_id:
-         raise DBOSException(
-             f"Cannot fork workflow {workflow_id} from step {start_step}. The workflow has {max_function_id} steps."
-         )
+
      ctx = get_local_dbos_context()
      if ctx is not None and len(ctx.id_assigned_for_next_workflow) > 0:
          forked_workflow_id = ctx.id_assigned_for_next_workflow
{dbos-1.1.0a3 → dbos-1.2.0a2}/pyproject.toml
@@ -27,7 +27,7 @@ dependencies = [
  ]
  requires-python = ">=3.9"
  readme = "README.md"
- version = "1.1.0a3"
+ version = "1.2.0a2"

  [project.license]
  text = "MIT"
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_admin_server.py
@@ -11,7 +11,14 @@ import sqlalchemy as sa
  from requests.exceptions import ConnectionError

  # Public API
- from dbos import DBOS, DBOSConfig, Queue, SetWorkflowID, _workflow_commands
+ from dbos import (
+     DBOS,
+     DBOSConfig,
+     Queue,
+     SetWorkflowID,
+     WorkflowHandle,
+     _workflow_commands,
+ )
  from dbos._error import DBOSWorkflowCancelledError
  from dbos._schemas.system_database import SystemSchema
  from dbos._sys_db import SystemDatabase, WorkflowStatusString
@@ -425,4 +432,24 @@ def test_admin_workflow_fork(dbos: DBOS, sys_db: SystemDatabase) -> None:
          time.sleep(1)
          count += 1

+     # Test forking with new_workflow_id and application_version
+
+     new_version = "my_new_version"
+     GlobalParams.app_version = new_version
+
+     response = requests.post(
+         f"http://localhost:3001/workflows/{wfUuid}/fork",
+         json={"new_workflow_id": "123456", "application_version": new_version},
+         timeout=5,
+     )
+     assert response.status_code == 200
+
+     new_workflow_id = response.json().get("workflow_id")
+     assert new_workflow_id == "123456", "Expected new workflow ID to be 123456"
+
+     handle: WorkflowHandle[None] = dbos.retrieve_workflow(new_workflow_id)
+     assert (
+         handle.get_status().app_version == new_version
+     ), f"Expected application version to be {new_version}, but got {handle.get_status().app_version}"
+
      assert worked, "Workflow did not finish successfully"
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_async.py
@@ -1,7 +1,7 @@
  import asyncio
  import time
  import uuid
- from typing import Optional
+ from typing import List, Optional

  import pytest
  import sqlalchemy as sa
@@ -523,3 +523,44 @@ async def test_workflow_timeout_async(dbos: DBOS) -> None:
      with pytest.raises(Exception) as exc_info:
          await (await DBOS.retrieve_workflow_async(direct_child)).get_result()
      assert "was cancelled" in str(exc_info.value)
+
+
+ @pytest.mark.asyncio
+ async def test_max_parallel_workflows(dbos: DBOS) -> None:
+     queue = Queue("parallel_queue")
+
+     @DBOS.workflow()
+     async def test_workflow(i: int) -> int:
+         await DBOS.sleep_async(5)
+         return i
+
+     begin_time = time.time()
+
+     tasks: List[WorkflowHandleAsync[int]] = []
+     for i in range(50):
+         tasks.append(await DBOS.start_workflow_async(test_workflow, i))
+
+     # Wait for all tasks to complete
+     for i in range(50):
+         assert (await tasks[i].get_result()) == i, f"Task {i} should return {i}"
+
+     end_time = time.time()
+     assert (
+         end_time - begin_time < 10
+     ), "All tasks should complete in less than 10 seconds"
+
+     # Test enqueues
+     begin_time = time.time()
+     tasks = []
+
+     for i in range(50):
+         tasks.append(await queue.enqueue_async(test_workflow, i))
+
+     # Wait for all tasks to complete
+     for i in range(50):
+         assert (await tasks[i].get_result()) == i, f"Task {i} should return {i}"
+
+     end_time = time.time()
+     assert (
+         end_time - begin_time < 10
+     ), "All enqueued tasks should complete in less than 10 seconds"
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_dbos.py
@@ -24,6 +24,7 @@ from dbos import (
  # Private API because this is a test
  from dbos._context import assert_current_dbos_context, get_local_dbos_context
  from dbos._error import (
+     DBOSAwaitedWorkflowCancelledError,
      DBOSConflictingRegistrationError,
      DBOSMaxStepRetriesExceeded,
      DBOSWorkflowCancelledError,
@@ -1507,7 +1508,14 @@ def test_workflow_timeout(dbos: DBOS) -> None:
      with SetWorkflowID(wfid):
          blocked_workflow()
      assert assert_current_dbos_context().workflow_deadline_epoch_ms is None
+     start_time = time.time() * 1000
      handle = DBOS.start_workflow(blocked_workflow)
+     status = handle.get_status()
+     assert status.workflow_timeout_ms == 100
+     assert (
+         status.workflow_deadline_epoch_ms is not None
+         and status.workflow_deadline_epoch_ms > start_time
+     )
      with pytest.raises(DBOSWorkflowCancelledError):
          handle.get_result()

@@ -1555,13 +1563,11 @@ def test_workflow_timeout(dbos: DBOS) -> None:
      with pytest.raises(DBOSWorkflowCancelledError):
          parent_workflow()

-     with pytest.raises(Exception) as exc_info:
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
          DBOS.retrieve_workflow(start_child).get_result()
-     assert "was cancelled" in str(exc_info.value)

-     with pytest.raises(Exception) as exc_info:
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
          DBOS.retrieve_workflow(direct_child).get_result()
-     assert "was cancelled" in str(exc_info.value)

      # Verify the context variables are set correctly
      with SetWorkflowTimeout(1.0):
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_failures.py
@@ -9,6 +9,7 @@ from sqlalchemy.exc import InvalidRequestError, OperationalError

  from dbos import DBOS, Queue, SetWorkflowID
  from dbos._error import (
+     DBOSAwaitedWorkflowCancelledError,
      DBOSDeadLetterQueueError,
      DBOSMaxStepRetriesExceeded,
      DBOSNotAuthorizedError,
@@ -461,6 +462,11 @@ def test_error_serialization() -> None:
      d = deserialize_exception(serialize_exception(e))
      assert isinstance(d, DBOSQueueDeduplicatedError)
      assert str(d) == str(e)
+     # AwaitedWorkflowCancelledError
+     e = DBOSAwaitedWorkflowCancelledError("id")
+     d = deserialize_exception(serialize_exception(e))
+     assert isinstance(d, DBOSAwaitedWorkflowCancelledError)
+     assert str(d) == str(e)

      # Test safe_deserialize
      class BadException(Exception):
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_queue.py
@@ -26,6 +26,7 @@ from dbos import (
  )
  from dbos._context import assert_current_dbos_context
  from dbos._dbos import WorkflowHandleAsync
+ from dbos._error import DBOSAwaitedWorkflowCancelledError, DBOSWorkflowCancelledError
  from dbos._schemas.system_database import SystemSchema
  from dbos._sys_db import WorkflowStatusString
  from dbos._utils import GlobalParams
@@ -853,9 +854,8 @@ def test_cancelling_queued_workflows(dbos: DBOS) -> None:

      # Complete the blocked workflow
      blocking_event.set()
-     with pytest.raises(Exception) as exc_info:
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
          blocked_handle.get_result()
-     assert "was cancelled" in str(exc_info.value)

      # Verify all queue entries eventually get cleaned up.
      assert queue_entries_are_cleaned_up(dbos)
@@ -891,9 +891,8 @@ def test_timeout_queue(dbos: DBOS) -> None:

      # Verify the blocked workflows are cancelled
      for handle in handles:
-         with pytest.raises(Exception) as exc_info:
+         with pytest.raises(DBOSAwaitedWorkflowCancelledError):
              handle.get_result()
-         assert "was cancelled" in str(exc_info.value)

      # Verify the normal workflow succeeds
      normal_handle.get_result()
@@ -911,17 +910,14 @@ def test_timeout_queue(dbos: DBOS) -> None:

      with SetWorkflowTimeout(1.0):
          handle = queue.enqueue(parent_workflow)
-     with pytest.raises(Exception) as exc_info:
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
          handle.get_result()
-     assert "was cancelled" in str(exc_info.value)

-     with pytest.raises(Exception) as exc_info:
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
          DBOS.retrieve_workflow(child_id).get_result()
-     assert "was cancelled" in str(exc_info.value)

      # Verify if a parent called with a timeout enqueues a blocked child
      # then exits the deadline propagates and the child is cancelled.
-     child_id = str(uuid.uuid4())
      queue = Queue("regular_queue")

      @DBOS.workflow()
@@ -931,9 +927,41 @@ def test_timeout_queue(dbos: DBOS) -> None:

      with SetWorkflowTimeout(1.0):
          child_id = exiting_parent_workflow()
-     with pytest.raises(Exception) as exc_info:
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
+         DBOS.retrieve_workflow(child_id).get_result()
+
+     # Verify if a parent called with a timeout enqueues a child that
+     # never starts because the queue is blocked, the deadline propagates
+     # and both parent and child are cancelled.
+     child_id = str(uuid.uuid4())
+     queue = Queue("stuck_queue", concurrency=1)
+
+     start_event = threading.Event()
+     blocking_event = threading.Event()
+
+     @DBOS.workflow()
+     def stuck_workflow() -> None:
+         start_event.set()
+         blocking_event.wait()
+
+     stuck_handle = queue.enqueue(stuck_workflow)
+     start_event.wait()
+
+     @DBOS.workflow()
+     def blocked_parent_workflow() -> None:
+         with SetWorkflowID(child_id):
+             queue.enqueue(blocking_workflow)
+         while True:
+             DBOS.sleep(0.1)
+
+     with SetWorkflowTimeout(1.0):
+         handle = DBOS.start_workflow(blocked_parent_workflow)
+     with pytest.raises(DBOSWorkflowCancelledError):
+         handle.get_result()
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
          DBOS.retrieve_workflow(child_id).get_result()
-     assert "was cancelled" in str(exc_info.value)
+     blocking_event.set()
+     stuck_handle.get_result()

      # Verify all queue entries eventually get cleaned up.
      assert queue_entries_are_cleaned_up(dbos)
@@ -1341,3 +1369,80 @@ def test_worker_concurrency_across_versions(dbos: DBOS, client: DBOSClient) -> None:
      # Change the version, verify the other version completes
      GlobalParams.app_version = other_version
      assert other_version_handle.get_result()
+
+
+ def test_timeout_queue_recovery(dbos: DBOS) -> None:
+     queue = Queue("test_queue")
+     evt = threading.Event()
+
+     @DBOS.workflow()
+     def blocking_workflow() -> None:
+         evt.set()
+         while True:
+             DBOS.sleep(0.1)
+
+     timeout = 3.0
+     enqueue_time = time.time()
+     with SetWorkflowTimeout(timeout):
+         original_handle = queue.enqueue(blocking_workflow)
+
+     # Verify the workflow's timeout is properly configured
+     evt.wait()
+     original_status = original_handle.get_status()
+     assert original_status.workflow_timeout_ms == timeout * 1000
+     assert (
+         original_status.workflow_deadline_epoch_ms is not None
+         and original_status.workflow_deadline_epoch_ms > enqueue_time * 1000
+     )
+
+     # Recover the workflow. Verify its deadline remains the same
+     evt.clear()
+     handles = DBOS._recover_pending_workflows()
+     assert len(handles) == 1
+     evt.wait()
+     recovered_handle = handles[0]
+     recovered_status = recovered_handle.get_status()
+     assert recovered_status.workflow_timeout_ms == timeout * 1000
+     assert (
+         recovered_status.workflow_deadline_epoch_ms
+         == original_status.workflow_deadline_epoch_ms
+     )
+
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
+         original_handle.get_result()
+
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
+         recovered_handle.get_result()
+
+
+ def test_unsetting_timeout(dbos: DBOS) -> None:
+
+     queue = Queue("test_queue")
+
+     @DBOS.workflow()
+     def child() -> str:
+         for _ in range(5):
+             DBOS.sleep(1)
+         return DBOS.workflow_id
+
+     @DBOS.workflow()
+     def parent(child_one: str, child_two: str) -> None:
+         with SetWorkflowID(child_two):
+             with SetWorkflowTimeout(None):
+                 queue.enqueue(child)
+
+         with SetWorkflowID(child_one):
+             queue.enqueue(child)
+
+     child_one, child_two = str(uuid.uuid4()), str(uuid.uuid4())
+     with SetWorkflowTimeout(1.0):
+         queue.enqueue(parent, child_one, child_two).get_result()
+
+     # Verify child one, which has a propagated timeout, is cancelled
+     handle: WorkflowHandle[str] = DBOS.retrieve_workflow(child_one)
+     with pytest.raises(DBOSAwaitedWorkflowCancelledError):
+         handle.get_result()
+
+     # Verify child two, which doesn't have a timeout, succeeds
+     handle = DBOS.retrieve_workflow(child_two)
+     assert handle.get_result() == child_two
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_workflow_introspection.py
@@ -41,6 +41,8 @@ def test_list_workflow(dbos: DBOS) -> None:
      assert output.app_version == GlobalParams.app_version
      assert output.app_id == ""
      assert output.recovery_attempts == 1
+     assert output.workflow_timeout_ms is None
+     assert output.workflow_deadline_epoch_ms is None

      # Test searching by status
      outputs = DBOS.list_workflows(status="PENDING")
@@ -222,6 +224,8 @@ def test_queued_workflows(dbos: DBOS) -> None:
      assert workflow.created_at is not None and workflow.created_at > 0
      assert workflow.updated_at is not None and workflow.updated_at > 0
      assert workflow.recovery_attempts == 1
+     assert workflow.workflow_timeout_ms is None
+     assert workflow.workflow_deadline_epoch_ms is None

      # Test sort_desc inverts the order
      workflows = DBOS.list_queued_workflows(sort_desc=True)
{dbos-1.1.0a3 → dbos-1.2.0a2}/tests/test_workflow_management.py
@@ -482,17 +482,6 @@ def test_restart_fromsteps_steps_tr(
      assert stepFourCount == 2
      assert trFiveCount == 3

-     # invalid step
-     try:
-         forked_handle = DBOS.fork_workflow(wfid, 7)
-         assert forked_handle.workflow_id != wfid
-         forked_handle.get_result()
-     except Exception as e:
-         print(f"Exception: {e}")
-         assert isinstance(e, DBOSException)
-         assert "Cannot fork workflow" in str(e)
-     assert trOneCount == 1
-
      # invalid < 1 will default to 1
      forked_handle = DBOS.fork_workflow(wfid, -1)
      assert forked_handle.workflow_id != wfid