dbos 0.16.1__tar.gz → 0.17.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dbos might be problematic.

Files changed (79)
  1. {dbos-0.16.1 → dbos-0.17.0}/PKG-INFO +1 -1
  2. {dbos-0.16.1 → dbos-0.17.0}/dbos/_core.py +175 -93
  3. {dbos-0.16.1 → dbos-0.17.0}/dbos/_dbos.py +104 -12
  4. dbos-0.17.0/dbos/_outcome.py +206 -0
  5. {dbos-0.16.1 → dbos-0.17.0}/dbos/_queue.py +2 -2
  6. {dbos-0.16.1 → dbos-0.17.0}/dbos/_sys_db.py +6 -3
  7. {dbos-0.16.1 → dbos-0.17.0}/pyproject.toml +2 -1
  8. dbos-0.17.0/tests/test_async.py +331 -0
  9. {dbos-0.16.1 → dbos-0.17.0}/tests/test_dbos.py +127 -0
  10. dbos-0.17.0/tests/test_outcome.py +114 -0
  11. {dbos-0.16.1 → dbos-0.17.0}/tests/test_spans.py +38 -0
  12. {dbos-0.16.1 → dbos-0.17.0}/LICENSE +0 -0
  13. {dbos-0.16.1 → dbos-0.17.0}/README.md +0 -0
  14. {dbos-0.16.1 → dbos-0.17.0}/dbos/__init__.py +0 -0
  15. {dbos-0.16.1 → dbos-0.17.0}/dbos/_admin_server.py +0 -0
  16. {dbos-0.16.1 → dbos-0.17.0}/dbos/_app_db.py +0 -0
  17. {dbos-0.16.1 → dbos-0.17.0}/dbos/_classproperty.py +0 -0
  18. {dbos-0.16.1 → dbos-0.17.0}/dbos/_context.py +0 -0
  19. {dbos-0.16.1 → dbos-0.17.0}/dbos/_croniter.py +0 -0
  20. {dbos-0.16.1 → dbos-0.17.0}/dbos/_dbos_config.py +0 -0
  21. {dbos-0.16.1 → dbos-0.17.0}/dbos/_error.py +0 -0
  22. {dbos-0.16.1 → dbos-0.17.0}/dbos/_fastapi.py +0 -0
  23. {dbos-0.16.1 → dbos-0.17.0}/dbos/_flask.py +0 -0
  24. {dbos-0.16.1 → dbos-0.17.0}/dbos/_kafka.py +0 -0
  25. {dbos-0.16.1 → dbos-0.17.0}/dbos/_kafka_message.py +0 -0
  26. {dbos-0.16.1 → dbos-0.17.0}/dbos/_logger.py +0 -0
  27. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/env.py +0 -0
  28. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/script.py.mako +0 -0
  29. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  30. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  31. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  32. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  33. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  34. {dbos-0.16.1 → dbos-0.17.0}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  35. {dbos-0.16.1 → dbos-0.17.0}/dbos/_recovery.py +0 -0
  36. {dbos-0.16.1 → dbos-0.17.0}/dbos/_registrations.py +0 -0
  37. {dbos-0.16.1 → dbos-0.17.0}/dbos/_request.py +0 -0
  38. {dbos-0.16.1 → dbos-0.17.0}/dbos/_roles.py +0 -0
  39. {dbos-0.16.1 → dbos-0.17.0}/dbos/_scheduler.py +0 -0
  40. {dbos-0.16.1 → dbos-0.17.0}/dbos/_schemas/__init__.py +0 -0
  41. {dbos-0.16.1 → dbos-0.17.0}/dbos/_schemas/application_database.py +0 -0
  42. {dbos-0.16.1 → dbos-0.17.0}/dbos/_schemas/system_database.py +0 -0
  43. {dbos-0.16.1 → dbos-0.17.0}/dbos/_serialization.py +0 -0
  44. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/README.md +0 -0
  45. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/__package/__init__.py +0 -0
  46. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/__package/main.py +0 -0
  47. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/__package/schema.py +0 -0
  48. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/alembic.ini +0 -0
  49. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/dbos-config.yaml.dbos +0 -0
  50. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/migrations/env.py.dbos +0 -0
  51. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/migrations/script.py.mako +0 -0
  52. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
  53. {dbos-0.16.1 → dbos-0.17.0}/dbos/_templates/hello/start_postgres_docker.py +0 -0
  54. {dbos-0.16.1 → dbos-0.17.0}/dbos/_tracer.py +0 -0
  55. {dbos-0.16.1 → dbos-0.17.0}/dbos/cli.py +0 -0
  56. {dbos-0.16.1 → dbos-0.17.0}/dbos/dbos-config.schema.json +0 -0
  57. {dbos-0.16.1 → dbos-0.17.0}/dbos/py.typed +0 -0
  58. {dbos-0.16.1 → dbos-0.17.0}/tests/__init__.py +0 -0
  59. {dbos-0.16.1 → dbos-0.17.0}/tests/atexit_no_ctor.py +0 -0
  60. {dbos-0.16.1 → dbos-0.17.0}/tests/atexit_no_launch.py +0 -0
  61. {dbos-0.16.1 → dbos-0.17.0}/tests/classdefs.py +0 -0
  62. {dbos-0.16.1 → dbos-0.17.0}/tests/conftest.py +0 -0
  63. {dbos-0.16.1 → dbos-0.17.0}/tests/more_classdefs.py +0 -0
  64. {dbos-0.16.1 → dbos-0.17.0}/tests/test_admin_server.py +0 -0
  65. {dbos-0.16.1 → dbos-0.17.0}/tests/test_classdecorators.py +0 -0
  66. {dbos-0.16.1 → dbos-0.17.0}/tests/test_concurrency.py +0 -0
  67. {dbos-0.16.1 → dbos-0.17.0}/tests/test_config.py +0 -0
  68. {dbos-0.16.1 → dbos-0.17.0}/tests/test_croniter.py +0 -0
  69. {dbos-0.16.1 → dbos-0.17.0}/tests/test_failures.py +0 -0
  70. {dbos-0.16.1 → dbos-0.17.0}/tests/test_fastapi.py +0 -0
  71. {dbos-0.16.1 → dbos-0.17.0}/tests/test_fastapi_roles.py +0 -0
  72. {dbos-0.16.1 → dbos-0.17.0}/tests/test_flask.py +0 -0
  73. {dbos-0.16.1 → dbos-0.17.0}/tests/test_kafka.py +0 -0
  74. {dbos-0.16.1 → dbos-0.17.0}/tests/test_package.py +0 -0
  75. {dbos-0.16.1 → dbos-0.17.0}/tests/test_queue.py +0 -0
  76. {dbos-0.16.1 → dbos-0.17.0}/tests/test_scheduler.py +0 -0
  77. {dbos-0.16.1 → dbos-0.17.0}/tests/test_schema_migration.py +0 -0
  78. {dbos-0.16.1 → dbos-0.17.0}/tests/test_singleton.py +0 -0
  79. {dbos-0.16.1 → dbos-0.17.0}/version/__init__.py +0 -0
{dbos-0.16.1 → dbos-0.17.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.16.1
+Version: 0.17.0
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
{dbos-0.16.1 → dbos-0.17.0}/dbos/_core.py

@@ -1,10 +1,27 @@
+import asyncio
+import functools
+import inspect
 import json
 import sys
 import time
 import traceback
 from concurrent.futures import Future
 from functools import wraps
-from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Tuple, TypeVar, cast
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Coroutine,
+    Generic,
+    Optional,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
+from dbos._outcome import Immediate, NoResult, Outcome, Pending
 
 from ._app_db import ApplicationDatabase, TransactionResultInternal
 
@@ -181,39 +198,38 @@ def _init_workflow(
     return status
 
 
-def _execute_workflow(
+def _get_wf_invoke_func(
     dbos: "DBOS",
     status: WorkflowStatusInternal,
-    func: "Workflow[P, R]",
-    *args: Any,
-    **kwargs: Any,
-) -> R:
-    try:
-        output = func(*args, **kwargs)
-        status["status"] = "SUCCESS"
-        status["output"] = _serialization.serialize(output)
-        if status["queue_name"] is not None:
-            queue = dbos._registry.queue_info_map[status["queue_name"]]
-            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
-        dbos._sys_db.buffer_workflow_status(status)
-    except DBOSWorkflowConflictIDError:
-        # Retrieve the workflow handle and wait for the result.
-        # Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
-        wf_handle: "WorkflowHandle[R]" = dbos.retrieve_workflow(
-            status["workflow_uuid"], existing_workflow=False
-        )
-        output = wf_handle.get_result()
-        return output
-    except Exception as error:
-        status["status"] = "ERROR"
-        status["error"] = _serialization.serialize_exception(error)
-        if status["queue_name"] is not None:
-            queue = dbos._registry.queue_info_map[status["queue_name"]]
-            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
-        dbos._sys_db.update_workflow_status(status)
-        raise
+) -> Callable[[Callable[[], R]], R]:
+    def persist(func: Callable[[], R]) -> R:
+        try:
+            output = func()
+            status["status"] = "SUCCESS"
+            status["output"] = _serialization.serialize(output)
+            if status["queue_name"] is not None:
+                queue = dbos._registry.queue_info_map[status["queue_name"]]
+                dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+            dbos._sys_db.buffer_workflow_status(status)
+            return output
+        except DBOSWorkflowConflictIDError:
+            # Retrieve the workflow handle and wait for the result.
+            # Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
+            wf_handle: "WorkflowHandle[R]" = dbos.retrieve_workflow(
+                status["workflow_uuid"], existing_workflow=False
+            )
+            output = wf_handle.get_result()
+            return output
+        except Exception as error:
+            status["status"] = "ERROR"
+            status["error"] = _serialization.serialize_exception(error)
+            if status["queue_name"] is not None:
+                queue = dbos._registry.queue_info_map[status["queue_name"]]
+                dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+            dbos._sys_db.update_workflow_status(status)
+            raise
 
-    return output
+    return persist
 
 
 def _execute_workflow_wthread(
@@ -231,7 +247,15 @@ def _execute_workflow_wthread(
     with DBOSContextSwap(ctx):
         with EnterDBOSWorkflow(attributes):
             try:
-                return _execute_workflow(dbos, status, func, *args, **kwargs)
+                result = (
+                    Outcome[R]
+                    .make(functools.partial(func, *args, **kwargs))
+                    .then(_get_wf_invoke_func(dbos, status))
+                )
+                if isinstance(result, Immediate):
+                    return cast(Immediate[R], result)()
+                else:
+                    return asyncio.run(cast(Pending[R], result)())
             except Exception:
                 dbos.logger.error(
                     f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
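The worker thread now wraps the workflow invocation in an Outcome from the new dbos/_outcome.py module and dispatches on its concrete type: an Immediate carries a synchronous computation, while a Pending carries a coroutine that must be driven by an event loop. A minimal sketch of that dispatch, mirroring the usage above (the work_sync and work_async callables are hypothetical stand-ins for workflow functions):

    import asyncio
    import functools
    from typing import Union

    from dbos._outcome import Immediate, Outcome, Pending

    def work_sync(x: int) -> int:  # hypothetical synchronous workflow body
        return x * 2

    async def work_async(x: int) -> int:  # hypothetical coroutine workflow body
        return x * 2

    def run(result: Union[Immediate[int], Pending[int]]) -> int:
        # Mirrors _execute_workflow_wthread: call an Immediate directly;
        # hand a Pending to asyncio.run to drive its coroutine to completion.
        if isinstance(result, Immediate):
            return result()
        return asyncio.run(result())

    # Outcome.make presumably inspects the callable to choose the subtype.
    print(run(Outcome[int].make(functools.partial(work_sync, 21))))   # 42
    print(run(Outcome[int].make(functools.partial(work_async, 21))))  # 42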
@@ -305,6 +329,18 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
     )
 
 
+@overload
+def start_workflow(
+    dbos: "DBOS",
+    func: "Workflow[P, Coroutine[Any, Any, R]]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
+    *args: P.args,
+    **kwargs: P.kwargs,
+) -> "WorkflowHandle[R]": ...
+
+
+@overload
 def start_workflow(
     dbos: "DBOS",
     func: "Workflow[P, R]",
@@ -312,6 +348,16 @@ def start_workflow(
     execute_workflow: bool,
     *args: P.args,
     **kwargs: P.kwargs,
+) -> "WorkflowHandle[R]": ...
+
+
+def start_workflow(
+    dbos: "DBOS",
+    func: "Workflow[P, Union[R, Coroutine[Any, Any, R]]]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
@@ -396,16 +442,16 @@ def start_workflow(
 
 def workflow_wrapper(
     dbosreg: "DBOSRegistry",
-    func: F,
+    func: Callable[P, R],
     max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
-) -> F:
+) -> Callable[P, R]:
     func.__orig_func = func  # type: ignore
 
     fi = get_or_create_func_info(func)
     fi.max_recovery_attempts = max_recovery_attempts
 
     @wraps(func)
-    def wrapper(*args: Any, **kwargs: Any) -> Any:
+    def wrapper(*args: Any, **kwargs: Any) -> R:
         if dbosreg.dbos is None:
             raise DBOSException(
                 f"Function {func.__name__} invoked before DBOS initialized"
@@ -425,7 +471,10 @@ def workflow_wrapper(
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
-        with enterWorkflowCtxMgr(attributes), DBOSAssumeRole(rr):
+
+        wfOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
+
+        def init_wf() -> Callable[[Callable[[], R]], R]:
             ctx = assert_current_dbos_context()  # Now the child ctx
             status = _init_workflow(
                 dbos,
@@ -441,16 +490,23 @@ def workflow_wrapper(
             dbos.logger.debug(
                 f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
             )
-            return _execute_workflow(dbos, status, func, *args, **kwargs)
+            return _get_wf_invoke_func(dbos, status)
 
-    wrapped_func = cast(F, wrapper)
-    return wrapped_func
+        outcome = (
+            wfOutcome.wrap(init_wf)
+            .also(DBOSAssumeRole(rr))
+            .also(enterWorkflowCtxMgr(attributes))
+        )
+        return outcome()  # type: ignore
+
+    return wrapper
 
 
 def decorate_workflow(
     reg: "DBOSRegistry", max_recovery_attempts: int
-) -> Callable[[F], F]:
-    def _workflow_decorator(func: F) -> F:
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+    def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func
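Read together with the new dbos/_outcome.py (+206 lines), the wrapper now composes the workflow call from combinators instead of nested `with` blocks: `then` hands the un-invoked workflow thunk to a handler such as `persist`, `wrap` defers the `_init_workflow` work until execution time, and each `also` arranges for a context manager to be entered around the eventual (possibly awaited) execution. A rough, synchronous-only sketch of what such a carrier might look like; all names below are illustrative, not the actual _outcome.py implementation, which also has a coroutine-based Pending variant:

    from contextlib import AbstractContextManager
    from typing import Callable, Generic, TypeVar

    T = TypeVar("T")

    class SketchOutcome(Generic[T]):
        """Illustrative sync-only analogue of an Immediate outcome."""

        def __init__(self, thunk: Callable[[], T]) -> None:
            self._thunk = thunk

        def __call__(self) -> T:
            return self._thunk()

        def then(self, handler: Callable[[Callable[[], T]], T]) -> "SketchOutcome[T]":
            # Hand the un-invoked thunk to a handler (e.g. the persist closure),
            # which decides when to call it and records the result.
            return SketchOutcome(lambda: handler(self._thunk))

        def wrap(
            self, init: Callable[[], Callable[[Callable[[], T]], T]]
        ) -> "SketchOutcome[T]":
            # Defer init() (e.g. _init_workflow) until execution time,
            # then apply the handler it returns to the original thunk.
            return SketchOutcome(lambda: init()(self._thunk))

        def also(self, cm: "AbstractContextManager[object]") -> "SketchOutcome[T]":
            # Enter the context manager around the eventual execution.
            def run() -> T:
                with cm:
                    return self._thunk()

            return SketchOutcome(run)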
@@ -473,7 +529,8 @@ def decorate_transaction(
                 "name": func.__name__,
                 "operationType": OperationType.TRANSACTION.value,
             }
-            with EnterDBOSTransaction(session, attributes=attributes) as ctx:
+            with EnterDBOSTransaction(session, attributes=attributes):
+                ctx = assert_current_dbos_context()
                 txn_output: TransactionResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
@@ -562,6 +619,11 @@ def decorate_transaction(
                     raise
             return output
 
+        if inspect.iscoroutinefunction(func):
+            raise DBOSException(
+                f"Function {func.__name__} is a coroutine function, but DBOS.transaction does not support coroutine functions"
+            )
+
         fi = get_or_create_func_info(func)
 
         @wraps(func)
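Workflows and steps gained coroutine support in this release, but transactions did not; this guard makes the limitation explicit by failing fast at decoration time instead of returning a never-awaited coroutine at runtime. Hypothetical application code illustrating the new behavior:

    from dbos import DBOS

    # In 0.17.0 this raises DBOSException as soon as the decorator is applied:
    # "Function insert_row is a coroutine function, but DBOS.transaction does
    # not support coroutine functions"
    @DBOS.transaction()
    async def insert_row() -> None:  # hypothetical async transaction
        ...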
@@ -603,8 +665,8 @@ def decorate_step(
     interval_seconds: float = 1.0,
     max_attempts: int = 3,
     backoff_rate: float = 2.0,
-) -> Callable[[F], F]:
-    def decorator(func: F) -> F:
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+    def decorator(func: Callable[P, R]) -> Callable[P, R]:
 
         def invoke_step(*args: Any, **kwargs: Any) -> Any:
             if dbosreg.dbos is None:
@@ -617,13 +679,48 @@ def decorate_step(
                 "name": func.__name__,
                 "operationType": OperationType.STEP.value,
             }
-            with EnterDBOSStep(attributes) as ctx:
+
+            attempts = max_attempts if retries_allowed else 1
+            max_retry_interval_seconds: float = 3600  # 1 Hour
+
+            def on_exception(attempt: int, error: BaseException) -> float:
+                dbos.logger.warning(
+                    f"Step being automatically retried. (attempt {attempt} of {attempts}). {traceback.format_exc()}"
+                )
+                ctx = assert_current_dbos_context()
+                ctx.get_current_span().add_event(
+                    f"Step attempt {attempt} failed",
+                    {
+                        "error": str(error),
+                        "retryIntervalSeconds": interval_seconds,
+                    },
+                )
+                return min(
+                    interval_seconds * (backoff_rate**attempt),
+                    max_retry_interval_seconds,
+                )
+
+            def record_step_result(func: Callable[[], R]) -> R:
+                ctx = assert_current_dbos_context()
                 step_output: OperationResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
                     "output": None,
                     "error": None,
                 }
+
+                try:
+                    output = func()
+                    step_output["output"] = _serialization.serialize(output)
+                    return output
+                except Exception as error:
+                    step_output["error"] = _serialization.serialize_exception(error)
+                    raise
+                finally:
+                    dbos._sys_db.record_operation_result(step_output)
+
+            def check_existing_result() -> Union[NoResult, R]:
+                ctx = assert_current_dbos_context()
                 recorded_output = dbos._sys_db.check_operation_execution(
                     ctx.workflow_id, ctx.function_id
                 )
@@ -637,57 +734,29 @@ def decorate_step(
                     )
                     raise deserialized_error
                 elif recorded_output["output"] is not None:
-                    return _serialization.deserialize(recorded_output["output"])
+                    return cast(
+                        R, _serialization.deserialize(recorded_output["output"])
+                    )
                 else:
                     raise Exception("Output and error are both None")
             else:
                 dbos.logger.debug(
                     f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
                 )
+                return NoResult()
 
-                output = None
-                error = None
-                local_max_attempts = max_attempts if retries_allowed else 1
-                max_retry_interval_seconds: float = 3600  # 1 Hour
-                local_interval_seconds = interval_seconds
-                for attempt in range(1, local_max_attempts + 1):
-                    try:
-                        output = func(*args, **kwargs)
-                        step_output["output"] = _serialization.serialize(output)
-                        error = None
-                        break
-                    except Exception as err:
-                        error = err
-                        if retries_allowed:
-                            dbos.logger.warning(
-                                f"Step being automatically retried. (attempt {attempt} of {local_max_attempts}). {traceback.format_exc()}"
-                            )
-                            ctx.get_current_span().add_event(
-                                f"Step attempt {attempt} failed",
-                                {
-                                    "error": str(error),
-                                    "retryIntervalSeconds": local_interval_seconds,
-                                },
-                            )
-                        if attempt == local_max_attempts:
-                            error = DBOSMaxStepRetriesExceeded()
-                        else:
-                            time.sleep(local_interval_seconds)
-                            local_interval_seconds = min(
-                                local_interval_seconds * backoff_rate,
-                                max_retry_interval_seconds,
-                            )
-
-                step_output["error"] = (
-                    _serialization.serialize_exception(error)
-                    if error is not None
-                    else None
+            stepOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
+            if retries_allowed:
+                stepOutcome = stepOutcome.retry(
+                    max_attempts, on_exception, lambda i: DBOSMaxStepRetriesExceeded()
                 )
-                dbos._sys_db.record_operation_result(step_output)
 
-                if error is not None:
-                    raise error
-                return output
+            outcome = (
+                stepOutcome.then(record_step_result)
+                .intercept(check_existing_result)
+                .also(EnterDBOSStep(attributes))
+            )
+            return outcome()
 
         fi = get_or_create_func_info(func)
 
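The retry arithmetic now lives in `on_exception`, which returns the next sleep interval: exponential backoff `interval_seconds * backoff_rate ** attempt`, capped at one hour. Assuming `retry` passes a 1-based attempt counter (as the log message suggests), the default settings (`interval_seconds=1.0`, `backoff_rate=2.0`) produce the waits below; note the old loop slept `interval_seconds` first and multiplied afterwards, so the first wait shifts from 1s to 2s:

    interval_seconds, backoff_rate = 1.0, 2.0
    max_retry_interval_seconds = 3600.0  # 1-hour cap, as in the diff

    # Sleep computed after each failed attempt (assumed 1-based, per the log message)
    for attempt in range(1, 6):
        delay = min(interval_seconds * backoff_rate**attempt, max_retry_interval_seconds)
        print(f"after attempt {attempt}: sleep {delay:g}s")
    # after attempt 1: sleep 2s
    # after attempt 2: sleep 4s
    # after attempt 3: sleep 8s
    # after attempt 4: sleep 16s
    # after attempt 5: sleep 32s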
@@ -711,16 +780,25 @@ def decorate_step(
             assert tempwf
             return tempwf(*args, **kwargs)
 
-        def temp_wf(*args: Any, **kwargs: Any) -> Any:
+        def temp_wf_sync(*args: Any, **kwargs: Any) -> Any:
             return wrapper(*args, **kwargs)
 
+        async def temp_wf_async(*args: Any, **kwargs: Any) -> Any:
+            return await wrapper(*args, **kwargs)
+
+        # Other code in transact-py depends on the name of temporary workflow functions being "temp_wf",
+        # so set the name of both the sync and async temporary workflow functions explicitly
+        temp_wf_sync.__name__ = "temp_wf"
+        temp_wf_async.__name__ = "temp_wf"
+
+        temp_wf = temp_wf_async if inspect.iscoroutinefunction(func) else temp_wf_sync
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "step")
         dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
         wrapper.__orig_func = temp_wf  # type: ignore
 
-        return cast(F, wrapper)
+        return cast(Callable[P, R], wrapper)
 
     return decorator
 
@@ -732,7 +810,8 @@ def send(
     attributes: TracedAttributes = {
         "name": "send",
     }
-    with EnterDBOSStep(attributes) as ctx:
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         dbos._sys_db.send(
             ctx.workflow_id,
             ctx.curr_step_function_id,
@@ -759,7 +838,8 @@ def recv(dbos: "DBOS", topic: Optional[str] = None, timeout_seconds: float = 60)
     attributes: TracedAttributes = {
         "name": "recv",
     }
-    with EnterDBOSStep(attributes) as ctx:
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         ctx.function_id += 1  # Reserve for the sleep
         timeout_function_id = ctx.function_id
         return dbos._sys_db.recv(
@@ -784,7 +864,8 @@ def set_event(dbos: "DBOS", key: str, value: Any) -> None:
     attributes: TracedAttributes = {
         "name": "set_event",
     }
-    with EnterDBOSStep(attributes) as ctx:
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         dbos._sys_db.set_event(
             ctx.workflow_id, ctx.curr_step_function_id, key, value
         )
@@ -805,7 +886,8 @@ def get_event(
     attributes: TracedAttributes = {
         "name": "get_event",
     }
-    with EnterDBOSStep(attributes) as ctx:
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         ctx.function_id += 1
         timeout_function_id = ctx.function_id
         caller_ctx: GetEventWorkflowContext = {
{dbos-0.16.1 → dbos-0.17.0}/dbos/_dbos.py

@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import asyncio
 import atexit
 import json
 import os
@@ -13,6 +14,7 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
+    Coroutine,
     Generic,
     List,
     Literal,
@@ -21,6 +23,9 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
+    Union,
+    cast,
+    overload,
 )
 
 from opentelemetry.trace import Span
@@ -40,7 +45,7 @@ from ._core import (
     start_workflow,
     workflow_wrapper,
 )
-from ._queue import Queue, _queue_thread
+from ._queue import Queue, queue_thread
 from ._recovery import recover_pending_workflows, startup_recovery_thread
 from ._registrations import (
     DEFAULT_MAX_RECOVERY_ATTEMPTS,
@@ -71,6 +76,7 @@ else:
     from ._admin_server import AdminServer
     from ._app_db import ApplicationDatabase
     from ._context import (
+        DBOSContext,
         EnterDBOSStep,
         TracedAttributes,
         assert_current_dbos_context,
@@ -277,6 +283,7 @@ class DBOS:
         self.flask: Optional["Flask"] = flask
         self._executor_field: Optional[ThreadPoolExecutor] = None
         self._background_threads: List[threading.Thread] = []
+        self._executor_id: str = os.environ.get("DBOS__VMID", "local")
 
         # If using FastAPI, set up middleware and lifecycle events
         if self.fastapi is not None:
@@ -377,7 +384,7 @@ class DBOS:
         evt = threading.Event()
         self.stop_events.append(evt)
         bg_queue_thread = threading.Thread(
-            target=_queue_thread, args=(evt, self), daemon=True
+            target=queue_thread, args=(evt, self), daemon=True
         )
         bg_queue_thread.start()
         self._background_threads.append(bg_queue_thread)
@@ -432,7 +439,7 @@ class DBOS:
     @classmethod
     def workflow(
         cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
-    ) -> Callable[[F], F]:
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
         """Decorate a function for use as a DBOS workflow."""
         return decorate_workflow(_get_or_create_dbos_registry(), max_recovery_attempts)
 
@@ -457,7 +464,7 @@ class DBOS:
         interval_seconds: float = 1.0,
         max_attempts: int = 3,
         backoff_rate: float = 2.0,
-    ) -> Callable[[F], F]:
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
         """
         Decorate and configure a function for use as a DBOS step.
 
@@ -542,15 +549,36 @@ class DBOS:
                 f"{e.name} dependency not found. Please install {e.name} via your package manager."
             ) from e
 
+    @overload
+    @classmethod
+    def start_workflow(
+        cls,
+        func: Workflow[P, Coroutine[Any, Any, R]],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> WorkflowHandle[R]: ...
+
+    @overload
     @classmethod
     def start_workflow(
         cls,
         func: Workflow[P, R],
         *args: P.args,
         **kwargs: P.kwargs,
+    ) -> WorkflowHandle[R]: ...
+
+    @classmethod
+    def start_workflow(
+        cls,
+        func: Workflow[P, Union[R, Coroutine[Any, Any, R]]],
+        *args: P.args,
+        **kwargs: P.kwargs,
     ) -> WorkflowHandle[R]:
         """Invoke a workflow function in the background, returning a handle to the ongoing execution."""
-        return start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs)
+        return cast(
+            WorkflowHandle[R],
+            start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs),
+        )
 
     @classmethod
     def get_workflow_status(cls, workflow_id: str) -> Optional[WorkflowStatus]:
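With these overloads, `DBOS.start_workflow` accepts both plain and coroutine workflow functions and types the returned handle by the awaited result (`WorkflowHandle[R]` rather than `WorkflowHandle[Coroutine[...]]`). A usage sketch; the `double` workflow is hypothetical:

    from dbos import DBOS

    @DBOS.workflow()
    async def double(x: int) -> int:  # hypothetical coroutine workflow
        await DBOS.sleep_async(1)
        return x * 2

    # Resolved by the Coroutine overload, so this is a WorkflowHandle[int]
    handle = DBOS.start_workflow(double, 21)
    assert handle.get_result() == 42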
@@ -602,6 +630,13 @@ class DBOS:
         """Send a message to a workflow execution."""
         return send(_get_dbos_instance(), destination_id, message, topic)
 
+    @classmethod
+    async def send_async(
+        cls, destination_id: str, message: Any, topic: Optional[str] = None
+    ) -> None:
+        """Send a message to a workflow execution."""
+        await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))
+
     @classmethod
     def recv(cls, topic: Optional[str] = None, timeout_seconds: float = 60) -> Any:
         """
@@ -612,13 +647,25 @@ class DBOS:
         """
         return recv(_get_dbos_instance(), topic, timeout_seconds)
 
+    @classmethod
+    async def recv_async(
+        cls, topic: Optional[str] = None, timeout_seconds: float = 60
+    ) -> Any:
+        """
+        Receive a workflow message.
+
+        This function is to be called from within a workflow.
+        `recv_async` will return the message sent on `topic`, asynchronously waiting if necessary.
+        """
+        return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))
+
     @classmethod
     def sleep(cls, seconds: float) -> None:
         """
         Sleep for the specified time (in seconds).
 
-        It is important to use `DBOS.sleep` (as opposed to any other sleep) within workflows,
-        as the `DBOS.sleep`s are durable and completed sleeps will be skipped during recovery.
+        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
+        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
         """
         if seconds <= 0:
             return
@@ -631,7 +678,8 @@ class DBOS:
         attributes: TracedAttributes = {
             "name": "sleep",
         }
-        with EnterDBOSStep(attributes) as ctx:
+        with EnterDBOSStep(attributes):
+            ctx = assert_current_dbos_context()
             _get_dbos_instance()._sys_db.sleep(
                 ctx.workflow_id, ctx.curr_step_function_id, seconds
             )
@@ -639,17 +687,25 @@ class DBOS:
         # Cannot call it from outside of a workflow
         raise DBOSException("sleep() must be called from within a workflow")
 
+    @classmethod
+    async def sleep_async(cls, seconds: float) -> None:
+        """
+        Sleep for the specified time (in seconds).
+
+        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
+        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
+        """
+        await asyncio.to_thread(lambda: DBOS.sleep(seconds))
+
     @classmethod
     def set_event(cls, key: str, value: Any) -> None:
         """
         Set a workflow event.
 
-        This function is to be called from within a workflow.
-
         `set_event` sets the `value` of `key` for the current workflow instance ID.
         This `value` can then be retrieved by other functions, using `get_event` below.
-
-        Each workflow invocation should only call set_event once per `key`.
+        If the event `key` already exists, its `value` is updated.
+        This function can only be called from within a workflow.
 
         Args:
             key(str): The event key / name within the workflow
@@ -658,6 +714,23 @@ class DBOS:
         """
         return set_event(_get_dbos_instance(), key, value)
 
+    @classmethod
+    async def set_event_async(cls, key: str, value: Any) -> None:
+        """
+        Set a workflow event.
+
+        `set_event_async` sets the `value` of `key` for the current workflow instance ID.
+        This `value` can then be retrieved by other functions, using `get_event` below.
+        If the event `key` already exists, its `value` is updated.
+        This function can only be called from within a workflow.
+
+        Args:
+            key(str): The event key / name within the workflow
+            value(Any): A serializable value to associate with the key
+
+        """
+        await asyncio.to_thread(lambda: DBOS.set_event(key, value))
+
     @classmethod
     def get_event(cls, workflow_id: str, key: str, timeout_seconds: float = 60) -> Any:
         """
@@ -673,6 +746,25 @@ class DBOS:
         """
         return get_event(_get_dbos_instance(), workflow_id, key, timeout_seconds)
 
+    @classmethod
+    async def get_event_async(
+        cls, workflow_id: str, key: str, timeout_seconds: float = 60
+    ) -> Any:
+        """
+        Return the `value` of a workflow event, waiting for it to occur if necessary.
+
+        `get_event_async` waits for a corresponding `set_event` by the workflow with ID `workflow_id` with the same `key`.
+
+        Args:
+            workflow_id(str): The workflow instance ID that is expected to call `set_event` on `key`
+            key(str): The event key / name within the workflow
+            timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called by the workflow
+
+        """
+        return await asyncio.to_thread(
+            lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
+        )
+
     @classmethod
     def execute_workflow_id(cls, workflow_id: str) -> WorkflowHandle[Any]:
         """Execute a workflow by ID (for recovery)."""