dbos 0.16.1__py3-none-any.whl → 0.17.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dbos might be problematic. Click here for more details.
- dbos/_core.py +175 -93
- dbos/_dbos.py +104 -12
- dbos/_outcome.py +206 -0
- dbos/_queue.py +2 -2
- dbos/_sys_db.py +6 -3
- {dbos-0.16.1.dist-info → dbos-0.17.0.dist-info}/METADATA +1 -1
- {dbos-0.16.1.dist-info → dbos-0.17.0.dist-info}/RECORD +10 -9
- {dbos-0.16.1.dist-info → dbos-0.17.0.dist-info}/WHEEL +0 -0
- {dbos-0.16.1.dist-info → dbos-0.17.0.dist-info}/entry_points.txt +0 -0
- {dbos-0.16.1.dist-info → dbos-0.17.0.dist-info}/licenses/LICENSE +0 -0
dbos/_core.py
CHANGED
|
@@ -1,10 +1,27 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import functools
|
|
3
|
+
import inspect
|
|
1
4
|
import json
|
|
2
5
|
import sys
|
|
3
6
|
import time
|
|
4
7
|
import traceback
|
|
5
8
|
from concurrent.futures import Future
|
|
6
9
|
from functools import wraps
|
|
7
|
-
from typing import
|
|
10
|
+
from typing import (
|
|
11
|
+
TYPE_CHECKING,
|
|
12
|
+
Any,
|
|
13
|
+
Callable,
|
|
14
|
+
Coroutine,
|
|
15
|
+
Generic,
|
|
16
|
+
Optional,
|
|
17
|
+
Tuple,
|
|
18
|
+
TypeVar,
|
|
19
|
+
Union,
|
|
20
|
+
cast,
|
|
21
|
+
overload,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
from dbos._outcome import Immediate, NoResult, Outcome, Pending
|
|
8
25
|
|
|
9
26
|
from ._app_db import ApplicationDatabase, TransactionResultInternal
|
|
10
27
|
|
|
@@ -181,39 +198,38 @@ def _init_workflow(
|
|
|
181
198
|
return status
|
|
182
199
|
|
|
183
200
|
|
|
184
|
-
def
|
|
201
|
+
def _get_wf_invoke_func(
|
|
185
202
|
dbos: "DBOS",
|
|
186
203
|
status: WorkflowStatusInternal,
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
dbos._sys_db.
|
|
213
|
-
|
|
214
|
-
raise
|
|
204
|
+
) -> Callable[[Callable[[], R]], R]:
|
|
205
|
+
def persist(func: Callable[[], R]) -> R:
|
|
206
|
+
try:
|
|
207
|
+
output = func()
|
|
208
|
+
status["status"] = "SUCCESS"
|
|
209
|
+
status["output"] = _serialization.serialize(output)
|
|
210
|
+
if status["queue_name"] is not None:
|
|
211
|
+
queue = dbos._registry.queue_info_map[status["queue_name"]]
|
|
212
|
+
dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
|
|
213
|
+
dbos._sys_db.buffer_workflow_status(status)
|
|
214
|
+
return output
|
|
215
|
+
except DBOSWorkflowConflictIDError:
|
|
216
|
+
# Retrieve the workflow handle and wait for the result.
|
|
217
|
+
# Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
|
|
218
|
+
wf_handle: "WorkflowHandle[R]" = dbos.retrieve_workflow(
|
|
219
|
+
status["workflow_uuid"], existing_workflow=False
|
|
220
|
+
)
|
|
221
|
+
output = wf_handle.get_result()
|
|
222
|
+
return output
|
|
223
|
+
except Exception as error:
|
|
224
|
+
status["status"] = "ERROR"
|
|
225
|
+
status["error"] = _serialization.serialize_exception(error)
|
|
226
|
+
if status["queue_name"] is not None:
|
|
227
|
+
queue = dbos._registry.queue_info_map[status["queue_name"]]
|
|
228
|
+
dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
|
|
229
|
+
dbos._sys_db.update_workflow_status(status)
|
|
230
|
+
raise
|
|
215
231
|
|
|
216
|
-
return
|
|
232
|
+
return persist
|
|
217
233
|
|
|
218
234
|
|
|
219
235
|
def _execute_workflow_wthread(
|
|
@@ -231,7 +247,15 @@ def _execute_workflow_wthread(
|
|
|
231
247
|
with DBOSContextSwap(ctx):
|
|
232
248
|
with EnterDBOSWorkflow(attributes):
|
|
233
249
|
try:
|
|
234
|
-
|
|
250
|
+
result = (
|
|
251
|
+
Outcome[R]
|
|
252
|
+
.make(functools.partial(func, *args, **kwargs))
|
|
253
|
+
.then(_get_wf_invoke_func(dbos, status))
|
|
254
|
+
)
|
|
255
|
+
if isinstance(result, Immediate):
|
|
256
|
+
return cast(Immediate[R], result)()
|
|
257
|
+
else:
|
|
258
|
+
return asyncio.run(cast(Pending[R], result)())
|
|
235
259
|
except Exception:
|
|
236
260
|
dbos.logger.error(
|
|
237
261
|
f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
|
|
@@ -305,6 +329,18 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
|
|
|
305
329
|
)
|
|
306
330
|
|
|
307
331
|
|
|
332
|
+
@overload
|
|
333
|
+
def start_workflow(
|
|
334
|
+
dbos: "DBOS",
|
|
335
|
+
func: "Workflow[P, Coroutine[Any, Any, R]]",
|
|
336
|
+
queue_name: Optional[str],
|
|
337
|
+
execute_workflow: bool,
|
|
338
|
+
*args: P.args,
|
|
339
|
+
**kwargs: P.kwargs,
|
|
340
|
+
) -> "WorkflowHandle[R]": ...
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
@overload
|
|
308
344
|
def start_workflow(
|
|
309
345
|
dbos: "DBOS",
|
|
310
346
|
func: "Workflow[P, R]",
|
|
@@ -312,6 +348,16 @@ def start_workflow(
|
|
|
312
348
|
execute_workflow: bool,
|
|
313
349
|
*args: P.args,
|
|
314
350
|
**kwargs: P.kwargs,
|
|
351
|
+
) -> "WorkflowHandle[R]": ...
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
def start_workflow(
|
|
355
|
+
dbos: "DBOS",
|
|
356
|
+
func: "Workflow[P, Union[R, Coroutine[Any, Any, R]]]",
|
|
357
|
+
queue_name: Optional[str],
|
|
358
|
+
execute_workflow: bool,
|
|
359
|
+
*args: P.args,
|
|
360
|
+
**kwargs: P.kwargs,
|
|
315
361
|
) -> "WorkflowHandle[R]":
|
|
316
362
|
fself: Optional[object] = None
|
|
317
363
|
if hasattr(func, "__self__"):
|
|
@@ -396,16 +442,16 @@ def start_workflow(
|
|
|
396
442
|
|
|
397
443
|
def workflow_wrapper(
|
|
398
444
|
dbosreg: "DBOSRegistry",
|
|
399
|
-
func:
|
|
445
|
+
func: Callable[P, R],
|
|
400
446
|
max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
|
|
401
|
-
) ->
|
|
447
|
+
) -> Callable[P, R]:
|
|
402
448
|
func.__orig_func = func # type: ignore
|
|
403
449
|
|
|
404
450
|
fi = get_or_create_func_info(func)
|
|
405
451
|
fi.max_recovery_attempts = max_recovery_attempts
|
|
406
452
|
|
|
407
453
|
@wraps(func)
|
|
408
|
-
def wrapper(*args: Any, **kwargs: Any) ->
|
|
454
|
+
def wrapper(*args: Any, **kwargs: Any) -> R:
|
|
409
455
|
if dbosreg.dbos is None:
|
|
410
456
|
raise DBOSException(
|
|
411
457
|
f"Function {func.__name__} invoked before DBOS initialized"
|
|
@@ -425,7 +471,10 @@ def workflow_wrapper(
|
|
|
425
471
|
enterWorkflowCtxMgr = (
|
|
426
472
|
EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
|
|
427
473
|
)
|
|
428
|
-
|
|
474
|
+
|
|
475
|
+
wfOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
|
|
476
|
+
|
|
477
|
+
def init_wf() -> Callable[[Callable[[], R]], R]:
|
|
429
478
|
ctx = assert_current_dbos_context() # Now the child ctx
|
|
430
479
|
status = _init_workflow(
|
|
431
480
|
dbos,
|
|
@@ -441,16 +490,23 @@ def workflow_wrapper(
|
|
|
441
490
|
dbos.logger.debug(
|
|
442
491
|
f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
|
|
443
492
|
)
|
|
444
|
-
return _execute_workflow(dbos, status, func, *args, **kwargs)
|
|
445
493
|
|
|
446
|
-
|
|
447
|
-
|
|
494
|
+
return _get_wf_invoke_func(dbos, status)
|
|
495
|
+
|
|
496
|
+
outcome = (
|
|
497
|
+
wfOutcome.wrap(init_wf)
|
|
498
|
+
.also(DBOSAssumeRole(rr))
|
|
499
|
+
.also(enterWorkflowCtxMgr(attributes))
|
|
500
|
+
)
|
|
501
|
+
return outcome() # type: ignore
|
|
502
|
+
|
|
503
|
+
return wrapper
|
|
448
504
|
|
|
449
505
|
|
|
450
506
|
def decorate_workflow(
|
|
451
507
|
reg: "DBOSRegistry", max_recovery_attempts: int
|
|
452
|
-
) -> Callable[[
|
|
453
|
-
def _workflow_decorator(func:
|
|
508
|
+
) -> Callable[[Callable[P, R]], Callable[P, R]]:
|
|
509
|
+
def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
|
|
454
510
|
wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
|
|
455
511
|
reg.register_wf_function(func.__qualname__, wrapped_func)
|
|
456
512
|
return wrapped_func
|
|
@@ -473,7 +529,8 @@ def decorate_transaction(
|
|
|
473
529
|
"name": func.__name__,
|
|
474
530
|
"operationType": OperationType.TRANSACTION.value,
|
|
475
531
|
}
|
|
476
|
-
with EnterDBOSTransaction(session, attributes=attributes)
|
|
532
|
+
with EnterDBOSTransaction(session, attributes=attributes):
|
|
533
|
+
ctx = assert_current_dbos_context()
|
|
477
534
|
txn_output: TransactionResultInternal = {
|
|
478
535
|
"workflow_uuid": ctx.workflow_id,
|
|
479
536
|
"function_id": ctx.function_id,
|
|
@@ -562,6 +619,11 @@ def decorate_transaction(
|
|
|
562
619
|
raise
|
|
563
620
|
return output
|
|
564
621
|
|
|
622
|
+
if inspect.iscoroutinefunction(func):
|
|
623
|
+
raise DBOSException(
|
|
624
|
+
f"Function {func.__name__} is a coroutine function, but DBOS.transaction does not support coroutine functions"
|
|
625
|
+
)
|
|
626
|
+
|
|
565
627
|
fi = get_or_create_func_info(func)
|
|
566
628
|
|
|
567
629
|
@wraps(func)
|
|
@@ -603,8 +665,8 @@ def decorate_step(
|
|
|
603
665
|
interval_seconds: float = 1.0,
|
|
604
666
|
max_attempts: int = 3,
|
|
605
667
|
backoff_rate: float = 2.0,
|
|
606
|
-
) -> Callable[[
|
|
607
|
-
def decorator(func:
|
|
668
|
+
) -> Callable[[Callable[P, R]], Callable[P, R]]:
|
|
669
|
+
def decorator(func: Callable[P, R]) -> Callable[P, R]:
|
|
608
670
|
|
|
609
671
|
def invoke_step(*args: Any, **kwargs: Any) -> Any:
|
|
610
672
|
if dbosreg.dbos is None:
|
|
@@ -617,13 +679,48 @@ def decorate_step(
|
|
|
617
679
|
"name": func.__name__,
|
|
618
680
|
"operationType": OperationType.STEP.value,
|
|
619
681
|
}
|
|
620
|
-
|
|
682
|
+
|
|
683
|
+
attempts = max_attempts if retries_allowed else 1
|
|
684
|
+
max_retry_interval_seconds: float = 3600 # 1 Hour
|
|
685
|
+
|
|
686
|
+
def on_exception(attempt: int, error: BaseException) -> float:
|
|
687
|
+
dbos.logger.warning(
|
|
688
|
+
f"Step being automatically retried. (attempt {attempt} of {attempts}). {traceback.format_exc()}"
|
|
689
|
+
)
|
|
690
|
+
ctx = assert_current_dbos_context()
|
|
691
|
+
ctx.get_current_span().add_event(
|
|
692
|
+
f"Step attempt {attempt} failed",
|
|
693
|
+
{
|
|
694
|
+
"error": str(error),
|
|
695
|
+
"retryIntervalSeconds": interval_seconds,
|
|
696
|
+
},
|
|
697
|
+
)
|
|
698
|
+
return min(
|
|
699
|
+
interval_seconds * (backoff_rate**attempt),
|
|
700
|
+
max_retry_interval_seconds,
|
|
701
|
+
)
|
|
702
|
+
|
|
703
|
+
def record_step_result(func: Callable[[], R]) -> R:
|
|
704
|
+
ctx = assert_current_dbos_context()
|
|
621
705
|
step_output: OperationResultInternal = {
|
|
622
706
|
"workflow_uuid": ctx.workflow_id,
|
|
623
707
|
"function_id": ctx.function_id,
|
|
624
708
|
"output": None,
|
|
625
709
|
"error": None,
|
|
626
710
|
}
|
|
711
|
+
|
|
712
|
+
try:
|
|
713
|
+
output = func()
|
|
714
|
+
step_output["output"] = _serialization.serialize(output)
|
|
715
|
+
return output
|
|
716
|
+
except Exception as error:
|
|
717
|
+
step_output["error"] = _serialization.serialize_exception(error)
|
|
718
|
+
raise
|
|
719
|
+
finally:
|
|
720
|
+
dbos._sys_db.record_operation_result(step_output)
|
|
721
|
+
|
|
722
|
+
def check_existing_result() -> Union[NoResult, R]:
|
|
723
|
+
ctx = assert_current_dbos_context()
|
|
627
724
|
recorded_output = dbos._sys_db.check_operation_execution(
|
|
628
725
|
ctx.workflow_id, ctx.function_id
|
|
629
726
|
)
|
|
@@ -637,57 +734,29 @@ def decorate_step(
|
|
|
637
734
|
)
|
|
638
735
|
raise deserialized_error
|
|
639
736
|
elif recorded_output["output"] is not None:
|
|
640
|
-
return
|
|
737
|
+
return cast(
|
|
738
|
+
R, _serialization.deserialize(recorded_output["output"])
|
|
739
|
+
)
|
|
641
740
|
else:
|
|
642
741
|
raise Exception("Output and error are both None")
|
|
643
742
|
else:
|
|
644
743
|
dbos.logger.debug(
|
|
645
744
|
f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
|
|
646
745
|
)
|
|
746
|
+
return NoResult()
|
|
647
747
|
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
local_interval_seconds = interval_seconds
|
|
653
|
-
for attempt in range(1, local_max_attempts + 1):
|
|
654
|
-
try:
|
|
655
|
-
output = func(*args, **kwargs)
|
|
656
|
-
step_output["output"] = _serialization.serialize(output)
|
|
657
|
-
error = None
|
|
658
|
-
break
|
|
659
|
-
except Exception as err:
|
|
660
|
-
error = err
|
|
661
|
-
if retries_allowed:
|
|
662
|
-
dbos.logger.warning(
|
|
663
|
-
f"Step being automatically retried. (attempt {attempt} of {local_max_attempts}). {traceback.format_exc()}"
|
|
664
|
-
)
|
|
665
|
-
ctx.get_current_span().add_event(
|
|
666
|
-
f"Step attempt {attempt} failed",
|
|
667
|
-
{
|
|
668
|
-
"error": str(error),
|
|
669
|
-
"retryIntervalSeconds": local_interval_seconds,
|
|
670
|
-
},
|
|
671
|
-
)
|
|
672
|
-
if attempt == local_max_attempts:
|
|
673
|
-
error = DBOSMaxStepRetriesExceeded()
|
|
674
|
-
else:
|
|
675
|
-
time.sleep(local_interval_seconds)
|
|
676
|
-
local_interval_seconds = min(
|
|
677
|
-
local_interval_seconds * backoff_rate,
|
|
678
|
-
max_retry_interval_seconds,
|
|
679
|
-
)
|
|
680
|
-
|
|
681
|
-
step_output["error"] = (
|
|
682
|
-
_serialization.serialize_exception(error)
|
|
683
|
-
if error is not None
|
|
684
|
-
else None
|
|
748
|
+
stepOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
|
|
749
|
+
if retries_allowed:
|
|
750
|
+
stepOutcome = stepOutcome.retry(
|
|
751
|
+
max_attempts, on_exception, lambda i: DBOSMaxStepRetriesExceeded()
|
|
685
752
|
)
|
|
686
|
-
dbos._sys_db.record_operation_result(step_output)
|
|
687
753
|
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
754
|
+
outcome = (
|
|
755
|
+
stepOutcome.then(record_step_result)
|
|
756
|
+
.intercept(check_existing_result)
|
|
757
|
+
.also(EnterDBOSStep(attributes))
|
|
758
|
+
)
|
|
759
|
+
return outcome()
|
|
691
760
|
|
|
692
761
|
fi = get_or_create_func_info(func)
|
|
693
762
|
|
|
@@ -711,16 +780,25 @@ def decorate_step(
|
|
|
711
780
|
assert tempwf
|
|
712
781
|
return tempwf(*args, **kwargs)
|
|
713
782
|
|
|
714
|
-
def
|
|
783
|
+
def temp_wf_sync(*args: Any, **kwargs: Any) -> Any:
|
|
715
784
|
return wrapper(*args, **kwargs)
|
|
716
785
|
|
|
786
|
+
async def temp_wf_async(*args: Any, **kwargs: Any) -> Any:
|
|
787
|
+
return await wrapper(*args, **kwargs)
|
|
788
|
+
|
|
789
|
+
# Other code in transact-py depends on the name of temporary workflow functions to be "temp_wf"
|
|
790
|
+
# so set the name of both sync and async temporary workflow functions explicitly
|
|
791
|
+
temp_wf_sync.__name__ = "temp_wf"
|
|
792
|
+
temp_wf_async.__name__ = "temp_wf"
|
|
793
|
+
|
|
794
|
+
temp_wf = temp_wf_async if inspect.iscoroutinefunction(func) else temp_wf_sync
|
|
717
795
|
wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
|
|
718
796
|
set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
|
|
719
797
|
set_temp_workflow_type(temp_wf, "step")
|
|
720
798
|
dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
|
|
721
799
|
wrapper.__orig_func = temp_wf # type: ignore
|
|
722
800
|
|
|
723
|
-
return cast(
|
|
801
|
+
return cast(Callable[P, R], wrapper)
|
|
724
802
|
|
|
725
803
|
return decorator
|
|
726
804
|
|
|
@@ -732,7 +810,8 @@ def send(
|
|
|
732
810
|
attributes: TracedAttributes = {
|
|
733
811
|
"name": "send",
|
|
734
812
|
}
|
|
735
|
-
with EnterDBOSStep(attributes)
|
|
813
|
+
with EnterDBOSStep(attributes):
|
|
814
|
+
ctx = assert_current_dbos_context()
|
|
736
815
|
dbos._sys_db.send(
|
|
737
816
|
ctx.workflow_id,
|
|
738
817
|
ctx.curr_step_function_id,
|
|
@@ -759,7 +838,8 @@ def recv(dbos: "DBOS", topic: Optional[str] = None, timeout_seconds: float = 60)
|
|
|
759
838
|
attributes: TracedAttributes = {
|
|
760
839
|
"name": "recv",
|
|
761
840
|
}
|
|
762
|
-
with EnterDBOSStep(attributes)
|
|
841
|
+
with EnterDBOSStep(attributes):
|
|
842
|
+
ctx = assert_current_dbos_context()
|
|
763
843
|
ctx.function_id += 1 # Reserve for the sleep
|
|
764
844
|
timeout_function_id = ctx.function_id
|
|
765
845
|
return dbos._sys_db.recv(
|
|
@@ -784,7 +864,8 @@ def set_event(dbos: "DBOS", key: str, value: Any) -> None:
|
|
|
784
864
|
attributes: TracedAttributes = {
|
|
785
865
|
"name": "set_event",
|
|
786
866
|
}
|
|
787
|
-
with EnterDBOSStep(attributes)
|
|
867
|
+
with EnterDBOSStep(attributes):
|
|
868
|
+
ctx = assert_current_dbos_context()
|
|
788
869
|
dbos._sys_db.set_event(
|
|
789
870
|
ctx.workflow_id, ctx.curr_step_function_id, key, value
|
|
790
871
|
)
|
|
@@ -805,7 +886,8 @@ def get_event(
|
|
|
805
886
|
attributes: TracedAttributes = {
|
|
806
887
|
"name": "get_event",
|
|
807
888
|
}
|
|
808
|
-
with EnterDBOSStep(attributes)
|
|
889
|
+
with EnterDBOSStep(attributes):
|
|
890
|
+
ctx = assert_current_dbos_context()
|
|
809
891
|
ctx.function_id += 1
|
|
810
892
|
timeout_function_id = ctx.function_id
|
|
811
893
|
caller_ctx: GetEventWorkflowContext = {
|
dbos/_dbos.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
+
import asyncio
|
|
3
4
|
import atexit
|
|
4
5
|
import json
|
|
5
6
|
import os
|
|
@@ -13,6 +14,7 @@ from typing import (
|
|
|
13
14
|
TYPE_CHECKING,
|
|
14
15
|
Any,
|
|
15
16
|
Callable,
|
|
17
|
+
Coroutine,
|
|
16
18
|
Generic,
|
|
17
19
|
List,
|
|
18
20
|
Literal,
|
|
@@ -21,6 +23,9 @@ from typing import (
|
|
|
21
23
|
Tuple,
|
|
22
24
|
Type,
|
|
23
25
|
TypeVar,
|
|
26
|
+
Union,
|
|
27
|
+
cast,
|
|
28
|
+
overload,
|
|
24
29
|
)
|
|
25
30
|
|
|
26
31
|
from opentelemetry.trace import Span
|
|
@@ -40,7 +45,7 @@ from ._core import (
|
|
|
40
45
|
start_workflow,
|
|
41
46
|
workflow_wrapper,
|
|
42
47
|
)
|
|
43
|
-
from ._queue import Queue,
|
|
48
|
+
from ._queue import Queue, queue_thread
|
|
44
49
|
from ._recovery import recover_pending_workflows, startup_recovery_thread
|
|
45
50
|
from ._registrations import (
|
|
46
51
|
DEFAULT_MAX_RECOVERY_ATTEMPTS,
|
|
@@ -71,6 +76,7 @@ else:
|
|
|
71
76
|
from ._admin_server import AdminServer
|
|
72
77
|
from ._app_db import ApplicationDatabase
|
|
73
78
|
from ._context import (
|
|
79
|
+
DBOSContext,
|
|
74
80
|
EnterDBOSStep,
|
|
75
81
|
TracedAttributes,
|
|
76
82
|
assert_current_dbos_context,
|
|
@@ -277,6 +283,7 @@ class DBOS:
|
|
|
277
283
|
self.flask: Optional["Flask"] = flask
|
|
278
284
|
self._executor_field: Optional[ThreadPoolExecutor] = None
|
|
279
285
|
self._background_threads: List[threading.Thread] = []
|
|
286
|
+
self._executor_id: str = os.environ.get("DBOS__VMID", "local")
|
|
280
287
|
|
|
281
288
|
# If using FastAPI, set up middleware and lifecycle events
|
|
282
289
|
if self.fastapi is not None:
|
|
@@ -377,7 +384,7 @@ class DBOS:
|
|
|
377
384
|
evt = threading.Event()
|
|
378
385
|
self.stop_events.append(evt)
|
|
379
386
|
bg_queue_thread = threading.Thread(
|
|
380
|
-
target=
|
|
387
|
+
target=queue_thread, args=(evt, self), daemon=True
|
|
381
388
|
)
|
|
382
389
|
bg_queue_thread.start()
|
|
383
390
|
self._background_threads.append(bg_queue_thread)
|
|
@@ -432,7 +439,7 @@ class DBOS:
|
|
|
432
439
|
@classmethod
|
|
433
440
|
def workflow(
|
|
434
441
|
cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
|
|
435
|
-
) -> Callable[[
|
|
442
|
+
) -> Callable[[Callable[P, R]], Callable[P, R]]:
|
|
436
443
|
"""Decorate a function for use as a DBOS workflow."""
|
|
437
444
|
return decorate_workflow(_get_or_create_dbos_registry(), max_recovery_attempts)
|
|
438
445
|
|
|
@@ -457,7 +464,7 @@ class DBOS:
|
|
|
457
464
|
interval_seconds: float = 1.0,
|
|
458
465
|
max_attempts: int = 3,
|
|
459
466
|
backoff_rate: float = 2.0,
|
|
460
|
-
) -> Callable[[
|
|
467
|
+
) -> Callable[[Callable[P, R]], Callable[P, R]]:
|
|
461
468
|
"""
|
|
462
469
|
Decorate and configure a function for use as a DBOS step.
|
|
463
470
|
|
|
@@ -542,15 +549,36 @@ class DBOS:
|
|
|
542
549
|
f"{e.name} dependency not found. Please install {e.name} via your package manager."
|
|
543
550
|
) from e
|
|
544
551
|
|
|
552
|
+
@overload
|
|
553
|
+
@classmethod
|
|
554
|
+
def start_workflow(
|
|
555
|
+
cls,
|
|
556
|
+
func: Workflow[P, Coroutine[Any, Any, R]],
|
|
557
|
+
*args: P.args,
|
|
558
|
+
**kwargs: P.kwargs,
|
|
559
|
+
) -> WorkflowHandle[R]: ...
|
|
560
|
+
|
|
561
|
+
@overload
|
|
545
562
|
@classmethod
|
|
546
563
|
def start_workflow(
|
|
547
564
|
cls,
|
|
548
565
|
func: Workflow[P, R],
|
|
549
566
|
*args: P.args,
|
|
550
567
|
**kwargs: P.kwargs,
|
|
568
|
+
) -> WorkflowHandle[R]: ...
|
|
569
|
+
|
|
570
|
+
@classmethod
|
|
571
|
+
def start_workflow(
|
|
572
|
+
cls,
|
|
573
|
+
func: Workflow[P, Union[R, Coroutine[Any, Any, R]]],
|
|
574
|
+
*args: P.args,
|
|
575
|
+
**kwargs: P.kwargs,
|
|
551
576
|
) -> WorkflowHandle[R]:
|
|
552
577
|
"""Invoke a workflow function in the background, returning a handle to the ongoing execution."""
|
|
553
|
-
return
|
|
578
|
+
return cast(
|
|
579
|
+
WorkflowHandle[R],
|
|
580
|
+
start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs),
|
|
581
|
+
)
|
|
554
582
|
|
|
555
583
|
@classmethod
|
|
556
584
|
def get_workflow_status(cls, workflow_id: str) -> Optional[WorkflowStatus]:
|
|
@@ -602,6 +630,13 @@ class DBOS:
|
|
|
602
630
|
"""Send a message to a workflow execution."""
|
|
603
631
|
return send(_get_dbos_instance(), destination_id, message, topic)
|
|
604
632
|
|
|
633
|
+
@classmethod
|
|
634
|
+
async def send_async(
|
|
635
|
+
cls, destination_id: str, message: Any, topic: Optional[str] = None
|
|
636
|
+
) -> None:
|
|
637
|
+
"""Send a message to a workflow execution."""
|
|
638
|
+
await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))
|
|
639
|
+
|
|
605
640
|
@classmethod
|
|
606
641
|
def recv(cls, topic: Optional[str] = None, timeout_seconds: float = 60) -> Any:
|
|
607
642
|
"""
|
|
@@ -612,13 +647,25 @@ class DBOS:
|
|
|
612
647
|
"""
|
|
613
648
|
return recv(_get_dbos_instance(), topic, timeout_seconds)
|
|
614
649
|
|
|
650
|
+
@classmethod
|
|
651
|
+
async def recv_async(
|
|
652
|
+
cls, topic: Optional[str] = None, timeout_seconds: float = 60
|
|
653
|
+
) -> Any:
|
|
654
|
+
"""
|
|
655
|
+
Receive a workflow message.
|
|
656
|
+
|
|
657
|
+
This function is to be called from within a workflow.
|
|
658
|
+
`recv_async` will return the message sent on `topic`, asyncronously waiting if necessary.
|
|
659
|
+
"""
|
|
660
|
+
return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))
|
|
661
|
+
|
|
615
662
|
@classmethod
|
|
616
663
|
def sleep(cls, seconds: float) -> None:
|
|
617
664
|
"""
|
|
618
665
|
Sleep for the specified time (in seconds).
|
|
619
666
|
|
|
620
|
-
It is important to use `DBOS.sleep` (as opposed to any other sleep) within workflows,
|
|
621
|
-
as the
|
|
667
|
+
It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
|
|
668
|
+
as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
|
|
622
669
|
"""
|
|
623
670
|
if seconds <= 0:
|
|
624
671
|
return
|
|
@@ -631,7 +678,8 @@ class DBOS:
|
|
|
631
678
|
attributes: TracedAttributes = {
|
|
632
679
|
"name": "sleep",
|
|
633
680
|
}
|
|
634
|
-
with EnterDBOSStep(attributes)
|
|
681
|
+
with EnterDBOSStep(attributes):
|
|
682
|
+
ctx = assert_current_dbos_context()
|
|
635
683
|
_get_dbos_instance()._sys_db.sleep(
|
|
636
684
|
ctx.workflow_id, ctx.curr_step_function_id, seconds
|
|
637
685
|
)
|
|
@@ -639,17 +687,25 @@ class DBOS:
|
|
|
639
687
|
# Cannot call it from outside of a workflow
|
|
640
688
|
raise DBOSException("sleep() must be called from within a workflow")
|
|
641
689
|
|
|
690
|
+
@classmethod
|
|
691
|
+
async def sleep_async(cls, seconds: float) -> None:
|
|
692
|
+
"""
|
|
693
|
+
Sleep for the specified time (in seconds).
|
|
694
|
+
|
|
695
|
+
It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
|
|
696
|
+
as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
|
|
697
|
+
"""
|
|
698
|
+
await asyncio.to_thread(lambda: DBOS.sleep(seconds))
|
|
699
|
+
|
|
642
700
|
@classmethod
|
|
643
701
|
def set_event(cls, key: str, value: Any) -> None:
|
|
644
702
|
"""
|
|
645
703
|
Set a workflow event.
|
|
646
704
|
|
|
647
|
-
This function is to be called from within a workflow.
|
|
648
|
-
|
|
649
705
|
`set_event` sets the `value` of `key` for the current workflow instance ID.
|
|
650
706
|
This `value` can then be retrieved by other functions, using `get_event` below.
|
|
651
|
-
|
|
652
|
-
|
|
707
|
+
If the event `key` already exists, its `value` is updated.
|
|
708
|
+
This function can only be called from within a workflow.
|
|
653
709
|
|
|
654
710
|
Args:
|
|
655
711
|
key(str): The event key / name within the workflow
|
|
@@ -658,6 +714,23 @@ class DBOS:
|
|
|
658
714
|
"""
|
|
659
715
|
return set_event(_get_dbos_instance(), key, value)
|
|
660
716
|
|
|
717
|
+
@classmethod
|
|
718
|
+
async def set_event_async(cls, key: str, value: Any) -> None:
|
|
719
|
+
"""
|
|
720
|
+
Set a workflow event.
|
|
721
|
+
|
|
722
|
+
`set_event_async` sets the `value` of `key` for the current workflow instance ID.
|
|
723
|
+
This `value` can then be retrieved by other functions, using `get_event` below.
|
|
724
|
+
If the event `key` already exists, its `value` is updated.
|
|
725
|
+
This function can only be called from within a workflow.
|
|
726
|
+
|
|
727
|
+
Args:
|
|
728
|
+
key(str): The event key / name within the workflow
|
|
729
|
+
value(Any): A serializable value to associate with the key
|
|
730
|
+
|
|
731
|
+
"""
|
|
732
|
+
await asyncio.to_thread(lambda: DBOS.set_event(key, value))
|
|
733
|
+
|
|
661
734
|
@classmethod
|
|
662
735
|
def get_event(cls, workflow_id: str, key: str, timeout_seconds: float = 60) -> Any:
|
|
663
736
|
"""
|
|
@@ -673,6 +746,25 @@ class DBOS:
|
|
|
673
746
|
"""
|
|
674
747
|
return get_event(_get_dbos_instance(), workflow_id, key, timeout_seconds)
|
|
675
748
|
|
|
749
|
+
@classmethod
|
|
750
|
+
async def get_event_async(
|
|
751
|
+
cls, workflow_id: str, key: str, timeout_seconds: float = 60
|
|
752
|
+
) -> Any:
|
|
753
|
+
"""
|
|
754
|
+
Return the `value` of a workflow event, waiting for it to occur if necessary.
|
|
755
|
+
|
|
756
|
+
`get_event_async` waits for a corresponding `set_event` by the workflow with ID `workflow_id` with the same `key`.
|
|
757
|
+
|
|
758
|
+
Args:
|
|
759
|
+
workflow_id(str): The workflow instance ID that is expected to call `set_event` on `key`
|
|
760
|
+
key(str): The event key / name within the workflow
|
|
761
|
+
timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called byt the workflow
|
|
762
|
+
|
|
763
|
+
"""
|
|
764
|
+
return await asyncio.to_thread(
|
|
765
|
+
lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
|
|
766
|
+
)
|
|
767
|
+
|
|
676
768
|
@classmethod
|
|
677
769
|
def execute_workflow_id(cls, workflow_id: str) -> WorkflowHandle[Any]:
|
|
678
770
|
"""Execute a workflow by ID (for recovery)."""
|
dbos/_outcome.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import contextlib
|
|
3
|
+
import inspect
|
|
4
|
+
import time
|
|
5
|
+
from typing import Any, Callable, Coroutine, Optional, Protocol, TypeVar, Union, cast
|
|
6
|
+
|
|
7
|
+
T = TypeVar("T")
|
|
8
|
+
R = TypeVar("R")
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class NoResult:
|
|
12
|
+
_instance: Optional["NoResult"] = None
|
|
13
|
+
__slots__ = ()
|
|
14
|
+
|
|
15
|
+
def __new__(cls, *args: Any, **kwargs: Any) -> "NoResult":
|
|
16
|
+
if not cls._instance:
|
|
17
|
+
cls._instance = super(NoResult, cls).__new__(cls, *args, **kwargs)
|
|
18
|
+
return cls._instance
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# define Outcome protocol w/ common composition methods
|
|
22
|
+
class Outcome(Protocol[T]):
|
|
23
|
+
|
|
24
|
+
def wrap(
|
|
25
|
+
self, before: Callable[[], Callable[[Callable[[], T]], R]]
|
|
26
|
+
) -> "Outcome[R]": ...
|
|
27
|
+
|
|
28
|
+
def then(self, next: Callable[[Callable[[], T]], R]) -> "Outcome[R]": ...
|
|
29
|
+
|
|
30
|
+
def also(
|
|
31
|
+
self, cm: contextlib.AbstractContextManager[Any, bool]
|
|
32
|
+
) -> "Outcome[T]": ...
|
|
33
|
+
|
|
34
|
+
def retry(
|
|
35
|
+
self,
|
|
36
|
+
attempts: int,
|
|
37
|
+
on_exception: Callable[[int, BaseException], float],
|
|
38
|
+
exceeded_retries: Callable[[int], BaseException],
|
|
39
|
+
) -> "Outcome[T]": ...
|
|
40
|
+
|
|
41
|
+
def intercept(
|
|
42
|
+
self, interceptor: Callable[[], Union[NoResult, T]]
|
|
43
|
+
) -> "Outcome[T]": ...
|
|
44
|
+
|
|
45
|
+
def __call__(self) -> Union[T, Coroutine[Any, Any, T]]: ...
|
|
46
|
+
|
|
47
|
+
# Helper function to create an Immediate or Pending Result, depending on if func is a coroutine function or not
|
|
48
|
+
@staticmethod
|
|
49
|
+
def make(func: Callable[[], Union[T, Coroutine[Any, Any, T]]]) -> "Outcome[T]":
|
|
50
|
+
return (
|
|
51
|
+
Pending(cast(Callable[[], Coroutine[Any, Any, T]], func))
|
|
52
|
+
if inspect.iscoroutinefunction(func)
|
|
53
|
+
else Immediate(cast(Callable[[], T], func))
|
|
54
|
+
)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# Immediate Outcome - for composing non-async functions
|
|
58
|
+
class Immediate(Outcome[T]):
|
|
59
|
+
__slots__ = "_func"
|
|
60
|
+
|
|
61
|
+
def __init__(self, func: Callable[[], T]):
|
|
62
|
+
self._func = func
|
|
63
|
+
|
|
64
|
+
def then(self, next: Callable[[Callable[[], T]], R]) -> "Immediate[R]":
|
|
65
|
+
return Immediate(lambda: next(self._func))
|
|
66
|
+
|
|
67
|
+
def wrap(
|
|
68
|
+
self, before: Callable[[], Callable[[Callable[[], T]], R]]
|
|
69
|
+
) -> "Immediate[R]":
|
|
70
|
+
return Immediate(lambda: before()(self._func))
|
|
71
|
+
|
|
72
|
+
@staticmethod
|
|
73
|
+
def _intercept(
|
|
74
|
+
func: Callable[[], T], interceptor: Callable[[], Union[NoResult, T]]
|
|
75
|
+
) -> T:
|
|
76
|
+
intercepted = interceptor()
|
|
77
|
+
return intercepted if not isinstance(intercepted, NoResult) else func()
|
|
78
|
+
|
|
79
|
+
def intercept(
|
|
80
|
+
self, interceptor: Callable[[], Union[NoResult, T]]
|
|
81
|
+
) -> "Immediate[T]":
|
|
82
|
+
return Immediate[T](lambda: Immediate._intercept(self._func, interceptor))
|
|
83
|
+
|
|
84
|
+
@staticmethod
|
|
85
|
+
def _also(func: Callable[[], T], cm: contextlib.AbstractContextManager[Any, bool]) -> T: # type: ignore
|
|
86
|
+
with cm:
|
|
87
|
+
return func()
|
|
88
|
+
|
|
89
|
+
def also(self, cm: contextlib.AbstractContextManager[Any, bool]) -> "Immediate[T]":
|
|
90
|
+
return Immediate[T](lambda: Immediate._also(self._func, cm))
|
|
91
|
+
|
|
92
|
+
@staticmethod
|
|
93
|
+
def _retry(
|
|
94
|
+
func: Callable[[], T],
|
|
95
|
+
attempts: int,
|
|
96
|
+
on_exception: Callable[[int, BaseException], float],
|
|
97
|
+
exceeded_retries: Callable[[int], BaseException],
|
|
98
|
+
) -> T:
|
|
99
|
+
for i in range(attempts):
|
|
100
|
+
try:
|
|
101
|
+
return func()
|
|
102
|
+
except Exception as exp:
|
|
103
|
+
wait_time = on_exception(i, exp)
|
|
104
|
+
time.sleep(wait_time)
|
|
105
|
+
|
|
106
|
+
raise exceeded_retries(attempts)
|
|
107
|
+
|
|
108
|
+
def retry(
|
|
109
|
+
self,
|
|
110
|
+
attempts: int,
|
|
111
|
+
on_exception: Callable[[int, BaseException], float],
|
|
112
|
+
exceeded_retries: Callable[[int], BaseException],
|
|
113
|
+
) -> "Immediate[T]":
|
|
114
|
+
assert attempts > 0
|
|
115
|
+
return Immediate[T](
|
|
116
|
+
lambda: Immediate._retry(
|
|
117
|
+
self._func, attempts, on_exception, exceeded_retries
|
|
118
|
+
)
|
|
119
|
+
)
|
|
120
|
+
|
|
121
|
+
def __call__(self) -> T:
|
|
122
|
+
return self._func()
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
# Pending Outcome - for composing async functions
class Pending(Outcome[T]):
    """A deferred asynchronous computation.

    Wraps a zero-argument coroutine factory and offers combinators
    (``wrap``, ``then``, ``also``, ``intercept``, ``retry``) that layer
    extra behavior around it. Nothing executes until the Pending itself
    is awaited via ``__call__``. Synchronous hooks are pushed onto worker
    threads with ``asyncio.to_thread`` so blocking code never stalls the
    event loop.
    """

    __slots__ = "_func"

    def __init__(self, func: Callable[[], Coroutine[Any, Any, T]]):
        # The wrapped coroutine factory; a fresh coroutine per await.
        self._func = func

    # Helper method in order to raise an exception in a lambda
    @staticmethod
    def _raise(ex: BaseException) -> T:
        raise ex

    # @staticmethod for consistency with the other private helpers, all of
    # which are invoked through the class (Pending._wrap(...)).
    @staticmethod
    async def _wrap(
        func: Callable[[], Coroutine[Any, Any, T]],
        before: Callable[[], Callable[[Callable[[], T]], R]],
    ) -> R:
        """Run `before` off-loop, await `func`, then hand the outcome to the
        continuation `before` returned.

        The continuation always runs: it receives either a thunk yielding the
        value or a thunk that re-raises the captured exception.
        """
        after = await asyncio.to_thread(before)
        try:
            value = await func()
            return await asyncio.to_thread(after, lambda: value)
        except BaseException as exp:
            return await asyncio.to_thread(after, lambda: Pending._raise(exp))

    def wrap(
        self, before: Callable[[], Callable[[Callable[[], T]], R]]
    ) -> "Pending[R]":
        """Bracket this outcome with a `before` hook and its continuation."""
        return Pending[R](lambda: Pending._wrap(self._func, before))

    def then(self, next: Callable[[Callable[[], T]], R]) -> "Pending[R]":
        """Chain a continuation over this outcome (value or raised error)."""
        return Pending[R](lambda: Pending._wrap(self._func, lambda: next))

    @staticmethod
    async def _also(  # type: ignore
        func: Callable[[], Coroutine[Any, Any, T]],
        cm: contextlib.AbstractContextManager[Any, bool],
    ) -> T:
        """Await `func` while `cm` is entered."""
        with cm:
            return await func()

    def also(self, cm: contextlib.AbstractContextManager[Any, bool]) -> "Pending[T]":
        """Return a Pending whose evaluation is bracketed by `cm`."""
        return Pending[T](lambda: Pending._also(self._func, cm))

    @staticmethod
    async def _intercept(
        func: Callable[[], Coroutine[Any, Any, T]],
        interceptor: Callable[[], Union[NoResult, T]],
    ) -> T:
        # Give the interceptor (run off-loop) a chance to short-circuit;
        # only await the underlying coroutine if it declines via NoResult.
        intercepted = await asyncio.to_thread(interceptor)
        return intercepted if not isinstance(intercepted, NoResult) else await func()

    def intercept(self, interceptor: Callable[[], Union[NoResult, T]]) -> "Pending[T]":
        """Let `interceptor` supply the result, bypassing execution."""
        return Pending[T](lambda: Pending._intercept(self._func, interceptor))

    @staticmethod
    async def _retry(
        func: Callable[[], Coroutine[Any, Any, T]],
        attempts: int,
        on_exception: Callable[[int, BaseException], float],
        exceeded_retries: Callable[[int], BaseException],
    ) -> T:
        """Await `func` up to `attempts` times, backing off between failures.

        `on_exception(attempt_index, error)` computes the backoff (seconds)
        after every failure. The sleep is skipped after the final attempt —
        it would only delay the terminal error — and the exception produced
        by `exceeded_retries` is chained to the last underlying failure.
        """
        last_exp = None
        for i in range(attempts):
            try:
                return await func()
            except Exception as exp:
                last_exp = exp
                wait_time = on_exception(i, exp)
                # No point sleeping when there is no next attempt.
                if i + 1 < attempts:
                    await asyncio.sleep(wait_time)

        raise exceeded_retries(attempts) from last_exp

    def retry(
        self,
        attempts: int,
        on_exception: Callable[[int, BaseException], float],
        exceeded_retries: Callable[[int], BaseException],
    ) -> "Pending[T]":
        """Return a Pending that retries this outcome up to `attempts` times.

        Raises:
            ValueError: if `attempts` is not positive. Validated with an
                explicit raise rather than `assert` so the guard survives
                `python -O`.
        """
        if attempts <= 0:
            raise ValueError(f"attempts must be positive, got {attempts}")
        return Pending[T](
            lambda: Pending._retry(self._func, attempts, on_exception, exceeded_retries)
        )

    async def __call__(self) -> T:
        """Execute the composed coroutine chain and return its result."""
        return await self._func()
|
dbos/_queue.py
CHANGED
|
@@ -51,13 +51,13 @@ class Queue:
|
|
|
51
51
|
return start_workflow(dbos, func, self.name, False, *args, **kwargs)
|
|
52
52
|
|
|
53
53
|
|
|
54
|
-
def
|
|
54
|
+
def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
|
|
55
55
|
while not stop_event.is_set():
|
|
56
56
|
if stop_event.wait(timeout=1):
|
|
57
57
|
return
|
|
58
58
|
for _, queue in dbos._registry.queue_info_map.items():
|
|
59
59
|
try:
|
|
60
|
-
wf_ids = dbos._sys_db.start_queued_workflows(queue)
|
|
60
|
+
wf_ids = dbos._sys_db.start_queued_workflows(queue, dbos._executor_id)
|
|
61
61
|
for id in wf_ids:
|
|
62
62
|
execute_workflow_by_id(dbos, id)
|
|
63
63
|
except Exception:
|
dbos/_sys_db.py
CHANGED
|
@@ -1104,7 +1104,7 @@ class SystemDatabase:
|
|
|
1104
1104
|
.on_conflict_do_nothing()
|
|
1105
1105
|
)
|
|
1106
1106
|
|
|
1107
|
-
def start_queued_workflows(self, queue: "Queue") -> List[str]:
|
|
1107
|
+
def start_queued_workflows(self, queue: "Queue", executor_id: str) -> List[str]:
|
|
1108
1108
|
start_time_ms = int(time.time() * 1000)
|
|
1109
1109
|
if queue.limiter is not None:
|
|
1110
1110
|
limiter_period_ms = int(queue.limiter["period"] * 1000)
|
|
@@ -1159,7 +1159,7 @@ class SystemDatabase:
|
|
|
1159
1159
|
if len(ret_ids) + num_recent_queries >= queue.limiter["limit"]:
|
|
1160
1160
|
break
|
|
1161
1161
|
|
|
1162
|
-
# To start a function, first set its status to PENDING
|
|
1162
|
+
# To start a function, first set its status to PENDING and update its executor ID
|
|
1163
1163
|
c.execute(
|
|
1164
1164
|
SystemSchema.workflow_status.update()
|
|
1165
1165
|
.where(SystemSchema.workflow_status.c.workflow_uuid == id)
|
|
@@ -1167,7 +1167,10 @@ class SystemDatabase:
|
|
|
1167
1167
|
SystemSchema.workflow_status.c.status
|
|
1168
1168
|
== WorkflowStatusString.ENQUEUED.value
|
|
1169
1169
|
)
|
|
1170
|
-
.values(
|
|
1170
|
+
.values(
|
|
1171
|
+
status=WorkflowStatusString.PENDING.value,
|
|
1172
|
+
executor_id=executor_id,
|
|
1173
|
+
)
|
|
1171
1174
|
)
|
|
1172
1175
|
|
|
1173
1176
|
# Then give it a start time
|
|
@@ -1,15 +1,15 @@
|
|
|
1
|
-
dbos-0.
|
|
2
|
-
dbos-0.
|
|
3
|
-
dbos-0.
|
|
4
|
-
dbos-0.
|
|
1
|
+
dbos-0.17.0.dist-info/METADATA,sha256=jFKo8TAjYuyNqaNqT6YpSBvRzFVuTW8uws5cODC_25Q,5020
|
|
2
|
+
dbos-0.17.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
|
|
3
|
+
dbos-0.17.0.dist-info/entry_points.txt,sha256=z6GcVANQV7Uw_82H9Ob2axJX6V3imftyZsljdh-M1HU,54
|
|
4
|
+
dbos-0.17.0.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
|
|
5
5
|
dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
|
|
6
6
|
dbos/_admin_server.py,sha256=DOgzVp9kmwiebQqmJB1LcrZnGTxSMbZiGXdenc1wZDg,3163
|
|
7
7
|
dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
|
|
8
8
|
dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
|
|
9
9
|
dbos/_context.py,sha256=KV3fd3-Rv6EWrYDUdHARxltSlNZGNtQtNSqeQ-gkXE8,18049
|
|
10
|
-
dbos/_core.py,sha256=
|
|
10
|
+
dbos/_core.py,sha256=NWJFQX5bECBvKlYH9pVmNJgmqFGYPnkHnOGjOlOQ3Ag,33504
|
|
11
11
|
dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
|
|
12
|
-
dbos/_dbos.py,sha256=
|
|
12
|
+
dbos/_dbos.py,sha256=riYx_dkYFzqeVDYpmcA5ABdAYQFhwyDi4AwxIihDNKA,34809
|
|
13
13
|
dbos/_dbos_config.py,sha256=f37eccN3JpCA32kRdQ4UsERjhYGcdLWv-N21ijnDZmY,6406
|
|
14
14
|
dbos/_error.py,sha256=UETk8CoZL-TO2Utn1-E7OSWelhShWmKM-fOlODMR9PE,3893
|
|
15
15
|
dbos/_fastapi.py,sha256=iyefCZq-ZDKRUjN_rgYQmFmyvWf4gPrSlC6CLbfq4a8,3419
|
|
@@ -25,7 +25,8 @@ dbos/_migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEUL
|
|
|
25
25
|
dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-mUro43wGhsg5wcQWKZPRHD6jw8R5pVc,986
|
|
26
26
|
dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
|
|
27
27
|
dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
|
|
28
|
-
dbos/
|
|
28
|
+
dbos/_outcome.py,sha256=FDMgWVjZ06vm9xO-38H17mTqBImUYQxgKs_bDCSIAhE,6648
|
|
29
|
+
dbos/_queue.py,sha256=5NZ6RfKQd8LQD8EeUXgrwu86r0AadKEqPIMmL_1ORuw,1956
|
|
29
30
|
dbos/_recovery.py,sha256=jbzGYxICA2drzyzlBSy2UiXhKV_16tBVacKQdTkqf-w,2008
|
|
30
31
|
dbos/_registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642
|
|
31
32
|
dbos/_request.py,sha256=cX1B3Atlh160phgS35gF1VEEV4pD126c9F3BDgBmxZU,929
|
|
@@ -35,7 +36,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
|
35
36
|
dbos/_schemas/application_database.py,sha256=KeyoPrF7hy_ODXV7QNike_VFSD74QBRfQ76D7QyE9HI,966
|
|
36
37
|
dbos/_schemas/system_database.py,sha256=7iw7eHJzEvkatHMOaHORoSvtfisF73wW5j8hRt_Ph14,5126
|
|
37
38
|
dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
|
|
38
|
-
dbos/_sys_db.py,sha256=
|
|
39
|
+
dbos/_sys_db.py,sha256=uZKeCnGc2MgvEd0ID3nReBBZj21HzClP56TFkXTvIZE,49028
|
|
39
40
|
dbos/_templates/hello/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
|
|
40
41
|
dbos/_templates/hello/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
41
42
|
dbos/_templates/hello/__package/main.py,sha256=eI0SS9Nwj-fldtiuSzIlIG6dC91GXXwdRsoHxv6S_WI,2719
|
|
@@ -51,4 +52,4 @@ dbos/cli.py,sha256=em1uAxrp5yyg53V7ZpmHFtqD6OJp2cMJkG9vGJPoFTA,10904
|
|
|
51
52
|
dbos/dbos-config.schema.json,sha256=tS7x-bdFbFvpobcs3pIOhwun3yr_ndvTEYOn4BJjTzs,5889
|
|
52
53
|
dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
|
|
53
54
|
version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
|
|
54
|
-
dbos-0.
|
|
55
|
+
dbos-0.17.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|