dbos 0.16.0a2__py3-none-any.whl → 0.16.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dbos has been flagged as potentially problematic.
- dbos/_core.py +93 -175
- dbos/_dbos.py +10 -101
- {dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/METADATA +1 -1
- {dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/RECORD +7 -8
- dbos/_outcome.py +0 -190
- {dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/WHEEL +0 -0
- {dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/entry_points.txt +0 -0
- {dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/licenses/LICENSE +0 -0
dbos/_core.py
CHANGED

@@ -1,27 +1,10 @@
-import asyncio
-import functools
-import inspect
 import json
 import sys
 import time
 import traceback
 from concurrent.futures import Future
 from functools import wraps
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Coroutine,
-    Generic,
-    Optional,
-    Tuple,
-    TypeVar,
-    Union,
-    cast,
-    overload,
-)
-
-from dbos._outcome import Immediate, Outcome, Pending
+from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Tuple, TypeVar, cast
 
 from ._app_db import ApplicationDatabase, TransactionResultInternal
 
@@ -198,38 +181,39 @@ def _init_workflow(
     return status
 
 
-def _get_wf_invoke_func(
+def _execute_workflow(
     dbos: "DBOS",
     status: WorkflowStatusInternal,
-    … (body of the deleted helper not preserved in this diff view)
+    func: "Workflow[P, R]",
+    *args: Any,
+    **kwargs: Any,
+) -> R:
+    try:
+        output = func(*args, **kwargs)
+        status["status"] = "SUCCESS"
+        status["output"] = _serialization.serialize(output)
+        if status["queue_name"] is not None:
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+        dbos._sys_db.buffer_workflow_status(status)
+    except DBOSWorkflowConflictIDError:
+        # Retrieve the workflow handle and wait for the result.
+        # Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
+        wf_handle: "WorkflowHandle[R]" = dbos.retrieve_workflow(
+            status["workflow_uuid"], existing_workflow=False
+        )
+        output = wf_handle.get_result()
+        return output
+    except Exception as error:
+        status["status"] = "ERROR"
+        status["error"] = _serialization.serialize_exception(error)
+        if status["queue_name"] is not None:
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+        dbos._sys_db.update_workflow_status(status)
+        raise
 
-    return …
+    return output
 
 
 def _execute_workflow_wthread(
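The new `_execute_workflow` collapses the previous `Outcome`-based pipeline into one synchronous function: on success it buffers a SUCCESS status, on `DBOSWorkflowConflictIDError` it waits on the existing execution's handle, and on any other exception it records an ERROR status and re-raises. The conflict branch is what makes duplicate invocations of the same workflow ID return the first run's result. A minimal, self-contained sketch of that pattern (illustrative stand-ins only, not dbos source):

import threading

_events: dict[str, threading.Event] = {}
_values: dict[str, object] = {}
_lock = threading.Lock()

def execute_once(workflow_id: str, func, *args):
    # The first caller to claim the ID runs func; duplicates wait for its result,
    # mirroring the wf_handle.get_result() branch above.
    with _lock:
        first = workflow_id not in _events
        if first:
            _events[workflow_id] = threading.Event()
    if not first:
        _events[workflow_id].wait()
        return _values[workflow_id]
    _values[workflow_id] = func(*args)
    _events[workflow_id].set()
    return _values[workflow_id]

print(execute_once("wf-1", lambda x: x * 2, 21))  # 42
print(execute_once("wf-1", lambda x: x * 2, 99))  # still 42: the duplicate is absorbed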
@@ -247,15 +231,7 @@ def _execute_workflow_wthread(
     with DBOSContextSwap(ctx):
         with EnterDBOSWorkflow(attributes):
             try:
-                result = (
-                    Outcome[R]
-                    .make(functools.partial(func, *args, **kwargs))
-                    .then(_get_wf_invoke_func(dbos, status))
-                )
-                if isinstance(result, Immediate):
-                    return cast(Immediate[R], result)()
-                else:
-                    return asyncio.run(cast(Pending[R], result)())
+                return _execute_workflow(dbos, status, func, *args, **kwargs)
             except Exception:
                 dbos.logger.error(
                     f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
@@ -329,18 +305,6 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
     )
 
 
-@overload
-def start_workflow(
-    dbos: "DBOS",
-    func: "Workflow[P, Coroutine[Any, Any, R]]",
-    queue_name: Optional[str],
-    execute_workflow: bool,
-    *args: P.args,
-    **kwargs: P.kwargs,
-) -> "WorkflowHandle[R]": ...
-
-
-@overload
 def start_workflow(
     dbos: "DBOS",
     func: "Workflow[P, R]",
@@ -348,16 +312,6 @@ def start_workflow(
     execute_workflow: bool,
     *args: P.args,
     **kwargs: P.kwargs,
-) -> "WorkflowHandle[R]": ...
-
-
-def start_workflow(
-    dbos: "DBOS",
-    func: "Workflow[P, Union[R, Coroutine[Any, Any, R]]]",
-    queue_name: Optional[str],
-    execute_workflow: bool,
-    *args: P.args,
-    **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
@@ -442,16 +396,16 @@ def start_workflow(
 
 def workflow_wrapper(
     dbosreg: "DBOSRegistry",
-    func: …
+    func: F,
     max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
-) -> …
+) -> F:
     func.__orig_func = func  # type: ignore
 
     fi = get_or_create_func_info(func)
     fi.max_recovery_attempts = max_recovery_attempts
 
     @wraps(func)
-    def wrapper(*args: Any, **kwargs: Any) -> …
+    def wrapper(*args: Any, **kwargs: Any) -> Any:
         if dbosreg.dbos is None:
             raise DBOSException(
                 f"Function {func.__name__} invoked before DBOS initialized"
@@ -471,10 +425,7 @@ def workflow_wrapper(
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
-
-        wfOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
-
-        def init_wf() -> Callable[[Callable[[], R]], R]:
+        with enterWorkflowCtxMgr(attributes), DBOSAssumeRole(rr):
             ctx = assert_current_dbos_context()  # Now the child ctx
             status = _init_workflow(
                 dbos,
@@ -490,23 +441,16 @@ def workflow_wrapper(
             dbos.logger.debug(
                 f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
             )
+            return _execute_workflow(dbos, status, func, *args, **kwargs)
 
-
-
-        outcome = (
-            wfOutcome.wrap(init_wf)
-            .also(DBOSAssumeRole(rr))
-            .also(enterWorkflowCtxMgr(attributes))
-        )
-        return outcome()  # type: ignore
-
-    return wrapper
+    wrapped_func = cast(F, wrapper)
+    return wrapped_func
 
 
 def decorate_workflow(
     reg: "DBOSRegistry", max_recovery_attempts: int
-) -> Callable[[…
-    def _workflow_decorator(func: …
+) -> Callable[[F], F]:
+    def _workflow_decorator(func: F) -> F:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func
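Several signatures above move to a single decorator-typing idiom: `Callable[[F], F]` with `F` a `TypeVar` bound to `Callable[..., Any]`, and `cast(F, wrapper)` on the way out. In isolation the pattern looks like this sketch (assumes the usual bound-TypeVar definition such code relies on; not dbos source):

from functools import wraps
from typing import Any, Callable, TypeVar, cast

F = TypeVar("F", bound=Callable[..., Any])

def passthrough(func: F) -> F:
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        return func(*args, **kwargs)
    # Cast back to F so callers keep the decorated function's original signature.
    return cast(F, wrapper)

@passthrough
def add(a: int, b: int) -> int:
    return a + b

print(add(2, 3))  # 5, with add still typed as (int, int) -> int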
@@ -529,8 +473,7 @@ def decorate_transaction(
                 "name": func.__name__,
                 "operationType": OperationType.TRANSACTION.value,
             }
-            with EnterDBOSTransaction(session, attributes=attributes):
-                ctx = assert_current_dbos_context()
+            with EnterDBOSTransaction(session, attributes=attributes) as ctx:
                 txn_output: TransactionResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
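A recurring mechanical change in this release replaces `with EnterDBOSTransaction(...):` plus a separate `assert_current_dbos_context()` call with `with EnterDBOSTransaction(...) as ctx:` (the same rewrite recurs for `EnterDBOSStep` below). That only works because the context manager's `__enter__` returns the active context. A stand-alone illustration (hypothetical class, not dbos source):

class EnterScope:
    """Hypothetical stand-in for context managers like EnterDBOSTransaction."""

    def __init__(self, name: str):
        self.name = name

    def __enter__(self) -> "EnterScope":
        return self          # the value bound by "with ... as ctx"

    def __exit__(self, *exc: object) -> bool:
        return False         # do not swallow exceptions

with EnterScope("transaction") as ctx:
    print(ctx.name)          # "transaction"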
@@ -619,11 +562,6 @@ def decorate_transaction(
                     raise
             return output
 
-        if inspect.iscoroutinefunction(func):
-            raise DBOSException(
-                f"Function {func.__name__} is a coroutine function, but DBOS.transaction does not support coroutine functions"
-            )
-
         fi = get_or_create_func_info(func)
 
         @wraps(func)
@@ -665,8 +603,8 @@ def decorate_step(
     interval_seconds: float = 1.0,
     max_attempts: int = 3,
     backoff_rate: float = 2.0,
-) -> Callable[[…
-    def decorator(func: …
+) -> Callable[[F], F]:
+    def decorator(func: F) -> F:
 
         def invoke_step(*args: Any, **kwargs: Any) -> Any:
             if dbosreg.dbos is None:
@@ -679,48 +617,13 @@ def decorate_step(
                 "name": func.__name__,
                 "operationType": OperationType.STEP.value,
             }
-
-            attempts = max_attempts if retries_allowed else 1
-            max_retry_interval_seconds: float = 3600  # 1 Hour
-
-            def on_exception(attempt: int, error: BaseException) -> float:
-                dbos.logger.warning(
-                    f"Step being automatically retried. (attempt {attempt} of {attempts}). {traceback.format_exc()}"
-                )
-                ctx = assert_current_dbos_context()
-                ctx.get_current_span().add_event(
-                    f"Step attempt {attempt} failed",
-                    {
-                        "error": str(error),
-                        "retryIntervalSeconds": interval_seconds,
-                    },
-                )
-                return min(
-                    interval_seconds * (backoff_rate**attempt),
-                    max_retry_interval_seconds,
-                )
-
-            def record_step_result(func: Callable[[], R]) -> R:
-                ctx = assert_current_dbos_context()
+            with EnterDBOSStep(attributes) as ctx:
                 step_output: OperationResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
                     "output": None,
                     "error": None,
                 }
-
-                try:
-                    output = func()
-                    step_output["output"] = _serialization.serialize(output)
-                    return output
-                except Exception as error:
-                    step_output["error"] = _serialization.serialize_exception(error)
-                    raise
-                finally:
-                    dbos._sys_db.record_operation_result(step_output)
-
-            def check_existing_result() -> Optional[R]:
-                ctx = assert_current_dbos_context()
                 recorded_output = dbos._sys_db.check_operation_execution(
                     ctx.workflow_id, ctx.function_id
                 )
@@ -734,29 +637,57 @@ def decorate_step(
                         )
                         raise deserialized_error
                     elif recorded_output["output"] is not None:
-                        return cast(
-                            R, _serialization.deserialize(recorded_output["output"])
-                        )
+                        return _serialization.deserialize(recorded_output["output"])
                     else:
                         raise Exception("Output and error are both None")
                 else:
                     dbos.logger.debug(
                         f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
                     )
-                    return None
 
-                … (four deleted lines not preserved in this diff view)
+                output = None
+                error = None
+                local_max_attempts = max_attempts if retries_allowed else 1
+                max_retry_interval_seconds: float = 3600  # 1 Hour
+                local_interval_seconds = interval_seconds
+                for attempt in range(1, local_max_attempts + 1):
+                    try:
+                        output = func(*args, **kwargs)
+                        step_output["output"] = _serialization.serialize(output)
+                        error = None
+                        break
+                    except Exception as err:
+                        error = err
+                        if retries_allowed:
+                            dbos.logger.warning(
+                                f"Step being automatically retried. (attempt {attempt} of {local_max_attempts}). {traceback.format_exc()}"
+                            )
+                            ctx.get_current_span().add_event(
+                                f"Step attempt {attempt} failed",
+                                {
+                                    "error": str(error),
+                                    "retryIntervalSeconds": local_interval_seconds,
+                                },
+                            )
+                        if attempt == local_max_attempts:
+                            error = DBOSMaxStepRetriesExceeded()
+                        else:
+                            time.sleep(local_interval_seconds)
+                            local_interval_seconds = min(
+                                local_interval_seconds * backoff_rate,
+                                max_retry_interval_seconds,
+                            )
+
+                step_output["error"] = (
+                    _serialization.serialize_exception(error)
+                    if error is not None
+                    else None
                 )
+                dbos._sys_db.record_operation_result(step_output)
 
-                … (deleted composition lines not preserved in this diff view)
-                .also(EnterDBOSStep(attributes))
-            )
-            return outcome()
+                if error is not None:
+                    raise error
+                return output
 
         fi = get_or_create_func_info(func)
 
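The rewritten step body inlines retries as a plain loop: on failure it sleeps `local_interval_seconds`, multiplies the interval by `backoff_rate` capped at one hour, and substitutes `DBOSMaxStepRetriesExceeded` after the final attempt. With the decorator defaults (`interval_seconds=1.0`, `backoff_rate=2.0`, `max_attempts=3`) the waits are 1s then 2s. A small worked sketch of that schedule (illustrative, not dbos source):

def backoff_schedule(interval: float, rate: float, attempts: int, cap: float = 3600.0):
    waits = []
    for _ in range(attempts - 1):   # no sleep after the last failed attempt
        waits.append(interval)
        interval = min(interval * rate, cap)
    return waits

print(backoff_schedule(1.0, 2.0, 3))  # [1.0, 2.0], then DBOSMaxStepRetriesExceeded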
@@ -780,25 +711,16 @@ def decorate_step(
             assert tempwf
             return tempwf(*args, **kwargs)
 
-        def temp_wf_sync(*args: Any, **kwargs: Any) -> Any:
+        def temp_wf(*args: Any, **kwargs: Any) -> Any:
             return wrapper(*args, **kwargs)
 
-        async def temp_wf_async(*args: Any, **kwargs: Any) -> Any:
-            return await wrapper(*args, **kwargs)
-
-        # Other code in transact-py depends on the name of temporary workflow functions to be "temp_wf"
-        # so set the name of both sync and async temporary workflow functions explicitly
-        temp_wf_sync.__name__ = "temp_wf"
-        temp_wf_async.__name__ = "temp_wf"
-
-        temp_wf = temp_wf_async if inspect.iscoroutinefunction(func) else temp_wf_sync
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "step")
         dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
         wrapper.__orig_func = temp_wf  # type: ignore
 
-        return cast(…
+        return cast(F, wrapper)
 
     return decorator
@@ -810,8 +732,7 @@ def send(
     attributes: TracedAttributes = {
         "name": "send",
     }
-    with EnterDBOSStep(attributes):
-        ctx = assert_current_dbos_context()
+    with EnterDBOSStep(attributes) as ctx:
         dbos._sys_db.send(
             ctx.workflow_id,
             ctx.curr_step_function_id,
@@ -838,8 +759,7 @@ def recv(dbos: "DBOS", topic: Optional[str] = None, timeout_seconds: float = 60)
     attributes: TracedAttributes = {
         "name": "recv",
     }
-    with EnterDBOSStep(attributes):
-        ctx = assert_current_dbos_context()
+    with EnterDBOSStep(attributes) as ctx:
         ctx.function_id += 1  # Reserve for the sleep
         timeout_function_id = ctx.function_id
         return dbos._sys_db.recv(
@@ -864,8 +784,7 @@ def set_event(dbos: "DBOS", key: str, value: Any) -> None:
     attributes: TracedAttributes = {
         "name": "set_event",
     }
-    with EnterDBOSStep(attributes):
-        ctx = assert_current_dbos_context()
+    with EnterDBOSStep(attributes) as ctx:
         dbos._sys_db.set_event(
             ctx.workflow_id, ctx.curr_step_function_id, key, value
         )
@@ -886,8 +805,7 @@ def get_event(
     attributes: TracedAttributes = {
         "name": "get_event",
     }
-    with EnterDBOSStep(attributes):
-        ctx = assert_current_dbos_context()
+    with EnterDBOSStep(attributes) as ctx:
         ctx.function_id += 1
         timeout_function_id = ctx.function_id
         caller_ctx: GetEventWorkflowContext = {
dbos/_dbos.py
CHANGED

@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import asyncio
 import atexit
 import json
 import os
@@ -14,7 +13,6 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
-    Coroutine,
     Generic,
     List,
     Literal,
@@ -23,9 +21,6 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
-    Union,
-    cast,
-    overload,
 )
 
 from opentelemetry.trace import Span
@@ -76,7 +71,6 @@ else:
 from ._admin_server import AdminServer
 from ._app_db import ApplicationDatabase
 from ._context import (
-    DBOSContext,
     EnterDBOSStep,
     TracedAttributes,
     assert_current_dbos_context,
@@ -438,7 +432,7 @@ class DBOS:
     @classmethod
     def workflow(
         cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
-    ) -> Callable[[…
+    ) -> Callable[[F], F]:
         """Decorate a function for use as a DBOS workflow."""
         return decorate_workflow(_get_or_create_dbos_registry(), max_recovery_attempts)
 
@@ -463,7 +457,7 @@ class DBOS:
         interval_seconds: float = 1.0,
         max_attempts: int = 3,
         backoff_rate: float = 2.0,
-    ) -> Callable[[…
+    ) -> Callable[[F], F]:
         """
         Decorate and configure a function for use as a DBOS step.
 
@@ -548,36 +542,15 @@ class DBOS:
             f"{e.name} dependency not found. Please install {e.name} via your package manager."
         ) from e
 
-    @overload
-    @classmethod
-    def start_workflow(
-        cls,
-        func: Workflow[P, Coroutine[Any, Any, R]],
-        *args: P.args,
-        **kwargs: P.kwargs,
-    ) -> WorkflowHandle[R]: ...
-
-    @overload
     @classmethod
     def start_workflow(
         cls,
         func: Workflow[P, R],
         *args: P.args,
         **kwargs: P.kwargs,
-    ) -> WorkflowHandle[R]: ...
-
-    @classmethod
-    def start_workflow(
-        cls,
-        func: Workflow[P, Union[R, Coroutine[Any, Any, R]]],
-        *args: P.args,
-        **kwargs: P.kwargs,
     ) -> WorkflowHandle[R]:
         """Invoke a workflow function in the background, returning a handle to the ongoing execution."""
-        return cast(
-            WorkflowHandle[R],
-            start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs),
-        )
+        return start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs)
 
     @classmethod
     def get_workflow_status(cls, workflow_id: str) -> Optional[WorkflowStatus]:
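After this change `DBOS.start_workflow` has a single signature for synchronous workflows; the coroutine overloads are gone. A usage sketch consistent with the signatures above (assumes a configured DBOS application; setup omitted):

from dbos import DBOS

@DBOS.workflow()
def greet(name: str) -> str:
    return f"Hello, {name}!"

# Returns a WorkflowHandle[str] immediately; get_result() blocks for the output.
handle = DBOS.start_workflow(greet, "DBOS")
print(handle.get_result())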
@@ -629,13 +602,6 @@ class DBOS:
         """Send a message to a workflow execution."""
         return send(_get_dbos_instance(), destination_id, message, topic)
 
-    @classmethod
-    async def send_async(
-        cls, destination_id: str, message: Any, topic: Optional[str] = None
-    ) -> None:
-        """Send a message to a workflow execution."""
-        await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))
-
     @classmethod
     def recv(cls, topic: Optional[str] = None, timeout_seconds: float = 60) -> Any:
         """
@@ -646,25 +612,13 @@ class DBOS:
         """
         return recv(_get_dbos_instance(), topic, timeout_seconds)
 
-    @classmethod
-    async def recv_async(
-        cls, topic: Optional[str] = None, timeout_seconds: float = 60
-    ) -> Any:
-        """
-        Receive a workflow message.
-
-        This function is to be called from within a workflow.
-        `recv_async` will return the message sent on `topic`, asyncronously waiting if necessary.
-        """
-        return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))
-
     @classmethod
     def sleep(cls, seconds: float) -> None:
         """
         Sleep for the specified time (in seconds).
 
-        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
-        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
+        It is important to use `DBOS.sleep` (as opposed to any other sleep) within workflows,
+        as the `DBOS.sleep`s are durable and completed sleeps will be skipped during recovery.
         """
         if seconds <= 0:
             return
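The deleted `send_async`, `recv_async`, `sleep_async`, `set_event_async`, and `get_event_async` classmethods were thin `asyncio.to_thread` shims over their blocking counterparts. A caller on an event loop can recreate the same behavior; a sketch (this helper is not a dbos API):

import asyncio
from dbos import DBOS

async def recv_on_loop(topic=None, timeout_seconds: float = 60):
    # Offload the blocking DBOS.recv call to a worker thread,
    # exactly as the removed recv_async wrapper did.
    return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))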
@@ -677,8 +631,7 @@ class DBOS:
         attributes: TracedAttributes = {
             "name": "sleep",
         }
-        with EnterDBOSStep(attributes):
-            ctx = assert_current_dbos_context()
+        with EnterDBOSStep(attributes) as ctx:
             _get_dbos_instance()._sys_db.sleep(
                 ctx.workflow_id, ctx.curr_step_function_id, seconds
             )
@@ -686,49 +639,24 @@ class DBOS:
         # Cannot call it from outside of a workflow
         raise DBOSException("sleep() must be called from within a workflow")
 
-    @classmethod
-    async def sleep_async(cls, seconds: float) -> None:
-        """
-        Sleep for the specified time (in seconds).
-
-        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
-        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
-        """
-        await asyncio.to_thread(lambda: DBOS.sleep(seconds))
-
     @classmethod
     def set_event(cls, key: str, value: Any) -> None:
         """
         Set a workflow event.
 
+        This function is to be called from within a workflow.
+
         `set_event` sets the `value` of `key` for the current workflow instance ID.
         This `value` can then be retrieved by other functions, using `get_event` below.
-        If the event `key` already exists, its `value` is updated.
-        This function can only be called from within a workflow.
-
-        Args:
-            key(str): The event key / name within the workflow
-            value(Any): A serializable value to associate with the key
-
-        """
-        return set_event(_get_dbos_instance(), key, value)
-
-    @classmethod
-    async def set_event_async(cls, key: str, value: Any) -> None:
-        """
-        Set a workflow event.
 
-
-        This `value` can then be retrieved by other functions, using `get_event` below.
-        If the event `key` already exists, its `value` is updated.
-        This function can only be called from within a workflow.
+        Each workflow invocation should only call set_event once per `key`.
 
         Args:
             key(str): The event key / name within the workflow
             value(Any): A serializable value to associate with the key
 
         """
-        … (deleted line not preserved in this diff view)
+        return set_event(_get_dbos_instance(), key, value)
 
     @classmethod
     def get_event(cls, workflow_id: str, key: str, timeout_seconds: float = 60) -> Any:
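The revised docstring states that `set_event` must be called from within a workflow and only once per `key` per invocation; `get_event` (below) is the reader side. A sketch of the pairing (assumes a configured DBOS application and a known workflow ID; names and URL are illustrative):

from dbos import DBOS

@DBOS.workflow()
def checkout(order_id: str) -> str:
    # Publish once per key; readers can pick this up via DBOS.get_event.
    DBOS.set_event("payment_url", f"https://pay.example.com/{order_id}")
    return order_id

# Elsewhere (e.g. a request handler), with the workflow's ID in hand:
# url = DBOS.get_event(workflow_id, "payment_url", timeout_seconds=60)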
@@ -745,25 +673,6 @@ class DBOS:
         """
         return get_event(_get_dbos_instance(), workflow_id, key, timeout_seconds)
 
-    @classmethod
-    async def get_event_async(
-        cls, workflow_id: str, key: str, timeout_seconds: float = 60
-    ) -> Any:
-        """
-        Return the `value` of a workflow event, waiting for it to occur if necessary.
-
-        `get_event_async` waits for a corresponding `set_event` by the workflow with ID `workflow_id` with the same `key`.
-
-        Args:
-            workflow_id(str): The workflow instance ID that is expected to call `set_event` on `key`
-            key(str): The event key / name within the workflow
-            timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called byt the workflow
-
-        """
-        return await asyncio.to_thread(
-            lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
-        )
-
     @classmethod
     def execute_workflow_id(cls, workflow_id: str) -> WorkflowHandle[Any]:
         """Execute a workflow by ID (for recovery)."""
{dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/RECORD
CHANGED

@@ -1,15 +1,15 @@
-dbos-0.16.0a2.dist-info/METADATA,sha256=…
-dbos-0.16.0a2.dist-info/WHEEL,sha256=…
-dbos-0.16.0a2.dist-info/entry_points.txt,sha256=…
-dbos-0.16.0a2.dist-info/licenses/LICENSE,sha256=…
+dbos-0.16.1.dist-info/METADATA,sha256=DykTz31jHMRMUpYD1WXMx-LKRgiunj6uMjwLo7pcqsY,5020
+dbos-0.16.1.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+dbos-0.16.1.dist-info/entry_points.txt,sha256=z6GcVANQV7Uw_82H9Ob2axJX6V3imftyZsljdh-M1HU,54
+dbos-0.16.1.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
 dbos/_admin_server.py,sha256=DOgzVp9kmwiebQqmJB1LcrZnGTxSMbZiGXdenc1wZDg,3163
 dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
 dbos/_context.py,sha256=KV3fd3-Rv6EWrYDUdHARxltSlNZGNtQtNSqeQ-gkXE8,18049
-dbos/_core.py,sha256=…
+dbos/_core.py,sha256=HUteo2HP9C4UvB-y41xwUphLZyYRTdb0sW5XqZ6QPAY,31167
 dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
-dbos/_dbos.py,sha256=…
+dbos/_dbos.py,sha256=iYI-iY3kp_N0lNYYVKwssea7dgD7vsYHn2uBIhmZ2lM,31478
 dbos/_dbos_config.py,sha256=f37eccN3JpCA32kRdQ4UsERjhYGcdLWv-N21ijnDZmY,6406
 dbos/_error.py,sha256=UETk8CoZL-TO2Utn1-E7OSWelhShWmKM-fOlODMR9PE,3893
 dbos/_fastapi.py,sha256=iyefCZq-ZDKRUjN_rgYQmFmyvWf4gPrSlC6CLbfq4a8,3419
@@ -25,7 +25,6 @@ dbos/_migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEUL
 dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-mUro43wGhsg5wcQWKZPRHD6jw8R5pVc,986
 dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
-dbos/_outcome.py,sha256=4BxjNfVhjI-M_m8eCWimpvkO8m-knAEyoDscFYq6SQ4,6237
 dbos/_queue.py,sha256=hAXwrfBmtv6BGrlmFq-Ol6b_ED-HDaYqSSxumMJC6Xo,1938
 dbos/_recovery.py,sha256=jbzGYxICA2drzyzlBSy2UiXhKV_16tBVacKQdTkqf-w,2008
 dbos/_registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642
@@ -52,4 +51,4 @@ dbos/cli.py,sha256=em1uAxrp5yyg53V7ZpmHFtqD6OJp2cMJkG9vGJPoFTA,10904
 dbos/dbos-config.schema.json,sha256=tS7x-bdFbFvpobcs3pIOhwun3yr_ndvTEYOn4BJjTzs,5889
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.16.0a2.dist-info/RECORD,,
+dbos-0.16.1.dist-info/RECORD,,
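Each RECORD row is `path,sha256=<digest>,<size>`, where the digest is an unpadded URL-safe base64 SHA-256 of the file, per the wheel spec. A sketch for verifying one entry against an installed copy (illustrative):

import base64
import hashlib
import pathlib

def record_hash(path: str) -> str:
    digest = hashlib.sha256(pathlib.Path(path).read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# For the new wheel, record_hash(".../dbos/_core.py") should equal
# "HUteo2HP9C4UvB-y41xwUphLZyYRTdb0sW5XqZ6QPAY" (31167 bytes).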
dbos/_outcome.py
DELETED

@@ -1,190 +0,0 @@
-import asyncio
-import contextlib
-import inspect
-import time
-from typing import Any, Callable, Coroutine, Optional, Protocol, TypeVar, Union, cast
-
-T = TypeVar("T")
-R = TypeVar("R")
-
-
-# define Outcome protocol w/ common composition methods
-class Outcome(Protocol[T]):
-
-    def wrap(
-        self, before: Callable[[], Callable[[Callable[[], T]], R]]
-    ) -> "Outcome[R]": ...
-
-    def then(self, next: Callable[[Callable[[], T]], R]) -> "Outcome[R]": ...
-
-    def also(
-        self, cm: contextlib.AbstractContextManager[Any, bool]
-    ) -> "Outcome[T]": ...
-
-    def retry(
-        self,
-        attempts: int,
-        on_exception: Callable[[int, BaseException], float],
-        exceeded_retries: Callable[[int], BaseException],
-    ) -> "Outcome[T]": ...
-
-    def intercept(self, interceptor: Callable[[], Optional[T]]) -> "Outcome[T]": ...
-
-    def __call__(self) -> Union[T, Coroutine[Any, Any, T]]: ...
-
-    # Helper function to create an Immediate or Pending Result, depending on if func is a coroutine function or not
-    @staticmethod
-    def make(func: Callable[[], Union[T, Coroutine[Any, Any, T]]]) -> "Outcome[T]":
-        return (
-            Pending(cast(Callable[[], Coroutine[Any, Any, T]], func))
-            if inspect.iscoroutinefunction(func)
-            else Immediate(cast(Callable[[], T], func))
-        )
-
-
-# Immediate Outcome - for composing non-async functions
-class Immediate(Outcome[T]):
-    __slots__ = "_func"
-
-    def __init__(self, func: Callable[[], T]):
-        self._func = func
-
-    def then(self, next: Callable[[Callable[[], T]], R]) -> "Immediate[R]":
-        return Immediate(lambda: next(self._func))
-
-    def wrap(
-        self, before: Callable[[], Callable[[Callable[[], T]], R]]
-    ) -> "Immediate[R]":
-        return Immediate(lambda: before()(self._func))
-
-    @staticmethod
-    def _intercept(func: Callable[[], T], interceptor: Callable[[], Optional[T]]) -> T:
-        intercepted = interceptor()
-        return intercepted if intercepted else func()
-
-    def intercept(self, interceptor: Callable[[], Optional[T]]) -> "Immediate[T]":
-        return Immediate[T](lambda: Immediate._intercept(self._func, interceptor))
-
-    @staticmethod
-    def _also(func: Callable[[], T], cm: contextlib.AbstractContextManager[Any, bool]) -> T:  # type: ignore
-        with cm:
-            return func()
-
-    def also(self, cm: contextlib.AbstractContextManager[Any, bool]) -> "Immediate[T]":
-        return Immediate[T](lambda: Immediate._also(self._func, cm))
-
-    @staticmethod
-    def _retry(
-        func: Callable[[], T],
-        attempts: int,
-        on_exception: Callable[[int, BaseException], float],
-        exceeded_retries: Callable[[int], BaseException],
-    ) -> T:
-        for i in range(attempts):
-            try:
-                return func()
-            except Exception as exp:
-                wait_time = on_exception(i, exp)
-                time.sleep(wait_time)
-
-        raise exceeded_retries(attempts)
-
-    def retry(
-        self,
-        attempts: int,
-        on_exception: Callable[[int, BaseException], float],
-        exceeded_retries: Callable[[int], BaseException],
-    ) -> "Immediate[T]":
-        assert attempts > 0
-        return Immediate[T](
-            lambda: Immediate._retry(
-                self._func, attempts, on_exception, exceeded_retries
-            )
-        )
-
-    def __call__(self) -> T:
-        return self._func()
-
-
-# Pending Outcome - for composing async functions
-class Pending(Outcome[T]):
-    __slots__ = "_func"
-
-    def __init__(self, func: Callable[[], Coroutine[Any, Any, T]]):
-        self._func = func
-
-    # Helper method in order to raise an exception in a lambda
-    @staticmethod
-    def _raise(ex: BaseException) -> T:
-        raise ex
-
-    async def _wrap(
-        func: Callable[[], Coroutine[Any, Any, T]],
-        before: Callable[[], Callable[[Callable[[], T]], R]],
-    ) -> R:
-        after = await asyncio.to_thread(before)
-        try:
-            value = await func()
-            return await asyncio.to_thread(after, lambda: value)
-        except BaseException as exp:
-            return await asyncio.to_thread(after, lambda: Pending._raise(exp))
-
-    def wrap(
-        self, before: Callable[[], Callable[[Callable[[], T]], R]]
-    ) -> "Pending[R]":
-        return Pending[R](lambda: Pending._wrap(self._func, before))
-
-    def then(self, next: Callable[[Callable[[], T]], R]) -> "Pending[R]":
-        return Pending[R](lambda: Pending._wrap(self._func, lambda: next))
-
-    @staticmethod
-    async def _also(  # type: ignore
-        func: Callable[[], Coroutine[Any, Any, T]],
-        cm: contextlib.AbstractContextManager[Any, bool],
-    ) -> T:
-        with cm:
-            return await func()
-
-    def also(self, cm: contextlib.AbstractContextManager[Any, bool]) -> "Pending[T]":
-        return Pending[T](lambda: Pending._also(self._func, cm))
-
-    @staticmethod
-    async def _intercept(
-        func: Callable[[], Coroutine[Any, Any, T]],
-        interceptor: Callable[[], Optional[T]],
-    ) -> T:
-        intercepted = await asyncio.to_thread(interceptor)
-        return intercepted if intercepted else await func()
-
-    def intercept(self, interceptor: Callable[[], Optional[T]]) -> "Pending[T]":
-        return Pending[T](lambda: Pending._intercept(self._func, interceptor))
-
-    @staticmethod
-    async def _retry(
-        func: Callable[[], Coroutine[Any, Any, T]],
-        attempts: int,
-        on_exception: Callable[[int, BaseException], float],
-        exceeded_retries: Callable[[int], BaseException],
-    ) -> T:
-        for i in range(attempts):
-            try:
-                return await func()
-            except Exception as exp:
-                wait_time = on_exception(i, exp)
-                await asyncio.sleep(wait_time)
-
-        raise exceeded_retries(attempts)
-
-    def retry(
-        self,
-        attempts: int,
-        on_exception: Callable[[int, BaseException], float],
-        exceeded_retries: Callable[[int], BaseException],
-    ) -> "Pending[T]":
-        assert attempts > 0
-        return Pending[T](
-            lambda: Pending._retry(self._func, attempts, on_exception, exceeded_retries)
-        )
-
-    async def __call__(self) -> T:
-        return await self._func()
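For context, this module provided the combinator style that 0.16.1 rips out of _core.py: build an `Immediate` (sync) or `Pending` (async) outcome with `Outcome.make`, chain `retry`/`also`/`then`/`intercept`, then call the result. A sketch of that usage against the deleted code (runs only with 0.16.0a2 installed, where `dbos._outcome` still exists):

from dbos._outcome import Outcome  # present in 0.16.0a2, deleted in 0.16.1

def flaky() -> int:
    return 42

outcome = Outcome[int].make(flaky).retry(
    3,
    on_exception=lambda attempt, exc: 0.1,                   # seconds to wait
    exceeded_retries=lambda n: RuntimeError(f"failed {n}x"),
)
print(outcome())  # 42 (an Immediate; a Pending would return a coroutine)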
{dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/WHEEL
File without changes

{dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/entry_points.txt
File without changes

{dbos-0.16.0a2.dist-info → dbos-0.16.1.dist-info}/licenses/LICENSE
File without changes