dbos 0.15.0a2__py3-none-any.whl → 0.16.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_context.py +16 -6
- dbos/_core.py +175 -93
- dbos/_dbos.py +101 -10
- dbos/_outcome.py +190 -0
- {dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/METADATA +1 -1
- {dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/RECORD +9 -8
- {dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/WHEEL +0 -0
- {dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/entry_points.txt +0 -0
- {dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/licenses/LICENSE +0 -0
dbos/_context.py
CHANGED
@@ -98,18 +98,27 @@ class DBOSContext:
         wfid = str(uuid.uuid4())
         return wfid

-    def start_workflow(…
+    def start_workflow(
+        self,
+        wfid: Optional[str],
+        attributes: TracedAttributes,
+        is_temp_workflow: bool = False,
+    ) -> None:
         if wfid is None or len(wfid) == 0:
             wfid = self.assign_workflow_id()
             self.id_assigned_for_next_workflow = ""
         self.workflow_id = wfid
         self.function_id = 0
-        …
+        if not is_temp_workflow:
+            self._start_span(attributes)

-    def end_workflow(…
+    def end_workflow(
+        self, exc_value: Optional[BaseException], is_temp_workflow: bool = False
+    ) -> None:
         self.workflow_id = ""
         self.function_id = -1
-        …
+        if not is_temp_workflow:
+            self._end_span(exc_value)

     def is_within_workflow(self) -> bool:
         return len(self.workflow_id) > 0

@@ -349,6 +358,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.created_ctx = False
         self.attributes = attributes
+        self.is_temp_workflow = attributes["name"] == "temp_wf"

     def __enter__(self) -> DBOSContext:
         # Code to create a basic context

@@ -359,7 +369,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         _set_local_dbos_context(ctx)
         assert not ctx.is_within_workflow()
         ctx.start_workflow(
-            None, self.attributes
+            None, self.attributes, self.is_temp_workflow
         )  # Will get from the context's next workflow ID
         return ctx

@@ -371,7 +381,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     ) -> Literal[False]:
         ctx = assert_current_dbos_context()
         assert ctx.is_within_workflow()
-        ctx.end_workflow(exc_value)
+        ctx.end_workflow(exc_value, self.is_temp_workflow)
         # Code to clean up the basic context if we created it
         if self.created_ctx:
             _clear_local_dbos_context()
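The `is_temp_workflow` flag exists because a step or transaction invoked outside any workflow is auto-wrapped in a temporary workflow named `temp_wf`; with this change that wrapper no longer opens or closes its own tracing span, so the trace shows only the step's span. An illustrative sketch of the user-visible situation (hypothetical step, not part of the diff):

```python
from dbos import DBOS

@DBOS.step()
def fetch_greeting(name: str) -> str:
    return f"hello, {name}"

# Called outside a workflow: DBOS runs this inside an invisible "temp_wf"
# wrapper, which after this release contributes no extra span to the trace.
greeting = fetch_greeting("dbos")
```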
dbos/_core.py
CHANGED
@@ -1,10 +1,27 @@
+import asyncio
+import functools
+import inspect
 import json
 import sys
 import time
 import traceback
 from concurrent.futures import Future
 from functools import wraps
-from typing import …
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Coroutine,
+    Generic,
+    Optional,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
+from dbos._outcome import Immediate, Outcome, Pending

 from ._app_db import ApplicationDatabase, TransactionResultInternal

@@ -181,39 +198,38 @@ def _init_workflow(
     return status


-def …
     dbos: "DBOS",
     status: WorkflowStatusInternal,
-    …
-    dbos._sys_db.…
-    …
-        raise
+def _get_wf_invoke_func(
+) -> Callable[[Callable[[], R]], R]:
+    def persist(func: Callable[[], R]) -> R:
+        try:
+            output = func()
+            status["status"] = "SUCCESS"
+            status["output"] = _serialization.serialize(output)
+            if status["queue_name"] is not None:
+                queue = dbos._registry.queue_info_map[status["queue_name"]]
+                dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+            dbos._sys_db.buffer_workflow_status(status)
+            return output
+        except DBOSWorkflowConflictIDError:
+            # Retrieve the workflow handle and wait for the result.
+            # Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
+            wf_handle: "WorkflowHandle[R]" = dbos.retrieve_workflow(
+                status["workflow_uuid"], existing_workflow=False
+            )
+            output = wf_handle.get_result()
+            return output
+        except Exception as error:
+            status["status"] = "ERROR"
+            status["error"] = _serialization.serialize_exception(error)
+            if status["queue_name"] is not None:
+                queue = dbos._registry.queue_info_map[status["queue_name"]]
+                dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+            dbos._sys_db.update_workflow_status(status)
+            raise

-    return …
+    return persist


 def _execute_workflow_wthread(
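The new `_get_wf_invoke_func` replaces the old executor (whose removed body is truncated in this view) with a factory returning a `persist` closure. Because `persist` takes a zero-argument thunk rather than the workflow function plus arguments, the same completion bookkeeping can be composed onto synchronous and asynchronous workflows alike via `Outcome.then`. A simplified, self-contained sketch of that shape (names here are illustrative, not the package's API):

```python
from typing import Callable, Dict, TypeVar

R = TypeVar("R")

def make_persist(status: Dict[str, str]) -> Callable[[Callable[[], R]], R]:
    # Returns a transformer that runs a workflow thunk and records its outcome,
    # mirroring the persist() closure in the hunk above.
    def persist(thunk: Callable[[], R]) -> R:
        try:
            output = thunk()
            status["status"] = "SUCCESS"
            return output
        except Exception:
            status["status"] = "ERROR"
            raise
    return persist

status: Dict[str, str] = {}
print(make_persist(status)(lambda: "done"), status)  # done {'status': 'SUCCESS'}
```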
@@ -231,7 +247,15 @@ def _execute_workflow_wthread(
     with DBOSContextSwap(ctx):
         with EnterDBOSWorkflow(attributes):
             try:
-                …
+                result = (
+                    Outcome[R]
+                    .make(functools.partial(func, *args, **kwargs))
+                    .then(_get_wf_invoke_func(dbos, status))
+                )
+                if isinstance(result, Immediate):
+                    return cast(Immediate[R], result)()
+                else:
+                    return asyncio.run(cast(Pending[R], result)())
             except Exception:
                 dbos.logger.error(
                     f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
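On the worker thread, the composed `Outcome` is either an `Immediate` (synchronous workflow) that can be called directly, or a `Pending` (async workflow) whose call produces a coroutine that must be driven with `asyncio.run`. A generic sketch of that dispatch, assuming only that calling the outcome may yield a coroutine:

```python
import asyncio
import inspect
from typing import Any

def drive(outcome: Any) -> Any:
    # Immediate-style outcomes return the value directly; Pending-style
    # outcomes return a coroutine, which this worker drives on its own loop.
    value = outcome()
    return asyncio.run(value) if inspect.iscoroutine(value) else value

print(drive(lambda: 42))  # 42

async def answer() -> int:
    return 42

print(drive(answer))  # 42 (coroutine driven via asyncio.run)
```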
@@ -305,6 +329,18 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
     )


+@overload
+def start_workflow(
+    dbos: "DBOS",
+    func: "Workflow[P, Coroutine[Any, Any, R]]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
+    *args: P.args,
+    **kwargs: P.kwargs,
+) -> "WorkflowHandle[R]": ...
+
+
+@overload
 def start_workflow(
     dbos: "DBOS",
     func: "Workflow[P, R]",

@@ -312,6 +348,16 @@ def start_workflow(
     execute_workflow: bool,
     *args: P.args,
     **kwargs: P.kwargs,
+) -> "WorkflowHandle[R]": ...
+
+
+def start_workflow(
+    dbos: "DBOS",
+    func: "Workflow[P, Union[R, Coroutine[Any, Any, R]]]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
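The two `@overload` declarations plus the `Union` implementation give async workflows precise handle types: passing a coroutine-returning function resolves `R` to the awaited result rather than to the coroutine itself. A self-contained toy model of the same pattern (names are hypothetical):

```python
import asyncio
import inspect
from typing import Any, Callable, Coroutine, TypeVar, Union, cast, overload

R = TypeVar("R")

@overload
def run(f: Callable[[], Coroutine[Any, Any, R]]) -> R: ...
@overload
def run(f: Callable[[], R]) -> R: ...
def run(f: Callable[[], Union[R, Coroutine[Any, Any, R]]]) -> R:
    value = f()
    if inspect.iscoroutine(value):
        # Resolve the coroutine so both overloads return a plain R.
        return cast(R, asyncio.run(value))
    return cast(R, value)

async def greet() -> str:
    return "hi"

text = run(greet)  # a type checker sees str here, not Coroutine[Any, Any, str]
```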
@@ -396,16 +442,16 @@

 def workflow_wrapper(
     dbosreg: "DBOSRegistry",
-    func: …
+    func: Callable[P, R],
     max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
-) -> …
+) -> Callable[P, R]:
     func.__orig_func = func  # type: ignore

     fi = get_or_create_func_info(func)
     fi.max_recovery_attempts = max_recovery_attempts

     @wraps(func)
-    def wrapper(*args: Any, **kwargs: Any) -> …
+    def wrapper(*args: Any, **kwargs: Any) -> R:
         if dbosreg.dbos is None:
             raise DBOSException(
                 f"Function {func.__name__} invoked before DBOS initialized"

@@ -425,7 +471,10 @@ def workflow_wrapper(
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
-        …
+
+        wfOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
+
+        def init_wf() -> Callable[[Callable[[], R]], R]:
             ctx = assert_current_dbos_context()  # Now the child ctx
             status = _init_workflow(
                 dbos,

@@ -441,16 +490,23 @@ def workflow_wrapper(
             dbos.logger.debug(
                 f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
             )
-            return _execute_workflow(dbos, status, func, *args, **kwargs)

-        …
-        …
+            return _get_wf_invoke_func(dbos, status)
+
+        outcome = (
+            wfOutcome.wrap(init_wf)
+            .also(DBOSAssumeRole(rr))
+            .also(enterWorkflowCtxMgr(attributes))
+        )
+        return outcome()  # type: ignore
+
+    return wrapper


 def decorate_workflow(
     reg: "DBOSRegistry", max_recovery_attempts: int
-) -> Callable[[…
-    def _workflow_decorator(func: …
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+    def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func
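In `workflow_wrapper` the composition order matters: the context managers attached with `.also(...)` (role assumption, workflow entry) enclose the whole computation, and the `.wrap(init_wf)` setup runs inside them just before the body, returning the `persist` transformer to apply to the result. A small runnable demonstration of that layering, calling the new `dbos._outcome` module directly (illustrative only; application code never touches this module):

```python
import contextlib
from dbos._outcome import Outcome

@contextlib.contextmanager
def scope(name: str):
    print(f"enter {name}")
    yield
    print(f"exit {name}")

outcome = (
    Outcome[str]
    .make(lambda: "body")
    .wrap(lambda: (lambda thunk: thunk().upper()))  # setup runs inside the scope
    .also(scope("workflow-context"))
)
print(outcome())
# enter workflow-context
# exit workflow-context
# BODY
```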
@@ -473,7 +529,8 @@ def decorate_transaction(
             "name": func.__name__,
             "operationType": OperationType.TRANSACTION.value,
         }
-        with EnterDBOSTransaction(session, attributes=attributes) …
+        with EnterDBOSTransaction(session, attributes=attributes):
+            ctx = assert_current_dbos_context()
             txn_output: TransactionResultInternal = {
                 "workflow_uuid": ctx.workflow_id,
                 "function_id": ctx.function_id,

@@ -562,6 +619,11 @@ def decorate_transaction(
                 raise
         return output

+    if inspect.iscoroutinefunction(func):
+        raise DBOSException(
+            f"Function {func.__name__} is a coroutine function, but DBOS.transaction does not support coroutine functions"
+        )
+
     fi = get_or_create_func_info(func)

     @wraps(func)
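Transactions remain synchronous-only: decorating an `async def` function now fails fast with a clear error instead of misbehaving at runtime. Illustrative usage (hypothetical function names):

```python
from dbos import DBOS

@DBOS.transaction()
def insert_row(name: str) -> None:  # OK: transactions must be plain functions
    ...

# This would now raise DBOSException at decoration time:
#
# @DBOS.transaction()
# async def insert_row_async(name: str) -> None:
#     ...
```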
@@ -603,8 +665,8 @@ def decorate_step(
     interval_seconds: float = 1.0,
     max_attempts: int = 3,
     backoff_rate: float = 2.0,
-) -> Callable[[…
-    def decorator(func: …
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+    def decorator(func: Callable[P, R]) -> Callable[P, R]:

         def invoke_step(*args: Any, **kwargs: Any) -> Any:
             if dbosreg.dbos is None:

@@ -617,13 +679,48 @@ def decorate_step(
                 "name": func.__name__,
                 "operationType": OperationType.STEP.value,
             }
-            …
+
+            attempts = max_attempts if retries_allowed else 1
+            max_retry_interval_seconds: float = 3600  # 1 Hour
+
+            def on_exception(attempt: int, error: BaseException) -> float:
+                dbos.logger.warning(
+                    f"Step being automatically retried. (attempt {attempt} of {attempts}). {traceback.format_exc()}"
+                )
+                ctx = assert_current_dbos_context()
+                ctx.get_current_span().add_event(
+                    f"Step attempt {attempt} failed",
+                    {
+                        "error": str(error),
+                        "retryIntervalSeconds": interval_seconds,
+                    },
+                )
+                return min(
+                    interval_seconds * (backoff_rate**attempt),
+                    max_retry_interval_seconds,
+                )
+
+            def record_step_result(func: Callable[[], R]) -> R:
+                ctx = assert_current_dbos_context()
                 step_output: OperationResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
                     "output": None,
                     "error": None,
                 }
+
+                try:
+                    output = func()
+                    step_output["output"] = _serialization.serialize(output)
+                    return output
+                except Exception as error:
+                    step_output["error"] = _serialization.serialize_exception(error)
+                    raise
+                finally:
+                    dbos._sys_db.record_operation_result(step_output)
+
+            def check_existing_result() -> Optional[R]:
+                ctx = assert_current_dbos_context()
                 recorded_output = dbos._sys_db.check_operation_execution(
                     ctx.workflow_id, ctx.function_id
                 )
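The retry policy is now declarative: `on_exception` logs, annotates the current span, and returns the next sleep interval, `min(interval_seconds * backoff_rate**attempt, 3600)` with a one-hour cap. With the decorator defaults the delays after failed attempts 0, 1, and 2 work out as follows:

```python
interval_seconds, backoff_rate = 1.0, 2.0  # decorator defaults
max_retry_interval_seconds = 3600.0        # 1-hour cap from the code above

delays = [
    min(interval_seconds * backoff_rate**attempt, max_retry_interval_seconds)
    for attempt in range(3)  # max_attempts defaults to 3
]
print(delays)  # [1.0, 2.0, 4.0]
```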
@@ -637,57 +734,29 @@ def decorate_step(
                     )
                     raise deserialized_error
                 elif recorded_output["output"] is not None:
-                    return …
+                    return cast(
+                        R, _serialization.deserialize(recorded_output["output"])
+                    )
                 else:
                     raise Exception("Output and error are both None")
             else:
                 dbos.logger.debug(
                     f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
                 )
+                return None

-            …
-            local_interval_seconds = interval_seconds
-            for attempt in range(1, local_max_attempts + 1):
-                try:
-                    output = func(*args, **kwargs)
-                    step_output["output"] = _serialization.serialize(output)
-                    error = None
-                    break
-                except Exception as err:
-                    error = err
-                    if retries_allowed:
-                        dbos.logger.warning(
-                            f"Step being automatically retried. (attempt {attempt} of {local_max_attempts}). {traceback.format_exc()}"
-                        )
-                        ctx.get_current_span().add_event(
-                            f"Step attempt {attempt} failed",
-                            {
-                                "error": str(error),
-                                "retryIntervalSeconds": local_interval_seconds,
-                            },
-                        )
-                    if attempt == local_max_attempts:
-                        error = DBOSMaxStepRetriesExceeded()
-                    else:
-                        time.sleep(local_interval_seconds)
-                        local_interval_seconds = min(
-                            local_interval_seconds * backoff_rate,
-                            max_retry_interval_seconds,
-                        )
-
-            step_output["error"] = (
-                _serialization.serialize_exception(error)
-                if error is not None
-                else None
+            stepOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
+            if retries_allowed:
+                stepOutcome = stepOutcome.retry(
+                    max_attempts, on_exception, lambda i: DBOSMaxStepRetriesExceeded()
                 )
-            dbos._sys_db.record_operation_result(step_output)

-            …
+            outcome = (
+                stepOutcome.then(record_step_result)
+                .intercept(check_existing_result)
+                .also(EnterDBOSStep(attributes))
+            )
+            return outcome()

         fi = get_or_create_func_info(func)
@@ -711,16 +780,25 @@ def decorate_step(
         assert tempwf
         return tempwf(*args, **kwargs)

-    def …
+    def temp_wf_sync(*args: Any, **kwargs: Any) -> Any:
         return wrapper(*args, **kwargs)

+    async def temp_wf_async(*args: Any, **kwargs: Any) -> Any:
+        return await wrapper(*args, **kwargs)
+
+    # Other code in transact-py depends on the name of temporary workflow functions to be "temp_wf"
+    # so set the name of both sync and async temporary workflow functions explicitly
+    temp_wf_sync.__name__ = "temp_wf"
+    temp_wf_async.__name__ = "temp_wf"
+
+    temp_wf = temp_wf_async if inspect.iscoroutinefunction(func) else temp_wf_sync
     wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
     set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
     set_temp_workflow_type(temp_wf, "step")
     dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
     wrapper.__orig_func = temp_wf  # type: ignore

-    return cast(…
+    return cast(Callable[P, R], wrapper)

     return decorator
@@ -732,7 +810,8 @@ def send(
     attributes: TracedAttributes = {
         "name": "send",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         dbos._sys_db.send(
             ctx.workflow_id,
             ctx.curr_step_function_id,

@@ -759,7 +838,8 @@ def recv(dbos: "DBOS", topic: Optional[str] = None, timeout_seconds: float = 60)
     attributes: TracedAttributes = {
         "name": "recv",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         ctx.function_id += 1  # Reserve for the sleep
         timeout_function_id = ctx.function_id
         return dbos._sys_db.recv(

@@ -784,7 +864,8 @@ def set_event(dbos: "DBOS", key: str, value: Any) -> None:
     attributes: TracedAttributes = {
         "name": "set_event",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         dbos._sys_db.set_event(
             ctx.workflow_id, ctx.curr_step_function_id, key, value
         )

@@ -805,7 +886,8 @@ def get_event(
     attributes: TracedAttributes = {
         "name": "get_event",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         ctx.function_id += 1
         timeout_function_id = ctx.function_id
         caller_ctx: GetEventWorkflowContext = {
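All of `send`, `recv`, `set_event`, and `get_event` switch from binding the context out of the `with` expression (the removed form is truncated in this view) to entering the step scope first and then fetching the ambient context explicitly via `assert_current_dbos_context()`. A self-contained sketch of that general pattern, using a plain `contextvars` stand-in rather than DBOS internals:

```python
import contextlib
import contextvars
from typing import Dict, Iterator

_current: contextvars.ContextVar[Dict[str, str]] = contextvars.ContextVar("ctx")

@contextlib.contextmanager
def enter_step(name: str) -> Iterator[None]:
    # Installs ambient state for the duration of the step, like EnterDBOSStep.
    token = _current.set({"name": name})
    try:
        yield
    finally:
        _current.reset(token)

with enter_step("send"):
    ctx = _current.get()  # explicit lookup, like assert_current_dbos_context()
    print(ctx["name"])    # send
```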
dbos/_dbos.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import asyncio
 import atexit
 import json
 import os

@@ -13,6 +14,7 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
+    Coroutine,
     Generic,
     List,
     Literal,

@@ -21,6 +23,9 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
+    Union,
+    cast,
+    overload,
 )

 from opentelemetry.trace import Span

@@ -71,6 +76,7 @@ else:
 from ._admin_server import AdminServer
 from ._app_db import ApplicationDatabase
 from ._context import (
+    DBOSContext,
     EnterDBOSStep,
     TracedAttributes,
     assert_current_dbos_context,
@@ -432,7 +438,7 @@ class DBOS:
     @classmethod
     def workflow(
         cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
-    ) -> Callable[[…
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
         """Decorate a function for use as a DBOS workflow."""
         return decorate_workflow(_get_or_create_dbos_registry(), max_recovery_attempts)

@@ -457,7 +463,7 @@ class DBOS:
         interval_seconds: float = 1.0,
         max_attempts: int = 3,
         backoff_rate: float = 2.0,
-    ) -> Callable[[…
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
         """
         Decorate and configure a function for use as a DBOS step.
@@ -542,15 +548,36 @@ class DBOS:
                 f"{e.name} dependency not found. Please install {e.name} via your package manager."
             ) from e

+    @overload
+    @classmethod
+    def start_workflow(
+        cls,
+        func: Workflow[P, Coroutine[Any, Any, R]],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> WorkflowHandle[R]: ...
+
+    @overload
     @classmethod
     def start_workflow(
         cls,
         func: Workflow[P, R],
         *args: P.args,
         **kwargs: P.kwargs,
+    ) -> WorkflowHandle[R]: ...
+
+    @classmethod
+    def start_workflow(
+        cls,
+        func: Workflow[P, Union[R, Coroutine[Any, Any, R]]],
+        *args: P.args,
+        **kwargs: P.kwargs,
     ) -> WorkflowHandle[R]:
         """Invoke a workflow function in the background, returning a handle to the ongoing execution."""
-        return …
+        return cast(
+            WorkflowHandle[R],
+            start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs),
+        )

     @classmethod
     def get_workflow_status(cls, workflow_id: str) -> Optional[WorkflowStatus]:
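With the overloads in place, starting an `async def` workflow yields a handle typed by the awaited result. A hedged usage sketch (hypothetical workflow):

```python
from dbos import DBOS

@DBOS.workflow()
async def charge(amount_cents: int) -> str:
    return f"charged {amount_cents}"

# Typed as WorkflowHandle[str], not WorkflowHandle[Coroutine[Any, Any, str]]:
handle = DBOS.start_workflow(charge, 1999)
receipt: str = handle.get_result()
```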
@@ -602,6 +629,13 @@ class DBOS:
         """Send a message to a workflow execution."""
         return send(_get_dbos_instance(), destination_id, message, topic)

+    @classmethod
+    async def send_async(
+        cls, destination_id: str, message: Any, topic: Optional[str] = None
+    ) -> None:
+        """Send a message to a workflow execution."""
+        await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))
+
     @classmethod
     def recv(cls, topic: Optional[str] = None, timeout_seconds: float = 60) -> Any:
         """

@@ -612,13 +646,25 @@ class DBOS:
         """
         return recv(_get_dbos_instance(), topic, timeout_seconds)

+    @classmethod
+    async def recv_async(
+        cls, topic: Optional[str] = None, timeout_seconds: float = 60
+    ) -> Any:
+        """
+        Receive a workflow message.
+
+        This function is to be called from within a workflow.
+        `recv_async` will return the message sent on `topic`, asynchronously waiting if necessary.
+        """
+        return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))
+
     @classmethod
     def sleep(cls, seconds: float) -> None:
         """
         Sleep for the specified time (in seconds).

-        It is important to use `DBOS.sleep` (as opposed to any other sleep) within workflows,
-        as the …
+        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
+        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
         """
         if seconds <= 0:
             return
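`send_async` and `recv_async` are thin `asyncio.to_thread` wrappers over the existing blocking implementations, so an async workflow can exchange messages without stalling the event loop. Illustrative sketch (hypothetical workflow and IDs):

```python
from typing import Any
from dbos import DBOS

@DBOS.workflow()
async def ping(peer_workflow_id: str) -> Any:
    await DBOS.send_async(peer_workflow_id, "ping", topic="greetings")
    # Blocks a worker thread, not the event loop, while waiting:
    return await DBOS.recv_async(topic="greetings", timeout_seconds=30)
```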
@@ -631,7 +677,8 @@ class DBOS:
         attributes: TracedAttributes = {
             "name": "sleep",
         }
-        with EnterDBOSStep(attributes) …
+        with EnterDBOSStep(attributes):
+            ctx = assert_current_dbos_context()
             _get_dbos_instance()._sys_db.sleep(
                 ctx.workflow_id, ctx.curr_step_function_id, seconds
             )

@@ -639,17 +686,25 @@ class DBOS:
         # Cannot call it from outside of a workflow
         raise DBOSException("sleep() must be called from within a workflow")

+    @classmethod
+    async def sleep_async(cls, seconds: float) -> None:
+        """
+        Sleep for the specified time (in seconds).
+
+        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
+        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
+        """
+        await asyncio.to_thread(lambda: DBOS.sleep(seconds))
+
     @classmethod
     def set_event(cls, key: str, value: Any) -> None:
         """
         Set a workflow event.

-        This function is to be called from within a workflow.
-
         `set_event` sets the `value` of `key` for the current workflow instance ID.
         This `value` can then be retrieved by other functions, using `get_event` below.
-        …
-        …
+        If the event `key` already exists, its `value` is updated.
+        This function can only be called from within a workflow.

         Args:
             key(str): The event key / name within the workflow

@@ -658,6 +713,23 @@ class DBOS:
         """
         return set_event(_get_dbos_instance(), key, value)

+    @classmethod
+    async def set_event_async(cls, key: str, value: Any) -> None:
+        """
+        Set a workflow event.
+
+        `set_event_async` sets the `value` of `key` for the current workflow instance ID.
+        This `value` can then be retrieved by other functions, using `get_event` below.
+        If the event `key` already exists, its `value` is updated.
+        This function can only be called from within a workflow.
+
+        Args:
+            key(str): The event key / name within the workflow
+            value(Any): A serializable value to associate with the key
+
+        """
+        await asyncio.to_thread(lambda: DBOS.set_event(key, value))
+
     @classmethod
     def get_event(cls, workflow_id: str, key: str, timeout_seconds: float = 60) -> Any:
         """

@@ -673,6 +745,25 @@ class DBOS:
         """
         return get_event(_get_dbos_instance(), workflow_id, key, timeout_seconds)

+    @classmethod
+    async def get_event_async(
+        cls, workflow_id: str, key: str, timeout_seconds: float = 60
+    ) -> Any:
+        """
+        Return the `value` of a workflow event, waiting for it to occur if necessary.
+
+        `get_event_async` waits for a corresponding `set_event` by the workflow with ID `workflow_id` with the same `key`.
+
+        Args:
+            workflow_id(str): The workflow instance ID that is expected to call `set_event` on `key`
+            key(str): The event key / name within the workflow
+            timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called by the workflow
+
+        """
+        return await asyncio.to_thread(
+            lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
+        )
+
     @classmethod
     def execute_workflow_id(cls, workflow_id: str) -> WorkflowHandle[Any]:
         """Execute a workflow by ID (for recovery)."""
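Every new `*_async` classmethod follows the same one-line delegation: run the existing synchronous implementation on a worker thread via `asyncio.to_thread` so the caller's event loop stays responsive. A self-contained sketch of the pattern (the blocking function here stands in for `DBOS.sleep` and friends):

```python
import asyncio
import time

def blocking_op(seconds: float) -> str:
    time.sleep(seconds)  # stands in for the durable, blocking implementation
    return "done"

async def blocking_op_async(seconds: float) -> str:
    # Same delegation shape as sleep_async/set_event_async/get_event_async.
    return await asyncio.to_thread(lambda: blocking_op(seconds))

print(asyncio.run(blocking_op_async(0.01)))  # done
```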
dbos/_outcome.py
ADDED
@@ -0,0 +1,190 @@
+import asyncio
+import contextlib
+import inspect
+import time
+from typing import Any, Callable, Coroutine, Optional, Protocol, TypeVar, Union, cast
+
+T = TypeVar("T")
+R = TypeVar("R")
+
+
+# define Outcome protocol w/ common composition methods
+class Outcome(Protocol[T]):
+
+    def wrap(
+        self, before: Callable[[], Callable[[Callable[[], T]], R]]
+    ) -> "Outcome[R]": ...
+
+    def then(self, next: Callable[[Callable[[], T]], R]) -> "Outcome[R]": ...
+
+    def also(
+        self, cm: contextlib.AbstractContextManager[Any, bool]
+    ) -> "Outcome[T]": ...
+
+    def retry(
+        self,
+        attempts: int,
+        on_exception: Callable[[int, BaseException], float],
+        exceeded_retries: Callable[[int], BaseException],
+    ) -> "Outcome[T]": ...
+
+    def intercept(self, interceptor: Callable[[], Optional[T]]) -> "Outcome[T]": ...
+
+    def __call__(self) -> Union[T, Coroutine[Any, Any, T]]: ...
+
+    # Helper function to create an Immediate or Pending Result, depending on if func is a coroutine function or not
+    @staticmethod
+    def make(func: Callable[[], Union[T, Coroutine[Any, Any, T]]]) -> "Outcome[T]":
+        return (
+            Pending(cast(Callable[[], Coroutine[Any, Any, T]], func))
+            if inspect.iscoroutinefunction(func)
+            else Immediate(cast(Callable[[], T], func))
+        )
+
+
+# Immediate Outcome - for composing non-async functions
+class Immediate(Outcome[T]):
+    __slots__ = "_func"
+
+    def __init__(self, func: Callable[[], T]):
+        self._func = func
+
+    def then(self, next: Callable[[Callable[[], T]], R]) -> "Immediate[R]":
+        return Immediate(lambda: next(self._func))
+
+    def wrap(
+        self, before: Callable[[], Callable[[Callable[[], T]], R]]
+    ) -> "Immediate[R]":
+        return Immediate(lambda: before()(self._func))
+
+    @staticmethod
+    def _intercept(func: Callable[[], T], interceptor: Callable[[], Optional[T]]) -> T:
+        intercepted = interceptor()
+        return intercepted if intercepted else func()
+
+    def intercept(self, interceptor: Callable[[], Optional[T]]) -> "Immediate[T]":
+        return Immediate[T](lambda: Immediate._intercept(self._func, interceptor))
+
+    @staticmethod
+    def _also(func: Callable[[], T], cm: contextlib.AbstractContextManager[Any, bool]) -> T:  # type: ignore
+        with cm:
+            return func()
+
+    def also(self, cm: contextlib.AbstractContextManager[Any, bool]) -> "Immediate[T]":
+        return Immediate[T](lambda: Immediate._also(self._func, cm))
+
+    @staticmethod
+    def _retry(
+        func: Callable[[], T],
+        attempts: int,
+        on_exception: Callable[[int, BaseException], float],
+        exceeded_retries: Callable[[int], BaseException],
+    ) -> T:
+        for i in range(attempts):
+            try:
+                return func()
+            except Exception as exp:
+                wait_time = on_exception(i, exp)
+                time.sleep(wait_time)
+
+        raise exceeded_retries(attempts)
+
+    def retry(
+        self,
+        attempts: int,
+        on_exception: Callable[[int, BaseException], float],
+        exceeded_retries: Callable[[int], BaseException],
+    ) -> "Immediate[T]":
+        assert attempts > 0
+        return Immediate[T](
+            lambda: Immediate._retry(
+                self._func, attempts, on_exception, exceeded_retries
+            )
+        )
+
+    def __call__(self) -> T:
+        return self._func()
+
+
+# Pending Outcome - for composing async functions
+class Pending(Outcome[T]):
+    __slots__ = "_func"
+
+    def __init__(self, func: Callable[[], Coroutine[Any, Any, T]]):
+        self._func = func
+
+    # Helper method in order to raise an exception in a lambda
+    @staticmethod
+    def _raise(ex: BaseException) -> T:
+        raise ex
+
+    async def _wrap(
+        func: Callable[[], Coroutine[Any, Any, T]],
+        before: Callable[[], Callable[[Callable[[], T]], R]],
+    ) -> R:
+        after = await asyncio.to_thread(before)
+        try:
+            value = await func()
+            return await asyncio.to_thread(after, lambda: value)
+        except BaseException as exp:
+            return await asyncio.to_thread(after, lambda: Pending._raise(exp))
+
+    def wrap(
+        self, before: Callable[[], Callable[[Callable[[], T]], R]]
+    ) -> "Pending[R]":
+        return Pending[R](lambda: Pending._wrap(self._func, before))
+
+    def then(self, next: Callable[[Callable[[], T]], R]) -> "Pending[R]":
+        return Pending[R](lambda: Pending._wrap(self._func, lambda: next))
+
+    @staticmethod
+    async def _also(  # type: ignore
+        func: Callable[[], Coroutine[Any, Any, T]],
+        cm: contextlib.AbstractContextManager[Any, bool],
+    ) -> T:
+        with cm:
+            return await func()
+
+    def also(self, cm: contextlib.AbstractContextManager[Any, bool]) -> "Pending[T]":
+        return Pending[T](lambda: Pending._also(self._func, cm))
+
+    @staticmethod
+    async def _intercept(
+        func: Callable[[], Coroutine[Any, Any, T]],
+        interceptor: Callable[[], Optional[T]],
+    ) -> T:
+        intercepted = await asyncio.to_thread(interceptor)
+        return intercepted if intercepted else await func()
+
+    def intercept(self, interceptor: Callable[[], Optional[T]]) -> "Pending[T]":
+        return Pending[T](lambda: Pending._intercept(self._func, interceptor))
+
+    @staticmethod
+    async def _retry(
+        func: Callable[[], Coroutine[Any, Any, T]],
+        attempts: int,
+        on_exception: Callable[[int, BaseException], float],
+        exceeded_retries: Callable[[int], BaseException],
+    ) -> T:
+        for i in range(attempts):
+            try:
+                return await func()
+            except Exception as exp:
+                wait_time = on_exception(i, exp)
+                await asyncio.sleep(wait_time)
+
+        raise exceeded_retries(attempts)
+
+    def retry(
+        self,
+        attempts: int,
+        on_exception: Callable[[int, BaseException], float],
+        exceeded_retries: Callable[[int], BaseException],
+    ) -> "Pending[T]":
+        assert attempts > 0
+        return Pending[T](
+            lambda: Pending._retry(self._func, attempts, on_exception, exceeded_retries)
+        )
+
+    async def __call__(self) -> T:
+        return await self._func()
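Putting the new module together: `make` picks `Immediate` or `Pending`, `retry` adds the attempt loop, and `then` post-processes the result. A runnable illustration composing the primitives directly (application code would not normally do this; the flaky function and parameters are made up):

```python
from dbos._outcome import Outcome

calls = {"n": 0}

def flaky() -> str:
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("transient failure")
    return "ok"

outcome = (
    Outcome[str]
    .make(flaky)                           # Immediate, since flaky is sync
    .retry(
        5,
        lambda attempt, exc: 0.01,         # fixed 10 ms backoff
        lambda n: RuntimeError(f"gave up after {n} attempts"),
    )
    .then(lambda thunk: thunk().upper())   # post-process the result
)
print(outcome())  # OK (third attempt succeeds)
```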
{dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/RECORD
CHANGED

@@ -1,15 +1,15 @@
-dbos-0.…
-dbos-0.…
-dbos-0.…
-dbos-0.…
+dbos-0.16.0a2.dist-info/METADATA,sha256=kH8V4rGJRqSp0StPXPo-Ftmg-O1ITgB_Vyjn638AABU,5022
+dbos-0.16.0a2.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+dbos-0.16.0a2.dist-info/entry_points.txt,sha256=z6GcVANQV7Uw_82H9Ob2axJX6V3imftyZsljdh-M1HU,54
+dbos-0.16.0a2.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
 dbos/_admin_server.py,sha256=DOgzVp9kmwiebQqmJB1LcrZnGTxSMbZiGXdenc1wZDg,3163
 dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_context.py,sha256=…
-dbos/_core.py,sha256=…
+dbos/_context.py,sha256=KV3fd3-Rv6EWrYDUdHARxltSlNZGNtQtNSqeQ-gkXE8,18049
+dbos/_core.py,sha256=F7ep-KA6c0tyrTxV7iwHjKRbxy9RyzCcxl-M8XsvVfE,33481
 dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
-dbos/_dbos.py,sha256=…
+dbos/_dbos.py,sha256=lBDofWMgK-vVgrU6xmBn1miVv1cf7IamGa9QvPn-BxM,34740
 dbos/_dbos_config.py,sha256=f37eccN3JpCA32kRdQ4UsERjhYGcdLWv-N21ijnDZmY,6406
 dbos/_error.py,sha256=UETk8CoZL-TO2Utn1-E7OSWelhShWmKM-fOlODMR9PE,3893
 dbos/_fastapi.py,sha256=iyefCZq-ZDKRUjN_rgYQmFmyvWf4gPrSlC6CLbfq4a8,3419

@@ -25,6 +25,7 @@ dbos/_migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEUL…
 dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-mUro43wGhsg5wcQWKZPRHD6jw8R5pVc,986
 dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
+dbos/_outcome.py,sha256=4BxjNfVhjI-M_m8eCWimpvkO8m-knAEyoDscFYq6SQ4,6237
 dbos/_queue.py,sha256=hAXwrfBmtv6BGrlmFq-Ol6b_ED-HDaYqSSxumMJC6Xo,1938
 dbos/_recovery.py,sha256=jbzGYxICA2drzyzlBSy2UiXhKV_16tBVacKQdTkqf-w,2008
 dbos/_registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642

@@ -51,4 +52,4 @@ dbos/cli.py,sha256=em1uAxrp5yyg53V7ZpmHFtqD6OJp2cMJkG9vGJPoFTA,10904
 dbos/dbos-config.schema.json,sha256=tS7x-bdFbFvpobcs3pIOhwun3yr_ndvTEYOn4BJjTzs,5889
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.…
+dbos-0.16.0a2.dist-info/RECORD,,
{dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/WHEEL: file without changes
{dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/entry_points.txt: file without changes
{dbos-0.15.0a2.dist-info → dbos-0.16.0a2.dist-info}/licenses/LICENSE: file without changes