dbos 1.13.1__py3-none-any.whl → 1.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/__init__.py +3 -0
- dbos/_client.py +17 -9
- dbos/_context.py +15 -2
- dbos/_core.py +45 -11
- dbos/_dbos.py +9 -6
- dbos/_debouncer.py +395 -0
- dbos/_logger.py +1 -1
- dbos/_outcome.py +67 -13
- dbos/_serialization.py +7 -2
- dbos/_sys_db.py +41 -3
- dbos/_tracer.py +7 -0
- {dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/METADATA +1 -1
- {dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/RECORD +16 -15
- {dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/WHEEL +0 -0
- {dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/entry_points.txt +0 -0
- {dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/licenses/LICENSE +0 -0
dbos/__init__.py  CHANGED

@@ -9,6 +9,7 @@ from ._context import (
 )
 from ._dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle, WorkflowHandleAsync
 from ._dbos_config import DBOSConfig
+from ._debouncer import Debouncer, DebouncerClient
 from ._kafka_message import KafkaMessage
 from ._queue import Queue
 from ._sys_db import GetWorkflowsInput, WorkflowStatus, WorkflowStatusString
@@ -32,4 +33,6 @@ __all__ = [
     "WorkflowStatusString",
     "error",
     "Queue",
+    "Debouncer",
+    "DebouncerClient",
 ]
dbos/_client.py  CHANGED

@@ -3,6 +3,7 @@ import sys
 import time
 import uuid
 from typing import (
+    TYPE_CHECKING,
     Any,
     AsyncGenerator,
     Generator,
@@ -24,7 +25,10 @@ else:
     from typing import NotRequired
 
 from dbos import _serialization
-from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
+
+if TYPE_CHECKING:
+    from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
+
 from dbos._dbos_config import (
     get_application_database_url,
     get_system_database_url,
@@ -224,23 +228,25 @@ class DBOSClient:
 
     def enqueue(
         self, options: EnqueueOptions, *args: Any, **kwargs: Any
-    ) -> WorkflowHandle[R]:
+    ) -> "WorkflowHandle[R]":
         workflow_id = self._enqueue(options, *args, **kwargs)
         return WorkflowHandleClientPolling[R](workflow_id, self._sys_db)
 
     async def enqueue_async(
         self, options: EnqueueOptions, *args: Any, **kwargs: Any
-    ) -> WorkflowHandleAsync[R]:
+    ) -> "WorkflowHandleAsync[R]":
         workflow_id = await asyncio.to_thread(self._enqueue, options, *args, **kwargs)
         return WorkflowHandleClientAsyncPolling[R](workflow_id, self._sys_db)
 
-    def retrieve_workflow(self, workflow_id: str) -> WorkflowHandle[R]:
+    def retrieve_workflow(self, workflow_id: str) -> "WorkflowHandle[R]":
         status = get_workflow(self._sys_db, workflow_id)
         if status is None:
             raise DBOSNonExistentWorkflowError(workflow_id)
         return WorkflowHandleClientPolling[R](workflow_id, self._sys_db)
 
-    async def retrieve_workflow_async(self, workflow_id: str) -> WorkflowHandleAsync[R]:
+    async def retrieve_workflow_async(
+        self, workflow_id: str
+    ) -> "WorkflowHandleAsync[R]":
         status = await asyncio.to_thread(get_workflow, self._sys_db, workflow_id)
         if status is None:
             raise DBOSNonExistentWorkflowError(workflow_id)
@@ -311,11 +317,13 @@ class DBOSClient:
     async def cancel_workflow_async(self, workflow_id: str) -> None:
         await asyncio.to_thread(self.cancel_workflow, workflow_id)
 
-    def resume_workflow(self, workflow_id: str) -> WorkflowHandle[Any]:
+    def resume_workflow(self, workflow_id: str) -> "WorkflowHandle[Any]":
         self._sys_db.resume_workflow(workflow_id)
         return WorkflowHandleClientPolling[Any](workflow_id, self._sys_db)
 
-    async def resume_workflow_async(self, workflow_id: str) -> WorkflowHandleAsync[Any]:
+    async def resume_workflow_async(
+        self, workflow_id: str
+    ) -> "WorkflowHandleAsync[Any]":
         await asyncio.to_thread(self.resume_workflow, workflow_id)
         return WorkflowHandleClientAsyncPolling[Any](workflow_id, self._sys_db)
 
@@ -451,7 +459,7 @@ class DBOSClient:
         start_step: int,
         *,
         application_version: Optional[str] = None,
-    ) -> WorkflowHandle[Any]:
+    ) -> "WorkflowHandle[Any]":
         forked_workflow_id = fork_workflow(
             self._sys_db,
             self._app_db,
@@ -467,7 +475,7 @@ class DBOSClient:
         start_step: int,
         *,
         application_version: Optional[str] = None,
-    ) -> WorkflowHandleAsync[Any]:
+    ) -> "WorkflowHandleAsync[Any]":
         forked_workflow_id = await asyncio.to_thread(
             fork_workflow,
             self._sys_db,
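The quoted return types above work together with the new TYPE_CHECKING block: WorkflowHandle and WorkflowHandleAsync are now imported only for static analysis, so _client no longer has to import _dbos at runtime. A minimal sketch of the pattern, with a hypothetical `retrieve` function standing in for the client methods:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen only by type checkers; never executed, so no runtime import cycle.
        from dbos._dbos import WorkflowHandle

    def retrieve(workflow_id: str) -> "WorkflowHandle[int]":
        # The quoted annotation is a forward reference, resolved lazily.
        ...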
dbos/_context.py  CHANGED

@@ -215,11 +215,18 @@ class DBOSContext:
     def end_handler(self, exc_value: Optional[BaseException]) -> None:
         self._end_span(exc_value)
 
-    def get_current_span(self) -> Optional[Span]:
+    """ Return the current DBOS span if any. It must be a span created by DBOS."""
+
+    def get_current_dbos_span(self) -> Optional[Span]:
         if len(self.context_spans) > 0:
             return self.context_spans[-1].span
         return None
 
+    """ Return the current active span if any. It might not be a DBOS span."""
+
+    def get_current_active_span(self) -> Optional[Span]:
+        return dbos_tracer.get_current_span()
+
     def _start_span(self, attributes: TracedAttributes) -> None:
         if dbos_tracer.disable_otlp:
             return
@@ -235,7 +242,7 @@ class DBOSContext:
         attributes["authenticatedUserAssumedRole"] = self.assumed_role
         span = dbos_tracer.start_span(
             attributes,
-            parent=
+            parent=None,  # It'll use the current active span as the parent
         )
         # Activate the current span
         cm = use_span(
@@ -517,6 +524,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         self.saved_workflow_timeout: Optional[int] = None
         self.saved_deduplication_id: Optional[str] = None
         self.saved_priority: Optional[int] = None
+        self.saved_is_within_set_workflow_id_block: bool = False
 
     def __enter__(self) -> DBOSContext:
         # Code to create a basic context
@@ -526,6 +534,9 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         ctx = DBOSContext()
         _set_local_dbos_context(ctx)
         assert not ctx.is_within_workflow()
+        # Unset is_within_set_workflow_id_block as the workflow is not within a block
+        self.saved_is_within_set_workflow_id_block = ctx.is_within_set_workflow_id_block
+        ctx.is_within_set_workflow_id_block = False
         # Unset the workflow_timeout_ms context var so it is not applied to this
         # workflow's children (instead we propagate the deadline)
         self.saved_workflow_timeout = ctx.workflow_timeout_ms
@@ -550,6 +561,8 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         ctx = assert_current_dbos_context()
         assert ctx.is_within_workflow()
         ctx.end_workflow(exc_value)
+        # Restore is_within_set_workflow_id_block
+        ctx.is_within_set_workflow_id_block = self.saved_is_within_set_workflow_id_block
         # Restore the saved workflow timeout
         ctx.workflow_timeout_ms = self.saved_workflow_timeout
         # Clear any propagating timeout
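The saved_is_within_set_workflow_id_block field applies the same save/clear/restore discipline the context manager already uses for workflow_timeout_ms. A minimal sketch of that pattern with simplified names (these classes are illustrative, not the real DBOS ones):

    from types import SimpleNamespace

    class EnterBlock:
        """Save a context flag on entry, clear it, restore it on exit."""

        def __init__(self, ctx: SimpleNamespace) -> None:
            self.ctx = ctx
            self.saved = False

        def __enter__(self) -> SimpleNamespace:
            # Clear the flag so it does not leak into the nested scope.
            self.saved = self.ctx.is_within_set_workflow_id_block
            self.ctx.is_within_set_workflow_id_block = False
            return self.ctx

        def __exit__(self, *exc: object) -> bool:
            # Restore the caller's value, as EnterDBOSWorkflow.__exit__ now does.
            self.ctx.is_within_set_workflow_id_block = self.saved
            return False

    ctx = SimpleNamespace(is_within_set_workflow_id_block=True)
    with EnterBlock(ctx):
        assert ctx.is_within_set_workflow_id_block is False
    assert ctx.is_within_set_workflow_id_block is True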
dbos/_core.py  CHANGED

@@ -19,8 +19,6 @@ from typing import (
     cast,
 )
 
-import psycopg
-
 from dbos._outcome import Immediate, NoResult, Outcome, Pending
 from dbos._utils import GlobalParams, retriable_postgres_exception
 
@@ -52,12 +50,14 @@ from ._error import (
     DBOSException,
     DBOSMaxStepRetriesExceeded,
     DBOSNonExistentWorkflowError,
+    DBOSQueueDeduplicatedError,
     DBOSRecoveryError,
     DBOSUnexpectedStepError,
     DBOSWorkflowCancelledError,
     DBOSWorkflowConflictIDError,
     DBOSWorkflowFunctionNotFoundError,
 )
+from ._logger import dbos_logger
 from ._registrations import (
     DEFAULT_MAX_RECOVERY_ATTEMPTS,
     get_config_name,
@@ -96,6 +96,15 @@ R = TypeVar("R", covariant=True)  # A generic type for workflow return values
 F = TypeVar("F", bound=Callable[..., Any])
 
 TEMP_SEND_WF_NAME = "<temp>.temp_send_workflow"
+DEBOUNCER_WORKFLOW_NAME = "_dbos_debouncer_workflow"
+
+
+def check_is_in_coroutine() -> bool:
+    try:
+        asyncio.get_running_loop()
+        return True
+    except RuntimeError:
+        return False
 
 
 class WorkflowHandleFuture(Generic[R]):
@@ -303,10 +312,22 @@ def _init_workflow(
     }
 
     # Synchronously record the status and inputs for workflows
-    wf_status, workflow_deadline_epoch_ms = dbos._sys_db.init_workflow(
-        status,
-        max_recovery_attempts=max_recovery_attempts,
-    )
+    try:
+        wf_status, workflow_deadline_epoch_ms = dbos._sys_db.init_workflow(
+            status,
+            max_recovery_attempts=max_recovery_attempts,
+        )
+    except DBOSQueueDeduplicatedError as e:
+        if ctx.has_parent():
+            result: OperationResultInternal = {
+                "workflow_uuid": ctx.parent_workflow_id,
+                "function_id": ctx.parent_workflow_fid,
+                "function_name": wf_name,
+                "output": None,
+                "error": _serialization.serialize_exception(e),
+            }
+            dbos._sys_db.record_operation_result(result)
+        raise
 
     if workflow_deadline_epoch_ms is not None:
         evt = threading.Event()
@@ -830,11 +851,16 @@ def workflow_wrapper(
             dbos._sys_db.record_get_result(workflow_id, serialized_r, None)
             return r
 
+        if check_is_in_coroutine() and not inspect.iscoroutinefunction(func):
+            dbos_logger.warning(
+                f"Sync workflow ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Define it as async or use asyncio.to_thread instead."
+            )
+
         outcome = (
-            wfOutcome.wrap(init_wf)
+            wfOutcome.wrap(init_wf, dbos=dbos)
             .also(DBOSAssumeRole(rr))
             .also(enterWorkflowCtxMgr(attributes))
-            .then(record_get_result)
+            .then(record_get_result, dbos=dbos)
         )
         return outcome()  # type: ignore
 
@@ -959,7 +985,7 @@ def decorate_transaction(
                                 dbapi_error
                             ) or dbos._app_db._is_serialization_error(dbapi_error):
                                 # Retry on serialization failure
-                                span = ctx.get_current_span()
+                                span = ctx.get_current_dbos_span()
                                 if span:
                                     span.add_event(
                                         "Transaction Failure",
@@ -1011,6 +1037,10 @@ def decorate_transaction(
             assert (
                 ctx.is_workflow()
             ), "Transactions must be called from within workflows"
+            if check_is_in_coroutine():
+                dbos_logger.warning(
+                    f"Transaction function ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Use asyncio.to_thread instead."
+                )
             with DBOSAssumeRole(rr):
                 return invoke_tx(*args, **kwargs)
         else:
@@ -1074,7 +1104,7 @@ def decorate_step(
                     exc_info=error,
                 )
                 ctx = assert_current_dbos_context()
-                span = ctx.get_current_span()
+                span = ctx.get_current_dbos_span()
                 if span:
                     span.add_event(
                         f"Step attempt {attempt} failed",
@@ -1146,7 +1176,7 @@ def decorate_step(
 
             outcome = (
                 stepOutcome.then(record_step_result)
-                .intercept(check_existing_result)
+                .intercept(check_existing_result, dbos=dbos)
                 .also(EnterDBOSStep(attributes))
             )
             return outcome()
@@ -1155,6 +1185,10 @@ def decorate_step(
 
     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
+        if check_is_in_coroutine() and not inspect.iscoroutinefunction(func):
+            dbos_logger.warning(
+                f"Sync step ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Define it as async or use asyncio.to_thread instead."
+            )
         # If the step is called from a workflow, run it as a step.
        # Otherwise, run it as a normal function.
        ctx = get_local_dbos_context()
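The new check_is_in_coroutine guards warn when a synchronous workflow, step, or transaction is invoked while an event loop is running, because the call would block the loop. A minimal sketch of the remedy the warning text recommends; `fetch_data` is a hypothetical step, and the decorator usage assumes the standard `DBOS.step()` API:

    import asyncio

    from dbos import DBOS

    @DBOS.step()
    def fetch_data(url: str) -> str:
        # Synchronous, potentially blocking work.
        return f"fetched {url}"

    async def handler() -> str:
        # Calling fetch_data(...) directly here would now log the warning
        # added in _core.py; offload the sync step to a worker thread instead.
        return await asyncio.to_thread(fetch_data, "https://example.com")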
dbos/_dbos.py  CHANGED

@@ -32,12 +32,14 @@ from opentelemetry.trace import Span
 from rich import print
 
 from dbos._conductor.conductor import ConductorWebsocket
+from dbos._debouncer import debouncer_workflow
 from dbos._sys_db import SystemDatabase, WorkflowStatus
 from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
 from dbos._workflow_commands import fork_workflow, list_queued_workflows, list_workflows
 
 from ._classproperty import classproperty
 from ._core import (
+    DEBOUNCER_WORKFLOW_NAME,
     TEMP_SEND_WF_NAME,
     WorkflowHandleAsyncPolling,
     WorkflowHandlePolling,
@@ -390,11 +392,12 @@ class DBOS:
         ) -> None:
             self.send(destination_id, message, topic)
 
-
-
-
-
-
+        decorate_workflow(self._registry, TEMP_SEND_WF_NAME, None)(send_temp_workflow)
+
+        # Register the debouncer workflow
+        decorate_workflow(self._registry, DEBOUNCER_WORKFLOW_NAME, None)(
+            debouncer_workflow
+        )
 
         for handler in dbos_logger.handlers:
             handler.flush()
@@ -1297,7 +1300,7 @@ class DBOS:
     def span(cls) -> Span:
         """Return the tracing `Span` associated with the current context."""
         ctx = assert_current_dbos_context()
-        span = ctx.get_current_span()
+        span = ctx.get_current_active_span()
         assert span
        return span
dbos/_debouncer.py  ADDED

@@ -0,0 +1,395 @@
+import asyncio
+import math
+import sys
+import time
+import types
+import uuid
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Coroutine,
+    Dict,
+    Generic,
+    Optional,
+    Tuple,
+    TypedDict,
+    TypeVar,
+    Union,
+)
+
+if sys.version_info < (3, 10):
+    from typing_extensions import ParamSpec
+else:
+    from typing import ParamSpec
+
+from dbos._client import (
+    DBOSClient,
+    EnqueueOptions,
+    WorkflowHandleClientAsyncPolling,
+    WorkflowHandleClientPolling,
+)
+from dbos._context import (
+    DBOSContextEnsure,
+    SetEnqueueOptions,
+    SetWorkflowID,
+    SetWorkflowTimeout,
+    assert_current_dbos_context,
+)
+from dbos._core import (
+    DEBOUNCER_WORKFLOW_NAME,
+    WorkflowHandleAsyncPolling,
+    WorkflowHandlePolling,
+)
+from dbos._error import DBOSQueueDeduplicatedError
+from dbos._queue import Queue
+from dbos._registrations import get_dbos_func_name
+from dbos._serialization import WorkflowInputs
+from dbos._utils import INTERNAL_QUEUE_NAME
+
+if TYPE_CHECKING:
+    from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
+
+P = ParamSpec("P")  # A generic type for workflow parameters
+R = TypeVar("R", covariant=True)  # A generic type for workflow return values
+
+
+_DEBOUNCER_TOPIC = "DEBOUNCER_TOPIC"
+
+
+# Options saved from the local context to pass through to the debounced function
+class ContextOptions(TypedDict):
+    workflow_id: str
+    deduplication_id: Optional[str]
+    priority: Optional[int]
+    app_version: Optional[str]
+    workflow_timeout_sec: Optional[float]
+
+
+# Parameters for the debouncer workflow
+class DebouncerOptions(TypedDict):
+    workflow_name: str
+    debounce_timeout_sec: Optional[float]
+    queue_name: Optional[str]
+
+
+# The message sent from a debounce to the debouncer workflow
+class DebouncerMessage(TypedDict):
+    inputs: WorkflowInputs
+    message_id: str
+    debounce_period_sec: float
+
+
+def debouncer_workflow(
+    initial_debounce_period_sec: float,
+    ctx: ContextOptions,
+    options: DebouncerOptions,
+    *args: Tuple[Any, ...],
+    **kwargs: Dict[str, Any],
+) -> None:
+    from dbos._dbos import DBOS, _get_dbos_instance
+
+    dbos = _get_dbos_instance()
+
+    workflow_inputs: WorkflowInputs = {"args": args, "kwargs": kwargs}
+    # Every time the debounced workflow is called, a message is sent to this workflow.
+    # It waits until debounce_period_sec have passed since the last message or until
+    # debounce_timeout_sec has elapsed.
+    debounce_deadline_epoch_sec = (
+        time.time() + options["debounce_timeout_sec"]
+        if options["debounce_timeout_sec"]
+        else math.inf
+    )
+    debounce_period_sec = initial_debounce_period_sec
+    while time.time() < debounce_deadline_epoch_sec:
+        time_until_deadline = max(debounce_deadline_epoch_sec - time.time(), 0)
+        timeout = min(debounce_period_sec, time_until_deadline)
+        message: DebouncerMessage = DBOS.recv(_DEBOUNCER_TOPIC, timeout_seconds=timeout)
+        if message is None:
+            break
+        else:
+            workflow_inputs = message["inputs"]
+            debounce_period_sec = message["debounce_period_sec"]
+            # Acknowledge receipt of the message
+            DBOS.set_event(message["message_id"], message["message_id"])
+    # After the timeout or period has elapsed, start the user workflow with the requested context parameters,
+    # either directly or on a queue.
+    with SetWorkflowID(ctx["workflow_id"]):
+        with SetWorkflowTimeout(ctx["workflow_timeout_sec"]):
+            func = dbos._registry.workflow_info_map.get(options["workflow_name"], None)
+            if not func:
+                raise Exception(
+                    f"Invalid workflow name provided to debouncer: {options['workflow_name']}"
+                )
+            if options["queue_name"]:
+                queue = dbos._registry.queue_info_map.get(options["queue_name"], None)
+                if not queue:
+                    raise Exception(
+                        f"Invalid queue name provided to debouncer: {options['queue_name']}"
+                    )
+                with SetEnqueueOptions(
+                    deduplication_id=ctx["deduplication_id"],
+                    priority=ctx["priority"],
+                    app_version=ctx["app_version"],
+                ):
+                    queue.enqueue(
+                        func, *workflow_inputs["args"], **workflow_inputs["kwargs"]
+                    )
+            else:
+                DBOS.start_workflow(
+                    func, *workflow_inputs["args"], **workflow_inputs["kwargs"]
+                )
+
+
+class Debouncer(Generic[P, R]):
+
+    def __init__(
+        self,
+        workflow_name: str,
+        *,
+        debounce_timeout_sec: Optional[float] = None,
+        queue: Optional[Queue] = None,
+    ):
+        self.func_name = workflow_name
+        self.options: DebouncerOptions = {
+            "debounce_timeout_sec": debounce_timeout_sec,
+            "queue_name": queue.name if queue else None,
+            "workflow_name": workflow_name,
+        }
+
+    @staticmethod
+    def create(
+        workflow: Callable[P, R],
+        *,
+        debounce_timeout_sec: Optional[float] = None,
+        queue: Optional[Queue] = None,
+    ) -> "Debouncer[P, R]":
+
+        if isinstance(workflow, (types.MethodType)):
+            raise TypeError("Only workflow functions may be debounced, not methods")
+        return Debouncer[P, R](
+            get_dbos_func_name(workflow),
+            debounce_timeout_sec=debounce_timeout_sec,
+            queue=queue,
+        )
+
+    @staticmethod
+    def create_async(
+        workflow: Callable[P, Coroutine[Any, Any, R]],
+        *,
+        debounce_timeout_sec: Optional[float] = None,
+        queue: Optional[Queue] = None,
+    ) -> "Debouncer[P, R]":
+
+        if isinstance(workflow, (types.MethodType)):
+            raise TypeError("Only workflow functions may be debounced, not methods")
+        return Debouncer[P, R](
+            get_dbos_func_name(workflow),
+            debounce_timeout_sec=debounce_timeout_sec,
+            queue=queue,
+        )
+
+    def debounce(
+        self,
+        debounce_key: str,
+        debounce_period_sec: float,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> "WorkflowHandle[R]":
+        from dbos._dbos import DBOS, _get_dbos_instance
+
+        dbos = _get_dbos_instance()
+        internal_queue = dbos._registry.get_internal_queue()
+
+        # Read all workflow settings from context, pass them through ContextOptions
+        # into the debouncer to apply to the user workflow, then reset the context
+        # so workflow settings aren't applied to the debouncer.
+        with DBOSContextEnsure():
+            ctx = assert_current_dbos_context()
+
+            # Deterministically generate the user workflow ID and message ID
+            def assign_debounce_ids() -> tuple[str, str]:
+                return str(uuid.uuid4()), ctx.assign_workflow_id()
+
+            message_id, user_workflow_id = dbos._sys_db.call_function_as_step(
+                assign_debounce_ids, "DBOS.assign_debounce_ids"
+            )
+            ctx.id_assigned_for_next_workflow = ""
+            ctx.is_within_set_workflow_id_block = False
+            ctxOptions: ContextOptions = {
+                "workflow_id": user_workflow_id,
+                "app_version": ctx.app_version,
+                "deduplication_id": ctx.deduplication_id,
+                "priority": ctx.priority,
+                "workflow_timeout_sec": (
+                    ctx.workflow_timeout_ms / 1000.0
+                    if ctx.workflow_timeout_ms
+                    else None
+                ),
+            }
+            while True:
+                try:
+                    # Attempt to enqueue a debouncer for this workflow.
+                    deduplication_id = f"{self.options['workflow_name']}-{debounce_key}"
+                    with SetEnqueueOptions(deduplication_id=deduplication_id):
+                        with SetWorkflowTimeout(None):
+                            internal_queue.enqueue(
+                                debouncer_workflow,
+                                debounce_period_sec,
+                                ctxOptions,
+                                self.options,
+                                *args,
+                                **kwargs,
+                            )
+                    return WorkflowHandlePolling(user_workflow_id, dbos)
+                except DBOSQueueDeduplicatedError:
+                    # If there is already a debouncer, send a message to it.
+                    # Deterministically retrieve the ID of the debouncer
+                    def get_deduplicated_workflow() -> Optional[str]:
+                        return dbos._sys_db.get_deduplicated_workflow(
+                            queue_name=internal_queue.name,
+                            deduplication_id=deduplication_id,
+                        )
+
+                    dedup_wfid = dbos._sys_db.call_function_as_step(
+                        get_deduplicated_workflow, "DBOS.get_deduplicated_workflow"
+                    )
+                    if dedup_wfid is None:
+                        continue
+                    else:
+                        workflow_inputs: WorkflowInputs = {"args": args, "kwargs": kwargs}
+                        message: DebouncerMessage = {
+                            "message_id": message_id,
+                            "inputs": workflow_inputs,
+                            "debounce_period_sec": debounce_period_sec,
+                        }
+                        DBOS.send(dedup_wfid, message, _DEBOUNCER_TOPIC)
+                        # Wait for the debouncer to acknowledge receipt of the message.
+                        # If the message is not acknowledged, this likely means the debouncer started its workflow
+                        # and exited without processing this message, so try again.
+                        if not DBOS.get_event(dedup_wfid, message_id, timeout_seconds=1):
+                            continue
+                        # Retrieve the user workflow ID from the input to the debouncer
+                        # and return a handle to it
+                        dedup_workflow_input = (
+                            DBOS.retrieve_workflow(dedup_wfid).get_status().input
+                        )
+                        assert dedup_workflow_input is not None
+                        user_workflow_id = dedup_workflow_input["args"][1]["workflow_id"]
+                        return WorkflowHandlePolling(user_workflow_id, dbos)
+
+    async def debounce_async(
+        self,
+        debounce_key: str,
+        debounce_period_sec: float,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> "WorkflowHandleAsync[R]":
+        from dbos._dbos import _get_dbos_instance
+
+        dbos = _get_dbos_instance()
+        handle = await asyncio.to_thread(
+            self.debounce, debounce_key, debounce_period_sec, *args, **kwargs
+        )
+        return WorkflowHandleAsyncPolling(handle.workflow_id, dbos)
+
+
+class DebouncerClient:
+
+    def __init__(
+        self,
+        client: DBOSClient,
+        workflow_options: EnqueueOptions,
+        *,
+        debounce_timeout_sec: Optional[float] = None,
+        queue: Optional[Queue] = None,
+    ):
+        self.workflow_options = workflow_options
+        self.debouncer_options: DebouncerOptions = {
+            "debounce_timeout_sec": debounce_timeout_sec,
+            "queue_name": queue.name if queue else None,
+            "workflow_name": workflow_options["workflow_name"],
+        }
+        self.client = client
+
+    def debounce(
+        self, debounce_key: str, debounce_period_sec: float, *args: Any, **kwargs: Any
+    ) -> "WorkflowHandle[R]":
+
+        ctxOptions: ContextOptions = {
+            "workflow_id": (
+                self.workflow_options["workflow_id"]
+                if self.workflow_options.get("workflow_id")
+                else str(uuid.uuid4())
+            ),
+            "app_version": self.workflow_options.get("app_version"),
+            "deduplication_id": self.workflow_options.get("deduplication_id"),
+            "priority": self.workflow_options.get("priority"),
+            "workflow_timeout_sec": self.workflow_options.get("workflow_timeout"),
+        }
+        message_id = str(uuid.uuid4())
+        while True:
+            try:
+                # Attempt to enqueue a debouncer for this workflow.
+                deduplication_id = (
+                    f"{self.debouncer_options['workflow_name']}-{debounce_key}"
+                )
+                debouncer_options: EnqueueOptions = {
+                    "workflow_name": DEBOUNCER_WORKFLOW_NAME,
+                    "queue_name": INTERNAL_QUEUE_NAME,
+                    "deduplication_id": deduplication_id,
+                }
+                self.client.enqueue(
+                    debouncer_options,
+                    debounce_period_sec,
+                    ctxOptions,
+                    self.debouncer_options,
+                    *args,
+                    **kwargs,
+                )
+                return WorkflowHandleClientPolling[R](
+                    ctxOptions["workflow_id"], self.client._sys_db
+                )
+            except DBOSQueueDeduplicatedError:
+                # If there is already a debouncer, send a message to it.
+                dedup_wfid = self.client._sys_db.get_deduplicated_workflow(
+                    queue_name=INTERNAL_QUEUE_NAME,
+                    deduplication_id=deduplication_id,
+                )
+                if dedup_wfid is None:
+                    continue
+                else:
+                    workflow_inputs: WorkflowInputs = {"args": args, "kwargs": kwargs}
+                    message: DebouncerMessage = {
+                        "message_id": message_id,
+                        "inputs": workflow_inputs,
+                        "debounce_period_sec": debounce_period_sec,
+                    }
+                    self.client.send(dedup_wfid, message, _DEBOUNCER_TOPIC)
+                    # Wait for the debouncer to acknowledge receipt of the message.
+                    # If the message is not acknowledged, this likely means the debouncer started its workflow
+                    # and exited without processing this message, so try again.
+                    if not self.client.get_event(
+                        dedup_wfid, message_id, timeout_seconds=1
+                    ):
+                        continue
+                    # Retrieve the user workflow ID from the input to the debouncer
+                    # and return a handle to it
+                    dedup_workflow_input = (
+                        self.client.retrieve_workflow(dedup_wfid).get_status().input
+                    )
+                    assert dedup_workflow_input is not None
+                    user_workflow_id = dedup_workflow_input["args"][1]["workflow_id"]
+                    return WorkflowHandleClientPolling[R](
+                        user_workflow_id, self.client._sys_db
+                    )
+
+    async def debounce_async(
+        self, deboucne_key: str, debounce_period_sec: float, *args: Any, **kwargs: Any
+    ) -> "WorkflowHandleAsync[R]":
+        handle: "WorkflowHandle[R]" = await asyncio.to_thread(
+            self.debounce, deboucne_key, debounce_period_sec, *args, **kwargs
+        )
+        return WorkflowHandleClientAsyncPolling[R](
+            handle.workflow_id, self.client._sys_db
+        )
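Taken together: Debouncer.debounce first tries to enqueue a debouncer workflow on the internal queue, keyed by a deduplication ID derived from the workflow name and debounce key; if one is already pending, it sends the newest inputs to it instead. The debouncer then starts the target workflow once, with the last inputs received, after the debounce period (or the overall timeout) lapses. A minimal usage sketch based on the API above; the workflow body and timings are illustrative:

    from dbos import DBOS, Debouncer

    @DBOS.workflow()
    def process_input(text: str) -> str:
        return f"processed: {text}"

    # Calls sharing the key "user-42" within 10s keep resetting the timer;
    # after 60s at the latest, the workflow runs even if calls keep arriving.
    debouncer = Debouncer.create(process_input, debounce_timeout_sec=60)
    debouncer.debounce("user-42", 10, "draft 1")
    handle = debouncer.debounce("user-42", 10, "draft 2")
    print(handle.get_result())  # only the last input, "draft 2", is processed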
dbos/_logger.py  CHANGED

@@ -39,7 +39,7 @@ class DBOSLogTransformer(logging.Filter):
         if ctx:
             if ctx.is_within_workflow():
                 record.operationUUID = ctx.workflow_id
-                span = ctx.get_current_span()
+                span = ctx.get_current_active_span()
                 if span:
                     trace_id = format_trace_id(span.get_span_context().trace_id)
                     record.traceId = trace_id
dbos/_outcome.py  CHANGED

@@ -2,9 +2,24 @@ import asyncio
 import contextlib
 import inspect
 import time
-from typing import
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Coroutine,
+    Optional,
+    Protocol,
+    TypeVar,
+    Union,
+    cast,
+)
 
 from dbos._context import EnterDBOSStepRetry
+from dbos._error import DBOSException
+from dbos._registrations import get_dbos_func_name
+
+if TYPE_CHECKING:
+    from ._dbos import DBOS
 
 T = TypeVar("T")
 R = TypeVar("R")
@@ -24,10 +39,15 @@ class NoResult:
 class Outcome(Protocol[T]):
 
     def wrap(
-        self, before: Callable[[], Callable[[Callable[[], T]], R]]
+        self,
+        before: Callable[[], Callable[[Callable[[], T]], R]],
+        *,
+        dbos: Optional["DBOS"] = None,
     ) -> "Outcome[R]": ...
 
-    def then(self, next: Callable[[Callable[[], T]], R]) -> "Outcome[R]": ...
+    def then(
+        self, next: Callable[[Callable[[], T]], R], *, dbos: Optional["DBOS"] = None
+    ) -> "Outcome[R]": ...
 
     def also(
         self, cm: contextlib.AbstractContextManager[Any, bool]
@@ -41,7 +61,10 @@ class Outcome(Protocol[T]):
     ) -> "Outcome[T]": ...
 
     def intercept(
-        self, interceptor: Callable[[], Union[NoResult, T]]
+        self,
+        interceptor: Callable[[], Union[NoResult, T]],
+        *,
+        dbos: Optional["DBOS"] = None,
     ) -> "Outcome[T]": ...
 
     def __call__(self) -> Union[T, Coroutine[Any, Any, T]]: ...
@@ -63,11 +86,17 @@ class Immediate(Outcome[T]):
     def __init__(self, func: Callable[[], T]):
         self._func = func
 
-    def then(self, next: Callable[[Callable[[], T]], R]) -> "Immediate[R]":
+    def then(
+        self,
+        next: Callable[[Callable[[], T]], R],
+        dbos: Optional["DBOS"] = None,
+    ) -> "Immediate[R]":
         return Immediate(lambda: next(self._func))
 
     def wrap(
-        self, before: Callable[[], Callable[[Callable[[], T]], R]]
+        self,
+        before: Callable[[], Callable[[Callable[[], T]], R]],
+        dbos: Optional["DBOS"] = None,
     ) -> "Immediate[R]":
         return Immediate(lambda: before()(self._func))
 
@@ -79,7 +108,10 @@ class Immediate(Outcome[T]):
         return intercepted if not isinstance(intercepted, NoResult) else func()
 
     def intercept(
-        self, interceptor: Callable[[], Union[NoResult, T]]
+        self,
+        interceptor: Callable[[], Union[NoResult, T]],
+        *,
+        dbos: Optional["DBOS"] = None,
     ) -> "Immediate[T]":
         return Immediate[T](lambda: Immediate._intercept(self._func, interceptor))
 
@@ -142,7 +174,12 @@ class Pending(Outcome[T]):
     async def _wrap(
         func: Callable[[], Coroutine[Any, Any, T]],
         before: Callable[[], Callable[[Callable[[], T]], R]],
+        *,
+        dbos: Optional["DBOS"] = None,
     ) -> R:
+        # Make sure the executor pool is configured correctly
+        if dbos is not None:
+            await dbos._configure_asyncio_thread_pool()
         after = await asyncio.to_thread(before)
         try:
             value = await func()
@@ -151,12 +188,17 @@ class Pending(Outcome[T]):
             return await asyncio.to_thread(after, lambda: Pending._raise(exp))
 
     def wrap(
-        self, before: Callable[[], Callable[[Callable[[], T]], R]]
+        self,
+        before: Callable[[], Callable[[Callable[[], T]], R]],
+        *,
+        dbos: Optional["DBOS"] = None,
     ) -> "Pending[R]":
-        return Pending[R](lambda: Pending._wrap(self._func, before))
+        return Pending[R](lambda: Pending._wrap(self._func, before, dbos=dbos))
 
-    def then(self, next: Callable[[Callable[[], T]], R]) -> "Pending[R]":
-        return Pending[R](lambda: Pending._wrap(self._func, lambda: next))
+    def then(
+        self, next: Callable[[Callable[[], T]], R], *, dbos: Optional["DBOS"] = None
+    ) -> "Pending[R]":
+        return Pending[R](lambda: Pending._wrap(self._func, lambda: next, dbos=dbos))
 
     @staticmethod
     async def _also(  # type: ignore
@@ -173,12 +215,24 @@ class Pending(Outcome[T]):
     async def _intercept(
         func: Callable[[], Coroutine[Any, Any, T]],
         interceptor: Callable[[], Union[NoResult, T]],
+        *,
+        dbos: Optional["DBOS"] = None,
     ) -> T:
+        # Make sure the executor pool is configured correctly
+        if dbos is not None:
+            await dbos._configure_asyncio_thread_pool()
         intercepted = await asyncio.to_thread(interceptor)
         return intercepted if not isinstance(intercepted, NoResult) else await func()
 
-    def intercept(self, interceptor: Callable[[], Union[NoResult, T]]) -> "Pending[T]":
-        return Pending[T](lambda: Pending._intercept(self._func, interceptor))
+    def intercept(
+        self,
+        interceptor: Callable[[], Union[NoResult, T]],
+        *,
+        dbos: Optional["DBOS"] = None,
+    ) -> "Pending[T]":
+        return Pending[T](
+            lambda: Pending._intercept(self._func, interceptor, dbos=dbos)
+        )
 
     @staticmethod
     async def _retry(
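The dbos keyword threaded through wrap, then, and intercept exists so the async Pending paths can call dbos._configure_asyncio_thread_pool() before their first asyncio.to_thread, ensuring blocking work lands on a properly sized executor (that method's body is not shown in this diff). A generic sketch of the ordering concern, with an arbitrary pool size:

    import asyncio
    from concurrent.futures import ThreadPoolExecutor

    async def main() -> None:
        # Size the default executor *before* any asyncio.to_thread call so
        # offloaded work runs on a pool you configured, not the lazy default.
        loop = asyncio.get_running_loop()
        loop.set_default_executor(ThreadPoolExecutor(max_workers=32))
        print(await asyncio.to_thread(sum, range(10)))

    asyncio.run(main())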
dbos/_serialization.py  CHANGED

@@ -12,8 +12,13 @@ class WorkflowInputs(TypedDict):
 
 
 def _validate_item(data: Any) -> None:
-    if isinstance(data, (types.
-        raise TypeError("Serialized data item should not be a
+    if isinstance(data, (types.MethodType)):
+        raise TypeError("Serialized data item should not be a class method")
+    if isinstance(data, (types.FunctionType)):
+        if jsonpickle.decode(jsonpickle.encode(data, unpicklable=True)) is None:
+            raise TypeError(
+                "Serialized function should be defined at the top level of a module"
+            )
 
 
 def serialize(data: Any) -> str:
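The relaxed check now accepts plain functions as serialized inputs so long as jsonpickle can round-trip them, which in practice means the function is importable from the top level of a module; bound methods are rejected outright. A small demonstration of the round-trip test the new code performs, assuming (as the check implies) that jsonpickle decodes an unresolvable function reference to None:

    import jsonpickle

    def top_level(x: int) -> int:
        return x + 1

    def make_nested():
        def inner() -> None:
            pass
        return inner

    # Mirrors the new check: encode the function by reference, then decode.
    ok = jsonpickle.decode(jsonpickle.encode(top_level, unpicklable=True))
    bad = jsonpickle.decode(jsonpickle.encode(make_nested(), unpicklable=True))
    print(ok is not None)  # importable at module top level
    print(bad is None)     # nested function: rejected by _validate_item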
dbos/_sys_db.py  CHANGED

@@ -740,6 +740,33 @@ class SystemDatabase(ABC):
         }
         return status
 
+    @db_retry()
+    def get_deduplicated_workflow(
+        self, queue_name: str, deduplication_id: str
+    ) -> Optional[str]:
+        """
+        Get the workflow ID associated with a given queue name and deduplication ID.
+
+        Args:
+            queue_name: The name of the queue
+            deduplication_id: The deduplication ID
+
+        Returns:
+            The workflow UUID if found, None otherwise
+        """
+        with self.engine.begin() as c:
+            row = c.execute(
+                sa.select(SystemSchema.workflow_status.c.workflow_uuid).where(
+                    SystemSchema.workflow_status.c.queue_name == queue_name,
+                    SystemSchema.workflow_status.c.deduplication_id == deduplication_id,
+                )
+            ).fetchone()
+
+            if row is None:
+                return None
+            workflow_id: str = row[0]
+            return workflow_id
+
     @db_retry()
     def await_workflow_result(self, workflow_id: str) -> Any:
         while True:
@@ -1221,7 +1248,10 @@ class SystemDatabase(ABC):
     def check_child_workflow(
         self, workflow_uuid: str, function_id: int
     ) -> Optional[str]:
-        sql = sa.select(SystemSchema.operation_outputs.c.child_workflow_id).where(
+        sql = sa.select(
+            SystemSchema.operation_outputs.c.child_workflow_id,
+            SystemSchema.operation_outputs.c.error,
+        ).where(
             SystemSchema.operation_outputs.c.workflow_uuid == workflow_uuid,
             SystemSchema.operation_outputs.c.function_id == function_id,
         )
@@ -1233,7 +1263,10 @@ class SystemDatabase(ABC):
 
         if row is None:
             return None
-        return str(row[0])
+        elif row[1]:
+            raise _serialization.deserialize_exception(row[1])
+        else:
+            return str(row[0])
 
     @db_retry()
     def send(
@@ -1901,8 +1934,13 @@ class SystemDatabase(ABC):
             )
             if self._debug_mode and recorded_output is None:
                 raise Exception(
-                    "called
+                    "called writeStream in debug mode without a previous execution"
                 )
+            if recorded_output is not None:
+                dbos_logger.debug(
+                    f"Replaying writeStream, id: {function_id}, key: {key}"
+                )
+                return
             # Find the maximum offset for this workflow_uuid and key combination
             max_offset_result = c.execute(
                 sa.select(sa.func.max(SystemSchema.streams.c.offset)).where(
dbos/_tracer.py  CHANGED

@@ -77,5 +77,12 @@ class DBOSTracer:
     def end_span(self, span: Span) -> None:
         span.end()
 
+    def get_current_span(self) -> Optional[Span]:
+        # Return the current active span if any. It might not be a DBOS span.
+        span = trace.get_current_span()
+        if span.get_span_context().is_valid:
+            return span
+        return None
+
 
 dbos_tracer = DBOSTracer()
{dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/RECORD  CHANGED

@@ -1,8 +1,8 @@
-dbos-1.
-dbos-1.
-dbos-1.
-dbos-1.
-dbos/__init__.py,sha256=
+dbos-1.14.0.dist-info/METADATA,sha256=5Fr-NGWHu3DheyFCoQnaLRzltjBocycWHP6sG6fZLU8,13266
+dbos-1.14.0.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+dbos-1.14.0.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-1.14.0.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos/__init__.py,sha256=pT4BuNLDCrIQX27vQG8NlfxX6PZRU7r9miq4thJTszU,982
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=e8ELhcDWqR3_PNobnNgUvLGh5lzZq0yFSF6dvtzoQRI,16267
 dbos/_alembic_migrations/env.py,sha256=38SIGVbmn_VV2x2u1aHLcPOoWgZ84eCymf3g_NljmbU,1626
@@ -24,14 +24,15 @@ dbos/_alembic_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCe
 dbos/_alembic_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py,sha256=m90Lc5YH0ZISSq1MyxND6oq3RZrZKrIqEsZtwJ1jWxA,1049
 dbos/_app_db.py,sha256=GsV-uYU0QsChWwQDxnrh8_iiZ_zMQB-bsP2jPGIe2aM,16094
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=
+dbos/_client.py,sha256=NgLpGQAPN1ehn6vIto2ToIvFUtprTWdEAbixu9wFZMU,18887
 dbos/_conductor/conductor.py,sha256=3E_hL3c9g9yWqKZkvI6KA0-ZzPMPRo06TOzT1esMiek,24114
 dbos/_conductor/protocol.py,sha256=q3rgLxINFtWFigdOONc-4gX4vn66UmMlJQD6Kj8LnL4,7420
-dbos/_context.py,sha256=
-dbos/_core.py,sha256=
+dbos/_context.py,sha256=DC0yC8feklTckClG0Nc-uSDeRuTu7emLP5NmcuJiogk,27542
+dbos/_core.py,sha256=plF80l5Rh_bBpy5PFZy3p3ux6agmYkUgZq8e36i68F4,50443
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=
+dbos/_dbos.py,sha256=AgkcE9YSC9KWsDUNfEhdbkfR9NjT0seZDAOunb3n61w,58201
 dbos/_dbos_config.py,sha256=_26ktif8qAZW4Ujg6dZfLkYO7dE4CI8b3IQbw_5YkpA,25710
+dbos/_debouncer.py,sha256=KMu64pbq7mUAY4g_9_gzP4a4FKDOv5BbLhkkh9PcBuA,15217
 dbos/_debug.py,sha256=99j2SChWmCPAlZoDmjsJGe77tpU2LEa8E2TtLAnnh7o,1831
 dbos/_docker_pg_helper.py,sha256=tLJXWqZ4S-ExcaPnxg_i6cVxL6ZxrYlZjaGsklY-s2I,6115
 dbos/_error.py,sha256=GwO0Ng4d4iB52brY09-Ss6Cz_V28Xc0D0cRCzZ6XmNM,8688
@@ -40,9 +41,9 @@ dbos/_fastapi.py,sha256=D0H6TPYYTJ0LnkKn7t9sfPwPgDx6fO8AZQtvBcH3ibI,3277
 dbos/_flask.py,sha256=Npnakt-a3W5OykONFRkDRnumaDhTQmA0NPdUCGRYKXE,1652
 dbos/_kafka.py,sha256=Gm4fHWl7gYb-i5BMvwNwm5Km3z8zQpseqdMgqgFjlGI,4252
 dbos/_kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
-dbos/_logger.py,sha256=
+dbos/_logger.py,sha256=iS4AviQViSKiz-IYCjZLWmW9x616IA-Ms9Xtwq1dcx4,4739
 dbos/_migration.py,sha256=wJrSTYerlkYDFYmkTqo4a7n4WaKmqXh8BdkUEzgSEQQ,10898
-dbos/_outcome.py,sha256=
+dbos/_outcome.py,sha256=7HvosMfEHTh1U5P6xok7kFTGLwa2lPaul0YApb3UnN4,8191
 dbos/_queue.py,sha256=0kJTPwXy3nZ4Epzt-lHky9M9S4L31645drPGFR8fIJY,4854
 dbos/_recovery.py,sha256=K-wlFhdf4yGRm6cUzyhcTjQUS0xp2T5rdNMLiiBErYg,2882
 dbos/_registrations.py,sha256=bEOntObnWaBylnebr5ZpcX2hk7OVLDd1z4BvW4_y3zA,7380
@@ -51,8 +52,8 @@ dbos/_scheduler.py,sha256=CWeGVfl9h51VXfxt80y5Da_5pE8SPty_AYkfpJkkMxQ,2117
 dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
 dbos/_schemas/system_database.py,sha256=-dAKk-_Y3vzbpLT4ei-sIrBQgFyQiwPj1enZb1TYc8I,4943
-dbos/_serialization.py,sha256=
-dbos/_sys_db.py,sha256=
+dbos/_serialization.py,sha256=VOMpwuJ6IskOUEegFDPqjIoV5PoBWfH9BgtnuI1jLok,3906
+dbos/_sys_db.py,sha256=SspVk-wYmE6xZLuyYQUclwh_AMjnkDXcog5g5WmYn7c,83036
 dbos/_sys_db_postgres.py,sha256=WcG-f1CUzUNBGEOjqKEp6DDraN63jTnJ6CAfieCcxOs,7555
 dbos/_sys_db_sqlite.py,sha256=xT9l-czMhLmfuu5UcnBzAyUxSFgzt3XtEWx9t_D8mZs,7361
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
@@ -65,7 +66,7 @@ dbos/_templates/dbos-db-starter/migrations/env.py.dbos,sha256=IBB_gz9RjC20HPfOTG
 dbos/_templates/dbos-db-starter/migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
 dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sha256=MpS7LGaJS0CpvsjhfDkp9EJqvMvVCjRPfUp4c0aE2ys,941
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
-dbos/_tracer.py,sha256=
+dbos/_tracer.py,sha256=1MtRa0bS3ZfpZN3dw-O57_M1lc76WYK2bAThLWW2TSc,3408
 dbos/_utils.py,sha256=ZdoM1MDbHnlJrh31zfhp3iX62bAxK1kyvMwXnltC_84,1779
 dbos/_workflow_commands.py,sha256=EmmAaQfRWeOZm_WPTznuU-O3he3jiSzzT9VpYrhxugE,4835
 dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
@@ -75,4 +76,4 @@ dbos/cli/migration.py,sha256=5GiyagLZkyVvDz3StYxtFdkFoKFCmh6eSXjzsIGhZ_A,3330
 dbos/dbos-config.schema.json,sha256=LyUT1DOTaAwOP6suxQGS5KemVIqXGPyu_q7Hbo0neA8,6192
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-1.
+dbos-1.14.0.dist-info/RECORD,,
{dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/WHEEL  (file without changes)
{dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/entry_points.txt  (file without changes)
{dbos-1.13.1.dist-info → dbos-1.14.0.dist-info}/licenses/LICENSE  (file without changes)