dbos 0.25.1__py3-none-any.whl → 0.26.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/__init__.py +1 -2
- dbos/_admin_server.py +56 -6
- dbos/_app_db.py +135 -8
- dbos/_client.py +175 -15
- dbos/_conductor/conductor.py +2 -1
- dbos/_conductor/protocol.py +1 -2
- dbos/_context.py +62 -0
- dbos/_core.py +115 -65
- dbos/_dbos.py +152 -106
- dbos/_dbos_config.py +53 -67
- dbos/_debug.py +1 -1
- dbos/_docker_pg_helper.py +191 -0
- dbos/_error.py +61 -15
- dbos/_event_loop.py +67 -0
- dbos/_kafka.py +1 -1
- dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py +44 -0
- dbos/_queue.py +2 -1
- dbos/_recovery.py +1 -1
- dbos/_registrations.py +20 -5
- dbos/_scheduler.py +1 -1
- dbos/_schemas/application_database.py +1 -0
- dbos/_schemas/system_database.py +3 -1
- dbos/_sys_db.py +533 -130
- dbos/_utils.py +2 -0
- dbos/_workflow_commands.py +49 -104
- dbos/cli/cli.py +70 -4
- dbos/dbos-config.schema.json +26 -21
- {dbos-0.25.1.dist-info → dbos-0.26.0.dist-info}/METADATA +1 -1
- {dbos-0.25.1.dist-info → dbos-0.26.0.dist-info}/RECORD +32 -33
- dbos/_cloudutils/authentication.py +0 -163
- dbos/_cloudutils/cloudutils.py +0 -254
- dbos/_cloudutils/databases.py +0 -241
- dbos/_db_wizard.py +0 -220
- {dbos-0.25.1.dist-info → dbos-0.26.0.dist-info}/WHEEL +0 -0
- {dbos-0.25.1.dist-info → dbos-0.26.0.dist-info}/entry_points.txt +0 -0
- {dbos-0.25.1.dist-info → dbos-0.26.0.dist-info}/licenses/LICENSE +0 -0
dbos/_context.py
CHANGED
@@ -93,6 +93,11 @@ class DBOSContext:
         self.assumed_role: Optional[str] = None
         self.step_status: Optional[StepStatus] = None
 
+        # A user-specified workflow timeout. Takes priority over a propagated deadline.
+        self.workflow_timeout_ms: Optional[int] = None
+        # A propagated workflow deadline.
+        self.workflow_deadline_epoch_ms: Optional[int] = None
+
     def create_child(self) -> DBOSContext:
         rv = DBOSContext()
         rv.logger = self.logger
@@ -360,11 +365,60 @@ class SetWorkflowID:
         return False  # Did not handle
 
 
+class SetWorkflowTimeout:
+    """
+    Set the workflow timeout (in seconds) to be used for the enclosed workflow invocations.
+
+    Typical Usage
+    ```
+    with SetWorkflowTimeout(<timeout in seconds>):
+        result = workflow_function(...)
+    ```
+    """
+
+    def __init__(self, workflow_timeout_sec: Optional[float]) -> None:
+        if workflow_timeout_sec and not workflow_timeout_sec > 0:
+            raise Exception(
+                f"Invalid workflow timeout {workflow_timeout_sec}. Timeouts must be positive."
+            )
+        self.created_ctx = False
+        self.workflow_timeout_ms = (
+            int(workflow_timeout_sec * 1000)
+            if workflow_timeout_sec is not None
+            else None
+        )
+        self.saved_workflow_timeout: Optional[int] = None
+
+    def __enter__(self) -> SetWorkflowTimeout:
+        # Code to create a basic context
+        ctx = get_local_dbos_context()
+        if ctx is None:
+            self.created_ctx = True
+            _set_local_dbos_context(DBOSContext())
+        ctx = assert_current_dbos_context()
+        self.saved_workflow_timeout = ctx.workflow_timeout_ms
+        ctx.workflow_timeout_ms = self.workflow_timeout_ms
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> Literal[False]:
+        assert_current_dbos_context().workflow_timeout_ms = self.saved_workflow_timeout
+        # Code to clean up the basic context if we created it
+        if self.created_ctx:
+            _clear_local_dbos_context()
+        return False  # Did not handle
+
+
 class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.created_ctx = False
         self.attributes = attributes
         self.is_temp_workflow = attributes["name"] == "temp_wf"
+        self.saved_workflow_timeout: Optional[int] = None
 
     def __enter__(self) -> DBOSContext:
         # Code to create a basic context
@@ -374,6 +428,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
             ctx = DBOSContext()
             _set_local_dbos_context(ctx)
         assert not ctx.is_within_workflow()
+        # Unset the workflow_timeout_ms context var so it is not applied to this
+        # workflow's children (instead we propagate the deadline)
+        self.saved_workflow_timeout = ctx.workflow_timeout_ms
+        ctx.workflow_timeout_ms = None
         ctx.start_workflow(
             None, self.attributes, self.is_temp_workflow
         )  # Will get from the context's next workflow ID
@@ -388,6 +446,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         ctx = assert_current_dbos_context()
         assert ctx.is_within_workflow()
         ctx.end_workflow(exc_value, self.is_temp_workflow)
+        # Restore the saved workflow timeout
+        ctx.workflow_timeout_ms = self.saved_workflow_timeout
+        # Clear any propagating timeout
+        ctx.workflow_deadline_epoch_ms = None
        # Code to clean up the basic context if we created it
        if self.created_ctx:
            _clear_local_dbos_context()
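Editor's note: the `SetWorkflowTimeout` context manager added above matches the "Typical Usage" in its docstring. A minimal sketch of how it composes with a workflow, assuming the standard `DBOS` decorators and that `SetWorkflowTimeout` is exported from the package root (the `dbos/__init__.py` change suggests it is); `slow_workflow` is a hypothetical example, not part of this diff:

```python
from dbos import DBOS, SetWorkflowTimeout

@DBOS.workflow()
def slow_workflow() -> str:
    DBOS.sleep(60)  # simulate long-running work
    return "done"

# Workflows invoked inside the block inherit a 10-second timeout.
# Per EnterDBOSWorkflow above, the timeout is converted to a deadline
# when the workflow starts and is not re-applied to its children.
with SetWorkflowTimeout(10.0):
    slow_workflow()  # cancelled if still running after 10 seconds

# Passing None clears any timeout set by an enclosing block.
with SetWorkflowTimeout(None):
    slow_workflow()
```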
dbos/_core.py
CHANGED
@@ -3,6 +3,7 @@ import functools
 import inspect
 import json
 import sys
+import threading
 import time
 import traceback
 from concurrent.futures import Future
@@ -14,11 +15,9 @@ from typing import (
     Coroutine,
     Generic,
     Optional,
-    Tuple,
     TypeVar,
     Union,
     cast,
-    overload,
 )
 
 from dbos._outcome import Immediate, NoResult, Outcome, Pending
@@ -52,13 +51,13 @@ from ._error import (
     DBOSMaxStepRetriesExceeded,
     DBOSNonExistentWorkflowError,
     DBOSRecoveryError,
+    DBOSUnexpectedStepError,
     DBOSWorkflowCancelledError,
     DBOSWorkflowConflictIDError,
     DBOSWorkflowFunctionNotFoundError,
 )
 from ._registrations import (
     DEFAULT_MAX_RECOVERY_ATTEMPTS,
-    DBOSFuncInfo,
     get_config_name,
     get_dbos_class_name,
     get_dbos_func_name,
@@ -74,6 +73,7 @@ from ._serialization import WorkflowInputs
 from ._sys_db import (
     GetEventWorkflowContext,
     OperationResultInternal,
+    WorkflowStatus,
     WorkflowStatusInternal,
     WorkflowStatusString,
 )
@@ -86,7 +86,6 @@ if TYPE_CHECKING:
         DBOSRegistry,
         IsolationLevel,
     )
-    from ._workflow_commands import WorkflowStatus
 
 from sqlalchemy.exc import DBAPIError, InvalidRequestError
 
@@ -118,7 +117,7 @@ class WorkflowHandleFuture(Generic[R]):
         self.dbos._sys_db.record_get_result(self.workflow_id, serialized_r, None)
         return r
 
-    def get_status(self) -> "WorkflowStatus":
+    def get_status(self) -> WorkflowStatus:
         stat = self.dbos.get_workflow_status(self.workflow_id)
         if stat is None:
             raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -145,7 +144,7 @@ class WorkflowHandlePolling(Generic[R]):
         self.dbos._sys_db.record_get_result(self.workflow_id, serialized_r, None)
         return r
 
-    def get_status(self) -> "WorkflowStatus":
+    def get_status(self) -> WorkflowStatus:
         stat = self.dbos.get_workflow_status(self.workflow_id)
         if stat is None:
             raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -180,7 +179,7 @@ class WorkflowHandleAsyncTask(Generic[R]):
         )
         return r
 
-    async def get_status(self) -> "WorkflowStatus":
+    async def get_status(self) -> WorkflowStatus:
         stat = await asyncio.to_thread(self.dbos.get_workflow_status, self.workflow_id)
         if stat is None:
             raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -216,7 +215,7 @@ class WorkflowHandleAsyncPolling(Generic[R]):
         )
         return r
 
-    async def get_status(self) -> "WorkflowStatus":
+    async def get_status(self) -> WorkflowStatus:
         stat = await asyncio.to_thread(self.dbos.get_workflow_status, self.workflow_id)
         if stat is None:
             raise DBOSNonExistentWorkflowError(self.workflow_id)
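With `WorkflowStatus` now imported from `dbos._sys_db` instead of a `TYPE_CHECKING` forward reference, `get_status` returns a concrete type on all four handle classes. A hedged usage sketch (`double` is hypothetical, and the `status` field name is assumed from the public `WorkflowStatus` API):

```python
from dbos import DBOS

@DBOS.workflow()
def double(x: int) -> int:
    return x * 2

handle = DBOS.start_workflow(double, 21)
status = handle.get_status()  # a WorkflowStatus object, no longer a forward ref
print(status.status)          # e.g. "PENDING" or "SUCCESS"
print(handle.get_result())    # 42
```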
@@ -226,19 +225,30 @@
 def _init_workflow(
     dbos: "DBOS",
     ctx: DBOSContext,
+    *,
     inputs: WorkflowInputs,
     wf_name: str,
     class_name: Optional[str],
     config_name: Optional[str],
-    temp_wf_type: Optional[str],
-    queue: Optional[str] = None,
-    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+    queue: Optional[str],
+    workflow_timeout_ms: Optional[int],
+    workflow_deadline_epoch_ms: Optional[int],
+    max_recovery_attempts: Optional[int],
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
         if len(ctx.workflow_id) > 0
         else ctx.id_assigned_for_next_workflow
     )
+
+    # In debug mode, just return the existing status
+    if dbos.debug_mode:
+        get_status_result = dbos._sys_db.get_workflow_status(wfid)
+        if get_status_result is None:
+            raise DBOSNonExistentWorkflowError(wfid)
+        return get_status_result
+
+    # Initialize a workflow status object from the context
     status: WorkflowStatusInternal = {
         "workflow_uuid": wfid,
         "status": (
@@ -266,31 +276,47 @@ def _init_workflow(
         "queue_name": queue,
         "created_at": None,
         "updated_at": None,
+        "workflow_timeout_ms": workflow_timeout_ms,
+        "workflow_deadline_epoch_ms": workflow_deadline_epoch_ms,
     }
 
     # If we have a class name, the first arg is the instance and do not serialize
     if class_name is not None:
         inputs = {"args": inputs["args"][1:], "kwargs": inputs["kwargs"]}
 
-
-
-
-
-
-
-    else:
-        # Synchronously record the status and inputs for workflows
-        # TODO: Make this transactional (and with the queue step below)
-        wf_status = dbos._sys_db.insert_workflow_status(
-            status, max_recovery_attempts=max_recovery_attempts
-        )
+    # Synchronously record the status and inputs for workflows
+    wf_status, workflow_deadline_epoch_ms = dbos._sys_db.init_workflow(
+        status,
+        _serialization.serialize_args(inputs),
+        max_recovery_attempts=max_recovery_attempts,
+    )
 
-
-
+    if workflow_deadline_epoch_ms is not None:
+        evt = threading.Event()
+        dbos.stop_events.append(evt)
 
-
-
+        def timeout_func() -> None:
+            try:
+                assert workflow_deadline_epoch_ms is not None
+                time_to_wait_sec = (
+                    workflow_deadline_epoch_ms - (time.time() * 1000)
+                ) / 1000
+                if time_to_wait_sec > 0:
+                    was_stopped = evt.wait(time_to_wait_sec)
+                    if was_stopped:
+                        return
+                dbos._sys_db.cancel_workflow(wfid)
+            except Exception as e:
+                dbos.logger.warning(
+                    f"Exception in timeout thread for workflow {wfid}: {e}"
+                )
+
+        timeout_thread = threading.Thread(target=timeout_func, daemon=True)
+        timeout_thread.start()
+        dbos._background_threads.append(timeout_thread)
 
+    ctx.workflow_deadline_epoch_ms = workflow_deadline_epoch_ms
+    status["workflow_deadline_epoch_ms"] = workflow_deadline_epoch_ms
     status["status"] = wf_status
     return status
 
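The deadline enforcement added to `_init_workflow` is a classic watchdog: park a daemon thread on a `threading.Event` until the deadline, and let shutdown interrupt the wait by setting the event. A self-contained sketch of the same pattern, stdlib only and with hypothetical names:

```python
import threading
import time
from typing import Callable

def start_watchdog(
    deadline_epoch_ms: int,
    on_timeout: Callable[[], None],
    stop_event: threading.Event,
) -> threading.Thread:
    def watchdog() -> None:
        wait_sec = (deadline_epoch_ms - time.time() * 1000) / 1000
        if wait_sec > 0:
            # wait() returns True if stop_event was set first (e.g. at
            # shutdown), in which case the timeout action is skipped.
            if stop_event.wait(wait_sec):
                return
        on_timeout()

    t = threading.Thread(target=watchdog, daemon=True)
    t.start()
    return t

# Fire in ~1 second unless stop.set() is called first.
stop = threading.Event()
start_watchdog(int(time.time() * 1000) + 1000, lambda: print("deadline hit"), stop)
time.sleep(1.5)
```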
@@ -364,7 +390,9 @@ def _execute_workflow_wthread(
         if isinstance(result, Immediate):
             return cast(Immediate[R], result)()
         else:
-            return
+            return dbos._background_event_loop.submit_coroutine(
+                cast(Pending[R], result)()
+            )
     except Exception:
         dbos.logger.error(
             f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
@@ -398,9 +426,7 @@ async def _execute_workflow_async(
         raise
 
 
-def execute_workflow_by_id(
-    dbos: "DBOS", workflow_id: str, startNew: bool = False
-) -> "WorkflowHandle[Any]":
+def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
     status = dbos._sys_db.get_workflow_status(workflow_id)
     if not status:
         raise DBOSRecoveryError(workflow_id, "Workflow status not found")
@@ -441,7 +467,7 @@ def execute_workflow_by_id(
         class_object = dbos._registry.class_info_map[class_name]
         inputs["args"] = (class_object,) + inputs["args"]
 
-    if startNew:
+    with SetWorkflowID(workflow_id):
         return start_workflow(
             dbos,
             wf_func,
@@ -450,16 +476,6 @@ def execute_workflow_by_id(
             *inputs["args"],
             **inputs["kwargs"],
         )
-    else:
-        with SetWorkflowID(workflow_id):
-            return start_workflow(
-                dbos,
-                wf_func,
-                status["queue_name"],
-                True,
-                *inputs["args"],
-                **inputs["kwargs"],
-            )
 
 
 def _get_new_wf() -> tuple[str, DBOSContext]:
@@ -516,6 +532,13 @@ def start_workflow(
         "kwargs": kwargs,
     }
 
+    local_ctx = get_local_dbos_context()
+    workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
+        local_ctx, queue_name
+    )
+    workflow_timeout_ms = (
+        local_ctx.workflow_timeout_ms if local_ctx is not None else None
+    )
     new_wf_id, new_wf_ctx = _get_new_wf()
 
     ctx = new_wf_ctx
@@ -534,8 +557,9 @@ def start_workflow(
         wf_name=get_dbos_func_name(func),
         class_name=get_dbos_class_name(fi, func, args),
         config_name=get_config_name(fi, func, args),
-        temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
+        workflow_timeout_ms=workflow_timeout_ms,
+        workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=fi.max_recovery_attempts,
     )
 
@@ -598,6 +622,10 @@ async def start_workflow_async(
         "kwargs": kwargs,
     }
 
+    local_ctx = get_local_dbos_context()
+    workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
+        local_ctx, queue_name
+    )
     new_wf_id, new_wf_ctx = _get_new_wf()
 
     ctx = new_wf_ctx
@@ -619,8 +647,9 @@ async def start_workflow_async(
         wf_name=get_dbos_func_name(func),
         class_name=get_dbos_class_name(fi, func, args),
         config_name=get_config_name(fi, func, args),
-        temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
+        workflow_timeout_ms=workflow_timeout_ms,
+        workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=fi.max_recovery_attempts,
     )
 
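Both `start_workflow` and `start_workflow_async` now resolve the timeout and deadline before creating the workflow context; for queued work the deadline stays `None` until dequeue, as `_get_timeout_deadline` at the end of this diff shows. A hedged sketch combining the two features, assuming the public `Queue` API; `process_item` is hypothetical:

```python
from dbos import DBOS, Queue, SetWorkflowTimeout

queue = Queue("example_queue")

@DBOS.workflow()
def process_item(item: str) -> str:
    return item.upper()

# The 30-second timeout is recorded with the enqueued workflow, but the
# concrete deadline is only assigned when a worker dequeues the task.
with SetWorkflowTimeout(30.0):
    handle = queue.enqueue(process_item, "widget")
print(handle.get_result())
```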
@@ -668,7 +697,7 @@ else:
 def workflow_wrapper(
     dbosreg: "DBOSRegistry",
     func: Callable[P, R],
-    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+    max_recovery_attempts: Optional[int] = DEFAULT_MAX_RECOVERY_ATTEMPTS,
 ) -> Callable[P, R]:
     func.__orig_func = func  # type: ignore
 
@@ -695,6 +724,9 @@ def workflow_wrapper(
             "kwargs": kwargs,
         }
         ctx = get_local_dbos_context()
+        workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
+            ctx, queue=None
+        )
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
@@ -732,7 +764,9 @@ def workflow_wrapper(
             wf_name=get_dbos_func_name(func),
             class_name=get_dbos_class_name(fi, func, args),
             config_name=get_config_name(fi, func, args),
-            temp_wf_type=get_temp_workflow_type(func),
+            queue=None,
+            workflow_timeout_ms=workflow_timeout_ms,
+            workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
             max_recovery_attempts=max_recovery_attempts,
         )
 
@@ -780,7 +814,7 @@
 
 
 def decorate_workflow(
-    reg: "DBOSRegistry", max_recovery_attempts: int
+    reg: "DBOSRegistry", max_recovery_attempts: Optional[int]
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:
     def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
@@ -794,19 +828,23 @@ def decorate_transaction(
     dbosreg: "DBOSRegistry", isolation_level: "IsolationLevel" = "SERIALIZABLE"
 ) -> Callable[[F], F]:
     def decorator(func: F) -> F:
+
+        transaction_name = func.__qualname__
+
         def invoke_tx(*args: Any, **kwargs: Any) -> Any:
             if dbosreg.dbos is None:
                 raise DBOSException(
                     f"Function {func.__name__} invoked before DBOS initialized"
                 )
 
+            dbos = dbosreg.dbos
             ctx = assert_current_dbos_context()
-            if dbosreg.is_workflow_cancelled(ctx.workflow_id):
+            status = dbos._sys_db.get_workflow_status(ctx.workflow_id)
+            if status and status["status"] == WorkflowStatusString.CANCELLED.value:
                 raise DBOSWorkflowCancelledError(
                     f"Workflow {ctx.workflow_id} is cancelled. Aborting transaction {func.__name__}."
                 )
 
-            dbos = dbosreg.dbos
             with dbos._app_db.sessionmaker() as session:
                 attributes: TracedAttributes = {
                     "name": func.__name__,
@@ -822,17 +860,12 @@ def decorate_transaction(
                     "txn_snapshot": "",  # TODO: add actual snapshot
                     "executor_id": None,
                     "txn_id": None,
+                    "function_name": transaction_name,
                 }
                 retry_wait_seconds = 0.001
                 backoff_factor = 1.5
                 max_retry_wait_seconds = 2.0
                 while True:
-                    # Check if the workflow is cancelled
-                    if dbosreg.is_workflow_cancelled(ctx.workflow_id):
-                        raise DBOSWorkflowCancelledError(
-                            f"Workflow {ctx.workflow_id} is cancelled. Aborting transaction {func.__name__}."
-                        )
-
                     has_recorded_error = False
                     txn_error: Optional[Exception] = None
                     try:
@@ -849,6 +882,7 @@ def decorate_transaction(
                             session,
                             ctx.workflow_id,
                             ctx.function_id,
+                            transaction_name,
                         )
                     )
                     if dbos.debug_mode and recorded_output is None:
@@ -912,6 +946,8 @@ def decorate_transaction(
                         )
                         txn_error = invalid_request_error
                         raise
+                    except DBOSUnexpectedStepError:
+                        raise
                     except Exception as error:
                         txn_error = error
                         raise
@@ -977,7 +1013,7 @@ def decorate_step(
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:
     def decorator(func: Callable[P, R]) -> Callable[P, R]:
 
-
+        step_name = func.__qualname__
 
         def invoke_step(*args: Any, **kwargs: Any) -> Any:
             if dbosreg.dbos is None:
@@ -991,13 +1027,6 @@ def decorate_step(
                 "operationType": OperationType.STEP.value,
             }
 
-            # Check if the workflow is cancelled
-            ctx = assert_current_dbos_context()
-            if dbosreg.is_workflow_cancelled(ctx.workflow_id):
-                raise DBOSWorkflowCancelledError(
-                    f"Workflow {ctx.workflow_id} is cancelled. Aborting step {func.__name__}."
-                )
-
             attempts = max_attempts if retries_allowed else 1
             max_retry_interval_seconds: float = 3600  # 1 Hour
 
@@ -1025,7 +1054,7 @@ def decorate_step(
             step_output: OperationResultInternal = {
                 "workflow_uuid": ctx.workflow_id,
                 "function_id": ctx.function_id,
-                "function_name": func.__name__,
+                "function_name": step_name,
                 "output": None,
                 "error": None,
             }
@@ -1043,7 +1072,7 @@ def decorate_step(
         def check_existing_result() -> Union[NoResult, R]:
             ctx = assert_current_dbos_context()
             recorded_output = dbos._sys_db.check_operation_execution(
-                ctx.workflow_id, ctx.function_id
+                ctx.workflow_id, ctx.function_id, step_name
             )
             if dbos.debug_mode and recorded_output is None:
                 raise DBOSException("Step output not found in debug mode")
@@ -1231,3 +1260,24 @@ def get_event(
     else:
         # Directly call it outside of a workflow
         return dbos._sys_db.get_event(workflow_id, key, timeout_seconds)
+
+
+def _get_timeout_deadline(
+    ctx: Optional[DBOSContext], queue: Optional[str]
+) -> tuple[Optional[int], Optional[int]]:
+    if ctx is None:
+        return None, None
+    # If a timeout is explicitly specified, use it over any propagated deadline
+    if ctx.workflow_timeout_ms:
+        if queue:
+            # Queued workflows are assigned a deadline on dequeue
+            return ctx.workflow_timeout_ms, None
+        else:
+            # Otherwise, compute the deadline immediately
+            return (
+                ctx.workflow_timeout_ms,
+                int(time.time() * 1000) + ctx.workflow_timeout_ms,
+            )
+    # Otherwise, return the propagated deadline, if any
+    else:
+        return None, ctx.workflow_deadline_epoch_ms
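The precedence rules in `_get_timeout_deadline` can be checked in isolation. A standalone mirror of the logic, for illustration only (the real helper takes a `DBOSContext`; the function and parameter names here are hypothetical):

```python
import time
from typing import Optional

def timeout_and_deadline(
    timeout_ms: Optional[int],
    propagated_deadline_ms: Optional[int],
    queue: Optional[str],
) -> tuple[Optional[int], Optional[int]]:
    if timeout_ms:  # an explicit timeout beats any propagated deadline
        if queue:   # queued workflows get their deadline on dequeue
            return timeout_ms, None
        return timeout_ms, int(time.time() * 1000) + timeout_ms
    return None, propagated_deadline_ms

now_ms = int(time.time() * 1000)
# Explicit 10s timeout, no queue: the deadline is computed immediately.
assert timeout_and_deadline(10_000, None, None)[1] >= now_ms + 10_000
# Explicit timeout on a queue: no deadline until dequeue.
assert timeout_and_deadline(10_000, None, "q") == (10_000, None)
# No timeout: the parent's deadline propagates unchanged.
assert timeout_and_deadline(None, now_ms + 5_000, None) == (None, now_ms + 5_000)
```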