dbos 0.27.0a7__py3-none-any.whl → 0.27.0a9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/__init__.py +2 -0
- dbos/_client.py +39 -10
- dbos/_context.py +50 -0
- dbos/_core.py +13 -0
- dbos/_dbos.py +8 -1
- dbos/_error.py +13 -0
- dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py +45 -0
- dbos/_queue.py +5 -3
- dbos/_schemas/system_database.py +9 -0
- dbos/_sys_db.py +63 -15
- dbos/_workflow_commands.py +15 -2
- {dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/METADATA +1 -1
- {dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/RECORD +16 -15
- {dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/WHEEL +0 -0
- {dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/entry_points.txt +0 -0
- {dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/licenses/LICENSE +0 -0
dbos/__init__.py
CHANGED
@@ -3,6 +3,7 @@ from ._client import DBOSClient, EnqueueOptions
 from ._context import (
     DBOSContextEnsure,
     DBOSContextSetAuth,
+    SetEnqueueOptions,
     SetWorkflowID,
     SetWorkflowTimeout,
 )
@@ -25,6 +26,7 @@ __all__ = [
     "KafkaMessage",
     "SetWorkflowID",
     "SetWorkflowTimeout",
+    "SetEnqueueOptions",
     "WorkflowHandle",
     "WorkflowHandleAsync",
     "WorkflowStatus",
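Taken together, these export changes make the new context manager available from the package root. A minimal sketch of the intended usage, assuming a configured DBOS application (the queue and workflow names are illustrative, not part of this diff):

```python
# Sketch only: assumes a configured DBOS application; "example_queue" and the
# workflow are illustrative names.
from dbos import DBOS, Queue, SetEnqueueOptions

queue = Queue("example_queue")

@DBOS.workflow()
def process(task: str) -> str:
    return f"processed {task}"

# While the block is active, enqueues carry the given deduplication ID.
with SetEnqueueOptions(deduplication_id="task-42"):
    handle = queue.enqueue(process, "task-42")
```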
dbos/_client.py
CHANGED
@@ -19,6 +19,7 @@ from dbos._error import DBOSNonExistentWorkflowError
 from dbos._registrations import DEFAULT_MAX_RECOVERY_ATTEMPTS
 from dbos._serialization import WorkflowInputs
 from dbos._sys_db import (
+    EnqueueOptionsInternal,
     StepInfo,
     SystemDatabase,
     WorkflowStatus,
@@ -42,6 +43,7 @@ class EnqueueOptions(TypedDict):
     workflow_id: NotRequired[str]
     app_version: NotRequired[str]
     workflow_timeout: NotRequired[float]
+    deduplication_id: NotRequired[str]
 
 
 class WorkflowHandleClientPolling(Generic[R]):
@@ -112,6 +114,9 @@ class DBOSClient:
         if workflow_id is None:
             workflow_id = str(uuid.uuid4())
         workflow_timeout = options.get("workflow_timeout", None)
+        enqueue_options_internal: EnqueueOptionsInternal = {
+            "deduplication_id": options.get("deduplication_id"),
+        }
 
         status: WorkflowStatusInternal = {
             "workflow_uuid": workflow_id,
@@ -144,7 +149,10 @@ class DBOSClient:
         }
 
         self._sys_db.init_workflow(
-            status,
+            status,
+            _serialization.serialize_args(inputs),
+            max_recovery_attempts=None,
+            enqueue_options=enqueue_options_internal,
         )
         return workflow_id
 
@@ -235,11 +243,13 @@ class DBOSClient:
     async def cancel_workflow_async(self, workflow_id: str) -> None:
         await asyncio.to_thread(self.cancel_workflow, workflow_id)
 
-    def resume_workflow(self, workflow_id: str) ->
+    def resume_workflow(self, workflow_id: str) -> WorkflowHandle[Any]:
         self._sys_db.resume_workflow(workflow_id)
+        return WorkflowHandleClientPolling[Any](workflow_id, self._sys_db)
 
-    async def resume_workflow_async(self, workflow_id: str) ->
+    async def resume_workflow_async(self, workflow_id: str) -> WorkflowHandleAsync[Any]:
         await asyncio.to_thread(self.resume_workflow, workflow_id)
+        return WorkflowHandleClientAsyncPolling[Any](workflow_id, self._sys_db)
 
     def list_workflows(
         self,
@@ -353,16 +363,35 @@ class DBOSClient:
     async def list_workflow_steps_async(self, workflow_id: str) -> List[StepInfo]:
         return await asyncio.to_thread(self.list_workflow_steps, workflow_id)
 
-    def fork_workflow(
+    def fork_workflow(
+        self,
+        workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str] = None,
+    ) -> WorkflowHandle[Any]:
         forked_workflow_id = fork_workflow(
-            self._sys_db,
+            self._sys_db,
+            self._app_db,
+            workflow_id,
+            start_step,
+            application_version=application_version,
         )
-        return WorkflowHandleClientPolling[
+        return WorkflowHandleClientPolling[Any](forked_workflow_id, self._sys_db)
 
     async def fork_workflow_async(
-        self,
-
+        self,
+        workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str] = None,
+    ) -> WorkflowHandleAsync[Any]:
         forked_workflow_id = await asyncio.to_thread(
-            fork_workflow,
+            fork_workflow,
+            self._sys_db,
+            self._app_db,
+            workflow_id,
+            start_step,
+            application_version=application_version,
        )
-        return WorkflowHandleClientAsyncPolling[
+        return WorkflowHandleClientAsyncPolling[Any](forked_workflow_id, self._sys_db)
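For external processes, the same option surfaces through DBOSClient via the EnqueueOptions TypedDict, and resume_workflow/fork_workflow now hand back polling handles instead of returning nothing. A hedged sketch of the client-side usage (the connection string and names are illustrative; it assumes EnqueueOptions also carries the required queue_name and workflow_name keys, which sit outside this hunk):

```python
# Sketch only: connection string and names are illustrative.
from dbos import DBOSClient, EnqueueOptions

client = DBOSClient("postgresql://postgres:dbos@localhost:5432/app_db")

options: EnqueueOptions = {
    "queue_name": "example_queue",
    "workflow_name": "process",
    "deduplication_id": "task-42",  # new NotRequired key in this release
}
handle = client.enqueue(options, "task-42")

# resume_workflow now returns a handle that can be polled for the result.
resumed = client.resume_workflow(handle.get_workflow_id())
result = resumed.get_result()
```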
dbos/_context.py
CHANGED
@@ -98,6 +98,9 @@ class DBOSContext:
         # A propagated workflow deadline.
         self.workflow_deadline_epoch_ms: Optional[int] = None
 
+        # A user-specified deduplication ID for the enqueuing workflow.
+        self.deduplication_id: Optional[str] = None
+
     def create_child(self) -> DBOSContext:
         rv = DBOSContext()
         rv.logger = self.logger
@@ -413,12 +416,53 @@ class SetWorkflowTimeout:
         return False  # Did not handle
 
 
+class SetEnqueueOptions:
+    """
+    Set the workflow enqueue options for the enclosed enqueue operation.
+
+    Usage:
+    ```
+    with SetEnqueueOptions(deduplication_id=<deduplication id>):
+        queue.enqueue(...)
+    ```
+    """
+
+    def __init__(self, *, deduplication_id: Optional[str] = None) -> None:
+        self.created_ctx = False
+        self.deduplication_id: Optional[str] = deduplication_id
+        self.saved_deduplication_id: Optional[str] = None
+
+    def __enter__(self) -> SetEnqueueOptions:
+        # Code to create a basic context
+        ctx = get_local_dbos_context()
+        if ctx is None:
+            self.created_ctx = True
+            _set_local_dbos_context(DBOSContext())
+        ctx = assert_current_dbos_context()
+        self.saved_deduplication_id = ctx.deduplication_id
+        ctx.deduplication_id = self.deduplication_id
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> Literal[False]:
+        assert_current_dbos_context().deduplication_id = self.saved_deduplication_id
+        # Code to clean up the basic context if we created it
+        if self.created_ctx:
+            _clear_local_dbos_context()
+        return False
+
+
 class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.created_ctx = False
         self.attributes = attributes
         self.is_temp_workflow = attributes["name"] == "temp_wf"
         self.saved_workflow_timeout: Optional[int] = None
+        self.saved_deduplication_id: Optional[str] = None
 
     def __enter__(self) -> DBOSContext:
         # Code to create a basic context
@@ -432,6 +476,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         # workflow's children (instead we propagate the deadline)
         self.saved_workflow_timeout = ctx.workflow_timeout_ms
         ctx.workflow_timeout_ms = None
+        # Unset the deduplication_id context var so it is not applied to this
+        # workflow's children
+        self.saved_deduplication_id = ctx.deduplication_id
+        ctx.deduplication_id = None
         ctx.start_workflow(
             None, self.attributes, self.is_temp_workflow
         )  # Will get from the context's next workflow ID
@@ -450,6 +498,8 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         ctx.workflow_timeout_ms = self.saved_workflow_timeout
         # Clear any propagating timeout
         ctx.workflow_deadline_epoch_ms = None
+        # Restore the saved deduplication ID
+        ctx.deduplication_id = self.saved_deduplication_id
         # Code to clean up the basic context if we created it
         if self.created_ctx:
             _clear_local_dbos_context()
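The save/restore dance in __enter__ and __exit__ means the setting is scoped and nests cleanly. A small sketch of that behavior (get_local_dbos_context is an internal helper, imported here only to observe the context state):

```python
# Sketch only: pokes at internal context state purely to illustrate the
# save/restore semantics of SetEnqueueOptions.
from dbos import SetEnqueueOptions
from dbos._context import get_local_dbos_context

with SetEnqueueOptions(deduplication_id="outer"):
    with SetEnqueueOptions(deduplication_id="inner"):
        ctx = get_local_dbos_context()
        assert ctx is not None and ctx.deduplication_id == "inner"
    # On exit, the inner block restores the value saved on entry.
    ctx = get_local_dbos_context()
    assert ctx is not None and ctx.deduplication_id == "outer"
```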
dbos/_core.py
CHANGED
@@ -71,6 +71,7 @@ from ._registrations import (
 from ._roles import check_required_roles
 from ._serialization import WorkflowInputs
 from ._sys_db import (
+    EnqueueOptionsInternal,
     GetEventWorkflowContext,
     OperationResultInternal,
     WorkflowStatus,
@@ -234,6 +235,7 @@ def _init_workflow(
     workflow_timeout_ms: Optional[int],
     workflow_deadline_epoch_ms: Optional[int],
     max_recovery_attempts: Optional[int],
+    enqueue_options: Optional[EnqueueOptionsInternal],
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -289,6 +291,7 @@ def _init_workflow(
         status,
         _serialization.serialize_args(inputs),
         max_recovery_attempts=max_recovery_attempts,
+        enqueue_options=enqueue_options,
     )
 
     if workflow_deadline_epoch_ms is not None:
@@ -539,6 +542,9 @@ def start_workflow(
     workflow_timeout_ms = (
         local_ctx.workflow_timeout_ms if local_ctx is not None else None
     )
+    enqueue_options = EnqueueOptionsInternal(
+        deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
+    )
     new_wf_id, new_wf_ctx = _get_new_wf()
 
     ctx = new_wf_ctx
@@ -561,6 +567,7 @@ def start_workflow(
         workflow_timeout_ms=workflow_timeout_ms,
         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=fi.max_recovery_attempts,
+        enqueue_options=enqueue_options,
     )
 
     wf_status = status["status"]
@@ -626,6 +633,9 @@ async def start_workflow_async(
     workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
         local_ctx, queue_name
     )
+    enqueue_options = EnqueueOptionsInternal(
+        deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
+    )
     new_wf_id, new_wf_ctx = _get_new_wf()
 
     ctx = new_wf_ctx
@@ -651,6 +661,7 @@ async def start_workflow_async(
         workflow_timeout_ms=workflow_timeout_ms,
         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=fi.max_recovery_attempts,
+        enqueue_options=enqueue_options,
     )
 
     if ctx.has_parent():
@@ -727,6 +738,7 @@ def workflow_wrapper(
         workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
             ctx, queue=None
         )
+
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
@@ -768,6 +780,7 @@ def workflow_wrapper(
         workflow_timeout_ms=workflow_timeout_ms,
         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=max_recovery_attempts,
+        enqueue_options=None,
     )
 
     # TODO: maybe modify the parameters if they've been changed by `_init_workflow`
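The recurring pattern in both start paths is the same: read deduplication_id off the ambient context, if any, and package it into the internal options dict handed to _init_workflow; EnterDBOSWorkflow then unsets it so it does not leak into child workflows. A self-contained sketch of that capture step (the context type is a simplified stand-in):

```python
# Sketch only: simplified stand-ins for the context and options types.
from typing import Optional, TypedDict

class EnqueueOptionsInternal(TypedDict):
    deduplication_id: Optional[str]

class FakeContext:
    def __init__(self, deduplication_id: Optional[str]) -> None:
        self.deduplication_id = deduplication_id

def capture_enqueue_options(local_ctx: Optional[FakeContext]) -> EnqueueOptionsInternal:
    # Mirrors the guard in start_workflow / start_workflow_async:
    # no ambient context means no deduplication ID.
    return EnqueueOptionsInternal(
        deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
    )

assert capture_enqueue_options(None) == {"deduplication_id": None}
assert capture_enqueue_options(FakeContext("t-1")) == {"deduplication_id": "t-1"}
```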
dbos/_dbos.py
CHANGED
@@ -978,7 +978,13 @@ class DBOS:
         return cls.fork_workflow(workflow_id, 1)
 
     @classmethod
-    def fork_workflow(
+    def fork_workflow(
+        cls,
+        workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str] = None,
+    ) -> WorkflowHandle[Any]:
         """Restart a workflow with a new workflow ID from a specific step"""
 
         def fn() -> str:
@@ -988,6 +994,7 @@ class DBOS:
             _get_dbos_instance()._app_db,
             workflow_id,
             start_step,
+            application_version=application_version,
         )
 
         new_id = _get_dbos_instance()._sys_db.call_function_as_step(
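With the expanded signature, a fork can now be pinned to a particular application version, which matters when replaying old workflows against newer code. A sketch of the call (the workflow ID and version string are illustrative):

```python
# Sketch only: IDs and version strings are illustrative.
from dbos import DBOS

handle = DBOS.fork_workflow(
    "original-workflow-id",
    2,  # restart from step 2, reusing recorded results for earlier steps
    application_version="v2",  # pin the fork to this code version
)
result = handle.get_result()
```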
dbos/_error.py
CHANGED
@@ -61,6 +61,7 @@ class DBOSErrorCode(Enum):
     ConflictingWorkflowError = 9
     WorkflowCancelled = 10
     UnexpectedStep = 11
+    QueueDeduplicated = 12
     ConflictingRegistrationError = 25
 
 
@@ -178,6 +179,18 @@ class DBOSUnexpectedStepError(DBOSException):
         )
 
 
+class DBOSQueueDeduplicatedError(DBOSException):
+    """Exception raised when a workflow is deduplicated in the queue."""
+
+    def __init__(
+        self, workflow_id: str, queue_name: str, deduplication_id: str
+    ) -> None:
+        super().__init__(
+            f"Workflow {workflow_id} was deduplicated due to an existing workflow in queue {queue_name} with deduplication ID {deduplication_id}.",
+            dbos_error_code=DBOSErrorCode.QueueDeduplicated.value,
+        )
+
+
 #######################################
 ## BaseException
 #######################################
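Callers can catch the new error to detect that an equivalent workflow is already pending. A sketch, assuming a queue and workflow defined as in the earlier examples:

```python
# Sketch only: queue and workflow are illustrative, as in the earlier examples.
from dbos import DBOS, Queue, SetEnqueueOptions
from dbos._error import DBOSQueueDeduplicatedError

queue = Queue("example_queue")

@DBOS.workflow()
def process(task: str) -> str:
    return f"processed {task}"

try:
    with SetEnqueueOptions(deduplication_id="task-42"):
        queue.enqueue(process, "task-42")
except DBOSQueueDeduplicatedError:
    # Another workflow with deduplication ID "task-42" is already pending
    # on "example_queue"; skip or retry later.
    DBOS.logger.info("duplicate enqueue rejected")
```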
dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py
ADDED
@@ -0,0 +1,45 @@
+"""add queue dedup
+
+Revision ID: 27ac6900c6ad
+Revises: 83f3732ae8e7
+Create Date: 2025-04-23 16:18:48.530047
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "27ac6900c6ad"
+down_revision: Union[str, None] = "83f3732ae8e7"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "workflow_queue",
+        sa.Column(
+            "deduplication_id",
+            sa.Text(),
+            nullable=True,
+        ),
+        schema="dbos",
+    )
+
+    # Unique constraint for queue_name, deduplication_id
+    op.create_unique_constraint(
+        "uq_workflow_queue_name_dedup_id",
+        "workflow_queue",
+        ["queue_name", "deduplication_id"],
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_constraint(
+        "uq_workflow_queue_name_dedup_id", "workflow_queue", schema="dbos"
+    )
+    op.drop_column("workflow_queue", "deduplication_id", schema="dbos")
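Note that because Postgres treats NULLs as distinct in unique constraints, rows enqueued without a deduplication_id never collide with one another; only two pending entries with the same (queue_name, deduplication_id) pair conflict. For reference, the DDL this migration roughly corresponds to (Alembic emits the actual statements; this is an approximation):

```python
# Approximate SQL equivalent of the migration above, for reference only.
# NULL deduplication_id values never violate the unique constraint.
UPGRADE_SQL = """
ALTER TABLE dbos.workflow_queue ADD COLUMN deduplication_id TEXT;
ALTER TABLE dbos.workflow_queue
    ADD CONSTRAINT uq_workflow_queue_name_dedup_id
    UNIQUE (queue_name, deduplication_id);
"""
DOWNGRADE_SQL = """
ALTER TABLE dbos.workflow_queue
    DROP CONSTRAINT uq_workflow_queue_name_dedup_id;
ALTER TABLE dbos.workflow_queue DROP COLUMN deduplication_id;
"""
```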
dbos/_queue.py
CHANGED
@@ -99,6 +99,8 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
                 f"Exception encountered in queue thread: {traceback.format_exc()}"
             )
         except Exception:
-
-
-
+            if not stop_event.is_set():
+                # Only print the error if the thread is not stopping
+                dbos.logger.warning(
+                    f"Exception encountered in queue thread: {traceback.format_exc()}"
+                )
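The restored handler follows a common shutdown-aware pattern: suppress logging once the stop event is set so teardown does not produce spurious warnings. A standalone sketch of the pattern:

```python
# Standalone sketch of the shutdown-aware logging pattern used above.
import logging
import threading
import traceback

logger = logging.getLogger("queue-example")

def poll_loop(stop_event: threading.Event) -> None:
    while not stop_event.is_set():
        try:
            pass  # dequeue and dispatch work here
        except Exception:
            if not stop_event.is_set():
                # Only log if the thread is not stopping; shutdown often
                # interrupts in-flight work and would otherwise spam warnings.
                logger.warning(
                    f"Exception encountered in queue thread: {traceback.format_exc()}"
                )
```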
dbos/_schemas/system_database.py
CHANGED
@@ -10,6 +10,7 @@ from sqlalchemy import (
     String,
     Table,
     Text,
+    UniqueConstraint,
     text,
 )
 
@@ -174,4 +175,12 @@ class SystemSchema:
             "completed_at_epoch_ms",
             BigInteger(),
         ),
+        Column(
+            "deduplication_id",
+            Text,
+            nullable=True,
+        ),
+        UniqueConstraint(
+            "queue_name", "deduplication_id", name="uq_workflow_queue_name_dedup_id"
+        ),
     )
dbos/_sys_db.py
CHANGED
@@ -37,6 +37,7 @@ from ._error import (
     DBOSConflictingWorkflowError,
     DBOSDeadLetterQueueError,
     DBOSNonExistentWorkflowError,
+    DBOSQueueDeduplicatedError,
     DBOSUnexpectedStepError,
     DBOSWorkflowCancelledError,
     DBOSWorkflowConflictIDError,
@@ -135,6 +136,10 @@ class WorkflowStatusInternal(TypedDict):
     workflow_deadline_epoch_ms: Optional[int]
 
 
+class EnqueueOptionsInternal(TypedDict):
+    deduplication_id: Optional[str]  # Unique ID for deduplication on a queue
+
+
 class RecordedResult(TypedDict):
     output: Optional[str]  # JSON (jsonpickle)
     error: Optional[str]  # JSON (jsonpickle)
@@ -539,15 +544,17 @@ class SystemDatabase:
             # Execute with snapshot isolation in case of concurrent calls on the same workflow
             c.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
             # Check the status of the workflow. If it is complete, do nothing.
-
+            status_row = c.execute(
                 sa.select(
                     SystemSchema.workflow_status.c.status,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
             ).fetchone()
+            if status_row is None:
+                return
+            status = status_row[0]
             if (
-
-                or
-                or row[0] == WorkflowStatusString.ERROR.value
+                status == WorkflowStatusString.SUCCESS.value
+                or status == WorkflowStatusString.ERROR.value
             ):
                 return
             # Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
@@ -587,7 +594,12 @@ class SystemDatabase:
         return max_function_id
 
     def fork_workflow(
-        self,
+        self,
+        original_workflow_id: str,
+        forked_workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str],
     ) -> str:
 
         status = self.get_workflow_status(original_workflow_id)
@@ -607,7 +619,11 @@ class SystemDatabase:
             name=status["name"],
             class_name=status["class_name"],
             config_name=status["config_name"],
-            application_version=
+            application_version=(
+                application_version
+                if application_version is not None
+                else status["app_version"]
+            ),
             application_id=status["app_id"],
             request=status["request"],
             authenticated_user=status["authenticated_user"],
@@ -1597,17 +1613,43 @@ class SystemDatabase:
         )
         return value
 
-    def enqueue(
+    def enqueue(
+        self,
+        workflow_id: str,
+        queue_name: str,
+        conn: sa.Connection,
+        *,
+        enqueue_options: Optional[EnqueueOptionsInternal],
+    ) -> None:
         if self._debug_mode:
             raise Exception("called enqueue in debug mode")
-
-
-
-
-
+        try:
+            deduplication_id = (
+                enqueue_options["deduplication_id"]
+                if enqueue_options is not None
+                else None
             )
-
-
+            query = (
+                pg.insert(SystemSchema.workflow_queue)
+                .values(
+                    workflow_uuid=workflow_id,
+                    queue_name=queue_name,
+                    deduplication_id=deduplication_id,
+                )
+                .on_conflict_do_nothing(
+                    index_elements=SystemSchema.workflow_queue.primary_key.columns
+                )
+            )  # Ignore primary key constraint violation
+            conn.execute(query)
+        except DBAPIError as dbapi_error:
+            # Unique constraint violation for the deduplication ID
+            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
+                assert (
+                    deduplication_id is not None
+                ), f"deduplication_id should not be None. Workflow ID: {workflow_id}, Queue name: {queue_name}."
+                raise DBOSQueueDeduplicatedError(
+                    workflow_id, queue_name, deduplication_id
+                )
 
     def start_queued_workflows(
         self, queue: "Queue", executor_id: str, app_version: str
@@ -1879,6 +1921,7 @@ class SystemDatabase:
         inputs: str,
         *,
         max_recovery_attempts: Optional[int],
+        enqueue_options: Optional[EnqueueOptionsInternal],
     ) -> tuple[WorkflowStatuses, Optional[int]]:
         """
         Synchronously record the status and inputs for workflows in a single transaction
@@ -1894,7 +1937,12 @@ class SystemDatabase:
             status["queue_name"] is not None
             and wf_status == WorkflowStatusString.ENQUEUED.value
         ):
-            self.enqueue(
+            self.enqueue(
+                status["workflow_uuid"],
+                status["queue_name"],
+                conn,
+                enqueue_options=enqueue_options,
+            )
         return wf_status, workflow_deadline_epoch_ms
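The rewritten enqueue distinguishes two conflict cases: a primary-key collision (the same workflow enqueued twice) is silently ignored via ON CONFLICT DO NOTHING, while a violation of the (queue_name, deduplication_id) unique constraint surfaces as DBOSQueueDeduplicatedError through SQLSTATE 23505. A self-contained sketch of that error-translation pattern (the table and error class are illustrative stand-ins, not the library's own):

```python
# Standalone sketch of the enqueue error-translation pattern; the table and
# error class are illustrative stand-ins.
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
from sqlalchemy.exc import DBAPIError

metadata = sa.MetaData()
work_queue = sa.Table(
    "work_queue",
    metadata,
    sa.Column("workflow_uuid", sa.Text, primary_key=True),
    sa.Column("queue_name", sa.Text, nullable=False),
    sa.Column("deduplication_id", sa.Text, nullable=True),
    sa.UniqueConstraint("queue_name", "deduplication_id"),
)

class QueueDeduplicatedError(Exception):
    pass

def enqueue(conn: sa.Connection, workflow_id: str, queue_name: str,
            deduplication_id: Optional[str]) -> None:
    query = (
        pg.insert(work_queue)
        .values(
            workflow_uuid=workflow_id,
            queue_name=queue_name,
            deduplication_id=deduplication_id,
        )
        # Re-enqueueing the same workflow is a no-op rather than an error.
        .on_conflict_do_nothing(index_elements=work_queue.primary_key.columns)
    )
    try:
        conn.execute(query)
    except DBAPIError as e:
        # 23505 = unique_violation; with PK conflicts suppressed above, this
        # can only come from the (queue_name, deduplication_id) constraint.
        if getattr(e.orig, "sqlstate", None) == "23505":
            raise QueueDeduplicatedError(workflow_id, queue_name, deduplication_id)
        raise
```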
dbos/_workflow_commands.py
CHANGED
@@ -1,6 +1,7 @@
 import uuid
 from typing import List, Optional
 
+from dbos._context import get_local_dbos_context
 from dbos._error import DBOSException
 
 from ._app_db import ApplicationDatabase
@@ -103,6 +104,8 @@ def fork_workflow(
     app_db: ApplicationDatabase,
     workflow_id: str,
     start_step: int,
+    *,
+    application_version: Optional[str],
 ) -> str:
     def get_max_function_id(workflow_uuid: str) -> int:
         max_transactions = app_db.get_max_function_id(workflow_uuid) or 0
@@ -114,7 +117,17 @@ def fork_workflow(
         raise DBOSException(
             f"Cannot fork workflow {workflow_id} from step {start_step}. The workflow has {max_function_id} steps."
         )
-
+    ctx = get_local_dbos_context()
+    if ctx is not None and len(ctx.id_assigned_for_next_workflow) > 0:
+        forked_workflow_id = ctx.id_assigned_for_next_workflow
+        ctx.id_assigned_for_next_workflow = ""
+    else:
+        forked_workflow_id = str(uuid.uuid4())
     app_db.clone_workflow_transactions(workflow_id, forked_workflow_id, start_step)
-    sys_db.fork_workflow(
+    sys_db.fork_workflow(
+        workflow_id,
+        forked_workflow_id,
+        start_step,
+        application_version=application_version,
+    )
     return forked_workflow_id
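Because the fork path now consults the ambient context for a pre-assigned workflow ID, the ID of a fork can be controlled with SetWorkflowID; otherwise a random UUID is generated. A sketch (the IDs are illustrative):

```python
# Sketch only: IDs are illustrative.
from dbos import DBOS, SetWorkflowID

with SetWorkflowID("forked-copy-1"):
    handle = DBOS.fork_workflow("original-workflow-id", 3)
assert handle.get_workflow_id() == "forked-copy-1"
```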
{dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/RECORD
CHANGED
@@ -1,23 +1,23 @@
-dbos-0.27.
-dbos-0.27.
-dbos-0.27.
-dbos-0.27.
-dbos/__init__.py,sha256
+dbos-0.27.0a9.dist-info/METADATA,sha256=tK7jyKq5lMF4Zq0xqhq4K3s7f43xRSRD6XzX5AObucs,5553
+dbos-0.27.0a9.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-0.27.0a9.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-0.27.0a9.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos/__init__.py,sha256=-FdBlOlr-f2tY__C23J4v22MoCAXqcDN_-zXsJXdoZ0,1005
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=bR7hO8WS5hUzxjbDS3X0hXWuW8k3AQQSAvaynnthhtc,9031
 dbos/_app_db.py,sha256=3j8_5-MlSDY0otLRszFE-GfenU6JC20fcfSL-drSNYk,11800
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=
+dbos/_client.py,sha256=Id-jzAUH6JMN-9WmAGyo0vm-nc0URjNIVwA2iKnCN5Q,13418
 dbos/_conductor/conductor.py,sha256=HYzVL29IMMrs2Mnms_7cHJynCnmmEN5SDQOMjzn3UoU,16840
 dbos/_conductor/protocol.py,sha256=zEKIuOQdIaSduNqfZKpo8PSD9_1oNpKIPnBNCu3RUyE,6681
-dbos/_context.py,sha256=
-dbos/_core.py,sha256=
+dbos/_context.py,sha256=5aJHOjh6-2Zc7Fwzw924Vg0utLEkaR-oBMRdz3cE95k,23680
+dbos/_core.py,sha256=7zhdO-VfZe84wgOzBVsliqO-BI20OzcLTFqvrGyxttw,48425
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=
+dbos/_dbos.py,sha256=ENDQ6Xi4MoKrjXoCRlk1B64yZP7D-MyDUjUlOTRsw9I,48314
 dbos/_dbos_config.py,sha256=L0Z0OOB5FoPM9g-joZqXGeJnlxWQsEUtgPtgtg9Uf48,21732
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=NmcgqmR5rQA_4igfeqh8ugNT2z3YmoOvuep_MEtxTiY,5854
-dbos/_error.py,sha256=
+dbos/_error.py,sha256=FOvv40rCWezx9J-0z45ScPYHO8WpmI2IHErZ8Wl1NU4,7510
 dbos/_event_loop.py,sha256=NmaLbEQFfEK36S_0KhVD39YdYrGce3qSKCTJ-5RqKQ0,2136
 dbos/_fastapi.py,sha256=PhaKftbApHnjtYEOw0EYna_3K0cmz__J9of7mRJWzu4,3704
 dbos/_flask.py,sha256=DZKUZR5-xOzPI7tYZ53r2PvvHVoAb8SYwLzMVFsVfjI,2608
@@ -27,6 +27,7 @@ dbos/_logger.py,sha256=qv2srteCF2rSRjCK1VGOck3ieIkwUe9Lvbv60mJc16E,4069
 dbos/_migrations/env.py,sha256=38SIGVbmn_VV2x2u1aHLcPOoWgZ84eCymf3g_NljmbU,1626
 dbos/_migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
 dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py,sha256=ICLPl8CN9tQXMsLDsAj8z1TsL831-Z3F8jSBvrR-wyw,736
+dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py,sha256=56w1v6TdofW3V18iwm0MP0SAeSaAUPSS40HIcn6qYIE,1072
 dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py,sha256=ZBYrtTdxy64HxIAlOes89fVIk2P1gNaJack7wuC_epg,873
 dbos/_migrations/versions/5c361fc04708_added_system_tables.py,sha256=Xr9hBDJjkAtymlauOmAy00yUHj0VVUaEz7kNwEM9IwE,6403
 dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py,sha256=Q_R35pb8AfVI3sg5mzKwyoPfYB88Ychcc8gwxpM9R7A,1035
@@ -36,7 +37,7 @@ dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py,sha256=m90Lc5YH0ZISSq1MyxND6oq3RZrZKrIqEsZtwJ1jWxA,1049
 dbos/_outcome.py,sha256=EXxBg4jXCVJsByDQ1VOCIedmbeq_03S6d-p1vqQrLFU,6810
-dbos/_queue.py,sha256=
+dbos/_queue.py,sha256=aKCGahWBGJOLOv5PCOOId96Va3YQ4ICuHWXy-eQXohE,3526
 dbos/_recovery.py,sha256=98Py7icfytyIELJ54gIsdvmURBvTb0HmWaxEAuYL0dc,2546
 dbos/_registrations.py,sha256=EZzG3ZfYmWA2bHX2hpnSIQ3PTi3-cXsvbcmXjyOusMk,7302
 dbos/_request.py,sha256=cX1B3Atlh160phgS35gF1VEEV4pD126c9F3BDgBmxZU,929
@@ -44,9 +45,9 @@ dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
 dbos/_scheduler.py,sha256=SR1oRZRcVzYsj-JauV2LA8JtwTkt8mru7qf6H1AzQ1U,2027
 dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
-dbos/_schemas/system_database.py,sha256=
+dbos/_schemas/system_database.py,sha256=wLqrhApNqrwZC1SdUxi_ca0y_66WzKaaBOxvND4_bdg,5738
 dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
-dbos/_sys_db.py,sha256=
+dbos/_sys_db.py,sha256=CJf-PgAerEZdbCe9ZJOJnjkrqPcflUz04KFJaGRMm7Q,82389
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py,sha256=nJMN3ZD2lmwg4Dcgmiwqc-tQGuCJuJal2Xl85iA277U,2453
@@ -59,11 +60,11 @@ dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sh
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=yN6GRDKu_1p-EqtQLNarMocPfga2ZuqpzStzzSPYhzo,2732
 dbos/_utils.py,sha256=nFRUHzVjXG5AusF85AlYHikj63Tzi-kQm992ihsrAxA,201
-dbos/_workflow_commands.py,sha256=
+dbos/_workflow_commands.py,sha256=7_f8-w0MbS1gqC5v68EwzbUtomVM0lLebozpHxXmRYg,3982
 dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
 dbos/cli/_template_init.py,sha256=-WW3kbq0W_Tq4WbMqb1UGJG3xvJb3woEY5VspG95Srk,2857
 dbos/cli/cli.py,sha256=a3rUrHog5-e22KjjUPOuTjH20PmUgSP0amRpMd6LVJE,18882
 dbos/dbos-config.schema.json,sha256=8KcwJb_sQc4-6tQG2TLmjE_nratfrQa0qVLl9XPsvWE,6367
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.27.
+dbos-0.27.0a9.dist-info/RECORD,,
{dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/WHEEL
File without changes
{dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/entry_points.txt
File without changes
{dbos-0.27.0a7.dist-info → dbos-0.27.0a9.dist-info}/licenses/LICENSE
File without changes