dbos 1.3.0a1__py3-none-any.whl → 1.3.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_client.py +8 -1
- dbos/_core.py +22 -13
- dbos/_dbos.py +3 -5
- dbos/_event_loop.py +7 -10
- dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py +71 -0
- dbos/_queue.py +11 -0
- dbos/_schemas/system_database.py +9 -52
- dbos/_sys_db.py +112 -246
- {dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/METADATA +1 -1
- {dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/RECORD +13 -12
- {dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/WHEEL +0 -0
- {dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/entry_points.txt +0 -0
- {dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/licenses/LICENSE +0 -0
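In sum, this release consolidates the system database's queue bookkeeping into the `workflow_status` table: it gains `started_at_epoch_ms`, `deduplication_id`, and `priority` columns, a status index, and a `(queue_name, deduplication_id)` unique constraint, while the separate `workflow_queue` and `scheduler_state` tables disappear from the ORM schema. Queues gain an opt-in `priority_enabled` flag, workflow outcomes are written with a single targeted `UPDATE` (`update_workflow_outcome`), and the computed application version now incorporates the DBOS library version.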
dbos/_client.py
CHANGED
```diff
@@ -163,6 +163,12 @@ class DBOSClient:
                 int(workflow_timeout * 1000) if workflow_timeout is not None else None
             ),
             "workflow_deadline_epoch_ms": None,
+            "deduplication_id": enqueue_options_internal["deduplication_id"],
+            "priority": (
+                enqueue_options_internal["priority"]
+                if enqueue_options_internal["priority"] is not None
+                else 0
+            ),
         }

         inputs: WorkflowInputs = {
@@ -174,7 +180,6 @@ class DBOSClient:
                 status,
                 _serialization.serialize_args(inputs),
                 max_recovery_attempts=None,
-                enqueue_options=enqueue_options_internal,
             )
         return workflow_id

@@ -230,6 +235,8 @@ class DBOSClient:
             "app_version": None,
             "workflow_timeout_ms": None,
             "workflow_deadline_epoch_ms": None,
+            "deduplication_id": None,
+            "priority": 0,
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
```
dbos/_core.py
CHANGED
```diff
@@ -279,6 +279,18 @@ def _init_workflow(
         "updated_at": None,
         "workflow_timeout_ms": workflow_timeout_ms,
         "workflow_deadline_epoch_ms": workflow_deadline_epoch_ms,
+        "deduplication_id": (
+            enqueue_options["deduplication_id"] if enqueue_options is not None else None
+        ),
+        "priority": (
+            (
+                enqueue_options["priority"]
+                if enqueue_options["priority"] is not None
+                else 0
+            )
+            if enqueue_options is not None
+            else 0
+        ),
     }

     # If we have a class name, the first arg is the instance and do not serialize
@@ -290,7 +302,6 @@ def _init_workflow(
             status,
             _serialization.serialize_args(inputs),
             max_recovery_attempts=max_recovery_attempts,
-            enqueue_options=enqueue_options,
         )

     if workflow_deadline_epoch_ms is not None:
@@ -342,13 +353,12 @@ def _get_wf_invoke_func(
             return recorded_result
         try:
             output = func()
-            status["status"] = "SUCCESS"
-            status["output"] = _serialization.serialize(output)
             if not dbos.debug_mode:
-
-
-
-
+                dbos._sys_db.update_workflow_outcome(
+                    status["workflow_uuid"],
+                    "SUCCESS",
+                    output=_serialization.serialize(output),
+                )
             return output
         except DBOSWorkflowConflictIDError:
             # Await the workflow result
@@ -357,13 +367,12 @@ def _get_wf_invoke_func(
         except DBOSWorkflowCancelledError as error:
             raise
         except Exception as error:
-            status["status"] = "ERROR"
-            status["error"] = _serialization.serialize_exception(error)
             if not dbos.debug_mode:
-
-
-
-
+                dbos._sys_db.update_workflow_outcome(
+                    status["workflow_uuid"],
+                    "ERROR",
+                    error=_serialization.serialize_exception(error),
+                )
             raise

    return persist
```
dbos/_dbos.py
CHANGED
```diff
@@ -215,6 +215,8 @@ class DBOSRegistry:
         sources = sorted(
             [inspect.getsource(wf) for wf in self.workflow_info_map.values()]
         )
+        # Different DBOS versions should produce different app versions
+        sources.append(GlobalParams.dbos_version)
         for source in sources:
             hasher.update(source.encode("utf-8"))
         return hasher.hexdigest()
@@ -297,7 +299,6 @@ class DBOS:

         self._launched: bool = False
         self._debug_mode: bool = False
-        self._configured_threadpool: bool = False
         self._sys_db_field: Optional[SystemDatabase] = None
         self._app_db_field: Optional[ApplicationDatabase] = None
         self._registry: DBOSRegistry = _get_or_create_dbos_registry()
@@ -410,7 +411,7 @@ class DBOS:
             GlobalParams.executor_id = str(uuid.uuid4())
         dbos_logger.info(f"Executor ID: {GlobalParams.executor_id}")
         dbos_logger.info(f"Application version: {GlobalParams.app_version}")
-        self._executor_field = ThreadPoolExecutor(max_workers=
+        self._executor_field = ThreadPoolExecutor(max_workers=sys.maxsize)
         self._background_event_loop.start()
         assert self._config["database_url"] is not None
         assert self._config["database"]["sys_db_engine_kwargs"] is not None
@@ -941,11 +942,8 @@ class DBOS:

         This function is called before the first call to asyncio.to_thread.
         """
-        if _get_dbos_instance()._configured_threadpool:
-            return
         loop = asyncio.get_running_loop()
         loop.set_default_executor(_get_dbos_instance()._executor)
-        _get_dbos_instance()._configured_threadpool = True

     @classmethod
     def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
```
dbos/_event_loop.py
CHANGED
```diff
@@ -1,6 +1,5 @@
 import asyncio
 import threading
-from concurrent.futures import ThreadPoolExecutor
 from typing import Any, Coroutine, Optional, TypeVar


@@ -34,17 +33,15 @@ class BackgroundEventLoop:

     def _run_event_loop(self) -> None:
         self._loop = asyncio.new_event_loop()
-
-            self._loop.set_default_executor(thread_pool)
-            asyncio.set_event_loop(self._loop)
+        asyncio.set_event_loop(self._loop)

-
-
+        self._running = True
+        self._ready.set()  # Signal that the loop is ready

-
-
-
-
+        try:
+            self._loop.run_forever()
+        finally:
+            self._loop.close()

     async def _shutdown(self) -> None:
         if self._loop is None:
```
dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py
ADDED
```diff
@@ -0,0 +1,71 @@
+"""consolidate_queues
+
+Revision ID: 66478e1b95e5
+Revises: 933e86bdac6a
+Create Date: 2025-05-21 10:14:25.674613
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "66478e1b95e5"
+down_revision: Union[str, None] = "933e86bdac6a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # Add new columns to workflow_status table
+    op.add_column(
+        "workflow_status",
+        sa.Column("started_at_epoch_ms", sa.BigInteger(), nullable=True),
+        schema="dbos",
+    )
+
+    op.add_column(
+        "workflow_status",
+        sa.Column("deduplication_id", sa.Text(), nullable=True),
+        schema="dbos",
+    )
+
+    op.add_column(
+        "workflow_status",
+        sa.Column(
+            "priority", sa.Integer(), nullable=False, server_default=sa.text("'0'::int")
+        ),
+        schema="dbos",
+    )
+
+    # Add unique constraint for deduplication_id
+    op.create_unique_constraint(
+        "uq_workflow_status_queue_name_dedup_id",
+        "workflow_status",
+        ["queue_name", "deduplication_id"],
+        schema="dbos",
+    )
+
+    # Add index on status field
+    op.create_index(
+        "workflow_status_status_index", "workflow_status", ["status"], schema="dbos"
+    )
+
+
+def downgrade() -> None:
+    # Drop indexes
+    op.drop_index(
+        "workflow_status_status_index", table_name="workflow_status", schema="dbos"
+    )
+
+    # Drop unique constraint
+    op.drop_constraint(
+        "uq_workflow_status_queue_name_dedup_id", "workflow_status", schema="dbos"
+    )
+
+    # Drop columns
+    op.drop_column("workflow_status", "priority", schema="dbos")
+    op.drop_column("workflow_status", "deduplication_id", schema="dbos")
+    op.drop_column("workflow_status", "started_at_epoch_ms", schema="dbos")
```
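Two properties of this migration worth noting: `priority` is added `NOT NULL` with a server default of `0`, so existing rows backfill automatically, and because Postgres treats NULLs as distinct in unique constraints, the `(queue_name, deduplication_id)` constraint only bites on rows that actually carry a deduplication ID. A hedged post-upgrade sanity check (the connection URL is a placeholder):

```python
# Hedged sketch: verify the consolidated columns exist after migrating.
import sqlalchemy as sa

engine = sa.create_engine("postgresql://localhost:5432/my_app_dbos_sys")  # placeholder
inspector = sa.inspect(engine)
columns = {c["name"] for c in inspector.get_columns("workflow_status", schema="dbos")}
assert {"started_at_epoch_ms", "deduplication_id", "priority"} <= columns
```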
dbos/_queue.py
CHANGED
```diff
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypedDict
 from psycopg import errors
 from sqlalchemy.exc import OperationalError

+from dbos._context import get_local_dbos_context
 from dbos._logger import dbos_logger
 from dbos._utils import GlobalParams

@@ -41,6 +42,7 @@ class Queue:
         limiter: Optional[QueueRateLimit] = None,
         *,  # Disable positional arguments from here on
         worker_concurrency: Optional[int] = None,
+        priority_enabled: bool = False,
     ) -> None:
         if (
             worker_concurrency is not None
@@ -54,6 +56,7 @@ class Queue:
         self.concurrency = concurrency
         self.worker_concurrency = worker_concurrency
         self.limiter = limiter
+        self.priority_enabled = priority_enabled
         from ._dbos import _get_or_create_dbos_registry

         registry = _get_or_create_dbos_registry()
@@ -66,6 +69,14 @@ class Queue:
     ) -> "WorkflowHandle[R]":
         from ._dbos import _get_dbos_instance

+        context = get_local_dbos_context()
+        if (
+            context is not None
+            and context.priority is not None
+            and not self.priority_enabled
+        ):
+            dbos_logger.warning(f"Priority is not enabled for queue {self.name}. Setting priority will not have any effect.")
+
         dbos = _get_dbos_instance()
         return start_workflow(dbos, func, self.name, False, *args, **kwargs)

```
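A usage sketch for the new `priority_enabled` flag. It assumes `SetEnqueueOptions` (the context manager in `dbos._context` that sets the `context.priority` consulted above) is importable from the top-level `dbos` package; verify the exact import against the release you install.

```python
# Sketch under the assumptions above; DBOS() configuration is elided.
from dbos import DBOS, Queue, SetEnqueueOptions

queue = Queue("reports", priority_enabled=True)  # opt in to priority ordering

@DBOS.workflow()
def generate_report(day: str) -> str:
    return f"report for {day}"

# Lower values dequeue first; the default 0 is treated as highest priority.
with SetEnqueueOptions(priority=5):
    handle = queue.enqueue(generate_report, "2025-05-21")

print(handle.get_result())
```

If `priority_enabled` is left False, setting a priority now only logs the warning added above and the queue keeps plain FIFO (`created_at`) ordering.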
dbos/_schemas/system_database.py
CHANGED
```diff
@@ -1,6 +1,5 @@
 from sqlalchemy import (
     BigInteger,
-    Boolean,
     Column,
     ForeignKey,
     Index,
@@ -57,8 +56,17 @@ class SystemSchema:
         Column("queue_name", Text, nullable=True),
         Column("workflow_timeout_ms", BigInteger, nullable=True),
         Column("workflow_deadline_epoch_ms", BigInteger, nullable=True),
+        Column("started_at_epoch_ms", BigInteger(), nullable=True),
+        Column("deduplication_id", Text(), nullable=True),
+        Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
         Index("workflow_status_created_at_index", "created_at"),
         Index("workflow_status_executor_id_index", "executor_id"),
+        Index("workflow_status_status_index", "status"),
+        UniqueConstraint(
+            "queue_name",
+            "deduplication_id",
+            name="uq_workflow_status_queue_name_dedup_id",
+        ),
     )

     operation_outputs = Table(
@@ -138,54 +146,3 @@ class SystemSchema:
         Column("value", Text, nullable=False),
         PrimaryKeyConstraint("workflow_uuid", "key"),
     )
-
-    scheduler_state = Table(
-        "scheduler_state",
-        metadata_obj,
-        Column("workflow_fn_name", Text, primary_key=True, nullable=False),
-        Column("last_run_time", BigInteger, nullable=False),
-    )
-
-    workflow_queue = Table(
-        "workflow_queue",
-        metadata_obj,
-        Column(
-            "workflow_uuid",
-            Text,
-            ForeignKey(
-                "workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
-            ),
-            nullable=False,
-            primary_key=True,
-        ),
-        # Column("executor_id", Text), # This column is deprecated. Do *not* use it.
-        Column("queue_name", Text, nullable=False),
-        Column(
-            "created_at_epoch_ms",
-            BigInteger,
-            nullable=False,
-            server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
-        ),
-        Column(
-            "started_at_epoch_ms",
-            BigInteger(),
-        ),
-        Column(
-            "completed_at_epoch_ms",
-            BigInteger(),
-        ),
-        Column(
-            "deduplication_id",
-            Text,
-            nullable=True,
-        ),
-        Column(
-            "priority",
-            Integer,
-            nullable=False,
-            server_default=text("'0'::int"),
-        ),
-        UniqueConstraint(
-            "queue_name", "deduplication_id", name="uq_workflow_queue_name_dedup_id"
-        ),
-    )
```
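Note the asymmetry with the migration above: the ORM metadata drops `scheduler_state` and `workflow_queue` (and the now-unused `Boolean` import), but `66478e1b95e5` does not drop the physical tables, so after upgrading they remain in the database unused rather than deleted.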
dbos/_sys_db.py
CHANGED
```diff
@@ -136,13 +136,17 @@ class WorkflowStatusInternal(TypedDict):
     # The deadline of a workflow, computed by adding its timeout to its start time.
     # Deadlines propagate to children. When the deadline is reached, the workflow is cancelled.
     workflow_deadline_epoch_ms: Optional[int]
+    # Unique ID for deduplication on a queue
+    deduplication_id: Optional[str]
+    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    priority: int


 class EnqueueOptionsInternal(TypedDict):
-
-
-
-    ]
+    # Unique ID for deduplication on a queue
+    deduplication_id: Optional[str]
+    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    priority: Optional[int]


 class RecordedResult(TypedDict):
@@ -456,6 +460,8 @@ class SystemDatabase:
                 ),
                 workflow_timeout_ms=status["workflow_timeout_ms"],
                 workflow_deadline_epoch_ms=status["workflow_deadline_epoch_ms"],
+                deduplication_id=status["deduplication_id"],
+                priority=status["priority"],
             )
             .on_conflict_do_update(
                 index_elements=["workflow_uuid"],
@@ -465,7 +471,18 @@ class SystemDatabase:

         cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.workflow_deadline_epoch_ms, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name)  # type: ignore

-        results = conn.execute(cmd)
+        try:
+            results = conn.execute(cmd)
+        except DBAPIError as dbapi_error:
+            # Unique constraint violation for the deduplication ID
+            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
+                assert status["deduplication_id"] is not None
+                assert status["queue_name"] is not None
+                raise DBOSQueueDeduplicatedError(
+                    status["workflow_uuid"],
+                    status["queue_name"],
+                    status["deduplication_id"],
+                )
         row = results.fetchone()
         if row is not None:
             # Check the started workflow matches the expected name, class_name, config_name, and queue_name
@@ -495,12 +512,6 @@ class SystemDatabase:
             and max_recovery_attempts is not None
             and recovery_attempts > max_recovery_attempts + 1
         ):
-            delete_cmd = sa.delete(SystemSchema.workflow_queue).where(
-                SystemSchema.workflow_queue.c.workflow_uuid
-                == status["workflow_uuid"]
-            )
-            conn.execute(delete_cmd)
-
             dlq_cmd = (
                 sa.update(SystemSchema.workflow_status)
                 .where(
@@ -513,6 +524,8 @@ class SystemDatabase:
                 )
                 .values(
                     status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+                    deduplication_id=None,
+                    started_at_epoch_ms=None,
                     queue_name=None,
                 )
             )
@@ -526,44 +539,28 @@ class SystemDatabase:
         return wf_status, workflow_deadline_epoch_ms

     @db_retry()
-    def
+    def update_workflow_outcome(
         self,
-
+        workflow_id: str,
+        status: WorkflowStatuses,
+        *,
+        output: Optional[str] = None,
+        error: Optional[str] = None,
     ) -> None:
         if self._debug_mode:
             raise Exception("called update_workflow_status in debug mode")
-        wf_status: WorkflowStatuses = status["status"]
         with self.engine.begin() as c:
             c.execute(
-
+                sa.update(SystemSchema.workflow_status)
                 .values(
-
-
-
-
-
-
-                    error=status["error"],
-                    executor_id=status["executor_id"],
-                    application_version=status["app_version"],
-                    application_id=status["app_id"],
-                    authenticated_user=status["authenticated_user"],
-                    authenticated_roles=status["authenticated_roles"],
-                    assumed_role=status["assumed_role"],
-                    queue_name=status["queue_name"],
-                    recovery_attempts=(
-                        1 if wf_status != WorkflowStatusString.ENQUEUED.value else 0
-                    ),
-                )
-                .on_conflict_do_update(
-                    index_elements=["workflow_uuid"],
-                    set_=dict(
-                        status=status["status"],
-                        output=status["output"],
-                        error=status["error"],
-                        updated_at=func.extract("epoch", func.now()) * 1000,
-                    ),
+                    status=status,
+                    output=output,
+                    error=error,
+                    # As the workflow is complete, remove its deduplication ID
+                    deduplication_id=None,
+                    updated_at=func.extract("epoch", func.now()) * 1000,
                 )
+                .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
             )

     def cancel_workflow(
@@ -585,18 +582,15 @@ class SystemDatabase:
                 or row[0] == WorkflowStatusString.ERROR.value
             ):
                 return
-            #
-            c.execute(
-                sa.delete(SystemSchema.workflow_queue).where(
-                    SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
-                )
-            )
-            # Set the workflow's status to CANCELLED
+            # Set the workflow's status to CANCELLED and remove it from any queue it is on
             c.execute(
                 sa.update(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
                 .values(
                     status=WorkflowStatusString.CANCELLED.value,
+                    queue_name=None,
+                    deduplication_id=None,
+                    started_at_epoch_ms=None,
                 )
             )

@@ -620,19 +614,6 @@ class SystemDatabase:
                 or status == WorkflowStatusString.ERROR.value
             ):
                 return
-            # Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
-            c.execute(
-                sa.delete(SystemSchema.workflow_queue).where(
-                    SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
-                )
-            )
-            # Enqueue the workflow on the internal queue
-            c.execute(
-                pg.insert(SystemSchema.workflow_queue).values(
-                    workflow_uuid=workflow_id,
-                    queue_name=INTERNAL_QUEUE_NAME,
-                )
-            )
             # Set the workflow's status to ENQUEUED and clear its recovery attempts and deadline.
             c.execute(
                 sa.update(SystemSchema.workflow_status)
@@ -642,6 +623,8 @@ class SystemDatabase:
                     queue_name=INTERNAL_QUEUE_NAME,
                     recovery_attempts=0,
                     workflow_deadline_epoch_ms=None,
+                    deduplication_id=None,
+                    started_at_epoch_ms=None,
                 )
             )

@@ -720,14 +703,6 @@ class SystemDatabase:
             )

             c.execute(insert_stmt)
-
-            # Enqueue the forked workflow on the internal queue
-            c.execute(
-                pg.insert(SystemSchema.workflow_queue).values(
-                    workflow_uuid=forked_workflow_id,
-                    queue_name=INTERNAL_QUEUE_NAME,
-                )
-            )
         return forked_workflow_id

     @db_retry()
@@ -753,6 +728,8 @@ class SystemDatabase:
                     SystemSchema.workflow_status.c.application_id,
                     SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
                     SystemSchema.workflow_status.c.workflow_timeout_ms,
+                    SystemSchema.workflow_status.c.deduplication_id,
+                    SystemSchema.workflow_status.c.priority,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
             ).fetchone()
             if row is None:
@@ -777,6 +754,8 @@ class SystemDatabase:
                 "app_id": row[13],
                 "workflow_deadline_epoch_ms": row[14],
                 "workflow_timeout_ms": row[15],
+                "deduplication_id": row[16],
+                "priority": row[17],
             }
             return status

@@ -972,37 +951,40 @@ class SystemDatabase:
         """
         Retrieve a list of queued workflows result and inputs based on the input criteria. The result is a list of external-facing workflow status objects.
         """
-        query =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                SystemSchema.workflow_queue.c.workflow_uuid
-                == SystemSchema.workflow_status.c.workflow_uuid,
-            ).join(
+        query = (
+            sa.select(
+                SystemSchema.workflow_status.c.workflow_uuid,
+                SystemSchema.workflow_status.c.status,
+                SystemSchema.workflow_status.c.name,
+                SystemSchema.workflow_status.c.recovery_attempts,
+                SystemSchema.workflow_status.c.config_name,
+                SystemSchema.workflow_status.c.class_name,
+                SystemSchema.workflow_status.c.authenticated_user,
+                SystemSchema.workflow_status.c.authenticated_roles,
+                SystemSchema.workflow_status.c.assumed_role,
+                SystemSchema.workflow_status.c.queue_name,
+                SystemSchema.workflow_status.c.executor_id,
+                SystemSchema.workflow_status.c.created_at,
+                SystemSchema.workflow_status.c.updated_at,
+                SystemSchema.workflow_status.c.application_version,
+                SystemSchema.workflow_status.c.application_id,
+                SystemSchema.workflow_inputs.c.inputs,
+                SystemSchema.workflow_status.c.output,
+                SystemSchema.workflow_status.c.error,
+                SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
+                SystemSchema.workflow_status.c.workflow_timeout_ms,
+            )
+            .join(
                 SystemSchema.workflow_inputs,
-                SystemSchema.
+                SystemSchema.workflow_status.c.workflow_uuid
                 == SystemSchema.workflow_inputs.c.workflow_uuid,
             )
+            .where(
+                sa.and_(
+                    SystemSchema.workflow_status.c.queue_name.isnot(None),
+                    SystemSchema.workflow_status.c.status.in_(["ENQUEUED", "PENDING"]),
+                )
+            )
         )
         if input["sort_desc"]:
             query = query.order_by(SystemSchema.workflow_status.c.created_at.desc())
@@ -1014,7 +996,7 @@ class SystemDatabase:

         if input.get("queue_name"):
             query = query.where(
-                SystemSchema.
+                SystemSchema.workflow_status.c.queue_name == input["queue_name"]
             )

         if input.get("status"):
@@ -1693,51 +1675,6 @@ class SystemDatabase:
         )
         return value

-    def _enqueue(
-        self,
-        workflow_id: str,
-        queue_name: str,
-        conn: sa.Connection,
-        *,
-        enqueue_options: Optional[EnqueueOptionsInternal],
-    ) -> None:
-        if self._debug_mode:
-            raise Exception("called enqueue in debug mode")
-        try:
-            deduplication_id = (
-                enqueue_options["deduplication_id"]
-                if enqueue_options is not None
-                else None
-            )
-            priority = (
-                enqueue_options["priority"] if enqueue_options is not None else None
-            )
-            # Default to 0 (highest priority) if not provided
-            if priority is None:
-                priority = 0
-            query = (
-                pg.insert(SystemSchema.workflow_queue)
-                .values(
-                    workflow_uuid=workflow_id,
-                    queue_name=queue_name,
-                    deduplication_id=deduplication_id,
-                    priority=priority,
-                )
-                .on_conflict_do_nothing(
-                    index_elements=SystemSchema.workflow_queue.primary_key.columns
-                )
-            )  # Ignore primary key constraint violation
-            conn.execute(query)
-        except DBAPIError as dbapi_error:
-            # Unique constraint violation for the deduplication ID
-            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
-                assert (
-                    deduplication_id is not None
-                ), f"deduplication_id should not be None. Workflow ID: {workflow_id}, Queue name: {queue_name}."
-                raise DBOSQueueDeduplicatedError(
-                    workflow_id, queue_name, deduplication_id
-                )
-
     def start_queued_workflows(
         self, queue: "Queue", executor_id: str, app_version: str
     ) -> List[str]:
@@ -1755,13 +1692,14 @@ class SystemDatabase:
         if queue.limiter is not None:
             query = (
                 sa.select(sa.func.count())
-                .select_from(SystemSchema.
-                .where(SystemSchema.
+                .select_from(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.queue_name == queue.name)
                 .where(
-                    SystemSchema.
+                    SystemSchema.workflow_status.c.status
+                    != WorkflowStatusString.ENQUEUED.value
                 )
                 .where(
-                    SystemSchema.
+                    SystemSchema.workflow_status.c.started_at_epoch_ms
                     > start_time_ms - limiter_period_ms
                 )
             )
@@ -1775,14 +1713,8 @@ class SystemDatabase:
                     SystemSchema.workflow_status.c.executor_id,
                     sa.func.count().label("task_count"),
                 )
-                .select_from(
-
-                        SystemSchema.workflow_status,
-                        SystemSchema.workflow_queue.c.workflow_uuid
-                        == SystemSchema.workflow_status.c.workflow_uuid,
-                    )
-                )
-                .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+                .select_from(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.queue_name == queue.name)
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.PENDING.value
@@ -1817,16 +1749,10 @@ class SystemDatabase:
             # Only retrieve workflows of the local version (or without version set)
             query = (
                 sa.select(
-                    SystemSchema.
-                )
-                .select_from(
-                    SystemSchema.workflow_queue.join(
-                        SystemSchema.workflow_status,
-                        SystemSchema.workflow_queue.c.workflow_uuid
-                        == SystemSchema.workflow_status.c.workflow_uuid,
-                    )
+                    SystemSchema.workflow_status.c.workflow_uuid,
                 )
-                .
+                .select_from(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.queue_name == queue.name)
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.ENQUEUED.value
@@ -1838,12 +1764,15 @@ class SystemDatabase:
                     SystemSchema.workflow_status.c.application_version.is_(None),
                 )
             )
-            .order_by(
-                SystemSchema.workflow_queue.c.priority.asc(),
-                SystemSchema.workflow_queue.c.created_at_epoch_ms.asc(),
-            )
             .with_for_update(nowait=True)  # Error out early
             )
+            if queue.priority_enabled:
+                query = query.order_by(
+                    SystemSchema.workflow_status.c.priority.asc(),
+                    SystemSchema.workflow_status.c.created_at.asc(),
+                )
+            else:
+                query = query.order_by(SystemSchema.workflow_status.c.created_at.asc())
             # Apply limit only if max_tasks is finite
             if max_tasks != float("inf"):
                 query = query.limit(int(max_tasks))
@@ -1873,6 +1802,7 @@ class SystemDatabase:
                     status=WorkflowStatusString.PENDING.value,
                     application_version=app_version,
                     executor_id=executor_id,
+                    started_at_epoch_ms=start_time_ms,
                     # If a timeout is set, set the deadline on dequeue
                     workflow_deadline_epoch_ms=sa.case(
                         (
@@ -1892,82 +1822,31 @@ class SystemDatabase:
                     )
                 )
                 # Then give it a start time
-                c.execute(
-                    SystemSchema.workflow_queue.update()
-                    .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
-                    .values(started_at_epoch_ms=start_time_ms)
-                )
                 ret_ids.append(id)

-            # If we have a limiter, garbage-collect all completed workflows started
-            # before the period. If there's no limiter, there's no need--they were
-            # deleted on completion.
-            if queue.limiter is not None:
-                c.execute(
-                    sa.delete(SystemSchema.workflow_queue)
-                    .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms != None)
-                    .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
-                    .where(
-                        SystemSchema.workflow_queue.c.started_at_epoch_ms
-                        < start_time_ms - limiter_period_ms
-                    )
-                )
-
             # Return the IDs of all functions we started
             return ret_ids

-    @db_retry()
-    def remove_from_queue(self, workflow_id: str, queue: "Queue") -> None:
-        if self._debug_mode:
-            raise Exception("called remove_from_queue in debug mode")
-
-        with self.engine.begin() as c:
-            if queue.limiter is None:
-                c.execute(
-                    sa.delete(SystemSchema.workflow_queue).where(
-                        SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
-                    )
-                )
-            else:
-                c.execute(
-                    sa.update(SystemSchema.workflow_queue)
-                    .where(SystemSchema.workflow_queue.c.workflow_uuid == workflow_id)
-                    .values(completed_at_epoch_ms=int(time.time() * 1000))
-                )
-
     def clear_queue_assignment(self, workflow_id: str) -> bool:
         if self._debug_mode:
             raise Exception("called clear_queue_assignment in debug mode")

-        with self.engine.
-
-
-
-
-
-
-
-
-            .values(started_at_epoch_ms=None)
+        with self.engine.begin() as c:
+            # Reset the status of the task to "ENQUEUED"
+            res = c.execute(
+                sa.update(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
+                .where(SystemSchema.workflow_status.c.queue_name.isnot(None))
+                .where(
+                    SystemSchema.workflow_status.c.status
+                    == WorkflowStatusString.PENDING.value
                 )
-
-
-            if res.rowcount == 0:
-                transaction.rollback()
-                return False
-
-            # Reset the status of the task to "ENQUEUED"
-            res = conn.execute(
-                sa.update(SystemSchema.workflow_status)
-                .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
-                .values(status=WorkflowStatusString.ENQUEUED.value)
+                .values(
+                    status=WorkflowStatusString.ENQUEUED.value, started_at_epoch_ms=None
                 )
-
-
-
-            f"UNREACHABLE: Workflow {workflow_id} is found in the workflow_queue table but not found in the workflow_status table"
-            )
-            return True
+            )
+            # If no rows were affected, the workflow is not anymore in the queue or was already completed
+            return res.rowcount > 0

 T = TypeVar("T")

@@ -2012,7 +1891,6 @@ class SystemDatabase:
         inputs: str,
         *,
         max_recovery_attempts: Optional[int],
-        enqueue_options: Optional[EnqueueOptionsInternal],
     ) -> tuple[WorkflowStatuses, Optional[int]]:
         """
         Synchronously record the status and inputs for workflows in a single transaction
@@ -2021,19 +1899,7 @@ class SystemDatabase:
         wf_status, workflow_deadline_epoch_ms = self._insert_workflow_status(
             status, conn, max_recovery_attempts=max_recovery_attempts
         )
-        # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
         self._update_workflow_inputs(status["workflow_uuid"], inputs, conn)
-
-        if (
-            status["queue_name"] is not None
-            and wf_status == WorkflowStatusString.ENQUEUED.value
-        ):
-            self._enqueue(
-                status["workflow_uuid"],
-                status["queue_name"],
-                conn,
-                enqueue_options=enqueue_options,
-            )
         return wf_status, workflow_deadline_epoch_ms

     def check_connection(self) -> None:
```
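With `_enqueue` and `remove_from_queue` removed, deduplication is now enforced by the `uq_workflow_status_queue_name_dedup_id` constraint at insert time, and `_insert_workflow_status` maps the resulting unique violation (SQLSTATE 23505) to `DBOSQueueDeduplicatedError`. Continuing the `_queue.py` sketch above, a hedged example of handling it; the `dbos._error` import path is an assumption (the module appears in the RECORD below):

```python
# Hedged sketch: reacting to the deduplication error surfaced in this diff.
from dbos._error import DBOSQueueDeduplicatedError  # assumed location

try:
    with SetEnqueueOptions(deduplication_id="report-2025-05-21"):
        handle = queue.enqueue(generate_report, "2025-05-21")
except DBOSQueueDeduplicatedError:
    # An ENQUEUED/PENDING workflow with this ID is already on the queue.
    # update_workflow_outcome clears deduplication_id on completion, so the
    # ID becomes reusable once that run finishes.
    pass
```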
{dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/RECORD
CHANGED
```diff
@@ -1,24 +1,24 @@
-dbos-1.3.
-dbos-1.3.
-dbos-1.3.
-dbos-1.3.
+dbos-1.3.0a3.dist-info/METADATA,sha256=sF5nfkjeSrszHYrI_66k_ilrVHWZCfw0X9k9wfcoYl8,13267
+dbos-1.3.0a3.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-1.3.0a3.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-1.3.0a3.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=NssPCubaBxdiKarOWa-wViz1hdJSkmBGcpLX_gQ4NeA,891
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=TWXi4drrzKFpKkUmEJpJkQBZxAtOalnhtYicEn2nDK0,10618
 dbos/_app_db.py,sha256=0PKqpxJ3EbIaak3Wl0lNl3hXvhBfz4EEHaCw1bUOvIM,9937
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=
+dbos/_client.py,sha256=jrvbv33KMkoACD4-wE72a6-Mp2fx0tagh_MnHY93Apk,14576
 dbos/_conductor/conductor.py,sha256=o0IaZjwnZ2TOyHeP2H4iSX6UnXLXQ4uODvWAKD9hHMs,21703
 dbos/_conductor/protocol.py,sha256=wgOFZxmS81bv0WCB9dAyg0s6QzldpzVKQDoSPeaX0Ws,6967
 dbos/_context.py,sha256=5ajoWAmToAfzzmMLylnJZoL4Ny9rBwZWuG05sXadMIA,24798
-dbos/_core.py,sha256
+dbos/_core.py,sha256=-yhsWn5TqDEDsIE_fY1O5qxu295kYFnz8IJnS9luhTE,48656
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=
+dbos/_dbos.py,sha256=Y---ozwd9OXzA1o-oWEbnafQkUibblE6o1kUuVZa_90,47163
 dbos/_dbos_config.py,sha256=JWVuPE_Ifyr-pYHFxclFalB_HZ8ETFCGNJzBHGpClXw,20347
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=tLJXWqZ4S-ExcaPnxg_i6cVxL6ZxrYlZjaGsklY-s2I,6115
 dbos/_error.py,sha256=q0OQJZTbR8FFHV9hEpAGpz9oWBT5L509zUhmyff7FJw,8500
-dbos/_event_loop.py,sha256=
+dbos/_event_loop.py,sha256=NmaLbEQFfEK36S_0KhVD39YdYrGce3qSKCTJ-5RqKQ0,2136
 dbos/_fastapi.py,sha256=m4SL3H9P-NBQ_ZrbFxAWMOqNyIi3HGEn2ODR7xAK038,3118
 dbos/_flask.py,sha256=Npnakt-a3W5OykONFRkDRnumaDhTQmA0NPdUCGRYKXE,1652
 dbos/_kafka.py,sha256=pz0xZ9F3X9Ky1k-VSbeF3tfPhP3UPr3lUUhUfE41__U,4198
@@ -30,6 +30,7 @@ dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py,sha256=ICL
 dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py,sha256=56w1v6TdofW3V18iwm0MP0SAeSaAUPSS40HIcn6qYIE,1072
 dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py,sha256=ZBYrtTdxy64HxIAlOes89fVIk2P1gNaJack7wuC_epg,873
 dbos/_migrations/versions/5c361fc04708_added_system_tables.py,sha256=Xr9hBDJjkAtymlauOmAy00yUHj0VVUaEz7kNwEM9IwE,6403
+dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py,sha256=Qo9C8pFSdN0GPM0fN-DI5GPRegXq99Mig2me04IXfLI,1894
 dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py,sha256=Q_R35pb8AfVI3sg5mzKwyoPfYB88Ychcc8gwxpM9R7A,1035
 dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py,sha256=yZX2kGF33skpXIBdMXtDNx-Nl_orFatKeHB8c-3K8-c,773
 dbos/_migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEULYsPfnp4YzaO5L198gDTgYY39AVA,2022
@@ -38,16 +39,16 @@ dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py,sha256=m90Lc5YH0ZISSq1MyxND6oq3RZrZKrIqEsZtwJ1jWxA,1049
 dbos/_outcome.py,sha256=EXxBg4jXCVJsByDQ1VOCIedmbeq_03S6d-p1vqQrLFU,6810
-dbos/_queue.py,sha256=
+dbos/_queue.py,sha256=w3vjOqU0NNdQlUFcvhhdRIzepcS2gtWJCgzobR1dJ8U,4009
 dbos/_recovery.py,sha256=jVMexjfCCNopzyn8gVQzJCmGJaP9G3C1EFaoCQ_Nh7g,2564
 dbos/_registrations.py,sha256=CZt1ElqDjCT7hz6iyT-1av76Yu-iuwu_c9lozO87wvM,7303
 dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
 dbos/_scheduler.py,sha256=SR1oRZRcVzYsj-JauV2LA8JtwTkt8mru7qf6H1AzQ1U,2027
 dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
-dbos/_schemas/system_database.py,sha256=
+dbos/_schemas/system_database.py,sha256=5ebwz_h-Yz11u_eYbs8Cy8YinDpxvyiw8mEc0qjR5bo,4796
 dbos/_serialization.py,sha256=bWuwhXSQcGmiazvhJHA5gwhrRWxtmFmcCFQSDJnqqkU,3666
-dbos/_sys_db.py,sha256=
+dbos/_sys_db.py,sha256=hrf4MsEs-jF5-x4asTzNwYf8KJ_VnAZ1y42KTS3kDTo,80119
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py.dbos,sha256=aQnBPSSQpkB8ERfhf7gB7P9tsU6OPKhZscfeh0yiaD8,2702
@@ -67,4 +68,4 @@ dbos/cli/cli.py,sha256=EemOMqNpzSU2BQhAxV_e59pBRITDLwt49HF6W3uWBZg,20775
 dbos/dbos-config.schema.json,sha256=CjaspeYmOkx6Ip_pcxtmfXJTn_YGdSx_0pcPBF7KZmo,6060
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-1.3.
+dbos-1.3.0a3.dist-info/RECORD,,
```

{dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/WHEEL
File without changes

{dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/entry_points.txt
File without changes

{dbos-1.3.0a1.dist-info → dbos-1.3.0a3.dist-info}/licenses/LICENSE
File without changes