dbos 1.2.0a9__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_client.py +15 -7
- dbos/_core.py +28 -22
- dbos/_dbos.py +11 -6
- dbos/_dbos_config.py +1 -1
- dbos/_event_loop.py +7 -10
- dbos/_logger.py +17 -11
- dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py +71 -0
- dbos/_migrations/versions/d994145b47b6_consolidate_inputs.py +30 -0
- dbos/_queue.py +15 -2
- dbos/_schemas/system_database.py +10 -67
- dbos/_sys_db.py +157 -349
- {dbos-1.2.0a9.dist-info → dbos-1.3.0.dist-info}/METADATA +1 -1
- {dbos-1.2.0a9.dist-info → dbos-1.3.0.dist-info}/RECORD +16 -14
- {dbos-1.2.0a9.dist-info → dbos-1.3.0.dist-info}/WHEEL +0 -0
- {dbos-1.2.0a9.dist-info → dbos-1.3.0.dist-info}/entry_points.txt +0 -0
- {dbos-1.2.0a9.dist-info → dbos-1.3.0.dist-info}/licenses/LICENSE +0 -0
dbos/_client.py
CHANGED
@@ -141,6 +141,11 @@ class DBOSClient:
             "priority": options.get("priority"),
         }
 
+        inputs: WorkflowInputs = {
+            "args": args,
+            "kwargs": kwargs,
+        }
+
         status: WorkflowStatusInternal = {
             "workflow_uuid": workflow_id,
             "status": WorkflowStatusString.ENQUEUED.value,
@@ -163,18 +168,18 @@ class DBOSClient:
                 int(workflow_timeout * 1000) if workflow_timeout is not None else None
             ),
             "workflow_deadline_epoch_ms": None,
-
-
-
-
-
+            "deduplication_id": enqueue_options_internal["deduplication_id"],
+            "priority": (
+                enqueue_options_internal["priority"]
+                if enqueue_options_internal["priority"] is not None
+                else 0
+            ),
+            "inputs": _serialization.serialize_args(inputs),
         }
 
         self._sys_db.init_workflow(
             status,
-            _serialization.serialize_args(inputs),
             max_recovery_attempts=None,
-            enqueue_options=enqueue_options_internal,
         )
         return workflow_id
 
@@ -230,6 +235,9 @@ class DBOSClient:
             "app_version": None,
             "workflow_timeout_ms": None,
             "workflow_deadline_epoch_ms": None,
+            "deduplication_id": None,
+            "priority": 0,
+            "inputs": _serialization.serialize_args({"args": (), "kwargs": {}}),
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
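The practical effect of this change is that a client-side enqueue now writes the serialized inputs, deduplication ID, and priority into the `workflow_status` row in a single `init_workflow` call, instead of passing them alongside it. A minimal sketch of exercising the new options — the option field names come from the diff above, while the connection string, workflow name, and exact call shape are assumptions based on the public client API:

```python
from dbos import DBOSClient

client = DBOSClient("postgresql://user:pass@localhost:5432/app_db")

# deduplication_id and priority now land directly on the workflow_status row.
handle = client.enqueue(
    {
        "queue_name": "reports",
        "workflow_name": "generate_report",
        "deduplication_id": "report-123",  # assumed: one ENQUEUED workflow per (queue, id)
        "priority": 1,
    },
    "123",  # positional argument forwarded to the workflow
)
```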
dbos/_core.py
CHANGED
@@ -252,6 +252,10 @@ def _init_workflow(
             raise DBOSNonExistentWorkflowError(wfid)
         return get_status_result
 
+    # If we have a class name, the first arg is the instance and do not serialize
+    if class_name is not None:
+        inputs = {"args": inputs["args"][1:], "kwargs": inputs["kwargs"]}
+
     # Initialize a workflow status object from the context
     status: WorkflowStatusInternal = {
         "workflow_uuid": wfid,
@@ -279,18 +283,25 @@ def _init_workflow(
         "updated_at": None,
         "workflow_timeout_ms": workflow_timeout_ms,
         "workflow_deadline_epoch_ms": workflow_deadline_epoch_ms,
+        "deduplication_id": (
+            enqueue_options["deduplication_id"] if enqueue_options is not None else None
+        ),
+        "priority": (
+            (
+                enqueue_options["priority"]
+                if enqueue_options["priority"] is not None
+                else 0
+            )
+            if enqueue_options is not None
+            else 0
+        ),
+        "inputs": _serialization.serialize_args(inputs),
     }
 
-    # If we have a class name, the first arg is the instance and do not serialize
-    if class_name is not None:
-        inputs = {"args": inputs["args"][1:], "kwargs": inputs["kwargs"]}
-
     # Synchronously record the status and inputs for workflows
     wf_status, workflow_deadline_epoch_ms = dbos._sys_db.init_workflow(
         status,
-        _serialization.serialize_args(inputs),
         max_recovery_attempts=max_recovery_attempts,
-        enqueue_options=enqueue_options,
    )
 
     if workflow_deadline_epoch_ms is not None:
@@ -342,13 +353,12 @@ def _get_wf_invoke_func(
             return recorded_result
         try:
             output = func()
-            status["status"] = "SUCCESS"
-            status["output"] = _serialization.serialize(output)
             if not dbos.debug_mode:
-
-
-
-
+                dbos._sys_db.update_workflow_outcome(
+                    status["workflow_uuid"],
+                    "SUCCESS",
+                    output=_serialization.serialize(output),
+                )
             return output
         except DBOSWorkflowConflictIDError:
             # Await the workflow result
@@ -357,13 +367,12 @@ def _get_wf_invoke_func(
         except DBOSWorkflowCancelledError as error:
             raise
         except Exception as error:
-            status["status"] = "ERROR"
-            status["error"] = _serialization.serialize_exception(error)
             if not dbos.debug_mode:
-
-
-
-
+                dbos._sys_db.update_workflow_outcome(
+                    status["workflow_uuid"],
+                    "ERROR",
+                    error=_serialization.serialize_exception(error),
+                )
             raise
 
     return persist
@@ -432,16 +441,13 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
     status = dbos._sys_db.get_workflow_status(workflow_id)
     if not status:
         raise DBOSRecoveryError(workflow_id, "Workflow status not found")
-    inputs = dbos._sys_db.get_workflow_inputs(workflow_id)
-    if not inputs:
-        raise DBOSRecoveryError(workflow_id, "Workflow inputs not found")
+    inputs = _serialization.deserialize_args(status["inputs"])
     wf_func = dbos._registry.workflow_info_map.get(status["name"], None)
     if not wf_func:
         raise DBOSWorkflowFunctionNotFoundError(
             workflow_id, "Workflow function not found"
         )
     with DBOSContextEnsure():
-        ctx = assert_current_dbos_context()
         # If this function belongs to a configured class, add that class instance as its first argument
         if status["config_name"] is not None:
             config_name = status["config_name"]
dbos/_dbos.py
CHANGED
@@ -100,7 +100,13 @@ from ._error import (
     DBOSNonExistentWorkflowError,
 )
 from ._event_loop import BackgroundEventLoop
-from ._logger import add_otlp_to_all_loggers, config_logger, dbos_logger, init_logger
+from ._logger import (
+    add_otlp_to_all_loggers,
+    add_transformer_to_all_loggers,
+    config_logger,
+    dbos_logger,
+    init_logger,
+)
 from ._workflow_commands import get_workflow, list_workflow_steps
 
 # Most DBOS functions are just any callable F, so decorators / wrappers work on F
@@ -215,6 +221,8 @@ class DBOSRegistry:
         sources = sorted(
             [inspect.getsource(wf) for wf in self.workflow_info_map.values()]
         )
+        # Different DBOS versions should produce different app versions
+        sources.append(GlobalParams.dbos_version)
         for source in sources:
             hasher.update(source.encode("utf-8"))
         return hasher.hexdigest()
@@ -297,7 +305,6 @@ class DBOS:
 
         self._launched: bool = False
         self._debug_mode: bool = False
-        self._configured_threadpool: bool = False
         self._sys_db_field: Optional[SystemDatabase] = None
         self._app_db_field: Optional[ApplicationDatabase] = None
         self._registry: DBOSRegistry = _get_or_create_dbos_registry()
@@ -410,7 +417,7 @@ class DBOS:
             GlobalParams.executor_id = str(uuid.uuid4())
         dbos_logger.info(f"Executor ID: {GlobalParams.executor_id}")
         dbos_logger.info(f"Application version: {GlobalParams.app_version}")
-        self._executor_field = ThreadPoolExecutor(max_workers=64)
+        self._executor_field = ThreadPoolExecutor(max_workers=sys.maxsize)
         self._background_event_loop.start()
         assert self._config["database_url"] is not None
         assert self._config["database"]["sys_db_engine_kwargs"] is not None
@@ -513,6 +520,7 @@ class DBOS:
             for handler in dbos_logger.handlers:
                 handler.flush()
             add_otlp_to_all_loggers()
+            add_transformer_to_all_loggers()
         except Exception:
             dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
             raise
@@ -941,11 +949,8 @@ class DBOS:
 
         This function is called before the first call to asyncio.to_thread.
         """
-        if _get_dbos_instance()._configured_threadpool:
-            return
         loop = asyncio.get_running_loop()
         loop.set_default_executor(_get_dbos_instance()._executor)
-        _get_dbos_instance()._configured_threadpool = True
 
     @classmethod
     def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
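The one-line addition to `compute_app_version` means two deployments running different DBOS releases compute different application versions even with identical workflow code. A simplified restatement of the hashing scheme, assuming a SHA-256 hasher as a stand-in for whatever the registry actually uses:

```python
import hashlib
from typing import Iterable


def compute_app_version(workflow_sources: Iterable[str], dbos_version: str) -> str:
    hasher = hashlib.sha256()
    sources = sorted(workflow_sources)
    # Different DBOS versions should produce different app versions
    sources.append(dbos_version)
    for source in sources:
        hasher.update(source.encode("utf-8"))
    return hasher.hexdigest()
```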
dbos/_dbos_config.py
CHANGED
@@ -91,7 +91,7 @@ class ConfigFile(TypedDict, total=False):
     Data structure containing the DBOS Configuration.
 
     This configuration data is typically loaded from `dbos-config.yaml`.
-    See `https://docs.dbos.dev/
+    See `https://docs.dbos.dev/python/reference/configuration#dbos-configuration-file`
 
     Attributes:
         name (str): Application name
dbos/_event_loop.py
CHANGED
@@ -1,6 +1,5 @@
 import asyncio
 import threading
-from concurrent.futures import ThreadPoolExecutor
 from typing import Any, Coroutine, Optional, TypeVar
 
 
@@ -34,17 +33,15 @@ class BackgroundEventLoop:
 
     def _run_event_loop(self) -> None:
         self._loop = asyncio.new_event_loop()
-
-        self._loop.set_default_executor(thread_pool)
-        asyncio.set_event_loop(self._loop)
+        asyncio.set_event_loop(self._loop)
 
-
-
+        self._running = True
+        self._ready.set()  # Signal that the loop is ready
 
-
-
-
-
+        try:
+            self._loop.run_forever()
+        finally:
+            self._loop.close()
 
     async def _shutdown(self) -> None:
         if self._loop is None:
dbos/_logger.py
CHANGED
@@ -16,7 +16,7 @@ if TYPE_CHECKING:
     from ._dbos_config import ConfigFile
 
 dbos_logger = logging.getLogger("dbos")
-_otlp_handler, _otlp_transformer = None, None
+_otlp_handler, _dbos_log_transformer = None, None
 
 
 class DBOSLogTransformer(logging.Filter):
@@ -93,25 +93,31 @@ def config_logger(config: "ConfigFile") -> None:
         global _otlp_handler
         _otlp_handler = LoggingHandler(logger_provider=log_provider)
 
-        # Attach DBOS-specific attributes to all log entries.
-        global _otlp_transformer
-        _otlp_transformer = DBOSLogTransformer()
-
         # Direct DBOS logs to OTLP
         dbos_logger.addHandler(_otlp_handler)
-
+
+        # Attach DBOS-specific attributes to all log entries.
+        global _dbos_log_transformer
+        _dbos_log_transformer = DBOSLogTransformer()
+        dbos_logger.addFilter(_dbos_log_transformer)
 
 
 def add_otlp_to_all_loggers() -> None:
-    if _otlp_handler is not None and _otlp_transformer is not None:
+    if _otlp_handler is not None:
         root = logging.root
-
         root.addHandler(_otlp_handler)
-        root.addFilter(_otlp_transformer)
-
         for logger_name in root.manager.loggerDict:
             if logger_name != dbos_logger.name:
                 logger = logging.getLogger(logger_name)
                 if not logger.propagate:
                     logger.addHandler(_otlp_handler)
-
+
+
+def add_transformer_to_all_loggers() -> None:
+    if _dbos_log_transformer is not None:
+        root = logging.root
+        root.addFilter(_dbos_log_transformer)
+        for logger_name in root.manager.loggerDict:
+            if logger_name != dbos_logger.name:
+                logger = logging.getLogger(logger_name)
+                logger.addFilter(_dbos_log_transformer)
dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py
ADDED
@@ -0,0 +1,71 @@
+"""consolidate_queues
+
+Revision ID: 66478e1b95e5
+Revises: 933e86bdac6a
+Create Date: 2025-05-21 10:14:25.674613
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "66478e1b95e5"
+down_revision: Union[str, None] = "933e86bdac6a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # Add new columns to workflow_status table
+    op.add_column(
+        "workflow_status",
+        sa.Column("started_at_epoch_ms", sa.BigInteger(), nullable=True),
+        schema="dbos",
+    )
+
+    op.add_column(
+        "workflow_status",
+        sa.Column("deduplication_id", sa.Text(), nullable=True),
+        schema="dbos",
+    )
+
+    op.add_column(
+        "workflow_status",
+        sa.Column(
+            "priority", sa.Integer(), nullable=False, server_default=sa.text("'0'::int")
+        ),
+        schema="dbos",
+    )
+
+    # Add unique constraint for deduplication_id
+    op.create_unique_constraint(
+        "uq_workflow_status_queue_name_dedup_id",
+        "workflow_status",
+        ["queue_name", "deduplication_id"],
+        schema="dbos",
+    )
+
+    # Add index on status field
+    op.create_index(
+        "workflow_status_status_index", "workflow_status", ["status"], schema="dbos"
+    )
+
+
+def downgrade() -> None:
+    # Drop indexes
+    op.drop_index(
+        "workflow_status_status_index", table_name="workflow_status", schema="dbos"
+    )
+
+    # Drop unique constraint
+    op.drop_constraint(
+        "uq_workflow_status_queue_name_dedup_id", "workflow_status", schema="dbos"
+    )
+
+    # Drop columns
+    op.drop_column("workflow_status", "priority", schema="dbos")
+    op.drop_column("workflow_status", "deduplication_id", schema="dbos")
+    op.drop_column("workflow_status", "started_at_epoch_ms", schema="dbos")
dbos/_migrations/versions/d994145b47b6_consolidate_inputs.py
ADDED
@@ -0,0 +1,30 @@
+"""consolidate_inputs
+
+Revision ID: d994145b47b6
+Revises: 66478e1b95e5
+Create Date: 2025-05-23 08:09:15.515009
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d994145b47b6"
+down_revision: Union[str, None] = "66478e1b95e5"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "workflow_status",
+        sa.Column("inputs", sa.Text(), nullable=True),
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_column("workflow_status", "inputs", schema="dbos")
dbos/_queue.py
CHANGED
@@ -5,8 +5,9 @@ from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypedDict
 from psycopg import errors
 from sqlalchemy.exc import OperationalError
 
+from dbos._context import get_local_dbos_context
 from dbos._logger import dbos_logger
-from dbos._utils import GlobalParams
+from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
 
 from ._core import P, R, execute_workflow_by_id, start_workflow, start_workflow_async
 
@@ -41,6 +42,7 @@ class Queue:
         limiter: Optional[QueueRateLimit] = None,
         *,  # Disable positional arguments from here on
         worker_concurrency: Optional[int] = None,
+        priority_enabled: bool = False,
     ) -> None:
         if (
             worker_concurrency is not None
@@ -54,10 +56,11 @@ class Queue:
         self.concurrency = concurrency
         self.worker_concurrency = worker_concurrency
         self.limiter = limiter
+        self.priority_enabled = priority_enabled
         from ._dbos import _get_or_create_dbos_registry
 
         registry = _get_or_create_dbos_registry()
-        if self.name in registry.queue_info_map:
+        if self.name in registry.queue_info_map and self.name != INTERNAL_QUEUE_NAME:
             dbos_logger.warning(f"Queue {name} has already been declared")
         registry.queue_info_map[self.name] = self
 
@@ -66,6 +69,16 @@ class Queue:
     ) -> "WorkflowHandle[R]":
         from ._dbos import _get_dbos_instance
 
+        context = get_local_dbos_context()
+        if (
+            context is not None
+            and context.priority is not None
+            and not self.priority_enabled
+        ):
+            dbos_logger.warning(
+                f"Priority is not enabled for queue {self.name}. Setting priority will not have any effect."
+            )
+
         dbos = _get_dbos_instance()
         return start_workflow(dbos, func, self.name, False, *args, **kwargs)
 
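From the application side, the new `priority_enabled` flag is opt-in per queue; enqueuing with a priority on a queue that has not opted in only emits the warning added above. A usage sketch, assuming the `SetEnqueueOptions` context manager that the dbos package documents for per-enqueue settings:

```python
from dbos import DBOS, Queue, SetEnqueueOptions

queue = Queue("reports", priority_enabled=True)


@DBOS.workflow()
def generate_report(report_id: str) -> str:
    return f"report {report_id} done"


# Priority and deduplication ID are read from the ambient DBOS context,
# which is what get_local_dbos_context() consults in Queue.enqueue above.
with SetEnqueueOptions(priority=1, deduplication_id="report-123"):
    handle = queue.enqueue(generate_report, "123")
```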
|
dbos/_schemas/system_database.py
CHANGED
@@ -1,6 +1,5 @@
|
|
1
1
|
from sqlalchemy import (
|
2
2
|
BigInteger,
|
3
|
-
Boolean,
|
4
3
|
Column,
|
5
4
|
ForeignKey,
|
6
5
|
Index,
|
@@ -57,8 +56,18 @@ class SystemSchema:
|
|
57
56
|
Column("queue_name", Text, nullable=True),
|
58
57
|
Column("workflow_timeout_ms", BigInteger, nullable=True),
|
59
58
|
Column("workflow_deadline_epoch_ms", BigInteger, nullable=True),
|
59
|
+
Column("started_at_epoch_ms", BigInteger(), nullable=True),
|
60
|
+
Column("deduplication_id", Text(), nullable=True),
|
61
|
+
Column("inputs", Text()),
|
62
|
+
Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
|
60
63
|
Index("workflow_status_created_at_index", "created_at"),
|
61
64
|
Index("workflow_status_executor_id_index", "executor_id"),
|
65
|
+
Index("workflow_status_status_index", "status"),
|
66
|
+
UniqueConstraint(
|
67
|
+
"queue_name",
|
68
|
+
"deduplication_id",
|
69
|
+
name="uq_workflow_status_queue_name_dedup_id",
|
70
|
+
),
|
62
71
|
)
|
63
72
|
|
64
73
|
operation_outputs = Table(
|
@@ -80,21 +89,6 @@ class SystemSchema:
|
|
80
89
|
PrimaryKeyConstraint("workflow_uuid", "function_id"),
|
81
90
|
)
|
82
91
|
|
83
|
-
workflow_inputs = Table(
|
84
|
-
"workflow_inputs",
|
85
|
-
metadata_obj,
|
86
|
-
Column(
|
87
|
-
"workflow_uuid",
|
88
|
-
Text,
|
89
|
-
ForeignKey(
|
90
|
-
"workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
|
91
|
-
),
|
92
|
-
primary_key=True,
|
93
|
-
nullable=False,
|
94
|
-
),
|
95
|
-
Column("inputs", Text, nullable=False),
|
96
|
-
)
|
97
|
-
|
98
92
|
notifications = Table(
|
99
93
|
"notifications",
|
100
94
|
metadata_obj,
|
@@ -138,54 +132,3 @@ class SystemSchema:
|
|
138
132
|
Column("value", Text, nullable=False),
|
139
133
|
PrimaryKeyConstraint("workflow_uuid", "key"),
|
140
134
|
)
|
141
|
-
|
142
|
-
scheduler_state = Table(
|
143
|
-
"scheduler_state",
|
144
|
-
metadata_obj,
|
145
|
-
Column("workflow_fn_name", Text, primary_key=True, nullable=False),
|
146
|
-
Column("last_run_time", BigInteger, nullable=False),
|
147
|
-
)
|
148
|
-
|
149
|
-
workflow_queue = Table(
|
150
|
-
"workflow_queue",
|
151
|
-
metadata_obj,
|
152
|
-
Column(
|
153
|
-
"workflow_uuid",
|
154
|
-
Text,
|
155
|
-
ForeignKey(
|
156
|
-
"workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
|
157
|
-
),
|
158
|
-
nullable=False,
|
159
|
-
primary_key=True,
|
160
|
-
),
|
161
|
-
# Column("executor_id", Text), # This column is deprecated. Do *not* use it.
|
162
|
-
Column("queue_name", Text, nullable=False),
|
163
|
-
Column(
|
164
|
-
"created_at_epoch_ms",
|
165
|
-
BigInteger,
|
166
|
-
nullable=False,
|
167
|
-
server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
|
168
|
-
),
|
169
|
-
Column(
|
170
|
-
"started_at_epoch_ms",
|
171
|
-
BigInteger(),
|
172
|
-
),
|
173
|
-
Column(
|
174
|
-
"completed_at_epoch_ms",
|
175
|
-
BigInteger(),
|
176
|
-
),
|
177
|
-
Column(
|
178
|
-
"deduplication_id",
|
179
|
-
Text,
|
180
|
-
nullable=True,
|
181
|
-
),
|
182
|
-
Column(
|
183
|
-
"priority",
|
184
|
-
Integer,
|
185
|
-
nullable=False,
|
186
|
-
server_default=text("'0'::int"),
|
187
|
-
),
|
188
|
-
UniqueConstraint(
|
189
|
-
"queue_name", "deduplication_id", name="uq_workflow_queue_name_dedup_id"
|
190
|
-
),
|
191
|
-
)
|
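With `workflow_queue`, `scheduler_state`, and `workflow_inputs` gone, queued work is represented entirely by rows in `workflow_status`. A hypothetical query against the consolidated table, sketching how a dequeue pass might select work — the column names come from the schema above, while the status literal and the ordering (ascending priority, then creation time) are assumptions:

```python
import sqlalchemy as sa

from dbos._schemas.system_database import SystemSchema

ws = SystemSchema.workflow_status
stmt = (
    sa.select(ws.c.workflow_uuid)
    .where(ws.c.queue_name == "reports")
    .where(ws.c.status == "ENQUEUED")
    .order_by(ws.c.priority.asc(), ws.c.created_at.asc())
)
```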