dbos 0.19.0a4__py3-none-any.whl → 0.20.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dbos might be problematic.
- dbos/_admin_server.py +45 -2
- dbos/_context.py +11 -2
- dbos/_core.py +45 -5
- dbos/_dbos.py +19 -0
- dbos/_error.py +11 -0
- dbos/_fastapi.py +6 -2
- dbos/_flask.py +6 -2
- dbos/_kafka.py +17 -1
- dbos/_queue.py +1 -0
- dbos/_sys_db.py +69 -37
- dbos/_workflow_commands.py +171 -0
- dbos/cli.py +140 -1
- {dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/METADATA +21 -16
- {dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/RECORD +17 -16
- {dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/WHEEL +0 -0
- {dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/entry_points.txt +0 -0
- {dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/licenses/LICENSE +0 -0
dbos/_admin_server.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import json
+import re
 import threading
 from functools import partial
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
@@ -15,6 +16,9 @@ if TYPE_CHECKING:
 _health_check_path = "/dbos-healthz"
 _workflow_recovery_path = "/dbos-workflow-recovery"
 _deactivate_path = "/deactivate"
+# /workflows/:workflow_id/cancel
+# /workflows/:workflow_id/resume
+# /workflows/:workflow_id/restart


 class AdminServer:
@@ -79,12 +83,51 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
             self._end_headers()
             self.wfile.write(json.dumps(workflow_ids).encode("utf-8"))
         else:
-
-
+
+            restart_match = re.match(
+                r"^/workflows/(?P<workflow_id>[^/]+)/restart$", self.path
+            )
+            resume_match = re.match(
+                r"^/workflows/(?P<workflow_id>[^/]+)/resume$", self.path
+            )
+            cancel_match = re.match(
+                r"^/workflows/(?P<workflow_id>[^/]+)/cancel$", self.path
+            )
+
+            if restart_match:
+                workflow_id = restart_match.group("workflow_id")
+                self._handle_restart(workflow_id)
+            elif resume_match:
+                workflow_id = resume_match.group("workflow_id")
+                self._handle_resume(workflow_id)
+            elif cancel_match:
+                workflow_id = cancel_match.group("workflow_id")
+                self._handle_cancel(workflow_id)
+            else:
+                self.send_response(404)
+                self._end_headers()

     def log_message(self, format: str, *args: Any) -> None:
         return  # Disable admin server request logging

+    def _handle_restart(self, workflow_id: str) -> None:
+        self.dbos.restart_workflow(workflow_id)
+        print("Restarting workflow", workflow_id)
+        self.send_response(204)
+        self._end_headers()
+
+    def _handle_resume(self, workflow_id: str) -> None:
+        print("Resuming workflow", workflow_id)
+        self.dbos.resume_workflow(workflow_id)
+        self.send_response(204)
+        self._end_headers()
+
+    def _handle_cancel(self, workflow_id: str) -> None:
+        print("Cancelling workflow", workflow_id)
+        self.dbos.cancel_workflow(workflow_id)
+        self.send_response(204)
+        self._end_headers()
+

 # Be consistent with DBOS-TS response.
 class PerfUtilization(TypedDict):
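The three new POST routes mirror the workflow-management methods added to the `DBOS` class in this release. A minimal sketch of calling them over HTTP, assuming a configured and launched DBOS application whose admin server listens on the default port 3001 (the same default the new CLI commands use) and a hypothetical workflow ID:

```python
import requests

workflow_id = "example-workflow-id"  # hypothetical; substitute a real workflow ID

# Each route returns 204 No Content on success (see the _handle_* methods above).
for action in ("cancel", "resume", "restart"):
    response = requests.post(
        f"http://localhost:3001/workflows/{workflow_id}/{action}",
        json=[],
        timeout=5,
    )
    print(action, response.status_code)
```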
dbos/_context.py
CHANGED
@@ -57,6 +57,7 @@ class DBOSContext:
         self.request: Optional["Request"] = None

         self.id_assigned_for_next_workflow: str = ""
+        self.is_within_set_workflow_id_block: bool = False

         self.parent_workflow_id: str = ""
         self.parent_workflow_fid: int = -1
@@ -78,6 +79,7 @@ class DBOSContext:
         rv.logger = self.logger
         rv.id_assigned_for_next_workflow = self.id_assigned_for_next_workflow
         self.id_assigned_for_next_workflow = ""
+        rv.is_within_set_workflow_id_block = self.is_within_set_workflow_id_block
         rv.parent_workflow_id = self.workflow_id
         rv.parent_workflow_fid = self.function_id
         rv.in_recovery = self.in_recovery
@@ -95,6 +97,10 @@ class DBOSContext:
         if len(self.id_assigned_for_next_workflow) > 0:
             wfid = self.id_assigned_for_next_workflow
         else:
+            if self.is_within_set_workflow_id_block:
+                self.logger.warning(
+                    f"Multiple workflows started in the same SetWorkflowID block. Only the first workflow is assigned the specified workflow ID; subsequent workflows use a generated workflow ID."
+                )
             wfid = str(uuid.uuid4())
         return wfid

@@ -286,7 +292,7 @@ class DBOSContextSwap:

 class SetWorkflowID:
     """
-    Set the workflow ID to be used for the enclosed workflow invocation.
+    Set the workflow ID to be used for the enclosed workflow invocation. Note: only the first workflow started within a `with SetWorkflowID` block is assigned the specified workflow ID.

     Typical Usage
     ```
@@ -311,7 +317,9 @@ class SetWorkflowID:
         if ctx is None:
             self.created_ctx = True
             _set_local_dbos_context(DBOSContext())
-        assert_current_dbos_context()
+        ctx = assert_current_dbos_context()
+        ctx.id_assigned_for_next_workflow = self.wfid
+        ctx.is_within_set_workflow_id_block = True
         return self

     def __exit__(
@@ -321,6 +329,7 @@ class SetWorkflowID:
         traceback: Optional[TracebackType],
     ) -> Literal[False]:
         # Code to clean up the basic context if we created it
+        assert_current_dbos_context().is_within_set_workflow_id_block = False
         if self.created_ctx:
             _clear_local_dbos_context()
         return False  # Did not handle
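The new `is_within_set_workflow_id_block` flag turns a previously silent foot-gun into a logged warning. A small sketch of the behavior, assuming a configured and launched DBOS application and a hypothetical workflow `greet`:

```python
from dbos import DBOS, SetWorkflowID

@DBOS.workflow()
def greet(name: str) -> str:
    return f"Hello, {name}!"

with SetWorkflowID("my-id"):
    DBOS.start_workflow(greet, "Alice")  # assigned workflow ID "my-id"
    # Logs the warning above: only the first workflow in the block gets
    # "my-id"; this one falls back to a generated UUID.
    DBOS.start_workflow(greet, "Bob")
```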
dbos/_core.py
CHANGED
@@ -188,6 +188,7 @@ def _init_workflow(
         wf_status = dbos._sys_db.update_workflow_status(
             status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
         )
+        # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
         dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
@@ -265,7 +266,9 @@ def _execute_workflow_wthread(
         raise


-def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
+def execute_workflow_by_id(
+    dbos: "DBOS", workflow_id: str, startNew: bool = False
+) -> "WorkflowHandle[Any]":
     status = dbos._sys_db.get_workflow_status(workflow_id)
     if not status:
         raise DBOSRecoveryError(workflow_id, "Workflow status not found")
@@ -292,7 +295,8 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 workflow_id,
                 f"Cannot execute workflow because instance '{iname}' is not registered",
             )
-
+
+        if startNew:
             return start_workflow(
                 dbos,
                 wf_func,
@@ -302,6 +306,17 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 *inputs["args"],
                 **inputs["kwargs"],
             )
+        else:
+            with SetWorkflowID(workflow_id):
+                return start_workflow(
+                    dbos,
+                    wf_func,
+                    status["queue_name"],
+                    True,
+                    dbos._registry.instance_info_map[iname],
+                    *inputs["args"],
+                    **inputs["kwargs"],
+                )
     elif status["class_name"] is not None:
         class_name = status["class_name"]
         if class_name not in dbos._registry.class_info_map:
@@ -309,7 +324,8 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 workflow_id,
                 f"Cannot execute workflow because class '{class_name}' is not registered",
             )
-
+
+        if startNew:
             return start_workflow(
                 dbos,
                 wf_func,
@@ -319,8 +335,19 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 *inputs["args"],
                 **inputs["kwargs"],
             )
+        else:
+            with SetWorkflowID(workflow_id):
+                return start_workflow(
+                    dbos,
+                    wf_func,
+                    status["queue_name"],
+                    True,
+                    dbos._registry.class_info_map[class_name],
+                    *inputs["args"],
+                    **inputs["kwargs"],
+                )
     else:
-
+        if startNew:
             return start_workflow(
                 dbos,
                 wf_func,
@@ -329,6 +356,16 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 *inputs["args"],
                 **inputs["kwargs"],
             )
+        else:
+            with SetWorkflowID(workflow_id):
+                return start_workflow(
+                    dbos,
+                    wf_func,
+                    status["queue_name"],
+                    True,
+                    *inputs["args"],
+                    **inputs["kwargs"],
+                )


 @overload
@@ -422,6 +459,9 @@ def start_workflow(
         or wf_status == WorkflowStatusString.ERROR.value
         or wf_status == WorkflowStatusString.SUCCESS.value
     ):
+        dbos.logger.debug(
+            f"Workflow {new_wf_id} already completed with status {wf_status}. Directly returning a workflow handle."
+        )
         return WorkflowHandlePolling(new_wf_id, dbos)

     if fself is not None:
@@ -494,7 +534,7 @@ def workflow_wrapper(
             temp_wf_type=get_temp_workflow_type(func),
             max_recovery_attempts=max_recovery_attempts,
         )
-
+        # TODO: maybe modify the parameters if they've been changed by `_init_workflow`
         dbos.logger.debug(
             f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
         )
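The early return added to `start_workflow` makes re-invoking a finished workflow ID effectively idempotent, which is what lets `execute_workflow_by_id(..., startNew=False)` safely resume a workflow under its original ID. A sketch of the effect, reusing the hypothetical `greet` workflow from the `SetWorkflowID` example above:

```python
from dbos import DBOS, SetWorkflowID

with SetWorkflowID("finished-id"):
    handle = DBOS.start_workflow(greet, "Alice")
assert handle.get_result() == "Hello, Alice!"  # completes with status SUCCESS

# Reusing the ID of a completed workflow now logs a debug message and
# returns a polling handle immediately instead of re-running the steps.
with SetWorkflowID("finished-id"):
    handle2 = DBOS.start_workflow(greet, "Alice")
assert handle2.get_result() == "Hello, Alice!"
```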
dbos/_dbos.py
CHANGED
@@ -56,6 +56,7 @@ from ._registrations import (
 )
 from ._roles import default_required_roles, required_roles
 from ._scheduler import ScheduledWorkflow, scheduled
+from ._sys_db import WorkflowStatusString
 from ._tracer import dbos_tracer

 if TYPE_CHECKING:
@@ -231,6 +232,7 @@ class DBOS:
                         f"DBOS configured multiple times with conflicting information"
                     )
                 config = _dbos_global_registry.config
+
             _dbos_global_instance = super().__new__(cls)
             _dbos_global_instance.__init__(fastapi=fastapi, config=config, flask=flask)  # type: ignore
         else:
@@ -767,6 +769,11 @@ class DBOS:
         """Execute a workflow by ID (for recovery)."""
         return execute_workflow_by_id(_get_dbos_instance(), workflow_id)

+    @classmethod
+    def restart_workflow(cls, workflow_id: str) -> None:
+        """Restart a workflow from the beginning with a new workflow ID."""
+        execute_workflow_by_id(_get_dbos_instance(), workflow_id, True)
+
     @classmethod
     def recover_pending_workflows(
         cls, executor_ids: List[str] = ["local"]
@@ -774,6 +781,18 @@ class DBOS:
         """Find all PENDING workflows and execute them."""
         return recover_pending_workflows(_get_dbos_instance(), executor_ids)

+    @classmethod
+    def cancel_workflow(cls, workflow_id: str) -> None:
+        """Cancel a workflow by ID."""
+        _get_dbos_instance()._sys_db.set_workflow_status(
+            workflow_id, WorkflowStatusString.CANCELLED, False
+        )
+
+    @classmethod
+    def resume_workflow(cls, workflow_id: str) -> None:
+        """Resume a workflow by ID."""
+        execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
+
     @classproperty
     def logger(cls) -> Logger:
         """Return the DBOS `Logger` for the current context."""
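Together, these classmethods expose the management surface the admin server and CLI call into. A minimal usage sketch, assuming a configured and launched DBOS application and a hypothetical workflow ID:

```python
from dbos import DBOS

workflow_id = "example-workflow-id"  # hypothetical

DBOS.cancel_workflow(workflow_id)   # mark the workflow CANCELLED in the system database
DBOS.resume_workflow(workflow_id)   # re-execute it under the same workflow ID
DBOS.restart_workflow(workflow_id)  # re-execute it from the beginning under a new ID
```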
dbos/_error.py
CHANGED
@@ -35,6 +35,7 @@ class DBOSErrorCode(Enum):
     DeadLetterQueueError = 6
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8
+    ConflictingWorkflowError = 9


 class DBOSWorkflowConflictIDError(DBOSException):
@@ -47,6 +48,16 @@ class DBOSWorkflowConflictIDError(DBOSException):
         )


+class DBOSConflictingWorkflowError(DBOSException):
+    """Exception raised when different workflows are started with the same workflow ID."""
+
+    def __init__(self, workflow_id: str, message: Optional[str] = None):
+        super().__init__(
+            f"Conflicting workflow invocation with the same ID ({workflow_id}): {message}",
+            dbos_error_code=DBOSErrorCode.ConflictingWorkflowError.value,
+        )
+
+
 class DBOSRecoveryError(DBOSException):
     """Exception raised when a workflow recovery fails."""

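A sketch of the condition the new error guards against, assuming a launched DBOS application and two hypothetical workflows registered under different function names:

```python
from dbos import DBOS, SetWorkflowID
from dbos._error import DBOSConflictingWorkflowError

@DBOS.workflow()
def first_workflow() -> None:
    pass

@DBOS.workflow()
def second_workflow() -> None:
    pass

with SetWorkflowID("shared-id"):
    first_workflow()

try:
    with SetWorkflowID("shared-id"):
        second_workflow()  # same workflow ID, different function name
except DBOSConflictingWorkflowError as e:
    print(e)  # Conflicting workflow invocation with the same ID (shared-id): ...
```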
dbos/_fastapi.py
CHANGED
@@ -94,7 +94,11 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
         with EnterDBOSHandler(attributes):
             ctx = assert_current_dbos_context()
             ctx.request = _make_request(request)
-            workflow_id = request.headers.get("dbos-idempotency-key"
-
+            workflow_id = request.headers.get("dbos-idempotency-key")
+            if workflow_id is not None:
+                # Set the workflow ID for the handler
+                with SetWorkflowID(workflow_id):
+                    response = await call_next(request)
+            else:
                 response = await call_next(request)
         return response
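With this change, an HTTP caller can pin the workflow ID of whatever workflow the handler starts, making requests idempotent end to end. A sketch against the README's FastAPI example app, assuming it is running locally on port 8000 (the Flask middleware below behaves identically):

```python
import requests

# Both calls carry the same idempotency key, so they map to the same
# workflow ID and the workflow body executes only once.
headers = {"dbos-idempotency-key": "order-12345"}
requests.get("http://localhost:8000/", headers=headers, timeout=5)
requests.get("http://localhost:8000/", headers=headers, timeout=5)
```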
dbos/_flask.py
CHANGED
@@ -34,8 +34,12 @@ class FlaskMiddleware:
         with EnterDBOSHandler(attributes):
             ctx = assert_current_dbos_context()
             ctx.request = _make_request(request)
-            workflow_id = request.headers.get("dbos-idempotency-key"
-
+            workflow_id = request.headers.get("dbos-idempotency-key")
+            if workflow_id is not None:
+                # Set the workflow ID for the handler
+                with SetWorkflowID(workflow_id):
+                    response = self.app(environ, start_response)
+            else:
                 response = self.app(environ, start_response)
         return response

dbos/_kafka.py
CHANGED
@@ -1,3 +1,4 @@
+import re
 import threading
 from typing import TYPE_CHECKING, Any, Callable, NoReturn

@@ -19,6 +20,14 @@ _kafka_queue: Queue
 _in_order_kafka_queues: dict[str, Queue] = {}


+def safe_group_name(method_name: str, topics: list[str]) -> str:
+    safe_group_id = "-".join(
+        re.sub(r"[^a-zA-Z0-9\-]", "", str(r)) for r in [method_name, *topics]
+    )
+
+    return f"dbos-kafka-group-{safe_group_id}"[:255]
+
+
 def _kafka_consumer_loop(
     func: _KafkaConsumerWorkflow,
     config: dict[str, Any],
@@ -34,6 +43,12 @@ def _kafka_consumer_loop(
     if "auto.offset.reset" not in config:
         config["auto.offset.reset"] = "earliest"

+    if config.get("group.id") is None:
+        config["group.id"] = safe_group_name(func.__qualname__, topics)
+        dbos_logger.warning(
+            f"Consumer group ID not found. Using generated group.id {config['group.id']}"
+        )
+
     consumer = Consumer(config)
     try:
         consumer.subscribe(topics)
@@ -71,8 +86,9 @@ def _kafka_consumer_loop(
                     topic=cmsg.topic(),
                     value=cmsg.value(),
                 )
+                groupID = config.get("group.id")
                 with SetWorkflowID(
-                    f"kafka-unique-id-{msg.topic}-{msg.partition}-{msg.offset}"
+                    f"kafka-unique-id-{msg.topic}-{msg.partition}-{groupID}-{msg.offset}"
                 ):
                     if in_order:
                         assert msg.topic is not None
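The generated group ID is deterministic, so every consumer of the same function and topic set lands in the same Kafka consumer group, and the group ID now also feeds the workflow's idempotency key. A quick sketch of what `safe_group_name` produces, assuming the import path matches this module:

```python
from dbos._kafka import safe_group_name

# Characters other than letters, digits, and "-" are stripped from each part,
# the parts are joined with "-", and the result is capped at 255 characters.
assert (
    safe_group_name("my_consumer", ["topic.a", "topic.b"])
    == "dbos-kafka-group-myconsumer-topica-topicb"
)
```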
dbos/_queue.py
CHANGED
dbos/_sys_db.py
CHANGED
@@ -28,6 +28,7 @@ from sqlalchemy.exc import DBAPIError
 from . import _serialization
 from ._dbos_config import ConfigFile
 from ._error import (
+    DBOSConflictingWorkflowError,
     DBOSDeadLetterQueueError,
     DBOSException,
     DBOSNonExistentWorkflowError,
@@ -288,8 +289,14 @@ class SystemDatabase:
                 ),
             )
         else:
-
-
+            # A blank update so that we can return the existing status
+            cmd = cmd.on_conflict_do_update(
+                index_elements=["workflow_uuid"],
+                set_=dict(
+                    recovery_attempts=SystemSchema.workflow_status.c.recovery_attempts
+                ),
+            )
+        cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name)  # type: ignore

     if conn is not None:
         results = conn.execute(cmd)
@@ -297,37 +304,53 @@ class SystemDatabase:
         with self.engine.begin() as c:
             results = c.execute(cmd)

-
-
-
-
-
-
-
-
-
-
-
-
+        row = results.fetchone()
+        if row is not None:
+            # Check the started workflow matches the expected name, class_name, config_name, and queue_name
+            # A mismatch indicates a workflow starting with the same UUID but different functions, which would throw an exception.
+            recovery_attempts: int = row[0]
+            wf_status = row[1]
+            err_msg: Optional[str] = None
+            if row[2] != status["name"]:
+                err_msg = f"Workflow already exists with a different function name: {row[2]}, but the provided function name is: {status['name']}"
+            elif row[3] != status["class_name"]:
+                err_msg = f"Workflow already exists with a different class name: {row[3]}, but the provided class name is: {status['class_name']}"
+            elif row[4] != status["config_name"]:
+                err_msg = f"Workflow already exists with a different config name: {row[4]}, but the provided config name is: {status['config_name']}"
+            elif row[5] != status["queue_name"]:
+                # This is a warning because a different queue name is not necessarily an error.
+                dbos_logger.warning(
+                    f"Workflow already exists in queue: {row[5]}, but the provided queue name is: {status['queue_name']}. The queue is not updated."
+                )
+            if err_msg is not None:
+                raise DBOSConflictingWorkflowError(status["workflow_uuid"], err_msg)
+
+        if in_recovery and recovery_attempts > max_recovery_attempts:
+            with self.engine.begin() as c:
+                c.execute(
+                    sa.delete(SystemSchema.workflow_queue).where(
+                        SystemSchema.workflow_queue.c.workflow_uuid
+                        == status["workflow_uuid"]
                     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                )
+                c.execute(
+                    sa.update(SystemSchema.workflow_status)
+                    .where(
+                        SystemSchema.workflow_status.c.workflow_uuid
+                        == status["workflow_uuid"]
+                    )
+                    .where(
+                        SystemSchema.workflow_status.c.status
+                        == WorkflowStatusString.PENDING.value
+                    )
+                    .values(
+                        status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+                        queue_name=None,
                     )
-            raise DBOSDeadLetterQueueError(
-                status["workflow_uuid"], max_recovery_attempts
                 )
+            raise DBOSDeadLetterQueueError(
+                status["workflow_uuid"], max_recovery_attempts
+            )

         # Record we have exported status for this single-transaction workflow
         if status["workflow_uuid"] in self._temp_txn_wf_ids:
@@ -356,7 +379,7 @@ class SystemDatabase:
         stmt = (
             sa.update(SystemSchema.workflow_status)
             .where(
-                SystemSchema.
+                SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid
             )
             .values(recovery_attempts=reset_recovery_attempts)
         )
@@ -538,18 +561,27 @@ class SystemDatabase:
                 workflow_uuid=workflow_uuid,
                 inputs=inputs,
             )
-            .
+            .on_conflict_do_update(
+                index_elements=["workflow_uuid"],
+                set_=dict(workflow_uuid=SystemSchema.workflow_inputs.c.workflow_uuid),
+            )
+            .returning(SystemSchema.workflow_inputs.c.inputs)
         )
         if conn is not None:
-            conn.execute(cmd)
+            row = conn.execute(cmd).fetchone()
         else:
             with self.engine.begin() as c:
-                c.execute(cmd)
-
+                row = c.execute(cmd).fetchone()
+        if row is not None and row[0] != inputs:
+            dbos_logger.warning(
+                f"Workflow inputs for {workflow_uuid} changed since the first call! Use the original inputs."
+            )
+        # TODO: actually changing the input
         if workflow_uuid in self._temp_txn_wf_ids:
             # Clean up the single-transaction tracking sets
             self._exported_temp_txn_wf_status.discard(workflow_uuid)
             self._temp_txn_wf_ids.discard(workflow_uuid)
+        return

     def get_workflow_inputs(
         self, workflow_uuid: str
@@ -582,12 +614,12 @@ class SystemDatabase:
         if input.start_time:
             query = query.where(
                 SystemSchema.workflow_status.c.created_at
-                >= datetime.datetime.fromisoformat(input.start_time).timestamp()
+                >= datetime.datetime.fromisoformat(input.start_time).timestamp() * 1000
             )
         if input.end_time:
             query = query.where(
                 SystemSchema.workflow_status.c.created_at
-                <= datetime.datetime.fromisoformat(input.end_time).timestamp()
+                <= datetime.datetime.fromisoformat(input.end_time).timestamp() * 1000
             )
         if input.status:
             query = query.where(SystemSchema.workflow_status.c.status == input.status)
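The `* 1000` factor in the last hunk matters because `datetime.timestamp()` returns seconds, while the `created_at` column being compared against appears to be stored in epoch milliseconds; without the factor, the ISO-8601 time filters would be off by three orders of magnitude. A minimal sketch of the conversion:

```python
import datetime

start_time = "2024-09-01T00:00:00+00:00"
seconds = datetime.datetime.fromisoformat(start_time).timestamp()
millis = seconds * 1000  # scale to match a millisecond-resolution created_at column
print(seconds, millis)   # 1725148800.0 1725148800000.0
```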
dbos/_workflow_commands.py
ADDED
@@ -0,0 +1,171 @@
+import importlib
+import os
+import sys
+from typing import Any, List, Optional, cast
+
+import typer
+from rich import print
+
+from dbos import DBOS
+
+from . import _serialization, load_config
+from ._core import execute_workflow_by_id
+from ._dbos_config import ConfigFile, _is_valid_app_name
+from ._sys_db import (
+    GetWorkflowsInput,
+    GetWorkflowsOutput,
+    SystemDatabase,
+    WorkflowStatuses,
+    WorkflowStatusInternal,
+    WorkflowStatusString,
+)
+
+
+class WorkflowInformation:
+    workflowUUID: str
+    status: WorkflowStatuses
+    workflowName: str
+    workflowClassName: Optional[str]
+    workflowConfigName: Optional[str]
+    input: Optional[_serialization.WorkflowInputs]  # JSON (jsonpickle)
+    output: Optional[str]  # JSON (jsonpickle)
+    error: Optional[str]  # JSON (jsonpickle)
+    executor_id: Optional[str]
+    app_version: Optional[str]
+    app_id: Optional[str]
+    request: Optional[str]  # JSON (jsonpickle)
+    recovery_attempts: Optional[int]
+    authenticated_user: Optional[str]
+    assumed_role: Optional[str]
+    authenticated_roles: Optional[str]  # JSON list of roles.
+    queue_name: Optional[str]
+
+
+def _list_workflows(
+    config: ConfigFile,
+    li: int,
+    user: Optional[str],
+    starttime: Optional[str],
+    endtime: Optional[str],
+    status: Optional[str],
+    request: bool,
+    appversion: Optional[str],
+) -> List[WorkflowInformation]:
+
+    sys_db = None
+
+    try:
+        sys_db = SystemDatabase(config)
+
+        input = GetWorkflowsInput()
+        input.authenticated_user = user
+        input.start_time = starttime
+        input.end_time = endtime
+        if status is not None:
+            input.status = cast(WorkflowStatuses, status)
+        input.application_version = appversion
+        input.limit = li
+
+        output: GetWorkflowsOutput = sys_db.get_workflows(input)
+
+        infos: List[WorkflowInformation] = []
+
+        if output.workflow_uuids is None:
+            typer.echo("No workflows found")
+            return {}
+
+        for workflow_id in output.workflow_uuids:
+            info = _get_workflow_info(
+                sys_db, workflow_id, request
+            )  # Call the method for each ID
+
+            if info is not None:
+                infos.append(info)
+
+        return infos
+    except Exception as e:
+        typer.echo(f"Error listing workflows: {e}")
+        return []
+    finally:
+        if sys_db:
+            sys_db.destroy()
+
+
+def _get_workflow(
+    config: ConfigFile, uuid: str, request: bool
+) -> Optional[WorkflowInformation]:
+    sys_db = None
+
+    try:
+        sys_db = SystemDatabase(config)
+
+        info = _get_workflow_info(sys_db, uuid, request)
+        return info
+
+    except Exception as e:
+        typer.echo(f"Error getting workflow: {e}")
+        return None
+    finally:
+        if sys_db:
+            sys_db.destroy()
+
+
+def _cancel_workflow(config: ConfigFile, uuid: str) -> None:
+    # config = load_config()
+    sys_db = None
+
+    try:
+        sys_db = SystemDatabase(config)
+        sys_db.set_workflow_status(uuid, WorkflowStatusString.CANCELLED, False)
+        return
+
+    except Exception as e:
+        typer.echo(f"Failed to connect to DBOS system database: {e}")
+        return None
+    finally:
+        if sys_db:
+            sys_db.destroy()
+
+
+def _get_workflow_info(
+    sys_db: SystemDatabase, workflowUUID: str, getRequest: bool
+) -> Optional[WorkflowInformation]:
+
+    info = sys_db.get_workflow_status(workflowUUID)
+    if info is None:
+        return None
+
+    winfo = WorkflowInformation()
+
+    winfo.workflowUUID = workflowUUID
+    winfo.status = info["status"]
+    winfo.workflowName = info["name"]
+    winfo.workflowClassName = info["class_name"]
+    winfo.workflowConfigName = info["config_name"]
+    winfo.executor_id = info["executor_id"]
+    winfo.app_version = info["app_version"]
+    winfo.app_id = info["app_id"]
+    winfo.recovery_attempts = info["recovery_attempts"]
+    winfo.authenticated_user = info["authenticated_user"]
+    winfo.assumed_role = info["assumed_role"]
+    winfo.authenticated_roles = info["authenticated_roles"]
+    winfo.queue_name = info["queue_name"]
+
+    # no input field
+    input_data = sys_db.get_workflow_inputs(workflowUUID)
+    if input_data is not None:
+        winfo.input = input_data
+
+    if info.get("status") == "SUCCESS":
+        result = sys_db.await_workflow_result(workflowUUID)
+        winfo.output = result
+    elif info.get("status") == "ERROR":
+        try:
+            sys_db.await_workflow_result(workflowUUID)
+        except Exception as e:
+            winfo.error = str(e)
+
+    if not getRequest:
+        winfo.request = None
+
+    return winfo
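These private helpers back the new `dbos workflow` CLI subcommands below. A sketch of calling them directly, assuming a `dbos-config.yaml` in the working directory and a hypothetical workflow ID:

```python
from dbos import load_config
from dbos._workflow_commands import _get_workflow, _list_workflows

config = load_config()

# The ten most recent ERROR workflows, without request payloads.
for info in _list_workflows(config, 10, None, None, None, "ERROR", False, None):
    print(info.workflowUUID, info.status, info.workflowName)

detail = _get_workflow(config, "example-workflow-id", False)  # hypothetical ID
```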
dbos/cli.py
CHANGED
@@ -8,6 +8,8 @@ import typing
 from os import path
 from typing import Any

+import jsonpickle  # type: ignore
+import requests
 import sqlalchemy as sa
 import tomlkit
 import typer
@@ -17,12 +19,16 @@ from typing_extensions import Annotated

 from dbos._schemas.system_database import SystemSchema

-from . import load_config
+from . import _serialization, load_config
 from ._app_db import ApplicationDatabase
 from ._dbos_config import _is_valid_app_name
 from ._sys_db import SystemDatabase
+from ._workflow_commands import _cancel_workflow, _get_workflow, _list_workflows

 app = typer.Typer()
+workflow = typer.Typer()
+
+app.add_typer(workflow, name="workflow", help="Manage DBOS workflows")


 def _on_windows() -> bool:
@@ -333,5 +339,138 @@ def reset(
         sys_db.destroy()


+@workflow.command(help="List workflows for your application")
+def list(
+    limit: Annotated[
+        int,
+        typer.Option("--limit", "-l", help="Limit the results returned"),
+    ] = 10,
+    user: Annotated[
+        typing.Optional[str],
+        typer.Option("--user", "-u", help="Retrieve workflows run by this user"),
+    ] = None,
+    starttime: Annotated[
+        typing.Optional[str],
+        typer.Option(
+            "--start-time",
+            "-s",
+            help="Retrieve workflows starting after this timestamp (ISO 8601 format)",
+        ),
+    ] = None,
+    endtime: Annotated[
+        typing.Optional[str],
+        typer.Option(
+            "--end-time",
+            "-e",
+            help="Retrieve workflows starting before this timestamp (ISO 8601 format)",
+        ),
+    ] = None,
+    status: Annotated[
+        typing.Optional[str],
+        typer.Option(
+            "--status",
+            "-S",
+            help="Retrieve workflows with this status (PENDING, SUCCESS, ERROR, RETRIES_EXCEEDED, ENQUEUED, or CANCELLED)",
+        ),
+    ] = None,
+    appversion: Annotated[
+        typing.Optional[str],
+        typer.Option(
+            "--application-version",
+            "-v",
+            help="Retrieve workflows with this application version",
+        ),
+    ] = None,
+    request: Annotated[
+        bool,
+        typer.Option("--request", help="Retrieve workflow request information"),
+    ] = True,
+    appdir: Annotated[
+        typing.Optional[str],
+        typer.Option("--app-dir", "-d", help="Specify the application root directory"),
+    ] = None,
+) -> None:
+    config = load_config()
+    workflows = _list_workflows(
+        config, limit, user, starttime, endtime, status, request, appversion
+    )
+    print(jsonpickle.encode(workflows, unpicklable=False))
+
+
+@workflow.command(help="Retrieve the status of a workflow")
+def get(
+    uuid: Annotated[str, typer.Argument()],
+    appdir: Annotated[
+        typing.Optional[str],
+        typer.Option("--app-dir", "-d", help="Specify the application root directory"),
+    ] = None,
+    request: Annotated[
+        bool,
+        typer.Option("--request", help="Retrieve workflow request information"),
+    ] = True,
+) -> None:
+    config = load_config()
+    print(jsonpickle.encode(_get_workflow(config, uuid, request), unpicklable=False))
+
+
+@workflow.command(
+    help="Cancel a workflow so it is no longer automatically retried or restarted"
+)
+def cancel(
+    uuid: Annotated[str, typer.Argument()],
+    appdir: Annotated[
+        typing.Optional[str],
+        typer.Option("--app-dir", "-d", help="Specify the application root directory"),
+    ] = None,
+) -> None:
+    config = load_config()
+    _cancel_workflow(config, uuid)
+    print(f"Workflow {uuid} has been cancelled")
+
+
+@workflow.command(help="Resume a workflow that has been cancelled")
+def resume(
+    uuid: Annotated[str, typer.Argument()],
+    host: Annotated[
+        typing.Optional[str],
+        typer.Option("--host", "-h", help="Specify the admin host"),
+    ] = "localhost",
+    port: Annotated[
+        typing.Optional[int],
+        typer.Option("--port", "-p", help="Specify the admin port"),
+    ] = 3001,
+) -> None:
+    response = requests.post(
+        f"http://{host}:{port}/workflows/{uuid}/resume", json=[], timeout=5
+    )
+
+    if response.status_code == 200:
+        print(f"Workflow {uuid} has been resumed")
+    else:
+        print(f"Failed to resume workflow {uuid}. Status code: {response.status_code}")
+
+
+@workflow.command(help="Restart a workflow from the beginning with a new id")
+def restart(
+    uuid: Annotated[str, typer.Argument()],
+    host: Annotated[
+        typing.Optional[str],
+        typer.Option("--host", "-h", help="Specify the admin host"),
+    ] = "localhost",
+    port: Annotated[
+        typing.Optional[int],
+        typer.Option("--port", "-p", help="Specify the admin port"),
+    ] = 3001,
+) -> None:
+    response = requests.post(
+        f"http://{host}:{port}/workflows/{uuid}/restart", json=[], timeout=5
+    )
+
+    if response.status_code == 200:
+        print(f"Workflow {uuid} has been restarted")
+    else:
+        print(f"Failed to restart workflow {uuid}. Status code: {response.status_code}")
+
+
 if __name__ == "__main__":
     app()
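Taken together, these commands give the CLI a complete workflow-management surface: `dbos workflow list --limit 5 --status ERROR` prints recent failed workflows as JSON, `dbos workflow get <uuid>` shows one workflow's status, `dbos workflow cancel <uuid>` writes CANCELLED directly to the system database, and `dbos workflow resume <uuid>` and `dbos workflow restart <uuid>` go through the admin server's new HTTP routes.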
{dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.19.0a4
+Version: 0.20.0a2
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
@@ -28,14 +28,14 @@ Description-Content-Type: text/markdown

 <div align="center">

-# DBOS Transact:
+# DBOS Transact: A Lightweight Durable Execution Library Built on Postgres

 #### [Documentation](https://docs.dbos.dev/) • [Examples](https://docs.dbos.dev/examples) • [Github](https://github.com/dbos-inc) • [Discord](https://discord.com/invite/jsmC6pXGgX)
 </div>

 ---

-DBOS Transact is a Python library
+DBOS Transact is a Python library for **ultra-lightweight durable execution**.
 For example:

 ```python
@@ -55,18 +55,23 @@ def workflow()

 Durable execution means your program is **resilient to any failure**.
 If it is ever interrupted or crashes, all your workflows will automatically resume from the last completed step.
-
-No matter how many times you try to crash it, it always resumes from exactly where it left off!
+Durable execution helps solve many common problems:

-
-
-
+- Orchestrating long-running or business-critical workflows so they seamlessly recover from any failure.
+- Running reliable background jobs with no timeouts.
+- Processing incoming events (e.g. from Kafka) exactly once.
+- Running a fault-tolerant distributed task queue.
+- Running a reliable cron scheduler.
+- Operating an AI agent, or anything that connects to an unreliable or non-deterministic API.

-
+What’s unique about DBOS's implementation of durable execution is that it’s implemented in a **lightweight library** that’s **totally backed by Postgres**.
+To use DBOS, just `pip install` it and annotate your program with DBOS decorators.
+Under the hood, those decorators store your program's execution state (which workflows are currently executing and which steps they've completed) in a Postgres database.
+If your program crashes or is interrupted, they automatically recover its workflows from their stored state.
+So all you need to use DBOS is Postgres—there are no other dependencies you have to manage, no separate workflow server.

-
-
-- Observability—all workflows automatically emit [OpenTelemetry](https://opentelemetry.io/) traces.
+One big advantage of this approach is that you can add DBOS to **any** Python application—**it’s just a library**.
+You can use DBOS to add reliable background jobs or cron scheduling or queues to your app with no external dependencies except Postgres.

 ## Getting Started
@@ -77,7 +82,7 @@ pip install dbos
 dbos init --config
 ```

-Then, try it out with this simple program
+Then, try it out with this simple program:

 ```python
 from fastapi import FastAPI
@@ -107,14 +112,14 @@ def fastapi_endpoint():
     dbos_workflow()
 ```

-Save the program into `main.py
+Save the program into `main.py` and start it with `fastapi run`.
 Visit `localhost:8000` in your browser to start the workflow.
 When prompted, press `Control + \` to force quit your application.
 It should crash midway through the workflow, having completed step one but not step two.
 Then, restart your app with `fastapi run`.
 It should resume the workflow from where it left off, completing step two without re-executing step one.

-To learn how to build more complex workflows, see
+To learn how to build more complex workflows, see the [programming guide](https://docs.dbos.dev/python/programming-guide) or [examples](https://docs.dbos.dev/examples).

 ## Documentation
@@ -125,7 +130,7 @@ To learn how to build more complex workflows, see our [programming guide](https:

 - [**AI-Powered Slackbot**](https://docs.dbos.dev/python/examples/rag-slackbot) — A Slackbot that answers questions about previous Slack conversations, using DBOS to durably orchestrate its RAG pipeline.
 - [**Widget Store**](https://docs.dbos.dev/python/examples/widget-store) — An online storefront that uses DBOS durable workflows to be resilient to any failure.
-- [**
+- [**Scheduled Reminders**](https://docs.dbos.dev/python/examples/scheduled-reminders) — In just three lines of code, schedule an email to send days, weeks, or months in the future.

 More examples [here](https://docs.dbos.dev/examples)!

{dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/RECORD
CHANGED
@@ -1,24 +1,24 @@
-dbos-0.
-dbos-0.
-dbos-0.
-dbos-0.
+dbos-0.20.0a2.dist-info/METADATA,sha256=HuHOltiSuDZN-RYCA22G-kTSVYlfK9bksxlPiAamZuo,5309
+dbos-0.20.0a2.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+dbos-0.20.0a2.dist-info/entry_points.txt,sha256=z6GcVANQV7Uw_82H9Ob2axJX6V3imftyZsljdh-M1HU,54
+dbos-0.20.0a2.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
-dbos/_admin_server.py,sha256=
+dbos/_admin_server.py,sha256=PJgneZG9-64TapZrPeJtt73puAswRImCE5uce2k2PKU,4750
 dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
 dbos/_cloudutils/authentication.py,sha256=V0fCWQN9stCkhbuuxgPTGpvuQcDqfU3KAxPAh01vKW4,5007
 dbos/_cloudutils/cloudutils.py,sha256=5e3CW1deSW-dI5G3QN0XbiVsBhyqT8wu7fuV2f8wtGU,7688
 dbos/_cloudutils/databases.py,sha256=x4187Djsyoa-QaG3Kog8JT2_GERsnqa93LIVanmVUmg,8393
-dbos/_context.py,sha256=
-dbos/_core.py,sha256=
+dbos/_context.py,sha256=RH08s_nee95vgxdz6AsYuVWF1LuJSVtOyIifblsa4pw,18760
+dbos/_core.py,sha256=dmVve1YZyQmAfwKsxqz6N3bXBowsMBmLxBDsymoWKsA,35956
 dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
 dbos/_db_wizard.py,sha256=xgKLna0_6Xi50F3o8msRosXba8NScHlpJR5ICVCkHDQ,7534
-dbos/_dbos.py,sha256=
+dbos/_dbos.py,sha256=Kgnity6JxjThIf1L8CQbeobQMiJqUUeWlRf36_eGu2g,35385
 dbos/_dbos_config.py,sha256=h_q1gzudhsAMVkGMD0qQ6kLic6YhdJgzm50YFSIx9Bo,8196
-dbos/_error.py,sha256=
-dbos/_fastapi.py,sha256=
-dbos/_flask.py,sha256=
-dbos/_kafka.py,sha256=
+dbos/_error.py,sha256=vtaSsG0QW6cRlwfZ4zzZWy_IHCZlomwSlrDyGWuyn8c,4337
+dbos/_fastapi.py,sha256=yRHrCwul2iYBxAAYuBQLcn9LMYUS6PE4CU9y1vUSPR8,3587
+dbos/_flask.py,sha256=DZKUZR5-xOzPI7tYZ53r2PvvHVoAb8SYwLzMVFsVfjI,2608
+dbos/_kafka.py,sha256=o6DbwnsYRDtvVTZVsN7BAK8cdP79AfoWX3Q7CGY2Yuo,4199
 dbos/_kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
 dbos/_logger.py,sha256=iYwbA7DLyXalWa2Yu07HO6Xm301nRuenMU64GgwUMkU,3576
 dbos/_migrations/env.py,sha256=38SIGVbmn_VV2x2u1aHLcPOoWgZ84eCymf3g_NljmbU,1626
@@ -31,7 +31,7 @@ dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-m
 dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_outcome.py,sha256=FDMgWVjZ06vm9xO-38H17mTqBImUYQxgKs_bDCSIAhE,6648
-dbos/_queue.py,sha256=
+dbos/_queue.py,sha256=o_aczwualJTMoXb0XXL-Y5QH77OEukWzuerogbWi2ho,2779
 dbos/_recovery.py,sha256=jbzGYxICA2drzyzlBSy2UiXhKV_16tBVacKQdTkqf-w,2008
 dbos/_registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642
 dbos/_request.py,sha256=cX1B3Atlh160phgS35gF1VEEV4pD126c9F3BDgBmxZU,929
@@ -41,7 +41,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=KeyoPrF7hy_ODXV7QNike_VFSD74QBRfQ76D7QyE9HI,966
 dbos/_schemas/system_database.py,sha256=rwp4EvCSaXcUoMaRczZCvETCxGp72k3-hvLyGUDkih0,5163
 dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
-dbos/_sys_db.py,sha256=
+dbos/_sys_db.py,sha256=ha5E11P83oi78L4R7cX_OL_N1Tf2Ir0Xr30GK1_27SA,52290
 dbos/_templates/hello/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/hello/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/hello/__package/main.py,sha256=eI0SS9Nwj-fldtiuSzIlIG6dC91GXXwdRsoHxv6S_WI,2719
@@ -53,8 +53,9 @@ dbos/_templates/hello/migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1u
 dbos/_templates/hello/migrations/versions/2024_07_31_180642_init.py,sha256=U5thFWGqNN4QLrNXT7wUUqftIFDNE5eSdqD8JNW1mec,942
 dbos/_templates/hello/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=rvBY1RQU6DO7rL7EnaJJxGcmd4tP_PpGqUEE6imZnhY,2518
-dbos/
+dbos/_workflow_commands.py,sha256=tj-gJARjDJ5aYo0ii2udTAU4l36vbeXwmOYh8Q4y_ac,4625
+dbos/cli.py,sha256=26fowBwpV-U7kuPdGRnAcuUMJIqYvstMX9qJ0t-B6BI,15354
 dbos/dbos-config.schema.json,sha256=X5TpXNcARGceX0zQs0fVgtZW_Xj9uBbY5afPt9Rz9yk,5741
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.
+dbos-0.20.0a2.dist-info/RECORD,,
{dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/WHEEL
File without changes
{dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/entry_points.txt
File without changes
{dbos-0.19.0a4.dist-info → dbos-0.20.0a2.dist-info}/licenses/LICENSE
File without changes