dbos 0.28.0a1__py3-none-any.whl → 0.28.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_admin_server.py +3 -3
- dbos/_client.py +17 -9
- dbos/_conductor/conductor.py +1 -5
- dbos/_conductor/protocol.py +1 -1
- dbos/_context.py +26 -9
- dbos/_core.py +3 -8
- dbos/_dbos.py +14 -18
- dbos/_fastapi.py +2 -23
- dbos/_flask.py +3 -37
- dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py +35 -0
- dbos/_recovery.py +1 -1
- dbos/_schemas/system_database.py +6 -1
- dbos/_sys_db.py +60 -61
- dbos/_utils.py +9 -0
- dbos/_workflow_commands.py +4 -8
- dbos/cli/cli.py +9 -0
- dbos-0.28.0a6.dist-info/METADATA +312 -0
- {dbos-0.28.0a1.dist-info → dbos-0.28.0a6.dist-info}/RECORD +21 -21
- dbos/_request.py +0 -35
- dbos-0.28.0a1.dist-info/METADATA +0 -144
- {dbos-0.28.0a1.dist-info → dbos-0.28.0a6.dist-info}/WHEEL +0 -0
- {dbos-0.28.0a1.dist-info → dbos-0.28.0a6.dist-info}/entry_points.txt +0 -0
- {dbos-0.28.0a1.dist-info → dbos-0.28.0a6.dist-info}/licenses/LICENSE +0 -0
dbos/_sys_db.py
CHANGED

```diff
@@ -106,8 +106,6 @@ class WorkflowStatus:
     app_id: Optional[str]
     # The number of times this workflow's execution has been attempted
     recovery_attempts: Optional[int]
-    # The HTTP request that triggered the workflow, if known
-    request: Optional[str]
 
 
 class WorkflowStatusInternal(TypedDict):
@@ -120,7 +118,6 @@ class WorkflowStatusInternal(TypedDict):
     assumed_role: Optional[str]
    authenticated_roles: Optional[str]  # JSON list of roles
     output: Optional[str]  # JSON (jsonpickle)
-    request: Optional[str]  # JSON (jsonpickle)
     error: Optional[str]  # JSON (jsonpickle)
     created_at: Optional[int]  # Unix epoch timestamp in ms
     updated_at: Optional[int]  # Unix epoch timestamp in ms
@@ -138,6 +135,9 @@ class WorkflowStatusInternal(TypedDict):
 
 
 class EnqueueOptionsInternal(TypedDict):
     deduplication_id: Optional[str]  # Unique ID for deduplication on a queue
+    priority: Optional[
+        int
+    ]  # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
 
 
 class RecordedResult(TypedDict):
```
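For orientation (this sketch is not part of the diff), here is how the new `EnqueueOptionsInternal` shape might be populated. The class is redefined locally so the snippet runs standalone, and the example values are hypothetical:

```python
from typing import Optional, TypedDict


# Local mirror of the TypedDict shown in the hunk above, for illustration only.
class EnqueueOptionsInternal(TypedDict):
    deduplication_id: Optional[str]  # Unique ID for deduplication on a queue
    priority: Optional[int]  # 1 ~ 2,147,483,647; None is treated as 0 (highest)


# Hypothetical options for an enqueue call: dedupe on an order ID and
# run at priority 10 (lower numbers are dequeued first).
options: EnqueueOptionsInternal = {
    "deduplication_id": "order-1234",
    "priority": 10,
}
print(options)
```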
```diff
@@ -369,7 +369,6 @@ class SystemDatabase:
             executor_id=status["executor_id"],
             application_version=status["app_version"],
             application_id=status["app_id"],
-            request=status["request"],
             authenticated_user=status["authenticated_user"],
             authenticated_roles=status["authenticated_roles"],
             assumed_role=status["assumed_role"],
@@ -477,7 +476,6 @@ class SystemDatabase:
             executor_id=status["executor_id"],
             application_version=status["app_version"],
             application_id=status["app_id"],
-            request=status["request"],
             authenticated_user=status["authenticated_user"],
             authenticated_roles=status["authenticated_roles"],
             assumed_role=status["assumed_role"],
@@ -625,7 +623,6 @@ class SystemDatabase:
                 else status["app_version"]
             ),
             application_id=status["app_id"],
-            request=status["request"],
             authenticated_user=status["authenticated_user"],
             authenticated_roles=status["authenticated_roles"],
             assumed_role=status["assumed_role"],
@@ -687,7 +684,6 @@ class SystemDatabase:
             sa.select(
                 SystemSchema.workflow_status.c.status,
                 SystemSchema.workflow_status.c.name,
-                SystemSchema.workflow_status.c.request,
                 SystemSchema.workflow_status.c.recovery_attempts,
                 SystemSchema.workflow_status.c.config_name,
                 SystemSchema.workflow_status.c.class_name,
@@ -712,21 +708,20 @@ class SystemDatabase:
                 "error": None,
                 "status": row[0],
                 "name": row[1],
-                "request": row[2],
-                "recovery_attempts": row[3],
-                "config_name": row[4],
-                "class_name": row[5],
-                "authenticated_user": row[6],
-                "authenticated_roles": row[7],
-                "assumed_role": row[8],
-                "queue_name": row[9],
-                "executor_id": row[10],
-                "created_at": row[11],
-                "updated_at": row[12],
-                "app_version": row[13],
-                "app_id": row[14],
-                "workflow_deadline_epoch_ms": row[15],
-                "workflow_timeout_ms": row[16],
+                "recovery_attempts": row[2],
+                "config_name": row[3],
+                "class_name": row[4],
+                "authenticated_user": row[5],
+                "authenticated_roles": row[6],
+                "assumed_role": row[7],
+                "queue_name": row[8],
+                "executor_id": row[9],
+                "created_at": row[10],
+                "updated_at": row[11],
+                "app_version": row[12],
+                "app_id": row[13],
+                "workflow_deadline_epoch_ms": row[14],
+                "workflow_timeout_ms": row[15],
             }
             return status
 
@@ -802,9 +797,7 @@ class SystemDatabase:
         )
         return inputs
 
-    def get_workflows(
-        self, input: GetWorkflowsInput, get_request: bool = False
-    ) -> List[WorkflowStatus]:
+    def get_workflows(self, input: GetWorkflowsInput) -> List[WorkflowStatus]:
         """
         Retrieve a list of workflows result and inputs based on the input criteria. The result is a list of external-facing workflow status objects.
         """
@@ -812,7 +805,6 @@ class SystemDatabase:
             SystemSchema.workflow_status.c.workflow_uuid,
             SystemSchema.workflow_status.c.status,
             SystemSchema.workflow_status.c.name,
-            SystemSchema.workflow_status.c.request,
             SystemSchema.workflow_status.c.recovery_attempts,
             SystemSchema.workflow_status.c.config_name,
             SystemSchema.workflow_status.c.class_name,
@@ -885,27 +877,26 @@ class SystemDatabase:
             info.workflow_id = row[0]
             info.status = row[1]
             info.name = row[2]
-            info.request = row[3]
-            info.recovery_attempts = row[4]
-            info.config_name = row[5]
-            info.class_name = row[6]
-            info.authenticated_user = row[7]
+            info.recovery_attempts = row[3]
+            info.config_name = row[4]
+            info.class_name = row[5]
+            info.authenticated_user = row[6]
             info.authenticated_roles = (
-                json.loads(row[8]) if row[8] is not None else None
+                json.loads(row[7]) if row[7] is not None else None
             )
-            info.assumed_role = row[9]
-            info.queue_name = row[10]
-            info.executor_id = row[11]
-            info.created_at = row[12]
-            info.updated_at = row[13]
-            info.app_version = row[14]
-            info.app_id = row[15]
+            info.assumed_role = row[8]
+            info.queue_name = row[9]
+            info.executor_id = row[10]
+            info.created_at = row[11]
+            info.updated_at = row[12]
+            info.app_version = row[13]
+            info.app_id = row[14]
 
             inputs, output, exception = _serialization.safe_deserialize(
                 info.workflow_id,
-                serialized_input=row[16],
-                serialized_output=row[17],
-                serialized_exception=row[18],
+                serialized_input=row[15],
+                serialized_output=row[16],
+                serialized_exception=row[17],
             )
             info.input = inputs
             info.output = output
@@ -915,7 +906,7 @@ class SystemDatabase:
         return infos
 
     def get_queued_workflows(
-        self, input: GetQueuedWorkflowsInput, get_request: bool = False
+        self, input: GetQueuedWorkflowsInput
     ) -> List[WorkflowStatus]:
         """
         Retrieve a list of queued workflows result and inputs based on the input criteria. The result is a list of external-facing workflow status objects.
@@ -924,7 +915,6 @@ class SystemDatabase:
             SystemSchema.workflow_status.c.workflow_uuid,
             SystemSchema.workflow_status.c.status,
             SystemSchema.workflow_status.c.name,
-            SystemSchema.workflow_status.c.request,
             SystemSchema.workflow_status.c.recovery_attempts,
             SystemSchema.workflow_status.c.config_name,
             SystemSchema.workflow_status.c.class_name,
@@ -993,27 +983,26 @@ class SystemDatabase:
             info.workflow_id = row[0]
             info.status = row[1]
             info.name = row[2]
-            info.request = row[3]
-            info.recovery_attempts = row[4]
-            info.config_name = row[5]
-            info.class_name = row[6]
-            info.authenticated_user = row[7]
+            info.recovery_attempts = row[3]
+            info.config_name = row[4]
+            info.class_name = row[5]
+            info.authenticated_user = row[6]
             info.authenticated_roles = (
-                json.loads(row[8]) if row[8] is not None else None
+                json.loads(row[7]) if row[7] is not None else None
             )
-            info.assumed_role = row[9]
-            info.queue_name = row[10]
-            info.executor_id = row[11]
-            info.created_at = row[12]
-            info.updated_at = row[13]
-            info.app_version = row[14]
-            info.app_id = row[15]
+            info.assumed_role = row[8]
+            info.queue_name = row[9]
+            info.executor_id = row[10]
+            info.created_at = row[11]
+            info.updated_at = row[12]
+            info.app_version = row[13]
+            info.app_id = row[14]
 
             inputs, output, exception = _serialization.safe_deserialize(
                 info.workflow_id,
-                serialized_input=row[16],
-                serialized_output=row[17],
-                serialized_exception=row[18],
+                serialized_input=row[15],
+                serialized_output=row[16],
+                serialized_exception=row[17],
            )
             info.input = inputs
             info.output = output
@@ -1633,12 +1622,19 @@
             if enqueue_options is not None
             else None
         )
+        priority = (
+            enqueue_options["priority"] if enqueue_options is not None else None
+        )
+        # Default to 0 (highest priority) if not provided
+        if priority is None:
+            priority = 0
         query = (
             pg.insert(SystemSchema.workflow_queue)
             .values(
                 workflow_uuid=workflow_id,
                 queue_name=queue_name,
                 deduplication_id=deduplication_id,
+                priority=priority,
             )
             .on_conflict_do_nothing(
                 index_elements=SystemSchema.workflow_queue.primary_key.columns
@@ -1747,7 +1743,10 @@
             .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
             .where(SystemSchema.workflow_queue.c.started_at_epoch_ms == None)
             .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
-            .order_by(SystemSchema.workflow_queue.c.created_at_epoch_ms.asc())
+            .order_by(
+                SystemSchema.workflow_queue.c.priority.asc(),
+                SystemSchema.workflow_queue.c.created_at_epoch_ms.asc(),
+            )
             .with_for_update(nowait=True)  # Error out early
         )
         # Apply limit only if max_tasks is finite
```
dbos/_utils.py
CHANGED

```diff
@@ -1,8 +1,17 @@
+import importlib.metadata
 import os
 
 INTERNAL_QUEUE_NAME = "_dbos_internal_queue"
 
+request_id_header = "x-request-id"
+
 
 class GlobalParams:
     app_version: str = os.environ.get("DBOS__APPVERSION", "")
     executor_id: str = os.environ.get("DBOS__VMID", "local")
+    try:
+        # Only works on Python >= 3.8
+        dbos_version = importlib.metadata.version("dbos")
+    except importlib.metadata.PackageNotFoundError:
+        # If package is not installed or during development
+        dbos_version = "unknown"
```
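Other modules (including the CLI change below) can then read the resolved version off the class. A trivial usage sketch, assuming the package is importable:

```python
# GlobalParams.dbos_version is resolved once at import time.
from dbos._utils import GlobalParams

print(f"Running dbos=={GlobalParams.dbos_version}")  # "unknown" when run from source
```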
dbos/_workflow_commands.py
CHANGED

```diff
@@ -27,7 +27,6 @@ def list_workflows(
     limit: Optional[int] = None,
     offset: Optional[int] = None,
     sort_desc: bool = False,
-    request: bool = False,
     workflow_id_prefix: Optional[str] = None,
 ) -> List[WorkflowStatus]:
     input = GetWorkflowsInput()
@@ -43,7 +42,7 @@ def list_workflows(
     input.sort_desc = sort_desc
     input.workflow_id_prefix = workflow_id_prefix
 
-    infos: List[WorkflowStatus] = sys_db.get_workflows(input, get_request=request)
+    infos: List[WorkflowStatus] = sys_db.get_workflows(input)
 
     return infos
 
@@ -59,7 +58,6 @@ def list_queued_workflows(
     limit: Optional[int] = None,
     offset: Optional[int] = None,
     sort_desc: bool = False,
-    request: bool = False,
 ) -> List[WorkflowStatus]:
     input: GetQueuedWorkflowsInput = {
         "queue_name": queue_name,
@@ -72,17 +70,15 @@ def list_queued_workflows(
         "sort_desc": sort_desc,
     }
 
-    infos: List[WorkflowStatus] = sys_db.get_queued_workflows(input, get_request=request)
+    infos: List[WorkflowStatus] = sys_db.get_queued_workflows(input)
     return infos
 
 
-def get_workflow(
-    sys_db: SystemDatabase, workflow_id: str, get_request: bool
-) -> Optional[WorkflowStatus]:
+def get_workflow(sys_db: SystemDatabase, workflow_id: str) -> Optional[WorkflowStatus]:
     input = GetWorkflowsInput()
     input.workflow_ids = [workflow_id]
 
-    infos: List[WorkflowStatus] = sys_db.get_workflows(input, get_request=get_request)
+    infos: List[WorkflowStatus] = sys_db.get_workflows(input)
     if not infos:
         return None
 
```
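With the `request`/`get_request` flags removed, callers pass only filter arguments. A sketch of the post-change call shapes, using only parameters visible in this diff (`sys_db` is assumed to be an initialized `SystemDatabase`; the workflow ID is hypothetical):

```python
from dbos._workflow_commands import get_workflow, list_workflows


def inspect_recent(sys_db) -> None:
    # list_workflows no longer takes a request flag; just filters and paging.
    for status in list_workflows(sys_db, limit=10, sort_desc=True):
        print(status.workflow_id, status.status)

    # get_workflow drops its third positional argument.
    status = get_workflow(sys_db, "my-workflow-id")  # hypothetical workflow ID
    print(status)
```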
dbos/cli/cli.py
CHANGED

```diff
@@ -24,6 +24,7 @@ from .._dbos_config import _is_valid_app_name
 from .._docker_pg_helper import start_docker_pg, stop_docker_pg
 from .._schemas.system_database import SystemSchema
 from .._sys_db import SystemDatabase, reset_system_database
+from .._utils import GlobalParams
 from ..cli._github_init import create_template_from_github
 from ._template_init import copy_template, get_project_name, get_templates_directory
 
@@ -42,6 +43,14 @@ def start_client(db_url: Optional[str] = None) -> DBOSClient:
 
 
 app = typer.Typer()
+
+
+@app.command(help="Show the version and exit")
+def version() -> None:
+    """Display the current version of DBOS CLI."""
+    typer.echo(f"DBOS CLI version: {GlobalParams.dbos_version}")
+
+
 workflow = typer.Typer()
 queue = typer.Typer()
```
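The new subcommand is invoked as `dbos version`. It can also be exercised in-process with Typer's test runner; a sketch, assuming the package is installed so the `app` object is importable:

```python
from typer.testing import CliRunner

from dbos.cli.cli import app

result = CliRunner().invoke(app, ["version"])
print(result.output)  # e.g. "DBOS CLI version: 0.28.0a6"
```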
dbos-0.28.0a6.dist-info/METADATA
ADDED

Metadata-Version: 2.1
Name: dbos
Version: 0.28.0a6
Summary: Ultra-lightweight durable execution in Python
Author-Email: "DBOS, Inc." <contact@dbos.dev>
License: MIT
Requires-Python: >=3.9
Requires-Dist: pyyaml>=6.0.2
Requires-Dist: jsonschema>=4.23.0
Requires-Dist: alembic>=1.13.3
Requires-Dist: typing-extensions>=4.12.2; python_version < "3.10"
Requires-Dist: typer>=0.12.5
Requires-Dist: jsonpickle>=3.3.0
Requires-Dist: opentelemetry-api>=1.27.0
Requires-Dist: opentelemetry-sdk>=1.27.0
Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.27.0
Requires-Dist: python-dateutil>=2.9.0.post0
Requires-Dist: fastapi[standard]>=0.115.2
Requires-Dist: tomlkit>=0.13.2
Requires-Dist: psycopg[binary]>=3.1
Requires-Dist: docker>=7.1.0
Requires-Dist: cryptography>=43.0.3
Requires-Dist: rich>=13.9.4
Requires-Dist: pyjwt>=2.10.1
Requires-Dist: websockets>=15.0
Description-Content-Type: text/markdown

<div align="center">

# DBOS Transact: Lightweight Durable Workflows

#### [Documentation](https://docs.dbos.dev/) • [Examples](https://docs.dbos.dev/examples) • [Github](https://github.com/dbos-inc) • [Discord](https://discord.com/invite/jsmC6pXGgX)
</div>

---

## What is DBOS?

DBOS provides lightweight durable workflows built on top of Postgres.
Instead of managing your own workflow orchestrator or task queue system, you can use DBOS to add durable workflows and queues to your program in just a few lines of code.

To get started, follow the [quickstart](https://docs.dbos.dev/quickstart) to install this open-source library and connect it to a Postgres database.
Then, annotate workflows and steps in your program to make it durable!
That's all you need to do—DBOS is entirely contained in this open-source library, there's no additional infrastructure for you to configure or manage.

## When Should I Use DBOS?

You should consider using DBOS if your application needs to **reliably handle failures**.
For example, you might be building a payments service that must reliably process transactions even if servers crash mid-operation, or a long-running data pipeline that needs to resume seamlessly from checkpoints rather than restart from the beginning when interrupted.

Handling failures is costly and complicated, requiring complex state management and recovery logic as well as heavyweight tools like external orchestration services.
DBOS makes it simpler: annotate your code to checkpoint it in Postgres and automatically recover from any failure.
DBOS also provides powerful Postgres-backed primitives that make it easier to write and operate reliable code, including durable queues, notifications, scheduling, event processing, and programmatic workflow management.

## Features

<details open><summary><strong>💾 Durable Workflows</strong></summary>

####

DBOS workflows make your program **durable** by checkpointing its state in Postgres.
If your program ever fails, when it restarts all your workflows will automatically resume from the last completed step.

You add durable workflows to your existing Python program by annotating ordinary functions as workflows and steps:

```python
from dbos import DBOS

@DBOS.step()
def step_one():
    ...

@DBOS.step()
def step_two():
    ...

@DBOS.workflow()
def workflow():
    step_one()
    step_two()
```

Workflows are particularly useful for

- Orchestrating business processes so they seamlessly recover from any failure.
- Building observable and fault-tolerant data pipelines.
- Operating an AI agent, or any application that relies on unreliable or non-deterministic APIs.

[Read more ↗️](https://docs.dbos.dev/python/tutorials/workflow-tutorial)

</details>

<details><summary><strong>📒 Durable Queues</strong></summary>

####

DBOS queues help you **durably** run tasks in the background.
You can enqueue a task (which can be a single step or an entire workflow) from a durable workflow and one of your processes will pick it up for execution.
DBOS manages the execution of your tasks: it guarantees that tasks complete, and that their callers get their results without needing to resubmit them, even if your application is interrupted.

Queues also provide flow control, so you can limit the concurrency of your tasks on a per-queue or per-process basis.
You can also set timeouts for tasks, rate limit how often queued tasks are executed, deduplicate tasks, or prioritize tasks.

You can add queues to your workflows in just a couple lines of code.
They don't require a separate queueing service or message broker—just Postgres.

```python
from dbos import DBOS, Queue

queue = Queue("example_queue")

@DBOS.step()
def process_task(task):
    ...

@DBOS.workflow()
def process_tasks(tasks):
    task_handles = []
    # Enqueue each task so all tasks are processed concurrently.
    for task in tasks:
        handle = queue.enqueue(process_task, task)
        task_handles.append(handle)
    # Wait for each task to complete and retrieve its result.
    # Return the results of all tasks.
    return [handle.get_result() for handle in task_handles]
```

[Read more ↗️](https://docs.dbos.dev/python/tutorials/queue-tutorial)

</details>
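This release's queue changes (see the `_sys_db.py` hunks above) add per-task priority on top of this. A hedged sketch of what enqueueing with a priority could look like; `SetEnqueueOptions` and its `priority` field are assumed here, inferred from `EnqueueOptionsInternal` rather than confirmed by this diff:

```python
from dbos import DBOS, Queue, SetEnqueueOptions  # SetEnqueueOptions: assumed API

queue = Queue("example_queue")

@DBOS.step()
def process_task(task):
    ...

@DBOS.workflow()
def process_low_urgency(task):
    # Lower numbers are dequeued first; tasks enqueued without a priority
    # default to 0, the highest priority.
    with SetEnqueueOptions(priority=10):
        handle = queue.enqueue(process_task, task)
    return handle.get_result()
```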
<details><summary><strong>⚙️ Programmatic Workflow Management</strong></summary>

####

Your workflows are stored as rows in a Postgres table, so you have full programmatic control over them.
Write scripts to query workflow executions, batch pause or resume workflows, or even restart failed workflows from a specific step.
Handle bugs or failures that affect thousands of workflows with power and flexibility.

```python
# Create a DBOS client connected to your Postgres database.
client = DBOSClient(database_url)
# Find all workflows that errored between 3:00 and 5:00 AM UTC on 2025-04-22.
workflows = client.list_workflows(status="ERROR",
    start_time="2025-04-22T03:00:00Z", end_time="2025-04-22T05:00:00Z")
for workflow in workflows:
    # Check which workflows failed due to an outage in a service called from Step 2.
    steps = client.list_workflow_steps(workflow)
    if len(steps) >= 3 and isinstance(steps[2]["error"], ServiceOutage):
        # To recover from the outage, restart those workflows from Step 2.
        DBOS.fork_workflow(workflow.workflow_id, 2)
```

[Read more ↗️](https://docs.dbos.dev/python/reference/client)

</details>

<details><summary><strong>🎫 Exactly-Once Event Processing</strong></summary>

####

Use DBOS to build reliable webhooks, event listeners, or Kafka consumers by starting a workflow exactly-once in response to an event.
Acknowledge the event immediately while reliably processing it in the background.

For example:

```python
def handle_message(request: Request) -> None:
    event_id = request.body["event_id"]
    # Use the event ID as an idempotency key to start the workflow exactly-once
    with SetWorkflowID(event_id):
        # Start the workflow in the background, then acknowledge the event
        DBOS.start_workflow(message_workflow, request.body["event"])
```

Or with Kafka:

```python
@DBOS.kafka_consumer(config, ["alerts-topic"])
@DBOS.workflow()
def process_kafka_alerts(msg):
    # This workflow runs exactly-once for each message sent to the topic
    alerts = msg.value.decode()
    for alert in alerts:
        respond_to_alert(alert)
```

[Read more ↗️](https://docs.dbos.dev/python/tutorials/workflow-tutorial)

</details>

<details><summary><strong>📅 Durable Scheduling</strong></summary>

####

Schedule workflows using cron syntax, or use durable sleep to pause workflows for as long as you like (even days or weeks) before executing.

You can schedule a workflow using a single annotation:

```python
@DBOS.scheduled('* * * * *')  # crontab syntax to run once every minute
@DBOS.workflow()
def example_scheduled_workflow(scheduled_time: datetime, actual_time: datetime):
    DBOS.logger.info("I am a workflow scheduled to run once a minute.")
```

You can add a durable sleep to any workflow with a single line of code.
It stores its wakeup time in Postgres so the workflow sleeps through any interruption or restart, then always resumes on schedule.

```python
@DBOS.workflow()
def reminder_workflow(email: str, time_to_sleep: int):
    send_confirmation_email(email)
    DBOS.sleep(time_to_sleep)
    send_reminder_email(email)
```

[Read more ↗️](https://docs.dbos.dev/python/tutorials/scheduled-workflows)

</details>

<details><summary><strong>📫 Durable Notifications</strong></summary>

####

Pause your workflow executions until a notification is received, or emit events from your workflow to send progress updates to external clients.
All notifications are stored in Postgres, so they can be sent and received with exactly-once semantics.
Set durable timeouts when waiting for events, so you can wait for as long as you like (even days or weeks) through interruptions or restarts, then resume once a notification arrives or the timeout is reached.

For example, build a reliable billing workflow that durably waits for a notification from a payments service, processing it exactly-once:

```python
@DBOS.workflow()
def billing_workflow():
    ...  # Calculate the charge, then submit the bill to a payments service
    payment_status = DBOS.recv(PAYMENT_STATUS, timeout=payment_service_timeout)
    if payment_status is not None and payment_status == "paid":
        ...  # Handle a successful payment.
    else:
        ...  # Handle a failed payment or timeout.
```

</details>


## Getting Started

To get started, follow the [quickstart](https://docs.dbos.dev/quickstart) to install this open-source library and connect it to a Postgres database.
Then, check out the [programming guide](https://docs.dbos.dev/python/programming-guide) to learn how to build with durable workflows and queues.

## Documentation

[https://docs.dbos.dev](https://docs.dbos.dev)

## Examples

[https://docs.dbos.dev/examples](https://docs.dbos.dev/examples)

## DBOS vs. Other Systems

<details><summary><strong>DBOS vs. Temporal</strong></summary>

####

Both DBOS and Temporal provide durable execution, but DBOS is implemented in a lightweight Postgres-backed library whereas Temporal is implemented in an externally orchestrated server.

You can add DBOS to your program by installing this open-source library, connecting it to Postgres, and annotating workflows and steps.
By contrast, to add Temporal to your program, you must rearchitect your program to move your workflows and steps (activities) to a Temporal worker, configure a Temporal server to orchestrate those workflows, and access your workflows only through a Temporal client.
[This blog post](https://www.dbos.dev/blog/durable-execution-coding-comparison) makes the comparison in more detail.

**When to use DBOS:** You need to add durable workflows to your applications with minimal rearchitecting, or you are using Postgres.

**When to use Temporal:** You don't want to add Postgres to your stack, or you need a language DBOS doesn't support yet.

</details>

<details><summary><strong>DBOS vs. Airflow</strong></summary>

####

DBOS and Airflow both provide workflow abstractions.
Airflow is targeted at data science use cases, providing many out-of-the-box connectors but requiring workflows to be written as explicit DAGs and orchestrated externally from an Airflow cluster.
Airflow is designed for batch operations and does not provide good performance for streaming or real-time use cases.
DBOS is general-purpose, but is often used for data pipelines, allowing developers to write workflows as code and requiring no infrastructure except Postgres.

**When to use DBOS:** You need the flexibility of writing workflows as code, or you need higher performance than Airflow is capable of (particularly for streaming or real-time use cases).

**When to use Airflow:** You need Airflow's ecosystem of connectors.

</details>

<details><summary><strong>DBOS vs. Celery/BullMQ</strong></summary>

####

DBOS provides a similar queue abstraction to dedicated queueing systems like Celery or BullMQ: you can declare queues, submit tasks to them, and control their flow with concurrency limits, rate limits, timeouts, prioritization, etc.
However, DBOS queues are **durable and Postgres-backed** and integrate with durable workflows.
For example, in DBOS you can write a durable workflow that enqueues a thousand tasks and waits for their results.
DBOS checkpoints the workflow and each of its tasks in Postgres, guaranteeing that even if failures or interruptions occur, the tasks will complete and the workflow will collect their results.
By contrast, Celery/BullMQ are Redis-backed and don't provide workflows, so they provide fewer guarantees but better performance.

**When to use DBOS:** You need the reliability of enqueueing tasks from durable workflows.

**When to use Celery/BullMQ:** You don't need durability, or you need very high throughput beyond what your Postgres server can support.
</details>

## Community

If you want to ask questions or hang out with the community, join us on [Discord](https://discord.gg/fMwQjeW5zg)!
If you see a bug or have a feature request, don't hesitate to open an issue here on GitHub.
If you're interested in contributing, check out our [contributions guide](./CONTRIBUTING.md).