dbos 0.27.0a11__py3-none-any.whl → 0.28.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_client.py CHANGED
@@ -6,6 +6,7 @@ from typing import Any, Generic, List, Optional, TypedDict, TypeVar
  from sqlalchemy import URL
  
  from dbos._app_db import ApplicationDatabase
+ from dbos._context import MaxPriority, MinPriority
  
  if sys.version_info < (3, 11):
      from typing_extensions import NotRequired
@@ -15,7 +16,7 @@ else:
  from dbos import _serialization
  from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
  from dbos._dbos_config import parse_database_url_to_dbconfig
- from dbos._error import DBOSNonExistentWorkflowError
+ from dbos._error import DBOSException, DBOSNonExistentWorkflowError
  from dbos._registrations import DEFAULT_MAX_RECOVERY_ATTEMPTS
  from dbos._serialization import WorkflowInputs
  from dbos._sys_db import (
@@ -44,6 +45,15 @@ class EnqueueOptions(TypedDict):
      app_version: NotRequired[str]
      workflow_timeout: NotRequired[float]
      deduplication_id: NotRequired[str]
+     priority: NotRequired[int]
+ 
+ 
+ def validate_enqueue_options(options: EnqueueOptions) -> None:
+     priority = options.get("priority")
+     if priority is not None and (priority < MinPriority or priority > MaxPriority):
+         raise DBOSException(
+             f"Invalid priority {priority}. Priority must be between {MinPriority}~{MaxPriority}."
+         )
  
  
  class WorkflowHandleClientPolling(Generic[R]):
@@ -103,6 +113,7 @@ class DBOSClient:
          self._sys_db.destroy()
  
      def _enqueue(self, options: EnqueueOptions, *args: Any, **kwargs: Any) -> str:
+         validate_enqueue_options(options)
          workflow_name = options["workflow_name"]
          queue_name = options["queue_name"]
  
@@ -116,6 +127,7 @@ class DBOSClient:
          workflow_timeout = options.get("workflow_timeout", None)
          enqueue_options_internal: EnqueueOptionsInternal = {
              "deduplication_id": options.get("deduplication_id"),
+             "priority": options.get("priority"),
          }
  
          status: WorkflowStatusInternal = {
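
Read together, these hunks mean a client-side enqueue now accepts an optional integer `priority`, validated against `MinPriority`/`MaxPriority` before the workflow status row is written. A minimal usage sketch; the connection string and workflow name are hypothetical, and it calls the internal `_enqueue` entry point shown in this diff (applications would normally go through the public wrapper around it):

```python
from dbos._client import DBOSClient, EnqueueOptions

# Hypothetical database URL; point this at your own Postgres instance.
client = DBOSClient("postgresql://postgres:dbos@localhost:5432/dbos")

options: EnqueueOptions = {
    "workflow_name": "process_task",  # hypothetical workflow registered by the app
    "queue_name": "example_queue",
    "priority": 5,  # optional; must lie in [1, 2**31 - 1] or DBOSException is raised
}
# Omitting "priority" leaves the queue entry at the default 0 (highest priority).
workflow_id = client._enqueue(options, "task-argument")
```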
dbos/_context.py CHANGED
@@ -31,6 +31,9 @@ class OperationType(Enum):
  
  OperationTypes = Literal["handler", "workflow", "transaction", "step", "procedure"]
  
+ MaxPriority = 2**31 - 1  # 2,147,483,647
+ MinPriority = 1
+ 
  
  # Keys must be the same as in TypeScript Transact
  class TracedAttributes(TypedDict, total=False):
@@ -100,6 +103,8 @@ class DBOSContext:
  
          # A user-specified deduplication ID for the enqueuing workflow.
          self.deduplication_id: Optional[str] = None
+         # A user-specified priority for the enqueuing workflow.
+         self.priority: Optional[int] = None
  
      def create_child(self) -> DBOSContext:
          rv = DBOSContext()
@@ -422,15 +427,23 @@ class SetEnqueueOptions:
  
      Usage:
      ```
-     with SetEnqueueOptions(deduplication_id=<deduplication id>):
+     with SetEnqueueOptions(deduplication_id=<deduplication id>, priority=<priority>):
          queue.enqueue(...)
      ```
      """
  
-     def __init__(self, *, deduplication_id: Optional[str] = None) -> None:
+     def __init__(
+         self, *, deduplication_id: Optional[str] = None, priority: Optional[int] = None
+     ) -> None:
          self.created_ctx = False
          self.deduplication_id: Optional[str] = deduplication_id
          self.saved_deduplication_id: Optional[str] = None
+         if priority is not None and (priority < MinPriority or priority > MaxPriority):
+             raise Exception(
+                 f"Invalid priority {priority}. Priority must be between {MinPriority}~{MaxPriority}."
+             )
+         self.priority: Optional[int] = priority
+         self.saved_priority: Optional[int] = None
  
      def __enter__(self) -> SetEnqueueOptions:
          # Code to create a basic context
@@ -441,6 +454,8 @@ class SetEnqueueOptions:
          ctx = assert_current_dbos_context()
          self.saved_deduplication_id = ctx.deduplication_id
          ctx.deduplication_id = self.deduplication_id
+         self.saved_priority = ctx.priority
+         ctx.priority = self.priority
          return self
  
      def __exit__(
@@ -449,7 +464,9 @@ class SetEnqueueOptions:
          exc_value: Optional[BaseException],
          traceback: Optional[TracebackType],
      ) -> Literal[False]:
-         assert_current_dbos_context().deduplication_id = self.saved_deduplication_id
+         curr_ctx = assert_current_dbos_context()
+         curr_ctx.deduplication_id = self.saved_deduplication_id
+         curr_ctx.priority = self.saved_priority
          # Code to clean up the basic context if we created it
          if self.created_ctx:
              _clear_local_dbos_context()
@@ -463,6 +480,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
          self.is_temp_workflow = attributes["name"] == "temp_wf"
          self.saved_workflow_timeout: Optional[int] = None
          self.saved_deduplication_id: Optional[str] = None
+         self.saved_priority: Optional[int] = None
  
      def __enter__(self) -> DBOSContext:
          # Code to create a basic context
@@ -476,10 +494,12 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
          # workflow's children (instead we propagate the deadline)
          self.saved_workflow_timeout = ctx.workflow_timeout_ms
          ctx.workflow_timeout_ms = None
-         # Unset the deduplication_id context var so it is not applied to this
+         # Unset the deduplication_id and priority context var so it is not applied to this
          # workflow's children
          self.saved_deduplication_id = ctx.deduplication_id
          ctx.deduplication_id = None
+         self.saved_priority = ctx.priority
+         ctx.priority = None
          ctx.start_workflow(
              None, self.attributes, self.is_temp_workflow
          )  # Will get from the context's next workflow ID
@@ -498,7 +518,8 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
          ctx.workflow_timeout_ms = self.saved_workflow_timeout
          # Clear any propagating timeout
          ctx.workflow_deadline_epoch_ms = None
-         # Restore the saved deduplication ID
+         # Restore the saved deduplication ID and priority
+         ctx.priority = self.saved_priority
          ctx.deduplication_id = self.saved_deduplication_id
          # Code to clean up the basic context if we created it
          if self.created_ctx:
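
The docstring change above is the whole public surface of the feature: a priority is set with the same context manager already used for deduplication. A sketch following that usage, assuming a configured DBOS application; the queue and workflow names are hypothetical:

```python
from dbos import DBOS, Queue
from dbos._context import SetEnqueueOptions

queue = Queue("example_queue")

@DBOS.workflow()
def task(n: int) -> int:
    return n * 2

# Priorities run from MinPriority (1, most urgent user-settable value) to
# MaxPriority (2**31 - 1); the constructor raises immediately if out of range.
with SetEnqueueOptions(priority=1):
    handle = queue.enqueue(task, 21)
# On __exit__ the previous context values are restored, so nested or
# subsequent enqueues are unaffected.
```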
dbos/_core.py CHANGED
@@ -544,6 +544,7 @@ def start_workflow(
      )
      enqueue_options = EnqueueOptionsInternal(
          deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
+         priority=local_ctx.priority if local_ctx is not None else None,
      )
      new_wf_id, new_wf_ctx = _get_new_wf()
  
@@ -635,6 +636,7 @@ async def start_workflow_async(
      )
      enqueue_options = EnqueueOptionsInternal(
          deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
+         priority=local_ctx.priority if local_ctx is not None else None,
      )
      new_wf_id, new_wf_ctx = _get_new_wf()
  
dbos/_dbos.py CHANGED
@@ -371,7 +371,7 @@ class DBOS:
          set_env_vars(self._config)
          config_logger(self._config)
          dbos_tracer.config(self._config)
-         dbos_logger.info("Initializing DBOS")
+         dbos_logger.info(f"Initializing DBOS (v{GlobalParams.dbos_version})")
  
          # If using FastAPI, set up middleware and lifecycle events
          if self.fastapi is not None:
dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py ADDED
@@ -0,0 +1,35 @@
+ """add queue priority
+ 
+ Revision ID: 933e86bdac6a
+ Revises: 27ac6900c6ad
+ Create Date: 2025-04-25 18:17:40.462737
+ 
+ """
+ 
+ from typing import Sequence, Union
+ 
+ import sqlalchemy as sa
+ from alembic import op
+ 
+ # revision identifiers, used by Alembic.
+ revision: str = "933e86bdac6a"
+ down_revision: Union[str, None] = "27ac6900c6ad"
+ branch_labels: Union[str, Sequence[str], None] = None
+ depends_on: Union[str, Sequence[str], None] = None
+ 
+ 
+ def upgrade() -> None:
+     op.add_column(
+         "workflow_queue",
+         sa.Column(
+             "priority",
+             sa.Integer(),
+             nullable=False,
+             server_default=sa.text("'0'::int"),
+         ),
+         schema="dbos",
+     )
+ 
+ 
+ def downgrade() -> None:
+     op.drop_column("workflow_queue", "priority", schema="dbos")
dbos/_schemas/system_database.py CHANGED
@@ -180,6 +180,12 @@ class SystemSchema:
              Text,
              nullable=True,
          ),
+         Column(
+             "priority",
+             Integer,
+             nullable=False,
+             server_default=text("'0'::int"),
+         ),
          UniqueConstraint(
              "queue_name", "deduplication_id", name="uq_workflow_queue_name_dedup_id"
          ),
dbos/_sys_db.py CHANGED
@@ -138,6 +138,9 @@ class WorkflowStatusInternal(TypedDict):
  
  class EnqueueOptionsInternal(TypedDict):
      deduplication_id: Optional[str]  # Unique ID for deduplication on a queue
+     priority: Optional[
+         int
+     ]  # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
  
  
  class RecordedResult(TypedDict):
@@ -1633,12 +1636,19 @@
              if enqueue_options is not None
              else None
          )
+         priority = (
+             enqueue_options["priority"] if enqueue_options is not None else None
+         )
+         # Default to 0 (highest priority) if not provided
+         if priority is None:
+             priority = 0
          query = (
              pg.insert(SystemSchema.workflow_queue)
              .values(
                  workflow_uuid=workflow_id,
                  queue_name=queue_name,
                  deduplication_id=deduplication_id,
+                 priority=priority,
              )
              .on_conflict_do_nothing(
                  index_elements=SystemSchema.workflow_queue.primary_key.columns
@@ -1747,7 +1757,10 @@
              .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
              .where(SystemSchema.workflow_queue.c.started_at_epoch_ms == None)
              .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
-             .order_by(SystemSchema.workflow_queue.c.created_at_epoch_ms.asc())
+             .order_by(
+                 SystemSchema.workflow_queue.c.priority.asc(),
+                 SystemSchema.workflow_queue.c.created_at_epoch_ms.asc(),
+             )
              .with_for_update(nowait=True)  # Error out early
          )
          # Apply limit only if max_tasks is finite
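
The ordering change is the heart of the feature: entries are dequeued by ascending `priority` first (the unset default 0 sorts ahead of any user value, since user-settable priorities start at 1), with `created_at_epoch_ms` as the FIFO tie-breaker. A plain-Python sketch of the same sort key, over hypothetical queue rows:

```python
from operator import itemgetter

# Hypothetical rows: (workflow_uuid, priority, created_at_epoch_ms)
rows = [
    ("wf-a", 10, 1_700_000_000_000),
    ("wf-b", 0, 1_700_000_000_500),   # default priority 0 always wins
    ("wf-c", 1, 1_700_000_000_250),
    ("wf-d", 1, 1_700_000_000_100),   # ties on priority fall back to FIFO
]

# Mirrors ORDER BY priority ASC, created_at_epoch_ms ASC.
dequeue_order = [wf for wf, _, _ in sorted(rows, key=itemgetter(1, 2))]
assert dequeue_order == ["wf-b", "wf-d", "wf-c", "wf-a"]
```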
dbos/_utils.py CHANGED
@@ -1,3 +1,4 @@
+ import importlib.metadata
  import os
  
  INTERNAL_QUEUE_NAME = "_dbos_internal_queue"
@@ -6,3 +7,9 @@ INTERNAL_QUEUE_NAME = "_dbos_internal_queue"
  class GlobalParams:
      app_version: str = os.environ.get("DBOS__APPVERSION", "")
      executor_id: str = os.environ.get("DBOS__VMID", "local")
+     try:
+         # Only works on Python >= 3.8
+         dbos_version = importlib.metadata.version("dbos")
+     except importlib.metadata.PackageNotFoundError:
+         # If package is not installed or during development
+         dbos_version = "unknown"
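
The version string is resolved once, when `GlobalParams` is defined, so any module can read it afterwards. A quick check, assuming the wheel is installed (a source checkout without package metadata prints "unknown" instead):

```python
from dbos._utils import GlobalParams

print(GlobalParams.dbos_version)  # e.g. "0.28.0a4"
```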
dbos/cli/cli.py CHANGED
@@ -24,6 +24,7 @@ from .._dbos_config import _is_valid_app_name
  from .._docker_pg_helper import start_docker_pg, stop_docker_pg
  from .._schemas.system_database import SystemSchema
  from .._sys_db import SystemDatabase, reset_system_database
+ from .._utils import GlobalParams
  from ..cli._github_init import create_template_from_github
  from ._template_init import copy_template, get_project_name, get_templates_directory
  
@@ -42,6 +43,14 @@ def start_client(db_url: Optional[str] = None) -> DBOSClient:
  
  
  app = typer.Typer()
+ 
+ 
+ @app.command(help="Show the version and exit")
+ def version() -> None:
+     """Display the current version of DBOS CLI."""
+     typer.echo(f"DBOS CLI version: {GlobalParams.dbos_version}")
+ 
+ 
  workflow = typer.Typer()
  queue = typer.Typer()
dbos-0.28.0a4.dist-info/METADATA ADDED
@@ -0,0 +1,312 @@
+ Metadata-Version: 2.1
+ Name: dbos
+ Version: 0.28.0a4
+ Summary: Ultra-lightweight durable execution in Python
+ Author-Email: "DBOS, Inc." <contact@dbos.dev>
+ License: MIT
+ Requires-Python: >=3.9
+ Requires-Dist: pyyaml>=6.0.2
+ Requires-Dist: jsonschema>=4.23.0
+ Requires-Dist: alembic>=1.13.3
+ Requires-Dist: typing-extensions>=4.12.2; python_version < "3.10"
+ Requires-Dist: typer>=0.12.5
+ Requires-Dist: jsonpickle>=3.3.0
+ Requires-Dist: opentelemetry-api>=1.27.0
+ Requires-Dist: opentelemetry-sdk>=1.27.0
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.27.0
+ Requires-Dist: python-dateutil>=2.9.0.post0
+ Requires-Dist: fastapi[standard]>=0.115.2
+ Requires-Dist: tomlkit>=0.13.2
+ Requires-Dist: psycopg[binary]>=3.1
+ Requires-Dist: docker>=7.1.0
+ Requires-Dist: cryptography>=43.0.3
+ Requires-Dist: rich>=13.9.4
+ Requires-Dist: pyjwt>=2.10.1
+ Requires-Dist: websockets>=15.0
+ Description-Content-Type: text/markdown
+ 
+ 
+ <div align="center">
+ 
+ # DBOS Transact: Lightweight Durable Workflows
+ 
+ #### [Documentation](https://docs.dbos.dev/) &nbsp;&nbsp;•&nbsp;&nbsp; [Examples](https://docs.dbos.dev/examples) &nbsp;&nbsp;•&nbsp;&nbsp; [Github](https://github.com/dbos-inc) &nbsp;&nbsp;•&nbsp;&nbsp; [Discord](https://discord.com/invite/jsmC6pXGgX)
+ </div>
+ 
+ ---
+ 
+ ## What is DBOS?
+ 
+ DBOS provides lightweight durable workflows built on top of Postgres.
+ Instead of managing your own workflow orchestrator or task queue system, you can use DBOS to add durable workflows and queues to your program in just a few lines of code.
+ 
+ To get started, follow the [quickstart](https://docs.dbos.dev/quickstart) to install this open-source library and connect it to a Postgres database.
+ Then, annotate workflows and steps in your program to make it durable!
+ That's all you need to do&mdash;DBOS is entirely contained in this open-source library, there's no additional infrastructure for you to configure or manage.
+ 
+ ## When Should I Use DBOS?
+ 
+ You should consider using DBOS if your application needs to **reliably handle failures**.
+ For example, you might be building a payments service that must reliably process transactions even if servers crash mid-operation, or a long-running data pipeline that needs to resume seamlessly from checkpoints rather than restart from the beginning when interrupted.
+ 
+ Handling failures is costly and complicated, requiring complex state management and recovery logic as well as heavyweight tools like external orchestration services.
+ DBOS makes it simpler: annotate your code to checkpoint it in Postgres and automatically recover from any failure.
+ DBOS also provides powerful Postgres-backed primitives that make it easier to write and operate reliable code, including durable queues, notifications, scheduling, event processing, and programmatic workflow management.
+ 
+ ## Features
+ 
+ <details open><summary><strong>💾 Durable Workflows</strong></summary>
+ 
+ ####
+ 
+ DBOS workflows make your program **durable** by checkpointing its state in Postgres.
+ If your program ever fails, when it restarts all your workflows will automatically resume from the last completed step.
+ 
+ You add durable workflows to your existing Python program by annotating ordinary functions as workflows and steps:
+ 
+ ```python
+ from dbos import DBOS
+ 
+ @DBOS.step()
+ def step_one():
+     ...
+ 
+ @DBOS.step()
+ def step_two():
+     ...
+ 
+ @DBOS.workflow()
+ def workflow():
+     step_one()
+     step_two()
+ ```
+ 
+ Workflows are particularly useful for
+ 
+ - Orchestrating business processes so they seamlessly recover from any failure.
+ - Building observable and fault-tolerant data pipelines.
+ - Operating an AI agent, or any application that relies on unreliable or non-deterministic APIs.
+ 
+ [Read more ↗️](https://docs.dbos.dev/python/tutorials/workflow-tutorial)
+ 
+ </details>
+ 
+ <details><summary><strong>📒 Durable Queues</strong></summary>
+ 
+ ####
+ 
+ DBOS queues help you **durably** run tasks in the background.
+ You can enqueue a task (which can be a single step or an entire workflow) from a durable workflow and one of your processes will pick it up for execution.
+ DBOS manages the execution of your tasks: it guarantees that tasks complete, and that their callers get their results without needing to resubmit them, even if your application is interrupted.
+ 
+ Queues also provide flow control, so you can limit the concurrency of your tasks on a per-queue or per-process basis.
+ You can also set timeouts for tasks, rate limit how often queued tasks are executed, deduplicate tasks, or prioritize tasks.
+ 
+ You can add queues to your workflows in just a couple lines of code.
+ They don't require a separate queueing service or message broker&mdash;just Postgres.
+ 
+ ```python
+ from dbos import DBOS, Queue
+ 
+ queue = Queue("example_queue")
+ 
+ @DBOS.step()
+ def process_task(task):
+     ...
+ 
+ @DBOS.workflow()
+ def process_tasks(tasks):
+     task_handles = []
+     # Enqueue each task so all tasks are processed concurrently.
+     for task in tasks:
+         handle = queue.enqueue(process_task, task)
+         task_handles.append(handle)
+     # Wait for each task to complete and retrieve its result.
+     # Return the results of all tasks.
+     return [handle.get_result() for handle in task_handles]
+ ```
+ 
+ [Read more ↗️](https://docs.dbos.dev/python/tutorials/queue-tutorial)
+ 
+ </details>
+ 
+ <details><summary><strong>⚙️ Programmatic Workflow Management</strong></summary>
+ 
+ ####
+ 
+ Your workflows are stored as rows in a Postgres table, so you have full programmatic control over them.
+ Write scripts to query workflow executions, batch pause or resume workflows, or even restart failed workflows from a specific step.
+ Handle bugs or failures that affect thousands of workflows with power and flexibility.
+ 
+ ```python
+ # Create a DBOS client connected to your Postgres database.
+ client = DBOSClient(database_url)
+ # Find all workflows that errored between 3:00 and 5:00 AM UTC on 2025-04-22.
+ workflows = client.list_workflows(status="ERROR",
+     start_time="2025-04-22T03:00:00Z", end_time="2025-04-22T05:00:00Z")
+ for workflow in workflows:
+     # Check which workflows failed due to an outage in a service called from Step 2.
+     steps = client.list_workflow_steps(workflow)
+     if len(steps) >= 3 and isinstance(steps[2]["error"], ServiceOutage):
+         # To recover from the outage, restart those workflows from Step 2.
+         DBOS.fork_workflow(workflow.workflow_id, 2)
+ ```
+ 
+ [Read more ↗️](https://docs.dbos.dev/python/reference/client)
+ 
+ </details>
+ 
+ <details><summary><strong>🎫 Exactly-Once Event Processing</strong></summary>
+ 
+ ####
+ 
+ Use DBOS to build reliable webhooks, event listeners, or Kafka consumers by starting a workflow exactly-once in response to an event.
+ Acknowledge the event immediately while reliably processing it in the background.
+ 
+ For example:
+ 
+ ```python
+ def handle_message(request: Request) -> None:
+     event_id = request.body["event_id"]
+     # Use the event ID as an idempotency key to start the workflow exactly-once
+     with SetWorkflowID(event_id):
+         # Start the workflow in the background, then acknowledge the event
+         DBOS.start_workflow(message_workflow, request.body["event"])
+ ```
+ 
+ Or with Kafka:
+ 
+ ```python
+ @DBOS.kafka_consumer(config,["alerts-topic"])
+ @DBOS.workflow()
+ def process_kafka_alerts(msg):
+     # This workflow runs exactly-once for each message sent to the topic
+     alerts = msg.value.decode()
+     for alert in alerts:
+         respond_to_alert(alert)
+ ```
+ 
+ [Read more ↗️](https://docs.dbos.dev/python/tutorials/workflow-tutorial)
+ 
+ </details>
+ 
+ <details><summary><strong>📅 Durable Scheduling</strong></summary>
+ 
+ ####
+ 
+ Schedule workflows using cron syntax, or use durable sleep to pause workflows for as long as you like (even days or weeks) before executing.
+ 
+ You can schedule a workflow using a single annotation:
+ 
+ ```python
+ @DBOS.scheduled('* * * * *')  # crontab syntax to run once every minute
+ @DBOS.workflow()
+ def example_scheduled_workflow(scheduled_time: datetime, actual_time: datetime):
+     DBOS.logger.info("I am a workflow scheduled to run once a minute.")
+ ```
+ 
+ You can add a durable sleep to any workflow with a single line of code.
+ It stores its wakeup time in Postgres so the workflow sleeps through any interruption or restart, then always resumes on schedule.
+ 
+ ```python
+ @DBOS.workflow()
+ def reminder_workflow(email: str, time_to_sleep: int):
+     send_confirmation_email(email)
+     DBOS.sleep(time_to_sleep)
+     send_reminder_email(email)
+ ```
+ 
+ [Read more ↗️](https://docs.dbos.dev/python/tutorials/scheduled-workflows)
+ 
+ </details>
+ 
+ <details><summary><strong>📫 Durable Notifications</strong></summary>
+ 
+ ####
+ 
+ Pause your workflow executions until a notification is received, or emit events from your workflow to send progress updates to external clients.
+ All notifications are stored in Postgres, so they can be sent and received with exactly-once semantics.
+ Set durable timeouts when waiting for events, so you can wait for as long as you like (even days or weeks) through interruptions or restarts, then resume once a notification arrives or the timeout is reached.
+ 
+ For example, build a reliable billing workflow that durably waits for a notification from a payments service, processing it exactly-once:
+ 
+ ```python
+ @DBOS.workflow()
+ def billing_workflow():
+     ...  # Calculate the charge, then submit the bill to a payments service
+     payment_status = DBOS.recv(PAYMENT_STATUS, timeout=payment_service_timeout)
+     if payment_status is not None and payment_status == "paid":
+         ...  # Handle a successful payment.
+     else:
+         ...  # Handle a failed payment or timeout.
+ ```
+ 
+ </details>
+ 
+ 
+ ## Getting Started
+ 
+ To get started, follow the [quickstart](https://docs.dbos.dev/quickstart) to install this open-source library and connect it to a Postgres database.
+ Then, check out the [programming guide](https://docs.dbos.dev/python/programming-guide) to learn how to build with durable workflows and queues.
+ 
+ ## Documentation
+ 
+ [https://docs.dbos.dev](https://docs.dbos.dev)
+ 
+ ## Examples
+ 
+ [https://docs.dbos.dev/examples](https://docs.dbos.dev/examples)
+ 
+ ## DBOS vs. Other Systems
+ 
+ <details><summary><strong>DBOS vs. Temporal</strong></summary>
+ 
+ ####
+ 
+ Both DBOS and Temporal provide durable execution, but DBOS is implemented in a lightweight Postgres-backed library whereas Temporal is implemented in an externally orchestrated server.
+ 
+ You can add DBOS to your program by installing this open-source library, connecting it to Postgres, and annotating workflows and steps.
+ By contrast, to add Temporal to your program, you must rearchitect your program to move your workflows and steps (activities) to a Temporal worker, configure a Temporal server to orchestrate those workflows, and access your workflows only through a Temporal client.
+ [This blog post](https://www.dbos.dev/blog/durable-execution-coding-comparison) makes the comparison in more detail.
+ 
+ **When to use DBOS:** You need to add durable workflows to your applications with minimal rearchitecting, or you are using Postgres.
+ 
+ **When to use Temporal:** You don't want to add Postgres to your stack, or you need a language DBOS doesn't support yet.
+ 
+ </details>
+ 
+ <details><summary><strong>DBOS vs. Airflow</strong></summary>
+ 
+ ####
+ 
+ DBOS and Airflow both provide workflow abstractions.
+ Airflow is targeted at data science use cases, providing many out-of-the-box connectors but requiring workflows be written as explicit DAGs and externally orchestrating them from an Airflow cluster.
+ Airflow is designed for batch operations and does not provide good performance for streaming or real-time use cases.
+ DBOS is general-purpose, but is often used for data pipelines, allowing developers to write workflows as code and requiring no infrastructure except Postgres.
+ 
+ **When to use DBOS:** You need the flexibility of writing workflows as code, or you need higher performance than Airflow is capable of (particularly for streaming or real-time use cases).
+ 
+ **When to use Airflow:** You need Airflow's ecosystem of connectors.
+ 
+ </details>
+ 
+ <details><summary><strong>DBOS vs. Celery/BullMQ</strong></summary>
+ 
+ ####
+ 
+ DBOS provides a similar queue abstraction to dedicated queueing systems like Celery or BullMQ: you can declare queues, submit tasks to them, and control their flow with concurrency limits, rate limits, timeouts, prioritization, etc.
+ However, DBOS queues are **durable and Postgres-backed** and integrate with durable workflows.
+ For example, in DBOS you can write a durable workflow that enqueues a thousand tasks and waits for their results.
+ DBOS checkpoints the workflow and each of its tasks in Postgres, guaranteeing that even if failures or interruptions occur, the tasks will complete and the workflow will collect their results.
+ By contrast, Celery/BullMQ are Redis-backed and don't provide workflows, so they provide fewer guarantees but better performance.
+ 
+ **When to use DBOS:** You need the reliability of enqueueing tasks from durable workflows.
+ 
+ **When to use Celery/BullMQ**: You don't need durability, or you need very high throughput beyond what your Postgres server can support.
+ </details>
+ 
+ ## Community
+ 
+ If you want to ask questions or hang out with the community, join us on [Discord](https://discord.gg/fMwQjeW5zg)!
+ If you see a bug or have a feature request, don't hesitate to open an issue here on GitHub.
+ If you're interested in contributing, check out our [contributions guide](./CONTRIBUTING.md).
dbos-0.27.0a11.dist-info/RECORD → dbos-0.28.0a4.dist-info/RECORD RENAMED
@@ -1,19 +1,19 @@
- dbos-0.27.0a11.dist-info/METADATA,sha256=yw_xz14PUHXYfgd-u1bEU0qU0jmm7nmxV2m5yrCGMZo,5554
- dbos-0.27.0a11.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
- dbos-0.27.0a11.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
- dbos-0.27.0a11.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+ dbos-0.28.0a4.dist-info/METADATA,sha256=KyMx9FTut8a9dhtuvbD5FingAQayckrfPUj4wjrGCZQ,13268
+ dbos-0.28.0a4.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+ dbos-0.28.0a4.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+ dbos-0.28.0a4.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
  dbos/__init__.py,sha256=-FdBlOlr-f2tY__C23J4v22MoCAXqcDN_-zXsJXdoZ0,1005
  dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
  dbos/_admin_server.py,sha256=NG0JWQQer9kEslPNAA0dBv-O262sjarz7ZSlv8yird0,9053
  dbos/_app_db.py,sha256=3j8_5-MlSDY0otLRszFE-GfenU6JC20fcfSL-drSNYk,11800
  dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
- dbos/_client.py,sha256=Id-jzAUH6JMN-9WmAGyo0vm-nc0URjNIVwA2iKnCN5Q,13418
+ dbos/_client.py,sha256=Cn-feSnOcuDNnBfP0UBvb3xUWSmaLuoSc8CKQAoDwP8,13931
  dbos/_conductor/conductor.py,sha256=HYzVL29IMMrs2Mnms_7cHJynCnmmEN5SDQOMjzn3UoU,16840
  dbos/_conductor/protocol.py,sha256=zEKIuOQdIaSduNqfZKpo8PSD9_1oNpKIPnBNCu3RUyE,6681
- dbos/_context.py,sha256=5aJHOjh6-2Zc7Fwzw924Vg0utLEkaR-oBMRdz3cE95k,23680
- dbos/_core.py,sha256=7zhdO-VfZe84wgOzBVsliqO-BI20OzcLTFqvrGyxttw,48425
+ dbos/_context.py,sha256=fX_lWKx2_bHNTigVkuDtjDfX7BIeCsdDAQ1u47A2_Fo,24590
+ dbos/_core.py,sha256=1hDRqEmm77GUJVY1xCEzYsUP_HzLGtflI_v3TCxkQuI,48569
  dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
- dbos/_dbos.py,sha256=ENDQ6Xi4MoKrjXoCRlk1B64yZP7D-MyDUjUlOTRsw9I,48314
+ dbos/_dbos.py,sha256=f0SnnWL-koLgohj8IaV6l5-tKDtnKpJgAYyel-SwYaE,48346
  dbos/_dbos_config.py,sha256=L0Z0OOB5FoPM9g-joZqXGeJnlxWQsEUtgPtgtg9Uf48,21732
  dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
  dbos/_docker_pg_helper.py,sha256=NmcgqmR5rQA_4igfeqh8ugNT2z3YmoOvuep_MEtxTiY,5854
@@ -31,6 +31,7 @@ dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py,sha256=56w1v6TdofW3V18
  dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py,sha256=ZBYrtTdxy64HxIAlOes89fVIk2P1gNaJack7wuC_epg,873
  dbos/_migrations/versions/5c361fc04708_added_system_tables.py,sha256=Xr9hBDJjkAtymlauOmAy00yUHj0VVUaEz7kNwEM9IwE,6403
  dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py,sha256=Q_R35pb8AfVI3sg5mzKwyoPfYB88Ychcc8gwxpM9R7A,1035
+ dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py,sha256=yZX2kGF33skpXIBdMXtDNx-Nl_orFatKeHB8c-3K8-c,773
  dbos/_migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEULYsPfnp4YzaO5L198gDTgYY39AVA,2022
  dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-mUro43wGhsg5wcQWKZPRHD6jw8R5pVc,986
  dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
@@ -45,9 +46,9 @@ dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
  dbos/_scheduler.py,sha256=SR1oRZRcVzYsj-JauV2LA8JtwTkt8mru7qf6H1AzQ1U,2027
  dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
- dbos/_schemas/system_database.py,sha256=wLqrhApNqrwZC1SdUxi_ca0y_66WzKaaBOxvND4_bdg,5738
+ dbos/_schemas/system_database.py,sha256=VVjMUfxPwFMdZPBazNkO00ufG6SgL9OCybLcwC9CiVk,5883
  dbos/_serialization.py,sha256=bWuwhXSQcGmiazvhJHA5gwhrRWxtmFmcCFQSDJnqqkU,3666
- dbos/_sys_db.py,sha256=caIbhOwAnfugGzhnJ5rOG2V_bXphD9tJ4Un37gnG47A,82281
+ dbos/_sys_db.py,sha256=r9a3RA9hUogRJ9v3gNol_CjTFKjRhIO9aFaXXLc7G_Y,82820
  dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
  dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dbos/_templates/dbos-db-starter/__package/main.py,sha256=nJMN3ZD2lmwg4Dcgmiwqc-tQGuCJuJal2Xl85iA277U,2453
@@ -59,12 +60,12 @@ dbos/_templates/dbos-db-starter/migrations/script.py.mako,sha256=MEqL-2qATlST9TA
  dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sha256=MpS7LGaJS0CpvsjhfDkp9EJqvMvVCjRPfUp4c0aE2ys,941
  dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
  dbos/_tracer.py,sha256=yN6GRDKu_1p-EqtQLNarMocPfga2ZuqpzStzzSPYhzo,2732
- dbos/_utils.py,sha256=nFRUHzVjXG5AusF85AlYHikj63Tzi-kQm992ihsrAxA,201
+ dbos/_utils.py,sha256=pu8qvhZ__44LRCxO4xSb6OuyTuIV4DP-yWT22p2IuVM,477
  dbos/_workflow_commands.py,sha256=7_f8-w0MbS1gqC5v68EwzbUtomVM0lLebozpHxXmRYg,3982
  dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
  dbos/cli/_template_init.py,sha256=-WW3kbq0W_Tq4WbMqb1UGJG3xvJb3woEY5VspG95Srk,2857
- dbos/cli/cli.py,sha256=a3rUrHog5-e22KjjUPOuTjH20PmUgSP0amRpMd6LVJE,18882
+ dbos/cli/cli.py,sha256=gXKELYAK9_CTejQ-WbNEIqnEYByJndXHDYSX4naFg8g,19106
  dbos/dbos-config.schema.json,sha256=8KcwJb_sQc4-6tQG2TLmjE_nratfrQa0qVLl9XPsvWE,6367
  dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
  version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
- dbos-0.27.0a11.dist-info/RECORD,,
+ dbos-0.28.0a4.dist-info/RECORD,,
dbos-0.27.0a11.dist-info/METADATA DELETED
@@ -1,145 +0,0 @@
- Metadata-Version: 2.1
- Name: dbos
- Version: 0.27.0a11
- Summary: Ultra-lightweight durable execution in Python
- Author-Email: "DBOS, Inc." <contact@dbos.dev>
- License: MIT
- Requires-Python: >=3.9
- Requires-Dist: pyyaml>=6.0.2
- Requires-Dist: jsonschema>=4.23.0
- Requires-Dist: alembic>=1.13.3
- Requires-Dist: typing-extensions>=4.12.2; python_version < "3.10"
- Requires-Dist: typer>=0.12.5
- Requires-Dist: jsonpickle>=3.3.0
- Requires-Dist: opentelemetry-api>=1.27.0
- Requires-Dist: opentelemetry-sdk>=1.27.0
- Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.27.0
- Requires-Dist: python-dateutil>=2.9.0.post0
- Requires-Dist: fastapi[standard]>=0.115.2
- Requires-Dist: tomlkit>=0.13.2
- Requires-Dist: psycopg[binary]>=3.1
- Requires-Dist: docker>=7.1.0
- Requires-Dist: cryptography>=43.0.3
- Requires-Dist: rich>=13.9.4
- Requires-Dist: pyjwt>=2.10.1
- Requires-Dist: websockets>=15.0
- Requires-Dist: pyright>=1.1.398
- Description-Content-Type: text/markdown
- 
- 
- <div align="center">
- 
- # DBOS Transact: A Lightweight Durable Execution Library Built on Postgres
- 
- #### [Documentation](https://docs.dbos.dev/) &nbsp;&nbsp;•&nbsp;&nbsp; [Examples](https://docs.dbos.dev/examples) &nbsp;&nbsp;•&nbsp;&nbsp; [Github](https://github.com/dbos-inc) &nbsp;&nbsp;•&nbsp;&nbsp; [Discord](https://discord.com/invite/jsmC6pXGgX)
- </div>
- 
- ---
- 
- DBOS Transact is a Python library for **ultra-lightweight durable execution**.
- For example:
- 
- ```python
- @DBOS.step()
- def step_one():
-     ...
- 
- @DBOS.step()
- def step_two():
-     ...
- 
- @DBOS.workflow()
- def workflow()
-     step_one()
-     step_two()
- ```
- 
- Durable execution means your program is **resilient to any failure**.
- If it is ever interrupted or crashes, all your workflows will automatically resume from the last completed step.
- Durable execution helps solve many common problems:
- 
- - Orchestrating long-running or business-critical workflows so they seamlessly recover from any failure.
- - Running reliable background jobs with no timeouts.
- - Processing incoming events (e.g. from Kafka) exactly once.
- - Running a fault-tolerant distributed task queue.
- - Running a reliable cron scheduler.
- - Operating an AI agent, or anything that connects to an unreliable or non-deterministic API.
- 
- What’s unique about DBOS's implementation of durable execution is that it’s implemented in a **lightweight library** that’s **totally backed by Postgres**.
- To use DBOS, just `pip install` it and annotate your program with DBOS decorators.
- Under the hood, those decorators store your program's execution state (which workflows are currently executing and which steps they've completed) in a Postgres database.
- If your program crashes or is interrupted, they automatically recover its workflows from their stored state.
- So all you need to use DBOS is Postgres&mdash;there are no other dependencies you have to manage, no separate workflow server.
- 
- One big advantage of this approach is that you can add DBOS to **any** Python application&mdash;**it’s just a library**.
- You can use DBOS to add reliable background jobs or cron scheduling or queues to your app with no external dependencies except Postgres.
- 
- ## Getting Started
- 
- Install and configure with:
- 
- ```shell
- python3 -m venv dbos-example/.venv
- cd dbos-example
- source .venv/bin/activate
- pip install dbos
- dbos init --config
- ```
- 
- Then, try it out with this simple program:
- 
- ```python
- from fastapi import FastAPI
- from dbos import DBOS
- 
- app = FastAPI()
- DBOS(fastapi=app)
- 
- @DBOS.step()
- def step_one():
-     print("Step one completed!")
- 
- @DBOS.step()
- def step_two():
-     print("Step two completed!")
- 
- @DBOS.workflow()
- def dbos_workflow():
-     step_one()
-     for _ in range(5):
-         print("Press Control + C twice to stop the app...")
-         DBOS.sleep(1)
-     step_two()
- 
- @app.get("/")
- def fastapi_endpoint():
-     dbos_workflow()
- ```
- 
- Save the program into `main.py` and start it with `fastapi run`.
- Visit `localhost:8000` in your browser to start the workflow.
- When prompted, press `Control + C` (You may need to press `Control + C` twice quickly, or press `Control + \`, if `Control + C` is not effective in your environment) to force quit your application.
- It should crash midway through the workflow, having completed step one but not step two.
- Then, restart your app with `fastapi run`.
- It should resume the workflow from where it left off, completing step two without re-executing step one.
- 
- To learn how to build more complex workflows, see the [programming guide](https://docs.dbos.dev/python/programming-guide) or [examples](https://docs.dbos.dev/examples).
- 
- ## Documentation
- 
- [https://docs.dbos.dev](https://docs.dbos.dev)
- 
- ## Examples
- 
- 
- - [**AI-Powered Slackbot**](https://docs.dbos.dev/python/examples/rag-slackbot) &mdash; A Slackbot that answers questions about previous Slack conversations, using DBOS to durably orchestrate its RAG pipeline.
- - [**Widget Store**](https://docs.dbos.dev/python/examples/widget-store) &mdash; An online storefront that uses DBOS durable workflows to be resilient to any failure.
- - [**Scheduled Reminders**](https://docs.dbos.dev/python/examples/scheduled-reminders) &mdash; In just three lines of code, schedule an email to send days, weeks, or months in the future.
- 
- More examples [here](https://docs.dbos.dev/examples)!
- 
- ## Community
- 
- If you're interested in building with us, please star our repository and join our community on [Discord](https://discord.gg/fMwQjeW5zg)!
- If you see a bug or have a feature request, don't hesitate to open an issue here on GitHub.
- If you're interested in contributing, check out our [contributions guide](./CONTRIBUTING.md).
- If you're interested in contributing, check out our [contributions guide](./CONTRIBUTING.md).