dbos-2.2.0a2-py3-none-any.whl → dbos-2.2.0a3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_client.py CHANGED
@@ -62,6 +62,7 @@ class EnqueueOptions(_EnqueueOptionsRequired, total=False):
     deduplication_id: str
     priority: int
     max_recovery_attempts: int
+    queue_partition_key: str


 def validate_enqueue_options(options: EnqueueOptions) -> None:
@@ -185,6 +186,7 @@ class DBOSClient:
             "deduplication_id": options.get("deduplication_id"),
             "priority": options.get("priority"),
             "app_version": options.get("app_version"),
+            "queue_partition_key": options.get("queue_partition_key"),
         }

         inputs: WorkflowInputs = {
@@ -221,6 +223,7 @@ class DBOSClient:
                 else 0
             ),
             "inputs": self._serializer.serialize(inputs),
+            "queue_partition_key": enqueue_options_internal["queue_partition_key"],
         }

         self._sys_db.init_workflow(
@@ -286,6 +289,7 @@ class DBOSClient:
             "deduplication_id": None,
             "priority": 0,
             "inputs": self._serializer.serialize({"args": (), "kwargs": {}}),
+            "queue_partition_key": None,
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
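Taken together, the client changes let an external application target a specific partition when enqueueing. Below is a minimal sketch, assuming the `queue_name`/`workflow_name` fields required by `_EnqueueOptionsRequired`, a reachable system database, and a queue declared with `partition_queue=True` in the application; the connection string and all names are placeholders.

```python
from dbos import DBOSClient, EnqueueOptions

# Hypothetical connection string and names, for illustration only.
client = DBOSClient("postgresql://postgres:dbos@localhost:5432/appdb")
options: EnqueueOptions = {
    "queue_name": "task_queue",            # must be a partition-enabled queue
    "workflow_name": "process_task",
    "queue_partition_key": "customer-42",  # the new field in this release
}
handle = client.enqueue(options, "item-1")
print(handle.get_result())
```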
dbos/_context.py CHANGED
@@ -120,6 +120,8 @@ class DBOSContext:
         self.deduplication_id: Optional[str] = None
         # A user-specified priority for the enqueuing workflow.
         self.priority: Optional[int] = None
+        # If the workflow is enqueued on a partitioned queue, its partition key
+        self.queue_partition_key: Optional[str] = None

     def create_child(self) -> DBOSContext:
         rv = DBOSContext()
@@ -479,6 +481,7 @@ class SetEnqueueOptions:
         deduplication_id: Optional[str] = None,
         priority: Optional[int] = None,
         app_version: Optional[str] = None,
+        queue_partition_key: Optional[str] = None,
     ) -> None:
         self.created_ctx = False
         self.deduplication_id: Optional[str] = deduplication_id
@@ -491,6 +494,8 @@ class SetEnqueueOptions:
         self.saved_priority: Optional[int] = None
         self.app_version: Optional[str] = app_version
         self.saved_app_version: Optional[str] = None
+        self.queue_partition_key = queue_partition_key
+        self.saved_queue_partition_key: Optional[str] = None

     def __enter__(self) -> SetEnqueueOptions:
         # Code to create a basic context
@@ -505,6 +510,8 @@ class SetEnqueueOptions:
         ctx.priority = self.priority
         self.saved_app_version = ctx.app_version
         ctx.app_version = self.app_version
+        self.saved_queue_partition_key = ctx.queue_partition_key
+        ctx.queue_partition_key = self.queue_partition_key
         return self

     def __exit__(
@@ -517,6 +524,7 @@ class SetEnqueueOptions:
         curr_ctx.deduplication_id = self.saved_deduplication_id
         curr_ctx.priority = self.saved_priority
         curr_ctx.app_version = self.saved_app_version
+        curr_ctx.queue_partition_key = self.saved_queue_partition_key
         # Code to clean up the basic context if we created it
         if self.created_ctx:
             _clear_local_dbos_context()
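The context-manager changes follow the same save/restore pattern already used for `priority` and `app_version`: `__enter__` stashes the ambient partition key, `__exit__` puts it back. A minimal sketch of the resulting scoping behavior (nothing beyond the `SetEnqueueOptions` export is assumed):

```python
from dbos import SetEnqueueOptions

with SetEnqueueOptions(queue_partition_key="tenant-a"):
    # Workflows enqueued here carry partition key "tenant-a".
    with SetEnqueueOptions(queue_partition_key="tenant-b"):
        pass  # Workflows enqueued here carry "tenant-b".
    # On the inner __exit__, "tenant-a" is restored.
# Outside both blocks, no partition key is set.
```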
dbos/_core.py CHANGED
@@ -303,6 +303,11 @@ def _init_workflow(
             else 0
         ),
         "inputs": dbos._serializer.serialize(inputs),
+        "queue_partition_key": (
+            enqueue_options["queue_partition_key"]
+            if enqueue_options is not None
+            else None
+        ),
     }

     # Synchronously record the status and inputs for workflows
@@ -571,6 +576,9 @@ def start_workflow(
         deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
         priority=local_ctx.priority if local_ctx is not None else None,
         app_version=local_ctx.app_version if local_ctx is not None else None,
+        queue_partition_key=(
+            local_ctx.queue_partition_key if local_ctx is not None else None
+        ),
     )
     new_wf_id, new_wf_ctx = _get_new_wf()

@@ -664,6 +672,9 @@ async def start_workflow_async(
         deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
         priority=local_ctx.priority if local_ctx is not None else None,
         app_version=local_ctx.app_version if local_ctx is not None else None,
+        queue_partition_key=(
+            local_ctx.queue_partition_key if local_ctx is not None else None
+        ),
     )
     new_wf_id, new_wf_ctx = _get_new_wf()

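Because both `start_workflow` and `start_workflow_async` now read the key from the ambient `DBOSContext` and thread it into the workflow's initial status row, sync and async enqueues behave identically. A sketch, assuming a configured and launched DBOS application and the queue's async enqueue path; all names are placeholders:

```python
from dbos import DBOS, Queue, SetEnqueueOptions

task_queue = Queue("task_queue", partition_queue=True)

@DBOS.workflow()
async def process(item: str) -> str:
    return f"processed {item}"

async def main() -> None:
    # The key set here is picked up by start_workflow_async and recorded
    # in the workflow's status row.
    with SetEnqueueOptions(queue_partition_key="customer-42"):
        handle = await task_queue.enqueue_async(process, "item-1")
    print(await handle.get_result())
```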
dbos/_migration.py CHANGED
@@ -203,8 +203,14 @@ CREATE TABLE \"{schema}\".event_dispatch_kv (
 """


+def get_dbos_migration_two(schema: str) -> str:
+    return f"""
+    ALTER TABLE \"{schema}\".workflow_status ADD COLUMN queue_partition_key TEXT;
+    """
+
+
 def get_dbos_migrations(schema: str) -> list[str]:
-    return [get_dbos_migration_one(schema)]
+    return [get_dbos_migration_one(schema), get_dbos_migration_two(schema)]


 def get_sqlite_timestamp_expr() -> str:
@@ -293,4 +299,8 @@ CREATE TABLE streams (
 );
 """

-sqlite_migrations = [sqlite_migration_one]
+sqlite_migration_two = """
+ALTER TABLE workflow_status ADD COLUMN queue_partition_key TEXT;
+"""
+
+sqlite_migrations = [sqlite_migration_one, sqlite_migration_two]
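The migration lists are positional, so an existing install that already ran migration one applies only the new `ALTER TABLE` on upgrade. A small sketch of the Postgres path (this imports an internal module, so it is subject to change, and "dbos" stands in for the configured system schema):

```python
from dbos._migration import get_dbos_migrations

migrations = get_dbos_migrations("dbos")
assert len(migrations) == 2  # migration two adds the queue_partition_key column
print(migrations[1])
# ALTER TABLE "dbos".workflow_status ADD COLUMN queue_partition_key TEXT;
```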
dbos/_queue.py CHANGED
@@ -43,6 +43,7 @@ class Queue:
         *,  # Disable positional arguments from here on
         worker_concurrency: Optional[int] = None,
         priority_enabled: bool = False,
+        partition_queue: bool = False,
     ) -> None:
         if (
             worker_concurrency is not None
@@ -57,6 +58,7 @@ class Queue:
         self.worker_concurrency = worker_concurrency
         self.limiter = limiter
         self.priority_enabled = priority_enabled
+        self.partition_queue = partition_queue
         from ._dbos import _get_or_create_dbos_registry

         registry = _get_or_create_dbos_registry()
@@ -78,6 +80,18 @@ class Queue:
             raise Exception(
                 f"Priority is not enabled for queue {self.name}. Setting priority will not have any effect."
             )
+        if self.partition_queue and (
+            context is None or context.queue_partition_key is None
+        ):
+            raise Exception(
+                f"A workflow cannot be enqueued on partitioned queue {self.name} without a partition key"
+            )
+        if context and context.queue_partition_key and not self.partition_queue:
+            raise Exception(
+                f"You can only use a partition key on a partition-enabled queue. Key {context.queue_partition_key} was used with non-partitioned queue {self.name}"
+            )
+        if context and context.queue_partition_key and context.deduplication_id:
+            raise Exception("Deduplication is not supported for partitioned queues")

         dbos = _get_dbos_instance()
         return start_workflow(dbos, func, self.name, False, *args, **kwargs)
@@ -105,10 +119,21 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
     queues = dict(dbos._registry.queue_info_map)
     for _, queue in queues.items():
         try:
-            wf_ids = dbos._sys_db.start_queued_workflows(
-                queue, GlobalParams.executor_id, GlobalParams.app_version
-            )
-            for id in wf_ids:
+            if queue.partition_queue:
+                dequeued_workflows = []
+                queue_partition_keys = dbos._sys_db.get_queue_partitions(queue.name)
+                for key in queue_partition_keys:
+                    dequeued_workflows += dbos._sys_db.start_queued_workflows(
+                        queue,
+                        GlobalParams.executor_id,
+                        GlobalParams.app_version,
+                        key,
+                    )
+            else:
+                dequeued_workflows = dbos._sys_db.start_queued_workflows(
+                    queue, GlobalParams.executor_id, GlobalParams.app_version, None
+                )
+            for id in dequeued_workflows:
                 execute_workflow_by_id(dbos, id)
         except OperationalError as e:
             if isinstance(
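The enqueue-time checks above enforce three rules: a partitioned queue requires a key, a key requires a partition-enabled queue, and deduplication cannot be combined with partitioning. A sketch of each rejection, assuming a configured and launched DBOS application; queue and workflow names are placeholders:

```python
from dbos import DBOS, Queue, SetEnqueueOptions

plain_queue = Queue("plain_queue")
part_queue = Queue("part_queue", partition_queue=True)

@DBOS.workflow()
def task() -> None:
    pass

def expect_error(fn) -> None:
    try:
        fn()
    except Exception as e:
        print(f"rejected: {e}")

# 1. Partitioned queues require a partition key.
expect_error(lambda: part_queue.enqueue(task))

# 2. Partition keys are only valid on partition-enabled queues.
with SetEnqueueOptions(queue_partition_key="k1"):
    expect_error(lambda: plain_queue.enqueue(task))

# 3. Deduplication is not supported on partitioned queues.
with SetEnqueueOptions(queue_partition_key="k1", deduplication_id="d1"):
    expect_error(lambda: part_queue.enqueue(task))
```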
dbos/_schemas/system_database.py CHANGED
@@ -77,6 +77,7 @@ class SystemSchema:
         Column("deduplication_id", Text(), nullable=True),
         Column("inputs", Text()),
         Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
+        Column("queue_partition_key", Text()),
         Index("workflow_status_created_at_index", "created_at"),
         Index("workflow_status_executor_id_index", "executor_id"),
         Index("workflow_status_status_index", "status"),
dbos/_sys_db.py CHANGED
@@ -152,6 +152,8 @@ class WorkflowStatusInternal(TypedDict):
     priority: int
     # Serialized workflow inputs
     inputs: str
+    # If this workflow is enqueued on a partitioned queue, its partition key
+    queue_partition_key: Optional[str]


 class EnqueueOptionsInternal(TypedDict):
@@ -161,6 +163,8 @@ class EnqueueOptionsInternal(TypedDict):
     priority: Optional[int]
     # On what version the workflow is enqueued. Current version if not specified.
     app_version: Optional[str]
+    # If the workflow is enqueued on a partitioned queue, its partition key
+    queue_partition_key: Optional[str]


 class RecordedResult(TypedDict):
@@ -490,6 +494,7 @@ class SystemDatabase(ABC):
                 deduplication_id=status["deduplication_id"],
                 priority=status["priority"],
                 inputs=status["inputs"],
+                queue_partition_key=status["queue_partition_key"],
             )
             .on_conflict_do_update(
                 index_elements=["workflow_uuid"],
@@ -761,6 +766,7 @@ class SystemDatabase(ABC):
                 SystemSchema.workflow_status.c.deduplication_id,
                 SystemSchema.workflow_status.c.priority,
                 SystemSchema.workflow_status.c.inputs,
+                SystemSchema.workflow_status.c.queue_partition_key,
             ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
         ).fetchone()
         if row is None:
@@ -788,6 +794,7 @@ class SystemDatabase(ABC):
             "deduplication_id": row[16],
             "priority": row[17],
             "inputs": row[18],
+            "queue_partition_key": row[19],
         }
         return status

@@ -1714,8 +1721,41 @@ class SystemDatabase(ABC):
         )
         return value

+    @db_retry()
+    def get_queue_partitions(self, queue_name: str) -> List[str]:
+        """
+        Get all unique partition names associated with a queue for ENQUEUED workflows.
+
+        Args:
+            queue_name: The name of the queue to get partitions for
+
+        Returns:
+            A list of unique partition names for the queue
+        """
+        with self.engine.begin() as c:
+            query = (
+                sa.select(SystemSchema.workflow_status.c.queue_partition_key)
+                .distinct()
+                .where(SystemSchema.workflow_status.c.queue_name == queue_name)
+                .where(
+                    SystemSchema.workflow_status.c.status.in_(
+                        [
+                            WorkflowStatusString.ENQUEUED.value,
+                        ]
+                    )
+                )
+                .where(SystemSchema.workflow_status.c.queue_partition_key.isnot(None))
+            )
+
+            rows = c.execute(query).fetchall()
+            return [row[0] for row in rows]
+
     def start_queued_workflows(
-        self, queue: "Queue", executor_id: str, app_version: str
+        self,
+        queue: "Queue",
+        executor_id: str,
+        app_version: str,
+        queue_partition_key: Optional[str],
     ) -> List[str]:
         if self._debug_mode:
             return []
@@ -1734,6 +1774,10 @@ class SystemDatabase(ABC):
                 sa.select(sa.func.count())
                 .select_from(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_status.c.queue_partition_key
+                    == queue_partition_key
+                )
                 .where(
                     SystemSchema.workflow_status.c.status
                     != WorkflowStatusString.ENQUEUED.value
@@ -1758,6 +1802,10 @@ class SystemDatabase(ABC):
                 )
                 .select_from(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_status.c.queue_partition_key
+                    == queue_partition_key
+                )
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.PENDING.value
@@ -1799,6 +1847,10 @@ class SystemDatabase(ABC):
                 )
                 .select_from(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_status.c.queue_partition_key
+                    == queue_partition_key
+                )
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.ENQUEUED.value
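For reference, the query that `get_queue_partitions` composes is roughly the following (a sketch; the compiled SQL and the schema name, "dbos" here, depend on the dialect and configuration). Note also that because every counting query in `start_queued_workflows` is now scoped by `queue_partition_key`, flow control (worker concurrency and rate limits) applies per partition rather than per queue:

```python
# Approximate SQL equivalent of the SQLAlchemy query in get_queue_partitions.
GET_QUEUE_PARTITIONS_SQL = """
SELECT DISTINCT queue_partition_key
FROM "dbos".workflow_status
WHERE queue_name = :queue_name
  AND status = 'ENQUEUED'
  AND queue_partition_key IS NOT NULL
"""
```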
dbos-2.2.0a2.dist-info/METADATA → dbos-2.2.0a3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 2.2.0a2
+Version: 2.2.0a3
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
dbos-2.2.0a2.dist-info/RECORD → dbos-2.2.0a3.dist-info/RECORD CHANGED
@@ -1,17 +1,17 @@
-dbos-2.2.0a2.dist-info/METADATA,sha256=N8ToPPsy7wCWFBPg5BbVEM_XYJ82Q5LFEFOu7WdlwIU,14532
-dbos-2.2.0a2.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
-dbos-2.2.0a2.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
-dbos-2.2.0a2.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos-2.2.0a3.dist-info/METADATA,sha256=BIvdTxFdjJo9CSVgU7cplDHr5pR0p74ZuFoUbm3EP9c,14532
+dbos-2.2.0a3.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+dbos-2.2.0a3.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-2.2.0a3.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=M7FdFSBGhcvaLIXrNw_0eR68ijwMWV7_UEyimHMP_F4,1039
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=hubQJw5T8zGKCPNS6FQTXy8jQ8GTJxoYQaDTMlICl9k,16267
 dbos/_app_db.py,sha256=mvWQ66ebdbiD9fpGKHZBWNVEza6Ulo1D-3UoTB_LwRc,16378
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=l15aNXTnNQrjmchBNvJypdwdc53dRlMQlUF40iOqDWo,19092
+dbos/_client.py,sha256=oUpIYcUsWHSH_w1fXQ4ZvbtmgnWZNgDmMj7UqUC0iRA,19317
 dbos/_conductor/conductor.py,sha256=3E_hL3c9g9yWqKZkvI6KA0-ZzPMPRo06TOzT1esMiek,24114
 dbos/_conductor/protocol.py,sha256=q3rgLxINFtWFigdOONc-4gX4vn66UmMlJQD6Kj8LnL4,7420
-dbos/_context.py,sha256=cJDxVbswTLXKE5MV4Hmg6gpIX3Dd5mBTG-4lmofWP9E,27668
-dbos/_core.py,sha256=ThzKR7LCqLlR4eNkoGKrssddM9iVJC0UfshmDbx1TEA,50728
+dbos/_context.py,sha256=XKllmsDR_oMcWOuZnoe1X4yv2JeOi_vsAuyWC-mWs_o,28164
+dbos/_core.py,sha256=6OU3SMW5x8CvO7c0LBlHhF1eLiHPLs6nfkkasP73IEo,51124
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
 dbos/_dbos.py,sha256=dr32Z_NT36JkUxWGyYVX7xkl3bYJmgsxVMOX8H9_mpM,59394
 dbos/_dbos_config.py,sha256=NIMQfxkznoyscyeMFLrfrPAS1W_PHXXWrxqpvvrbp3E,24923
@@ -25,18 +25,18 @@ dbos/_flask.py,sha256=Npnakt-a3W5OykONFRkDRnumaDhTQmA0NPdUCGRYKXE,1652
 dbos/_kafka.py,sha256=Gm4fHWl7gYb-i5BMvwNwm5Km3z8zQpseqdMgqgFjlGI,4252
 dbos/_kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
 dbos/_logger.py,sha256=djnCp147QoQ1iG9Bt3Uz8RyGaXGmi6gebccXsrA6Cps,4660
-dbos/_migration.py,sha256=VAQxZXWQISifW0JpIG78lowV1MTBJ5ZC4P0YIwqxQhM,10013
+dbos/_migration.py,sha256=Fvc3m4dC4oDpjPMHX-tUZVnXklVB9OMMojSLuVyV9ak,10312
 dbos/_outcome.py,sha256=7HvosMfEHTh1U5P6xok7kFTGLwa2lPaul0YApb3UnN4,8191
-dbos/_queue.py,sha256=cgFFwVPUeQtrTgk7ivoTZb0v9ya8rZK4m7-G-h5gIb4,4846
+dbos/_queue.py,sha256=GmqZHl9smES1KSmpauhSdsnZFJHDyfvRArmC-jBibhw,6228
 dbos/_recovery.py,sha256=K-wlFhdf4yGRm6cUzyhcTjQUS0xp2T5rdNMLiiBErYg,2882
 dbos/_registrations.py,sha256=bEOntObnWaBylnebr5ZpcX2hk7OVLDd1z4BvW4_y3zA,7380
 dbos/_roles.py,sha256=kCuhhg8XLtrHCgKgm44I0abIRTGHltf88OwjEKAUggk,2317
 dbos/_scheduler.py,sha256=n96dNzKMr6-2RQvMxRI6BaoExHbLjw0Kr46j1P-DjP4,2620
 dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
-dbos/_schemas/system_database.py,sha256=aEkjRQDh9xjdke0d9uFx_20-c9UjQtvuLtHZ24aOypA,5497
+dbos/_schemas/system_database.py,sha256=mNsBV0ttlqJArvOqGPY60WvtuiWrHCpYnVxtvMfe2LI,5544
 dbos/_serialization.py,sha256=8TVXB1c2k3keodNcXszqmcOGTQz2r5UBSYtxn2OrYjI,2804
-dbos/_sys_db.py,sha256=ON8P8DZOy9FZ-TFFcBKMhzTdBXbUhE2oEGKKofDJgwE,85571
+dbos/_sys_db.py,sha256=FDboSk58CyQCAFjOF_KMLnRtIw05OL3IpJHT1qwKEKo,87596
 dbos/_sys_db_postgres.py,sha256=GuyGVyZZD_Wl7LjRSkHnOuZ-hOROlO4Xs2UeDhKq10E,6963
 dbos/_sys_db_sqlite.py,sha256=ifjKdy-Z9vlVIBf5L6XnSaNjiBdvqPE73asVHim4A5Q,6998
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
@@ -56,4 +56,4 @@ dbos/cli/migration.py,sha256=I0_0ngWTuCPQf6Symbpd0lizaxWUKe3uTYEmuCmsrdU,3775
 dbos/dbos-config.schema.json,sha256=47wofTZ5jlFynec7bG0L369tAXbRQQ2euBxBXvg4m9c,1730
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-2.2.0a2.dist-info/RECORD,,
+dbos-2.2.0a3.dist-info/RECORD,,