dbos 2.2.0a2.tar.gz → 2.2.0a3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. {dbos-2.2.0a2 → dbos-2.2.0a3}/PKG-INFO +1 -1
  2. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_client.py +4 -0
  3. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_context.py +8 -0
  4. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_core.py +11 -0
  5. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_migration.py +12 -2
  6. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_queue.py +29 -4
  7. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_schemas/system_database.py +1 -0
  8. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_sys_db.py +53 -1
  9. {dbos-2.2.0a2 → dbos-2.2.0a3}/pyproject.toml +1 -1
  10. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_queue.py +77 -0
  11. {dbos-2.2.0a2 → dbos-2.2.0a3}/LICENSE +0 -0
  12. {dbos-2.2.0a2 → dbos-2.2.0a3}/README.md +0 -0
  13. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/__init__.py +0 -0
  14. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/__main__.py +0 -0
  15. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_admin_server.py +0 -0
  16. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_app_db.py +0 -0
  17. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_classproperty.py +0 -0
  18. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_conductor/conductor.py +0 -0
  19. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_conductor/protocol.py +0 -0
  20. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_croniter.py +0 -0
  21. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_dbos.py +0 -0
  22. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_dbos_config.py +0 -0
  23. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_debouncer.py +0 -0
  24. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_debug.py +0 -0
  25. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_docker_pg_helper.py +0 -0
  26. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_error.py +0 -0
  27. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_event_loop.py +0 -0
  28. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_fastapi.py +0 -0
  29. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_flask.py +0 -0
  30. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_kafka.py +0 -0
  31. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_kafka_message.py +0 -0
  32. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_logger.py +0 -0
  33. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_outcome.py +0 -0
  34. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_recovery.py +0 -0
  35. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_registrations.py +0 -0
  36. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_roles.py +0 -0
  37. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_scheduler.py +0 -0
  38. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_schemas/__init__.py +0 -0
  39. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_schemas/application_database.py +0 -0
  40. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_serialization.py +0 -0
  41. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_sys_db_postgres.py +0 -0
  42. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_sys_db_sqlite.py +0 -0
  43. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/README.md +0 -0
  44. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  45. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
  46. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  47. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  48. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/migrations/create_table.py.dbos +0 -0
  49. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  50. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_tracer.py +0 -0
  51. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_utils.py +0 -0
  52. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_workflow_commands.py +0 -0
  53. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/cli/_github_init.py +0 -0
  54. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/cli/_template_init.py +0 -0
  55. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/cli/cli.py +0 -0
  56. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/cli/migration.py +0 -0
  57. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/dbos-config.schema.json +0 -0
  58. {dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/py.typed +0 -0
  59. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/__init__.py +0 -0
  60. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/atexit_no_ctor.py +0 -0
  61. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/atexit_no_launch.py +0 -0
  62. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/classdefs.py +0 -0
  63. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/client_collateral.py +0 -0
  64. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/client_worker.py +0 -0
  65. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/conftest.py +0 -0
  66. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/dupname_classdefs1.py +0 -0
  67. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/dupname_classdefsa.py +0 -0
  68. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/more_classdefs.py +0 -0
  69. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/queuedworkflow.py +0 -0
  70. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/script_without_fastapi.py +0 -0
  71. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_admin_server.py +0 -0
  72. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_async.py +0 -0
  73. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_async_workflow_management.py +0 -0
  74. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_classdecorators.py +0 -0
  75. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_cli.py +0 -0
  76. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_client.py +0 -0
  77. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_concurrency.py +0 -0
  78. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_config.py +0 -0
  79. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_croniter.py +0 -0
  80. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_dbos.py +0 -0
  81. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_debouncer.py +0 -0
  82. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_debug.py +0 -0
  83. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_docker_secrets.py +0 -0
  84. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_failures.py +0 -0
  85. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_fastapi.py +0 -0
  86. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_fastapi_roles.py +0 -0
  87. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_flask.py +0 -0
  88. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_kafka.py +0 -0
  89. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_outcome.py +0 -0
  90. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_package.py +0 -0
  91. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_scheduler.py +0 -0
  92. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_schema_migration.py +0 -0
  93. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_singleton.py +0 -0
  94. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_spans.py +0 -0
  95. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_sqlalchemy.py +0 -0
  96. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_streaming.py +0 -0
  97. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_workflow_introspection.py +0 -0
  98. {dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_workflow_management.py +0 -0
  99. {dbos-2.2.0a2 → dbos-2.2.0a3}/version/__init__.py +0 -0
{dbos-2.2.0a2 → dbos-2.2.0a3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 2.2.0a2
+Version: 2.2.0a3
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_client.py
@@ -62,6 +62,7 @@ class EnqueueOptions(_EnqueueOptionsRequired, total=False):
     deduplication_id: str
     priority: int
     max_recovery_attempts: int
+    queue_partition_key: str


 def validate_enqueue_options(options: EnqueueOptions) -> None:
@@ -185,6 +186,7 @@ class DBOSClient:
             "deduplication_id": options.get("deduplication_id"),
             "priority": options.get("priority"),
             "app_version": options.get("app_version"),
+            "queue_partition_key": options.get("queue_partition_key"),
         }

         inputs: WorkflowInputs = {
@@ -221,6 +223,7 @@ class DBOSClient:
                 else 0
             ),
             "inputs": self._serializer.serialize(inputs),
+            "queue_partition_key": enqueue_options_internal["queue_partition_key"],
         }

         self._sys_db.init_workflow(
@@ -286,6 +289,7 @@ class DBOSClient:
             "deduplication_id": None,
             "priority": 0,
             "inputs": self._serializer.serialize({"args": (), "kwargs": {}}),
+            "queue_partition_key": None,
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
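These _client.py changes expose the new option on the external-client path: EnqueueOptions gains a queue_partition_key field, and DBOSClient.enqueue threads it through to the workflow status record. A minimal sketch of enqueueing onto a partitioned queue from outside the application; the connection string, queue name, and workflow name are illustrative placeholders, and the exact DBOSClient constructor arguments are an assumption, so check the client docs for your version:

    from dbos import DBOSClient

    # Placeholder connection string; DBOSClient talks directly to the
    # DBOS system database (constructor shape assumed here).
    client = DBOSClient("postgresql://localhost:5432/dbos_example")

    handle = client.enqueue(
        {
            "queue_name": "orders",                # hypothetical queue
            "workflow_name": "process_order",      # hypothetical workflow
            "queue_partition_key": "customer-42",  # new field in 2.2.0a3
        },
        "order-123",  # positional workflow arguments, if any
    )
    print(handle.get_result())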
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_context.py
@@ -120,6 +120,8 @@ class DBOSContext:
         self.deduplication_id: Optional[str] = None
         # A user-specified priority for the enqueuing workflow.
         self.priority: Optional[int] = None
+        # If the workflow is enqueued on a partitioned queue, its partition key
+        self.queue_partition_key: Optional[str] = None

     def create_child(self) -> DBOSContext:
         rv = DBOSContext()
@@ -479,6 +481,7 @@ class SetEnqueueOptions:
         deduplication_id: Optional[str] = None,
         priority: Optional[int] = None,
         app_version: Optional[str] = None,
+        queue_partition_key: Optional[str] = None,
     ) -> None:
         self.created_ctx = False
         self.deduplication_id: Optional[str] = deduplication_id
@@ -491,6 +494,8 @@ class SetEnqueueOptions:
         self.saved_priority: Optional[int] = None
         self.app_version: Optional[str] = app_version
         self.saved_app_version: Optional[str] = None
+        self.queue_partition_key = queue_partition_key
+        self.saved_queue_partition_key: Optional[str] = None

     def __enter__(self) -> SetEnqueueOptions:
         # Code to create a basic context
@@ -505,6 +510,8 @@ class SetEnqueueOptions:
         ctx.priority = self.priority
         self.saved_app_version = ctx.app_version
         ctx.app_version = self.app_version
+        self.saved_queue_partition_key = ctx.queue_partition_key
+        ctx.queue_partition_key = self.queue_partition_key
         return self

     def __exit__(
@@ -517,6 +524,7 @@ class SetEnqueueOptions:
         curr_ctx.deduplication_id = self.saved_deduplication_id
         curr_ctx.priority = self.saved_priority
         curr_ctx.app_version = self.saved_app_version
+        curr_ctx.queue_partition_key = self.saved_queue_partition_key
         # Code to clean up the basic context if we created it
         if self.created_ctx:
             _clear_local_dbos_context()
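Because SetEnqueueOptions saves the previous context value on entry and restores it on exit, a partition key is scoped to its with block and nests cleanly. A small illustration; the tenant keys are hypothetical, and the enqueues would go inside the blocks:

    from dbos import SetEnqueueOptions

    with SetEnqueueOptions(queue_partition_key="tenant-a"):
        # Enqueues here carry partition key "tenant-a".
        with SetEnqueueOptions(queue_partition_key="tenant-b"):
            # Enqueues here carry "tenant-b".
            pass
        # Back to "tenant-a" here; once the outer block exits,
        # no partition key is set on the context.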
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_core.py
@@ -303,6 +303,11 @@ def _init_workflow(
             else 0
         ),
         "inputs": dbos._serializer.serialize(inputs),
+        "queue_partition_key": (
+            enqueue_options["queue_partition_key"]
+            if enqueue_options is not None
+            else None
+        ),
     }

     # Synchronously record the status and inputs for workflows
@@ -571,6 +576,9 @@ def start_workflow(
         deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
         priority=local_ctx.priority if local_ctx is not None else None,
         app_version=local_ctx.app_version if local_ctx is not None else None,
+        queue_partition_key=(
+            local_ctx.queue_partition_key if local_ctx is not None else None
+        ),
     )
     new_wf_id, new_wf_ctx = _get_new_wf()

@@ -664,6 +672,9 @@ async def start_workflow_async(
         deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
         priority=local_ctx.priority if local_ctx is not None else None,
         app_version=local_ctx.app_version if local_ctx is not None else None,
+        queue_partition_key=(
+            local_ctx.queue_partition_key if local_ctx is not None else None
+        ),
     )
     new_wf_id, new_wf_ctx = _get_new_wf()
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_migration.py
@@ -203,8 +203,14 @@ CREATE TABLE "{schema}".event_dispatch_kv (
 """


+def get_dbos_migration_two(schema: str) -> str:
+    return f"""
+    ALTER TABLE "{schema}".workflow_status ADD COLUMN queue_partition_key TEXT;
+    """
+
+
 def get_dbos_migrations(schema: str) -> list[str]:
-    return [get_dbos_migration_one(schema)]
+    return [get_dbos_migration_one(schema), get_dbos_migration_two(schema)]


 def get_sqlite_timestamp_expr() -> str:
@@ -293,4 +299,8 @@ CREATE TABLE streams (
     );
     """

-    sqlite_migrations = [sqlite_migration_one]
+    sqlite_migration_two = """
+    ALTER TABLE workflow_status ADD COLUMN queue_partition_key TEXT;
+    """
+
+    sqlite_migrations = [sqlite_migration_one, sqlite_migration_two]
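Both backends receive the same additive migration, so an existing system database upgrades in place when the new version launches. A hedged way to confirm the column landed, using SQLAlchemy's inspector; the connection URL and the "dbos" schema name are assumptions to adjust for your deployment:

    import sqlalchemy as sa

    engine = sa.create_engine("postgresql://localhost:5432/dbos_example")  # placeholder
    columns = [
        col["name"]
        for col in sa.inspect(engine).get_columns("workflow_status", schema="dbos")
    ]
    assert "queue_partition_key" in columns  # added by migration two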
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_queue.py
@@ -43,6 +43,7 @@ class Queue:
         *,  # Disable positional arguments from here on
         worker_concurrency: Optional[int] = None,
         priority_enabled: bool = False,
+        partition_queue: bool = False,
     ) -> None:
         if (
             worker_concurrency is not None
@@ -57,6 +58,7 @@ class Queue:
         self.worker_concurrency = worker_concurrency
         self.limiter = limiter
         self.priority_enabled = priority_enabled
+        self.partition_queue = partition_queue
         from ._dbos import _get_or_create_dbos_registry

         registry = _get_or_create_dbos_registry()
@@ -78,6 +80,18 @@ class Queue:
            raise Exception(
                f"Priority is not enabled for queue {self.name}. Setting priority will not have any effect."
            )
+        if self.partition_queue and (
+            context is None or context.queue_partition_key is None
+        ):
+            raise Exception(
+                f"A workflow cannot be enqueued on partitioned queue {self.name} without a partition key"
+            )
+        if context and context.queue_partition_key and not self.partition_queue:
+            raise Exception(
+                f"You can only use a partition key on a partition-enabled queue. Key {context.queue_partition_key} was used with non-partitioned queue {self.name}"
+            )
+        if context and context.queue_partition_key and context.deduplication_id:
+            raise Exception("Deduplication is not supported for partitioned queues")

         dbos = _get_dbos_instance()
         return start_workflow(dbos, func, self.name, False, *args, **kwargs)
@@ -105,10 +119,21 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
         queues = dict(dbos._registry.queue_info_map)
         for _, queue in queues.items():
             try:
-                wf_ids = dbos._sys_db.start_queued_workflows(
-                    queue, GlobalParams.executor_id, GlobalParams.app_version
-                )
-                for id in wf_ids:
+                if queue.partition_queue:
+                    dequeued_workflows = []
+                    queue_partition_keys = dbos._sys_db.get_queue_partitions(queue.name)
+                    for key in queue_partition_keys:
+                        dequeued_workflows += dbos._sys_db.start_queued_workflows(
+                            queue,
+                            GlobalParams.executor_id,
+                            GlobalParams.app_version,
+                            key,
+                        )
+                else:
+                    dequeued_workflows = dbos._sys_db.start_queued_workflows(
+                        queue, GlobalParams.executor_id, GlobalParams.app_version, None
+                    )
+                for id in dequeued_workflows:
                     execute_workflow_by_id(dbos, id)
             except OperationalError as e:
                 if isinstance(
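Taken together, the _queue.py changes make dispatch per-partition: for a partition_queue, the queue thread first lists the distinct partition keys that currently have enqueued work, then runs a separate dequeue pass for each key, so flow control applies within a partition rather than across the whole queue. A minimal usage sketch, assuming a configured and launched DBOS app; the queue and workflow names are illustrative:

    from dbos import DBOS, Queue, SetEnqueueOptions

    # With worker_concurrency=1, each partition runs one workflow at a time,
    # while different partitions proceed independently.
    orders_queue = Queue("orders", partition_queue=True, worker_concurrency=1)

    @DBOS.workflow()
    def process_order(order_id: str) -> str:
        return order_id

    # Work for different customers can run in parallel; work for the same
    # customer is serialized in enqueue order.
    with SetEnqueueOptions(queue_partition_key="customer-1"):
        h1 = orders_queue.enqueue(process_order, "order-1")
    with SetEnqueueOptions(queue_partition_key="customer-2"):
        h2 = orders_queue.enqueue(process_order, "order-2")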
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_schemas/system_database.py
@@ -77,6 +77,7 @@ class SystemSchema:
         Column("deduplication_id", Text(), nullable=True),
         Column("inputs", Text()),
         Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
+        Column("queue_partition_key", Text()),
         Index("workflow_status_created_at_index", "created_at"),
         Index("workflow_status_executor_id_index", "executor_id"),
         Index("workflow_status_status_index", "status"),
{dbos-2.2.0a2 → dbos-2.2.0a3}/dbos/_sys_db.py
@@ -152,6 +152,8 @@ class WorkflowStatusInternal(TypedDict):
     priority: int
     # Serialized workflow inputs
     inputs: str
+    # If this workflow is enqueued on a partitioned queue, its partition key
+    queue_partition_key: Optional[str]


 class EnqueueOptionsInternal(TypedDict):
@@ -161,6 +163,8 @@ class EnqueueOptionsInternal(TypedDict):
     priority: Optional[int]
     # On what version the workflow is enqueued. Current version if not specified.
     app_version: Optional[str]
+    # If the workflow is enqueued on a partitioned queue, its partition key
+    queue_partition_key: Optional[str]


 class RecordedResult(TypedDict):
@@ -490,6 +494,7 @@ class SystemDatabase(ABC):
                 deduplication_id=status["deduplication_id"],
                 priority=status["priority"],
                 inputs=status["inputs"],
+                queue_partition_key=status["queue_partition_key"],
             )
             .on_conflict_do_update(
                 index_elements=["workflow_uuid"],
@@ -761,6 +766,7 @@
                     SystemSchema.workflow_status.c.deduplication_id,
                     SystemSchema.workflow_status.c.priority,
                     SystemSchema.workflow_status.c.inputs,
+                    SystemSchema.workflow_status.c.queue_partition_key,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
             ).fetchone()
             if row is None:
@@ -788,6 +794,7 @@
                 "deduplication_id": row[16],
                 "priority": row[17],
                 "inputs": row[18],
+                "queue_partition_key": row[19],
             }
             return status

@@ -1714,8 +1721,41 @@ class SystemDatabase(ABC):
         )
         return value

+    @db_retry()
+    def get_queue_partitions(self, queue_name: str) -> List[str]:
+        """
+        Get all unique partition names associated with a queue for ENQUEUED workflows.
+
+        Args:
+            queue_name: The name of the queue to get partitions for
+
+        Returns:
+            A list of unique partition names for the queue
+        """
+        with self.engine.begin() as c:
+            query = (
+                sa.select(SystemSchema.workflow_status.c.queue_partition_key)
+                .distinct()
+                .where(SystemSchema.workflow_status.c.queue_name == queue_name)
+                .where(
+                    SystemSchema.workflow_status.c.status.in_(
+                        [
+                            WorkflowStatusString.ENQUEUED.value,
+                        ]
+                    )
+                )
+                .where(SystemSchema.workflow_status.c.queue_partition_key.isnot(None))
+            )
+
+            rows = c.execute(query).fetchall()
+            return [row[0] for row in rows]
+
     def start_queued_workflows(
-        self, queue: "Queue", executor_id: str, app_version: str
+        self,
+        queue: "Queue",
+        executor_id: str,
+        app_version: str,
+        queue_partition_key: Optional[str],
     ) -> List[str]:
         if self._debug_mode:
             return []
@@ -1734,6 +1774,10 @@
                 sa.select(sa.func.count())
                 .select_from(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_status.c.queue_partition_key
+                    == queue_partition_key
+                )
                 .where(
                     SystemSchema.workflow_status.c.status
                     != WorkflowStatusString.ENQUEUED.value
@@ -1758,6 +1802,10 @@
                 )
                 .select_from(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_status.c.queue_partition_key
+                    == queue_partition_key
+                )
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.PENDING.value
@@ -1799,6 +1847,10 @@
                 )
                 .select_from(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_status.c.queue_partition_key
+                    == queue_partition_key
+                )
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.ENQUEUED.value
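Note the repeated pattern in the three query changes above: every query that counts or selects queued work now also filters on queue_partition_key, which is what makes the queue's concurrency and rate limits apply per partition. A simplified sketch of that accounting, not the library's exact code; ws stands for the workflow_status table object:

    import sqlalchemy as sa

    def count_started_in_partition(conn, ws, queue_name: str, partition_key: str) -> int:
        # Workflows on this queue and partition that have left ENQUEUED,
        # i.e. have already been claimed by some worker.
        return conn.execute(
            sa.select(sa.func.count())
            .select_from(ws)
            .where(ws.c.queue_name == queue_name)
            .where(ws.c.queue_partition_key == partition_key)
            .where(ws.c.status != "ENQUEUED")
        ).scalar_one()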
{dbos-2.2.0a2 → dbos-2.2.0a3}/pyproject.toml
@@ -34,7 +34,7 @@ classifiers = [
     "Topic :: Software Development :: Libraries :: Python Modules",
     "Framework :: AsyncIO",
 ]
-version = "2.2.0a2"
+version = "2.2.0a3"

 [project.license]
 text = "MIT"
{dbos-2.2.0a2 → dbos-2.2.0a3}/tests/test_queue.py
@@ -1612,3 +1612,80 @@ async def test_enqueue_version_async(dbos: DBOS) -> None:
     # Change the global version, verify it works
     GlobalParams.app_version = future_version
     assert await handle.get_result() == input
+
+
+def test_queue_partitions(dbos: DBOS, client: DBOSClient) -> None:
+
+    blocking_event = threading.Event()
+    waiting_event = threading.Event()
+
+    @DBOS.workflow()
+    def blocked_workflow() -> str:
+        waiting_event.set()
+        blocking_event.wait()
+        assert DBOS.workflow_id
+        return DBOS.workflow_id
+
+    @DBOS.workflow()
+    def normal_workflow() -> str:
+        assert DBOS.workflow_id
+        return DBOS.workflow_id
+
+    queue = Queue("queue", partition_queue=True, worker_concurrency=1)
+
+    blocked_partition_key = "blocked"
+    normal_partition_key = "normal"
+
+    # Enqueue a blocked workflow and a normal workflow on
+    # the blocked partition. Verify the blocked workflow starts
+    # but the normal workflow is stuck behind it.
+    with SetEnqueueOptions(queue_partition_key=blocked_partition_key):
+        blocked_blocked_handle = queue.enqueue(blocked_workflow)
+        blocked_normal_handle = queue.enqueue(normal_workflow)
+
+    waiting_event.wait()
+    assert (
+        blocked_blocked_handle.get_status().status == WorkflowStatusString.PENDING.value
+    )
+    assert (
+        blocked_normal_handle.get_status().status == WorkflowStatusString.ENQUEUED.value
+    )
+
+    # Enqueue a normal workflow on the other partition and verify it runs normally
+    with SetEnqueueOptions(queue_partition_key=normal_partition_key):
+        normal_handle = queue.enqueue(normal_workflow)
+
+    assert normal_handle.get_result()
+
+    # Unblock the blocked partition and verify its workflows complete
+    blocking_event.set()
+    assert blocked_blocked_handle.get_result()
+    assert blocked_normal_handle.get_result()
+
+    # Confirm client enqueue works with partitions
+    client_handle: WorkflowHandle[None] = client.enqueue(
+        {
+            "queue_name": queue.name,
+            "workflow_name": normal_workflow.__qualname__,
+            "queue_partition_key": blocked_partition_key,
+        }
+    )
+    assert client_handle.get_result()
+
+    # You can only enqueue on a partitioned queue with a partition key
+    with pytest.raises(Exception):
+        queue.enqueue(normal_workflow)
+
+    # Deduplication is not supported for partitioned queues
+    with pytest.raises(Exception):
+        with SetEnqueueOptions(
+            queue_partition_key=normal_partition_key, deduplication_id="key"
+        ):
+            queue.enqueue(normal_workflow)
+
+    # You can only enqueue with a partition key on a partitioned queue
+    partitionless_queue = Queue("partitionless-queue")
+
+    with pytest.raises(Exception):
+        with SetEnqueueOptions(queue_partition_key="test"):
+            partitionless_queue.enqueue(normal_workflow)