dbos 1.3.0a2__py3-none-any.whl → 1.3.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_client.py CHANGED
@@ -163,6 +163,12 @@ class DBOSClient:
                 int(workflow_timeout * 1000) if workflow_timeout is not None else None
             ),
             "workflow_deadline_epoch_ms": None,
+            "deduplication_id": enqueue_options_internal["deduplication_id"],
+            "priority": (
+                enqueue_options_internal["priority"]
+                if enqueue_options_internal["priority"] is not None
+                else 0
+            ),
         }
 
         inputs: WorkflowInputs = {
@@ -174,7 +180,6 @@ class DBOSClient:
             status,
             _serialization.serialize_args(inputs),
             max_recovery_attempts=None,
-            enqueue_options=enqueue_options_internal,
         )
         return workflow_id
 
@@ -230,6 +235,8 @@ class DBOSClient:
             "app_version": None,
             "workflow_timeout_ms": None,
             "workflow_deadline_epoch_ms": None,
+            "deduplication_id": None,
+            "priority": 0,
         }
         with self._sys_db.engine.begin() as conn:
            self._sys_db._insert_workflow_status(
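With queue metadata consolidated into `workflow_status`, the client now writes `deduplication_id` and `priority` directly into the status row at enqueue time. A minimal sketch of what this looks like from the caller's side, assuming the public `EnqueueOptions` fields mirror the internal dict above (connection string and workflow names are placeholders):

```python
# Sketch only: field names assumed to mirror the internal status dict above.
from dbos import DBOSClient, EnqueueOptions

client = DBOSClient("postgresql://user:pass@localhost:5432/app_db")

options: EnqueueOptions = {
    "queue_name": "email_queue",
    "workflow_name": "send_email",
    # Both values now land on the workflow_status row itself,
    # not on a separate workflow_queue row.
    "deduplication_id": "user-42-welcome-email",
    "priority": 5,
}
handle = client.enqueue(options, "user42@example.com")
print(handle.get_result())
```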
dbos/_core.py CHANGED
@@ -279,6 +279,18 @@ def _init_workflow(
         "updated_at": None,
         "workflow_timeout_ms": workflow_timeout_ms,
         "workflow_deadline_epoch_ms": workflow_deadline_epoch_ms,
+        "deduplication_id": (
+            enqueue_options["deduplication_id"] if enqueue_options is not None else None
+        ),
+        "priority": (
+            (
+                enqueue_options["priority"]
+                if enqueue_options["priority"] is not None
+                else 0
+            )
+            if enqueue_options is not None
+            else 0
+        ),
     }
 
     # If we have a class name, the first arg is the instance and do not serialize
@@ -290,7 +302,6 @@ def _init_workflow(
         status,
         _serialization.serialize_args(inputs),
         max_recovery_attempts=max_recovery_attempts,
-        enqueue_options=enqueue_options,
     )
 
     if workflow_deadline_epoch_ms is not None:
@@ -342,13 +353,12 @@ def _get_wf_invoke_func(
             return recorded_result
         try:
             output = func()
-            status["status"] = "SUCCESS"
-            status["output"] = _serialization.serialize(output)
             if not dbos.debug_mode:
-                if status["queue_name"] is not None:
-                    queue = dbos._registry.queue_info_map[status["queue_name"]]
-                    dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
-                dbos._sys_db.update_workflow_status(status)
+                dbos._sys_db.update_workflow_outcome(
+                    status["workflow_uuid"],
+                    "SUCCESS",
+                    output=_serialization.serialize(output),
+                )
             return output
         except DBOSWorkflowConflictIDError:
             # Await the workflow result
@@ -357,13 +367,12 @@
         except DBOSWorkflowCancelledError as error:
             raise
         except Exception as error:
-            status["status"] = "ERROR"
-            status["error"] = _serialization.serialize_exception(error)
             if not dbos.debug_mode:
-                if status["queue_name"] is not None:
-                    queue = dbos._registry.queue_info_map[status["queue_name"]]
-                    dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
-                dbos._sys_db.update_workflow_status(status)
+                dbos._sys_db.update_workflow_outcome(
+                    status["workflow_uuid"],
+                    "ERROR",
+                    error=_serialization.serialize_exception(error),
+                )
             raise
 
     return persist
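The nested conditional that computes `"priority"` in `_init_workflow` reads densely; here is a plain-Python reading of the same rule (hypothetical helper, not part of the package):

```python
# Equivalent reading of the nested "priority" expression in _init_workflow:
# default to 0 whenever enqueue options are absent or carry no priority.
from typing import Optional

def effective_priority(enqueue_options: Optional[dict]) -> int:
    if enqueue_options is None:
        return 0
    priority = enqueue_options["priority"]
    return priority if priority is not None else 0

assert effective_priority(None) == 0
assert effective_priority({"priority": None}) == 0
assert effective_priority({"priority": 7}) == 7
```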
dbos/_dbos.py CHANGED
@@ -215,6 +215,8 @@ class DBOSRegistry:
         sources = sorted(
             [inspect.getsource(wf) for wf in self.workflow_info_map.values()]
         )
+        # Different DBOS versions should produce different app versions
+        sources.append(GlobalParams.dbos_version)
         for source in sources:
             hasher.update(source.encode("utf-8"))
         return hasher.hexdigest()
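The intent of this two-line change: the computed application version is a digest over workflow sources, so mixing in the library version makes an upgrade of DBOS alone produce a new app version. A standalone sketch of the idea (hash choice and helper name are assumptions, not the library's code):

```python
import hashlib

def compute_app_version(workflow_sources: list[str], dbos_version: str) -> str:
    hasher = hashlib.sha256()
    # Sort the workflow sources, then mix in the DBOS version last,
    # mirroring sources.append(GlobalParams.dbos_version) above.
    for source in sorted(workflow_sources) + [dbos_version]:
        hasher.update(source.encode("utf-8"))
    return hasher.hexdigest()

v_old = compute_app_version(["def wf(): ..."], "1.3.0a2")
v_new = compute_app_version(["def wf(): ..."], "1.3.0a4")
assert v_old != v_new  # same workflows, new DBOS version -> new app version
```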
dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py ADDED
@@ -0,0 +1,71 @@
+"""consolidate_queues
+
+Revision ID: 66478e1b95e5
+Revises: 933e86bdac6a
+Create Date: 2025-05-21 10:14:25.674613
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "66478e1b95e5"
+down_revision: Union[str, None] = "933e86bdac6a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # Add new columns to workflow_status table
+    op.add_column(
+        "workflow_status",
+        sa.Column("started_at_epoch_ms", sa.BigInteger(), nullable=True),
+        schema="dbos",
+    )
+
+    op.add_column(
+        "workflow_status",
+        sa.Column("deduplication_id", sa.Text(), nullable=True),
+        schema="dbos",
+    )
+
+    op.add_column(
+        "workflow_status",
+        sa.Column(
+            "priority", sa.Integer(), nullable=False, server_default=sa.text("'0'::int")
+        ),
+        schema="dbos",
+    )
+
+    # Add unique constraint for deduplication_id
+    op.create_unique_constraint(
+        "uq_workflow_status_queue_name_dedup_id",
+        "workflow_status",
+        ["queue_name", "deduplication_id"],
+        schema="dbos",
+    )
+
+    # Add index on status field
+    op.create_index(
+        "workflow_status_status_index", "workflow_status", ["status"], schema="dbos"
+    )
+
+
+def downgrade() -> None:
+    # Drop indexes
+    op.drop_index(
+        "workflow_status_status_index", table_name="workflow_status", schema="dbos"
+    )
+
+    # Drop unique constraint
+    op.drop_constraint(
+        "uq_workflow_status_queue_name_dedup_id", "workflow_status", schema="dbos"
+    )
+
+    # Drop columns
+    op.drop_column("workflow_status", "priority", schema="dbos")
+    op.drop_column("workflow_status", "deduplication_id", schema="dbos")
+    op.drop_column("workflow_status", "started_at_epoch_ms", schema="dbos")
dbos/_queue.py CHANGED
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypedDict
 from psycopg import errors
 from sqlalchemy.exc import OperationalError
 
+from dbos._context import get_local_dbos_context
 from dbos._logger import dbos_logger
 from dbos._utils import GlobalParams
 
@@ -41,6 +42,7 @@ class Queue:
         limiter: Optional[QueueRateLimit] = None,
         *,  # Disable positional arguments from here on
         worker_concurrency: Optional[int] = None,
+        priority_enabled: bool = False,
     ) -> None:
         if (
             worker_concurrency is not None
@@ -54,6 +56,7 @@ class Queue:
         self.concurrency = concurrency
         self.worker_concurrency = worker_concurrency
         self.limiter = limiter
+        self.priority_enabled = priority_enabled
         from ._dbos import _get_or_create_dbos_registry
 
         registry = _get_or_create_dbos_registry()
@@ -66,6 +69,14 @@ class Queue:
     ) -> "WorkflowHandle[R]":
         from ._dbos import _get_dbos_instance
 
+        context = get_local_dbos_context()
+        if (
+            context is not None
+            and context.priority is not None
+            and not self.priority_enabled
+        ):
+            dbos_logger.warning(f"Priority is not enabled for queue {self.name}. Setting priority will not have any effect.")
+
         dbos = _get_dbos_instance()
         return start_workflow(dbos, func, self.name, False, *args, **kwargs)
 
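A usage sketch for the new flag, assuming the `SetEnqueueOptions` context manager from the public DBOS API is what populates the `context.priority` checked above (treat the exact import and signature as assumptions):

```python
from dbos import DBOS, Queue, SetEnqueueOptions

# Opt in to priority ordering; queues default to FIFO-only.
queue = Queue("reports", priority_enabled=True)

@DBOS.workflow()
def generate_report(month: str) -> str:
    return f"report for {month}"

# Lower numbers dequeue first. On a queue without priority_enabled=True
# this would only trigger the warning added above and have no effect.
with SetEnqueueOptions(priority=1):
    handle = queue.enqueue(generate_report, "2025-05")
```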
dbos/_schemas/system_database.py CHANGED
@@ -1,6 +1,5 @@
 from sqlalchemy import (
     BigInteger,
-    Boolean,
     Column,
     ForeignKey,
     Index,
@@ -57,8 +56,17 @@ class SystemSchema:
         Column("queue_name", Text, nullable=True),
         Column("workflow_timeout_ms", BigInteger, nullable=True),
         Column("workflow_deadline_epoch_ms", BigInteger, nullable=True),
+        Column("started_at_epoch_ms", BigInteger(), nullable=True),
+        Column("deduplication_id", Text(), nullable=True),
+        Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
         Index("workflow_status_created_at_index", "created_at"),
         Index("workflow_status_executor_id_index", "executor_id"),
+        Index("workflow_status_status_index", "status"),
+        UniqueConstraint(
+            "queue_name",
+            "deduplication_id",
+            name="uq_workflow_status_queue_name_dedup_id",
+        ),
     )
 
     operation_outputs = Table(
@@ -138,54 +146,3 @@ class SystemSchema:
         Column("value", Text, nullable=False),
         PrimaryKeyConstraint("workflow_uuid", "key"),
     )
-
-    scheduler_state = Table(
-        "scheduler_state",
-        metadata_obj,
-        Column("workflow_fn_name", Text, primary_key=True, nullable=False),
-        Column("last_run_time", BigInteger, nullable=False),
-    )
-
-    workflow_queue = Table(
-        "workflow_queue",
-        metadata_obj,
-        Column(
-            "workflow_uuid",
-            Text,
-            ForeignKey(
-                "workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
-            ),
-            nullable=False,
-            primary_key=True,
-        ),
-        # Column("executor_id", Text), # This column is deprecated. Do *not* use it.
-        Column("queue_name", Text, nullable=False),
-        Column(
-            "created_at_epoch_ms",
-            BigInteger,
-            nullable=False,
-            server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
-        ),
-        Column(
-            "started_at_epoch_ms",
-            BigInteger(),
-        ),
-        Column(
-            "completed_at_epoch_ms",
-            BigInteger(),
-        ),
-        Column(
-            "deduplication_id",
-            Text,
-            nullable=True,
-        ),
-        Column(
-            "priority",
-            Integer,
-            nullable=False,
-            server_default=text("'0'::int"),
-        ),
-        UniqueConstraint(
-            "queue_name", "deduplication_id", name="uq_workflow_queue_name_dedup_id"
-        ),
-    )
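With `workflow_queue` and `scheduler_state` dropped, queue state lives entirely on `workflow_status`. A sketch of inspecting a queue's backlog in dequeue order (engine URL is a placeholder; assumes the migrated `dbos` schema):

```python
import sqlalchemy as sa

engine = sa.create_engine("postgresql://user:pass@localhost:5432/app_db")
backlog_sql = sa.text(
    "SELECT workflow_uuid, priority, deduplication_id "
    "FROM dbos.workflow_status "
    "WHERE queue_name = :q AND status = 'ENQUEUED' "
    "ORDER BY priority ASC, created_at ASC"
)
with engine.connect() as conn:
    for row in conn.execute(backlog_sql, {"q": "reports"}):
        print(row.workflow_uuid, row.priority, row.deduplication_id)
```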
dbos/_sys_db.py CHANGED
@@ -136,13 +136,17 @@ class WorkflowStatusInternal(TypedDict):
     # The deadline of a workflow, computed by adding its timeout to its start time.
     # Deadlines propagate to children. When the deadline is reached, the workflow is cancelled.
     workflow_deadline_epoch_ms: Optional[int]
+    # Unique ID for deduplication on a queue
+    deduplication_id: Optional[str]
+    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    priority: int
 
 
 class EnqueueOptionsInternal(TypedDict):
-    deduplication_id: Optional[str]  # Unique ID for deduplication on a queue
-    priority: Optional[
-        int
-    ]  # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    # Unique ID for deduplication on a queue
+    deduplication_id: Optional[str]
+    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    priority: Optional[int]
 
 
 class RecordedResult(TypedDict):
@@ -456,6 +460,8 @@ class SystemDatabase:
                 ),
                 workflow_timeout_ms=status["workflow_timeout_ms"],
                 workflow_deadline_epoch_ms=status["workflow_deadline_epoch_ms"],
+                deduplication_id=status["deduplication_id"],
+                priority=status["priority"],
             )
             .on_conflict_do_update(
                 index_elements=["workflow_uuid"],
@@ -465,7 +471,18 @@
 
         cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.workflow_deadline_epoch_ms, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name)  # type: ignore
 
-        results = conn.execute(cmd)
+        try:
+            results = conn.execute(cmd)
+        except DBAPIError as dbapi_error:
+            # Unique constraint violation for the deduplication ID
+            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
+                assert status["deduplication_id"] is not None
+                assert status["queue_name"] is not None
+                raise DBOSQueueDeduplicatedError(
+                    status["workflow_uuid"],
+                    status["queue_name"],
+                    status["deduplication_id"],
+                )
         row = results.fetchone()
         if row is not None:
             # Check the started workflow matches the expected name, class_name, config_name, and queue_name
@@ -495,12 +512,6 @@
                 and max_recovery_attempts is not None
                 and recovery_attempts > max_recovery_attempts + 1
             ):
-                delete_cmd = sa.delete(SystemSchema.workflow_queue).where(
-                    SystemSchema.workflow_queue.c.workflow_uuid
-                    == status["workflow_uuid"]
-                )
-                conn.execute(delete_cmd)
-
                 dlq_cmd = (
                     sa.update(SystemSchema.workflow_status)
                     .where(
@@ -513,6 +524,8 @@
                     )
                     .values(
                         status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+                        deduplication_id=None,
+                        started_at_epoch_ms=None,
                         queue_name=None,
                     )
                 )
@@ -526,44 +539,28 @@
         return wf_status, workflow_deadline_epoch_ms
 
     @db_retry()
-    def update_workflow_status(
+    def update_workflow_outcome(
         self,
-        status: WorkflowStatusInternal,
+        workflow_id: str,
+        status: WorkflowStatuses,
+        *,
+        output: Optional[str] = None,
+        error: Optional[str] = None,
     ) -> None:
         if self._debug_mode:
             raise Exception("called update_workflow_status in debug mode")
-        wf_status: WorkflowStatuses = status["status"]
         with self.engine.begin() as c:
             c.execute(
-                pg.insert(SystemSchema.workflow_status)
+                sa.update(SystemSchema.workflow_status)
                 .values(
-                    workflow_uuid=status["workflow_uuid"],
-                    status=status["status"],
-                    name=status["name"],
-                    class_name=status["class_name"],
-                    config_name=status["config_name"],
-                    output=status["output"],
-                    error=status["error"],
-                    executor_id=status["executor_id"],
-                    application_version=status["app_version"],
-                    application_id=status["app_id"],
-                    authenticated_user=status["authenticated_user"],
-                    authenticated_roles=status["authenticated_roles"],
-                    assumed_role=status["assumed_role"],
-                    queue_name=status["queue_name"],
-                    recovery_attempts=(
-                        1 if wf_status != WorkflowStatusString.ENQUEUED.value else 0
-                    ),
-                )
-                .on_conflict_do_update(
-                    index_elements=["workflow_uuid"],
-                    set_=dict(
-                        status=status["status"],
-                        output=status["output"],
-                        error=status["error"],
-                        updated_at=func.extract("epoch", func.now()) * 1000,
-                    ),
+                    status=status,
+                    output=output,
+                    error=error,
+                    # As the workflow is complete, remove its deduplication ID
+                    deduplication_id=None,
+                    updated_at=func.extract("epoch", func.now()) * 1000,
                 )
+                .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
             )
 
     def cancel_workflow(
@@ -585,18 +582,15 @@
                 or row[0] == WorkflowStatusString.ERROR.value
             ):
                 return
-            # Remove the workflow from the queues table so it does not block the table
-            c.execute(
-                sa.delete(SystemSchema.workflow_queue).where(
-                    SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
-                )
-            )
-            # Set the workflow's status to CANCELLED
+            # Set the workflow's status to CANCELLED and remove it from any queue it is on
             c.execute(
                 sa.update(SystemSchema.workflow_status)
                 .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
                 .values(
                     status=WorkflowStatusString.CANCELLED.value,
+                    queue_name=None,
+                    deduplication_id=None,
+                    started_at_epoch_ms=None,
                 )
             )
 
@@ -620,19 +614,6 @@
                 or status == WorkflowStatusString.ERROR.value
             ):
                 return
-            # Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
-            c.execute(
-                sa.delete(SystemSchema.workflow_queue).where(
-                    SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
-                )
-            )
-            # Enqueue the workflow on the internal queue
-            c.execute(
-                pg.insert(SystemSchema.workflow_queue).values(
-                    workflow_uuid=workflow_id,
-                    queue_name=INTERNAL_QUEUE_NAME,
-                )
-            )
             # Set the workflow's status to ENQUEUED and clear its recovery attempts and deadline.
             c.execute(
                 sa.update(SystemSchema.workflow_status)
@@ -642,6 +623,8 @@
                     queue_name=INTERNAL_QUEUE_NAME,
                     recovery_attempts=0,
                     workflow_deadline_epoch_ms=None,
+                    deduplication_id=None,
+                    started_at_epoch_ms=None,
                 )
             )
 
@@ -720,14 +703,6 @@
             )
 
             c.execute(insert_stmt)
-
-            # Enqueue the forked workflow on the internal queue
-            c.execute(
-                pg.insert(SystemSchema.workflow_queue).values(
-                    workflow_uuid=forked_workflow_id,
-                    queue_name=INTERNAL_QUEUE_NAME,
-                )
-            )
         return forked_workflow_id
 
     @db_retry()
@@ -753,6 +728,8 @@
                     SystemSchema.workflow_status.c.application_id,
                     SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
                     SystemSchema.workflow_status.c.workflow_timeout_ms,
+                    SystemSchema.workflow_status.c.deduplication_id,
+                    SystemSchema.workflow_status.c.priority,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
             ).fetchone()
             if row is None:
@@ -777,6 +754,8 @@
                 "app_id": row[13],
                 "workflow_deadline_epoch_ms": row[14],
                 "workflow_timeout_ms": row[15],
+                "deduplication_id": row[16],
+                "priority": row[17],
             }
             return status
 
@@ -972,37 +951,40 @@
         """
         Retrieve a list of queued workflows result and inputs based on the input criteria. The result is a list of external-facing workflow status objects.
         """
-        query = sa.select(
-            SystemSchema.workflow_status.c.workflow_uuid,
-            SystemSchema.workflow_status.c.status,
-            SystemSchema.workflow_status.c.name,
-            SystemSchema.workflow_status.c.recovery_attempts,
-            SystemSchema.workflow_status.c.config_name,
-            SystemSchema.workflow_status.c.class_name,
-            SystemSchema.workflow_status.c.authenticated_user,
-            SystemSchema.workflow_status.c.authenticated_roles,
-            SystemSchema.workflow_status.c.assumed_role,
-            SystemSchema.workflow_status.c.queue_name,
-            SystemSchema.workflow_status.c.executor_id,
-            SystemSchema.workflow_status.c.created_at,
-            SystemSchema.workflow_status.c.updated_at,
-            SystemSchema.workflow_status.c.application_version,
-            SystemSchema.workflow_status.c.application_id,
-            SystemSchema.workflow_inputs.c.inputs,
-            SystemSchema.workflow_status.c.output,
-            SystemSchema.workflow_status.c.error,
-            SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
-            SystemSchema.workflow_status.c.workflow_timeout_ms,
-        ).select_from(
-            SystemSchema.workflow_queue.join(
-                SystemSchema.workflow_status,
-                SystemSchema.workflow_queue.c.workflow_uuid
-                == SystemSchema.workflow_status.c.workflow_uuid,
-            ).join(
+        query = (
+            sa.select(
+                SystemSchema.workflow_status.c.workflow_uuid,
+                SystemSchema.workflow_status.c.status,
+                SystemSchema.workflow_status.c.name,
+                SystemSchema.workflow_status.c.recovery_attempts,
+                SystemSchema.workflow_status.c.config_name,
+                SystemSchema.workflow_status.c.class_name,
+                SystemSchema.workflow_status.c.authenticated_user,
+                SystemSchema.workflow_status.c.authenticated_roles,
+                SystemSchema.workflow_status.c.assumed_role,
+                SystemSchema.workflow_status.c.queue_name,
+                SystemSchema.workflow_status.c.executor_id,
+                SystemSchema.workflow_status.c.created_at,
+                SystemSchema.workflow_status.c.updated_at,
+                SystemSchema.workflow_status.c.application_version,
+                SystemSchema.workflow_status.c.application_id,
+                SystemSchema.workflow_inputs.c.inputs,
+                SystemSchema.workflow_status.c.output,
+                SystemSchema.workflow_status.c.error,
+                SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
+                SystemSchema.workflow_status.c.workflow_timeout_ms,
+            )
+            .join(
                 SystemSchema.workflow_inputs,
-                SystemSchema.workflow_queue.c.workflow_uuid
+                SystemSchema.workflow_status.c.workflow_uuid
                 == SystemSchema.workflow_inputs.c.workflow_uuid,
             )
+            .where(
+                sa.and_(
+                    SystemSchema.workflow_status.c.queue_name.isnot(None),
+                    SystemSchema.workflow_status.c.status.in_(["ENQUEUED", "PENDING"]),
+                )
+            )
         )
         if input["sort_desc"]:
             query = query.order_by(SystemSchema.workflow_status.c.created_at.desc())
@@ -1014,7 +996,7 @@
 
         if input.get("queue_name"):
             query = query.where(
-                SystemSchema.workflow_queue.c.queue_name == input["queue_name"]
+                SystemSchema.workflow_status.c.queue_name == input["queue_name"]
             )
 
         if input.get("status"):
@@ -1693,51 +1675,6 @@
             )
             return value
 
-    def _enqueue(
-        self,
-        workflow_id: str,
-        queue_name: str,
-        conn: sa.Connection,
-        *,
-        enqueue_options: Optional[EnqueueOptionsInternal],
-    ) -> None:
-        if self._debug_mode:
-            raise Exception("called enqueue in debug mode")
-        try:
-            deduplication_id = (
-                enqueue_options["deduplication_id"]
-                if enqueue_options is not None
-                else None
-            )
-            priority = (
-                enqueue_options["priority"] if enqueue_options is not None else None
-            )
-            # Default to 0 (highest priority) if not provided
-            if priority is None:
-                priority = 0
-            query = (
-                pg.insert(SystemSchema.workflow_queue)
-                .values(
-                    workflow_uuid=workflow_id,
-                    queue_name=queue_name,
-                    deduplication_id=deduplication_id,
-                    priority=priority,
-                )
-                .on_conflict_do_nothing(
-                    index_elements=SystemSchema.workflow_queue.primary_key.columns
-                )
-            )  # Ignore primary key constraint violation
-            conn.execute(query)
-        except DBAPIError as dbapi_error:
-            # Unique constraint violation for the deduplication ID
-            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
-                assert (
-                    deduplication_id is not None
-                ), f"deduplication_id should not be None. Workflow ID: {workflow_id}, Queue name: {queue_name}."
-                raise DBOSQueueDeduplicatedError(
-                    workflow_id, queue_name, deduplication_id
-                )
-
     def start_queued_workflows(
         self, queue: "Queue", executor_id: str, app_version: str
     ) -> List[str]:
@@ -1755,13 +1692,14 @@
         if queue.limiter is not None:
             query = (
                 sa.select(sa.func.count())
-                .select_from(SystemSchema.workflow_queue)
-                .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+                .select_from(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.queue_name == queue.name)
                 .where(
-                    SystemSchema.workflow_queue.c.started_at_epoch_ms.isnot(None)
+                    SystemSchema.workflow_status.c.status
+                    != WorkflowStatusString.ENQUEUED.value
                 )
                 .where(
-                    SystemSchema.workflow_queue.c.started_at_epoch_ms
+                    SystemSchema.workflow_status.c.started_at_epoch_ms
                     > start_time_ms - limiter_period_ms
                 )
             )
@@ -1769,64 +1707,57 @@
             if num_recent_queries >= queue.limiter["limit"]:
                 return []
 
-            # Count how many workflows on this queue are currently PENDING both locally and globally.
-            pending_tasks_query = (
-                sa.select(
-                    SystemSchema.workflow_status.c.executor_id,
-                    sa.func.count().label("task_count"),
-                )
-                .select_from(
-                    SystemSchema.workflow_queue.join(
-                        SystemSchema.workflow_status,
-                        SystemSchema.workflow_queue.c.workflow_uuid
-                        == SystemSchema.workflow_status.c.workflow_uuid,
-                    )
-                )
-                .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
-                .where(
-                    SystemSchema.workflow_status.c.status
-                    == WorkflowStatusString.PENDING.value
-                )
-                .group_by(SystemSchema.workflow_status.c.executor_id)
-            )
-            pending_workflows = c.execute(pending_tasks_query).fetchall()
-            pending_workflows_dict = {row[0]: row[1] for row in pending_workflows}
-            local_pending_workflows = pending_workflows_dict.get(executor_id, 0)
-
             # Compute max_tasks, the number of workflows that can be dequeued given local and global concurrency limits,
             max_tasks = float("inf")
-            if queue.worker_concurrency is not None:
-                # Print a warning if the local concurrency limit is violated
-                if local_pending_workflows > queue.worker_concurrency:
-                    dbos_logger.warning(
-                        f"The number of local pending workflows ({local_pending_workflows}) on queue {queue.name} exceeds the local concurrency limit ({queue.worker_concurrency})"
+            if queue.worker_concurrency is not None or queue.concurrency is not None:
+                # Count how many workflows on this queue are currently PENDING both locally and globally.
+                pending_tasks_query = (
+                    sa.select(
+                        SystemSchema.workflow_status.c.executor_id,
+                        sa.func.count().label("task_count"),
+                    )
+                    .select_from(SystemSchema.workflow_status)
+                    .where(SystemSchema.workflow_status.c.queue_name == queue.name)
+                    .where(
+                        SystemSchema.workflow_status.c.status
+                        == WorkflowStatusString.PENDING.value
+                    )
+                    .group_by(SystemSchema.workflow_status.c.executor_id)
+                )
+                pending_workflows = c.execute(pending_tasks_query).fetchall()
+                pending_workflows_dict = {row[0]: row[1] for row in pending_workflows}
+                local_pending_workflows = pending_workflows_dict.get(executor_id, 0)
+
+                if queue.worker_concurrency is not None:
+                    # Print a warning if the local concurrency limit is violated
+                    if local_pending_workflows > queue.worker_concurrency:
+                        dbos_logger.warning(
+                            f"The number of local pending workflows ({local_pending_workflows}) on queue {queue.name} exceeds the local concurrency limit ({queue.worker_concurrency})"
+                        )
+                    max_tasks = max(
+                        0, queue.worker_concurrency - local_pending_workflows
                     )
-                max_tasks = max(0, queue.worker_concurrency - local_pending_workflows)
 
-            if queue.concurrency is not None:
-                global_pending_workflows = sum(pending_workflows_dict.values())
-                # Print a warning if the global concurrency limit is violated
-                if global_pending_workflows > queue.concurrency:
-                    dbos_logger.warning(
-                        f"The total number of pending workflows ({global_pending_workflows}) on queue {queue.name} exceeds the global concurrency limit ({queue.concurrency})"
+                if queue.concurrency is not None:
+                    global_pending_workflows = sum(pending_workflows_dict.values())
+                    # Print a warning if the global concurrency limit is violated
+                    if global_pending_workflows > queue.concurrency:
+                        dbos_logger.warning(
+                            f"The total number of pending workflows ({global_pending_workflows}) on queue {queue.name} exceeds the global concurrency limit ({queue.concurrency})"
+                        )
+                    available_tasks = max(
+                        0, queue.concurrency - global_pending_workflows
                     )
-                available_tasks = max(0, queue.concurrency - global_pending_workflows)
-                max_tasks = min(max_tasks, available_tasks)
+                    max_tasks = min(max_tasks, available_tasks)
 
             # Retrieve the first max_tasks workflows in the queue.
             # Only retrieve workflows of the local version (or without version set)
             query = (
                 sa.select(
-                    SystemSchema.workflow_queue.c.workflow_uuid,
-                )
-                .select_from(
-                    SystemSchema.workflow_queue.join(
-                        SystemSchema.workflow_status,
-                        SystemSchema.workflow_queue.c.workflow_uuid
-                        == SystemSchema.workflow_status.c.workflow_uuid,
-                    )
+                    SystemSchema.workflow_status.c.workflow_uuid,
                 )
-                .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+                .select_from(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.queue_name == queue.name)
                 .where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.ENQUEUED.value
@@ -1838,12 +1769,15 @@
                         SystemSchema.workflow_status.c.application_version.is_(None),
                     )
                 )
-                .order_by(
-                    SystemSchema.workflow_queue.c.priority.asc(),
-                    SystemSchema.workflow_queue.c.created_at_epoch_ms.asc(),
-                )
                 .with_for_update(nowait=True)  # Error out early
             )
+            if queue.priority_enabled:
+                query = query.order_by(
+                    SystemSchema.workflow_status.c.priority.asc(),
+                    SystemSchema.workflow_status.c.created_at.asc(),
+                )
+            else:
+                query = query.order_by(SystemSchema.workflow_status.c.created_at.asc())
             # Apply limit only if max_tasks is finite
             if max_tasks != float("inf"):
                 query = query.limit(int(max_tasks))
@@ -1873,6 +1807,7 @@
                         status=WorkflowStatusString.PENDING.value,
                         application_version=app_version,
                         executor_id=executor_id,
+                        started_at_epoch_ms=start_time_ms,
                         # If a timeout is set, set the deadline on dequeue
                         workflow_deadline_epoch_ms=sa.case(
                             (
@@ -1892,82 +1827,31 @@
                     )
                 )
                 # Then give it a start time
-                c.execute(
-                    SystemSchema.workflow_queue.update()
-                    .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
-                    .values(started_at_epoch_ms=start_time_ms)
-                )
                 ret_ids.append(id)
 
-            # If we have a limiter, garbage-collect all completed workflows started
-            # before the period. If there's no limiter, there's no need--they were
-            # deleted on completion.
-            if queue.limiter is not None:
-                c.execute(
-                    sa.delete(SystemSchema.workflow_queue)
-                    .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms != None)
-                    .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
-                    .where(
-                        SystemSchema.workflow_queue.c.started_at_epoch_ms
-                        < start_time_ms - limiter_period_ms
-                    )
-                )
-
             # Return the IDs of all functions we started
             return ret_ids
 
-    @db_retry()
-    def remove_from_queue(self, workflow_id: str, queue: "Queue") -> None:
-        if self._debug_mode:
-            raise Exception("called remove_from_queue in debug mode")
-
-        with self.engine.begin() as c:
-            if queue.limiter is None:
-                c.execute(
-                    sa.delete(SystemSchema.workflow_queue).where(
-                        SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
-                    )
-                )
-            else:
-                c.execute(
-                    sa.update(SystemSchema.workflow_queue)
-                    .where(SystemSchema.workflow_queue.c.workflow_uuid == workflow_id)
-                    .values(completed_at_epoch_ms=int(time.time() * 1000))
-                )
-
     def clear_queue_assignment(self, workflow_id: str) -> bool:
         if self._debug_mode:
             raise Exception("called clear_queue_assignment in debug mode")
 
-        with self.engine.connect() as conn:
-            with conn.begin() as transaction:
-                # Reset the start time in the queue to mark it as not started
-                res = conn.execute(
-                    sa.update(SystemSchema.workflow_queue)
-                    .where(SystemSchema.workflow_queue.c.workflow_uuid == workflow_id)
-                    .where(
-                        SystemSchema.workflow_queue.c.completed_at_epoch_ms.is_(None)
-                    )
-                    .values(started_at_epoch_ms=None)
+        with self.engine.begin() as c:
+            # Reset the status of the task to "ENQUEUED"
+            res = c.execute(
+                sa.update(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
+                .where(SystemSchema.workflow_status.c.queue_name.isnot(None))
+                .where(
+                    SystemSchema.workflow_status.c.status
+                    == WorkflowStatusString.PENDING.value
                 )
-
-                # If no rows were affected, the workflow is not anymore in the queue or was already completed
-                if res.rowcount == 0:
-                    transaction.rollback()
-                    return False
-
-                # Reset the status of the task to "ENQUEUED"
-                res = conn.execute(
-                    sa.update(SystemSchema.workflow_status)
-                    .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
-                    .values(status=WorkflowStatusString.ENQUEUED.value)
+                .values(
+                    status=WorkflowStatusString.ENQUEUED.value, started_at_epoch_ms=None
                 )
-                if res.rowcount == 0:
-                    # This should never happen
-                    raise Exception(
-                        f"UNREACHABLE: Workflow {workflow_id} is found in the workflow_queue table but not found in the workflow_status table"
-                    )
-                return True
+            )
+            # If no rows were affected, the workflow is not anymore in the queue or was already completed
+            return res.rowcount > 0
 
 T = TypeVar("T")
 
@@ -2012,7 +1896,6 @@
         inputs: str,
         *,
         max_recovery_attempts: Optional[int],
-        enqueue_options: Optional[EnqueueOptionsInternal],
     ) -> tuple[WorkflowStatuses, Optional[int]]:
         """
         Synchronously record the status and inputs for workflows in a single transaction
@@ -2021,19 +1904,7 @@
        wf_status, workflow_deadline_epoch_ms = self._insert_workflow_status(
            status, conn, max_recovery_attempts=max_recovery_attempts
        )
-        # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
        self._update_workflow_inputs(status["workflow_uuid"], inputs, conn)
-
-        if (
-            status["queue_name"] is not None
-            and wf_status == WorkflowStatusString.ENQUEUED.value
-        ):
-            self._enqueue(
-                status["workflow_uuid"],
-                status["queue_name"],
-                conn,
-                enqueue_options=enqueue_options,
-            )
        return wf_status, workflow_deadline_epoch_ms
 
    def check_connection(self) -> None:
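Because deduplication is now enforced when the status row is inserted, the failure surfaces as `DBOSQueueDeduplicatedError` at enqueue time rather than inside the removed `_enqueue` helper. A sketch of catching it (the exception's import path is an assumption):

```python
from dbos import Queue, SetEnqueueOptions
from dbos._error import DBOSQueueDeduplicatedError  # assumed import path

queue = Queue("payments")

def enqueue_once(workflow, order_id: str):
    try:
        with SetEnqueueOptions(deduplication_id=f"order-{order_id}"):
            return queue.enqueue(workflow, order_id)
    except DBOSQueueDeduplicatedError:
        # An ENQUEUED or PENDING workflow with this deduplication ID already exists.
        return None
```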
{dbos-1.3.0a2.dist-info → dbos-1.3.0a4.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 1.3.0a2
+Version: 1.3.0a4
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
{dbos-1.3.0a2.dist-info → dbos-1.3.0a4.dist-info}/RECORD RENAMED
@@ -1,19 +1,19 @@
-dbos-1.3.0a2.dist-info/METADATA,sha256=FLGWD6E0OqgjOJ8RnpSPGTK9sAi0s4F_7_I_vfu51KE,13267
-dbos-1.3.0a2.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
-dbos-1.3.0a2.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
-dbos-1.3.0a2.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos-1.3.0a4.dist-info/METADATA,sha256=SxzfQt29v1tlpqECaOISk3fOPzN9vCsagpgQ5n3TqwM,13267
+dbos-1.3.0a4.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-1.3.0a4.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-1.3.0a4.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=NssPCubaBxdiKarOWa-wViz1hdJSkmBGcpLX_gQ4NeA,891
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=TWXi4drrzKFpKkUmEJpJkQBZxAtOalnhtYicEn2nDK0,10618
 dbos/_app_db.py,sha256=0PKqpxJ3EbIaak3Wl0lNl3hXvhBfz4EEHaCw1bUOvIM,9937
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=BZ5mROMnHrWyMsMj8gYCfey79Zc4eZp1Srlrgel485o,14302
+dbos/_client.py,sha256=jrvbv33KMkoACD4-wE72a6-Mp2fx0tagh_MnHY93Apk,14576
 dbos/_conductor/conductor.py,sha256=o0IaZjwnZ2TOyHeP2H4iSX6UnXLXQ4uODvWAKD9hHMs,21703
 dbos/_conductor/protocol.py,sha256=wgOFZxmS81bv0WCB9dAyg0s6QzldpzVKQDoSPeaX0Ws,6967
 dbos/_context.py,sha256=5ajoWAmToAfzzmMLylnJZoL4Ny9rBwZWuG05sXadMIA,24798
-dbos/_core.py,sha256=m2i9lsHjNKTi8BQyiSOUBrAVH5OvMoBswNZPRpMVIC0,48662
+dbos/_core.py,sha256=-yhsWn5TqDEDsIE_fY1O5qxu295kYFnz8IJnS9luhTE,48656
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=ccVC97MgJr979z9sc0HhVLrdi1BJ9GzaMJSKFiPyKck,47041
+dbos/_dbos.py,sha256=Y---ozwd9OXzA1o-oWEbnafQkUibblE6o1kUuVZa_90,47163
 dbos/_dbos_config.py,sha256=JWVuPE_Ifyr-pYHFxclFalB_HZ8ETFCGNJzBHGpClXw,20347
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=tLJXWqZ4S-ExcaPnxg_i6cVxL6ZxrYlZjaGsklY-s2I,6115
@@ -30,6 +30,7 @@ dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py,sha256=ICL
 dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py,sha256=56w1v6TdofW3V18iwm0MP0SAeSaAUPSS40HIcn6qYIE,1072
 dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py,sha256=ZBYrtTdxy64HxIAlOes89fVIk2P1gNaJack7wuC_epg,873
 dbos/_migrations/versions/5c361fc04708_added_system_tables.py,sha256=Xr9hBDJjkAtymlauOmAy00yUHj0VVUaEz7kNwEM9IwE,6403
+dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py,sha256=Qo9C8pFSdN0GPM0fN-DI5GPRegXq99Mig2me04IXfLI,1894
 dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py,sha256=Q_R35pb8AfVI3sg5mzKwyoPfYB88Ychcc8gwxpM9R7A,1035
 dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py,sha256=yZX2kGF33skpXIBdMXtDNx-Nl_orFatKeHB8c-3K8-c,773
 dbos/_migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEULYsPfnp4YzaO5L198gDTgYY39AVA,2022
@@ -38,16 +39,16 @@ dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py,sha256=m90Lc5YH0ZISSq1MyxND6oq3RZrZKrIqEsZtwJ1jWxA,1049
 dbos/_outcome.py,sha256=EXxBg4jXCVJsByDQ1VOCIedmbeq_03S6d-p1vqQrLFU,6810
-dbos/_queue.py,sha256=oDQcydDwYM68U5KQKN6iZiSC-4LXye6KFmSJ7ohG048,3558
+dbos/_queue.py,sha256=w3vjOqU0NNdQlUFcvhhdRIzepcS2gtWJCgzobR1dJ8U,4009
 dbos/_recovery.py,sha256=jVMexjfCCNopzyn8gVQzJCmGJaP9G3C1EFaoCQ_Nh7g,2564
 dbos/_registrations.py,sha256=CZt1ElqDjCT7hz6iyT-1av76Yu-iuwu_c9lozO87wvM,7303
 dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
 dbos/_scheduler.py,sha256=SR1oRZRcVzYsj-JauV2LA8JtwTkt8mru7qf6H1AzQ1U,2027
 dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
-dbos/_schemas/system_database.py,sha256=3Z0L72bOgHnusK1hBaETWU9RfiLBP0QnS-fdu41i0yY,5835
+dbos/_schemas/system_database.py,sha256=5ebwz_h-Yz11u_eYbs8Cy8YinDpxvyiw8mEc0qjR5bo,4796
 dbos/_serialization.py,sha256=bWuwhXSQcGmiazvhJHA5gwhrRWxtmFmcCFQSDJnqqkU,3666
-dbos/_sys_db.py,sha256=KP2wIoytbtFoNzx3-SmobkAn3HhE6tZy_9Vvh8qpUQ8,85780
+dbos/_sys_db.py,sha256=wAmqy4oznS3BkG-Q0K0PnpodrecuUSP9UyXGQbTYkk4,80429
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py.dbos,sha256=aQnBPSSQpkB8ERfhf7gB7P9tsU6OPKhZscfeh0yiaD8,2702
@@ -67,4 +68,4 @@ dbos/cli/cli.py,sha256=EemOMqNpzSU2BQhAxV_e59pBRITDLwt49HF6W3uWBZg,20775
 dbos/dbos-config.schema.json,sha256=CjaspeYmOkx6Ip_pcxtmfXJTn_YGdSx_0pcPBF7KZmo,6060
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-1.3.0a2.dist-info/RECORD,,
+dbos-1.3.0a4.dist-info/RECORD,,