dbos 0.26.0a15__py3-none-any.whl → 0.26.0a19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_app_db.py CHANGED
@@ -74,9 +74,12 @@ class ApplicationDatabase:
                 database["connectionTimeoutMillis"] / 1000
             )
 
+        pool_size = database.get("app_db_pool_size")
+        if pool_size is None:
+            pool_size = 20
         self.engine = sa.create_engine(
             app_db_url,
-            pool_size=database["app_db_pool_size"],
+            pool_size=pool_size,
             max_overflow=0,
             pool_timeout=30,
             connect_args=connect_args,
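
Note: `app_db_pool_size` is now optional. The old code indexed the key directly, so a missing key raised `KeyError` and an explicit `None` was passed straight through to `create_engine`; the new code falls back to a pool of 20 connections in both cases (and with `max_overflow=0`, that is also the hard cap). A minimal sketch of the fallback, using hypothetical config dicts:

    # Hypothetical config dicts -- only the "app_db_pool_size" key matters here.
    explicit = {"app_db_pool_size": 50}
    legacy = {}  # an older config that never set the key

    for database in (explicit, legacy):
        pool_size = database.get("app_db_pool_size")
        if pool_size is None:
            pool_size = 20  # default applied by the diff above
        print(pool_size)  # prints 50, then 20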
dbos/_client.py CHANGED
@@ -3,6 +3,8 @@ import sys
 import uuid
 from typing import Any, Generic, List, Optional, TypedDict, TypeVar
 
+from dbos._app_db import ApplicationDatabase
+
 if sys.version_info < (3, 11):
     from typing_extensions import NotRequired
 else:
@@ -14,11 +16,18 @@ from dbos._dbos_config import parse_database_url_to_dbconfig
 from dbos._error import DBOSNonExistentWorkflowError
 from dbos._registrations import DEFAULT_MAX_RECOVERY_ATTEMPTS
 from dbos._serialization import WorkflowInputs
-from dbos._sys_db import SystemDatabase, WorkflowStatusInternal, WorkflowStatusString
+from dbos._sys_db import (
+    StepInfo,
+    SystemDatabase,
+    WorkflowStatusInternal,
+    WorkflowStatusString,
+)
 from dbos._workflow_commands import (
     WorkflowStatus,
+    fork_workflow,
     get_workflow,
     list_queued_workflows,
+    list_workflow_steps,
     list_workflows,
 )
 
@@ -82,6 +91,7 @@ class DBOSClient:
         if system_database is not None:
             db_config["sys_db_name"] = system_database
         self._sys_db = SystemDatabase(db_config)
+        self._app_db = ApplicationDatabase(db_config)
 
     def destroy(self) -> None:
         self._sys_db.destroy()
@@ -124,12 +134,7 @@
             "kwargs": kwargs,
         }
 
-        wf_status = self._sys_db.insert_workflow_status(status)
-        self._sys_db.update_workflow_inputs(
-            workflow_id, _serialization.serialize_args(inputs)
-        )
-        if wf_status == WorkflowStatusString.ENQUEUED.value:
-            self._sys_db.enqueue(workflow_id, queue_name)
+        self._sys_db.init_workflow(status, _serialization.serialize_args(inputs))
         return workflow_id
 
     def enqueue(
@@ -184,7 +189,8 @@
             "app_id": None,
             "app_version": None,
         }
-        self._sys_db.insert_workflow_status(status)
+        with self._sys_db.engine.begin() as conn:
+            self._sys_db.insert_workflow_status(status, conn)
         self._sys_db.send(status["workflow_uuid"], 0, destination_id, message, topic)
 
     async def send_async(
@@ -233,6 +239,7 @@
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         sort_desc: bool = False,
+        workflow_id_prefix: Optional[str] = None,
     ) -> List[WorkflowStatus]:
         return list_workflows(
             self._sys_db,
@@ -246,6 +253,7 @@
             limit=limit,
             offset=offset,
             sort_desc=sort_desc,
+            workflow_id_prefix=workflow_id_prefix,
         )
 
     async def list_workflows_async(
@@ -323,3 +331,23 @@
             offset=offset,
             sort_desc=sort_desc,
         )
+
+    def list_workflow_steps(self, workflow_id: str) -> List[StepInfo]:
+        return list_workflow_steps(self._sys_db, self._app_db, workflow_id)
+
+    async def list_workflow_steps_async(self, workflow_id: str) -> List[StepInfo]:
+        return await asyncio.to_thread(self.list_workflow_steps, workflow_id)
+
+    def fork_workflow(self, workflow_id: str, start_step: int) -> WorkflowHandle[R]:
+        forked_workflow_id = fork_workflow(
+            self._sys_db, self._app_db, workflow_id, start_step
+        )
+        return WorkflowHandleClientPolling[R](forked_workflow_id, self._sys_db)
+
+    async def fork_workflow_async(
+        self, workflow_id: str, start_step: int
+    ) -> WorkflowHandleAsync[R]:
+        forked_workflow_id = await asyncio.to_thread(
+            fork_workflow, self._sys_db, self._app_db, workflow_id, start_step
+        )
+        return WorkflowHandleClientAsyncPolling[R](forked_workflow_id, self._sys_db)
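
The net effect: `DBOSClient` can now inspect workflow steps and fork workflows without a full DBOS runtime, and enqueueing a workflow now records its status, inputs, and queue entry in one transaction via `init_workflow`. A usage sketch, assuming `DBOSClient` is importable from the package root; the connection URL and workflow ID are placeholders:

    from dbos import DBOSClient

    client = DBOSClient("postgresql://user:pass@localhost:5432/app_db")

    # Inspect the recorded steps of an existing workflow.
    steps = client.list_workflow_steps("my-workflow-id")

    # Re-execute it from step 2; the polling handle resolves when the fork finishes.
    handle = client.fork_workflow("my-workflow-id", 2)
    result = handle.get_result()

    client.destroy()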
dbos/_core.py CHANGED
@@ -280,18 +280,12 @@ def _init_workflow(
             raise DBOSNonExistentWorkflowError(wfid)
         wf_status = get_status_result["status"]
     else:
-        # Synchronously record the status and inputs for workflows
-        # TODO: Make this transactional (and with the queue step below)
-        wf_status = dbos._sys_db.insert_workflow_status(
-            status, max_recovery_attempts=max_recovery_attempts
+        wf_status = dbos._sys_db.init_workflow(
+            status,
+            _serialization.serialize_args(inputs),
+            max_recovery_attempts=max_recovery_attempts,
         )
 
-        # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
-        dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
-
-        if queue is not None and wf_status == WorkflowStatusString.ENQUEUED.value:
-            dbos._sys_db.enqueue(wfid, queue)
-
     status["status"] = wf_status
     return status
 
dbos/_dbos.py CHANGED
@@ -34,6 +34,7 @@ from dbos._conductor.conductor import ConductorWebsocket
 from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
 from dbos._workflow_commands import (
     WorkflowStatus,
+    fork_workflow,
     list_queued_workflows,
     list_workflows,
 )
@@ -67,7 +68,7 @@ from ._registrations import (
 )
 from ._roles import default_required_roles, required_roles
 from ._scheduler import ScheduledWorkflow, scheduled
-from ._sys_db import reset_system_database
+from ._sys_db import StepInfo, reset_system_database
 from ._tracer import dbos_tracer
 
 if TYPE_CHECKING:
@@ -113,7 +114,7 @@ from ._error import (
 from ._event_loop import BackgroundEventLoop
 from ._logger import add_otlp_to_all_loggers, config_logger, dbos_logger, init_logger
 from ._sys_db import SystemDatabase
-from ._workflow_commands import WorkflowStatus, get_workflow
+from ._workflow_commands import WorkflowStatus, get_workflow, list_workflow_steps
 
 # Most DBOS functions are just any callable F, so decorators / wrappers work on F
 # There are cases where the parameters P and return value R should be separate
@@ -363,13 +364,13 @@ class DBOS:
             check_config_consistency(name=unvalidated_config["name"])
 
         if unvalidated_config is not None:
-            self.config: ConfigFile = process_config(data=unvalidated_config)
+            self._config: ConfigFile = process_config(data=unvalidated_config)
         else:
             raise ValueError("No valid configuration was loaded.")
 
-        set_env_vars(self.config)
-        config_logger(self.config)
-        dbos_tracer.config(self.config)
+        set_env_vars(self._config)
+        config_logger(self._config)
+        dbos_tracer.config(self._config)
         dbos_logger.info("Initializing DBOS")
 
         # If using FastAPI, set up middleware and lifecycle events
@@ -453,19 +454,19 @@
         self._executor_field = ThreadPoolExecutor(max_workers=64)
         self._background_event_loop.start()
         self._sys_db_field = SystemDatabase(
-            self.config["database"], debug_mode=debug_mode
+            self._config["database"], debug_mode=debug_mode
         )
         self._app_db_field = ApplicationDatabase(
-            self.config["database"], debug_mode=debug_mode
+            self._config["database"], debug_mode=debug_mode
         )
 
         if debug_mode:
             return
 
-        admin_port = self.config.get("runtimeConfig", {}).get("admin_port")
+        admin_port = self._config.get("runtimeConfig", {}).get("admin_port")
         if admin_port is None:
             admin_port = 3001
-        run_admin_server = self.config.get("runtimeConfig", {}).get(
+        run_admin_server = self._config.get("runtimeConfig", {}).get(
             "run_admin_server"
         )
         if run_admin_server:
@@ -563,7 +564,7 @@
         assert (
             not self._launched
         ), "The system database cannot be reset after DBOS is launched. Resetting the system database is a destructive operation that should only be used in a test environment."
-        reset_system_database(self.config)
+        reset_system_database(self._config)
 
     def _destroy(self) -> None:
         self._initialized = False
@@ -959,40 +960,19 @@
     @classmethod
     def restart_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
         """Restart a workflow with a new workflow ID"""
-
         return cls.fork_workflow(workflow_id, 1)
 
     @classmethod
-    def fork_workflow(
-        cls, workflow_id: str, start_step: int = 1
-    ) -> WorkflowHandle[Any]:
-        """Restart a workflow with a new workflow ID"""
-
-        def get_max_function_id(workflow_uuid: str) -> int:
-            max_transactions = (
-                _get_dbos_instance()._app_db.get_max_function_id(workflow_uuid) or 0
-            )
-            max_operations = (
-                _get_dbos_instance()._sys_db.get_max_function_id(workflow_uuid) or 0
-            )
-            return max(max_transactions, max_operations)
-
-        max_function_id = get_max_function_id(workflow_id)
-        if max_function_id > 0 and start_step > max_function_id:
-            raise DBOSException(
-                f"Cannot fork workflow {workflow_id} at step {start_step}. The workflow has {max_function_id} steps."
-            )
+    def fork_workflow(cls, workflow_id: str, start_step: int) -> WorkflowHandle[Any]:
+        """Restart a workflow with a new workflow ID from a specific step"""
 
         def fn() -> str:
-            forked_workflow_id = str(uuid.uuid4())
             dbos_logger.info(f"Forking workflow: {workflow_id} from step {start_step}")
-
-            _get_dbos_instance()._app_db.clone_workflow_transactions(
-                workflow_id, forked_workflow_id, start_step
-            )
-
-            return _get_dbos_instance()._sys_db.fork_workflow(
-                workflow_id, forked_workflow_id, start_step
+            return fork_workflow(
+                _get_dbos_instance()._sys_db,
+                _get_dbos_instance()._app_db,
+                workflow_id,
+                start_step,
             )
 
         new_id = _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1014,6 +994,7 @@
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         sort_desc: bool = False,
+        workflow_id_prefix: Optional[str] = None,
     ) -> List[WorkflowStatus]:
         def fn() -> List[WorkflowStatus]:
             return list_workflows(
@@ -1028,6 +1009,7 @@
                 limit=limit,
                 offset=offset,
                 sort_desc=sort_desc,
+                workflow_id_prefix=workflow_id_prefix,
             )
 
         return _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1064,6 +1046,17 @@
             fn, "DBOS.listQueuedWorkflows"
         )
 
+    @classmethod
+    def list_workflow_steps(cls, workflow_id: str) -> List[StepInfo]:
+        def fn() -> List[StepInfo]:
+            return list_workflow_steps(
+                _get_dbos_instance()._sys_db, _get_dbos_instance()._app_db, workflow_id
+            )
+
+        return _get_dbos_instance()._sys_db.call_function_as_step(
+            fn, "DBOS.listWorkflowSteps"
+        )
+
     @classproperty
     def logger(cls) -> Logger:
         """Return the DBOS `Logger` for the current context."""
@@ -1074,15 +1067,15 @@
         """Return the DBOS `ConfigFile` for the current context."""
         global _dbos_global_instance
         if _dbos_global_instance is not None:
-            return _dbos_global_instance.config
+            return _dbos_global_instance._config
         reg = _get_or_create_dbos_registry()
         if reg.config is not None:
            return reg.config
-        config = (
+        loaded_config = (
             load_config()
         )  # This will return the processed & validated config (with defaults)
-        reg.config = config
-        return config
+        reg.config = loaded_config
+        return loaded_config
 
     @classproperty
     def sql_session(cls) -> Session:
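
The same capabilities surface on the `DBOS` class for code running inside an application. A sketch against an already-launched app; the workflow IDs and prefix are placeholders:

    from dbos import DBOS

    # New filter: list only workflows whose IDs start with a given prefix.
    runs = DBOS.list_workflows(workflow_id_prefix="nightly-batch-")

    # Inspect one run's recorded steps, then re-execute it from step 3.
    steps = DBOS.list_workflow_steps("nightly-batch-2024-06-01")
    handle = DBOS.fork_workflow("nightly-batch-2024-06-01", 3)

    # restart_workflow is now literally fork_workflow from step 1.
    handle = DBOS.restart_workflow("nightly-batch-2024-06-01")

One behavioral note: `DBOS.fork_workflow` no longer defaults `start_step` to 1, so callers that relied on the default must now pass it explicitly. The rename of `self.config` to `self._config` also makes the instance attribute private; external code should use the `DBOS.config` classproperty instead.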
dbos/_sys_db.py CHANGED
@@ -132,6 +132,9 @@ class GetWorkflowsInput:
         self.sort_desc: bool = (
             False  # If true, sort by created_at in DESC order. Default false (in ASC order).
         )
+        self.workflow_id_prefix: Optional[str] = (
+            None  # If set, search for workflow IDs starting with this string
+        )
 
 
 class GetQueuedWorkflowsInput(TypedDict):
@@ -282,6 +285,7 @@ class SystemDatabase:
     def insert_workflow_status(
         self,
         status: WorkflowStatusInternal,
+        conn: sa.Connection,
         *,
         max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
     ) -> WorkflowStatuses:
@@ -325,8 +329,7 @@
 
         cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name)  # type: ignore
 
-        with self.engine.begin() as c:
-            results = c.execute(cmd)
+        results = conn.execute(cmd)
 
         row = results.fetchone()
         if row is not None:
@@ -352,28 +355,30 @@
             # Every time we start executing a workflow (and thus attempt to insert its status), we increment `recovery_attempts` by 1.
             # When this number becomes equal to `maxRetries + 1`, we mark the workflow as `RETRIES_EXCEEDED`.
             if recovery_attempts > max_recovery_attempts + 1:
-                with self.engine.begin() as c:
-                    c.execute(
-                        sa.delete(SystemSchema.workflow_queue).where(
-                            SystemSchema.workflow_queue.c.workflow_uuid
-                            == status["workflow_uuid"]
-                        )
+                delete_cmd = sa.delete(SystemSchema.workflow_queue).where(
+                    SystemSchema.workflow_queue.c.workflow_uuid
+                    == status["workflow_uuid"]
+                )
+                conn.execute(delete_cmd)
+
+                dlq_cmd = (
+                    sa.update(SystemSchema.workflow_status)
+                    .where(
+                        SystemSchema.workflow_status.c.workflow_uuid
+                        == status["workflow_uuid"]
                     )
-                    c.execute(
-                        sa.update(SystemSchema.workflow_status)
-                        .where(
-                            SystemSchema.workflow_status.c.workflow_uuid
-                            == status["workflow_uuid"]
-                        )
-                        .where(
-                            SystemSchema.workflow_status.c.status
-                            == WorkflowStatusString.PENDING.value
-                        )
-                        .values(
-                            status=WorkflowStatusString.RETRIES_EXCEEDED.value,
-                            queue_name=None,
-                        )
+                    .where(
+                        SystemSchema.workflow_status.c.status
+                        == WorkflowStatusString.PENDING.value
+                    )
+                    .values(
+                        status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+                        queue_name=None,
                     )
+                )
+                conn.execute(dlq_cmd)
+                # Need to commit here because we're throwing an exception
+                conn.commit()
                 raise DBOSDeadLetterQueueError(
                     status["workflow_uuid"], max_recovery_attempts
                 )
@@ -652,7 +657,7 @@
             time.sleep(1)
 
     def update_workflow_inputs(
-        self, workflow_uuid: str, inputs: str, conn: Optional[sa.Connection] = None
+        self, workflow_uuid: str, inputs: str, conn: sa.Connection
     ) -> None:
         if self._debug_mode:
             raise Exception("called update_workflow_inputs in debug mode")
@@ -669,11 +674,8 @@
             )
             .returning(SystemSchema.workflow_inputs.c.inputs)
         )
-        if conn is not None:
-            row = conn.execute(cmd).fetchone()
-        else:
-            with self.engine.begin() as c:
-                row = c.execute(cmd).fetchone()
+
+        row = conn.execute(cmd).fetchone()
         if row is not None and row[0] != inputs:
             # In a distributed environment, scheduled workflows are enqueued multiple times with slightly different timestamps
             if not workflow_uuid.startswith("sched-"):
@@ -734,6 +736,12 @@
             query = query.where(
                 SystemSchema.workflow_status.c.workflow_uuid.in_(input.workflow_ids)
             )
+        if input.workflow_id_prefix:
+            query = query.where(
+                SystemSchema.workflow_status.c.workflow_uuid.startswith(
+                    input.workflow_id_prefix
+                )
+            )
         if input.limit:
             query = query.limit(input.limit)
         if input.offset:
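
SQLAlchemy's `ColumnOperators.startswith` compiles to a `LIKE` with an appended `%`, so the new filter matches workflow IDs that begin with the given prefix. A standalone sketch of the generated SQL (a toy table, not the real schema):

    import sqlalchemy as sa

    md = sa.MetaData()
    t = sa.Table("workflow_status", md, sa.Column("workflow_uuid", sa.Text))

    query = sa.select(t).where(t.c.workflow_uuid.startswith("task-2024-"))
    print(query.compile(compile_kwargs={"literal_binds": True}))
    # SELECT workflow_status.workflow_uuid FROM workflow_status
    # WHERE (workflow_status.workflow_uuid LIKE 'task-2024-' || '%')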
@@ -1380,18 +1388,17 @@
             )
         return value
 
-    def enqueue(self, workflow_id: str, queue_name: str) -> None:
+    def enqueue(self, workflow_id: str, queue_name: str, conn: sa.Connection) -> None:
         if self._debug_mode:
             raise Exception("called enqueue in debug mode")
-        with self.engine.begin() as c:
-            c.execute(
-                pg.insert(SystemSchema.workflow_queue)
-                .values(
-                    workflow_uuid=workflow_id,
-                    queue_name=queue_name,
-                )
-                .on_conflict_do_nothing()
+        conn.execute(
+            pg.insert(SystemSchema.workflow_queue)
+            .values(
+                workflow_uuid=workflow_id,
+                queue_name=queue_name,
             )
+            .on_conflict_do_nothing()
+        )
 
     def start_queued_workflows(
         self, queue: "Queue", executor_id: str, app_version: str
@@ -1646,6 +1653,30 @@
             )
         return result
 
+    def init_workflow(
+        self,
+        status: WorkflowStatusInternal,
+        inputs: str,
+        *,
+        max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+    ) -> WorkflowStatuses:
+        """
+        Synchronously record the status and inputs for workflows in a single transaction
+        """
+        with self.engine.begin() as conn:
+            wf_status = self.insert_workflow_status(
+                status, conn, max_recovery_attempts=max_recovery_attempts
+            )
+            # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
+            self.update_workflow_inputs(status["workflow_uuid"], inputs, conn)
+
+            if (
+                status["queue_name"] is not None
+                and wf_status == WorkflowStatusString.ENQUEUED.value
+            ):
+                self.enqueue(status["workflow_uuid"], status["queue_name"], conn)
+        return wf_status
+
 
 def reset_system_database(config: ConfigFile) -> None:
     sysdb_name = (
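
`init_workflow` is the centerpiece of this release: the status insert, input recording, and queue insert now share one connection inside a single `engine.begin()` block, closing the crash window that the removed TODOs in `_core.py` described (a workflow could previously be recorded as ENQUEUED without its queue row ever being written). A minimal, self-contained sketch of the same pattern; SQLite and the toy tables stand in for the real dbos schema:

    import sqlalchemy as sa

    engine = sa.create_engine("sqlite:///:memory:")
    md = sa.MetaData()
    status = sa.Table("workflow_status", md, sa.Column("id", sa.Text, primary_key=True))
    queue = sa.Table("workflow_queue", md, sa.Column("id", sa.Text, primary_key=True))
    md.create_all(engine)

    def init_workflow(workflow_id: str, enqueue: bool) -> None:
        # engine.begin() commits on success and rolls back on any exception,
        # so either both rows land or neither does.
        with engine.begin() as conn:
            conn.execute(sa.insert(status).values(id=workflow_id))
            if enqueue:
                conn.execute(sa.insert(queue).values(id=workflow_id))

    init_workflow("wf-1", enqueue=True)

One wrinkle visible in the hunks above: the dead-letter-queue path calls `conn.commit()` explicitly before raising `DBOSDeadLetterQueueError`, because the enclosing transaction would otherwise roll back the RETRIES_EXCEEDED update when the exception propagates.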
dbos/_workflow_commands.py CHANGED
@@ -2,6 +2,8 @@ import json
 import uuid
 from typing import Any, List, Optional
 
+from dbos._error import DBOSException
+
 from . import _serialization
 from ._app_db import ApplicationDatabase
 from ._sys_db import (
@@ -71,6 +73,7 @@ def list_workflows(
     offset: Optional[int] = None,
     sort_desc: bool = False,
     request: bool = False,
+    workflow_id_prefix: Optional[str] = None,
 ) -> List[WorkflowStatus]:
     input = GetWorkflowsInput()
     input.workflow_ids = workflow_ids
@@ -83,6 +86,7 @@
     input.name = name
     input.offset = offset
     input.sort_desc = sort_desc
+    input.workflow_id_prefix = workflow_id_prefix
 
     output: GetWorkflowsOutput = sys_db.get_workflows(input)
     infos: List[WorkflowStatus] = []
@@ -183,3 +187,25 @@ def list_workflow_steps(
     merged_steps = steps + transactions
     merged_steps.sort(key=lambda step: step["function_id"])
     return merged_steps
+
+
+def fork_workflow(
+    sys_db: SystemDatabase,
+    app_db: ApplicationDatabase,
+    workflow_id: str,
+    start_step: int,
+) -> str:
+    def get_max_function_id(workflow_uuid: str) -> int:
+        max_transactions = app_db.get_max_function_id(workflow_uuid) or 0
+        max_operations = sys_db.get_max_function_id(workflow_uuid) or 0
+        return max(max_transactions, max_operations)
+
+    max_function_id = get_max_function_id(workflow_id)
+    if max_function_id > 0 and start_step > max_function_id:
+        raise DBOSException(
+            f"Cannot fork workflow {workflow_id} from step {start_step}. The workflow has {max_function_id} steps."
+        )
+    forked_workflow_id = str(uuid.uuid4())
+    app_db.clone_workflow_transactions(workflow_id, forked_workflow_id, start_step)
+    sys_db.fork_workflow(workflow_id, forked_workflow_id, start_step)
+    return forked_workflow_id
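
For reference, `list_workflow_steps` (the context lines above) merges step records from the system database with transaction records from the application database into one timeline keyed by `function_id`. A toy sketch of that merge; the `function_name` key is illustrative, not the exact `StepInfo` shape:

    steps = [
        {"function_id": 1, "function_name": "fetch_data"},
        {"function_id": 3, "function_name": "send_email"},
    ]
    transactions = [{"function_id": 2, "function_name": "insert_order"}]

    merged_steps = steps + transactions
    merged_steps.sort(key=lambda step: step["function_id"])
    print([s["function_id"] for s in merged_steps])  # [1, 2, 3]

`fork_workflow` validates `start_step` against the highest recorded `function_id` across both databases, clones the transaction results recorded before `start_step` under a fresh workflow ID, and returns that ID for `_client.py` and `_dbos.py` to wrap in a handle.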
dbos-0.26.0a15.dist-info/METADATA → dbos-0.26.0a19.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.26.0a15
+Version: 0.26.0a19
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
dbos-0.26.0a15.dist-info/RECORD → dbos-0.26.0a19.dist-info/RECORD CHANGED
@@ -1,19 +1,19 @@
-dbos-0.26.0a15.dist-info/METADATA,sha256=-vvSiXF-3_KrI9sYYJ7xD6HQXDVGyI0c0F1Jo6rZQoM,5554
-dbos-0.26.0a15.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
-dbos-0.26.0a15.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
-dbos-0.26.0a15.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos-0.26.0a19.dist-info/METADATA,sha256=YWeav0kTgNnVV03IK_iG7vuKR04nKeOnIM4M1cmCELw,5554
+dbos-0.26.0a19.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-0.26.0a19.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-0.26.0a19.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=3NQfGlBiiUSM_v88STdVP3rNZvGkUL_9WbSotKb8Voo,873
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=RrbABfR1D3p9c_QLrCSrgFuYce6FKi0fjMRIYLjO_Y8,9038
-dbos/_app_db.py,sha256=Q9lEyCJFoZMTlnjMO8Pj8bczVmVWyDOP8qPQ6l5PpEU,11241
+dbos/_app_db.py,sha256=obNlgC9IZ20y8tqQeA1q4TjceG3jBFalxz70ieDOWCA,11332
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=5iaoFsu5wAqwjjj3EWusZ1eDbBAW8FwYazhokdCJ9h4,10964
+dbos/_client.py,sha256=PtOZv_4TCd7I0y9kw_0a93Lf_cUkytdDjCdrrHnyTS4,12020
 dbos/_conductor/conductor.py,sha256=HYzVL29IMMrs2Mnms_7cHJynCnmmEN5SDQOMjzn3UoU,16840
 dbos/_conductor/protocol.py,sha256=xN7pmooyF1pqbH1b6WhllU5718P7zSb_b0KCwA6bzcs,6716
 dbos/_context.py,sha256=I8sLkdKTTkZEz7wG-MjynaQB6XEF2bLXuwNksiauP7w,19430
-dbos/_core.py,sha256=de8GecFmW5DNf5dYfnpSX3IDO24Wc6pBpCC1VZ1iVyI,45505
+dbos/_core.py,sha256=uxDIJui4WS_2V1k2np0Ifue_IRzLTyq-c52bgZSQYn4,45118
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=xl5swjxBSRfCPn_8_ZagOdmjPYf1SDjtla2sAH4v0dY,47390
+dbos/_dbos.py,sha256=Waz_5d9PkDjxD9LUe-nDf4gn5ds2kO0ZyJFQd8Tkz9w,47155
 dbos/_dbos_config.py,sha256=m05IFjM0jSwZBsnFMF_4qP2JkjVFc0gqyM2tnotXq20,20636
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=NmcgqmR5rQA_4igfeqh8ugNT2z3YmoOvuep_MEtxTiY,5854
@@ -45,7 +45,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
 dbos/_schemas/system_database.py,sha256=W9eSpL7SZzQkxcEZ4W07BOcwkkDr35b9oCjUOgfHWek,5336
 dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
-dbos/_sys_db.py,sha256=uQO45HVR6bS1Fa_iH8uQKnFPFuNTLM0obPXtLuKd_vc,70117
+dbos/_sys_db.py,sha256=kfNR9R7rQ6MTqBuPt4OI5nZElIJNXlGuUjG_ypGKHWI,71195
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py,sha256=nJMN3ZD2lmwg4Dcgmiwqc-tQGuCJuJal2Xl85iA277U,2453
@@ -58,11 +58,11 @@ dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sh
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=dFDSFlta-rfA3-ahIRLYwnnoAOmlavdxAGllqwFgnCA,2440
 dbos/_utils.py,sha256=nFRUHzVjXG5AusF85AlYHikj63Tzi-kQm992ihsrAxA,201
-dbos/_workflow_commands.py,sha256=YJamxSQqI0pQMKo-G1aGJkLHB2JMXaijlBaHabq11kg,6165
+dbos/_workflow_commands.py,sha256=BzvWGOQ-4fbHlAoFI5Hdwk1PimUUgBn1kISLnSMt0To,7189
 dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
 dbos/cli/_template_init.py,sha256=-WW3kbq0W_Tq4WbMqb1UGJG3xvJb3woEY5VspG95Srk,2857
 dbos/cli/cli.py,sha256=1qCTs__A9LOEfU44XZ6TufwmRwe68ZEwbWEPli3vnVM,17873
 dbos/dbos-config.schema.json,sha256=i7jcxXqByKq0Jzv3nAUavONtj03vTwj6vWP4ylmBr8o,5694
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.26.0a15.dist-info/RECORD,,
+dbos-0.26.0a19.dist-info/RECORD,,