dbos 1.2.0a4__py3-none-any.whl → 1.2.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of dbos might be problematic.

dbos/_client.py CHANGED
@@ -109,6 +109,7 @@ class DBOSClient:
             },
             sys_db_name=system_database,
         )
+        self._sys_db.check_connection()
         self._app_db = ApplicationDatabase(
             database_url=database_url,
             engine_kwargs={
@@ -231,7 +232,7 @@ class DBOSClient:
             "workflow_deadline_epoch_ms": None,
         }
         with self._sys_db.engine.begin() as conn:
-            self._sys_db.insert_workflow_status(
+            self._sys_db._insert_workflow_status(
                 status, conn, max_recovery_attempts=None
             )
         self._sys_db.send(status["workflow_uuid"], 0, destination_id, message, topic)
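The first hunk adds a connectivity probe to the client constructor: check_connection() (defined in dbos/_sys_db.py below) runs SELECT 1 against the system database, so a misconfigured URL now fails at client construction rather than on the first workflow operation. A minimal sketch of the effect, assuming a standard DBOSClient(database_url) constructor; the URL is a placeholder:

    from dbos import DBOSClient

    try:
        # Fails immediately if the system database is unreachable
        client = DBOSClient("postgresql://postgres:dbos@localhost:5432/app_db")
    except Exception as e:
        print(f"Could not connect to the DBOS system database: {e}")
        raise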
dbos/_core.py CHANGED
@@ -20,8 +20,10 @@ from typing import (
     cast,
 )
 
+import psycopg
+
 from dbos._outcome import Immediate, NoResult, Outcome, Pending
-from dbos._utils import GlobalParams
+from dbos._utils import GlobalParams, retriable_postgres_exception
 
 from ._app_db import ApplicationDatabase, TransactionResultInternal
 
@@ -931,12 +933,18 @@ def decorate_transaction(
                         )
                         break
                     except DBAPIError as dbapi_error:
-                        if dbapi_error.orig.sqlstate == "40001":  # type: ignore
+                        driver_error = cast(
+                            Optional[psycopg.OperationalError], dbapi_error.orig
+                        )
+                        if retriable_postgres_exception(dbapi_error) or (
+                            driver_error is not None
+                            and driver_error.sqlstate == "40001"
+                        ):
                             # Retry on serialization failure
                             span = ctx.get_current_span()
                             if span:
                                 span.add_event(
-                                    "Transaction Serialization Failure",
+                                    "Transaction Failure",
                                     {"retry_wait_seconds": retry_wait_seconds},
                                 )
                             time.sleep(retry_wait_seconds)
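The decorate_transaction hunk broadens the retry predicate: transactions now retry not only on serialization failures (SQLSTATE 40001) but on any error that retriable_postgres_exception classifies as transient. A standalone sketch of the pattern; run_txn and the backoff constants here are illustrative, not dbos APIs:

    import time
    from typing import Any, Callable, Optional, cast

    import psycopg
    from sqlalchemy.exc import DBAPIError

    def run_with_retries(
        run_txn: Callable[[], Any],
        retriable_postgres_exception: Callable[[DBAPIError], bool],
    ) -> Any:
        retry_wait_seconds = 0.1
        while True:
            try:
                return run_txn()
            except DBAPIError as dbapi_error:
                driver_error = cast(Optional[psycopg.OperationalError], dbapi_error.orig)
                # Retry on transient connection errors or serialization failures
                if retriable_postgres_exception(dbapi_error) or (
                    driver_error is not None and driver_error.sqlstate == "40001"
                ):
                    time.sleep(retry_wait_seconds)
                    retry_wait_seconds = min(retry_wait_seconds * 2, 2.0)  # capped backoff
                else:
                    raise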
dbos/_dbos_config.py CHANGED
@@ -407,6 +407,7 @@ def configure_db_engine_parameters(
         "pool_timeout": 30,
         "max_overflow": 0,
         "pool_size": 20,
+        "pool_pre_ping": True,
     }
     # If user-provided kwargs are present, use them instead
     user_kwargs = data.get("db_engine_kwargs")
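pool_pre_ping is a standard SQLAlchemy engine flag: on every checkout the pool issues a lightweight ping and transparently replaces connections that have gone stale, which pairs naturally with the retry logic added elsewhere in this release. A minimal sketch of the resulting default engine configuration (the URL is a placeholder):

    import sqlalchemy as sa

    engine = sa.create_engine(
        "postgresql+psycopg://postgres:dbos@localhost:5432/app_db",
        pool_timeout=30,     # seconds to wait for a free pooled connection
        max_overflow=0,      # never exceed pool_size
        pool_size=20,        # connections kept in the pool
        pool_pre_ping=True,  # ping before handing out a connection
    )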
dbos/_queue.py CHANGED
@@ -98,12 +98,8 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
                 if not isinstance(
                     e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
                 ):
-                    dbos.logger.warning(
-                        f"Exception encountered in queue thread: {traceback.format_exc()}"
-                    )
-            except Exception:
+                    dbos.logger.warning(f"Exception encountered in queue thread: {e}")
+            except Exception as e:
                 if not stop_event.is_set():
                     # Only print the error if the thread is not stopping
-                    dbos.logger.warning(
-                        f"Exception encountered in queue thread: {traceback.format_exc()}"
-                    )
+                    dbos.logger.warning(f"Exception encountered in queue thread: {e}")
dbos/_sys_db.py CHANGED
@@ -1,7 +1,9 @@
 import datetime
+import functools
 import json
 import logging
 import os
+import random
 import re
 import threading
 import time
@@ -17,6 +19,7 @@ from typing import (
     Sequence,
     TypedDict,
     TypeVar,
+    cast,
 )
 
 import psycopg
@@ -27,7 +30,7 @@ from alembic.config import Config
 from sqlalchemy.exc import DBAPIError
 from sqlalchemy.sql import func
 
-from dbos._utils import INTERNAL_QUEUE_NAME
+from dbos._utils import INTERNAL_QUEUE_NAME, retriable_postgres_exception
 
 from . import _serialization
 from ._context import get_local_dbos_context
@@ -268,6 +271,51 @@ class ThreadSafeConditionDict:
             dbos_logger.warning(f"Key {key} not found in condition dictionary.")
 
 
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+def db_retry(
+    initial_backoff: float = 1.0, max_backoff: float = 60.0
+) -> Callable[[F], F]:
+    """
+    If a workflow encounters a database connection issue while performing an operation,
+    block the workflow and retry the operation until it reconnects and succeeds.
+
+    In other words, if DBOS loses its database connection, everything pauses until the connection is recovered,
+    trading off availability for correctness.
+    """
+
+    def decorator(func: F) -> F:
+        @functools.wraps(func)
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            retries: int = 0
+            backoff: float = initial_backoff
+            while True:
+                try:
+                    return func(*args, **kwargs)
+                except DBAPIError as e:
+
+                    # Determine if this is a retriable exception
+                    if not retriable_postgres_exception(e):
+                        raise
+
+                    retries += 1
+                    # Calculate backoff with jitter
+                    actual_backoff: float = backoff * (0.5 + random.random())
+                    dbos_logger.warning(
+                        f"Database connection failed: {str(e)}. "
+                        f"Retrying in {actual_backoff:.2f}s (attempt {retries})"
+                    )
+                    # Sleep with backoff
+                    time.sleep(actual_backoff)
+                    # Increase backoff for next attempt (exponential)
+                    backoff = min(backoff * 2, max_backoff)
+
+        return cast(F, wrapper)
+
+    return decorator
+
+
 class SystemDatabase:
 
     def __init__(
@@ -365,7 +413,7 @@ class SystemDatabase:
             self.notification_conn.close()
         self.engine.dispose()
 
-    def insert_workflow_status(
+    def _insert_workflow_status(
         self,
         status: WorkflowStatusInternal,
         conn: sa.Connection,
@@ -377,6 +425,15 @@ class SystemDatabase:
         wf_status: WorkflowStatuses = status["status"]
         workflow_deadline_epoch_ms: Optional[int] = status["workflow_deadline_epoch_ms"]
 
+        # Values to update when a row already exists for this workflow
+        update_values: dict[str, Any] = {
+            "recovery_attempts": SystemSchema.workflow_status.c.recovery_attempts + 1,
+            "updated_at": func.extract("epoch", func.now()) * 1000,
+        }
+        # Don't update an existing executor ID when enqueueing a workflow.
+        if wf_status != WorkflowStatusString.ENQUEUED.value:
+            update_values["executor_id"] = status["executor_id"]
+
         cmd = (
             pg.insert(SystemSchema.workflow_status)
             .values(
@@ -402,13 +459,7 @@ class SystemDatabase:
             )
             .on_conflict_do_update(
                 index_elements=["workflow_uuid"],
-                set_=dict(
-                    executor_id=status["executor_id"],
-                    recovery_attempts=(
-                        SystemSchema.workflow_status.c.recovery_attempts + 1
-                    ),
-                    updated_at=func.extract("epoch", func.now()) * 1000,
-                ),
+                set_=update_values,
             )
         )
 
@@ -474,53 +525,46 @@ class SystemDatabase:
 
         return wf_status, workflow_deadline_epoch_ms
 
+    @db_retry()
     def update_workflow_status(
         self,
         status: WorkflowStatusInternal,
-        *,
-        conn: Optional[sa.Connection] = None,
     ) -> None:
         if self._debug_mode:
             raise Exception("called update_workflow_status in debug mode")
         wf_status: WorkflowStatuses = status["status"]
-
-        cmd = (
-            pg.insert(SystemSchema.workflow_status)
-            .values(
-                workflow_uuid=status["workflow_uuid"],
-                status=status["status"],
-                name=status["name"],
-                class_name=status["class_name"],
-                config_name=status["config_name"],
-                output=status["output"],
-                error=status["error"],
-                executor_id=status["executor_id"],
-                application_version=status["app_version"],
-                application_id=status["app_id"],
-                authenticated_user=status["authenticated_user"],
-                authenticated_roles=status["authenticated_roles"],
-                assumed_role=status["assumed_role"],
-                queue_name=status["queue_name"],
-                recovery_attempts=(
-                    1 if wf_status != WorkflowStatusString.ENQUEUED.value else 0
-                ),
-            )
-            .on_conflict_do_update(
-                index_elements=["workflow_uuid"],
-                set_=dict(
-                    status=status["status"],
-                    output=status["output"],
-                    error=status["error"],
-                    updated_at=func.extract("epoch", func.now()) * 1000,
-                ),
-            )
-        )
-
-        if conn is not None:
-            conn.execute(cmd)
-        else:
-            with self.engine.begin() as c:
-                c.execute(cmd)
+        with self.engine.begin() as c:
+            c.execute(
+                pg.insert(SystemSchema.workflow_status)
+                .values(
+                    workflow_uuid=status["workflow_uuid"],
+                    status=status["status"],
+                    name=status["name"],
+                    class_name=status["class_name"],
+                    config_name=status["config_name"],
+                    output=status["output"],
+                    error=status["error"],
+                    executor_id=status["executor_id"],
+                    application_version=status["app_version"],
+                    application_id=status["app_id"],
+                    authenticated_user=status["authenticated_user"],
+                    authenticated_roles=status["authenticated_roles"],
+                    assumed_role=status["assumed_role"],
+                    queue_name=status["queue_name"],
+                    recovery_attempts=(
+                        1 if wf_status != WorkflowStatusString.ENQUEUED.value else 0
+                    ),
+                )
+                .on_conflict_do_update(
+                    index_elements=["workflow_uuid"],
+                    set_=dict(
+                        status=status["status"],
+                        output=status["output"],
+                        error=status["error"],
+                        updated_at=func.extract("epoch", func.now()) * 1000,
+                    ),
+                )
            )
 
     def cancel_workflow(
         self,
@@ -686,6 +730,7 @@ class SystemDatabase:
         )
         return forked_workflow_id
 
+    @db_retry()
     def get_workflow_status(
         self, workflow_uuid: str
     ) -> Optional[WorkflowStatusInternal]:
@@ -735,6 +780,7 @@ class SystemDatabase:
         }
         return status
 
+    @db_retry()
     def await_workflow_result(self, workflow_id: str) -> Any:
         while True:
             with self.engine.begin() as c:
@@ -761,7 +807,7 @@ class SystemDatabase:
                 pass  # CB: I guess we're assuming the WF will show up eventually.
             time.sleep(1)
 
-    def update_workflow_inputs(
+    def _update_workflow_inputs(
         self, workflow_uuid: str, inputs: str, conn: sa.Connection
     ) -> None:
         if self._debug_mode:
@@ -791,6 +837,7 @@ class SystemDatabase:
 
         return
 
+    @db_retry()
     def get_workflow_inputs(
         self, workflow_uuid: str
     ) -> Optional[_serialization.WorkflowInputs]:
@@ -1084,8 +1131,8 @@ class SystemDatabase:
             for row in rows
         ]
 
-    def record_operation_result(
-        self, result: OperationResultInternal, conn: Optional[sa.Connection] = None
+    def _record_operation_result_txn(
+        self, result: OperationResultInternal, conn: sa.Connection
     ) -> None:
         if self._debug_mode:
             raise Exception("called record_operation_result in debug mode")
@@ -1100,16 +1147,18 @@ class SystemDatabase:
             error=error,
         )
         try:
-            if conn is not None:
-                conn.execute(sql)
-            else:
-                with self.engine.begin() as c:
-                    c.execute(sql)
+            conn.execute(sql)
         except DBAPIError as dbapi_error:
             if dbapi_error.orig.sqlstate == "23505":  # type: ignore
                 raise DBOSWorkflowConflictIDError(result["workflow_uuid"])
             raise
 
+    @db_retry()
+    def record_operation_result(self, result: OperationResultInternal) -> None:
+        with self.engine.begin() as c:
+            self._record_operation_result_txn(result, c)
+
+    @db_retry()
     def record_get_result(
         self, result_workflow_id: str, output: Optional[str], error: Optional[str]
     ) -> None:
@@ -1135,6 +1184,7 @@ class SystemDatabase:
         with self.engine.begin() as c:
             c.execute(sql)
 
+    @db_retry()
     def record_child_workflow(
         self,
         parentUUID: str,
@@ -1159,13 +1209,12 @@ class SystemDatabase:
                 raise DBOSWorkflowConflictIDError(parentUUID)
             raise
 
-    def check_operation_execution(
+    def _check_operation_execution_txn(
         self,
         workflow_id: str,
         function_id: int,
         function_name: str,
-        *,
-        conn: Optional[sa.Connection] = None,
+        conn: sa.Connection,
     ) -> Optional[RecordedResult]:
         # First query: Retrieve the workflow status
         workflow_status_sql = sa.select(
@@ -1183,13 +1232,8 @@ class SystemDatabase:
         )
 
         # Execute both queries
-        if conn is not None:
-            workflow_status_rows = conn.execute(workflow_status_sql).all()
-            operation_output_rows = conn.execute(operation_output_sql).all()
-        else:
-            with self.engine.begin() as c:
-                workflow_status_rows = c.execute(workflow_status_sql).all()
-                operation_output_rows = c.execute(operation_output_sql).all()
+        workflow_status_rows = conn.execute(workflow_status_sql).all()
+        operation_output_rows = conn.execute(operation_output_sql).all()
 
         # Check if the workflow exists
         assert (
@@ -1231,6 +1275,16 @@ class SystemDatabase:
         }
         return result
 
+    @db_retry()
+    def check_operation_execution(
+        self, workflow_id: str, function_id: int, function_name: str
+    ) -> Optional[RecordedResult]:
+        with self.engine.begin() as c:
+            return self._check_operation_execution_txn(
+                workflow_id, function_id, function_name, c
+            )
+
+    @db_retry()
     def check_child_workflow(
         self, workflow_uuid: str, function_id: int
     ) -> Optional[str]:
@@ -1248,6 +1302,7 @@ class SystemDatabase:
             return None
         return str(row[0])
 
+    @db_retry()
     def send(
         self,
         workflow_uuid: str,
@@ -1259,7 +1314,7 @@ class SystemDatabase:
         function_name = "DBOS.send"
         topic = topic if topic is not None else _dbos_null_topic
         with self.engine.begin() as c:
-            recorded_output = self.check_operation_execution(
+            recorded_output = self._check_operation_execution_txn(
                 workflow_uuid, function_id, function_name, conn=c
             )
             if self._debug_mode and recorded_output is None:
@@ -1297,8 +1352,9 @@ class SystemDatabase:
                 "output": None,
                 "error": None,
             }
-            self.record_operation_result(output, conn=c)
+            self._record_operation_result_txn(output, conn=c)
 
+    @db_retry()
     def recv(
         self,
         workflow_uuid: str,
@@ -1391,7 +1447,7 @@ class SystemDatabase:
             message: Any = None
             if len(rows) > 0:
                 message = _serialization.deserialize(rows[0][0])
-            self.record_operation_result(
+            self._record_operation_result_txn(
                 {
                     "workflow_uuid": workflow_uuid,
                     "function_id": function_id,
@@ -1455,13 +1511,14 @@ class SystemDatabase:
                         dbos_logger.error(f"Unknown channel: {channel}")
             except Exception as e:
                 if self._run_background_processes:
-                    dbos_logger.error(f"Notification listener error: {e}")
+                    dbos_logger.warning(f"Notification listener error: {e}")
                     time.sleep(1)
                     # Then the loop will try to reconnect and restart the listener
             finally:
                 if self.notification_conn is not None:
                     self.notification_conn.close()
 
+    @db_retry()
     def sleep(
         self,
         workflow_uuid: str,
@@ -1501,6 +1558,7 @@ class SystemDatabase:
             time.sleep(duration)
         return duration
 
+    @db_retry()
     def set_event(
         self,
         workflow_uuid: str,
@@ -1510,7 +1568,7 @@ class SystemDatabase:
     ) -> None:
         function_name = "DBOS.setEvent"
         with self.engine.begin() as c:
-            recorded_output = self.check_operation_execution(
+            recorded_output = self._check_operation_execution_txn(
                 workflow_uuid, function_id, function_name, conn=c
             )
             if self._debug_mode and recorded_output is None:
@@ -1542,8 +1600,9 @@ class SystemDatabase:
                 "output": None,
                 "error": None,
             }
-            self.record_operation_result(output, conn=c)
+            self._record_operation_result_txn(output, conn=c)
 
+    @db_retry()
     def get_event(
         self,
         target_uuid: str,
@@ -1634,7 +1693,7 @@ class SystemDatabase:
         )
         return value
 
-    def enqueue(
+    def _enqueue(
         self,
         workflow_id: str,
         queue_name: str,
@@ -1857,6 +1916,7 @@ class SystemDatabase:
         # Return the IDs of all functions we started
         return ret_ids
 
+    @db_retry()
    def remove_from_queue(self, workflow_id: str, queue: "Queue") -> None:
        if self._debug_mode:
            raise Exception("called remove_from_queue in debug mode")
@@ -1945,6 +2005,7 @@ class SystemDatabase:
         )
         return result
 
+    @db_retry()
     def init_workflow(
         self,
         status: WorkflowStatusInternal,
@@ -1957,17 +2018,17 @@ class SystemDatabase:
         Synchronously record the status and inputs for workflows in a single transaction
         """
         with self.engine.begin() as conn:
-            wf_status, workflow_deadline_epoch_ms = self.insert_workflow_status(
+            wf_status, workflow_deadline_epoch_ms = self._insert_workflow_status(
                 status, conn, max_recovery_attempts=max_recovery_attempts
             )
             # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
-            self.update_workflow_inputs(status["workflow_uuid"], inputs, conn)
+            self._update_workflow_inputs(status["workflow_uuid"], inputs, conn)
 
             if (
                 status["queue_name"] is not None
                 and wf_status == WorkflowStatusString.ENQUEUED.value
             ):
-                self.enqueue(
+                self._enqueue(
                     status["workflow_uuid"],
                     status["queue_name"],
                     conn,
@@ -1975,6 +2036,14 @@ class SystemDatabase:
             )
         return wf_status, workflow_deadline_epoch_ms
 
+    def check_connection(self) -> None:
+        try:
+            with self.engine.begin() as conn:
+                conn.execute(sa.text("SELECT 1")).fetchall()
+        except Exception as e:
+            dbos_logger.error(f"Error connecting to the DBOS system database: {e}")
+            raise
+
 
 def reset_system_database(postgres_db_url: sa.URL, sysdb_name: str) -> None:
     try:
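The new db_retry decorator is the core of this release: every public system-database operation now blocks and retries with jittered exponential backoff while the error is classified as retriable, and re-raises anything else. The _txn-suffixed private variants take an explicit connection so composite operations like send and set_event run inside a single transaction, with the retry applied once at the outer public method. A toy illustration of the backoff arithmetic (this loop only prints the schedule; it is not part of dbos):

    import random

    backoff, max_backoff = 1.0, 60.0  # db_retry's defaults
    for attempt in range(1, 6):
        actual = backoff * (0.5 + random.random())  # jitter in [0.5x, 1.5x)
        print(f"attempt {attempt}: sleep ~{actual:.2f}s")
        backoff = min(backoff * 2, max_backoff)  # double, capped at 60s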
dbos/_utils.py CHANGED
@@ -1,6 +1,9 @@
 import importlib.metadata
 import os
 
+import psycopg
+from sqlalchemy.exc import DBAPIError
+
 INTERNAL_QUEUE_NAME = "_dbos_internal_queue"
 
 request_id_header = "x-request-id"
@@ -15,3 +18,33 @@ class GlobalParams:
     except importlib.metadata.PackageNotFoundError:
         # If package is not installed or during development
         dbos_version = "unknown"
+
+
+def retriable_postgres_exception(e: DBAPIError) -> bool:
+    if e.connection_invalidated:
+        return True
+    if isinstance(e.orig, psycopg.OperationalError):
+        driver_error: psycopg.OperationalError = e.orig
+        pgcode = driver_error.sqlstate or ""
+        # Failure to establish connection
+        if "connection failed" in str(driver_error):
+            return True
+        # Error within database transaction
+        elif "server closed the connection unexpectedly" in str(driver_error):
+            return True
+        # Connection timeout
+        if isinstance(driver_error, psycopg.errors.ConnectionTimeout):
+            return True
+        # Insufficient resources
+        elif pgcode.startswith("53"):
+            return True
+        # Connection exception
+        elif pgcode.startswith("08"):
+            return True
+        # Operator intervention
+        elif pgcode.startswith("57"):
+            return True
+        else:
+            return False
+    else:
+        return False
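retriable_postgres_exception treats invalidated connections, connection-failure messages, connection timeouts, and three SQLSTATE classes as transient. A quick sanity check of those class prefixes, with example codes from the PostgreSQL error-code appendix:

    RETRIABLE_SQLSTATE_PREFIXES = {
        "08": "connection exception (e.g., 08006 connection_failure)",
        "53": "insufficient resources (e.g., 53300 too_many_connections)",
        "57": "operator intervention (e.g., 57P01 admin_shutdown)",
    }

    def looks_retriable(pgcode: str) -> bool:
        # Mirrors only the prefix checks above; the real function also
        # inspects the driver error's message and type
        return any(pgcode.startswith(p) for p in RETRIABLE_SQLSTATE_PREFIXES)

    assert looks_retriable("08006") and looks_retriable("57P01")
    assert not looks_retriable("40001")  # serialization failure is handled by callers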
dbos-1.2.0a6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 1.2.0a4
+Version: 1.2.0a6
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
dbos-1.2.0a6.dist-info/RECORD CHANGED
@@ -1,20 +1,20 @@
-dbos-1.2.0a4.dist-info/METADATA,sha256=Tx0t3cKXZZ4AFcoCMceTSlObY86rhSLkxAedOQw5V6c,13267
-dbos-1.2.0a4.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
-dbos-1.2.0a4.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
-dbos-1.2.0a4.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos-1.2.0a6.dist-info/METADATA,sha256=Dhyh2OiUvqrejsl64mGHV0ypo09eg7_xw-n_xz-aV54,13267
+dbos-1.2.0a6.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-1.2.0a6.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-1.2.0a6.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=NssPCubaBxdiKarOWa-wViz1hdJSkmBGcpLX_gQ4NeA,891
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=TWXi4drrzKFpKkUmEJpJkQBZxAtOalnhtYicEn2nDK0,10618
 dbos/_app_db.py,sha256=0PKqpxJ3EbIaak3Wl0lNl3hXvhBfz4EEHaCw1bUOvIM,9937
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=-nK2GjS9D0qnD2DkRDs7gKxNECwYlsvW6hFCjADlnv0,14186
+dbos/_client.py,sha256=mGDuQRcSdkyEHf1s0rJuqHQiWbqIBt85qijNJSYmBik,14227
 dbos/_conductor/conductor.py,sha256=o0IaZjwnZ2TOyHeP2H4iSX6UnXLXQ4uODvWAKD9hHMs,21703
 dbos/_conductor/protocol.py,sha256=wgOFZxmS81bv0WCB9dAyg0s6QzldpzVKQDoSPeaX0Ws,6967
 dbos/_context.py,sha256=5ajoWAmToAfzzmMLylnJZoL4Ny9rBwZWuG05sXadMIA,24798
-dbos/_core.py,sha256=7ukQH_KClBaMFy0sVTSR5tWylW-RqI9qaReBY-LDKrk,48316
+dbos/_core.py,sha256=m2i9lsHjNKTi8BQyiSOUBrAVH5OvMoBswNZPRpMVIC0,48662
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
 dbos/_dbos.py,sha256=1EhH7r6v2vwW3Z74nK6_Zw8InE1jSXedEsztz0I4ggA,47269
-dbos/_dbos_config.py,sha256=BFL2ol4nrqOPEiu1Dj-Nk3HRiVih0DecOgCdMyENOSQ,20233
+dbos/_dbos_config.py,sha256=JYtEbhjcCxLUhktMgqIEBz7i5nk1Ryg0vqSJHXqdGOo,20264
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=tLJXWqZ4S-ExcaPnxg_i6cVxL6ZxrYlZjaGsklY-s2I,6115
 dbos/_error.py,sha256=q0OQJZTbR8FFHV9hEpAGpz9oWBT5L509zUhmyff7FJw,8500
@@ -38,7 +38,7 @@ dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py,sha256=m90Lc5YH0ZISSq1MyxND6oq3RZrZKrIqEsZtwJ1jWxA,1049
 dbos/_outcome.py,sha256=EXxBg4jXCVJsByDQ1VOCIedmbeq_03S6d-p1vqQrLFU,6810
-dbos/_queue.py,sha256=6cmqB1DoCJUh-y7DetneZRrL5jM5mw0xG9qj7jPu8EE,3687
+dbos/_queue.py,sha256=oDQcydDwYM68U5KQKN6iZiSC-4LXye6KFmSJ7ohG048,3558
 dbos/_recovery.py,sha256=jVMexjfCCNopzyn8gVQzJCmGJaP9G3C1EFaoCQ_Nh7g,2564
 dbos/_registrations.py,sha256=CZt1ElqDjCT7hz6iyT-1av76Yu-iuwu_c9lozO87wvM,7303
 dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
@@ -47,7 +47,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
 dbos/_schemas/system_database.py,sha256=3Z0L72bOgHnusK1hBaETWU9RfiLBP0QnS-fdu41i0yY,5835
 dbos/_serialization.py,sha256=bWuwhXSQcGmiazvhJHA5gwhrRWxtmFmcCFQSDJnqqkU,3666
-dbos/_sys_db.py,sha256=BZdUrFHG8Ze77hIuxwHpsnE--6UymjjhlH7cA3yP_-0,83230
+dbos/_sys_db.py,sha256=dNb2xeidel6-YEApxFCN0TTJZNpYr6Wc8LdFvX3pEb4,85730
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py.dbos,sha256=aQnBPSSQpkB8ERfhf7gB7P9tsU6OPKhZscfeh0yiaD8,2702
@@ -59,7 +59,7 @@ dbos/_templates/dbos-db-starter/migrations/script.py.mako,sha256=MEqL-2qATlST9TA
 dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sha256=MpS7LGaJS0CpvsjhfDkp9EJqvMvVCjRPfUp4c0aE2ys,941
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=yN6GRDKu_1p-EqtQLNarMocPfga2ZuqpzStzzSPYhzo,2732
-dbos/_utils.py,sha256=UbpMYRBSyvJqdXeWAnfSw8xXM1R1mfnyl1oTunhEjJM,513
+dbos/_utils.py,sha256=uywq1QrjMwy17btjxW4bES49povlQwYwYbvKwMT6C2U,1575
 dbos/_workflow_commands.py,sha256=UCpHWvCEXjVZtf5FNanFvtJpgUJDSI1EFBqQP0x_2A0,3346
 dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
 dbos/cli/_template_init.py,sha256=7JBcpMqP1r2mfCnvWatu33z8ctEGHJarlZYKgB83cXE,2972
@@ -67,4 +67,4 @@ dbos/cli/cli.py,sha256=HinoCGrAUTiSeq7AAoCFfhdiE0uDw7vLMuDMN1_YTLI,20705
 dbos/dbos-config.schema.json,sha256=CjaspeYmOkx6Ip_pcxtmfXJTn_YGdSx_0pcPBF7KZmo,6060
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-1.2.0a4.dist-info/RECORD,,
+dbos-1.2.0a6.dist-info/RECORD,,