dbos 0.21.0a5__py3-none-any.whl → 0.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dbos might be problematic.

dbos/_admin_server.py CHANGED
@@ -16,6 +16,7 @@ if TYPE_CHECKING:
 _health_check_path = "/dbos-healthz"
 _workflow_recovery_path = "/dbos-workflow-recovery"
 _deactivate_path = "/deactivate"
+_workflow_queues_metadata_path = "/dbos-workflow-queues-metadata"
 # /workflows/:workflow_id/cancel
 # /workflows/:workflow_id/resume
 # /workflows/:workflow_id/restart
@@ -64,6 +65,26 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
             self.send_response(200)
             self._end_headers()
             self.wfile.write("deactivated".encode("utf-8"))
+        elif self.path == _workflow_queues_metadata_path:
+            queue_metadata_array = []
+            from ._dbos import _get_or_create_dbos_registry
+
+            registry = _get_or_create_dbos_registry()
+            for queue in registry.queue_info_map.values():
+                queue_metadata = {
+                    "name": queue.name,
+                    "concurrency": queue.concurrency,
+                    "workerConcurrency": queue.worker_concurrency,
+                    "rateLimit": queue.limiter,
+                }
+                # Remove keys with None values
+                queue_metadata = {
+                    k: v for k, v in queue_metadata.items() if v is not None
+                }
+                queue_metadata_array.append(queue_metadata)
+            self.send_response(200)
+            self._end_headers()
+            self.wfile.write(json.dumps(queue_metadata_array).encode("utf-8"))
         else:
             self.send_response(404)
             self._end_headers()
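The new `/dbos-workflow-queues-metadata` endpoint returns the registered queues as a JSON array. A minimal sketch of querying it, assuming a launched DBOS app whose admin server listens on its default port 3001 (per `launch()` in dbos/_dbos.py below):

```python
# Minimal sketch: query the new queue-metadata endpoint of a locally
# running DBOS admin server (default admin port 3001 per this diff).
import json
import urllib.request

with urllib.request.urlopen(
    "http://localhost:3001/dbos-workflow-queues-metadata"
) as resp:
    # Per the handler above, each entry has "name" plus "concurrency",
    # "workerConcurrency", and "rateLimit" when those are configured.
    for queue_metadata in json.loads(resp.read()):
        print(queue_metadata["name"], queue_metadata)
```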
dbos/_cloudutils/cloudutils.py CHANGED
@@ -29,9 +29,11 @@ class DBOSCloudCredentials:
 @dataclass
 class UserProfile:
     Name: str
-    Email: str
     Organization: str
-    SubscriptionPlan: str
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.Name = kwargs.get("Name", "")
+        self.Organization = kwargs.get("Organization", "")


 class AppLanguages(Enum):
dbos/_cloudutils/databases.py CHANGED
@@ -23,6 +23,10 @@ class UserDBCredentials:
     RoleName: str
     Password: str

+    def __init__(self, **kwargs: Any) -> None:
+        self.RoleName = kwargs.get("RoleName", "")
+        self.Password = kwargs.get("Password", "")
+


 @dataclass
 class UserDBInstance:
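Both `_cloudutils` changes swap generated dataclass constructors for permissive `**kwargs` initializers, so extra or missing fields in a cloud API response no longer raise `TypeError`. A sketch of the behavior difference (the strict variant below is hypothetical, shown only for contrast):

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class StrictProfile:  # hypothetical: the old generated-constructor style
    Name: str
    Organization: str


class TolerantProfile:  # the new style from this diff
    Name: str
    Organization: str

    def __init__(self, **kwargs: Any) -> None:
        self.Name = kwargs.get("Name", "")
        self.Organization = kwargs.get("Organization", "")


payload = {"Name": "a", "Organization": "b", "SubscriptionPlan": "pro"}
TolerantProfile(**payload)  # OK: the unknown "SubscriptionPlan" key is ignored
# StrictProfile(**payload)  # TypeError: unexpected keyword argument
```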
dbos/_context.py CHANGED
@@ -49,7 +49,6 @@ class TracedAttributes(TypedDict, total=False):
 class DBOSContext:
     def __init__(self) -> None:
         self.executor_id = os.environ.get("DBOS__VMID", "local")
-        self.app_version = os.environ.get("DBOS__APPVERSION", "")
         self.app_id = os.environ.get("DBOS__APPID", "")

         self.logger = dbos_logger
dbos/_core.py CHANGED
@@ -63,6 +63,7 @@ from ._registrations import (
     get_or_create_func_info,
     get_temp_workflow_type,
     set_dbos_func_name,
+    set_func_info,
     set_temp_workflow_type,
 )
 from ._roles import check_required_roles
@@ -162,7 +163,7 @@ def _init_workflow(
         "output": None,
         "error": None,
         "app_id": ctx.app_id,
-        "app_version": ctx.app_version,
+        "app_version": dbos.app_version,
         "executor_id": ctx.executor_id,
         "request": (
             _serialization.serialize(ctx.request) if ctx.request is not None else None
@@ -286,6 +287,7 @@ def execute_workflow_by_id(
     ctx.request = (
         _serialization.deserialize(request) if request is not None else None
     )
+    # If this function belongs to a configured class, add that class instance as its first argument
     if status["config_name"] is not None:
         config_name = status["config_name"]
         class_name = status["class_name"]
@@ -295,28 +297,9 @@ def execute_workflow_by_id(
                 workflow_id,
                 f"Cannot execute workflow because instance '{iname}' is not registered",
             )
-
-        if startNew:
-            return start_workflow(
-                dbos,
-                wf_func,
-                status["queue_name"],
-                True,
-                dbos._registry.instance_info_map[iname],
-                *inputs["args"],
-                **inputs["kwargs"],
-            )
-        else:
-            with SetWorkflowID(workflow_id):
-                return start_workflow(
-                    dbos,
-                    wf_func,
-                    status["queue_name"],
-                    True,
-                    dbos._registry.instance_info_map[iname],
-                    *inputs["args"],
-                    **inputs["kwargs"],
-                )
+        class_instance = dbos._registry.instance_info_map[iname]
+        inputs["args"] = (class_instance,) + inputs["args"]
+    # If this function is a class method, add that class object as its first argument
     elif status["class_name"] is not None:
         class_name = status["class_name"]
         if class_name not in dbos._registry.class_info_map:
@@ -324,30 +307,20 @@ def execute_workflow_by_id(
                 workflow_id,
                 f"Cannot execute workflow because class '{class_name}' is not registered",
             )
+        class_object = dbos._registry.class_info_map[class_name]
+        inputs["args"] = (class_object,) + inputs["args"]

-        if startNew:
-            return start_workflow(
-                dbos,
-                wf_func,
-                status["queue_name"],
-                True,
-                dbos._registry.class_info_map[class_name],
-                *inputs["args"],
-                **inputs["kwargs"],
-            )
-        else:
-            with SetWorkflowID(workflow_id):
-                return start_workflow(
-                    dbos,
-                    wf_func,
-                    status["queue_name"],
-                    True,
-                    dbos._registry.class_info_map[class_name],
-                    *inputs["args"],
-                    **inputs["kwargs"],
-                )
+    if startNew:
+        return start_workflow(
+            dbos,
+            wf_func,
+            status["queue_name"],
+            True,
+            *inputs["args"],
+            **inputs["kwargs"],
+        )
     else:
-        if startNew:
+        with SetWorkflowID(workflow_id):
             return start_workflow(
                 dbos,
                 wf_func,
@@ -356,16 +329,6 @@ def execute_workflow_by_id(
                 *inputs["args"],
                 **inputs["kwargs"],
             )
-    else:
-        with SetWorkflowID(workflow_id):
-            return start_workflow(
-                dbos,
-                wf_func,
-                status["queue_name"],
-                True,
-                *inputs["args"],
-                **inputs["kwargs"],
-            )


 @overload
@@ -398,9 +361,12 @@ def start_workflow(
     *args: P.args,
     **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
+    # If the function has a class, add the class object as its first argument
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
         fself = func.__self__
+    if fself is not None:
+        args = (fself,) + args  # type: ignore

     fi = get_func_info(func)
     if fi is None:
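The refactor above folds a bound method's instance into the positional arguments instead of threading `fself` through separately. As general Python background (not this diff's exact plumbing), a bound method carries its instance as `__self__` and its underlying function as `__func__`:

```python
# General Python background, not DBOS-specific code: a bound method's
# instance can be folded into the positional arguments.
class Greeter:
    def greet(self, name: str) -> str:
        return f"hello {name}"


bound = Greeter().greet
fself = getattr(bound, "__self__", None)
args: tuple = ("world",)
if fself is not None:
    args = (fself,) + args        # instance becomes an ordinary argument
print(bound.__func__(*args))      # -> "hello world"
```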
@@ -436,17 +402,13 @@ def start_workflow(
     new_wf_ctx.id_assigned_for_next_workflow = new_wf_ctx.assign_workflow_id()
     new_wf_id = new_wf_ctx.id_assigned_for_next_workflow

-    gin_args: Tuple[Any, ...] = args
-    if fself is not None:
-        gin_args = (fself,)
-
     status = _init_workflow(
         dbos,
         new_wf_ctx,
         inputs=inputs,
         wf_name=get_dbos_func_name(func),
-        class_name=get_dbos_class_name(fi, func, gin_args),
-        config_name=get_config_name(fi, func, gin_args),
+        class_name=get_dbos_class_name(fi, func, args),
+        config_name=get_config_name(fi, func, args),
         temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
         max_recovery_attempts=fi.max_recovery_attempts,
@@ -464,27 +426,15 @@ def start_workflow(
         )
         return WorkflowHandlePolling(new_wf_id, dbos)

-    if fself is not None:
-        future = dbos._executor.submit(
-            cast(Callable[..., R], _execute_workflow_wthread),
-            dbos,
-            status,
-            func,
-            new_wf_ctx,
-            fself,
-            *args,
-            **kwargs,
-        )
-    else:
-        future = dbos._executor.submit(
-            cast(Callable[..., R], _execute_workflow_wthread),
-            dbos,
-            status,
-            func,
-            new_wf_ctx,
-            *args,
-            **kwargs,
-        )
+    future = dbos._executor.submit(
+        cast(Callable[..., R], _execute_workflow_wthread),
+        dbos,
+        status,
+        func,
+        new_wf_ctx,
+        *args,
+        **kwargs,
+    )
     return WorkflowHandleFuture(new_wf_id, future, dbos)

@@ -516,6 +466,8 @@ def workflow_wrapper(

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> R:
+        fi = get_func_info(func)
+        assert fi is not None
         if dbosreg.dbos is None:
             raise DBOSException(
                 f"Function {func.__name__} invoked before DBOS initialized"
@@ -572,7 +524,7 @@ def decorate_workflow(
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:
     def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
-        reg.register_wf_function(func.__qualname__, wrapped_func)
+        reg.register_wf_function(func.__qualname__, wrapped_func, "workflow")
         return wrapped_func

     return _workflow_decorator
@@ -724,8 +676,12 @@ def decorate_transaction(
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "transaction")
-        dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
+        dbosreg.register_wf_function(
+            get_dbos_func_name(temp_wf), wrapped_wf, "transaction"
+        )
         wrapper.__orig_func = temp_wf  # type: ignore
+        set_func_info(wrapped_wf, get_or_create_func_info(func))
+        set_func_info(temp_wf, get_or_create_func_info(func))

         return cast(F, wrapper)

@@ -873,8 +829,10 @@ def decorate_step(
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "step")
-        dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
+        dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf, "step")
         wrapper.__orig_func = temp_wf  # type: ignore
+        set_func_info(wrapped_wf, get_or_create_func_info(func))
+        set_func_info(temp_wf, get_or_create_func_info(func))

         return cast(Callable[P, R], wrapper)

dbos/_db_wizard.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
 import time
-from typing import TYPE_CHECKING, Optional, TypedDict
+from typing import TYPE_CHECKING, Optional, TypedDict, cast

 import docker  # type: ignore
 import typer
@@ -45,8 +45,20 @@ def db_wizard(config: "ConfigFile", config_file_path: str) -> "ConfigFile":
             f"Could not connect to Postgres: password authentication failed: {db_connection_error}"
         )
     db_config = config["database"]
+
+    # Read the config file and check if the database hostname/port/username are set. If so, skip the wizard.
+    with open(config_file_path, "r") as file:
+        content = file.read()
+        local_config = yaml.safe_load(content)
+        if "database" not in local_config:
+            local_config["database"] = {}
+        local_config = cast("ConfigFile", local_config)
+
     if (
-        db_config["hostname"] != "localhost"
+        local_config["database"].get("hostname")
+        or local_config["database"].get("port")
+        or local_config["database"].get("username")
+        or db_config["hostname"] != "localhost"
         or db_config["port"] != 5432
        or db_config["username"] != "postgres"
     ):
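With this change, `db_wizard` is skipped whenever the on-disk config explicitly pins a hostname, port, or username, even if those happen to match the localhost defaults. A sketch of the new check against hypothetical file contents:

```python
# Sketch of the new skip condition, using hypothetical file contents.
import yaml  # PyYAML, already used by the wizard

content = """
database:
  hostname: db.example.com
  username: admin
"""
local_config = yaml.safe_load(content)
if "database" not in local_config:
    local_config["database"] = {}

explicitly_configured = bool(
    local_config["database"].get("hostname")
    or local_config["database"].get("port")
    or local_config["database"].get("username")
)
print(explicitly_configured)  # True -> the wizard returns without prompting
```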
dbos/_dbos.py CHANGED
@@ -2,6 +2,8 @@ from __future__ import annotations

 import asyncio
 import atexit
+import hashlib
+import inspect
 import json
 import os
 import sys
@@ -56,7 +58,7 @@ from ._registrations import (
 )
 from ._roles import default_required_roles, required_roles
 from ._scheduler import ScheduledWorkflow, scheduled
-from ._sys_db import WorkflowStatusString, reset_system_database
+from ._sys_db import reset_system_database
 from ._tracer import dbos_tracer

 if TYPE_CHECKING:
@@ -83,7 +85,11 @@ from ._context import (
     get_local_dbos_context,
 )
 from ._dbos_config import ConfigFile, load_config, set_env_vars
-from ._error import DBOSException, DBOSNonExistentWorkflowError
+from ._error import (
+    DBOSConflictingRegistrationError,
+    DBOSException,
+    DBOSNonExistentWorkflowError,
+)
 from ._logger import add_otlp_to_all_loggers, dbos_logger
 from ._sys_db import SystemDatabase

@@ -142,6 +148,7 @@ RegisteredJob = Tuple[
 class DBOSRegistry:
     def __init__(self) -> None:
         self.workflow_info_map: dict[str, Workflow[..., Any]] = {}
+        self.function_type_map: dict[str, str] = {}
         self.class_info_map: dict[str, type] = {}
         self.instance_info_map: dict[str, object] = {}
         self.queue_info_map: dict[str, Queue] = {}
@@ -149,7 +156,11 @@ class DBOSRegistry:
         self.dbos: Optional[DBOS] = None
         self.config: Optional[ConfigFile] = None

-    def register_wf_function(self, name: str, wrapped_func: F) -> None:
+    def register_wf_function(self, name: str, wrapped_func: F, functype: str) -> None:
+        if name in self.function_type_map:
+            if self.function_type_map[name] != functype:
+                raise DBOSConflictingRegistrationError(name)
+        self.function_type_map[name] = functype
         self.workflow_info_map[name] = wrapped_func

     def register_class(self, cls: type, ci: DBOSClassInfo) -> None:
@@ -186,6 +197,22 @@ class DBOSRegistry:
         else:
             self.instance_info_map[fn] = inst

+    def compute_app_version(self) -> str:
+        """
+        An application's version is computed from a hash of the source of its workflows.
+        This is guaranteed to be stable given identical source code because it uses an MD5 hash
+        and because it iterates through the workflows in sorted order.
+        This way, if the app's workflows are updated (which would break recovery), its version changes.
+        App version can be manually set through the DBOS__APPVERSION environment variable.
+        """
+        hasher = hashlib.md5()
+        sources = sorted(
+            [inspect.getsource(wf) for wf in self.workflow_info_map.values()]
+        )
+        for source in sources:
+            hasher.update(source.encode("utf-8"))
+        return hasher.hexdigest()
+

 class DBOS:
     """
@@ -283,6 +310,7 @@ class DBOS:
         self._executor_field: Optional[ThreadPoolExecutor] = None
         self._background_threads: List[threading.Thread] = []
         self._executor_id: str = os.environ.get("DBOS__VMID", "local")
+        self.app_version: str = os.environ.get("DBOS__APPVERSION", "")

         # If using FastAPI, set up middleware and lifecycle events
         if self.fastapi is not None:
@@ -305,7 +333,7 @@ class DBOS:
         temp_send_wf = workflow_wrapper(self._registry, send_temp_workflow)
         set_dbos_func_name(send_temp_workflow, TEMP_SEND_WF_NAME)
         set_temp_workflow_type(send_temp_workflow, "send")
-        self._registry.register_wf_function(TEMP_SEND_WF_NAME, temp_send_wf)
+        self._registry.register_wf_function(TEMP_SEND_WF_NAME, temp_send_wf, "send")

         for handler in dbos_logger.handlers:
             handler.flush()
@@ -351,6 +379,10 @@ class DBOS:
                 dbos_logger.warning(f"DBOS was already launched")
                 return
             self._launched = True
+            if self.app_version == "":
+                self.app_version = self._registry.compute_app_version()
+            dbos_logger.info(f"Application version: {self.app_version}")
+            dbos_tracer.app_version = self.app_version
             self._executor_field = ThreadPoolExecutor(max_workers=64)
             self._sys_db_field = SystemDatabase(self.config)
             self._app_db_field = ApplicationDatabase(self.config)
@@ -359,9 +391,19 @@ class DBOS:
                 admin_port = 3001
             self._admin_server_field = AdminServer(dbos=self, port=admin_port)

-            if not os.environ.get("DBOS__VMID"):
-                workflow_ids = self._sys_db.get_pending_workflows("local")
-                self._executor.submit(startup_recovery_thread, self, workflow_ids)
+            workflow_ids = self._sys_db.get_pending_workflows(
+                self._executor_id, self.app_version
+            )
+            if (len(workflow_ids)) > 0:
+                self.logger.info(
+                    f"Recovering {len(workflow_ids)} workflows from application version {self.app_version}"
+                )
+            else:
+                self.logger.info(
+                    f"No workflows to recover from application version {self.app_version}"
+                )
+
+            self._executor.submit(startup_recovery_thread, self, workflow_ids)

             # Listen to notifications
             notification_listener_thread = threading.Thread(
@@ -398,13 +440,13 @@ class DBOS:
                 self._background_threads.append(poller_thread)
             self._registry.pollers = []

-            dbos_logger.info("DBOS launched")
+            dbos_logger.info("DBOS launched!")

             # Flush handlers and add OTLP to all loggers if enabled
             # to enable their export in DBOS Cloud
             for handler in dbos_logger.handlers:
                 handler.flush()
-            add_otlp_to_all_loggers()
+            add_otlp_to_all_loggers(self.app_version)
         except Exception:
             dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
             raise
@@ -613,6 +655,7 @@ class DBOS:
             workflow_id=workflow_id,
             status=stat["status"],
             name=stat["name"],
+            executor_id=stat["executor_id"],
             recovery_attempts=stat["recovery_attempts"],
             class_name=stat["class_name"],
             config_name=stat["config_name"],
@@ -909,6 +952,7 @@ class WorkflowStatus:
         workflow_id(str): The ID of the workflow execution
         status(str): The status of the execution, from `WorkflowStatusString`
         name(str): The workflow function name
+        executor_id(str): The ID of the executor running the workflow
         class_name(str): For member functions, the name of the class containing the workflow function
         config_name(str): For instance member functions, the name of the class instance for the execution
         queue_name(str): For workflows that are or were queued, the queue name
@@ -922,6 +966,7 @@ class WorkflowStatus:
     workflow_id: str
     status: str
     name: str
+    executor_id: Optional[str]
     class_name: Optional[str]
     config_name: Optional[str]
     queue_name: Optional[str]
@@ -994,6 +1039,10 @@ def _dbos_exit_hook() -> None:
         )
         return
     if not _dbos_global_instance._launched:
+        if _dbos_global_instance.fastapi is not None:
+            # FastAPI lifespan middleware will call launch/destroy, so we can ignore this.
+            # This is likely to happen during fastapi dev runs, where the reloader loads the module multiple times.
+            return
         print("DBOS exiting; DBOS exists but launch() was not called")
         dbos_logger.warning("DBOS exiting; DBOS exists but launch() was not called")
         return
dbos/_error.py CHANGED
@@ -36,6 +36,7 @@ class DBOSErrorCode(Enum):
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8
     ConflictingWorkflowError = 9
+    ConflictingRegistrationError = 25


 class DBOSWorkflowConflictIDError(DBOSException):
@@ -127,3 +128,13 @@ class DBOSMaxStepRetriesExceeded(DBOSException):
             "Step reached maximum retries.",
             dbos_error_code=DBOSErrorCode.MaxStepRetriesExceeded.value,
         )
+
+
+class DBOSConflictingRegistrationError(DBOSException):
+    """Exception raised when conflicting decorators are applied to the same function."""
+
+    def __init__(self, name: str) -> None:
+        super().__init__(
+            f"Operation (Name: {name}) is already registered with a conflicting function type",
+            dbos_error_code=DBOSErrorCode.ConflictingRegistrationError.value,
+        )
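This error backs the new guard in `DBOSRegistry.register_wf_function` (dbos/_dbos.py above): registering the same name twice with the same function type stays idempotent, but a different type now raises. A minimal sketch of the guard in isolation:

```python
# Minimal sketch of the registration guard, trimmed to the type map.
class DBOSConflictingRegistrationError(Exception):
    def __init__(self, name: str) -> None:
        super().__init__(
            f"Operation (Name: {name}) is already registered with a conflicting function type"
        )


function_type_map: dict[str, str] = {}


def register_wf_function(name: str, functype: str) -> None:
    if name in function_type_map and function_type_map[name] != functype:
        raise DBOSConflictingRegistrationError(name)
    function_type_map[name] = functype


register_wf_function("my_func", "transaction")
register_wf_function("my_func", "transaction")  # same type: idempotent, fine
try:
    register_wf_function("my_func", "step")     # conflicting type
except DBOSConflictingRegistrationError as e:
    print(e)
```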
dbos/_logger.py CHANGED
@@ -86,8 +86,9 @@ def config_logger(config: "ConfigFile") -> None:
     dbos_logger.addFilter(_otlp_transformer)


-def add_otlp_to_all_loggers() -> None:
+def add_otlp_to_all_loggers(app_version: str) -> None:
     if _otlp_handler is not None and _otlp_transformer is not None:
+        _otlp_transformer.app_version = app_version
         root = logging.root

         root.addHandler(_otlp_handler)
dbos/_queue.py CHANGED
@@ -76,7 +76,9 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
                 execute_workflow_by_id(dbos, id)
             except OperationalError as e:
                 # Ignore serialization error
-                if not isinstance(e.orig, errors.SerializationFailure):
+                if not isinstance(
+                    e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
+                ):
                     dbos.logger.warning(
                         f"Exception encountered in queue thread: {traceback.format_exc()}"
                     )
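`LockNotAvailable` joins the ignore list because the dequeue query in dbos/_sys_db.py (below) now takes row locks with `FOR UPDATE NOWAIT`: when another worker holds the locks, Postgres fails fast instead of blocking, and that failure is benign contention rather than an error. A sketch of the pattern (the table and URL here are illustrative placeholders, not the real schema):

```python
# Sketch: why LockNotAvailable is expected under FOR UPDATE NOWAIT.
import sqlalchemy as sa
from psycopg import errors
from sqlalchemy.exc import OperationalError

engine = sa.create_engine("postgresql+psycopg://localhost/example")  # placeholder
workflow_queue = sa.table("workflow_queue", sa.column("workflow_uuid"))

try:
    with engine.begin() as c:
        c.execute(
            sa.select(workflow_queue.c.workflow_uuid).with_for_update(nowait=True)
        )
except OperationalError as e:
    if isinstance(e.orig, (errors.SerializationFailure, errors.LockNotAvailable)):
        pass  # benign contention between workers; retry on the next poll
    else:
        raise
```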
dbos/_recovery.py CHANGED
@@ -6,20 +6,29 @@ from typing import TYPE_CHECKING, Any, List

 from ._core import execute_workflow_by_id
 from ._error import DBOSWorkflowFunctionNotFoundError
+from ._sys_db import GetPendingWorkflowsOutput

 if TYPE_CHECKING:
     from ._dbos import DBOS, WorkflowHandle


-def startup_recovery_thread(dbos: "DBOS", workflow_ids: List[str]) -> None:
+def startup_recovery_thread(
+    dbos: "DBOS", pending_workflows: List[GetPendingWorkflowsOutput]
+) -> None:
     """Attempt to recover local pending workflows on startup using a background thread."""
     stop_event = threading.Event()
     dbos.stop_events.append(stop_event)
-    while not stop_event.is_set() and len(workflow_ids) > 0:
+    while not stop_event.is_set() and len(pending_workflows) > 0:
         try:
-            for workflowID in list(workflow_ids):
-                execute_workflow_by_id(dbos, workflowID)
-                workflow_ids.remove(workflowID)
+            for pending_workflow in list(pending_workflows):
+                if (
+                    pending_workflow.queue_name
+                    and pending_workflow.queue_name != "_dbos_internal_queue"
+                ):
+                    dbos._sys_db.clear_queue_assignment(pending_workflow.workflow_uuid)
+                    continue
+                execute_workflow_by_id(dbos, pending_workflow.workflow_uuid)
+                pending_workflows.remove(pending_workflow)
         except DBOSWorkflowFunctionNotFoundError:
             time.sleep(1)
         except Exception as e:
@@ -34,17 +43,27 @@ def recover_pending_workflows(
 ) -> List["WorkflowHandle[Any]"]:
     workflow_handles: List["WorkflowHandle[Any]"] = []
     for executor_id in executor_ids:
-        if executor_id == "local" and os.environ.get("DBOS__VMID"):
-            dbos.logger.debug(
-                f"Skip local recovery because it's running in a VM: {os.environ.get('DBOS__VMID')}"
-            )
         dbos.logger.debug(f"Recovering pending workflows for executor: {executor_id}")
-        workflow_ids = dbos._sys_db.get_pending_workflows(executor_id)
-        dbos.logger.debug(f"Pending workflows: {workflow_ids}")
-
-        for workflowID in workflow_ids:
-            handle = execute_workflow_by_id(dbos, workflowID)
-            workflow_handles.append(handle)
-
-    dbos.logger.info("Recovered pending workflows")
+        pending_workflows = dbos._sys_db.get_pending_workflows(
+            executor_id, dbos.app_version
+        )
+        for pending_workflow in pending_workflows:
+            if (
+                pending_workflow.queue_name
+                and pending_workflow.queue_name != "_dbos_internal_queue"
+            ):
+                try:
+                    dbos._sys_db.clear_queue_assignment(pending_workflow.workflow_uuid)
+                    workflow_handles.append(
+                        dbos.retrieve_workflow(pending_workflow.workflow_uuid)
+                    )
+                except Exception as e:
+                    dbos.logger.error(e)
+            else:
+                workflow_handles.append(
+                    execute_workflow_by_id(dbos, pending_workflow.workflow_uuid)
+                )
+    dbos.logger.info(
+        f"Recovering {len(pending_workflows)} workflows from version {dbos.app_version}"
+    )
     return workflow_handles
dbos/_registrations.py CHANGED
@@ -1,4 +1,5 @@
 import inspect
+from dataclasses import dataclass
 from enum import Enum
 from types import FunctionType
 from typing import Any, Callable, List, Literal, Optional, Tuple, Type, cast
@@ -31,9 +32,9 @@ def set_temp_workflow_type(f: Any, name: TempWorkflowType) -> None:
     setattr(f, "dbos_temp_workflow_type", name)


+@dataclass
 class DBOSClassInfo:
-    def __init__(self) -> None:
-        self.def_required_roles: Optional[List[str]] = None
+    def_required_roles: Optional[List[str]] = None


 class DBOSFuncType(Enum):
@@ -44,12 +45,12 @@ class DBOSFuncType(Enum):
     Instance = 4


+@dataclass
 class DBOSFuncInfo:
-    def __init__(self) -> None:
-        self.class_info: Optional[DBOSClassInfo] = None
-        self.func_type: DBOSFuncType = DBOSFuncType.Unknown
-        self.required_roles: Optional[List[str]] = None
-        self.max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS
+    class_info: Optional[DBOSClassInfo] = None
+    func_type: DBOSFuncType = DBOSFuncType.Unknown
+    required_roles: Optional[List[str]] = None
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS


 def get_or_create_class_info(cls: Type[Any]) -> DBOSClassInfo:
@@ -110,6 +111,10 @@ def get_or_create_func_info(func: Callable[..., Any]) -> DBOSFuncInfo:
     return fi


+def set_func_info(func: Callable[..., Any], fi: DBOSFuncInfo) -> None:
+    setattr(func, "dbos_func_decorator_info", fi)
+
+
 def get_class_info(cls: Type[Any]) -> Optional[DBOSClassInfo]:
     if hasattr(cls, "dbos_class_decorator_info"):
         ci: DBOSClassInfo = getattr(cls, "dbos_class_decorator_info")
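The `@dataclass` conversions above are behavior-preserving: field defaults replace the hand-written `__init__`, and equality and `repr` come for free. A sketch of the equivalence (`DEFAULT_MAX_RECOVERY_ATTEMPTS` below is an assumed stand-in for the module's real constant):

```python
from dataclasses import dataclass
from typing import List, Optional

DEFAULT_MAX_RECOVERY_ATTEMPTS = 50  # assumed stand-in for the real constant


@dataclass
class FuncInfoSketch:  # mirrors the new DBOSFuncInfo shape
    class_info: Optional[object] = None
    required_roles: Optional[List[str]] = None
    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS


# Same default construction as the old __init__-based class,
# plus generated __eq__ and __repr__:
assert FuncInfoSketch() == FuncInfoSketch()
print(FuncInfoSketch())
```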
dbos/_sys_db.py CHANGED
@@ -1,4 +1,5 @@
 import datetime
+import logging
 import os
 import re
 import threading
@@ -13,6 +14,7 @@ from typing import (
     Optional,
     Sequence,
     Set,
+    Tuple,
     TypedDict,
     cast,
 )
@@ -22,8 +24,8 @@ import sqlalchemy as sa
 import sqlalchemy.dialects.postgresql as pg
 from alembic import command
 from alembic.config import Config
-from sqlalchemy import or_
 from sqlalchemy.exc import DBAPIError
+from sqlalchemy.sql import func

 from . import _serialization
 from ._dbos_config import ConfigFile
@@ -140,6 +142,12 @@ class GetWorkflowsOutput:
         self.workflow_uuids = workflow_uuids


+class GetPendingWorkflowsOutput:
+    def __init__(self, *, workflow_uuid: str, queue_name: Optional[str] = None):
+        self.workflow_uuid: str = workflow_uuid
+        self.queue_name: Optional[str] = queue_name
+
+
 class WorkflowInformation(TypedDict, total=False):
     workflow_uuid: str
     status: WorkflowStatuses  # The status of the workflow.
@@ -183,6 +191,10 @@ class SystemDatabase:
             host=config["database"]["hostname"],
             port=config["database"]["port"],
             database="postgres",
+            # fills the "application_name" column in pg_stat_activity
+            query={
+                "application_name": f"dbos_transact_{os.environ.get('DBOS__VMID', 'local')}"
+            },
         )
         engine = sa.create_engine(postgres_db_url)
         with engine.connect() as conn:
@@ -201,6 +213,10 @@ class SystemDatabase:
             host=config["database"]["hostname"],
             port=config["database"]["port"],
             database=sysdb_name,
+            # fills the "application_name" column in pg_stat_activity
+            query={
+                "application_name": f"dbos_transact_{os.environ.get('DBOS__VMID', 'local')}"
+            },
         )

         # Create a connection pool for the system database
@@ -214,6 +230,7 @@ class SystemDatabase:
         )
         alembic_cfg = Config()
         alembic_cfg.set_main_option("script_location", migration_dir)
+        logging.getLogger("alembic").setLevel(logging.WARNING)
         # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
         escaped_conn_string = re.sub(
             r"%(?=[0-9A-Fa-f]{2})",
@@ -221,7 +238,12 @@ class SystemDatabase:
             self.engine.url.render_as_string(hide_password=False),
         )
         alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-        command.upgrade(alembic_cfg, "head")
+        try:
+            command.upgrade(alembic_cfg, "head")
+        except Exception as e:
+            dbos_logger.warning(
+                f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
+            )

         self.notification_conn: Optional[psycopg.connection.Connection] = None
         self.notifications_map: Dict[str, threading.Condition] = {}
@@ -288,6 +310,7 @@ class SystemDatabase:
                         recovery_attempts=(
                             SystemSchema.workflow_status.c.recovery_attempts + 1
                         ),
+                        updated_at=func.extract("epoch", func.now()) * 1000,
                     ),
                 )
             )
@@ -385,6 +408,7 @@ class SystemDatabase:
                     status=status["status"],
                     output=status["output"],
                     error=status["error"],
+                    updated_at=func.extract("epoch", func.now()) * 1000,
                 ),
             )
         )
@@ -465,6 +489,7 @@ class SystemDatabase:
                 SystemSchema.workflow_status.c.authenticated_roles,
                 SystemSchema.workflow_status.c.assumed_role,
                 SystemSchema.workflow_status.c.queue_name,
+                SystemSchema.workflow_status.c.executor_id,
             ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
         ).fetchone()
         if row is None:
@@ -479,7 +504,7 @@ class SystemDatabase:
             "error": None,
             "app_id": None,
             "app_version": None,
-            "executor_id": None,
+            "executor_id": row[10],
             "request": row[2],
             "recovery_attempts": row[3],
             "authenticated_user": row[6],
@@ -665,7 +690,7 @@ class SystemDatabase:

     def get_workflows(self, input: GetWorkflowsInput) -> GetWorkflowsOutput:
         query = sa.select(SystemSchema.workflow_status.c.workflow_uuid).order_by(
-            SystemSchema.workflow_status.c.created_at.desc()
+            SystemSchema.workflow_status.c.created_at.asc()
         )
         if input.name:
             query = query.where(SystemSchema.workflow_status.c.name == input.name)
@@ -711,7 +736,7 @@ class SystemDatabase:
                 SystemSchema.workflow_queue.c.workflow_uuid
                 == SystemSchema.workflow_status.c.workflow_uuid,
             )
-            .order_by(SystemSchema.workflow_status.c.created_at.desc())
+            .order_by(SystemSchema.workflow_status.c.created_at.asc())
         )

         if input.get("name"):
@@ -746,16 +771,29 @@ class SystemDatabase:

         return GetWorkflowsOutput(workflow_uuids)

-    def get_pending_workflows(self, executor_id: str) -> list[str]:
+    def get_pending_workflows(
+        self, executor_id: str, app_version: str
+    ) -> list[GetPendingWorkflowsOutput]:
         with self.engine.begin() as c:
             rows = c.execute(
-                sa.select(SystemSchema.workflow_status.c.workflow_uuid).where(
+                sa.select(
+                    SystemSchema.workflow_status.c.workflow_uuid,
+                    SystemSchema.workflow_status.c.queue_name,
+                ).where(
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.PENDING.value,
                     SystemSchema.workflow_status.c.executor_id == executor_id,
+                    SystemSchema.workflow_status.c.application_version == app_version,
                 )
             ).fetchall()
-            return [row[0] for row in rows]
+
+            return [
+                GetPendingWorkflowsOutput(
+                    workflow_uuid=row.workflow_uuid,
+                    queue_name=row.queue_name,
+                )
+                for row in rows
+            ]

     def record_operation_result(
         self, result: OperationResultInternal, conn: Optional[sa.Connection] = None
@@ -1284,6 +1322,55 @@ class SystemDatabase:
             # Dequeue functions eligible for this worker and ordered by the time at which they were enqueued.
             # If there is a global or local concurrency limit N, select only the N oldest enqueued
             # functions, else select all of them.
+
+            # First lets figure out how many tasks the worker can dequeue
+            running_tasks_query = (
+                sa.select(
+                    SystemSchema.workflow_queue.c.executor_id,
+                    sa.func.count().label("task_count"),
+                )
+                .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_queue.c.executor_id.isnot(
+                        None
+                    )  # Task is dequeued
+                )
+                .where(
+                    SystemSchema.workflow_queue.c.completed_at_epoch_ms.is_(
+                        None
+                    )  # Task is not completed
+                )
+                .group_by(SystemSchema.workflow_queue.c.executor_id)
+            )
+            running_tasks_result = c.execute(running_tasks_query).fetchall()
+            running_tasks_result_dict = {row[0]: row[1] for row in running_tasks_result}
+            running_tasks_for_this_worker = running_tasks_result_dict.get(
+                executor_id, 0
+            )  # Get count for current executor
+
+            max_tasks = float("inf")
+            if queue.worker_concurrency is not None:
+                # Worker local concurrency limit should always be >= running_tasks_for_this_worker
+                # This should never happen but a check + warning doesn't hurt
+                if running_tasks_for_this_worker > queue.worker_concurrency:
+                    dbos_logger.warning(
+                        f"Number of tasks on this worker ({running_tasks_for_this_worker}) exceeds the worker concurrency limit ({queue.worker_concurrency})"
+                    )
+                max_tasks = max(
+                    0, queue.worker_concurrency - running_tasks_for_this_worker
+                )
+            if queue.concurrency is not None:
+                total_running_tasks = sum(running_tasks_result_dict.values())
+                # Queue global concurrency limit should always be >= running_tasks_count
+                # This should never happen but a check + warning doesn't hurt
+                if total_running_tasks > queue.concurrency:
+                    dbos_logger.warning(
+                        f"Total running tasks ({total_running_tasks}) exceeds the global concurrency limit ({queue.concurrency})"
+                    )
+                available_tasks = max(0, queue.concurrency - total_running_tasks)
+                max_tasks = min(max_tasks, available_tasks)
+
+            # Lookup tasks
             query = (
                 sa.select(
                     SystemSchema.workflow_queue.c.workflow_uuid,
@@ -1292,29 +1379,25 @@ class SystemDatabase:
                 )
                 .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
                 .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
-                .where(
-                    # Only select functions that have not been started yet or have been started by this worker
-                    or_(
-                        SystemSchema.workflow_queue.c.executor_id == None,
-                        SystemSchema.workflow_queue.c.executor_id == executor_id,
-                    )
-                )
+                .where(SystemSchema.workflow_queue.c.executor_id == None)
                 .order_by(SystemSchema.workflow_queue.c.created_at_epoch_ms.asc())
+                .with_for_update(nowait=True)  # Error out early
             )
-            # Set a dequeue limit if necessary
-            if queue.worker_concurrency is not None:
-                query = query.limit(queue.worker_concurrency)
-            elif queue.concurrency is not None:
-                query = query.limit(queue.concurrency)
+            # Apply limit only if max_tasks is finite
+            if max_tasks != float("inf"):
+                query = query.limit(int(max_tasks))

             rows = c.execute(query).fetchall()

-            # Now, get the workflow IDs of functions that have not yet been started
-            dequeued_ids: List[str] = [row[0] for row in rows if row[1] is None]
+            # Get the workflow IDs
+            dequeued_ids: List[str] = [row[0] for row in rows]
+            if len(dequeued_ids) > 0:
+                dbos_logger.debug(
+                    f"[{queue.name}] dequeueing {len(dequeued_ids)} task(s)"
+                )
             ret_ids: list[str] = []
-            dbos_logger.debug(f"[{queue.name}] dequeueing {len(dequeued_ids)} task(s)")
-            for id in dequeued_ids:

+            for id in dequeued_ids:
                 # If we have a limiter, stop starting functions when the number
                 # of functions started this period exceeds the limit.
                 if queue.limiter is not None:
@@ -1375,6 +1458,19 @@ class SystemDatabase:
                 .values(completed_at_epoch_ms=int(time.time() * 1000))
             )

+    def clear_queue_assignment(self, workflow_id: str) -> None:
+        with self.engine.begin() as c:
+            c.execute(
+                sa.update(SystemSchema.workflow_queue)
+                .where(SystemSchema.workflow_queue.c.workflow_uuid == workflow_id)
+                .values(executor_id=None, started_at_epoch_ms=None)
+            )
+            c.execute(
+                sa.update(SystemSchema.workflow_status)
+                .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
+                .values(executor_id=None, status=WorkflowStatusString.ENQUEUED.value)
+            )
+

 def reset_system_database(config: ConfigFile) -> None:
     sysdb_name = (
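The dequeue path now computes explicit capacity before selecting rows: per-worker headroom is the worker concurrency limit minus this worker's running tasks, global headroom is the queue concurrency limit minus all running tasks, and the effective limit is the minimum of the two. A standalone sketch of that arithmetic (pure Python, no database):

```python
from typing import Dict, Optional


def max_dequeueable_tasks(
    running_by_executor: Dict[str, int],  # executor_id -> running task count
    executor_id: str,
    worker_concurrency: Optional[int],    # per-worker limit, None = unlimited
    concurrency: Optional[int],           # queue-wide limit, None = unlimited
) -> float:
    """Sketch of the capacity computation added to the dequeue path above."""
    max_tasks = float("inf")
    mine = running_by_executor.get(executor_id, 0)
    if worker_concurrency is not None:
        max_tasks = max(0, worker_concurrency - mine)
    if concurrency is not None:
        total = sum(running_by_executor.values())
        max_tasks = min(max_tasks, max(0, concurrency - total))
    return max_tasks


# This worker runs 2 tasks (limit 5); all workers run 9 (limit 10):
# per-worker headroom is 3, global headroom is 1 -> dequeue at most 1.
assert max_dequeueable_tasks({"w1": 2, "w2": 7}, "w1", 5, 10) == 1
```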
{dbos-0.21.0a5.dist-info → dbos-0.22.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.21.0a5
+Version: 0.22.0
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
{dbos-0.21.0a5.dist-info → dbos-0.22.0.dist-info}/RECORD RENAMED
@@ -1,26 +1,26 @@
-dbos-0.21.0a5.dist-info/METADATA,sha256=1LAPN_Eu4X7ptR9xM02sR1tryFaQ42KeKGkeOm3yCQQ,5309
-dbos-0.21.0a5.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
-dbos-0.21.0a5.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
-dbos-0.21.0a5.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos-0.22.0.dist-info/METADATA,sha256=K4tMreHMHoB2VORi7cfuh1oUiOezJIy7EHVsr5Zh0fA,5307
+dbos-0.22.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+dbos-0.22.0.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-0.22.0.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
-dbos/_admin_server.py,sha256=PJgneZG9-64TapZrPeJtt73puAswRImCE5uce2k2PKU,4750
+dbos/_admin_server.py,sha256=YiVn5lywz2Vg8_juyNHOYl0HVEy48--7b4phwK7r92o,5732
 dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
 dbos/_cloudutils/authentication.py,sha256=V0fCWQN9stCkhbuuxgPTGpvuQcDqfU3KAxPAh01vKW4,5007
-dbos/_cloudutils/cloudutils.py,sha256=5e3CW1deSW-dI5G3QN0XbiVsBhyqT8wu7fuV2f8wtGU,7688
-dbos/_cloudutils/databases.py,sha256=x4187Djsyoa-QaG3Kog8JT2_GERsnqa93LIVanmVUmg,8393
-dbos/_context.py,sha256=FHB_fpE4fQt4fIJvAmMMsbY4xHwH77gsW01cFsRZjsE,17779
-dbos/_core.py,sha256=nGiXyYgV8H5TRRZG0e8HCd5IZimufYQLmKNr7nBbwbo,36564
+dbos/_cloudutils/cloudutils.py,sha256=YC7jGsIopT0KveLsqbRpQk2KlRBk-nIRC_UCgep4f3o,7797
+dbos/_cloudutils/databases.py,sha256=_shqaqSvhY4n2ScgQ8IP5PDZvzvcx3YBKV8fj-cxhSY,8543
+dbos/_context.py,sha256=gikN5lUVqnvR-ISoOElXYeYsR_BO2whebB3YP2DJBM4,17713
+dbos/_core.py,sha256=b1IL3LDRGmomHhcs929n_G2pfFSmNuSxFrwhcBuo20k,35519
 dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
-dbos/_db_wizard.py,sha256=xgKLna0_6Xi50F3o8msRosXba8NScHlpJR5ICVCkHDQ,7534
-dbos/_dbos.py,sha256=y5RgXPxdNsnketphphGMlaYZABFEQpr78UK-Xyja6dk,36216
+dbos/_db_wizard.py,sha256=6tfJaCRa1NtkUdNW75a2yvi_mEgnPJ9C1HP2zPG1hCU,8067
+dbos/_dbos.py,sha256=h25S5Mjl1JAfwMEpqRdyBSuUB_HI3TC8J9Nqtqy_XwQ,38453
 dbos/_dbos_config.py,sha256=DfiqVVxNqnafkocSzLqBp1Ig5vCviDTDK_GO3zTtQqI,8298
-dbos/_error.py,sha256=vtaSsG0QW6cRlwfZ4zzZWy_IHCZlomwSlrDyGWuyn8c,4337
+dbos/_error.py,sha256=NqlobQneZ2ycCQacXc8a38TIOHxFRjBXdF40i3wZUaA,4775
 dbos/_fastapi.py,sha256=ke03vqsSYDnO6XeOtOVFXj0-f-v1MGsOxa9McaROvNc,3616
 dbos/_flask.py,sha256=DZKUZR5-xOzPI7tYZ53r2PvvHVoAb8SYwLzMVFsVfjI,2608
 dbos/_kafka.py,sha256=o6DbwnsYRDtvVTZVsN7BAK8cdP79AfoWX3Q7CGY2Yuo,4199
 dbos/_kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
-dbos/_logger.py,sha256=iYwbA7DLyXalWa2Yu07HO6Xm301nRuenMU64GgwUMkU,3576
+dbos/_logger.py,sha256=hNEeOgR9yOwdgcOuvnW_wN9rbfpTk5OowPNhEJmjoQE,3644
 dbos/_migrations/env.py,sha256=38SIGVbmn_VV2x2u1aHLcPOoWgZ84eCymf3g_NljmbU,1626
 dbos/_migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
 dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py,sha256=ICLPl8CN9tQXMsLDsAj8z1TsL831-Z3F8jSBvrR-wyw,736
@@ -31,9 +31,9 @@ dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-m
 dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_outcome.py,sha256=FDMgWVjZ06vm9xO-38H17mTqBImUYQxgKs_bDCSIAhE,6648
-dbos/_queue.py,sha256=o_aczwualJTMoXb0XXL-Y5QH77OEukWzuerogbWi2ho,2779
-dbos/_recovery.py,sha256=ehruOebVRl8qHftydStGZd-VjJ7p9oYtYXDwyT4ZHRY,1874
-dbos/_registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642
+dbos/_queue.py,sha256=eZiapBcyn70-viW0y9fo7u09V6_VF5ACNGJxD-U_dNM,2844
+dbos/_recovery.py,sha256=GtNMvPFM9qetob-gCU9FPI2fo_BGZYRws4EFSpOuFa4,2675
+dbos/_registrations.py,sha256=_zy6k944Ll8QwqU12Kr3OP23ukVtm8axPNN1TS_kJRc,6717
 dbos/_request.py,sha256=cX1B3Atlh160phgS35gF1VEEV4pD126c9F3BDgBmxZU,929
 dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
 dbos/_scheduler.py,sha256=0I3e8Y-OIBG3wiUCIskShd-Sk_eUFCFyRB5u4L7IHXI,1940
@@ -41,7 +41,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=KeyoPrF7hy_ODXV7QNike_VFSD74QBRfQ76D7QyE9HI,966
 dbos/_schemas/system_database.py,sha256=rwp4EvCSaXcUoMaRczZCvETCxGp72k3-hvLyGUDkih0,5163
 dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
-dbos/_sys_db.py,sha256=DLoSddcraHPUMiA5zWIck9Kes7_UO39RL2CRPqtYyz0,58247
+dbos/_sys_db.py,sha256=aGa3KDKUVeK9d7r_yK21uoKV3LJGzRzsg8S3Bea5j_U,62685
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py,sha256=eI0SS9Nwj-fldtiuSzIlIG6dC91GXXwdRsoHxv6S_WI,2719
@@ -60,4 +60,4 @@ dbos/cli/cli.py,sha256=_tXw2IQrWW7fV_h51f_R99vEBSi6aMLz-vCOxKaENiQ,14155
 dbos/dbos-config.schema.json,sha256=X5TpXNcARGceX0zQs0fVgtZW_Xj9uBbY5afPt9Rz9yk,5741
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.21.0a5.dist-info/RECORD,,
+dbos-0.22.0.dist-info/RECORD,,