dbos 0.21.0a7__py3-none-any.whl → 0.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_admin_server.py +21 -0
- dbos/_cloudutils/cloudutils.py +4 -2
- dbos/_cloudutils/databases.py +4 -0
- dbos/_context.py +0 -1
- dbos/_core.py +43 -85
- dbos/_db_wizard.py +14 -2
- dbos/_dbos.py +54 -8
- dbos/_error.py +11 -0
- dbos/_logger.py +2 -1
- dbos/_queue.py +3 -1
- dbos/_recovery.py +6 -7
- dbos/_registrations.py +12 -7
- dbos/_sys_db.py +84 -19
- {dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/METADATA +1 -1
- {dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/RECORD +18 -18
- {dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/WHEEL +0 -0
- {dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/entry_points.txt +0 -0
- {dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/licenses/LICENSE +0 -0
dbos/_admin_server.py
CHANGED
@@ -16,6 +16,7 @@ if TYPE_CHECKING:
 _health_check_path = "/dbos-healthz"
 _workflow_recovery_path = "/dbos-workflow-recovery"
 _deactivate_path = "/deactivate"
+_workflow_queues_metadata_path = "/dbos-workflow-queues-metadata"
 # /workflows/:workflow_id/cancel
 # /workflows/:workflow_id/resume
 # /workflows/:workflow_id/restart
@@ -64,6 +65,26 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
             self.send_response(200)
             self._end_headers()
             self.wfile.write("deactivated".encode("utf-8"))
+        elif self.path == _workflow_queues_metadata_path:
+            queue_metadata_array = []
+            from ._dbos import _get_or_create_dbos_registry
+
+            registry = _get_or_create_dbos_registry()
+            for queue in registry.queue_info_map.values():
+                queue_metadata = {
+                    "name": queue.name,
+                    "concurrency": queue.concurrency,
+                    "workerConcurrency": queue.worker_concurrency,
+                    "rateLimit": queue.limiter,
+                }
+                # Remove keys with None values
+                queue_metadata = {
+                    k: v for k, v in queue_metadata.items() if v is not None
+                }
+                queue_metadata_array.append(queue_metadata)
+            self.send_response(200)
+            self._end_headers()
+            self.wfile.write(json.dumps(queue_metadata_array).encode("utf-8"))
         else:
             self.send_response(404)
             self._end_headers()
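The new endpoint serializes the metadata of every registered queue, dropping limits that are unset. As a quick way to exercise it, here is a minimal sketch, assuming a launched app whose admin server listens on the default port 3001 (set in dbos/_dbos.py below):

    import json
    from urllib.request import urlopen

    # Fetch queue metadata from the new admin endpoint.
    with urlopen("http://localhost:3001/dbos-workflow-queues-metadata") as resp:
        queues = json.loads(resp.read().decode("utf-8"))

    # Only configured limits appear, e.g. {"name": "q", "workerConcurrency": 5}.
    for q in queues:
        print(q["name"], q.get("concurrency"), q.get("workerConcurrency"), q.get("rateLimit"))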
dbos/_cloudutils/cloudutils.py
CHANGED
@@ -29,9 +29,11 @@ class DBOSCloudCredentials:
 @dataclass
 class UserProfile:
     Name: str
-    Email: str
     Organization: str
-
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.Name = kwargs.get("Name", "")
+        self.Organization = kwargs.get("Organization", "")


 class AppLanguages(Enum):
dbos/_cloudutils/databases.py
CHANGED
dbos/_context.py
CHANGED
@@ -49,7 +49,6 @@ class TracedAttributes(TypedDict, total=False):
 class DBOSContext:
     def __init__(self) -> None:
         self.executor_id = os.environ.get("DBOS__VMID", "local")
-        self.app_version = os.environ.get("DBOS__APPVERSION", "")
         self.app_id = os.environ.get("DBOS__APPID", "")

         self.logger = dbos_logger
dbos/_core.py
CHANGED
@@ -63,6 +63,7 @@ from ._registrations import (
     get_or_create_func_info,
     get_temp_workflow_type,
     set_dbos_func_name,
+    set_func_info,
     set_temp_workflow_type,
 )
 from ._roles import check_required_roles
@@ -162,7 +163,7 @@ def _init_workflow(
         "output": None,
         "error": None,
         "app_id": ctx.app_id,
-        "app_version": ctx.app_version,
+        "app_version": dbos.app_version,
         "executor_id": ctx.executor_id,
         "request": (
             _serialization.serialize(ctx.request) if ctx.request is not None else None
@@ -286,6 +287,7 @@ def execute_workflow_by_id(
     ctx.request = (
         _serialization.deserialize(request) if request is not None else None
     )
+    # If this function belongs to a configured class, add that class instance as its first argument
     if status["config_name"] is not None:
         config_name = status["config_name"]
         class_name = status["class_name"]
@@ -295,28 +297,9 @@ def execute_workflow_by_id(
                 workflow_id,
                 f"Cannot execute workflow because instance '{iname}' is not registered",
             )
-
-        if startNew:
-            return start_workflow(
-                dbos,
-                wf_func,
-                status["queue_name"],
-                True,
-                dbos._registry.instance_info_map[iname],
-                *inputs["args"],
-                **inputs["kwargs"],
-            )
-        else:
-            with SetWorkflowID(workflow_id):
-                return start_workflow(
-                    dbos,
-                    wf_func,
-                    status["queue_name"],
-                    True,
-                    dbos._registry.instance_info_map[iname],
-                    *inputs["args"],
-                    **inputs["kwargs"],
-                )
+        class_instance = dbos._registry.instance_info_map[iname]
+        inputs["args"] = (class_instance,) + inputs["args"]
+    # If this function is a class method, add that class object as its first argument
     elif status["class_name"] is not None:
         class_name = status["class_name"]
         if class_name not in dbos._registry.class_info_map:
@@ -324,30 +307,20 @@ def execute_workflow_by_id(
                 workflow_id,
                 f"Cannot execute workflow because class '{class_name}' is not registered",
             )
+        class_object = dbos._registry.class_info_map[class_name]
+        inputs["args"] = (class_object,) + inputs["args"]

-        if startNew:
-            return start_workflow(
-                dbos,
-                wf_func,
-                status["queue_name"],
-                True,
-                dbos._registry.class_info_map[class_name],
-                *inputs["args"],
-                **inputs["kwargs"],
-            )
-        else:
-            with SetWorkflowID(workflow_id):
-                return start_workflow(
-                    dbos,
-                    wf_func,
-                    status["queue_name"],
-                    True,
-                    dbos._registry.class_info_map[class_name],
-                    *inputs["args"],
-                    **inputs["kwargs"],
-                )
+    if startNew:
+        return start_workflow(
+            dbos,
+            wf_func,
+            status["queue_name"],
+            True,
+            *inputs["args"],
+            **inputs["kwargs"],
+        )
     else:
-        if startNew:
+        with SetWorkflowID(workflow_id):
             return start_workflow(
                 dbos,
                 wf_func,
@@ -356,16 +329,6 @@ def execute_workflow_by_id(
                 *inputs["args"],
                 **inputs["kwargs"],
             )
-        else:
-            with SetWorkflowID(workflow_id):
-                return start_workflow(
-                    dbos,
-                    wf_func,
-                    status["queue_name"],
-                    True,
-                    *inputs["args"],
-                    **inputs["kwargs"],
-                )


 @overload
@@ -398,9 +361,12 @@ def start_workflow(
     *args: P.args,
     **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
+    # If the function has a class, add the class object as its first argument
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
         fself = func.__self__
+    if fself is not None:
+        args = (fself,) + args  # type: ignore

     fi = get_func_info(func)
     if fi is None:
@@ -436,17 +402,13 @@ def start_workflow(
     new_wf_ctx.id_assigned_for_next_workflow = new_wf_ctx.assign_workflow_id()
     new_wf_id = new_wf_ctx.id_assigned_for_next_workflow

-    gin_args: Tuple[Any, ...] = args
-    if fself is not None:
-        gin_args = (fself,)
-
     status = _init_workflow(
         dbos,
         new_wf_ctx,
         inputs=inputs,
         wf_name=get_dbos_func_name(func),
-        class_name=get_dbos_class_name(fi, func, gin_args),
-        config_name=get_config_name(fi, func, gin_args),
+        class_name=get_dbos_class_name(fi, func, args),
+        config_name=get_config_name(fi, func, args),
         temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
         max_recovery_attempts=fi.max_recovery_attempts,
@@ -464,27 +426,15 @@ def start_workflow(
         )
         return WorkflowHandlePolling(new_wf_id, dbos)

-    if fself is not None:
-        future = dbos._executor.submit(
-            cast(Callable[..., R], _execute_workflow_wthread),
-            dbos,
-            status,
-            func,
-            new_wf_ctx,
-            fself,
-            *args,
-            **kwargs,
-        )
-    else:
-        future = dbos._executor.submit(
-            cast(Callable[..., R], _execute_workflow_wthread),
-            dbos,
-            status,
-            func,
-            new_wf_ctx,
-            *args,
-            **kwargs,
-        )
+    future = dbos._executor.submit(
+        cast(Callable[..., R], _execute_workflow_wthread),
+        dbos,
+        status,
+        func,
+        new_wf_ctx,
+        *args,
+        **kwargs,
+    )
     return WorkflowHandleFuture(new_wf_id, future, dbos)


@@ -516,6 +466,8 @@ def workflow_wrapper(

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> R:
+        fi = get_func_info(func)
+        assert fi is not None
         if dbosreg.dbos is None:
             raise DBOSException(
                 f"Function {func.__name__} invoked before DBOS initialized"
@@ -572,7 +524,7 @@ def decorate_workflow(
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:
     def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
-        reg.register_wf_function(func.__qualname__, wrapped_func)
+        reg.register_wf_function(func.__qualname__, wrapped_func, "workflow")
         return wrapped_func

     return _workflow_decorator
@@ -724,8 +676,12 @@ def decorate_transaction(
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "transaction")
-        dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
+        dbosreg.register_wf_function(
+            get_dbos_func_name(temp_wf), wrapped_wf, "transaction"
+        )
         wrapper.__orig_func = temp_wf  # type: ignore
+        set_func_info(wrapped_wf, get_or_create_func_info(func))
+        set_func_info(temp_wf, get_or_create_func_info(func))

         return cast(F, wrapper)

@@ -873,8 +829,10 @@ def decorate_step(
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "step")
-        dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
+        dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf, "step")
         wrapper.__orig_func = temp_wf  # type: ignore
+        set_func_info(wrapped_wf, get_or_create_func_info(func))
+        set_func_info(temp_wf, get_or_create_func_info(func))

         return cast(Callable[P, R], wrapper)
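The refactor in execute_workflow_by_id collapses four near-identical start_workflow call sites into a single pair at the end: each branch now just prepends the class instance or class object to the recorded inputs. A minimal sketch of the pattern with hypothetical values:

    # Inputs of a recovered workflow, as deserialized from the system database.
    inputs = {"args": (1, 2), "kwargs": {"retries": 3}}

    class_instance = object()  # stand-in for dbos._registry.instance_info_map[iname]
    inputs["args"] = (class_instance,) + inputs["args"]

    # All branches then funnel into one call:
    # start_workflow(dbos, wf_func, status["queue_name"], True, *inputs["args"], **inputs["kwargs"])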
dbos/_db_wizard.py
CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
 import time
-from typing import TYPE_CHECKING, Optional, TypedDict
+from typing import TYPE_CHECKING, Optional, TypedDict, cast

 import docker  # type: ignore
 import typer
@@ -45,8 +45,20 @@ def db_wizard(config: "ConfigFile", config_file_path: str) -> "ConfigFile":
             f"Could not connect to Postgres: password authentication failed: {db_connection_error}"
         )
     db_config = config["database"]
+
+    # Read the config file and check if the database hostname/port/username are set. If so, skip the wizard.
+    with open(config_file_path, "r") as file:
+        content = file.read()
+        local_config = yaml.safe_load(content)
+    if "database" not in local_config:
+        local_config["database"] = {}
+    local_config = cast("ConfigFile", local_config)
+
     if (
-        db_config["hostname"] != "localhost"
+        local_config["database"].get("hostname")
+        or local_config["database"].get("port")
+        or local_config["database"].get("username")
+        or db_config["hostname"] != "localhost"
         or db_config["port"] != 5432
         or db_config["username"] != "postgres"
     ):
dbos/_dbos.py
CHANGED
@@ -2,6 +2,8 @@ from __future__ import annotations

 import asyncio
 import atexit
+import hashlib
+import inspect
 import json
 import os
 import sys
@@ -83,7 +85,11 @@ from ._context import (
     get_local_dbos_context,
 )
 from ._dbos_config import ConfigFile, load_config, set_env_vars
-from ._error import DBOSException, DBOSNonExistentWorkflowError
+from ._error import (
+    DBOSConflictingRegistrationError,
+    DBOSException,
+    DBOSNonExistentWorkflowError,
+)
 from ._logger import add_otlp_to_all_loggers, dbos_logger
 from ._sys_db import SystemDatabase

@@ -142,6 +148,7 @@ RegisteredJob = Tuple[
 class DBOSRegistry:
     def __init__(self) -> None:
         self.workflow_info_map: dict[str, Workflow[..., Any]] = {}
+        self.function_type_map: dict[str, str] = {}
         self.class_info_map: dict[str, type] = {}
         self.instance_info_map: dict[str, object] = {}
         self.queue_info_map: dict[str, Queue] = {}
@@ -149,7 +156,11 @@ class DBOSRegistry:
         self.dbos: Optional[DBOS] = None
         self.config: Optional[ConfigFile] = None

-    def register_wf_function(self, name: str, wrapped_func: F) -> None:
+    def register_wf_function(self, name: str, wrapped_func: F, functype: str) -> None:
+        if name in self.function_type_map:
+            if self.function_type_map[name] != functype:
+                raise DBOSConflictingRegistrationError(name)
+        self.function_type_map[name] = functype
         self.workflow_info_map[name] = wrapped_func

     def register_class(self, cls: type, ci: DBOSClassInfo) -> None:
@@ -186,6 +197,22 @@ class DBOSRegistry:
         else:
             self.instance_info_map[fn] = inst

+    def compute_app_version(self) -> str:
+        """
+        An application's version is computed from a hash of the source of its workflows.
+        This is guaranteed to be stable given identical source code because it uses an MD5 hash
+        and because it iterates through the workflows in sorted order.
+        This way, if the app's workflows are updated (which would break recovery), its version changes.
+        App version can be manually set through the DBOS__APPVERSION environment variable.
+        """
+        hasher = hashlib.md5()
+        sources = sorted(
+            [inspect.getsource(wf) for wf in self.workflow_info_map.values()]
+        )
+        for source in sources:
+            hasher.update(source.encode("utf-8"))
+        return hasher.hexdigest()
+

 class DBOS:
     """
@@ -283,6 +310,7 @@ class DBOS:
         self._executor_field: Optional[ThreadPoolExecutor] = None
         self._background_threads: List[threading.Thread] = []
         self._executor_id: str = os.environ.get("DBOS__VMID", "local")
+        self.app_version: str = os.environ.get("DBOS__APPVERSION", "")

         # If using FastAPI, set up middleware and lifecycle events
         if self.fastapi is not None:
@@ -305,7 +333,7 @@ class DBOS:
         temp_send_wf = workflow_wrapper(self._registry, send_temp_workflow)
         set_dbos_func_name(send_temp_workflow, TEMP_SEND_WF_NAME)
         set_temp_workflow_type(send_temp_workflow, "send")
-        self._registry.register_wf_function(TEMP_SEND_WF_NAME, temp_send_wf)
+        self._registry.register_wf_function(TEMP_SEND_WF_NAME, temp_send_wf, "send")

         for handler in dbos_logger.handlers:
             handler.flush()
@@ -351,6 +379,10 @@ class DBOS:
             dbos_logger.warning(f"DBOS was already launched")
             return
         self._launched = True
+        if self.app_version == "":
+            self.app_version = self._registry.compute_app_version()
+        dbos_logger.info(f"Application version: {self.app_version}")
+        dbos_tracer.app_version = self.app_version
         self._executor_field = ThreadPoolExecutor(max_workers=64)
         self._sys_db_field = SystemDatabase(self.config)
         self._app_db_field = ApplicationDatabase(self.config)
@@ -359,9 +391,19 @@ class DBOS:
             admin_port = 3001
             self._admin_server_field = AdminServer(dbos=self, port=admin_port)

-            workflow_ids = self._sys_db.get_pending_workflows(self._executor_id)
-
-            self._executor.submit(startup_recovery_thread, self, workflow_ids)
+            workflow_ids = self._sys_db.get_pending_workflows(
+                self._executor_id, self.app_version
+            )
+            if (len(workflow_ids)) > 0:
+                self.logger.info(
+                    f"Recovering {len(workflow_ids)} workflows from application version {self.app_version}"
+                )
+            else:
+                self.logger.info(
+                    f"No workflows to recover from application version {self.app_version}"
+                )
+
+            self._executor.submit(startup_recovery_thread, self, workflow_ids)

             # Listen to notifications
             notification_listener_thread = threading.Thread(
@@ -398,13 +440,13 @@ class DBOS:
             self._background_threads.append(poller_thread)
             self._registry.pollers = []

-            dbos_logger.info("DBOS launched")
+            dbos_logger.info("DBOS launched!")

             # Flush handlers and add OTLP to all loggers if enabled
             # to enable their export in DBOS Cloud
             for handler in dbos_logger.handlers:
                 handler.flush()
-            add_otlp_to_all_loggers()
+            add_otlp_to_all_loggers(self.app_version)
         except Exception:
             dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
             raise
@@ -997,6 +1039,10 @@ def _dbos_exit_hook() -> None:
         )
         return
     if not _dbos_global_instance._launched:
+        if _dbos_global_instance.fastapi is not None:
+            # FastAPI lifespan middleware will call launch/destroy, so we can ignore this.
+            # This is likely to happen during fastapi dev runs, where the reloader loads the module multiple times.
+            return
        print("DBOS exiting; DBOS exists but launch() was not called")
        dbos_logger.warning("DBOS exiting; DBOS exists but launch() was not called")
        return
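compute_app_version makes recovery version-aware: on launch, an executor only picks up pending workflows recorded under its own application version. A standalone sketch of the same hashing scheme, assuming ordinary workflow functions whose source inspect.getsource can read:

    import hashlib
    import inspect

    def compute_version(workflows):
        # MD5 over the source of every workflow, sorted so that
        # registration order does not change the digest.
        hasher = hashlib.md5()
        for source in sorted(inspect.getsource(wf) for wf in workflows):
            hasher.update(source.encode("utf-8"))
        return hasher.hexdigest()

Setting the DBOS__APPVERSION environment variable bypasses the computation entirely, as the docstring above notes.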
dbos/_error.py
CHANGED
@@ -36,6 +36,7 @@ class DBOSErrorCode(Enum):
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8
     ConflictingWorkflowError = 9
+    ConflictingRegistrationError = 25


 class DBOSWorkflowConflictIDError(DBOSException):
@@ -127,3 +128,13 @@ class DBOSMaxStepRetriesExceeded(DBOSException):
             "Step reached maximum retries.",
             dbos_error_code=DBOSErrorCode.MaxStepRetriesExceeded.value,
         )
+
+
+class DBOSConflictingRegistrationError(DBOSException):
+    """Exception raised when conflicting decorators are applied to the same function."""
+
+    def __init__(self, name: str) -> None:
+        super().__init__(
+            f"Operation (Name: {name}) is already registered with a conflicting function type",
+            dbos_error_code=DBOSErrorCode.ConflictingRegistrationError.value,
+        )
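The new error surfaces at registration time rather than at run time. A minimal sketch of what now raises, using the internal DBOSRegistry shown in dbos/_dbos.py above and a hypothetical function:

    def task():
        pass

    registry = DBOSRegistry()
    registry.register_wf_function("task", task, "step")
    registry.register_wf_function("task", task, "step")      # fine: same function type
    registry.register_wf_function("task", task, "workflow")  # raises DBOSConflictingRegistrationError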
dbos/_logger.py
CHANGED
@@ -86,8 +86,9 @@ def config_logger(config: "ConfigFile") -> None:
     dbos_logger.addFilter(_otlp_transformer)


-def add_otlp_to_all_loggers() -> None:
+def add_otlp_to_all_loggers(app_version: str) -> None:
     if _otlp_handler is not None and _otlp_transformer is not None:
+        _otlp_transformer.app_version = app_version
         root = logging.root

         root.addHandler(_otlp_handler)
dbos/_queue.py
CHANGED
@@ -76,7 +76,9 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
                 execute_workflow_by_id(dbos, id)
         except OperationalError as e:
             # Ignore serialization error
-            if not isinstance(e.orig, errors.SerializationFailure):
+            if not isinstance(
+                e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
+            ):
                 dbos.logger.warning(
                     f"Exception encountered in queue thread: {traceback.format_exc()}"
                 )
dbos/_recovery.py
CHANGED
@@ -43,12 +43,10 @@ def recover_pending_workflows(
 ) -> List["WorkflowHandle[Any]"]:
     workflow_handles: List["WorkflowHandle[Any]"] = []
     for executor_id in executor_ids:
-        if executor_id == "local" and os.environ.get("DBOS__VMID"):
-            dbos.logger.debug(
-                f"Skip local recovery because it's running in a VM: {os.environ.get('DBOS__VMID')}"
-            )
         dbos.logger.debug(f"Recovering pending workflows for executor: {executor_id}")
-        pending_workflows = dbos._sys_db.get_pending_workflows(executor_id)
+        pending_workflows = dbos._sys_db.get_pending_workflows(
+            executor_id, dbos.app_version
+        )
         for pending_workflow in pending_workflows:
             if (
                 pending_workflow.queue_name
@@ -65,6 +63,7 @@ def recover_pending_workflows(
             workflow_handles.append(
                 execute_workflow_by_id(dbos, pending_workflow.workflow_uuid)
             )
-
-
+        dbos.logger.info(
+            f"Recovering {len(pending_workflows)} workflows from version {dbos.app_version}"
+        )
     return workflow_handles
|
dbos/_registrations.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import inspect
|
|
2
|
+
from dataclasses import dataclass
|
|
2
3
|
from enum import Enum
|
|
3
4
|
from types import FunctionType
|
|
4
5
|
from typing import Any, Callable, List, Literal, Optional, Tuple, Type, cast
|
|
@@ -31,9 +32,9 @@ def set_temp_workflow_type(f: Any, name: TempWorkflowType) -> None:
|
|
|
31
32
|
setattr(f, "dbos_temp_workflow_type", name)
|
|
32
33
|
|
|
33
34
|
|
|
35
|
+
@dataclass
|
|
34
36
|
class DBOSClassInfo:
|
|
35
|
-
|
|
36
|
-
self.def_required_roles: Optional[List[str]] = None
|
|
37
|
+
def_required_roles: Optional[List[str]] = None
|
|
37
38
|
|
|
38
39
|
|
|
39
40
|
class DBOSFuncType(Enum):
|
|
@@ -44,12 +45,12 @@ class DBOSFuncType(Enum):
|
|
|
44
45
|
Instance = 4
|
|
45
46
|
|
|
46
47
|
|
|
48
|
+
@dataclass
|
|
47
49
|
class DBOSFuncInfo:
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
self.max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS
|
|
50
|
+
class_info: Optional[DBOSClassInfo] = None
|
|
51
|
+
func_type: DBOSFuncType = DBOSFuncType.Unknown
|
|
52
|
+
required_roles: Optional[List[str]] = None
|
|
53
|
+
max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
|
|
53
54
|
|
|
54
55
|
|
|
55
56
|
def get_or_create_class_info(cls: Type[Any]) -> DBOSClassInfo:
|
|
@@ -110,6 +111,10 @@ def get_or_create_func_info(func: Callable[..., Any]) -> DBOSFuncInfo:
|
|
|
110
111
|
return fi
|
|
111
112
|
|
|
112
113
|
|
|
114
|
+
def set_func_info(func: Callable[..., Any], fi: DBOSFuncInfo) -> None:
|
|
115
|
+
setattr(func, "dbos_func_decorator_info", fi)
|
|
116
|
+
|
|
117
|
+
|
|
113
118
|
def get_class_info(cls: Type[Any]) -> Optional[DBOSClassInfo]:
|
|
114
119
|
if hasattr(cls, "dbos_class_decorator_info"):
|
|
115
120
|
ci: DBOSClassInfo = getattr(cls, "dbos_class_decorator_info")
|
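The @dataclass conversions keep the no-argument constructors while replacing hand-written __init__ bodies with field defaults, so callers are unaffected. A quick sanity check against the defaults declared above:

    fi = DBOSFuncInfo()
    assert fi.class_info is None
    assert fi.func_type == DBOSFuncType.Unknown
    assert fi.required_roles is None
    assert fi.max_recovery_attempts == DEFAULT_MAX_RECOVERY_ATTEMPTS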
dbos/_sys_db.py
CHANGED
@@ -1,4 +1,5 @@
 import datetime
+import logging
 import os
 import re
 import threading
@@ -13,6 +14,7 @@ from typing import (
     Optional,
     Sequence,
     Set,
+    Tuple,
     TypedDict,
     cast,
 )
@@ -22,8 +24,8 @@ import sqlalchemy as sa
 import sqlalchemy.dialects.postgresql as pg
 from alembic import command
 from alembic.config import Config
-from sqlalchemy import or_
 from sqlalchemy.exc import DBAPIError
+from sqlalchemy.sql import func

 from . import _serialization
 from ._dbos_config import ConfigFile
@@ -189,6 +191,10 @@ class SystemDatabase:
             host=config["database"]["hostname"],
             port=config["database"]["port"],
             database="postgres",
+            # fills the "application_name" column in pg_stat_activity
+            query={
+                "application_name": f"dbos_transact_{os.environ.get('DBOS__VMID', 'local')}"
+            },
         )
         engine = sa.create_engine(postgres_db_url)
         with engine.connect() as conn:
@@ -207,6 +213,10 @@ class SystemDatabase:
             host=config["database"]["hostname"],
             port=config["database"]["port"],
             database=sysdb_name,
+            # fills the "application_name" column in pg_stat_activity
+            query={
+                "application_name": f"dbos_transact_{os.environ.get('DBOS__VMID', 'local')}"
+            },
         )

         # Create a connection pool for the system database
@@ -220,6 +230,7 @@ class SystemDatabase:
         )
         alembic_cfg = Config()
         alembic_cfg.set_main_option("script_location", migration_dir)
+        logging.getLogger("alembic").setLevel(logging.WARNING)
         # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
         escaped_conn_string = re.sub(
             r"%(?=[0-9A-Fa-f]{2})",
@@ -227,7 +238,12 @@ class SystemDatabase:
             self.engine.url.render_as_string(hide_password=False),
         )
         alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-        command.upgrade(alembic_cfg, "head")
+        try:
+            command.upgrade(alembic_cfg, "head")
+        except Exception as e:
+            dbos_logger.warning(
+                f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
+            )

         self.notification_conn: Optional[psycopg.connection.Connection] = None
         self.notifications_map: Dict[str, threading.Condition] = {}
@@ -294,6 +310,7 @@ class SystemDatabase:
                     recovery_attempts=(
                         SystemSchema.workflow_status.c.recovery_attempts + 1
                     ),
+                    updated_at=func.extract("epoch", func.now()) * 1000,
                 ),
             )
         )
@@ -391,6 +408,7 @@ class SystemDatabase:
                     status=status["status"],
                     output=status["output"],
                     error=status["error"],
+                    updated_at=func.extract("epoch", func.now()) * 1000,
                 ),
             )
         )
@@ -754,7 +772,7 @@ class SystemDatabase:
         return GetWorkflowsOutput(workflow_uuids)

     def get_pending_workflows(
-        self, executor_id: str
+        self, executor_id: str, app_version: str
     ) -> list[GetPendingWorkflowsOutput]:
         with self.engine.begin() as c:
             rows = c.execute(
@@ -765,8 +783,10 @@ class SystemDatabase:
                     SystemSchema.workflow_status.c.status
                     == WorkflowStatusString.PENDING.value,
                     SystemSchema.workflow_status.c.executor_id == executor_id,
+                    SystemSchema.workflow_status.c.application_version == app_version,
                 )
             ).fetchall()
+
             return [
                 GetPendingWorkflowsOutput(
                     workflow_uuid=row.workflow_uuid,
@@ -1302,6 +1322,55 @@ class SystemDatabase:
             # Dequeue functions eligible for this worker and ordered by the time at which they were enqueued.
             # If there is a global or local concurrency limit N, select only the N oldest enqueued
             # functions, else select all of them.
+
+            # First lets figure out how many tasks the worker can dequeue
+            running_tasks_query = (
+                sa.select(
+                    SystemSchema.workflow_queue.c.executor_id,
+                    sa.func.count().label("task_count"),
+                )
+                .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+                .where(
+                    SystemSchema.workflow_queue.c.executor_id.isnot(
+                        None
+                    )  # Task is dequeued
+                )
+                .where(
+                    SystemSchema.workflow_queue.c.completed_at_epoch_ms.is_(
+                        None
+                    )  # Task is not completed
+                )
+                .group_by(SystemSchema.workflow_queue.c.executor_id)
+            )
+            running_tasks_result = c.execute(running_tasks_query).fetchall()
+            running_tasks_result_dict = {row[0]: row[1] for row in running_tasks_result}
+            running_tasks_for_this_worker = running_tasks_result_dict.get(
+                executor_id, 0
+            )  # Get count for current executor
+
+            max_tasks = float("inf")
+            if queue.worker_concurrency is not None:
+                # Worker local concurrency limit should always be >= running_tasks_for_this_worker
+                # This should never happen but a check + warning doesn't hurt
+                if running_tasks_for_this_worker > queue.worker_concurrency:
+                    dbos_logger.warning(
+                        f"Number of tasks on this worker ({running_tasks_for_this_worker}) exceeds the worker concurrency limit ({queue.worker_concurrency})"
+                    )
+                max_tasks = max(
+                    0, queue.worker_concurrency - running_tasks_for_this_worker
+                )
+            if queue.concurrency is not None:
+                total_running_tasks = sum(running_tasks_result_dict.values())
+                # Queue global concurrency limit should always be >= running_tasks_count
+                # This should never happen but a check + warning doesn't hurt
+                if total_running_tasks > queue.concurrency:
+                    dbos_logger.warning(
+                        f"Total running tasks ({total_running_tasks}) exceeds the global concurrency limit ({queue.concurrency})"
+                    )
+                available_tasks = max(0, queue.concurrency - total_running_tasks)
+                max_tasks = min(max_tasks, available_tasks)
+
+            # Lookup tasks
             query = (
                 sa.select(
                     SystemSchema.workflow_queue.c.workflow_uuid,
@@ -1310,29 +1379,25 @@ class SystemDatabase:
                 )
                 .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
                 .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
-                .where(
-                    # Only select functions that have not been started yet or have been started by this worker
-                    or_(
-                        SystemSchema.workflow_queue.c.executor_id == None,
-                        SystemSchema.workflow_queue.c.executor_id == executor_id,
-                    )
-                )
+                .where(SystemSchema.workflow_queue.c.executor_id == None)
                 .order_by(SystemSchema.workflow_queue.c.created_at_epoch_ms.asc())
+                .with_for_update(nowait=True)  # Error out early
             )
-            # If there is a concurrency limit, apply it
-            if queue.worker_concurrency is not None:
-                query = query.limit(queue.worker_concurrency)
-            elif queue.concurrency is not None:
-                query = query.limit(queue.concurrency)
+            # Apply limit only if max_tasks is finite
+            if max_tasks != float("inf"):
+                query = query.limit(int(max_tasks))

             rows = c.execute(query).fetchall()

             # Get the workflow IDs
             dequeued_ids: List[str] = [row[0] for row in rows]
+            if len(dequeued_ids) > 0:
+                dbos_logger.debug(
+                    f"[{queue.name}] dequeueing {len(dequeued_ids)} task(s)"
+                )
             ret_ids: list[str] = []
-            dbos_logger.debug(f"[{queue.name}] dequeueing {len(dequeued_ids)} task(s)")
-            for id in dequeued_ids:

+            for id in dequeued_ids:
                 # If we have a limiter, stop starting functions when the number
                 # of functions started this period exceeds the limit.
                 if queue.limiter is not None:
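The dequeue budget above combines both limits: each is reduced by the tasks already running under it, and the query limit is the smaller of the two. A worked sketch with hypothetical numbers:

    # worker_concurrency = 5 with 2 tasks already running on this worker;
    # global concurrency = 4 with 3 tasks running across all workers.
    worker_concurrency, running_here = 5, 2
    concurrency, running_total = 4, 3

    max_tasks = float("inf")
    max_tasks = max(0, worker_concurrency - running_here)            # 3 local slots
    max_tasks = min(max_tasks, max(0, concurrency - running_total))  # 1 global slot
    assert max_tasks == 1  # the SELECT is limited to one new task

Because candidate rows are now locked with SELECT ... FOR UPDATE NOWAIT (with_for_update(nowait=True)), a concurrent worker touching the same rows fails fast with a lock error, which the queue thread in dbos/_queue.py above now ignores alongside serialization failures.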
{dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/RECORD
CHANGED
@@ -1,26 +1,26 @@
-dbos-0.21.0a7.dist-info/METADATA,sha256=
-dbos-0.21.0a7.dist-info/WHEEL,sha256=
-dbos-0.21.0a7.dist-info/entry_points.txt,sha256=
-dbos-0.21.0a7.dist-info/licenses/LICENSE,sha256=
+dbos-0.22.0.dist-info/METADATA,sha256=K4tMreHMHoB2VORi7cfuh1oUiOezJIy7EHVsr5Zh0fA,5307
+dbos-0.22.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+dbos-0.22.0.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-0.22.0.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
-dbos/_admin_server.py,sha256=
+dbos/_admin_server.py,sha256=YiVn5lywz2Vg8_juyNHOYl0HVEy48--7b4phwK7r92o,5732
 dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
 dbos/_cloudutils/authentication.py,sha256=V0fCWQN9stCkhbuuxgPTGpvuQcDqfU3KAxPAh01vKW4,5007
-dbos/_cloudutils/cloudutils.py,sha256=
-dbos/_cloudutils/databases.py,sha256=
-dbos/_context.py,sha256=
-dbos/_core.py,sha256=
+dbos/_cloudutils/cloudutils.py,sha256=YC7jGsIopT0KveLsqbRpQk2KlRBk-nIRC_UCgep4f3o,7797
+dbos/_cloudutils/databases.py,sha256=_shqaqSvhY4n2ScgQ8IP5PDZvzvcx3YBKV8fj-cxhSY,8543
+dbos/_context.py,sha256=gikN5lUVqnvR-ISoOElXYeYsR_BO2whebB3YP2DJBM4,17713
+dbos/_core.py,sha256=b1IL3LDRGmomHhcs929n_G2pfFSmNuSxFrwhcBuo20k,35519
 dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
-dbos/_db_wizard.py,sha256=
-dbos/_dbos.py,sha256=
+dbos/_db_wizard.py,sha256=6tfJaCRa1NtkUdNW75a2yvi_mEgnPJ9C1HP2zPG1hCU,8067
+dbos/_dbos.py,sha256=h25S5Mjl1JAfwMEpqRdyBSuUB_HI3TC8J9Nqtqy_XwQ,38453
 dbos/_dbos_config.py,sha256=DfiqVVxNqnafkocSzLqBp1Ig5vCviDTDK_GO3zTtQqI,8298
-dbos/_error.py,sha256=
+dbos/_error.py,sha256=NqlobQneZ2ycCQacXc8a38TIOHxFRjBXdF40i3wZUaA,4775
 dbos/_fastapi.py,sha256=ke03vqsSYDnO6XeOtOVFXj0-f-v1MGsOxa9McaROvNc,3616
 dbos/_flask.py,sha256=DZKUZR5-xOzPI7tYZ53r2PvvHVoAb8SYwLzMVFsVfjI,2608
 dbos/_kafka.py,sha256=o6DbwnsYRDtvVTZVsN7BAK8cdP79AfoWX3Q7CGY2Yuo,4199
 dbos/_kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
-dbos/_logger.py,sha256=
+dbos/_logger.py,sha256=hNEeOgR9yOwdgcOuvnW_wN9rbfpTk5OowPNhEJmjoQE,3644
 dbos/_migrations/env.py,sha256=38SIGVbmn_VV2x2u1aHLcPOoWgZ84eCymf3g_NljmbU,1626
 dbos/_migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
 dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py,sha256=ICLPl8CN9tQXMsLDsAj8z1TsL831-Z3F8jSBvrR-wyw,736
@@ -31,9 +31,9 @@ dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=8PyFi8rd6CN-m
 dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4hGBC02Ptng1715roTjY3xiyzZU4,729
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_outcome.py,sha256=FDMgWVjZ06vm9xO-38H17mTqBImUYQxgKs_bDCSIAhE,6648
-dbos/_queue.py,sha256=
-dbos/_recovery.py,sha256=
-dbos/_registrations.py,sha256=
+dbos/_queue.py,sha256=eZiapBcyn70-viW0y9fo7u09V6_VF5ACNGJxD-U_dNM,2844
+dbos/_recovery.py,sha256=GtNMvPFM9qetob-gCU9FPI2fo_BGZYRws4EFSpOuFa4,2675
+dbos/_registrations.py,sha256=_zy6k944Ll8QwqU12Kr3OP23ukVtm8axPNN1TS_kJRc,6717
 dbos/_request.py,sha256=cX1B3Atlh160phgS35gF1VEEV4pD126c9F3BDgBmxZU,929
 dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
 dbos/_scheduler.py,sha256=0I3e8Y-OIBG3wiUCIskShd-Sk_eUFCFyRB5u4L7IHXI,1940
@@ -41,7 +41,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=KeyoPrF7hy_ODXV7QNike_VFSD74QBRfQ76D7QyE9HI,966
 dbos/_schemas/system_database.py,sha256=rwp4EvCSaXcUoMaRczZCvETCxGp72k3-hvLyGUDkih0,5163
 dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
-dbos/_sys_db.py,sha256=
+dbos/_sys_db.py,sha256=aGa3KDKUVeK9d7r_yK21uoKV3LJGzRzsg8S3Bea5j_U,62685
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py,sha256=eI0SS9Nwj-fldtiuSzIlIG6dC91GXXwdRsoHxv6S_WI,2719
@@ -60,4 +60,4 @@ dbos/cli/cli.py,sha256=_tXw2IQrWW7fV_h51f_R99vEBSi6aMLz-vCOxKaENiQ,14155
 dbos/dbos-config.schema.json,sha256=X5TpXNcARGceX0zQs0fVgtZW_Xj9uBbY5afPt9Rz9yk,5741
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.21.0a7.dist-info/RECORD,,
+dbos-0.22.0.dist-info/RECORD,,
{dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/WHEEL
File without changes

{dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/entry_points.txt
File without changes

{dbos-0.21.0a7.dist-info → dbos-0.22.0.dist-info}/licenses/LICENSE
File without changes
|