dbos 0.7.1__tar.gz → 0.8.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dbos might be problematic.
- {dbos-0.7.1 → dbos-0.8.0}/PKG-INFO +1 -1
- {dbos-0.7.1 → dbos-0.8.0}/dbos/__init__.py +2 -1
- {dbos-0.7.1 → dbos-0.8.0}/dbos/context.py +33 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/core.py +53 -27
- {dbos-0.7.1 → dbos-0.8.0}/dbos/dbos.py +47 -36
- {dbos-0.7.1 → dbos-0.8.0}/dbos/error.py +11 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py +2 -1
- dbos-0.8.0/dbos/migrations/versions/d76646551a6b_job_queue_limiter.py +43 -0
- dbos-0.8.0/dbos/migrations/versions/d76646551a6c_workflow_queue.py +28 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/migrations/versions/eab0cc1d9a14_job_queue.py +2 -1
- {dbos-0.7.1 → dbos-0.8.0}/dbos/queue.py +31 -8
- {dbos-0.7.1 → dbos-0.8.0}/dbos/registrations.py +3 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/request.py +1 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/scheduler/scheduler.py +7 -1
- {dbos-0.7.1 → dbos-0.8.0}/dbos/schemas/system_database.py +10 -2
- {dbos-0.7.1 → dbos-0.8.0}/dbos/system_database.py +167 -34
- dbos-0.8.0/dbos/utils.py +55 -0
- {dbos-0.7.1 → dbos-0.8.0}/pyproject.toml +1 -1
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_dbos.py +166 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_failures.py +35 -1
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_fastapi_roles.py +7 -19
- dbos-0.8.0/tests/test_queue.py +322 -0
- dbos-0.7.1/dbos/utils.py +0 -14
- dbos-0.7.1/tests/test_queue.py +0 -110
- {dbos-0.7.1 → dbos-0.8.0}/LICENSE +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/README.md +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/admin_sever.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/application_database.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/cli.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/dbos-config.schema.json +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/dbos_config.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/decorators.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/fastapi.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/flask.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/kafka.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/kafka_message.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/logger.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/migrations/env.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/migrations/script.py.mako +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/migrations/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/py.typed +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/recovery.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/roles.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/scheduler/croniter.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/schemas/__init__.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/schemas/application_database.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/README.md +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/__package/__init__.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/__package/main.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/__package/schema.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/alembic.ini +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/dbos-config.yaml.dbos +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/migrations/env.py.dbos +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/migrations/script.py.mako +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/templates/hello/start_postgres_docker.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/dbos/tracer.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/__init__.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/atexit_no_ctor.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/atexit_no_launch.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/classdefs.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/conftest.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/more_classdefs.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/scheduler/test_croniter.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/scheduler/test_scheduler.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_admin_server.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_classdecorators.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_concurrency.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_config.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_fastapi.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_flask.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_kafka.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_package.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_schema_migration.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/tests/test_singleton.py +0 -0
- {dbos-0.7.1 → dbos-0.8.0}/version/__init__.py +0 -0
{dbos-0.7.1 → dbos-0.8.0}/dbos/__init__.py

@@ -1,5 +1,5 @@
 from . import error as error
-from .context import DBOSContextEnsure, SetWorkflowID
+from .context import DBOSContextEnsure, DBOSContextSetAuth, SetWorkflowID
 from .dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle, WorkflowStatus
 from .dbos_config import ConfigFile, get_dbos_database_url, load_config
 from .kafka_message import KafkaMessage
@@ -11,6 +11,7 @@ __all__ = [
     "DBOS",
     "DBOSConfiguredInstance",
     "DBOSContextEnsure",
+    "DBOSContextSetAuth",
     "GetWorkflowsInput",
     "KafkaMessage",
     "SetWorkflowID",
{dbos-0.7.1 → dbos-0.8.0}/dbos/context.py

@@ -492,6 +492,39 @@ class EnterDBOSHandler:
         return False  # Did not handle


+class DBOSContextSetAuth(DBOSContextEnsure):
+    def __init__(self, user: Optional[str], roles: Optional[List[str]]) -> None:
+        self.created_ctx = False
+        self.user = user
+        self.roles = roles
+        self.prev_user: Optional[str] = None
+        self.prev_roles: Optional[List[str]] = None
+
+    def __enter__(self) -> DBOSContext:
+        ctx = get_local_dbos_context()
+        if ctx is None:
+            self.created_ctx = True
+            set_local_dbos_context(DBOSContext())
+        ctx = assert_current_dbos_context()
+        self.prev_user = ctx.authenticated_user
+        self.prev_roles = ctx.authenticated_roles
+        ctx.set_authentication(self.user, self.roles)
+        return ctx
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> Literal[False]:
+        ctx = assert_current_dbos_context()
+        ctx.set_authentication(self.prev_user, self.prev_roles)
+        # Clean up the basic context if we created it
+        if self.created_ctx:
+            clear_local_dbos_context()
+        return False  # Did not handle
+
+
 class DBOSAssumeRole:
     def __init__(self, assume_role: Optional[str]) -> None:
         self.prior_role: Optional[str] = None
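`DBOSContextSetAuth` extends `DBOSContextEnsure`: it saves the current authenticated user and roles, installs the given ones, and restores the saved values on exit, tearing the context down if it created it. A minimal usage sketch; the workflow, user, and role names are illustrative, not from the package:

    from dbos import DBOS, DBOSContextSetAuth

    @DBOS.workflow()
    def admin_task() -> str:
        return "done"

    # Run admin_task as user "alice" holding the "admin" role; the previous
    # authentication state is restored when the block exits.
    with DBOSContextSetAuth("alice", ["admin"]):
        admin_task()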
{dbos-0.7.1 → dbos-0.8.0}/dbos/core.py

@@ -4,17 +4,7 @@ import time
 import traceback
 from concurrent.futures import Future
 from functools import wraps
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Generic,
-    List,
-    Optional,
-    Tuple,
-    TypeVar,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Tuple, TypeVar, cast

 from dbos.application_database import ApplicationDatabase, TransactionResultInternal
@@ -48,6 +38,7 @@ from dbos.error import (
     DBOSWorkflowFunctionNotFoundError,
 )
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     get_config_name,
     get_dbos_class_name,
     get_dbos_func_name,
@@ -61,10 +52,10 @@ from dbos.roles import check_required_roles
 from dbos.system_database import (
     GetEventWorkflowContext,
     OperationResultInternal,
-    WorkflowInputs,
     WorkflowStatusInternal,
     WorkflowStatusString,
 )
+from dbos.utils import WorkflowInputs

 if TYPE_CHECKING:
     from dbos.dbos import DBOS, Workflow, WorkflowHandle, WorkflowStatus, _DBOSRegistry
@@ -128,6 +119,7 @@ def _init_workflow(
     config_name: Optional[str],
     temp_wf_type: Optional[str],
     queue: Optional[str] = None,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -167,11 +159,13 @@ def _init_workflow(
         # Synchronously record the status and inputs for workflows and single-step workflows
         # We also have to do this for single-step workflows because of the foreign key constraint on the operation outputs table
         # TODO: Make this transactional (and with the queue step below)
-        dbos._sys_db.update_workflow_status(status, False, ctx.in_recovery)
-        dbos._sys_db.update_workflow_inputs(wfid, utils.serialize(inputs))
+        dbos._sys_db.update_workflow_status(
+            status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
+        )
+        dbos._sys_db.update_workflow_inputs(wfid, utils.serialize_args(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
-        dbos._sys_db.buffer_workflow_inputs(wfid, utils.serialize(inputs))
+        dbos._sys_db.buffer_workflow_inputs(wfid, utils.serialize_args(inputs))

     if queue is not None:
         dbos._sys_db.enqueue(wfid, queue)
@@ -191,7 +185,8 @@ def _execute_workflow(
         status["status"] = "SUCCESS"
         status["output"] = utils.serialize(output)
         if status["queue_name"] is not None:
-            dbos._sys_db.remove_from_queue(status["workflow_uuid"])
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.buffer_workflow_status(status)
     except DBOSWorkflowConflictIDError:
         # Retrieve the workflow handle and wait for the result.
@@ -203,9 +198,10 @@ def _execute_workflow(
         return output
     except Exception as error:
         status["status"] = "ERROR"
-        status["error"] = utils.serialize(error)
+        status["error"] = utils.serialize_exception(error)
         if status["queue_name"] is not None:
-            dbos._sys_db.remove_from_queue(status["workflow_uuid"])
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.update_workflow_status(status)
         raise

@@ -228,7 +224,7 @@ def _execute_workflow_wthread(
     with EnterDBOSWorkflow(attributes):
         try:
             return _execute_workflow(dbos, status, func, *args, **kwargs)
-        except Exception as e:
+        except Exception:
             dbos.logger.error(
                 f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
             )
@@ -299,10 +295,15 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]
     )


-def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
+def _workflow_wrapper(
+    dbosreg: "_DBOSRegistry",
+    func: F,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+) -> F:
     func.__orig_func = func  # type: ignore

     fi = get_or_create_func_info(func)
+    fi.max_recovery_attempts = max_recovery_attempts

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
@@ -335,17 +336,21 @@ def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
                 class_name=get_dbos_class_name(fi, func, args),
                 config_name=get_config_name(fi, func, args),
                 temp_wf_type=get_temp_workflow_type(func),
+                max_recovery_attempts=max_recovery_attempts,
             )

+            dbos.logger.debug(
+                f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
+            )
             return _execute_workflow(dbos, status, func, *args, **kwargs)

     wrapped_func = cast(F, wrapper)
     return wrapped_func


-def _workflow(reg: "_DBOSRegistry") -> Callable[[F], F]:
+def _workflow(reg: "_DBOSRegistry", max_recovery_attempts: int) -> Callable[[F], F]:
     def _workflow_decorator(func: F) -> F:
-        wrapped_func = _workflow_wrapper(reg, func)
+        wrapped_func = _workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func
@@ -411,6 +416,7 @@ def _start_workflow(
         config_name=get_config_name(fi, func, gin_args),
         temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
+        max_recovery_attempts=fi.max_recovery_attempts,
     )

     if not execute_workflow:
@@ -487,9 +493,14 @@ def _transaction(
                 )
             )
             if recorded_output:
+                dbos.logger.debug(
+                    f"Replaying transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                )
                 if recorded_output["error"]:
-                    deserialized_error = utils.deserialize(
-                        recorded_output["error"]
+                    deserialized_error = (
+                        utils.deserialize_exception(
+                            recorded_output["error"]
+                        )
                     )
                     has_recorded_error = True
                     raise deserialized_error
@@ -501,6 +512,11 @@
                     raise Exception(
                         "Output and error are both None"
                     )
+            else:
+                dbos.logger.debug(
+                    f"Running transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                )
+
             output = func(*args, **kwargs)
             txn_output["output"] = utils.serialize(output)
             assert (
@@ -527,7 +543,7 @@
         except Exception as error:
             # Don't record the error if it was already recorded
             if not has_recorded_error:
-                txn_output["error"] = utils.serialize(error)
+                txn_output["error"] = utils.serialize_exception(error)
                 dbos._app_db.record_transaction_error(txn_output)
             raise
         return output
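Transactions now emit a debug log stating whether they are running for the first time or replaying a recorded output, and recorded errors round-trip through `serialize_exception`/`deserialize_exception`. A sketch of a function whose output would be recorded and replayed; the table is illustrative, and `DBOS.sql_session` is assumed to be the SQLAlchemy session the package exposes inside transactions:

    from sqlalchemy import text

    from dbos import DBOS

    @DBOS.transaction()
    def insert_greeting(name: str) -> None:
        # Executes inside a database transaction; on workflow recovery the
        # recorded result is replayed instead of re-running the SQL.
        DBOS.sql_session.execute(
            text("INSERT INTO greetings (name) VALUES (:name)"), {"name": name}
        )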
@@ -598,13 +614,23 @@ def _step(
             ctx.workflow_id, ctx.function_id
         )
         if recorded_output:
+            dbos.logger.debug(
+                f"Replaying step, id: {ctx.function_id}, name: {attributes['name']}"
+            )
             if recorded_output["error"] is not None:
-                deserialized_error = utils.deserialize(recorded_output["error"])
+                deserialized_error = utils.deserialize_exception(
+                    recorded_output["error"]
+                )
                 raise deserialized_error
             elif recorded_output["output"] is not None:
                 return utils.deserialize(recorded_output["output"])
             else:
                 raise Exception("Output and error are both None")
+        else:
+            dbos.logger.debug(
+                f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
+            )
+
         output = None
         error = None
         local_max_attempts = max_attempts if retries_allowed else 1
@@ -639,7 +665,7 @@
             )

             step_output["error"] = (
-                utils.serialize(error) if error is not None else None
+                utils.serialize_exception(error) if error is not None else None
             )
             dbos._sys_db.record_operation_result(step_output)
{dbos-0.7.1 → dbos-0.8.0}/dbos/dbos.py

@@ -5,6 +5,7 @@ import json
 import os
 import sys
 import threading
+import traceback
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
 from logging import Logger
@@ -42,6 +43,7 @@ from dbos.decorators import classproperty
 from dbos.queue import Queue, queue_thread
 from dbos.recovery import _recover_pending_workflows, _startup_recovery_thread
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     DBOSClassInfo,
     get_or_create_class_info,
     set_dbos_func_name,
@@ -338,43 +340,47 @@ class DBOS:
         _dbos_global_instance._launch()

     def _launch(self) -> None:
-        # ... most of the 0.7.1 launch body is truncated in this diff view ...
-        self._executor.submit(queue_thread, evt, self)
-        # Grab any pollers that were deferred and start them
-        for evt, func, args, kwargs in self._registry.pollers:
-            self.stop_events.append(evt)
-            self._executor.submit(
-        self._registry.pollers = []
+        try:
+            if self._launched:
+                dbos_logger.warning(f"DBOS was already launched")
+                return
+            self._launched = True
+            self._executor_field = ThreadPoolExecutor(max_workers=64)
+            self._sys_db_field = SystemDatabase(self.config)
+            self._app_db_field = ApplicationDatabase(self.config)
+            self._admin_server_field = AdminServer(dbos=self)
+
+            if not os.environ.get("DBOS__VMID"):
+                workflow_ids = self._sys_db.get_pending_workflows("local")
+                self._executor.submit(_startup_recovery_thread, self, workflow_ids)
+
+            # Listen to notifications
+            self._executor.submit(self._sys_db._notification_listener)
+
+            # Start flush workflow buffers thread
+            self._executor.submit(self._sys_db.flush_workflow_buffers)
+
+            # Start the queue thread
+            evt = threading.Event()
+            self.stop_events.append(evt)
+            self._executor.submit(queue_thread, evt, self)
+
+            # Grab any pollers that were deferred and start them
+            for evt, func, args, kwargs in self._registry.pollers:
+                self.stop_events.append(evt)
+                self._executor.submit(func, *args, **kwargs)
+            self._registry.pollers = []
+
+            dbos_logger.info("DBOS launched")
+
+            # Flush handlers and add OTLP to all loggers if enabled
+            # to enable their export in DBOS Cloud
+            for handler in dbos_logger.handlers:
+                handler.flush()
+            add_otlp_to_all_loggers()
+        except Exception:
+            dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
+            raise

     def _destroy(self) -> None:
         self._initialized = False
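`_launch` now wraps the whole startup sequence (databases, admin server, recovery, notification listener, buffer flushing, the queue thread, and deferred pollers) in a try/except that logs the full traceback before re-raising. The application-facing entry point is unchanged; a minimal sketch, assuming the usual `load_config`/`DBOS.launch` flow:

    from dbos import DBOS, load_config

    config = load_config()  # reads dbos-config.yaml
    DBOS(config=config)     # construct the global instance
    DBOS.launch()           # startup failures are now logged with a traceback, then re-raised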
@@ -401,9 +407,11 @@

     # Decorators for DBOS functionality
     @classmethod
-    def workflow(cls) -> Callable[[F], F]:
+    def workflow(
+        cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
+    ) -> Callable[[F], F]:
         """Decorate a function for use as a DBOS workflow."""
-        return _workflow(_get_or_create_dbos_registry())
+        return _workflow(_get_or_create_dbos_registry(), max_recovery_attempts)

     @classmethod
     def transaction(
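Workflows can now bound how many times recovery will retry them; a workflow exceeding `max_recovery_attempts` is moved to the dead-letter queue (see the new `DBOSDeadLetterQueueError` in the error.py hunk below). A usage sketch with an illustrative workflow:

    from dbos import DBOS

    # After 10 recovery attempts, this workflow is moved to the
    # dead-letter queue instead of being retried indefinitely.
    @DBOS.workflow(max_recovery_attempts=10)
    def fragile_workflow(data: str) -> str:
        return data.upper()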
@@ -542,6 +550,7 @@
             recovery_attempts=stat["recovery_attempts"],
             class_name=stat["class_name"],
             config_name=stat["config_name"],
+            queue_name=stat["queue_name"],
             authenticated_user=stat["authenticated_user"],
             assumed_role=stat["assumed_role"],
             authenticated_roles=(
@@ -748,6 +757,7 @@ class WorkflowStatus:
         name(str): The workflow function name
         class_name(str): For member functions, the name of the class containing the workflow function
         config_name(str): For instance member functions, the name of the class instance for the execution
+        queue_name(str): For workflows that are or were queued, the queue name
         authenticated_user(str): The user who invoked the workflow
         assumed_role(str): The access role used by the user to allow access to the workflow function
         authenticated_roles(List[str]): List of all access roles available to the authenticated user
@@ -760,6 +770,7 @@ class WorkflowStatus:
     name: str
     class_name: Optional[str]
     config_name: Optional[str]
+    queue_name: Optional[str]
     authenticated_user: Optional[str]
     assumed_role: Optional[str]
     authenticated_roles: Optional[List[str]]
{dbos-0.7.1 → dbos-0.8.0}/dbos/error.py

@@ -32,6 +32,7 @@ class DBOSErrorCode(Enum):
     InitializationError = 3
     WorkflowFunctionNotFound = 4
     NonExistentWorkflowError = 5
+    DeadLetterQueueError = 6
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8

@@ -86,6 +87,16 @@ class DBOSNonExistentWorkflowError(DBOSException):
         )


+class DBOSDeadLetterQueueError(DBOSException):
+    """Exception raised when a workflow database record does not exist for a given ID."""
+
+    def __init__(self, wf_id: str, max_retries: int):
+        super().__init__(
+            f"Workflow {wf_id} has been moved to the dead-letter queue after exceeding the maximum of ${max_retries} retries",
+            dbos_error_code=DBOSErrorCode.DeadLetterQueueError.value,
+        )
+
+
 class DBOSNotAuthorizedError(DBOSException):
     """Exception raised by DBOS role-based security when the user is not authorized to access a function."""
dbos-0.8.0/dbos/migrations/versions/d76646551a6b_job_queue_limiter.py (new file)

@@ -0,0 +1,43 @@
+"""
+Adjust workflow queue to add columns for rate limiter.
+
+Revision ID: d76646551a6b
+Revises: 50f3227f0b4b
+Create Date: 2024-09-25 14:48:10.218015
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d76646551a6b"
+down_revision: Union[str, None] = "50f3227f0b4b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "started_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "completed_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_column("job_queue", "started_at_epoch_ms", schema="dbos")
+    op.drop_column("job_queue", "completed_at_epoch_ms", schema="dbos")
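The epoch-millisecond columns record when each queued workflow started and completed, which is the bookkeeping a time-window rate limiter needs. A sketch of the kind of check these columns enable; this is illustrative only (the real logic lives in the 0.8.0 `system_database.py` changes, which this view omits) and assumes the queue table's existing `queue_name` column:

    import time

    import sqlalchemy as sa

    def under_rate_limit(conn: sa.Connection, queue_name: str, limit: int, period: float) -> bool:
        # Count workflows on this queue started within the last `period` seconds.
        cutoff_ms = int((time.time() - period) * 1000)
        started = conn.execute(
            sa.text(
                "SELECT count(*) FROM dbos.workflow_queue "
                "WHERE queue_name = :q AND started_at_epoch_ms > :cutoff"
            ),
            {"q": queue_name, "cutoff": cutoff_ms},
        ).scalar_one()
        return started < limit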
dbos-0.8.0/dbos/migrations/versions/d76646551a6c_workflow_queue.py (new file)

@@ -0,0 +1,28 @@
+"""workflow_queue
+
+Revision ID: d76646551a6c
+Revises: d76646551a6b
+Create Date: 2024-09-27 12:00:00.0
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d76646551a6c"
+down_revision: Union[str, None] = "d76646551a6b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.rename_table("job_queue", "workflow_queue", schema="dbos")
+    op.execute("CREATE VIEW dbos.job_queue AS SELECT * FROM dbos.workflow_queue;")
+
+
+def downgrade() -> None:
+    op.execute("DROP VIEW dbos.job_queue;")
+    op.rename_table("workflow_queue", "job_queue", schema="dbos")
{dbos-0.7.1 → dbos-0.8.0}/dbos/queue.py

@@ -1,7 +1,6 @@
 import threading
-import time
 import traceback
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, TypedDict

 from dbos.core import P, R, _execute_workflow_id, _start_workflow

@@ -9,10 +8,35 @@ if TYPE_CHECKING:
     from dbos.dbos import DBOS, Workflow, WorkflowHandle


+class QueueRateLimit(TypedDict):
+    """
+    Limit the maximum number of workflows from this queue that can be started in a given period.
+
+    If the limit is 5 and the period is 10, no more than 5 functions can be
+    started per 10 seconds.
+    """
+
+    limit: int
+    period: float
+
+
 class Queue:
-    def __init__(self, name: str, concurrency: Optional[int] = None) -> None:
+    """
+    Workflow queue.
+
+    Workflow queues allow workflows to be started at a later time, based on concurrency and
+    rate limits.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        concurrency: Optional[int] = None,
+        limiter: Optional[QueueRateLimit] = None,
+    ) -> None:
         self.name = name
         self.concurrency = concurrency
+        self.limiter = limiter
         from dbos.dbos import _get_or_create_dbos_registry

         registry = _get_or_create_dbos_registry()
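A usage sketch of the new `limiter` argument: a queue that runs at most 10 workflows concurrently and starts no more than 50 per 30 seconds. The queue name and workflow are illustrative, and the `enqueue` call assumes the queue's enqueueing API:

    from dbos import DBOS
    from dbos.queue import Queue

    queue = Queue("example_queue", concurrency=10, limiter={"limit": 50, "period": 30})

    @DBOS.workflow()
    def process(task: str) -> str:
        return task.upper()

    # Enqueued workflows are started later by the queue thread, subject to
    # the concurrency and rate limits.
    handle = queue.enqueue(process, "task-1")
    result = handle.get_result()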
@@ -29,12 +53,11 @@ class Queue:

 def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
     while not stop_event.is_set():
-        time.sleep(1)
-        for queue_name, queue in dbos._registry.queue_info_map.items():
+        if stop_event.wait(timeout=1):
+            return
+        for _, queue in dbos._registry.queue_info_map.items():
             try:
-                wf_ids = dbos._sys_db.start_queued_workflows(
-                    queue_name, queue.concurrency
-                )
+                wf_ids = dbos._sys_db.start_queued_workflows(queue)
                 for id in wf_ids:
                     _execute_workflow_id(dbos, id)
             except Exception:
{dbos-0.7.1 → dbos-0.8.0}/dbos/registrations.py

@@ -3,6 +3,8 @@ from enum import Enum
 from types import FunctionType
 from typing import Any, Callable, List, Literal, Optional, Tuple, Type, cast

+DEFAULT_MAX_RECOVERY_ATTEMPTS = 50
+

 def get_dbos_func_name(f: Any) -> str:
     if hasattr(f, "dbos_function_name"):
@@ -47,6 +49,7 @@ class DBOSFuncInfo:
         self.class_info: Optional[DBOSClassInfo] = None
         self.func_type: DBOSFuncType = DBOSFuncType.Unknown
         self.required_roles: Optional[List[str]] = None
+        self.max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS


 def get_or_create_class_info(cls: Type[Any]) -> DBOSClassInfo:
{dbos-0.7.1 → dbos-0.8.0}/dbos/scheduler/scheduler.py

@@ -2,6 +2,7 @@ import threading
 from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Callable

+from dbos.logger import dbos_logger
 from dbos.queue import Queue

 if TYPE_CHECKING:
@@ -18,7 +19,12 @@ scheduler_queue: Queue
 def scheduler_loop(
     func: ScheduledWorkflow, cron: str, stop_event: threading.Event
 ) -> None:
-    iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
+    try:
+        iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
+    except Exception as e:
+        dbos_logger.error(
+            f'Cannot run scheduled function {func.__name__}. Invalid crontab "{cron}"'
+        )
     while not stop_event.is_set():
         nextExecTime = iter.get_next(datetime)
         sleepTime = nextExecTime - datetime.now(timezone.utc)
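Invalid crontab strings are now reported through `dbos_logger` rather than failing silently. A sketch of a scheduled workflow with a valid six-field (seconds-first) crontab, assuming the package's `DBOS.scheduled` decorator:

    from datetime import datetime

    from dbos import DBOS

    # Fires at second 0 of every minute; DBOS passes the scheduled and
    # actual execution times to the workflow.
    @DBOS.scheduled("0 * * * * *")
    @DBOS.workflow()
    def every_minute(scheduled_time: datetime, actual_time: datetime) -> None:
        DBOS.logger.info(f"Scheduled {scheduled_time}, ran {actual_time}")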
{dbos-0.7.1 → dbos-0.8.0}/dbos/schemas/system_database.py

@@ -142,8 +142,8 @@ class SystemSchema:
         Column("last_run_time", BigInteger, nullable=False),
     )

-    job_queue = Table(
-        "job_queue",
+    workflow_queue = Table(
+        "workflow_queue",
         metadata_obj,
         Column(
             "workflow_uuid",
@@ -161,4 +161,12 @@
             nullable=False,
             server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
         ),
+        Column(
+            "started_at_epoch_ms",
+            BigInteger(),
+        ),
+        Column(
+            "completed_at_epoch_ms",
+            BigInteger(),
+        ),
     )