dbos-0.8.0a3.tar.gz → dbos-0.8.0a10.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dbos-0.8.0a3 → dbos-0.8.0a10}/PKG-INFO +1 -1
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/core.py +39 -7
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/dbos.py +47 -36
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/error.py +11 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py +2 -1
- dbos-0.8.0a10/dbos/migrations/versions/d76646551a6b_job_queue_limiter.py +43 -0
- dbos-0.8.0a10/dbos/migrations/versions/d76646551a6c_workflow_queue.py +28 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/migrations/versions/eab0cc1d9a14_job_queue.py +2 -1
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/queue.py +31 -8
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/registrations.py +3 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/request.py +1 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/scheduler/scheduler.py +7 -1
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/schemas/system_database.py +10 -2
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/system_database.py +151 -24
- {dbos-0.8.0a3 → dbos-0.8.0a10}/pyproject.toml +1 -1
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_dbos.py +81 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_failures.py +35 -1
- dbos-0.8.0a10/tests/test_queue.py +322 -0
- dbos-0.8.0a3/tests/test_queue.py +0 -110
- {dbos-0.8.0a3 → dbos-0.8.0a10}/LICENSE +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/README.md +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/__init__.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/admin_sever.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/application_database.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/cli.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/context.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/dbos-config.schema.json +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/dbos_config.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/decorators.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/fastapi.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/flask.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/kafka.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/kafka_message.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/logger.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/migrations/env.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/migrations/script.py.mako +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/migrations/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/py.typed +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/recovery.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/roles.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/scheduler/croniter.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/schemas/__init__.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/schemas/application_database.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/README.md +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/__package/__init__.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/__package/main.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/__package/schema.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/alembic.ini +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/dbos-config.yaml.dbos +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/migrations/env.py.dbos +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/migrations/script.py.mako +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/templates/hello/start_postgres_docker.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/tracer.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/utils.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/__init__.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/atexit_no_ctor.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/atexit_no_launch.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/classdefs.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/conftest.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/more_classdefs.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/scheduler/test_croniter.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/scheduler/test_scheduler.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_admin_server.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_classdecorators.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_concurrency.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_config.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_fastapi.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_fastapi_roles.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_flask.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_kafka.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_package.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_schema_migration.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/tests/test_singleton.py +0 -0
- {dbos-0.8.0a3 → dbos-0.8.0a10}/version/__init__.py +0 -0
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/core.py

@@ -38,6 +38,7 @@ from dbos.error import (
     DBOSWorkflowFunctionNotFoundError,
 )
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     get_config_name,
     get_dbos_class_name,
     get_dbos_func_name,
@@ -118,6 +119,7 @@ def _init_workflow(
     config_name: Optional[str],
     temp_wf_type: Optional[str],
     queue: Optional[str] = None,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -157,7 +159,9 @@ def _init_workflow(
         # Synchronously record the status and inputs for workflows and single-step workflows
         # We also have to do this for single-step workflows because of the foreign key constraint on the operation outputs table
         # TODO: Make this transactional (and with the queue step below)
-        dbos._sys_db.update_workflow_status(
+        dbos._sys_db.update_workflow_status(
+            status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
+        )
         dbos._sys_db.update_workflow_inputs(wfid, utils.serialize_args(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
@@ -181,7 +185,8 @@ def _execute_workflow(
         status["status"] = "SUCCESS"
         status["output"] = utils.serialize(output)
         if status["queue_name"] is not None:
-            dbos.
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.buffer_workflow_status(status)
     except DBOSWorkflowConflictIDError:
         # Retrieve the workflow handle and wait for the result.
@@ -195,7 +200,8 @@ def _execute_workflow(
         status["status"] = "ERROR"
         status["error"] = utils.serialize_exception(error)
         if status["queue_name"] is not None:
-            dbos.
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.update_workflow_status(status)
         raise

@@ -218,7 +224,7 @@ def _execute_workflow_wthread(
     with EnterDBOSWorkflow(attributes):
         try:
             return _execute_workflow(dbos, status, func, *args, **kwargs)
-        except Exception
+        except Exception:
             dbos.logger.error(
                 f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
             )
@@ -289,10 +295,15 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
     )


-def _workflow_wrapper(
+def _workflow_wrapper(
+    dbosreg: "_DBOSRegistry",
+    func: F,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+) -> F:
     func.__orig_func = func  # type: ignore

     fi = get_or_create_func_info(func)
+    fi.max_recovery_attempts = max_recovery_attempts

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
@@ -325,17 +336,21 @@ def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
             class_name=get_dbos_class_name(fi, func, args),
             config_name=get_config_name(fi, func, args),
             temp_wf_type=get_temp_workflow_type(func),
+            max_recovery_attempts=max_recovery_attempts,
         )

+        dbos.logger.debug(
+            f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
+        )
         return _execute_workflow(dbos, status, func, *args, **kwargs)

     wrapped_func = cast(F, wrapper)
     return wrapped_func


-def _workflow(reg: "_DBOSRegistry") -> Callable[[F], F]:
+def _workflow(reg: "_DBOSRegistry", max_recovery_attempts: int) -> Callable[[F], F]:
     def _workflow_decorator(func: F) -> F:
-        wrapped_func = _workflow_wrapper(reg, func)
+        wrapped_func = _workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func

@@ -401,6 +416,7 @@ def _start_workflow(
         config_name=get_config_name(fi, func, gin_args),
         temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
+        max_recovery_attempts=fi.max_recovery_attempts,
     )

     if not execute_workflow:
@@ -477,6 +493,9 @@ def _transaction(
             )
         )
         if recorded_output:
+            dbos.logger.debug(
+                f"Replaying transaction, id: {ctx.function_id}, name: {attributes['name']}"
+            )
             if recorded_output["error"]:
                 deserialized_error = (
                     utils.deserialize_exception(
@@ -493,6 +512,11 @@ def _transaction(
                 raise Exception(
                     "Output and error are both None"
                 )
+        else:
+            dbos.logger.debug(
+                f"Running transaction, id: {ctx.function_id}, name: {attributes['name']}"
+            )
+
         output = func(*args, **kwargs)
         txn_output["output"] = utils.serialize(output)
         assert (
@@ -590,6 +614,9 @@ def _step(
             ctx.workflow_id, ctx.function_id
         )
         if recorded_output:
+            dbos.logger.debug(
+                f"Replaying step, id: {ctx.function_id}, name: {attributes['name']}"
+            )
             if recorded_output["error"] is not None:
                 deserialized_error = utils.deserialize_exception(
                     recorded_output["error"]
@@ -599,6 +626,11 @@ def _step(
                 return utils.deserialize(recorded_output["output"])
             else:
                 raise Exception("Output and error are both None")
+        else:
+            dbos.logger.debug(
+                f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
+            )
+
         output = None
         error = None
         local_max_attempts = max_attempts if retries_allowed else 1
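The new debug lines above distinguish a first execution ("Running ...") from deterministic replay ("Replaying ...") for workflows, transactions, and steps. One way to surface them, assuming the library logs through a logger named "dbos" as set up in dbos/logger.py:

```python
import logging

# Raise the dbos logger to DEBUG so the new "Running ..." and
# "Replaying ..." messages emitted by core.py become visible.
logging.getLogger("dbos").setLevel(logging.DEBUG)
```
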
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/dbos.py

@@ -5,6 +5,7 @@ import json
 import os
 import sys
 import threading
+import traceback
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
 from logging import Logger
@@ -42,6 +43,7 @@ from dbos.decorators import classproperty
 from dbos.queue import Queue, queue_thread
 from dbos.recovery import _recover_pending_workflows, _startup_recovery_thread
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     DBOSClassInfo,
     get_or_create_class_info,
     set_dbos_func_name,
@@ -338,43 +340,47 @@ class DBOS:
         _dbos_global_instance._launch()

     def _launch(self) -> None:
-        self._executor.submit(queue_thread, evt, self)
-
-        # Grab any pollers that were deferred and start them
-        for evt, func, args, kwargs in self._registry.pollers:
+        try:
+            if self._launched:
+                dbos_logger.warning(f"DBOS was already launched")
+                return
+            self._launched = True
+            self._executor_field = ThreadPoolExecutor(max_workers=64)
+            self._sys_db_field = SystemDatabase(self.config)
+            self._app_db_field = ApplicationDatabase(self.config)
+            self._admin_server_field = AdminServer(dbos=self)
+
+            if not os.environ.get("DBOS__VMID"):
+                workflow_ids = self._sys_db.get_pending_workflows("local")
+                self._executor.submit(_startup_recovery_thread, self, workflow_ids)
+
+            # Listen to notifications
+            self._executor.submit(self._sys_db._notification_listener)
+
+            # Start flush workflow buffers thread
+            self._executor.submit(self._sys_db.flush_workflow_buffers)
+
+            # Start the queue thread
+            evt = threading.Event()
             self.stop_events.append(evt)
-            self._executor.submit(
-        self._registry.pollers = []
+            self._executor.submit(queue_thread, evt, self)

+            # Grab any pollers that were deferred and start them
+            for evt, func, args, kwargs in self._registry.pollers:
+                self.stop_events.append(evt)
+                self._executor.submit(func, *args, **kwargs)
+            self._registry.pollers = []

+            dbos_logger.info("DBOS launched")
+
+            # Flush handlers and add OTLP to all loggers if enabled
+            # to enable their export in DBOS Cloud
+            for handler in dbos_logger.handlers:
+                handler.flush()
+            add_otlp_to_all_loggers()
+        except Exception:
+            dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
+            raise

     def _destroy(self) -> None:
         self._initialized = False
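_launch() is now guarded: a second call logs a warning and returns, and any start-up failure is logged with a traceback and re-raised instead of dying silently. A minimal start-up sketch, assuming a dbos-config.yaml in the working directory (the no-argument constructor is shown for brevity):

```python
from dbos import DBOS

DBOS()         # construct the singleton; reads dbos-config.yaml
DBOS.launch()  # starts the system DB, admin server, recovery, queue, and poller threads
```
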
@@ -401,9 +407,11 @@ class DBOS:

     # Decorators for DBOS functionality
     @classmethod
-    def workflow(
+    def workflow(
+        cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
+    ) -> Callable[[F], F]:
         """Decorate a function for use as a DBOS workflow."""
-        return _workflow(_get_or_create_dbos_registry())
+        return _workflow(_get_or_create_dbos_registry(), max_recovery_attempts)

     @classmethod
     def transaction(
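A sketch of the new decorator parameter (the workflow name and body are illustrative):

```python
from dbos import DBOS

# If crash recovery restarts this workflow more than 10 times without it
# completing, it is moved to the dead-letter queue instead of retried again.
@DBOS.workflow(max_recovery_attempts=10)
def process_order(order_id: str) -> str:
    DBOS.logger.info(f"processing order {order_id}")
    return order_id
```
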
@@ -542,6 +550,7 @@ class DBOS:
             recovery_attempts=stat["recovery_attempts"],
             class_name=stat["class_name"],
             config_name=stat["config_name"],
+            queue_name=stat["queue_name"],
             authenticated_user=stat["authenticated_user"],
             assumed_role=stat["assumed_role"],
             authenticated_roles=(
@@ -748,6 +757,7 @@ class WorkflowStatus:
         name(str): The workflow function name
         class_name(str): For member functions, the name of the class containing the workflow function
         config_name(str): For instance member functions, the name of the class instance for the execution
+        queue_name(str): For workflows that are or were queued, the queue name
         authenticated_user(str): The user who invoked the workflow
         assumed_role(str): The access role used by the user to allow access to the workflow function
         authenticated_roles(List[str]): List of all access roles available to the authenticated user
@@ -760,6 +770,7 @@ class WorkflowStatus:
     name: str
     class_name: Optional[str]
     config_name: Optional[str]
+    queue_name: Optional[str]
     authenticated_user: Optional[str]
     assumed_role: Optional[str]
     authenticated_roles: Optional[List[str]]
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/error.py

@@ -32,6 +32,7 @@ class DBOSErrorCode(Enum):
     InitializationError = 3
     WorkflowFunctionNotFound = 4
     NonExistentWorkflowError = 5
+    DeadLetterQueueError = 6
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8

@@ -86,6 +87,16 @@ class DBOSNonExistentWorkflowError(DBOSException):
         )


+class DBOSDeadLetterQueueError(DBOSException):
+    """Exception raised when a workflow exceeds its maximum recovery attempts and is moved to the dead-letter queue."""
+
+    def __init__(self, wf_id: str, max_retries: int):
+        super().__init__(
+            f"Workflow {wf_id} has been moved to the dead-letter queue after exceeding the maximum of {max_retries} retries",
+            dbos_error_code=DBOSErrorCode.DeadLetterQueueError.value,
+        )
+
+
 class DBOSNotAuthorizedError(DBOSException):
     """Exception raised by DBOS role-based security when the user is not authorized to access a function."""

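A sketch of how the new error can surface, loosely modeled on the recovery tests added in tests/test_failures.py (the workflow and the recovery call here are illustrative):

```python
from dbos import DBOS
from dbos.error import DBOSDeadLetterQueueError

@DBOS.workflow(max_recovery_attempts=2)
def stuck_workflow() -> None:
    ...  # suppose this blocks and never completes

# Once the workflow's recovery_attempts counter exceeds 2, attempting to
# recover it again raises DBOSDeadLetterQueueError instead of restarting it.
try:
    DBOS.recover_pending_workflows()
except DBOSDeadLetterQueueError as e:
    print(e)
```
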
dbos-0.8.0a10/dbos/migrations/versions/d76646551a6b_job_queue_limiter.py (new file)

@@ -0,0 +1,43 @@
+"""
+Adjust workflow queue to add columns for rate limiter.
+
+Revision ID: d76646551a6b
+Revises: 50f3227f0b4b
+Create Date: 2024-09-25 14:48:10.218015
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d76646551a6b"
+down_revision: Union[str, None] = "50f3227f0b4b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "started_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "completed_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_column("job_queue", "started_at_epoch_ms", schema="dbos")
+    op.drop_column("job_queue", "completed_at_epoch_ms", schema="dbos")
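These two timestamp columns are what make a sliding-window rate limiter checkable in SQL: count the workflows from a queue whose started_at_epoch_ms falls inside the window and compare against the limit. A sketch of that check, assuming a SQLAlchemy connection and the workflow_queue table introduced by the next migration (the exact query used in dbos/system_database.py may differ):

```python
import time

import sqlalchemy as sa

def under_rate_limit(conn: sa.Connection, queue_name: str, limit: int, period: float) -> bool:
    # Count workflows from this queue started within the last `period` seconds.
    cutoff_ms = int((time.time() - period) * 1000)
    started = conn.execute(
        sa.text(
            "SELECT count(*) FROM dbos.workflow_queue "
            "WHERE queue_name = :q AND started_at_epoch_ms > :cutoff"
        ),
        {"q": queue_name, "cutoff": cutoff_ms},
    ).scalar_one()
    return started < limit
```
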
dbos-0.8.0a10/dbos/migrations/versions/d76646551a6c_workflow_queue.py (new file)

@@ -0,0 +1,28 @@
+"""workflow_queue
+
+Revision ID: d76646551a6c
+Revises: d76646551a6b
+Create Date: 2024-09-27 12:00:00.0
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d76646551a6c"
+down_revision: Union[str, None] = "d76646551a6b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.rename_table("job_queue", "workflow_queue", schema="dbos")
+    op.execute("CREATE VIEW dbos.job_queue AS SELECT * FROM dbos.workflow_queue;")
+
+
+def downgrade() -> None:
+    op.execute("DROP VIEW dbos.job_queue;")
+    op.rename_table("workflow_queue", "job_queue", schema="dbos")
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/queue.py

@@ -1,7 +1,6 @@
 import threading
-import time
 import traceback
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, TypedDict

 from dbos.core import P, R, _execute_workflow_id, _start_workflow

@@ -9,10 +8,35 @@ if TYPE_CHECKING:
     from dbos.dbos import DBOS, Workflow, WorkflowHandle


+class QueueRateLimit(TypedDict):
+    """
+    Limit the maximum number of workflows from this queue that can be started in a given period.
+
+    If the limit is 5 and the period is 10, no more than 5 functions can be
+    started per 10 seconds.
+    """
+
+    limit: int
+    period: float
+
+
 class Queue:
-    def __init__(self, name: str, concurrency: Optional[int] = None) -> None:
+    """
+    Workflow queue.
+
+    Workflow queues allow workflows to be started at a later time, based on concurrency and
+    rate limits.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        concurrency: Optional[int] = None,
+        limiter: Optional[QueueRateLimit] = None,
+    ) -> None:
         self.name = name
         self.concurrency = concurrency
+        self.limiter = limiter
         from dbos.dbos import _get_or_create_dbos_registry

         registry = _get_or_create_dbos_registry()
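Taken together, one queue can now bound both in-flight concurrency and start rate. A usage sketch, assuming Queue.enqueue returns a workflow handle as elsewhere in the dbos API (names are illustrative; see also the new tests/test_queue.py):

```python
from dbos import DBOS
from dbos.queue import Queue

# At most 5 workflows in flight, and at most 50 started per 30 seconds.
queue = Queue("email_queue", concurrency=5, limiter={"limit": 50, "period": 30.0})

@DBOS.workflow()
def send_email(to: str) -> str:
    DBOS.logger.info(f"sending email to {to}")
    return to

def send_all(recipients: list[str]) -> None:
    handles = [queue.enqueue(send_email, r) for r in recipients]
    for handle in handles:
        handle.get_result()  # block until each queued workflow finishes
```
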
@@ -29,12 +53,11 @@ class Queue:

 def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
     while not stop_event.is_set():
+        if stop_event.wait(timeout=1):
+            return
+        for _, queue in dbos._registry.queue_info_map.items():
             try:
-                wf_ids = dbos._sys_db.start_queued_workflows(
-                    queue_name, queue.concurrency
-                )
+                wf_ids = dbos._sys_db.start_queued_workflows(queue)
                 for id in wf_ids:
                     _execute_workflow_id(dbos, id)
             except Exception:
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/registrations.py

@@ -3,6 +3,8 @@ from enum import Enum
 from types import FunctionType
 from typing import Any, Callable, List, Literal, Optional, Tuple, Type, cast

+DEFAULT_MAX_RECOVERY_ATTEMPTS = 50
+

 def get_dbos_func_name(f: Any) -> str:
     if hasattr(f, "dbos_function_name"):
@@ -47,6 +49,7 @@ class DBOSFuncInfo:
         self.class_info: Optional[DBOSClassInfo] = None
         self.func_type: DBOSFuncType = DBOSFuncType.Unknown
         self.required_roles: Optional[List[str]] = None
+        self.max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS


 def get_or_create_class_info(cls: Type[Any]) -> DBOSClassInfo:
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/scheduler/scheduler.py

@@ -2,6 +2,7 @@ import threading
 from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Callable

+from dbos.logger import dbos_logger
 from dbos.queue import Queue

 if TYPE_CHECKING:
@@ -18,7 +19,12 @@ scheduler_queue: Queue
 def scheduler_loop(
     func: ScheduledWorkflow, cron: str, stop_event: threading.Event
 ) -> None:
-    iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
+    try:
+        iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
+    except Exception as e:
+        dbos_logger.error(
+            f'Cannot run scheduled function {func.__name__}. Invalid crontab "{cron}"'
+        )
     while not stop_event.is_set():
         nextExecTime = iter.get_next(datetime)
         sleepTime = nextExecTime - datetime.now(timezone.utc)
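For reference, scheduler_loop validates the crontab of functions registered like the sketch below (the cron string and body are illustrative); an invalid string is now logged rather than crashing the scheduler thread:

```python
from datetime import datetime

from dbos import DBOS

@DBOS.scheduled("* * * * *")  # start of every minute
@DBOS.workflow()
def every_minute(scheduled_time: datetime, actual_time: datetime) -> None:
    DBOS.logger.info(f"scheduled {scheduled_time}, ran {actual_time}")
```
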
{dbos-0.8.0a3 → dbos-0.8.0a10}/dbos/schemas/system_database.py

@@ -142,8 +142,8 @@ class SystemSchema:
         Column("last_run_time", BigInteger, nullable=False),
     )

-    job_queue = Table(
-        "job_queue",
+    workflow_queue = Table(
+        "workflow_queue",
         metadata_obj,
         Column(
             "workflow_uuid",
@@ -161,4 +161,12 @@ class SystemSchema:
             nullable=False,
             server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
         ),
+        Column(
+            "started_at_epoch_ms",
+            BigInteger(),
+        ),
+        Column(
+            "completed_at_epoch_ms",
+            BigInteger(),
+        ),
     )