dbos 0.8.0a0__py3-none-any.whl → 0.8.0a7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dbos was flagged as potentially problematic by the registry.
- dbos/__init__.py +2 -1
- dbos/context.py +33 -0
- dbos/core.py +53 -27
- dbos/dbos.py +44 -36
- dbos/error.py +11 -0
- dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py +34 -0
- dbos/migrations/versions/d76646551a6b_job_queue_limiter.py +42 -0
- dbos/queue.py +29 -9
- dbos/registrations.py +3 -0
- dbos/schemas/system_database.py +8 -0
- dbos/system_database.py +161 -32
- dbos/utils.py +43 -2
- {dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/METADATA +1 -1
- {dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/RECORD +17 -15
- {dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/WHEEL +0 -0
- {dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/entry_points.txt +0 -0
- {dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/licenses/LICENSE +0 -0
dbos/__init__.py
CHANGED
@@ -1,5 +1,5 @@
 from . import error as error
-from .context import DBOSContextEnsure, SetWorkflowID
+from .context import DBOSContextEnsure, DBOSContextSetAuth, SetWorkflowID
 from .dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle, WorkflowStatus
 from .dbos_config import ConfigFile, get_dbos_database_url, load_config
 from .kafka_message import KafkaMessage
@@ -11,6 +11,7 @@ __all__ = [
     "DBOS",
     "DBOSConfiguredInstance",
     "DBOSContextEnsure",
+    "DBOSContextSetAuth",
     "GetWorkflowsInput",
     "KafkaMessage",
     "SetWorkflowID",
dbos/context.py
CHANGED
@@ -492,6 +492,39 @@ class EnterDBOSHandler:
         return False  # Did not handle
 
 
+class DBOSContextSetAuth(DBOSContextEnsure):
+    def __init__(self, user: Optional[str], roles: Optional[List[str]]) -> None:
+        self.created_ctx = False
+        self.user = user
+        self.roles = roles
+        self.prev_user: Optional[str] = None
+        self.prev_roles: Optional[List[str]] = None
+
+    def __enter__(self) -> DBOSContext:
+        ctx = get_local_dbos_context()
+        if ctx is None:
+            self.created_ctx = True
+            set_local_dbos_context(DBOSContext())
+        ctx = assert_current_dbos_context()
+        self.prev_user = ctx.authenticated_user
+        self.prev_roles = ctx.authenticated_roles
+        ctx.set_authentication(self.user, self.roles)
+        return ctx
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> Literal[False]:
+        ctx = assert_current_dbos_context()
+        ctx.set_authentication(self.prev_user, self.prev_roles)
+        # Clean up the basic context if we created it
+        if self.created_ctx:
+            clear_local_dbos_context()
+        return False  # Did not handle
+
+
 class DBOSAssumeRole:
     def __init__(self, assume_role: Optional[str]) -> None:
         self.prior_role: Optional[str] = None
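The new DBOSContextSetAuth context manager temporarily sets the authenticated user and roles on the ambient DBOS context (creating a context if none exists) and restores the previous values on exit. A minimal usage sketch; the admin_task workflow and the handler function are hypothetical, not part of this release:

    from typing import List, Optional

    from dbos import DBOS, DBOSContextSetAuth


    @DBOS.workflow()
    def admin_task() -> None:
        # Hypothetical workflow body; any role checks it performs see the
        # authentication set by the surrounding context manager.
        ...


    def handle_request(user: Optional[str], roles: Optional[List[str]]) -> None:
        # Authentication applies only inside the with-block; the previous
        # authenticated user and roles are restored on exit.
        with DBOSContextSetAuth(user, roles):
            admin_task()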
dbos/core.py
CHANGED
@@ -4,17 +4,7 @@ import time
 import traceback
 from concurrent.futures import Future
 from functools import wraps
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Generic,
-    List,
-    Optional,
-    Tuple,
-    TypeVar,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Tuple, TypeVar, cast
 
 from dbos.application_database import ApplicationDatabase, TransactionResultInternal
 
@@ -48,6 +38,7 @@ from dbos.error import (
     DBOSWorkflowFunctionNotFoundError,
 )
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     get_config_name,
     get_dbos_class_name,
     get_dbos_func_name,
@@ -61,10 +52,10 @@ from dbos.roles import check_required_roles
 from dbos.system_database import (
     GetEventWorkflowContext,
     OperationResultInternal,
-    WorkflowInputs,
     WorkflowStatusInternal,
     WorkflowStatusString,
 )
+from dbos.utils import WorkflowInputs
 
 if TYPE_CHECKING:
     from dbos.dbos import DBOS, Workflow, WorkflowHandle, WorkflowStatus, _DBOSRegistry
@@ -128,6 +119,7 @@ def _init_workflow(
     config_name: Optional[str],
     temp_wf_type: Optional[str],
     queue: Optional[str] = None,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -167,11 +159,13 @@ def _init_workflow(
         # Synchronously record the status and inputs for workflows and single-step workflows
         # We also have to do this for single-step workflows because of the foreign key constraint on the operation outputs table
         # TODO: Make this transactional (and with the queue step below)
-        dbos._sys_db.update_workflow_status(
+        dbos._sys_db.update_workflow_status(
+            status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
+        )
+        dbos._sys_db.update_workflow_inputs(wfid, utils.serialize_args(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
-        dbos._sys_db.buffer_workflow_inputs(wfid, utils.
+        dbos._sys_db.buffer_workflow_inputs(wfid, utils.serialize_args(inputs))
 
     if queue is not None:
         dbos._sys_db.enqueue(wfid, queue)
@@ -191,7 +185,8 @@ def _execute_workflow(
         status["status"] = "SUCCESS"
         status["output"] = utils.serialize(output)
         if status["queue_name"] is not None:
-            dbos.
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.buffer_workflow_status(status)
     except DBOSWorkflowConflictIDError:
         # Retrieve the workflow handle and wait for the result.
@@ -203,9 +198,10 @@ def _execute_workflow(
         return output
     except Exception as error:
         status["status"] = "ERROR"
-        status["error"] = utils.
+        status["error"] = utils.serialize_exception(error)
         if status["queue_name"] is not None:
-            dbos.
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.update_workflow_status(status)
         raise
 
@@ -228,7 +224,7 @@ def _execute_workflow_wthread(
     with EnterDBOSWorkflow(attributes):
         try:
             return _execute_workflow(dbos, status, func, *args, **kwargs)
-        except Exception
+        except Exception:
             dbos.logger.error(
                 f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
             )
@@ -299,10 +295,15 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]
     )
 
 
-def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
+def _workflow_wrapper(
+    dbosreg: "_DBOSRegistry",
+    func: F,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+) -> F:
     func.__orig_func = func  # type: ignore
 
     fi = get_or_create_func_info(func)
+    fi.max_recovery_attempts = max_recovery_attempts
 
     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
@@ -335,17 +336,21 @@ def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
             class_name=get_dbos_class_name(fi, func, args),
             config_name=get_config_name(fi, func, args),
             temp_wf_type=get_temp_workflow_type(func),
+            max_recovery_attempts=max_recovery_attempts,
         )
 
+        dbos.logger.debug(
+            f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
+        )
         return _execute_workflow(dbos, status, func, *args, **kwargs)
 
     wrapped_func = cast(F, wrapper)
     return wrapped_func
 
 
-def _workflow(reg: "_DBOSRegistry") -> Callable[[F], F]:
+def _workflow(reg: "_DBOSRegistry", max_recovery_attempts: int) -> Callable[[F], F]:
     def _workflow_decorator(func: F) -> F:
-        wrapped_func = _workflow_wrapper(reg, func)
+        wrapped_func = _workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func
 
@@ -411,6 +416,7 @@ def _start_workflow(
         config_name=get_config_name(fi, func, gin_args),
         temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
+        max_recovery_attempts=fi.max_recovery_attempts,
     )
 
     if not execute_workflow:
@@ -487,9 +493,14 @@ def _transaction(
                 )
             )
             if recorded_output:
+                dbos.logger.debug(
+                    f"Replaying transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                )
                 if recorded_output["error"]:
-                    deserialized_error =
+                    deserialized_error = (
+                        utils.deserialize_exception(
+                            recorded_output["error"]
+                        )
                     )
                     has_recorded_error = True
                     raise deserialized_error
@@ -501,6 +512,11 @@ def _transaction(
                     raise Exception(
                         "Output and error are both None"
                     )
+            else:
+                dbos.logger.debug(
+                    f"Running transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                )
+
             output = func(*args, **kwargs)
             txn_output["output"] = utils.serialize(output)
             assert (
@@ -527,7 +543,7 @@ def _transaction(
         except Exception as error:
             # Don't record the error if it was already recorded
             if not has_recorded_error:
-                txn_output["error"] = utils.
+                txn_output["error"] = utils.serialize_exception(error)
                 dbos._app_db.record_transaction_error(txn_output)
             raise
     return output
@@ -598,13 +614,23 @@ def _step(
                 ctx.workflow_id, ctx.function_id
             )
             if recorded_output:
+                dbos.logger.debug(
+                    f"Replaying step, id: {ctx.function_id}, name: {attributes['name']}"
+                )
                 if recorded_output["error"] is not None:
-                    deserialized_error = utils.
+                    deserialized_error = utils.deserialize_exception(
+                        recorded_output["error"]
+                    )
                     raise deserialized_error
                 elif recorded_output["output"] is not None:
                     return utils.deserialize(recorded_output["output"])
                 else:
                     raise Exception("Output and error are both None")
+            else:
+                dbos.logger.debug(
+                    f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
+                )
+
             output = None
             error = None
             local_max_attempts = max_attempts if retries_allowed else 1
@@ -639,7 +665,7 @@ def _step(
             )
 
             step_output["error"] = (
-                utils.
+                utils.serialize_exception(error) if error is not None else None
             )
             dbos._sys_db.record_operation_result(step_output)
dbos/dbos.py
CHANGED
@@ -5,6 +5,7 @@ import json
 import os
 import sys
 import threading
+import traceback
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
 from logging import Logger
@@ -42,6 +43,7 @@ from dbos.decorators import classproperty
 from dbos.queue import Queue, queue_thread
 from dbos.recovery import _recover_pending_workflows, _startup_recovery_thread
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     DBOSClassInfo,
     get_or_create_class_info,
     set_dbos_func_name,
@@ -338,43 +340,47 @@ class DBOS:
         _dbos_global_instance._launch()
 
     def _launch(self) -> None:
-            self._executor.submit(queue_thread, evt, self)
-
-        # Grab any pollers that were deferred and start them
-        for evt, func, args, kwargs in self._registry.pollers:
+        try:
+            if self._launched:
+                dbos_logger.warning(f"DBOS was already launched")
+                return
+            self._launched = True
+            self._executor_field = ThreadPoolExecutor(max_workers=64)
+            self._sys_db_field = SystemDatabase(self.config)
+            self._app_db_field = ApplicationDatabase(self.config)
+            self._admin_server_field = AdminServer(dbos=self)
+
+            if not os.environ.get("DBOS__VMID"):
+                workflow_ids = self._sys_db.get_pending_workflows("local")
+                self._executor.submit(_startup_recovery_thread, self, workflow_ids)
+
+            # Listen to notifications
+            self._executor.submit(self._sys_db._notification_listener)
+
+            # Start flush workflow buffers thread
+            self._executor.submit(self._sys_db.flush_workflow_buffers)
+
+            # Start the queue thread
+            evt = threading.Event()
             self.stop_events.append(evt)
-            self._executor.submit(
-        self._registry.pollers = []
+            self._executor.submit(queue_thread, evt, self)
 
+            # Grab any pollers that were deferred and start them
+            for evt, func, args, kwargs in self._registry.pollers:
+                self.stop_events.append(evt)
+                self._executor.submit(func, *args, **kwargs)
+            self._registry.pollers = []
 
+            dbos_logger.info("DBOS launched")
+
+            # Flush handlers and add OTLP to all loggers if enabled
+            # to enable their export in DBOS Cloud
+            for handler in dbos_logger.handlers:
+                handler.flush()
+            add_otlp_to_all_loggers()
+        except Exception:
+            dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
+            raise
 
     def _destroy(self) -> None:
         self._initialized = False
@@ -401,9 +407,11 @@ class DBOS:
 
     # Decorators for DBOS functionality
     @classmethod
-    def workflow(
+    def workflow(
+        cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
+    ) -> Callable[[F], F]:
         """Decorate a function for use as a DBOS workflow."""
-        return _workflow(_get_or_create_dbos_registry())
+        return _workflow(_get_or_create_dbos_registry(), max_recovery_attempts)
 
     @classmethod
     def transaction(
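DBOS.workflow is now keyword-only and accepts max_recovery_attempts (default DEFAULT_MAX_RECOVERY_ATTEMPTS, i.e. 50). A hedged usage sketch; the workflow name and body below are illustrative, not from the package:

    from dbos import DBOS


    # Allow at most three automatic recovery attempts; once exceeded, recovery
    # marks the workflow RETRIES_EXCEEDED and raises DBOSDeadLetterQueueError.
    @DBOS.workflow(max_recovery_attempts=3)
    def fragile_workflow(order_id: str) -> None:
        ...  # illustrative body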
dbos/error.py
CHANGED
@@ -32,6 +32,7 @@ class DBOSErrorCode(Enum):
     InitializationError = 3
     WorkflowFunctionNotFound = 4
     NonExistentWorkflowError = 5
+    DeadLetterQueueError = 6
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8
 
@@ -86,6 +87,16 @@ class DBOSNonExistentWorkflowError(DBOSException):
         )
 
 
+class DBOSDeadLetterQueueError(DBOSException):
+    """Exception raised when a workflow database record does not exist for a given ID."""
+
+    def __init__(self, wf_id: str, max_retries: int):
+        super().__init__(
+            f"Workflow {wf_id} has been moved to the dead-letter queue after exceeding the maximum of ${max_retries} retries",
+            dbos_error_code=DBOSErrorCode.DeadLetterQueueError.value,
+        )
+
+
 class DBOSNotAuthorizedError(DBOSException):
     """Exception raised by DBOS role-based security when the user is not authorized to access a function."""
dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py
ADDED
@@ -0,0 +1,34 @@
+"""fix_job_queue
+
+Revision ID: 50f3227f0b4b
+Revises: eab0cc1d9a14
+Create Date: 2024-09-25 14:03:53.308068
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "50f3227f0b4b"
+down_revision: Union[str, None] = "eab0cc1d9a14"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.drop_constraint("job_queue_pkey", "job_queue", schema="dbos", type_="primary")
+
+    op.create_primary_key(
+        "job_queue_pkey", "job_queue", ["workflow_uuid"], schema="dbos"
+    )
+
+
+def downgrade() -> None:
+    # Reverting the changes
+    op.drop_constraint("job_queue_pkey", "job_queue", schema="dbos", type_="primary")
+
+    op.create_primary_key(
+        "job_queue_pkey", "job_queue", ["created_at_epoch_ms"], schema="dbos"
+    )
dbos/migrations/versions/d76646551a6b_job_queue_limiter.py
ADDED
@@ -0,0 +1,42 @@
+"""job_queue_limiter
+
+Revision ID: d76646551a6b
+Revises: 50f3227f0b4b
+Create Date: 2024-09-25 14:48:10.218015
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d76646551a6b"
+down_revision: Union[str, None] = "50f3227f0b4b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "started_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "completed_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_column("job_queue", "started_at_epoch_ms", schema="dbos")
+    op.drop_column("job_queue", "completed_at_epoch_ms", schema="dbos")
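Together these two revisions rework the dbos.job_queue system table: the primary key moves to workflow_uuid, and nullable started_at_epoch_ms and completed_at_epoch_ms columns are added for the limiter. DBOS applies its system-database migrations when it launches, so running them by hand is normally unnecessary; the snippet below only illustrates driving the same Alembic revision chain manually, and the script location and database URL in it are assumptions:

    from alembic import command
    from alembic.config import Config

    # Hypothetical paths: point Alembic at the migrations shipped in the dbos
    # package and at the DBOS system database.
    cfg = Config()
    cfg.set_main_option("script_location", "dbos/migrations")
    cfg.set_main_option("sqlalchemy.url", "postgresql://localhost/my_app_dbos_sys")

    command.upgrade(cfg, "d76646551a6b")  # or "head"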
dbos/queue.py
CHANGED
@@ -1,18 +1,32 @@
 import threading
-import
-from typing import TYPE_CHECKING, Optional
+import traceback
+from typing import TYPE_CHECKING, Optional, TypedDict
 
 from dbos.core import P, R, _execute_workflow_id, _start_workflow
-from dbos.error import DBOSInitializationError
 
 if TYPE_CHECKING:
     from dbos.dbos import DBOS, Workflow, WorkflowHandle
 
 
+# Limit the maximum number of functions from this queue
+# that can be started in a given period. If the limit is 5
+# and the period is 10, no more than 5 functions can be
+# started per 10 seconds.
+class Limiter(TypedDict):
+    limit: int
+    period: float
+
+
 class Queue:
-    def __init__(
+    def __init__(
+        self,
+        name: str,
+        concurrency: Optional[int] = None,
+        limiter: Optional[Limiter] = None,
+    ) -> None:
         self.name = name
         self.concurrency = concurrency
+        self.limiter = limiter
         from dbos.dbos import _get_or_create_dbos_registry
 
         registry = _get_or_create_dbos_registry()
@@ -29,8 +43,14 @@ class Queue:
 
 def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
     while not stop_event.is_set():
+        if stop_event.wait(timeout=1):
+            return
+        for _, queue in dbos._registry.queue_info_map.items():
+            try:
+                wf_ids = dbos._sys_db.start_queued_workflows(queue)
+                for id in wf_ids:
+                    _execute_workflow_id(dbos, id)
+            except Exception:
+                dbos.logger.warning(
+                    f"Exception encountered in queue thread: {traceback.format_exc()}"
+                )
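A Queue can now be constructed with an optional concurrency cap and a limiter (a Limiter TypedDict with a limit and a period in seconds). A short sketch, assuming the queue's enqueue method (unchanged in this diff) and a hypothetical process_task workflow:

    from dbos import DBOS
    from dbos.queue import Queue

    # At most 10 workflows from this queue run concurrently, and no more than
    # 50 may be started in any 30-second period.
    task_queue = Queue("task_queue", concurrency=10, limiter={"limit": 50, "period": 30.0})


    @DBOS.workflow()
    def process_task(task_id: int) -> None:
        ...  # hypothetical task body


    def enqueue_all() -> None:
        for task_id in range(100):
            task_queue.enqueue(process_task, task_id)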
dbos/registrations.py
CHANGED
@@ -3,6 +3,8 @@ from enum import Enum
 from types import FunctionType
 from typing import Any, Callable, List, Literal, Optional, Tuple, Type, cast
 
+DEFAULT_MAX_RECOVERY_ATTEMPTS = 50
+
 
 def get_dbos_func_name(f: Any) -> str:
     if hasattr(f, "dbos_function_name"):
@@ -47,6 +49,7 @@ class DBOSFuncInfo:
         self.class_info: Optional[DBOSClassInfo] = None
         self.func_type: DBOSFuncType = DBOSFuncType.Unknown
         self.required_roles: Optional[List[str]] = None
+        self.max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS
 
 
 def get_or_create_class_info(cls: Type[Any]) -> DBOSClassInfo:
dbos/schemas/system_database.py
CHANGED
@@ -161,4 +161,12 @@ class SystemSchema:
             nullable=False,
             server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
         ),
+        Column(
+            "started_at_epoch_ms",
+            BigInteger(),
+        ),
+        Column(
+            "completed_at_epoch_ms",
+            BigInteger(),
+        ),
     )
dbos/system_database.py
CHANGED
@@ -3,7 +3,19 @@ import os
 import threading
 import time
 from enum import Enum
-from typing import
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Literal,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    TypedDict,
+    cast,
+)
 
 import psycopg
 import sqlalchemy as sa
@@ -13,12 +25,20 @@ from alembic.config import Config
 from sqlalchemy.exc import DBAPIError
 
 import dbos.utils as utils
-from dbos.error import
+from dbos.error import (
+    DBOSDeadLetterQueueError,
+    DBOSNonExistentWorkflowError,
+    DBOSWorkflowConflictIDError,
+)
+from dbos.registrations import DEFAULT_MAX_RECOVERY_ATTEMPTS
 
 from .dbos_config import ConfigFile
 from .logger import dbos_logger
 from .schemas.system_database import SystemSchema
 
+if TYPE_CHECKING:
+    from .queue import Queue
+
 
 class WorkflowStatusString(Enum):
     """Enumeration of values allowed for `WorkflowSatusInternal.status`."""
@@ -36,11 +56,6 @@ WorkflowStatuses = Literal[
 ]
 
 
-class WorkflowInputs(TypedDict):
-    args: Any
-    kwargs: Any
-
-
 class WorkflowStatusInternal(TypedDict):
     workflow_uuid: str
     status: WorkflowStatuses
@@ -127,7 +142,7 @@ class WorkflowInformation(TypedDict, total=False):
     # The role used to run this workflow. Empty string if authorization is not required.
     authenticated_roles: List[str]
     # All roles the authenticated user has, if any.
-    input: Optional[WorkflowInputs]
+    input: Optional[utils.WorkflowInputs]
     output: Optional[str]
     error: Optional[str]
     request: Optional[str]
@@ -227,7 +242,9 @@ class SystemDatabase:
         status: WorkflowStatusInternal,
         replace: bool = True,
         in_recovery: bool = False,
+        *,
         conn: Optional[sa.Connection] = None,
+        max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
     ) -> None:
         cmd = pg.insert(SystemSchema.workflow_status).values(
             workflow_uuid=status["workflow_uuid"],
@@ -265,12 +282,36 @@ class SystemDatabase:
             )
         else:
             cmd = cmd.on_conflict_do_nothing()
+        cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts)  # type: ignore
 
         if conn is not None:
-            conn.execute(cmd)
+            results = conn.execute(cmd)
         else:
             with self.engine.begin() as c:
-                c.execute(cmd)
+                results = c.execute(cmd)
+        if in_recovery:
+            row = results.fetchone()
+            if row is not None:
+                recovery_attempts: int = row[0]
+                if recovery_attempts > max_recovery_attempts:
+                    with self.engine.begin() as c:
+                        c.execute(
+                            sa.update(SystemSchema.workflow_status)
+                            .where(
+                                SystemSchema.workflow_status.c.workflow_uuid
+                                == status["workflow_uuid"]
+                            )
+                            .where(
+                                SystemSchema.workflow_status.c.status
+                                == WorkflowStatusString.PENDING.value
+                            )
+                            .values(
+                                status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+                            )
+                        )
+                    raise DBOSDeadLetterQueueError(
+                        status["workflow_uuid"], max_recovery_attempts
+                    )
 
         # Record we have exported status for this single-transaction workflow
         if status["workflow_uuid"] in self._temp_txn_wf_ids:
@@ -447,7 +488,7 @@ class SystemDatabase:
         if status == str(WorkflowStatusString.SUCCESS.value):
             return utils.deserialize(stat["output"])
         elif status == str(WorkflowStatusString.ERROR.value):
-            raise utils.
+            raise utils.deserialize_exception(stat["error"])
         return None
 
     def get_workflow_info(
@@ -487,7 +528,7 @@ class SystemDatabase:
         self._exported_temp_txn_wf_status.discard(workflow_uuid)
         self._temp_txn_wf_ids.discard(workflow_uuid)
 
-    def get_workflow_inputs(self, workflow_uuid: str) -> Optional[WorkflowInputs]:
+    def get_workflow_inputs(self, workflow_uuid: str) -> Optional[utils.WorkflowInputs]:
         with self.engine.begin() as c:
             row = c.execute(
                 sa.select(SystemSchema.workflow_inputs.c.inputs).where(
@@ -496,7 +537,7 @@ class SystemDatabase:
             ).fetchone()
             if row is None:
                 return None
-            inputs: WorkflowInputs = utils.
+            inputs: utils.WorkflowInputs = utils.deserialize_args(row[0])
             return inputs
 
     def get_workflows(self, input: GetWorkflowsInput) -> GetWorkflowsOutput:
@@ -611,7 +652,14 @@ class SystemDatabase:
                 workflow_uuid, function_id, conn=c
             )
             if recorded_output is not None:
+                dbos_logger.debug(
+                    f"Replaying send, id: {function_id}, destination_uuid: {destination_uuid}, topic: {topic}"
+                )
                 return  # Already sent before
+            else:
+                dbos_logger.debug(
+                    f"Running send, id: {function_id}, destination_uuid: {destination_uuid}, topic: {topic}"
+                )
 
             try:
                 c.execute(
@@ -647,10 +695,13 @@ class SystemDatabase:
         # First, check for previous executions.
         recorded_output = self.check_operation_execution(workflow_uuid, function_id)
         if recorded_output is not None:
+            dbos_logger.debug(f"Replaying recv, id: {function_id}, topic: {topic}")
            if recorded_output["output"] is not None:
                return utils.deserialize(recorded_output["output"])
            else:
                raise Exception("No output recorded in the last recv")
+        else:
+            dbos_logger.debug(f"Running recv, id: {function_id}, topic: {topic}")
 
         # Insert a condition to the notifications map, so the listener can notify it when a message is received.
         payload = f"{workflow_uuid}::{topic}"
@@ -793,9 +844,11 @@ class SystemDatabase:
         recorded_output = self.check_operation_execution(workflow_uuid, function_id)
         end_time: float
         if recorded_output is not None:
+            dbos_logger.debug(f"Replaying sleep, id: {function_id}, seconds: {seconds}")
            assert recorded_output["output"] is not None, "no recorded end time"
            end_time = utils.deserialize(recorded_output["output"])
         else:
+            dbos_logger.debug(f"Running sleep, id: {function_id}, seconds: {seconds}")
            end_time = time.time() + seconds
            try:
                self.record_operation_result(
@@ -825,7 +878,10 @@ class SystemDatabase:
                 workflow_uuid, function_id, conn=c
             )
             if recorded_output is not None:
+                dbos_logger.debug(f"Replaying set_event, id: {function_id}, key: {key}")
                 return  # Already sent before
+            else:
+                dbos_logger.debug(f"Running set_event, id: {function_id}, key: {key}")
 
             c.execute(
                 pg.insert(SystemSchema.workflow_events)
@@ -866,10 +922,17 @@ class SystemDatabase:
                 caller_ctx["workflow_uuid"], caller_ctx["function_id"]
             )
             if recorded_output is not None:
+                dbos_logger.debug(
+                    f"Replaying get_event, id: {caller_ctx['function_id']}, key: {key}"
+                )
                 if recorded_output["output"] is not None:
                     return utils.deserialize(recorded_output["output"])
                 else:
                     raise Exception("No output recorded in the last get_event")
+            else:
+                dbos_logger.debug(
+                    f"Running get_event, id: {caller_ctx['function_id']}, key: {key}"
+                )
 
         payload = f"{target_uuid}::{key}"
         condition = threading.Condition()
@@ -1024,22 +1087,60 @@ class SystemDatabase:
             .on_conflict_do_nothing()
         )
 
-    def start_queued_workflows(
+    def start_queued_workflows(self, queue: "Queue") -> List[str]:
+        start_time_ms = int(time.time() * 1000)
+        if queue.limiter is not None:
+            limiter_period_ms = int(queue.limiter["period"] * 1000)
         with self.engine.begin() as c:
+            # Execute with snapshot isolation to ensure multiple workers respect limits
+            c.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
+
+            # If there is a limiter, compute how many functions have started in its period.
+            if queue.limiter is not None:
+                query = (
+                    sa.select(sa.func.count())
+                    .select_from(SystemSchema.job_queue)
+                    .where(SystemSchema.job_queue.c.started_at_epoch_ms.isnot(None))
+                    .where(
+                        SystemSchema.job_queue.c.started_at_epoch_ms
+                        > start_time_ms - limiter_period_ms
+                    )
+                )
+                num_recent_queries = c.execute(query).fetchone()[0]  # type: ignore
+                if num_recent_queries >= queue.limiter["limit"]:
+                    return []
+
+            # Select not-yet-completed functions in the queue ordered by the
+            # time at which they were enqueued.
+            # If there is a concurrency limit N, select only the N most recent
+            # functions, else select all of them.
+            query = (
+                sa.select(
+                    SystemSchema.job_queue.c.workflow_uuid,
+                    SystemSchema.job_queue.c.started_at_epoch_ms,
+                )
+                .where(SystemSchema.job_queue.c.queue_name == queue.name)
+                .where(SystemSchema.job_queue.c.completed_at_epoch_ms == None)
+                .order_by(SystemSchema.job_queue.c.created_at_epoch_ms.asc())
             )
-            if concurrency is not None:
-                query = query.
+            if queue.concurrency is not None:
+                query = query.limit(queue.concurrency)
+
+            # From the functions retrieved, get the workflow IDs of the functions
+            # that have not yet been started so we can start them.
             rows = c.execute(query).fetchall()
-            dequeued_ids: List[str] = [row[0] for row in rows]
-            ret_ids = []
+            dequeued_ids: List[str] = [row[0] for row in rows if row[1] is None]
+            ret_ids: list[str] = []
             for id in dequeued_ids:
+
+                # If we have a limiter, stop starting functions when the number
+                # of functions started this period exceeds the limit.
+                if queue.limiter is not None:
+                    if len(ret_ids) + num_recent_queries >= queue.limiter["limit"]:
+                        break
+
+                # To start a function, first set its status to PENDING
+                c.execute(
                     SystemSchema.workflow_status.update()
                     .where(SystemSchema.workflow_status.c.workflow_uuid == id)
                     .where(
@@ -1048,14 +1149,42 @@ class SystemDatabase:
                     )
                     .values(status=WorkflowStatusString.PENDING.value)
                 )
+
+                # Then give it a start time
+                c.execute(
+                    SystemSchema.job_queue.update()
+                    .where(SystemSchema.job_queue.c.workflow_uuid == id)
+                    .values(started_at_epoch_ms=start_time_ms)
+                )
+                ret_ids.append(id)
+
+            # If we have a limiter, garbage-collect all completed functions started
+            # before the period. If there's no limiter, there's no need--they were
+            # deleted on completion.
+            if queue.limiter is not None:
+                c.execute(
+                    sa.delete(SystemSchema.job_queue)
+                    .where(SystemSchema.job_queue.c.completed_at_epoch_ms != None)
+                    .where(
+                        SystemSchema.job_queue.c.started_at_epoch_ms
+                        < start_time_ms - limiter_period_ms
+                    )
+                )
+
+            # Return the IDs of all functions we started
             return ret_ids
 
-    def remove_from_queue(self, workflow_id: str) -> None:
+    def remove_from_queue(self, workflow_id: str, queue: "Queue") -> None:
         with self.engine.begin() as c:
-            SystemSchema.job_queue.
+            if queue.limiter is None:
+                c.execute(
+                    sa.delete(SystemSchema.job_queue).where(
+                        SystemSchema.job_queue.c.workflow_uuid == workflow_id
+                    )
+                )
+            else:
+                c.execute(
+                    sa.update(SystemSchema.job_queue)
+                    .where(SystemSchema.job_queue.c.workflow_uuid == workflow_id)
+                    .values(completed_at_epoch_ms=int(time.time() * 1000))
                 )
-            )
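The limiter logic in start_queued_workflows is simple window arithmetic: with limit 5 and period 10 seconds, it counts job_queue rows whose started_at_epoch_ms falls within the last 10,000 ms; if that count already meets the limit it starts nothing, otherwise it starts unstarted workflows only until the recent count plus the newly started ones reaches the limit. A standalone sketch of that arithmetic, not the library's code (the rows below stand in for dbos.job_queue):

    import time
    from typing import List, Optional, Tuple

    LIMIT, PERIOD_S = 5, 10.0


    def pick_startable(rows: List[Tuple[str, Optional[int]]]) -> List[str]:
        """rows: (workflow_uuid, started_at_epoch_ms or None if not yet started)."""
        now_ms = int(time.time() * 1000)
        period_ms = int(PERIOD_S * 1000)
        # Workflows already started inside the current period.
        recent = sum(
            1 for _, started in rows if started is not None and started > now_ms - period_ms
        )
        if recent >= LIMIT:
            return []  # analogous to start_queued_workflows returning an empty list
        started_ids: List[str] = []
        for wf_id, started in rows:
            if started is not None:
                continue  # already started earlier, not eligible
            if len(started_ids) + recent >= LIMIT:
                break  # this period's budget is used up
            started_ids.append(wf_id)
        return started_ids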
dbos/utils.py
CHANGED
@@ -1,14 +1,55 @@
-
+import types
+from typing import Any, Dict, Tuple, TypedDict
 
 import jsonpickle  # type: ignore
 
 
+class WorkflowInputs(TypedDict):
+    args: Tuple[Any, ...]
+    kwargs: Dict[str, Any]
+
+
+def validate_item(data: Any) -> None:
+    if isinstance(data, (types.FunctionType, types.MethodType)):
+        raise TypeError("Serialized data item should not be a function")
+
+
 def serialize(data: Any) -> str:
     """Serialize an object to a JSON string using jsonpickle."""
-
+    validate_item(data)
+    encoded_data: str = jsonpickle.encode(data, unpicklable=True)
+    return encoded_data
+
+
+def serialize_args(data: WorkflowInputs) -> str:
+    """Serialize args to a JSON string using jsonpickle."""
+    arg: Any
+    for arg in data["args"]:
+        validate_item(arg)
+    for arg in data["kwargs"].values():
+        validate_item(arg)
+    encoded_data: str = jsonpickle.encode(data, unpicklable=True)
+    return encoded_data
+
+
+def serialize_exception(data: Exception) -> str:
+    """Serialize an Exception object to a JSON string using jsonpickle."""
+    encoded_data: str = jsonpickle.encode(data, unpicklable=True)
     return encoded_data
 
 
 def deserialize(serialized_data: str) -> Any:
     """Deserialize a JSON string back to a Python object using jsonpickle."""
     return jsonpickle.decode(serialized_data)
+
+
+def deserialize_args(serialized_data: str) -> WorkflowInputs:
+    """Deserialize a JSON string back to a Python object list using jsonpickle."""
+    args: WorkflowInputs = jsonpickle.decode(serialized_data)
+    return args
+
+
+def deserialize_exception(serialized_data: str) -> Exception:
+    """Deserialize JSON string back to a Python Exception using jsonpickle."""
+    upo: Exception = jsonpickle.decode(serialized_data)
+    return upo
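The serialization helpers are now split by payload type: serialize_args/deserialize_args round-trip a WorkflowInputs dict, serialize_exception/deserialize_exception handle exceptions, and validate_item rejects plain functions and bound methods before encoding. A small sketch of the expected behavior (illustrative, not a test shipped with the package):

    import dbos.utils as utils

    # Round-trip workflow inputs.
    inputs: utils.WorkflowInputs = {"args": (1, "a"), "kwargs": {"flag": True}}
    restored = utils.deserialize_args(utils.serialize_args(inputs))
    assert restored["kwargs"] == {"flag": True}

    # Exceptions round-trip through jsonpickle as well.
    err = ValueError("boom")
    assert isinstance(utils.deserialize_exception(utils.serialize_exception(err)), ValueError)

    # Plain functions are rejected before serialization.
    def not_serializable() -> None:
        ...

    try:
        utils.serialize(not_serializable)
    except TypeError as e:
        assert "should not be a function" in str(e)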
{dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/RECORD
CHANGED
@@ -1,18 +1,18 @@
-dbos-0.8.
-dbos-0.8.
-dbos-0.8.
-dbos-0.8.
-dbos/__init__.py,sha256
+dbos-0.8.0a7.dist-info/METADATA,sha256=cqNzAf7TbC3n_DDGTPUj_xawggwMydLnIQ5056iFDUI,5010
+dbos-0.8.0a7.dist-info/WHEEL,sha256=Vza3XR51HW1KmFP0iIMUVYIvz0uQuKJpIXKYOBGQyFQ,90
+dbos-0.8.0a7.dist-info/entry_points.txt,sha256=z6GcVANQV7Uw_82H9Ob2axJX6V3imftyZsljdh-M1HU,54
+dbos-0.8.0a7.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos/__init__.py,sha256=-h1QgWNL11CiLlHEKa2ycAJVJw5SXYZ4BGNNWBAiE9k,726
 dbos/admin_sever.py,sha256=Qg5T3YRrbPW05PR_99yAaxgo1ugQrAp_uTeTqSfjm_k,3397
 dbos/application_database.py,sha256=knFK8We8y6WrIpnFCKvFq5hvSuFQqUuJqOqDpSVMCPI,5521
 dbos/cli.py,sha256=z5dXbbnGWzSC3E1rfS8Lp1_OIImzcDKM7jP-iu_Q4aI,8602
-dbos/context.py,sha256=
-dbos/core.py,sha256=
+dbos/context.py,sha256=4MsxZdoh1WIsgoUsaxo0B6caGN6xq2WC60MzbBppzGk,17738
+dbos/core.py,sha256=ggsRC2XicvNI1qqruEFoqxoTU5oSSnhMZvDih3AG_3A,30879
 dbos/dbos-config.schema.json,sha256=azpfmoDZg7WfSy3kvIsk9iEiKB_-VZt03VEOoXJAkqE,5331
-dbos/dbos.py,sha256=
+dbos/dbos.py,sha256=LnqX7rFETpcyxT9YHs4Uc3uOB4EDQC-zis3UFQU4smc,29705
 dbos/dbos_config.py,sha256=NJVze2GkKgYUmcPP31Unb-QpsA0TzImEeQGJgVq6W6k,5352
 dbos/decorators.py,sha256=lbPefsLK6Cya4cb7TrOcLglOpGT3pc6qjZdsQKlfZLg,629
-dbos/error.py,sha256=
+dbos/error.py,sha256=UETk8CoZL-TO2Utn1-E7OSWelhShWmKM-fOlODMR9PE,3893
 dbos/fastapi.py,sha256=gx9hlpxYOiwbuhSlbY9bn5C-F_FsCbrJvkX9ZAvDG6U,3418
 dbos/flask.py,sha256=azr4geMEGuuTBCyxIZmgDmmP-6s_pTIF-lGyp9Q4IB8,2430
 dbos/kafka.py,sha256=LH3hbNapnkjLcuXNUtdGU0398JafWb-t0GwUl3LOzkc,3645
@@ -20,21 +20,23 @@ dbos/kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
 dbos/logger.py,sha256=D-aFSZUCHBP34J1IZ5YNkTrJW-rDiH3py_v9jLU4Yrk,3565
 dbos/migrations/env.py,sha256=38SIGVbmn_VV2x2u1aHLcPOoWgZ84eCymf3g_NljmbU,1626
 dbos/migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
+dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py,sha256=ZtnsZFMuon-D0n8V5BR10jQEqJPUsYsOwt29FAoKG8g,868
 dbos/migrations/versions/5c361fc04708_added_system_tables.py,sha256=QMgFMb0aLgC25YicsvPSr6AHRCA6Zd66hyaRUhwKzrQ,6404
 dbos/migrations/versions/a3b18ad34abe_added_triggers.py,sha256=Rv0ZsZYZ_WdgGEULYsPfnp4YzaO5L198gDTgYY39AVA,2022
+dbos/migrations/versions/d76646551a6b_job_queue_limiter.py,sha256=M1upulBOLXm9ORJc2Q6PA0AwX0CA3zgqc-NhYS-eNPY,948
 dbos/migrations/versions/eab0cc1d9a14_job_queue.py,sha256=_9-FCW-zOpCQfblTS_yRLtFiUaWlC1tM4BoKBTDeH9k,1395
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
-dbos/queue.py,sha256=
+dbos/queue.py,sha256=4MIWYDdl3DhYsnayy747xF1Jgdq2qvV4On3KJbJ4NDU,1764
 dbos/recovery.py,sha256=zqtO_ExGoIErLMVnbneU3VeHLVWvhV4jnfqssAVlQQk,2016
-dbos/registrations.py,sha256=
+dbos/registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642
 dbos/request.py,sha256=-FIwtknayvRl6OjvqO4V2GySVzSdP1Ft3cc9ZBS-PLY,928
 dbos/roles.py,sha256=7Lh7uwUq1dpa6TXCOHre4mPTd5qmXzK_QPkvYR52DXg,2285
 dbos/scheduler/croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
 dbos/scheduler/scheduler.py,sha256=Sz4EIpAtur7so2YajTic64GrTpa4qPw8QxXn0M34v80,1360
 dbos/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/schemas/application_database.py,sha256=q_Wr2XbiZNBYFkOtu7uKavo1T_cSOBblxKGHThYGGsY,962
-dbos/schemas/system_database.py,sha256
-dbos/system_database.py,sha256=
+dbos/schemas/system_database.py,sha256=-37sNXfx6cNGyzndj9mrWQLDH5iIBrwsT56ZiJ56Sj0,5116
+dbos/system_database.py,sha256=x-TpKHIS187KED6BxPozvMLt6Qjhgh3dYSKZkd6epM0,47764
 dbos/templates/hello/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/templates/hello/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/templates/hello/__package/main.py,sha256=eI0SS9Nwj-fldtiuSzIlIG6dC91GXXwdRsoHxv6S_WI,2719
@@ -46,6 +48,6 @@ dbos/templates/hello/migrations/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy
 dbos/templates/hello/migrations/versions/2024_07_31_180642_init.py,sha256=U5thFWGqNN4QLrNXT7wUUqftIFDNE5eSdqD8JNW1mec,942
 dbos/templates/hello/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/tracer.py,sha256=GaXDhdKKF_IQp5SAMipGXiDVwteRKjNbrXyYCH1mor0,2520
-dbos/utils.py,sha256=
+dbos/utils.py,sha256=lwRymY-y7GprAS8pKmbICQvOJd5eGxKGTxCMFn0OwaQ,1739
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.8.
+dbos-0.8.0a7.dist-info/RECORD,,
{dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/WHEEL
File without changes
{dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/entry_points.txt
File without changes
{dbos-0.8.0a0.dist-info → dbos-0.8.0a7.dist-info}/licenses/LICENSE
File without changes