dbos 0.15.0__tar.gz → 0.16.0__tar.gz
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of dbos has been flagged as possibly problematic.
- {dbos-0.15.0 → dbos-0.16.0}/PKG-INFO +1 -1
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_context.py +16 -6
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_core.py +175 -93
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_dbos.py +101 -10
- dbos-0.16.0/dbos/_outcome.py +190 -0
- {dbos-0.15.0 → dbos-0.16.0}/pyproject.toml +2 -1
- dbos-0.16.0/tests/test_async.py +331 -0
- dbos-0.16.0/tests/test_outcome.py +114 -0
- dbos-0.16.0/tests/test_spans.py +118 -0
- {dbos-0.15.0 → dbos-0.16.0}/LICENSE +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/README.md +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/__init__.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_admin_server.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_app_db.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_classproperty.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_croniter.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_dbos_config.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_error.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_fastapi.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_flask.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_kafka.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_kafka_message.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_logger.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/env.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/script.py.mako +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_queue.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_recovery.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_registrations.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_request.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_roles.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_scheduler.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_schemas/__init__.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_schemas/application_database.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_schemas/system_database.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_serialization.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_sys_db.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/README.md +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/__package/__init__.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/__package/main.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/__package/schema.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/alembic.ini +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/dbos-config.yaml.dbos +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/migrations/env.py.dbos +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/migrations/script.py.mako +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_templates/hello/start_postgres_docker.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/_tracer.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/cli.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/dbos-config.schema.json +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/dbos/py.typed +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/__init__.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/atexit_no_ctor.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/atexit_no_launch.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/classdefs.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/conftest.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/more_classdefs.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_admin_server.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_classdecorators.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_concurrency.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_config.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_croniter.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_dbos.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_failures.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_fastapi.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_fastapi_roles.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_flask.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_kafka.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_package.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_queue.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_scheduler.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_schema_migration.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/tests/test_singleton.py +0 -0
- {dbos-0.15.0 → dbos-0.16.0}/version/__init__.py +0 -0
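Taken together, the new files above point at the headline change in 0.16.0: workflows and steps can now be coroutines (see the new dbos/_outcome.py and tests/test_async.py), while transactions remain synchronous. A rough usage sketch of what the async support enables; the function names and the bare DBOS() setup here are invented for illustration, not taken from this diff:

import asyncio
from dbos import DBOS

DBOS()  # illustrative minimal setup; real apps pass a config or FastAPI app

@DBOS.step()
async def fetch_greeting(name: str) -> str:
    await asyncio.sleep(0.1)  # stand-in for real async I/O
    return f"Hello, {name}!"

@DBOS.workflow()
async def greeting_workflow(name: str) -> str:
    # Steps awaited inside a workflow are still checkpointed and replayed.
    return await fetch_greeting(name)

# After DBOS.launch(), asyncio.run(greeting_workflow("dbos")) would run the
# workflow durably end to end.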
{dbos-0.15.0 → dbos-0.16.0}/dbos/_context.py

(In the hunks below, "…" marks removed 0.15.0 content that the diff viewer truncated or did not capture.)

@@ -98,18 +98,27 @@ class DBOSContext:
         wfid = str(uuid.uuid4())
         return wfid
 
-    def start_workflow(…
+    def start_workflow(
+        self,
+        wfid: Optional[str],
+        attributes: TracedAttributes,
+        is_temp_workflow: bool = False,
+    ) -> None:
         if wfid is None or len(wfid) == 0:
             wfid = self.assign_workflow_id()
             self.id_assigned_for_next_workflow = ""
         self.workflow_id = wfid
         self.function_id = 0
-        self._start_span(attributes)
+        if not is_temp_workflow:
+            self._start_span(attributes)
 
-    def end_workflow(…
+    def end_workflow(
+        self, exc_value: Optional[BaseException], is_temp_workflow: bool = False
+    ) -> None:
         self.workflow_id = ""
         self.function_id = -1
-        self._end_span(exc_value)
+        if not is_temp_workflow:
+            self._end_span(exc_value)
 
     def is_within_workflow(self) -> bool:
         return len(self.workflow_id) > 0

@@ -349,6 +358,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.created_ctx = False
         self.attributes = attributes
+        self.is_temp_workflow = attributes["name"] == "temp_wf"
 
     def __enter__(self) -> DBOSContext:
         # Code to create a basic context

@@ -359,7 +369,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         _set_local_dbos_context(ctx)
         assert not ctx.is_within_workflow()
         ctx.start_workflow(
-            None, self.attributes
+            None, self.attributes, self.is_temp_workflow
         )  # Will get from the context's next workflow ID
         return ctx
 
@@ -371,7 +381,7 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     ) -> Literal[False]:
         ctx = assert_current_dbos_context()
         assert ctx.is_within_workflow()
-        ctx.end_workflow(exc_value)
+        ctx.end_workflow(exc_value, self.is_temp_workflow)
         # Code to clean up the basic context if we created it
         if self.created_ctx:
             _clear_local_dbos_context()
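The is_temp_workflow changes above suppress the extra tracing span for temporary workflows: when a step is invoked outside of any workflow, DBOS wraps it in an auto-generated workflow named "temp_wf" (see the temp_wf_sync/temp_wf_async change in _core.py below), and as of this release that wrapper no longer opens its own span, which is presumably what the new tests/test_spans.py exercises. A hypothetical illustration (fetch_data is an invented name):

@DBOS.step()
def fetch_data(url: str) -> str:
    ...

# Called outside any workflow, fetch_data() still runs inside a "temp_wf"
# wrapper workflow for durability, but no extra workflow span is emitted
# for that wrapper; the trace contains only the step's span.
fetch_data("https://example.com")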
{dbos-0.15.0 → dbos-0.16.0}/dbos/_core.py

@@ -1,10 +1,27 @@
+import asyncio
+import functools
+import inspect
 import json
 import sys
 import time
 import traceback
 from concurrent.futures import Future
 from functools import wraps
-from typing import …
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Coroutine,
+    Generic,
+    Optional,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
+from dbos._outcome import Immediate, Outcome, Pending
 
 from ._app_db import ApplicationDatabase, TransactionResultInternal
 
@@ -181,39 +198,38 @@ def _init_workflow(
     return status
 
 
-def …
+def _get_wf_invoke_func(
     dbos: "DBOS",
     status: WorkflowStatusInternal,
-    … (old lines 187-211: not captured in this diff view)
-            dbos._sys_db.…
-
-            raise
+) -> Callable[[Callable[[], R]], R]:
+    def persist(func: Callable[[], R]) -> R:
+        try:
+            output = func()
+            status["status"] = "SUCCESS"
+            status["output"] = _serialization.serialize(output)
+            if status["queue_name"] is not None:
+                queue = dbos._registry.queue_info_map[status["queue_name"]]
+                dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+            dbos._sys_db.buffer_workflow_status(status)
+            return output
+        except DBOSWorkflowConflictIDError:
+            # Retrieve the workflow handle and wait for the result.
+            # Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
+            wf_handle: "WorkflowHandle[R]" = dbos.retrieve_workflow(
+                status["workflow_uuid"], existing_workflow=False
+            )
+            output = wf_handle.get_result()
+            return output
+        except Exception as error:
+            status["status"] = "ERROR"
+            status["error"] = _serialization.serialize_exception(error)
+            if status["queue_name"] is not None:
+                queue = dbos._registry.queue_info_map[status["queue_name"]]
+                dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
+            dbos._sys_db.update_workflow_status(status)
+            raise
 
-    return …
+    return persist
 
 
 def _execute_workflow_wthread(
@@ -231,7 +247,15 @@ def _execute_workflow_wthread(
     with DBOSContextSwap(ctx):
         with EnterDBOSWorkflow(attributes):
             try:
-                …
+                result = (
+                    Outcome[R]
+                    .make(functools.partial(func, *args, **kwargs))
+                    .then(_get_wf_invoke_func(dbos, status))
+                )
+                if isinstance(result, Immediate):
+                    return cast(Immediate[R], result)()
+                else:
+                    return asyncio.run(cast(Pending[R], result)())
             except Exception:
                 dbos.logger.error(
                     f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
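Immediate and Pending come from the new dbos/_outcome.py, which this diff lists (+190 lines) but does not show. Inferred purely from the call sites in this file (make, then, wrap, intercept, also, retry), the synchronous half might look roughly like the sketch below; the real module presumably pairs it with an async Pending twin that awaits the wrapped coroutine instead of calling it. This is a sketch under those assumptions, not the actual implementation:

import time
from contextlib import AbstractContextManager
from typing import Callable, Generic, Optional, TypeVar

T = TypeVar("T")
R = TypeVar("R")

class Immediate(Generic[T]):
    # A lazily-evaluated synchronous computation.
    def __init__(self, func: Callable[[], T]) -> None:
        self._func = func

    def __call__(self) -> T:
        return self._func()

    def then(self, handler: "Callable[[Callable[[], T]], R]") -> "Immediate[R]":
        # Hand the un-evaluated thunk to a handler that decides when
        # (and whether) to invoke it, e.g. the persist() closure above.
        return Immediate(lambda: handler(self._func))

    def wrap(
        self, init: "Callable[[], Callable[[Callable[[], T]], R]]"
    ) -> "Immediate[R]":
        # Like then(), but the handler is constructed at call time,
        # e.g. init_wf() running _init_workflow first.
        return Immediate(lambda: init()(self._func))

    def intercept(self, check: Callable[[], Optional[T]]) -> "Immediate[T]":
        # Short-circuit with a previously recorded result, if any.
        def run() -> T:
            existing = check()
            return existing if existing is not None else self._func()
        return Immediate(run)

    def also(self, cm: AbstractContextManager) -> "Immediate[T]":
        # Evaluate inside a context manager (tracing span, role, ...).
        def run() -> T:
            with cm:
                return self._func()
        return Immediate(run)

    def retry(
        self,
        attempts: int,
        on_exception: Callable[[int, BaseException], float],
        exhausted: Callable[[int], Exception],
    ) -> "Immediate[T]":
        # Re-run up to `attempts` times, sleeping for the interval
        # returned by on_exception, then raise exhausted(attempts).
        def run() -> T:
            for attempt in range(1, attempts + 1):
                try:
                    return self._func()
                except Exception as error:
                    if attempt == attempts:
                        raise exhausted(attempts) from error
                    time.sleep(on_exception(attempt, error))
            raise exhausted(attempts)
        return Immediate(run)

class Outcome(Generic[T]):
    @staticmethod
    def make(func: Callable[[], T]) -> "Immediate[T]":
        # The real factory presumably returns a Pending when func is a
        # coroutine function; this sketch covers only the sync case.
        return Immediate(func)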
@@ -305,6 +329,18 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
     )
 
 
+@overload
+def start_workflow(
+    dbos: "DBOS",
+    func: "Workflow[P, Coroutine[Any, Any, R]]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
+    *args: P.args,
+    **kwargs: P.kwargs,
+) -> "WorkflowHandle[R]": ...
+
+
+@overload
 def start_workflow(
     dbos: "DBOS",
     func: "Workflow[P, R]",

@@ -312,6 +348,16 @@ def start_workflow(
     execute_workflow: bool,
     *args: P.args,
     **kwargs: P.kwargs,
+) -> "WorkflowHandle[R]": ...
+
+
+def start_workflow(
+    dbos: "DBOS",
+    func: "Workflow[P, Union[R, Coroutine[Any, Any, R]]]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
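The pair of @overload declarations above exists so that starting a coroutine workflow yields a WorkflowHandle[R] rather than a WorkflowHandle[Coroutine[..., R]]: the coroutine overload is listed first so type checkers match it before the generic one. A self-contained sketch of the same typing trick (run_sync_or_async is an invented name, not part of dbos):

import asyncio
import inspect
from typing import Any, Callable, Coroutine, TypeVar, Union, overload

R = TypeVar("R")

@overload
def run_sync_or_async(func: Callable[[], Coroutine[Any, Any, R]]) -> R: ...
@overload
def run_sync_or_async(func: Callable[[], R]) -> R: ...

def run_sync_or_async(func: Callable[[], Union[R, Coroutine[Any, Any, R]]]) -> R:
    result = func()
    if inspect.iscoroutine(result):
        return asyncio.run(result)  # unwrap coroutine results to R
    return result  # type: ignore[return-value]

async def answer() -> int:
    return 42

print(run_sync_or_async(lambda: answer()))  # 42, via the coroutine overload
print(run_sync_or_async(lambda: 7))         # 7, returned directly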
@@ -396,16 +442,16 @@ def start_workflow(
 
 def workflow_wrapper(
     dbosreg: "DBOSRegistry",
-    func: …
+    func: Callable[P, R],
     max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
-) -> …
+) -> Callable[P, R]:
     func.__orig_func = func  # type: ignore
 
     fi = get_or_create_func_info(func)
     fi.max_recovery_attempts = max_recovery_attempts
 
     @wraps(func)
-    def wrapper(*args: Any, **kwargs: Any) -> …
+    def wrapper(*args: Any, **kwargs: Any) -> R:
         if dbosreg.dbos is None:
             raise DBOSException(
                 f"Function {func.__name__} invoked before DBOS initialized"

@@ -425,7 +471,10 @@ def workflow_wrapper(
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
-        …
+
+        wfOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
+
+        def init_wf() -> Callable[[Callable[[], R]], R]:
            ctx = assert_current_dbos_context()  # Now the child ctx
            status = _init_workflow(
                dbos,
@@ -441,16 +490,23 @@ def workflow_wrapper(
             dbos.logger.debug(
                 f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
             )
-            return _execute_workflow(dbos, status, func, *args, **kwargs)
 
-        …
-        …
+            return _get_wf_invoke_func(dbos, status)
+
+        outcome = (
+            wfOutcome.wrap(init_wf)
+            .also(DBOSAssumeRole(rr))
+            .also(enterWorkflowCtxMgr(attributes))
+        )
+        return outcome()  # type: ignore
+
+    return wrapper
 
 
 def decorate_workflow(
     reg: "DBOSRegistry", max_recovery_attempts: int
-) -> Callable[[…
-    def _workflow_decorator(func: …
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+    def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
         wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
         reg.register_wf_function(func.__qualname__, wrapped_func)
         return wrapped_func
@@ -473,7 +529,8 @@ def decorate_transaction(
                 "name": func.__name__,
                 "operationType": OperationType.TRANSACTION.value,
             }
-            with EnterDBOSTransaction(session, attributes=attributes) …
+            with EnterDBOSTransaction(session, attributes=attributes):
+                ctx = assert_current_dbos_context()
                 txn_output: TransactionResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
@@ -562,6 +619,11 @@ def decorate_transaction(
                     raise
             return output
 
+        if inspect.iscoroutinefunction(func):
+            raise DBOSException(
+                f"Function {func.__name__} is a coroutine function, but DBOS.transaction does not support coroutine functions"
+            )
+
         fi = get_or_create_func_info(func)
 
         @wraps(func)
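One consequence of the check above: only workflows and steps gained coroutine support in this release, and declaring an async transaction now fails fast instead of silently returning an un-awaited coroutine. For illustration (bad_txn is an invented name):

from dbos import DBOS

@DBOS.transaction()
async def bad_txn() -> None:
    # Raises DBOSException when the decorator is applied, per the
    # inspect.iscoroutinefunction check above.
    ...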
@@ -603,8 +665,8 @@ def decorate_step(
     interval_seconds: float = 1.0,
     max_attempts: int = 3,
     backoff_rate: float = 2.0,
-) -> Callable[[…
-    def decorator(func: …
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+    def decorator(func: Callable[P, R]) -> Callable[P, R]:
 
         def invoke_step(*args: Any, **kwargs: Any) -> Any:
             if dbosreg.dbos is None:
@@ -617,13 +679,48 @@ def decorate_step(
                 "name": func.__name__,
                 "operationType": OperationType.STEP.value,
             }
-            …
+
+            attempts = max_attempts if retries_allowed else 1
+            max_retry_interval_seconds: float = 3600  # 1 Hour
+
+            def on_exception(attempt: int, error: BaseException) -> float:
+                dbos.logger.warning(
+                    f"Step being automatically retried. (attempt {attempt} of {attempts}). {traceback.format_exc()}"
+                )
+                ctx = assert_current_dbos_context()
+                ctx.get_current_span().add_event(
+                    f"Step attempt {attempt} failed",
+                    {
+                        "error": str(error),
+                        "retryIntervalSeconds": interval_seconds,
+                    },
+                )
+                return min(
+                    interval_seconds * (backoff_rate**attempt),
+                    max_retry_interval_seconds,
+                )
+
+            def record_step_result(func: Callable[[], R]) -> R:
+                ctx = assert_current_dbos_context()
                 step_output: OperationResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
                     "output": None,
                     "error": None,
                 }
+
+                try:
+                    output = func()
+                    step_output["output"] = _serialization.serialize(output)
+                    return output
+                except Exception as error:
+                    step_output["error"] = _serialization.serialize_exception(error)
+                    raise
+                finally:
+                    dbos._sys_db.record_operation_result(step_output)
+
+            def check_existing_result() -> Optional[R]:
+                ctx = assert_current_dbos_context()
                 recorded_output = dbos._sys_db.check_operation_execution(
                     ctx.workflow_id, ctx.function_id
                 )
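The retry delay returned by on_exception above is exponential with a one-hour ceiling. A worked example with the decorator defaults (interval_seconds=1.0, backoff_rate=2.0) and 1-based attempt numbers, which the hunk suggests:

interval_seconds, backoff_rate, cap = 1.0, 2.0, 3600.0
for attempt in (1, 2, 3):
    print(attempt, min(interval_seconds * (backoff_rate ** attempt), cap))
# attempt 1 -> 2.0s, attempt 2 -> 4.0s, attempt 3 -> 8.0s; the min() keeps
# long retry chains from backing off past one hour.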
@@ -637,57 +734,29 @@ def decorate_step(
                         )
                         raise deserialized_error
                     elif recorded_output["output"] is not None:
-                        return …
+                        return cast(
+                            R, _serialization.deserialize(recorded_output["output"])
+                        )
                     else:
                         raise Exception("Output and error are both None")
                 else:
                     dbos.logger.debug(
                         f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
                     )
+                    return None
 
-            … (old lines 648-651 not captured in this diff view)
-            local_interval_seconds = interval_seconds
-            for attempt in range(1, local_max_attempts + 1):
-                try:
-                    output = func(*args, **kwargs)
-                    step_output["output"] = _serialization.serialize(output)
-                    error = None
-                    break
-                except Exception as err:
-                    error = err
-                    if retries_allowed:
-                        dbos.logger.warning(
-                            f"Step being automatically retried. (attempt {attempt} of {local_max_attempts}). {traceback.format_exc()}"
-                        )
-                        ctx.get_current_span().add_event(
-                            f"Step attempt {attempt} failed",
-                            {
-                                "error": str(error),
-                                "retryIntervalSeconds": local_interval_seconds,
-                            },
-                        )
-                    if attempt == local_max_attempts:
-                        error = DBOSMaxStepRetriesExceeded()
-                    else:
-                        time.sleep(local_interval_seconds)
-                        local_interval_seconds = min(
-                            local_interval_seconds * backoff_rate,
-                            max_retry_interval_seconds,
-                        )
-
-            step_output["error"] = (
-                _serialization.serialize_exception(error)
-                if error is not None
-                else None
+            stepOutcome = Outcome[R].make(functools.partial(func, *args, **kwargs))
+            if retries_allowed:
+                stepOutcome = stepOutcome.retry(
+                    max_attempts, on_exception, lambda i: DBOSMaxStepRetriesExceeded()
                 )
-            dbos._sys_db.record_operation_result(step_output)
 
-            … (old lines 688-690 not captured in this diff view)
+            outcome = (
+                stepOutcome.then(record_step_result)
+                .intercept(check_existing_result)
+                .also(EnterDBOSStep(attributes))
+            )
+            return outcome()
 
         fi = get_or_create_func_info(func)
 
@@ -711,16 +780,25 @@ def decorate_step(
             assert tempwf
             return tempwf(*args, **kwargs)
 
-        def …
+        def temp_wf_sync(*args: Any, **kwargs: Any) -> Any:
            return wrapper(*args, **kwargs)
 
+        async def temp_wf_async(*args: Any, **kwargs: Any) -> Any:
+            return await wrapper(*args, **kwargs)
+
+        # Other code in transact-py depends on the name of temporary workflow functions to be "temp_wf"
+        # so set the name of both sync and async temporary workflow functions explicitly
+        temp_wf_sync.__name__ = "temp_wf"
+        temp_wf_async.__name__ = "temp_wf"
+
+        temp_wf = temp_wf_async if inspect.iscoroutinefunction(func) else temp_wf_sync
         wrapped_wf = workflow_wrapper(dbosreg, temp_wf)
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "step")
         dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
         wrapper.__orig_func = temp_wf  # type: ignore
 
-        return cast(…
+        return cast(Callable[P, R], wrapper)
 
     return decorator
 
@@ -732,7 +810,8 @@ def send(
     attributes: TracedAttributes = {
         "name": "send",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         dbos._sys_db.send(
             ctx.workflow_id,
             ctx.curr_step_function_id,

@@ -759,7 +838,8 @@ def recv(dbos: "DBOS", topic: Optional[str] = None, timeout_seconds: float = 60)
     attributes: TracedAttributes = {
         "name": "recv",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         ctx.function_id += 1  # Reserve for the sleep
         timeout_function_id = ctx.function_id
         return dbos._sys_db.recv(

@@ -784,7 +864,8 @@ def set_event(dbos: "DBOS", key: str, value: Any) -> None:
     attributes: TracedAttributes = {
         "name": "set_event",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         dbos._sys_db.set_event(
             ctx.workflow_id, ctx.curr_step_function_id, key, value
         )

@@ -805,7 +886,8 @@ def get_event(
     attributes: TracedAttributes = {
         "name": "get_event",
     }
-    with EnterDBOSStep(attributes) …
+    with EnterDBOSStep(attributes):
+        ctx = assert_current_dbos_context()
         ctx.function_id += 1
         timeout_function_id = ctx.function_id
         caller_ctx: GetEventWorkflowContext = {
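These four helpers back the public messaging and events API; the change in each is the same mechanical one (fetch the context inside the EnterDBOSStep block rather than binding it via "as ctx"). For reference, a hedged sketch of how they are reached from user code, with destination_id invented and the method names taken from the public DBOS class rather than from this diff:

@DBOS.workflow()
def consumer() -> str:
    # Blocks (durably) until a message arrives on the topic or the
    # timeout elapses.
    return DBOS.recv(topic="greeting", timeout_seconds=30)

# From another workflow or handler:
# DBOS.send(destination_id, "hello", topic="greeting")
# DBOS.set_event("status", "done") inside a workflow, and
# DBOS.get_event(workflow_id, "status") to read it from outside.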
|