dbos 0.26.0a19__py3-none-any.whl → 0.26.0a22__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
dbos/__init__.py CHANGED
@@ -5,8 +5,7 @@ from ._dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle
5
5
  from ._dbos_config import ConfigFile, DBOSConfig, get_dbos_database_url, load_config
6
6
  from ._kafka_message import KafkaMessage
7
7
  from ._queue import Queue
8
- from ._sys_db import GetWorkflowsInput, WorkflowStatusString
9
- from ._workflow_commands import WorkflowStatus
8
+ from ._sys_db import GetWorkflowsInput, WorkflowStatus, WorkflowStatusString
10
9
 
11
10
  __all__ = [
12
11
  "ConfigFile",
dbos/_client.py CHANGED
@@ -1,5 +1,6 @@
1
1
  import asyncio
2
2
  import sys
3
+ import time
3
4
  import uuid
4
5
  from typing import Any, Generic, List, Optional, TypedDict, TypeVar
5
6
 
@@ -19,11 +20,11 @@ from dbos._serialization import WorkflowInputs
19
20
  from dbos._sys_db import (
20
21
  StepInfo,
21
22
  SystemDatabase,
23
+ WorkflowStatus,
22
24
  WorkflowStatusInternal,
23
25
  WorkflowStatusString,
24
26
  )
25
27
  from dbos._workflow_commands import (
26
- WorkflowStatus,
27
28
  fork_workflow,
28
29
  get_workflow,
29
30
  list_queued_workflows,
@@ -39,6 +40,7 @@ class EnqueueOptions(TypedDict):
39
40
  queue_name: str
40
41
  workflow_id: NotRequired[str]
41
42
  app_version: NotRequired[str]
43
+ workflow_timeout: NotRequired[float]
42
44
 
43
45
 
44
46
  class WorkflowHandleClientPolling(Generic[R]):
@@ -54,7 +56,7 @@ class WorkflowHandleClientPolling(Generic[R]):
54
56
  res: R = self._sys_db.await_workflow_result(self.workflow_id)
55
57
  return res
56
58
 
57
- def get_status(self) -> "WorkflowStatus":
59
+ def get_status(self) -> WorkflowStatus:
58
60
  status = get_workflow(self._sys_db, self.workflow_id, True)
59
61
  if status is None:
60
62
  raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -76,7 +78,7 @@ class WorkflowHandleClientAsyncPolling(Generic[R]):
76
78
  )
77
79
  return res
78
80
 
79
- async def get_status(self) -> "WorkflowStatus":
81
+ async def get_status(self) -> WorkflowStatus:
80
82
  status = await asyncio.to_thread(
81
83
  get_workflow, self._sys_db, self.workflow_id, True
82
84
  )
@@ -107,6 +109,7 @@ class DBOSClient:
107
109
  workflow_id = options.get("workflow_id")
108
110
  if workflow_id is None:
109
111
  workflow_id = str(uuid.uuid4())
112
+ workflow_timeout = options.get("workflow_timeout", None)
110
113
 
111
114
  status: WorkflowStatusInternal = {
112
115
  "workflow_uuid": workflow_id,
@@ -127,6 +130,10 @@ class DBOSClient:
127
130
  "executor_id": None,
128
131
  "recovery_attempts": None,
129
132
  "app_id": None,
133
+ "workflow_timeout_ms": (
134
+ int(workflow_timeout * 1000) if workflow_timeout is not None else None
135
+ ),
136
+ "workflow_deadline_epoch_ms": None,
130
137
  }
131
138
 
132
139
  inputs: WorkflowInputs = {
@@ -134,7 +141,9 @@ class DBOSClient:
134
141
  "kwargs": kwargs,
135
142
  }
136
143
 
137
- self._sys_db.init_workflow(status, _serialization.serialize_args(inputs))
144
+ self._sys_db.init_workflow(
145
+ status, _serialization.serialize_args(inputs), max_recovery_attempts=None
146
+ )
138
147
  return workflow_id
139
148
 
140
149
  def enqueue(
@@ -188,9 +197,13 @@ class DBOSClient:
188
197
  "recovery_attempts": None,
189
198
  "app_id": None,
190
199
  "app_version": None,
200
+ "workflow_timeout_ms": None,
201
+ "workflow_deadline_epoch_ms": None,
191
202
  }
192
203
  with self._sys_db.engine.begin() as conn:
193
- self._sys_db.insert_workflow_status(status, conn)
204
+ self._sys_db.insert_workflow_status(
205
+ status, conn, max_recovery_attempts=None
206
+ )
194
207
  self._sys_db.send(status["workflow_uuid"], 0, destination_id, message, topic)
195
208
 
196
209
  async def send_async(
@@ -3,8 +3,7 @@ from dataclasses import asdict, dataclass
3
3
  from enum import Enum
4
4
  from typing import List, Optional, Type, TypedDict, TypeVar
5
5
 
6
- from dbos._sys_db import StepInfo
7
- from dbos._workflow_commands import WorkflowStatus
6
+ from dbos._sys_db import StepInfo, WorkflowStatus
8
7
 
9
8
 
10
9
  class MessageType(str, Enum):
dbos/_context.py CHANGED
@@ -93,6 +93,11 @@ class DBOSContext:
93
93
  self.assumed_role: Optional[str] = None
94
94
  self.step_status: Optional[StepStatus] = None
95
95
 
96
+ # A user-specified workflow timeout. Takes priority over a propagated deadline.
97
+ self.workflow_timeout_ms: Optional[int] = None
98
+ # A propagated workflow deadline.
99
+ self.workflow_deadline_epoch_ms: Optional[int] = None
100
+
96
101
  def create_child(self) -> DBOSContext:
97
102
  rv = DBOSContext()
98
103
  rv.logger = self.logger
@@ -360,11 +365,60 @@ class SetWorkflowID:
360
365
  return False # Did not handle
361
366
 
362
367
 
368
+ class SetWorkflowTimeout:
369
+ """
370
+ Set the workflow timeout (in seconds) to be used for the enclosed workflow invocations.
371
+
372
+ Typical Usage
373
+ ```
374
+ with SetWorkflowTimeout(<timeout in seconds>):
375
+ result = workflow_function(...)
376
+ ```
377
+ """
378
+
379
+ def __init__(self, workflow_timeout_sec: Optional[float]) -> None:
380
+ if workflow_timeout_sec and not workflow_timeout_sec > 0:
381
+ raise Exception(
382
+ f"Invalid workflow timeout {workflow_timeout_sec}. Timeouts must be positive."
383
+ )
384
+ self.created_ctx = False
385
+ self.workflow_timeout_ms = (
386
+ int(workflow_timeout_sec * 1000)
387
+ if workflow_timeout_sec is not None
388
+ else None
389
+ )
390
+ self.saved_workflow_timeout: Optional[int] = None
391
+
392
+ def __enter__(self) -> SetWorkflowTimeout:
393
+ # Code to create a basic context
394
+ ctx = get_local_dbos_context()
395
+ if ctx is None:
396
+ self.created_ctx = True
397
+ _set_local_dbos_context(DBOSContext())
398
+ ctx = assert_current_dbos_context()
399
+ self.saved_workflow_timeout = ctx.workflow_timeout_ms
400
+ ctx.workflow_timeout_ms = self.workflow_timeout_ms
401
+ return self
402
+
403
+ def __exit__(
404
+ self,
405
+ exc_type: Optional[Type[BaseException]],
406
+ exc_value: Optional[BaseException],
407
+ traceback: Optional[TracebackType],
408
+ ) -> Literal[False]:
409
+ assert_current_dbos_context().workflow_timeout_ms = self.saved_workflow_timeout
410
+ # Code to clean up the basic context if we created it
411
+ if self.created_ctx:
412
+ _clear_local_dbos_context()
413
+ return False # Did not handle
414
+
415
+
363
416
  class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
364
417
  def __init__(self, attributes: TracedAttributes) -> None:
365
418
  self.created_ctx = False
366
419
  self.attributes = attributes
367
420
  self.is_temp_workflow = attributes["name"] == "temp_wf"
421
+ self.saved_workflow_timeout: Optional[int] = None
368
422
 
369
423
  def __enter__(self) -> DBOSContext:
370
424
  # Code to create a basic context
@@ -374,6 +428,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
374
428
  ctx = DBOSContext()
375
429
  _set_local_dbos_context(ctx)
376
430
  assert not ctx.is_within_workflow()
431
+ # Unset the workflow_timeout_ms context var so it is not applied to this
432
+ # workflow's children (instead we propagate the deadline)
433
+ self.saved_workflow_timeout = ctx.workflow_timeout_ms
434
+ ctx.workflow_timeout_ms = None
377
435
  ctx.start_workflow(
378
436
  None, self.attributes, self.is_temp_workflow
379
437
  ) # Will get from the context's next workflow ID
@@ -388,6 +446,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
388
446
  ctx = assert_current_dbos_context()
389
447
  assert ctx.is_within_workflow()
390
448
  ctx.end_workflow(exc_value, self.is_temp_workflow)
449
+ # Restore the saved workflow timeout
450
+ ctx.workflow_timeout_ms = self.saved_workflow_timeout
451
+ # Clear any propagating timeout
452
+ ctx.workflow_deadline_epoch_ms = None
391
453
  # Code to clean up the basic context if we created it
392
454
  if self.created_ctx:
393
455
  _clear_local_dbos_context()
dbos/_core.py CHANGED
@@ -3,6 +3,7 @@ import functools
3
3
  import inspect
4
4
  import json
5
5
  import sys
6
+ import threading
6
7
  import time
7
8
  import traceback
8
9
  from concurrent.futures import Future
@@ -14,11 +15,9 @@ from typing import (
14
15
  Coroutine,
15
16
  Generic,
16
17
  Optional,
17
- Tuple,
18
18
  TypeVar,
19
19
  Union,
20
20
  cast,
21
- overload,
22
21
  )
23
22
 
24
23
  from dbos._outcome import Immediate, NoResult, Outcome, Pending
@@ -59,7 +58,6 @@ from ._error import (
59
58
  )
60
59
  from ._registrations import (
61
60
  DEFAULT_MAX_RECOVERY_ATTEMPTS,
62
- DBOSFuncInfo,
63
61
  get_config_name,
64
62
  get_dbos_class_name,
65
63
  get_dbos_func_name,
@@ -75,6 +73,7 @@ from ._serialization import WorkflowInputs
75
73
  from ._sys_db import (
76
74
  GetEventWorkflowContext,
77
75
  OperationResultInternal,
76
+ WorkflowStatus,
78
77
  WorkflowStatusInternal,
79
78
  WorkflowStatusString,
80
79
  )
@@ -87,7 +86,6 @@ if TYPE_CHECKING:
87
86
  DBOSRegistry,
88
87
  IsolationLevel,
89
88
  )
90
- from ._workflow_commands import WorkflowStatus
91
89
 
92
90
  from sqlalchemy.exc import DBAPIError, InvalidRequestError
93
91
 
@@ -119,7 +117,7 @@ class WorkflowHandleFuture(Generic[R]):
119
117
  self.dbos._sys_db.record_get_result(self.workflow_id, serialized_r, None)
120
118
  return r
121
119
 
122
- def get_status(self) -> "WorkflowStatus":
120
+ def get_status(self) -> WorkflowStatus:
123
121
  stat = self.dbos.get_workflow_status(self.workflow_id)
124
122
  if stat is None:
125
123
  raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -146,7 +144,7 @@ class WorkflowHandlePolling(Generic[R]):
146
144
  self.dbos._sys_db.record_get_result(self.workflow_id, serialized_r, None)
147
145
  return r
148
146
 
149
- def get_status(self) -> "WorkflowStatus":
147
+ def get_status(self) -> WorkflowStatus:
150
148
  stat = self.dbos.get_workflow_status(self.workflow_id)
151
149
  if stat is None:
152
150
  raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -181,7 +179,7 @@ class WorkflowHandleAsyncTask(Generic[R]):
181
179
  )
182
180
  return r
183
181
 
184
- async def get_status(self) -> "WorkflowStatus":
182
+ async def get_status(self) -> WorkflowStatus:
185
183
  stat = await asyncio.to_thread(self.dbos.get_workflow_status, self.workflow_id)
186
184
  if stat is None:
187
185
  raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -217,7 +215,7 @@ class WorkflowHandleAsyncPolling(Generic[R]):
217
215
  )
218
216
  return r
219
217
 
220
- async def get_status(self) -> "WorkflowStatus":
218
+ async def get_status(self) -> WorkflowStatus:
221
219
  stat = await asyncio.to_thread(self.dbos.get_workflow_status, self.workflow_id)
222
220
  if stat is None:
223
221
  raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -227,19 +225,30 @@ class WorkflowHandleAsyncPolling(Generic[R]):
227
225
  def _init_workflow(
228
226
  dbos: "DBOS",
229
227
  ctx: DBOSContext,
228
+ *,
230
229
  inputs: WorkflowInputs,
231
230
  wf_name: str,
232
231
  class_name: Optional[str],
233
232
  config_name: Optional[str],
234
- temp_wf_type: Optional[str],
235
- queue: Optional[str] = None,
236
- max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
233
+ queue: Optional[str],
234
+ workflow_timeout_ms: Optional[int],
235
+ workflow_deadline_epoch_ms: Optional[int],
236
+ max_recovery_attempts: Optional[int],
237
237
  ) -> WorkflowStatusInternal:
238
238
  wfid = (
239
239
  ctx.workflow_id
240
240
  if len(ctx.workflow_id) > 0
241
241
  else ctx.id_assigned_for_next_workflow
242
242
  )
243
+
244
+ # In debug mode, just return the existing status
245
+ if dbos.debug_mode:
246
+ get_status_result = dbos._sys_db.get_workflow_status(wfid)
247
+ if get_status_result is None:
248
+ raise DBOSNonExistentWorkflowError(wfid)
249
+ return get_status_result
250
+
251
+ # Initialize a workflow status object from the context
243
252
  status: WorkflowStatusInternal = {
244
253
  "workflow_uuid": wfid,
245
254
  "status": (
@@ -267,25 +276,47 @@ def _init_workflow(
267
276
  "queue_name": queue,
268
277
  "created_at": None,
269
278
  "updated_at": None,
279
+ "workflow_timeout_ms": workflow_timeout_ms,
280
+ "workflow_deadline_epoch_ms": workflow_deadline_epoch_ms,
270
281
  }
271
282
 
272
283
  # If we have a class name, the first arg is the instance and do not serialize
273
284
  if class_name is not None:
274
285
  inputs = {"args": inputs["args"][1:], "kwargs": inputs["kwargs"]}
275
286
 
276
- wf_status = status["status"]
277
- if dbos.debug_mode:
278
- get_status_result = dbos._sys_db.get_workflow_status(wfid)
279
- if get_status_result is None:
280
- raise DBOSNonExistentWorkflowError(wfid)
281
- wf_status = get_status_result["status"]
282
- else:
283
- wf_status = dbos._sys_db.init_workflow(
284
- status,
285
- _serialization.serialize_args(inputs),
286
- max_recovery_attempts=max_recovery_attempts,
287
- )
287
+ # Synchronously record the status and inputs for workflows
288
+ wf_status, workflow_deadline_epoch_ms = dbos._sys_db.init_workflow(
289
+ status,
290
+ _serialization.serialize_args(inputs),
291
+ max_recovery_attempts=max_recovery_attempts,
292
+ )
288
293
 
294
+ if workflow_deadline_epoch_ms is not None:
295
+ evt = threading.Event()
296
+ dbos.stop_events.append(evt)
297
+
298
+ def timeout_func() -> None:
299
+ try:
300
+ assert workflow_deadline_epoch_ms is not None
301
+ time_to_wait_sec = (
302
+ workflow_deadline_epoch_ms - (time.time() * 1000)
303
+ ) / 1000
304
+ if time_to_wait_sec > 0:
305
+ was_stopped = evt.wait(time_to_wait_sec)
306
+ if was_stopped:
307
+ return
308
+ dbos._sys_db.cancel_workflow(wfid)
309
+ except Exception as e:
310
+ dbos.logger.warning(
311
+ f"Exception in timeout thread for workflow {wfid}: {e}"
312
+ )
313
+
314
+ timeout_thread = threading.Thread(target=timeout_func, daemon=True)
315
+ timeout_thread.start()
316
+ dbos._background_threads.append(timeout_thread)
317
+
318
+ ctx.workflow_deadline_epoch_ms = workflow_deadline_epoch_ms
319
+ status["workflow_deadline_epoch_ms"] = workflow_deadline_epoch_ms
289
320
  status["status"] = wf_status
290
321
  return status
291
322
 
@@ -501,6 +532,13 @@ def start_workflow(
501
532
  "kwargs": kwargs,
502
533
  }
503
534
 
535
+ local_ctx = get_local_dbos_context()
536
+ workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
537
+ local_ctx, queue_name
538
+ )
539
+ workflow_timeout_ms = (
540
+ local_ctx.workflow_timeout_ms if local_ctx is not None else None
541
+ )
504
542
  new_wf_id, new_wf_ctx = _get_new_wf()
505
543
 
506
544
  ctx = new_wf_ctx
@@ -519,8 +557,9 @@ def start_workflow(
519
557
  wf_name=get_dbos_func_name(func),
520
558
  class_name=get_dbos_class_name(fi, func, args),
521
559
  config_name=get_config_name(fi, func, args),
522
- temp_wf_type=get_temp_workflow_type(func),
523
560
  queue=queue_name,
561
+ workflow_timeout_ms=workflow_timeout_ms,
562
+ workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
524
563
  max_recovery_attempts=fi.max_recovery_attempts,
525
564
  )
526
565
 
@@ -583,6 +622,10 @@ async def start_workflow_async(
583
622
  "kwargs": kwargs,
584
623
  }
585
624
 
625
+ local_ctx = get_local_dbos_context()
626
+ workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
627
+ local_ctx, queue_name
628
+ )
586
629
  new_wf_id, new_wf_ctx = _get_new_wf()
587
630
 
588
631
  ctx = new_wf_ctx
@@ -604,8 +647,9 @@ async def start_workflow_async(
604
647
  wf_name=get_dbos_func_name(func),
605
648
  class_name=get_dbos_class_name(fi, func, args),
606
649
  config_name=get_config_name(fi, func, args),
607
- temp_wf_type=get_temp_workflow_type(func),
608
650
  queue=queue_name,
651
+ workflow_timeout_ms=workflow_timeout_ms,
652
+ workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
609
653
  max_recovery_attempts=fi.max_recovery_attempts,
610
654
  )
611
655
 
@@ -653,7 +697,7 @@ else:
653
697
  def workflow_wrapper(
654
698
  dbosreg: "DBOSRegistry",
655
699
  func: Callable[P, R],
656
- max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
700
+ max_recovery_attempts: Optional[int] = DEFAULT_MAX_RECOVERY_ATTEMPTS,
657
701
  ) -> Callable[P, R]:
658
702
  func.__orig_func = func # type: ignore
659
703
 
@@ -680,6 +724,9 @@ def workflow_wrapper(
680
724
  "kwargs": kwargs,
681
725
  }
682
726
  ctx = get_local_dbos_context()
727
+ workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
728
+ ctx, queue=None
729
+ )
683
730
  enterWorkflowCtxMgr = (
684
731
  EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
685
732
  )
@@ -717,7 +764,9 @@ def workflow_wrapper(
717
764
  wf_name=get_dbos_func_name(func),
718
765
  class_name=get_dbos_class_name(fi, func, args),
719
766
  config_name=get_config_name(fi, func, args),
720
- temp_wf_type=get_temp_workflow_type(func),
767
+ queue=None,
768
+ workflow_timeout_ms=workflow_timeout_ms,
769
+ workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
721
770
  max_recovery_attempts=max_recovery_attempts,
722
771
  )
723
772
 
@@ -765,7 +814,7 @@ def workflow_wrapper(
765
814
 
766
815
 
767
816
  def decorate_workflow(
768
- reg: "DBOSRegistry", max_recovery_attempts: int
817
+ reg: "DBOSRegistry", max_recovery_attempts: Optional[int]
769
818
  ) -> Callable[[Callable[P, R]], Callable[P, R]]:
770
819
  def _workflow_decorator(func: Callable[P, R]) -> Callable[P, R]:
771
820
  wrapped_func = workflow_wrapper(reg, func, max_recovery_attempts)
@@ -1211,3 +1260,24 @@ def get_event(
1211
1260
  else:
1212
1261
  # Directly call it outside of a workflow
1213
1262
  return dbos._sys_db.get_event(workflow_id, key, timeout_seconds)
1263
+
1264
+
1265
+ def _get_timeout_deadline(
1266
+ ctx: Optional[DBOSContext], queue: Optional[str]
1267
+ ) -> tuple[Optional[int], Optional[int]]:
1268
+ if ctx is None:
1269
+ return None, None
1270
+ # If a timeout is explicitly specified, use it over any propagated deadline
1271
+ if ctx.workflow_timeout_ms:
1272
+ if queue:
1273
+ # Queued workflows are assigned a deadline on dequeue
1274
+ return ctx.workflow_timeout_ms, None
1275
+ else:
1276
+ # Otherwise, compute the deadline immediately
1277
+ return (
1278
+ ctx.workflow_timeout_ms,
1279
+ int(time.time() * 1000) + ctx.workflow_timeout_ms,
1280
+ )
1281
+ # Otherwise, return the propagated deadline, if any
1282
+ else:
1283
+ return None, ctx.workflow_deadline_epoch_ms
dbos/_dbos.py CHANGED
@@ -31,9 +31,9 @@ from typing import (
31
31
  from opentelemetry.trace import Span
32
32
 
33
33
  from dbos._conductor.conductor import ConductorWebsocket
34
+ from dbos._sys_db import WorkflowStatus
34
35
  from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
35
36
  from dbos._workflow_commands import (
36
- WorkflowStatus,
37
37
  fork_workflow,
38
38
  list_queued_workflows,
39
39
  list_workflows,
@@ -68,7 +68,7 @@ from ._registrations import (
68
68
  )
69
69
  from ._roles import default_required_roles, required_roles
70
70
  from ._scheduler import ScheduledWorkflow, scheduled
71
- from ._sys_db import StepInfo, reset_system_database
71
+ from ._sys_db import StepInfo, WorkflowStatus, reset_system_database
72
72
  from ._tracer import dbos_tracer
73
73
 
74
74
  if TYPE_CHECKING:
@@ -114,7 +114,7 @@ from ._error import (
114
114
  from ._event_loop import BackgroundEventLoop
115
115
  from ._logger import add_otlp_to_all_loggers, config_logger, dbos_logger, init_logger
116
116
  from ._sys_db import SystemDatabase
117
- from ._workflow_commands import WorkflowStatus, get_workflow, list_workflow_steps
117
+ from ._workflow_commands import get_workflow, list_workflow_steps
118
118
 
119
119
  # Most DBOS functions are just any callable F, so decorators / wrappers work on F
120
120
  # There are cases where the parameters P and return value R should be separate
@@ -600,7 +600,7 @@ class DBOS:
600
600
  # Decorators for DBOS functionality
601
601
  @classmethod
602
602
  def workflow(
603
- cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
603
+ cls, *, max_recovery_attempts: Optional[int] = DEFAULT_MAX_RECOVERY_ATTEMPTS
604
604
  ) -> Callable[[Callable[P, R]], Callable[P, R]]:
605
605
  """Decorate a function for use as a DBOS workflow."""
606
606
  return decorate_workflow(_get_or_create_dbos_registry(), max_recovery_attempts)
@@ -0,0 +1,44 @@
1
+ """workflow_timeout
2
+
3
+ Revision ID: 83f3732ae8e7
4
+ Revises: f4b9b32ba814
5
+ Create Date: 2025-04-16 17:05:36.642395
6
+
7
+ """
8
+
9
+ from typing import Sequence, Union
10
+
11
+ import sqlalchemy as sa
12
+ from alembic import op
13
+
14
+ # revision identifiers, used by Alembic.
15
+ revision: str = "83f3732ae8e7"
16
+ down_revision: Union[str, None] = "f4b9b32ba814"
17
+ branch_labels: Union[str, Sequence[str], None] = None
18
+ depends_on: Union[str, Sequence[str], None] = None
19
+
20
+
21
+ def upgrade() -> None:
22
+ op.add_column(
23
+ "workflow_status",
24
+ sa.Column(
25
+ "workflow_timeout_ms",
26
+ sa.BigInteger(),
27
+ nullable=True,
28
+ ),
29
+ schema="dbos",
30
+ )
31
+ op.add_column(
32
+ "workflow_status",
33
+ sa.Column(
34
+ "workflow_deadline_epoch_ms",
35
+ sa.BigInteger(),
36
+ nullable=True,
37
+ ),
38
+ schema="dbos",
39
+ )
40
+
41
+
42
+ def downgrade() -> None:
43
+ op.drop_column("workflow_status", "workflow_deadline_epoch_ms", schema="dbos")
44
+ op.drop_column("workflow_status", "workflow_timeout_ms", schema="dbos")
dbos/_registrations.py CHANGED
@@ -51,7 +51,7 @@ class DBOSFuncInfo:
51
51
  class_info: Optional[DBOSClassInfo] = None
52
52
  func_type: DBOSFuncType = DBOSFuncType.Unknown
53
53
  required_roles: Optional[List[str]] = None
54
- max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
54
+ max_recovery_attempts: Optional[int] = DEFAULT_MAX_RECOVERY_ATTEMPTS
55
55
 
56
56
 
57
57
  def get_or_create_class_info(
@@ -54,7 +54,9 @@ class SystemSchema:
54
54
  nullable=True,
55
55
  server_default=text("'0'::bigint"),
56
56
  ),
57
- Column("queue_name", Text),
57
+ Column("queue_name", Text, nullable=True),
58
+ Column("workflow_timeout_ms", BigInteger, nullable=True),
59
+ Column("workflow_deadline_epoch_ms", BigInteger, nullable=True),
58
60
  Index("workflow_status_created_at_index", "created_at"),
59
61
  Index("workflow_status_executor_id_index", "executor_id"),
60
62
  )