dbos 2.4.0a3__py3-none-any.whl → 2.6.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


dbos/_core.py CHANGED
@@ -91,6 +91,7 @@ F = TypeVar("F", bound=Callable[..., Any])
 
 TEMP_SEND_WF_NAME = "<temp>.temp_send_workflow"
 DEBOUNCER_WORKFLOW_NAME = "_dbos_debouncer_workflow"
+DEFAULT_POLLING_INTERVAL = 1.0
 
 
 class WorkflowHandleFuture(Generic[R]):
@@ -103,7 +104,9 @@ class WorkflowHandleFuture(Generic[R]):
     def get_workflow_id(self) -> str:
         return self.workflow_id
 
-    def get_result(self) -> R:
+    def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         try:
             r = self.future.result()
         except Exception as e:
@@ -130,9 +133,13 @@ class WorkflowHandlePolling(Generic[R]):
     def get_workflow_id(self) -> str:
         return self.workflow_id
 
-    def get_result(self) -> R:
+    def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         try:
-            r: R = self.dbos._sys_db.await_workflow_result(self.workflow_id)
+            r: R = self.dbos._sys_db.await_workflow_result(
+                self.workflow_id, polling_interval_sec
+            )
         except Exception as e:
             serialized_e = self.dbos._serializer.serialize(e)
             self.dbos._sys_db.record_get_result(self.workflow_id, None, serialized_e)
@@ -158,7 +165,9 @@ class WorkflowHandleAsyncTask(Generic[R]):
     def get_workflow_id(self) -> str:
         return self.workflow_id
 
-    async def get_result(self) -> R:
+    async def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         try:
             r = await self.task
         except Exception as e:
@@ -192,10 +201,14 @@ class WorkflowHandleAsyncPolling(Generic[R]):
     def get_workflow_id(self) -> str:
         return self.workflow_id
 
-    async def get_result(self) -> R:
+    async def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         try:
             r: R = await asyncio.to_thread(
-                self.dbos._sys_db.await_workflow_result, self.workflow_id
+                self.dbos._sys_db.await_workflow_result,
+                self.workflow_id,
+                polling_interval_sec,
             )
         except Exception as e:
             serialized_e = self.dbos._serializer.serialize(e)
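
Note: all four handle classes gain the same keyword-only parameter. A minimal usage sketch, assuming a hypothetical @DBOS.workflow()-decorated function my_workflow that is not part of this diff:

    from dbos import DBOS

    handle = DBOS.start_workflow(my_workflow, 42)
    # Poll the system database every 0.25 s instead of the default 1.0 s.
    result = handle.get_result(polling_interval_sec=0.25)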
@@ -366,7 +379,7 @@ def _get_wf_invoke_func(
             )
             # Directly return the result if the workflow is already completed
             recorded_result: R = dbos._sys_db.await_workflow_result(
-                status["workflow_uuid"]
+                status["workflow_uuid"], polling_interval=DEFAULT_POLLING_INTERVAL
             )
             return recorded_result
         try:
@@ -381,7 +394,9 @@
             return output
         except DBOSWorkflowConflictIDError:
             # Await the workflow result
-            r: R = dbos._sys_db.await_workflow_result(status["workflow_uuid"])
+            r: R = dbos._sys_db.await_workflow_result(
+                status["workflow_uuid"], polling_interval=DEFAULT_POLLING_INTERVAL
+            )
             return r
         except DBOSWorkflowCancelledError as error:
             raise DBOSAwaitedWorkflowCancelledError(status["workflow_uuid"])
@@ -579,11 +594,14 @@ def start_workflow(
         ctx = new_wf_ctx
     new_child_workflow_id = ctx.id_assigned_for_next_workflow
     if ctx.has_parent():
-        child_workflow_id = dbos._sys_db.check_child_workflow(
-            ctx.parent_workflow_id, ctx.parent_workflow_fid
+        recorded_result = dbos._sys_db.check_operation_execution(
+            ctx.parent_workflow_id, ctx.parent_workflow_fid, get_dbos_func_name(func)
         )
-        if child_workflow_id is not None:
-            return WorkflowHandlePolling(child_workflow_id, dbos)
+        if recorded_result and recorded_result["error"]:
+            e: Exception = dbos._sys_db.serializer.deserialize(recorded_result["error"])
+            raise e
+        elif recorded_result and recorded_result["child_workflow_id"]:
+            return WorkflowHandlePolling(recorded_result["child_workflow_id"], dbos)
 
     status = _init_workflow(
         dbos,
@@ -675,13 +693,19 @@ async def start_workflow_async(
         ctx = new_wf_ctx
     new_child_workflow_id = ctx.id_assigned_for_next_workflow
     if ctx.has_parent():
-        child_workflow_id = await asyncio.to_thread(
-            dbos._sys_db.check_child_workflow,
+        recorded_result = await asyncio.to_thread(
+            dbos._sys_db.check_operation_execution,
             ctx.parent_workflow_id,
             ctx.parent_workflow_fid,
+            get_dbos_func_name(func),
         )
-        if child_workflow_id is not None:
-            return WorkflowHandleAsyncPolling(child_workflow_id, dbos)
+        if recorded_result and recorded_result["error"]:
+            e: Exception = dbos._sys_db.serializer.deserialize(recorded_result["error"])
+            raise e
+        elif recorded_result and recorded_result["child_workflow_id"]:
+            return WorkflowHandleAsyncPolling(
+                recorded_result["child_workflow_id"], dbos
+            )
 
     status = await asyncio.to_thread(
         _init_workflow,
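
Note: the record returned by check_operation_execution appears to carry at least the keys accessed above. A hedged sketch of its shape; the real type lives in dbos._sys_db and may differ:

    from typing import Optional, TypedDict

    class RecordedResult(TypedDict):  # hypothetical name, for illustration only
        output: Optional[str]  # serialized return value, if one was recorded
        error: Optional[str]  # serialized exception, if one was recorded
        child_workflow_id: Optional[str]  # set when the operation started a child workflow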
@@ -788,7 +812,9 @@ def workflow_wrapper(
         c_wfid: str, dbos: "DBOS"
     ) -> Callable[[Callable[[], R]], R]:
         def recorded_result_inner(func: Callable[[], R]) -> R:
-            r: R = dbos._sys_db.await_workflow_result(c_wfid)
+            r: R = dbos._sys_db.await_workflow_result(
+                c_wfid, polling_interval=DEFAULT_POLLING_INTERVAL
+            )
             return r
 
         return recorded_result_inner
@@ -798,11 +824,16 @@
         workflow_id = ctx.workflow_id
 
         if ctx.has_parent():
-            child_workflow_id = dbos._sys_db.check_child_workflow(
-                ctx.parent_workflow_id, ctx.parent_workflow_fid
+            r = dbos._sys_db.check_operation_execution(
+                ctx.parent_workflow_id,
+                ctx.parent_workflow_fid,
+                get_dbos_func_name(func),
             )
-            if child_workflow_id is not None:
-                return recorded_result(child_workflow_id, dbos)
+            if r and r["error"]:
+                e: Exception = dbos._sys_db.serializer.deserialize(r["error"])
+                raise e
+            elif r and r["child_workflow_id"]:
+                return recorded_result(r["child_workflow_id"], dbos)
 
         status = _init_workflow(
             dbos,
@@ -889,12 +920,6 @@ def decorate_transaction(
             )
 
             dbos = dbosreg.dbos
-            ctx = assert_current_dbos_context()
-            status = dbos._sys_db.get_workflow_status(ctx.workflow_id)
-            if status and status["status"] == WorkflowStatusString.CANCELLED.value:
-                raise DBOSWorkflowCancelledError(
-                    f"Workflow {ctx.workflow_id} is cancelled. Aborting transaction {transaction_name}."
-                )
             assert (
                 dbos._app_db
             ), "Transactions can only be used if DBOS is configured with an application_database_url"
@@ -905,6 +930,26 @@
             }
             with EnterDBOSTransaction(session, attributes=attributes):
                 ctx = assert_current_dbos_context()
+                # Check if the step record for this transaction exists
+                recorded_step_output = dbos._sys_db.check_operation_execution(
+                    ctx.workflow_id, ctx.function_id, transaction_name
+                )
+                if recorded_step_output:
+                    dbos.logger.debug(
+                        f"Replaying transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                    )
+                    if recorded_step_output["error"]:
+                        step_error: Exception = dbos._serializer.deserialize(
+                            recorded_step_output["error"]
+                        )
+                        raise step_error
+                    elif recorded_step_output["output"]:
+                        return dbos._serializer.deserialize(
+                            recorded_step_output["output"]
+                        )
+                    else:
+                        raise Exception("Output and error are both None")
+
                 txn_output: TransactionResultInternal = {
                     "workflow_uuid": ctx.workflow_id,
                     "function_id": ctx.function_id,
@@ -915,6 +960,14 @@
                     "txn_id": None,
                     "function_name": transaction_name,
                 }
+                step_output: OperationResultInternal = {
+                    "workflow_uuid": ctx.workflow_id,
+                    "function_id": ctx.function_id,
+                    "function_name": transaction_name,
+                    "output": None,
+                    "error": None,
+                    "started_at_epoch_ms": int(time.time() * 1000),
+                }
                 retry_wait_seconds = 0.001
                 backoff_factor = 1.5
                 max_retry_wait_seconds = 2.0
@@ -953,8 +1006,18 @@
                                     )
                                 )
                                 has_recorded_error = True
+                                step_output["error"] = recorded_output["error"]
+                                dbos._sys_db.record_operation_result(
+                                    step_output
+                                )
                                 raise deserialized_error
                             elif recorded_output["output"]:
+                                step_output["output"] = recorded_output[
+                                    "output"
+                                ]
+                                dbos._sys_db.record_operation_result(
+                                    step_output
+                                )
                                 return dbos._serializer.deserialize(
                                     recorded_output["output"]
                                 )
@@ -1011,10 +1074,13 @@
                     finally:
                         # Don't record the error if it was already recorded
                         if txn_error and not has_recorded_error:
-                            txn_output["error"] = dbos._serializer.serialize(
-                                txn_error
+                            step_output["error"] = txn_output["error"] = (
+                                dbos._serializer.serialize(txn_error)
                             )
                             dbos._app_db.record_transaction_error(txn_output)
+                            dbos._sys_db.record_operation_result(step_output)
+                step_output["output"] = dbos._serializer.serialize(output)
+                dbos._sys_db.record_operation_result(step_output)
                 return output
 
         if inspect.iscoroutinefunction(func):
@@ -1283,7 +1349,9 @@ def set_event(dbos: "DBOS", key: str, value: Any) -> None:
             ctx.workflow_id, ctx.curr_step_function_id, key, value
         )
     elif cur_ctx.is_step():
-        dbos._sys_db.set_event_from_step(cur_ctx.workflow_id, key, value)
+        dbos._sys_db.set_event_from_step(
+            cur_ctx.workflow_id, cur_ctx.curr_step_function_id, key, value
+        )
     else:
         raise DBOSException(
             "set_event() must be called from within a workflow or step"
dbos/_dbos.py CHANGED
@@ -7,7 +7,6 @@ import os
 import sys
 import threading
 import time
-import uuid
 from concurrent.futures import ThreadPoolExecutor
 from logging import Logger
 from typing import (
@@ -33,12 +32,13 @@ from dbos._conductor.conductor import ConductorWebsocket
 from dbos._debouncer import debouncer_workflow
 from dbos._serialization import DefaultSerializer, Serializer
 from dbos._sys_db import SystemDatabase, WorkflowStatus
-from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
+from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams, generate_uuid
 from dbos._workflow_commands import fork_workflow, list_queued_workflows, list_workflows
 
 from ._classproperty import classproperty
 from ._core import (
     DEBOUNCER_WORKFLOW_NAME,
+    DEFAULT_POLLING_INTERVAL,
     TEMP_SEND_WF_NAME,
     WorkflowHandleAsyncPolling,
     WorkflowHandlePolling,
@@ -112,7 +112,7 @@ from ._logger import (
     dbos_logger,
     init_logger,
 )
-from ._workflow_commands import get_workflow, list_workflow_steps
+from ._workflow_commands import get_workflow
 
 # Most DBOS functions are just any callable F, so decorators / wrappers work on F
 # There are cases where the parameters P and return value R should be separate
@@ -336,6 +336,8 @@ class DBOS:
         self._executor_field: Optional[ThreadPoolExecutor] = None
         self._background_threads: List[threading.Thread] = []
         self.conductor_url: Optional[str] = conductor_url
+        if config.get("conductor_url"):
+            self.conductor_url = config.get("conductor_url")
         self.conductor_key: Optional[str] = conductor_key
         if config.get("conductor_key"):
             self.conductor_key = config.get("conductor_key")
@@ -444,7 +446,7 @@
         if GlobalParams.app_version == "":
             GlobalParams.app_version = self._registry.compute_app_version()
         if self.conductor_key is not None:
-            GlobalParams.executor_id = str(uuid.uuid4())
+            GlobalParams.executor_id = generate_uuid()
         dbos_logger.info(f"Executor ID: {GlobalParams.executor_id}")
         dbos_logger.info(f"Application version: {GlobalParams.app_version}")
         self._executor_field = ThreadPoolExecutor(max_workers=sys.maxsize)
@@ -460,6 +462,7 @@
             debug_mode=debug_mode,
             schema=schema,
             serializer=self._serializer,
+            executor_id=GlobalParams.executor_id,
         )
         assert self._config["database"]["db_engine_kwargs"] is not None
         if self._config["database_url"]:
@@ -495,20 +498,21 @@
         except Exception as e:
             dbos_logger.warning(f"Failed to start admin server: {e}")
 
-        dbos_logger.debug("Retrieving local pending workflows for recovery")
-        workflow_ids = self._sys_db.get_pending_workflows(
-            GlobalParams.executor_id, GlobalParams.app_version
-        )
-        if (len(workflow_ids)) > 0:
-            self.logger.info(
-                f"Recovering {len(workflow_ids)} workflows from application version {GlobalParams.app_version}"
-            )
-        else:
-            self.logger.info(
-                f"No workflows to recover from application version {GlobalParams.app_version}"
+        # Recover local workflows if not using a recovery service
+        if not self.conductor_key and not GlobalParams.dbos_cloud:
+            dbos_logger.debug("Retrieving local pending workflows for recovery")
+            workflow_ids = self._sys_db.get_pending_workflows(
+                GlobalParams.executor_id, GlobalParams.app_version
             )
-
-        self._executor.submit(startup_recovery_thread, self, workflow_ids)
+            if (len(workflow_ids)) > 0:
+                self.logger.info(
+                    f"Recovering {len(workflow_ids)} workflows from application version {GlobalParams.app_version}"
+                )
+            else:
+                self.logger.info(
+                    f"No workflows to recover from application version {GlobalParams.app_version}"
+                )
+            self._executor.submit(startup_recovery_thread, self, workflow_ids)
 
         # Listen to notifications
         dbos_logger.debug("Starting notifications listener thread")
@@ -568,8 +572,8 @@
             conductor_registration_url = (
                 f"https://console.dbos.dev/self-host?appname={app_name}"
             )
-            print(
-                f"To view and manage workflows, connect to DBOS Conductor at:{conductor_registration_url}"
+            dbos_logger.info(
+                f"To view and manage workflows, connect to DBOS Conductor at: {conductor_registration_url}"
             )
 
         # Flush handlers and add OTLP to all loggers if enabled
@@ -1089,7 +1093,6 @@
         dbos_logger.info(f"Forking workflow: {workflow_id} from step {start_step}")
         return fork_workflow(
             _get_dbos_instance()._sys_db,
-            _get_dbos_instance()._app_db,
             workflow_id,
             start_step,
             application_version=application_version,
@@ -1266,9 +1269,7 @@
     @classmethod
     def list_workflow_steps(cls, workflow_id: str) -> List[StepInfo]:
         def fn() -> List[StepInfo]:
-            return list_workflow_steps(
-                _get_dbos_instance()._sys_db, _get_dbos_instance()._app_db, workflow_id
-            )
+            return _get_dbos_instance()._sys_db.list_workflow_steps(workflow_id)
 
         return _get_dbos_instance()._sys_db.call_function_as_step(
             fn, "DBOS.listWorkflowSteps"
@@ -1325,11 +1326,10 @@
         return None
 
     @classproperty
-    def span(cls) -> "Span":
+    def span(cls) -> Optional["Span"]:
         """Return the tracing `Span` associated with the current context."""
         ctx = assert_current_dbos_context()
         span = ctx.get_current_active_span()
-        assert span
         return span
 
     @classproperty
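
Note: because DBOS.span now returns Optional["Span"], callers must guard before use. A minimal sketch:

    from dbos import DBOS

    span = DBOS.span
    if span is not None:
        span.set_attribute("responseCode", 200)  # hypothetical attribute and value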
@@ -1383,7 +1383,7 @@
             )
         elif ctx.is_step():
             _get_dbos_instance()._sys_db.write_stream_from_step(
-                ctx.workflow_id, key, value
+                ctx.workflow_id, ctx.function_id, key, value
             )
         else:
             raise DBOSException(
@@ -1550,7 +1550,9 @@ class WorkflowHandle(Generic[R], Protocol):
         """Return the applicable workflow ID."""
         ...
 
-    def get_result(self) -> R:
+    def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         """Return the result of the workflow function invocation, waiting if necessary."""
         ...
 
@@ -1579,7 +1581,9 @@ class WorkflowHandleAsync(Generic[R], Protocol):
         """Return the applicable workflow ID."""
         ...
 
-    async def get_result(self) -> R:
+    async def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         """Return the result of the workflow function invocation, waiting if necessary."""
         ...
 
dbos/_dbos_config.py CHANGED
@@ -39,6 +39,7 @@ class DBOSConfig(TypedDict, total=False):
         enable_otlp (bool): If True, enable built-in DBOS OTLP tracing and logging.
         system_database_engine (sa.Engine): A custom system database engine. If provided, DBOS will not create an engine but use this instead.
         conductor_key (str): An API key for DBOS Conductor. Pass this in to connect your process to Conductor.
+        conductor_url (str): The websockets URL for your DBOS Conductor service. Only set if you're self-hosting Conductor.
         serializer (Serializer): A custom serializer and deserializer DBOS uses when storing program data in the system database
     """
 
@@ -60,6 +61,7 @@ class DBOSConfig(TypedDict, total=False):
     enable_otlp: Optional[bool]
     system_database_engine: Optional[sa.Engine]
     conductor_key: Optional[str]
+    conductor_url: Optional[str]
     serializer: Optional[Serializer]
 
 
@@ -264,8 +266,7 @@ def load_config(
             data["telemetry"]["OTLPExporter"]["tracesEndpoint"]
         ]
 
-    data = cast(ConfigFile, data)
-    return data  # type: ignore
+    return cast(ConfigFile, data)
 
 
 def process_config(
@@ -407,25 +408,6 @@
 
     configure_db_engine_parameters(data["database"], connect_timeout=connect_timeout)
 
-    assert data["system_database_url"] is not None
-    # Pretty-print connection information, respecting log level
-    if not silent and logs["logLevel"] == "INFO" or logs["logLevel"] == "DEBUG":
-        printable_sys_db_url = make_url(data["system_database_url"]).render_as_string(
-            hide_password=True
-        )
-        print(f"DBOS system database URL: {printable_sys_db_url}")
-        if data["database_url"]:
-            printable_app_db_url = make_url(data["database_url"]).render_as_string(
-                hide_password=True
-            )
-            print(f"DBOS application database URL: {printable_app_db_url}")
-        if data["system_database_url"].startswith("sqlite"):
-            print(
-                f"Using SQLite as a system database. The SQLite system database is for development and testing. PostgreSQL is recommended for production use."
-            )
-        else:
-            print(f"Database engine parameters: {data['database']['db_engine_kwargs']}")
-
     # Return data as ConfigFile type
     return data
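
Note: a minimal configuration sketch using the new conductor_url field; the app name, key, and URL are placeholders:

    from dbos import DBOS, DBOSConfig

    config: DBOSConfig = {
        "name": "my-app",
        "conductor_key": "YOUR_CONDUCTOR_API_KEY",
        # Only set when self-hosting Conductor; hypothetical URL.
        "conductor_url": "wss://conductor.example.com/websocket",
    }
    DBOS(config=config)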
dbos/_debouncer.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import math
 import time
 import types
-import uuid
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -39,7 +38,7 @@ from dbos._error import DBOSQueueDeduplicatedError
 from dbos._queue import Queue
 from dbos._registrations import get_dbos_func_name
 from dbos._serialization import WorkflowInputs
-from dbos._utils import INTERNAL_QUEUE_NAME
+from dbos._utils import INTERNAL_QUEUE_NAME, generate_uuid
 
 if TYPE_CHECKING:
     from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
@@ -209,7 +208,7 @@ class Debouncer(Generic[P, R]):
 
         # Deterministically generate the user workflow ID and message ID
        def assign_debounce_ids() -> tuple[str, str]:
-            return str(uuid.uuid4()), ctx.assign_workflow_id()
+            return generate_uuid(), ctx.assign_workflow_id()
 
         message_id, user_workflow_id = dbos._sys_db.call_function_as_step(
             assign_debounce_ids, "DBOS.assign_debounce_ids"
@@ -320,14 +319,14 @@ class DebouncerClient:
             "workflow_id": (
                 self.workflow_options["workflow_id"]
                 if self.workflow_options.get("workflow_id")
-                else str(uuid.uuid4())
+                else generate_uuid()
             ),
             "app_version": self.workflow_options.get("app_version"),
             "deduplication_id": self.workflow_options.get("deduplication_id"),
             "priority": self.workflow_options.get("priority"),
             "workflow_timeout_sec": self.workflow_options.get("workflow_timeout"),
         }
-        message_id = str(uuid.uuid4())
+        message_id = generate_uuid()
         while True:
             try:
                 # Attempt to enqueue a debouncer for this workflow.
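
Note: the generate_uuid helper that replaces the direct uuid.uuid4() calls lives in dbos._utils, which is not shown in this diff. It presumably wraps the standard library, roughly:

    import uuid

    # Hedged sketch of dbos._utils.generate_uuid; the real implementation
    # may differ (for example, to centralize ID generation for testing).
    def generate_uuid() -> str:
        return str(uuid.uuid4())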
dbos/_fastapi.py CHANGED
@@ -1,4 +1,3 @@
-import uuid
 from typing import Any, Callable, MutableMapping, cast
 
 from fastapi import FastAPI
@@ -9,7 +8,7 @@ from starlette.types import ASGIApp, Receive, Scope, Send
 from . import DBOS
 from ._context import EnterDBOSHandler, OperationType, SetWorkflowID, TracedAttributes
 from ._error import DBOSException
-from ._utils import request_id_header
+from ._utils import generate_uuid, request_id_header
 
 
 def _get_or_generate_request_id(request: FastAPIRequest) -> str:
@@ -17,7 +16,7 @@ def _get_or_generate_request_id(request: FastAPIRequest) -> str:
     if request_id is not None:
         return request_id
     else:
-        return str(uuid.uuid4())
+        return generate_uuid()
 
 
 async def _dbos_error_handler(request: FastAPIRequest, gexc: Exception) -> JSONResponse:
@@ -88,5 +87,6 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
             and not dbos._config["telemetry"]["disable_otlp"]
             and hasattr(response, "status_code")
         ):
-            DBOS.span.set_attribute("responseCode", response.status_code)
+            if DBOS.span is not None:
+                DBOS.span.set_attribute("responseCode", response.status_code)
         return response
dbos/_flask.py CHANGED
@@ -1,4 +1,3 @@
-import uuid
 from typing import Any
 from urllib.parse import urlparse
 
@@ -6,7 +5,7 @@ from flask import Flask
 from werkzeug.wrappers import Request as WRequest
 
 from ._context import EnterDBOSHandler, OperationType, SetWorkflowID, TracedAttributes
-from ._utils import request_id_header
+from ._utils import generate_uuid, request_id_header
 
 
 class FlaskMiddleware:
@@ -41,7 +40,7 @@ def _get_or_generate_request_id(request: WRequest) -> str:
     if request_id is not None:
         return request_id
     else:
-        return str(uuid.uuid4())
+        return generate_uuid()
 
 
 def setup_flask_middleware(app: Flask) -> None:
dbos/_logger.py CHANGED
@@ -71,6 +71,7 @@ def config_logger(config: "ConfigFile") -> None:
     if not disable_otlp:
 
         from opentelemetry._logs import get_logger_provider, set_logger_provider
+        from opentelemetry._logs._internal import ProxyLoggerProvider
         from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
         from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
         from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
@@ -79,8 +80,9 @@ def config_logger(config: "ConfigFile") -> None:
 
         # Only set up OTLP provider and exporter if endpoints are provided
         log_provider = get_logger_provider()
-        if otlp_logs_endpoints is not None:
-            if not isinstance(log_provider, LoggerProvider):
+        if otlp_logs_endpoints is not None and len(otlp_logs_endpoints) > 0:
+            if isinstance(log_provider, ProxyLoggerProvider):
+                # Set a real LoggerProvider if it was previously a ProxyLoggerProvider
                 log_provider = LoggerProvider(
                     Resource.create(
                         attributes={
@@ -91,7 +93,7 @@ def config_logger(config: "ConfigFile") -> None:
                 set_logger_provider(log_provider)
 
             for e in otlp_logs_endpoints:
-                log_provider.add_log_record_processor(
+                log_provider.add_log_record_processor(  # type: ignore
                     BatchLogRecordProcessor(
                         OTLPLogExporter(endpoint=e),
                         export_timeout_millis=5000,
@@ -100,10 +102,15 @@ def config_logger(config: "ConfigFile") -> None:
 
         # Even if no endpoints are provided, we still need a LoggerProvider to create the LoggingHandler
         global _otlp_handler
-        _otlp_handler = LoggingHandler(logger_provider=log_provider)
-
-        # Direct DBOS logs to OTLP
-        dbos_logger.addHandler(_otlp_handler)
+        if _otlp_handler is None:
+            if isinstance(log_provider, ProxyLoggerProvider):
+                dbos_logger.warning(
+                    "OTLP is enabled but logger provider not set, skipping log exporter setup."
+                )
+            else:
+                _otlp_handler = LoggingHandler(logger_provider=log_provider)
+                # Direct DBOS logs to OTLP
+                dbos_logger.addHandler(_otlp_handler)
 
     # Attach DBOS-specific attributes to all log entries.
     global _dbos_log_transformer
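
Note: the distinction the new code draws, shown in isolation; a sketch assuming opentelemetry-api and opentelemetry-sdk are installed:

    from opentelemetry._logs import get_logger_provider, set_logger_provider
    from opentelemetry._logs._internal import ProxyLoggerProvider
    from opentelemetry.sdk._logs import LoggerProvider

    provider = get_logger_provider()
    if isinstance(provider, ProxyLoggerProvider):
        # A proxy means no real provider has been configured yet, so install
        # a real LoggerProvider before attaching log record processors.
        provider = LoggerProvider()
        set_logger_provider(provider)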
dbos/_migration.py CHANGED
@@ -228,6 +228,21 @@ ALTER TABLE \"{schema}\".operation_outputs ADD COLUMN started_at_epoch_ms BIGINT
 """
 
 
+def get_dbos_migration_six(schema: str) -> str:
+    return f"""
+CREATE TABLE \"{schema}\".workflow_events_history (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, function_id, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES \"{schema}\".workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+ALTER TABLE \"{schema}\".streams ADD COLUMN function_id INTEGER NOT NULL DEFAULT 0;
+"""
+
+
 def get_dbos_migrations(schema: str) -> list[str]:
     return [
         get_dbos_migration_one(schema),
@@ -235,6 +250,7 @@ def get_dbos_migrations(schema: str) -> list[str]:
         get_dbos_migration_three(schema),
         get_dbos_migration_four(schema),
         get_dbos_migration_five(schema),
+        get_dbos_migration_six(schema),
     ]
 
 
@@ -343,6 +359,19 @@ ALTER TABLE operation_outputs ADD COLUMN started_at_epoch_ms BIGINT;
 ALTER TABLE operation_outputs ADD COLUMN completed_at_epoch_ms BIGINT;
 """
 
+sqlite_migration_six = """
+CREATE TABLE workflow_events_history (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, function_id, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+ALTER TABLE streams ADD COLUMN function_id INTEGER NOT NULL DEFAULT 0;
+"""
+
 
 sqlite_migrations = [
     sqlite_migration_one,
@@ -350,4 +379,5 @@ sqlite_migrations = [
     sqlite_migration_three,
     sqlite_migration_four,
     sqlite_migration_five,
+    sqlite_migration_six,
 ]
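
Note: a quick way to sanity-check the new SQLite DDL in isolation; the workflow_status and streams stubs are minimal assumptions, not the real schemas:

    import sqlite3

    from dbos._migration import sqlite_migrations

    conn = sqlite3.connect(":memory:")
    # Minimal stand-ins for the pre-existing tables the new DDL references.
    conn.execute("CREATE TABLE workflow_status (workflow_uuid TEXT PRIMARY KEY)")
    conn.execute("CREATE TABLE streams (workflow_uuid TEXT NOT NULL, key TEXT NOT NULL)")
    conn.executescript(sqlite_migrations[-1])  # sqlite_migration_six, defined above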