dbos 2.2.0__py3-none-any.whl → 2.4.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_sys_db.py CHANGED
@@ -106,7 +106,7 @@ class WorkflowStatus:
     updated_at: Optional[int]
     # If this workflow was enqueued, on which queue
     queue_name: Optional[str]
-    # The executor to most recently executed this workflow
+    # The executor to most recently execute this workflow
     executor_id: Optional[str]
     # The application version on which this workflow was started
     app_version: Optional[str]
@@ -114,6 +114,14 @@ class WorkflowStatus:
     workflow_timeout_ms: Optional[int]
     # The deadline of a workflow, computed by adding its timeout to its start time.
     workflow_deadline_epoch_ms: Optional[int]
+    # Unique ID for deduplication on a queue
+    deduplication_id: Optional[str]
+    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    priority: Optional[int]
+    # If this workflow is enqueued on a partitioned queue, its partition key
+    queue_partition_key: Optional[str]
+    # If this workflow was forked from another, that workflow's ID.
+    forked_from: Optional[str]

     # INTERNAL FIELDS

@@ -141,19 +149,13 @@ class WorkflowStatusInternal(TypedDict):
     app_version: Optional[str]
     app_id: Optional[str]
     recovery_attempts: Optional[int]
-    # The start-to-close timeout of the workflow in ms
     workflow_timeout_ms: Optional[int]
-    # The deadline of a workflow, computed by adding its timeout to its start time.
-    # Deadlines propagate to children. When the deadline is reached, the workflow is cancelled.
     workflow_deadline_epoch_ms: Optional[int]
-    # Unique ID for deduplication on a queue
     deduplication_id: Optional[str]
-    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
     priority: int
-    # Serialized workflow inputs
     inputs: str
-    # If this workflow is enqueued on a partitioned queue, its partition key
     queue_partition_key: Optional[str]
+    forked_from: Optional[str]


 class EnqueueOptionsInternal(TypedDict):
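
The four fields added to `WorkflowStatus` above (plus `forked_from` on `WorkflowStatusInternal`) surface queue metadata that previously lived only in the database. A minimal sketch of where they originate, assuming the public `Queue` and `SetEnqueueOptions` APIs; the queue name and workflow here are illustrative, not taken from this diff:

```python
from dbos import DBOS, Queue, SetEnqueueOptions

queue = Queue("example_queue", priority_enabled=True)  # hypothetical queue

@DBOS.workflow()
def process(task: str) -> str:
    return f"processed {task}"

# Enqueueing again with the same deduplication_id while the first run is
# still active is rejected. Per the comment above, priority runs from 1 to
# 2,147,483,647 and the default 0 is treated as highest priority.
with SetEnqueueOptions(deduplication_id="task-42", priority=10):
    handle = queue.enqueue(process, "task-42")
```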
@@ -178,6 +180,7 @@ class OperationResultInternal(TypedDict):
     function_name: str
     output: Optional[str]  # JSON (jsonpickle)
     error: Optional[str]  # JSON (jsonpickle)
+    started_at_epoch_ms: int


 class GetEventWorkflowContext(TypedDict):
@@ -194,42 +197,34 @@ class GetWorkflowsInput:
     """

     def __init__(self) -> None:
-        self.workflow_ids: Optional[List[str]] = (
-            None  # Search only in these workflow IDs
-        )
-        self.name: Optional[str] = None  # The name of the workflow function
-        self.authenticated_user: Optional[str] = None  # The user who ran the workflow.
-        self.start_time: Optional[str] = None  # Timestamp in ISO 8601 format
-        self.end_time: Optional[str] = None  # Timestamp in ISO 8601 format
-        self.status: Optional[List[str]] = (
-            None  # Get workflows with one of these statuses
-        )
-        self.application_version: Optional[str] = (
-            None  # The application version that ran this workflow. = None
-        )
-        self.limit: Optional[int] = (
-            None  # Return up to this many workflows IDs. IDs are ordered by workflow creation time.
-        )
-        self.offset: Optional[int] = (
-            None  # Offset into the matching records for pagination
-        )
-        self.sort_desc: bool = (
-            False  # If true, sort by created_at in DESC order. Default false (in ASC order).
-        )
-        self.workflow_id_prefix: Optional[str] = (
-            None  # If set, search for workflow IDs starting with this string
-        )
-
-
-class GetQueuedWorkflowsInput(TypedDict):
-    queue_name: Optional[str]  # Get workflows belonging to this queue
-    status: Optional[list[str]]  # Get workflows with one of these statuses
-    start_time: Optional[str]  # Timestamp in ISO 8601 format
-    end_time: Optional[str]  # Timestamp in ISO 8601 format
-    limit: Optional[int]  # Return up to this many workflows IDs.
-    offset: Optional[int]  # Offset into the matching records for pagination
-    name: Optional[str]  # The name of the workflow function
-    sort_desc: Optional[bool]  # Sort by created_at in DESC or ASC order
+        # Search only in these workflow IDs
+        self.workflow_ids: Optional[List[str]] = None
+        # The name of the workflow function
+        self.name: Optional[str] = None
+        # The user who ran the workflow.
+        self.authenticated_user: Optional[str] = None
+        # Timestamp in ISO 8601 format
+        self.start_time: Optional[str] = None
+        # Timestamp in ISO 8601 format
+        self.end_time: Optional[str] = None
+        # Get workflows with one of these statuses
+        self.status: Optional[List[str]] = None
+        # The application version that ran this workflow.
+        self.application_version: Optional[str] = None
+        # Get workflows forked from this workflow ID.
+        self.forked_from: Optional[str] = None
+        # Return up to this many workflows IDs. IDs are ordered by workflow creation time.
+        self.limit: Optional[int] = None
+        # Offset into the matching records for pagination
+        self.offset: Optional[int] = None
+        # If true, sort by created_at in DESC order. Default false (in ASC order).
+        self.sort_desc: bool = False
+        # Search only for workflow IDs starting with this string
+        self.workflow_id_prefix: Optional[str] = None
+        # Search only for workflows enqueued on this queue
+        self.queue_name: Optional[str] = None
+        # Search only currently enqueued workflows
+        self.queues_only: bool = False


 class GetPendingWorkflowsOutput:
@@ -249,6 +244,10 @@ class StepInfo(TypedDict):
     error: Optional[Exception]
     # If the step starts or retrieves the result of a workflow, its ID
     child_workflow_id: Optional[str]
+    # The Unix epoch timestamp at which this step started
+    started_at_epoch_ms: Optional[int]
+    # The Unix epoch timestamp at which this step completed
+    completed_at_epoch_ms: Optional[int]


 _dbos_null_topic = "__null__topic__"
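
The two timestamps added to `StepInfo` make per-step latency directly computable from a step listing. A small sketch over the dicts produced by the `get_workflow_steps` method later in this diff; it assumes `StepInfo` still carries `function_name`, as in 2.2.0:

```python
from typing import Dict, List

def step_durations_ms(steps: List[StepInfo]) -> Dict[str, int]:
    """Map each completed step's function name to its wall-clock duration in ms."""
    durations: Dict[str, int] = {}
    for step in steps:
        started = step["started_at_epoch_ms"]
        completed = step["completed_at_epoch_ms"]
        # Steps still in flight have no completion timestamp yet.
        if started is not None and completed is not None:
            durations[step["function_name"]] = completed - started
    return durations
```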
@@ -352,6 +351,7 @@ class SystemDatabase(ABC):
         engine: Optional[sa.Engine],
         schema: Optional[str],
         serializer: Serializer,
+        executor_id: Optional[str],
         debug_mode: bool = False,
     ) -> "SystemDatabase":
         """Factory method to create the appropriate SystemDatabase implementation based on URL."""
@@ -364,6 +364,7 @@ class SystemDatabase(ABC):
                 engine=engine,
                 schema=schema,
                 serializer=serializer,
+                executor_id=executor_id,
                 debug_mode=debug_mode,
             )
         else:
@@ -375,6 +376,7 @@ class SystemDatabase(ABC):
                 engine=engine,
                 schema=schema,
                 serializer=serializer,
+                executor_id=executor_id,
                 debug_mode=debug_mode,
             )

@@ -386,6 +388,7 @@ class SystemDatabase(ABC):
         engine: Optional[sa.Engine],
         schema: Optional[str],
         serializer: Serializer,
+        executor_id: Optional[str],
         debug_mode: bool = False,
     ):
         import sqlalchemy.dialects.postgresql as pg
@@ -411,6 +414,8 @@ class SystemDatabase(ABC):

         self.notifications_map = ThreadSafeConditionDict()
         self.workflow_events_map = ThreadSafeConditionDict()
+        self.executor_id = executor_id
+
         self._listener_thread_lock = threading.Lock()

         # Now we can run background processes
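
The executor ID now threads from the factory through `__init__` onto the instance, where `record_operation_result` (below) consults it to detect cross-executor recovery. A construction sketch; this is an internal API, and both the URL parameter name and the `DBOS__VMID` environment variable are assumptions, not taken from this diff:

```python
import os

# Hypothetical internal wiring; parameter values are placeholders.
sys_db = SystemDatabase.create(
    system_database_url="postgresql://localhost:5432/app_dbos_sys",  # name assumed
    engine=None,
    schema="dbos",
    serializer=serializer,                     # a Serializer instance
    executor_id=os.environ.get("DBOS__VMID"),  # assumed source of the executor ID
    debug_mode=False,
)
```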
@@ -706,6 +711,7 @@ class SystemDatabase(ABC):
                 assumed_role=status["assumed_role"],
                 queue_name=INTERNAL_QUEUE_NAME,
                 inputs=status["inputs"],
+                forked_from=original_workflow_id,
             )
         )

@@ -767,6 +773,7 @@
                     SystemSchema.workflow_status.c.priority,
                     SystemSchema.workflow_status.c.inputs,
                     SystemSchema.workflow_status.c.queue_partition_key,
+                    SystemSchema.workflow_status.c.forked_from,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
             ).fetchone()
             if row is None:
@@ -795,6 +802,7 @@
                 "priority": row[17],
                 "inputs": row[18],
                 "queue_partition_key": row[19],
+                "forked_from": row[20],
             }
             return status

@@ -881,6 +889,10 @@
             SystemSchema.workflow_status.c.application_id,
             SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
             SystemSchema.workflow_status.c.workflow_timeout_ms,
+            SystemSchema.workflow_status.c.deduplication_id,
+            SystemSchema.workflow_status.c.priority,
+            SystemSchema.workflow_status.c.queue_partition_key,
+            SystemSchema.workflow_status.c.forked_from,
         ]
         if load_input:
             load_columns.append(SystemSchema.workflow_status.c.inputs)
@@ -888,7 +900,15 @@
             load_columns.append(SystemSchema.workflow_status.c.output)
             load_columns.append(SystemSchema.workflow_status.c.error)

-        query = sa.select(*load_columns)
+        if input.queues_only:
+            query = sa.select(*load_columns).where(
+                sa.and_(
+                    SystemSchema.workflow_status.c.queue_name.isnot(None),
+                    SystemSchema.workflow_status.c.status.in_(["ENQUEUED", "PENDING"]),
+                )
+            )
+        else:
+            query = sa.select(*load_columns)
         if input.sort_desc:
             query = query.order_by(SystemSchema.workflow_status.c.created_at.desc())
         else:
@@ -917,6 +937,10 @@
                 SystemSchema.workflow_status.c.application_version
                 == input.application_version
             )
+        if input.forked_from:
+            query = query.where(
+                SystemSchema.workflow_status.c.forked_from == input.forked_from
+            )
         if input.workflow_ids:
             query = query.where(
                 SystemSchema.workflow_status.c.workflow_uuid.in_(input.workflow_ids)
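
Paired with the `forked_from=original_workflow_id` written by `fork_workflow` earlier in this diff, this filter lets callers enumerate a workflow's forks. A sketch assuming the public `DBOS.fork_workflow` API and that the enclosing method here is the workflow-listing one (its name is not shown in these hunks):

```python
# Fork the workflow, restarting from step 3 (signature assumed from 2.2.0).
forked_handle = DBOS.fork_workflow(original_workflow_id, 3)

# The fork records its parent, so lineage is queryable.
query = GetWorkflowsInput()
query.forked_from = original_workflow_id
forks = sys_db.get_workflows(query)
assert forked_handle.workflow_id in [f.workflow_id for f in forks]
```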
@@ -927,6 +951,10 @@
                     input.workflow_id_prefix
                 )
             )
+        if input.queue_name:
+            query = query.where(
+                SystemSchema.workflow_status.c.queue_name == input.queue_name
+            )
         if input.limit:
             query = query.limit(input.limit)
         if input.offset:
@@ -936,6 +964,7 @@
             rows = c.execute(query).fetchall()

         infos: List[WorkflowStatus] = []
+        workflow_ids: List[str] = []
         for row in rows:
             info = WorkflowStatus()
             info.workflow_id = row[0]
@@ -957,10 +986,14 @@
             info.app_id = row[14]
             info.workflow_deadline_epoch_ms = row[15]
             info.workflow_timeout_ms = row[16]
-
-            raw_input = row[17] if load_input else None
-            raw_output = row[18] if load_output else None
-            raw_error = row[19] if load_output else None
+            info.deduplication_id = row[17]
+            info.priority = row[18]
+            info.queue_partition_key = row[19]
+            info.forked_from = row[20]
+
+            raw_input = row[21] if load_input else None
+            raw_output = row[22] if load_output else None
+            raw_error = row[23] if load_output else None
             inputs, output, exception = safe_deserialize(
                 self.serializer,
                 info.workflow_id,
@@ -972,122 +1005,10 @@
             info.output = output
             info.error = exception

+            workflow_ids.append(info.workflow_id)
             infos.append(info)
         return infos

-    def get_queued_workflows(
-        self,
-        input: GetQueuedWorkflowsInput,
-        *,
-        load_input: bool = True,
-    ) -> List[WorkflowStatus]:
-        """
-        Retrieve a list of queued workflows result and inputs based on the input criteria. The result is a list of external-facing workflow status objects.
-        """
-        load_columns = [
-            SystemSchema.workflow_status.c.workflow_uuid,
-            SystemSchema.workflow_status.c.status,
-            SystemSchema.workflow_status.c.name,
-            SystemSchema.workflow_status.c.recovery_attempts,
-            SystemSchema.workflow_status.c.config_name,
-            SystemSchema.workflow_status.c.class_name,
-            SystemSchema.workflow_status.c.authenticated_user,
-            SystemSchema.workflow_status.c.authenticated_roles,
-            SystemSchema.workflow_status.c.assumed_role,
-            SystemSchema.workflow_status.c.queue_name,
-            SystemSchema.workflow_status.c.executor_id,
-            SystemSchema.workflow_status.c.created_at,
-            SystemSchema.workflow_status.c.updated_at,
-            SystemSchema.workflow_status.c.application_version,
-            SystemSchema.workflow_status.c.application_id,
-            SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
-            SystemSchema.workflow_status.c.workflow_timeout_ms,
-        ]
-        if load_input:
-            load_columns.append(SystemSchema.workflow_status.c.inputs)
-
-        query = sa.select(*load_columns).where(
-            sa.and_(
-                SystemSchema.workflow_status.c.queue_name.isnot(None),
-                SystemSchema.workflow_status.c.status.in_(["ENQUEUED", "PENDING"]),
-            )
-        )
-        if input["sort_desc"]:
-            query = query.order_by(SystemSchema.workflow_status.c.created_at.desc())
-        else:
-            query = query.order_by(SystemSchema.workflow_status.c.created_at.asc())
-
-        if input.get("name"):
-            query = query.where(SystemSchema.workflow_status.c.name == input["name"])
-
-        if input.get("queue_name"):
-            query = query.where(
-                SystemSchema.workflow_status.c.queue_name == input["queue_name"]
-            )
-
-        status = input.get("status", None)
-        if status:
-            query = query.where(SystemSchema.workflow_status.c.status.in_(status))
-        if "start_time" in input and input["start_time"] is not None:
-            query = query.where(
-                SystemSchema.workflow_status.c.created_at
-                >= datetime.datetime.fromisoformat(input["start_time"]).timestamp()
-                * 1000
-            )
-        if "end_time" in input and input["end_time"] is not None:
-            query = query.where(
-                SystemSchema.workflow_status.c.created_at
-                <= datetime.datetime.fromisoformat(input["end_time"]).timestamp() * 1000
-            )
-        if input.get("limit"):
-            query = query.limit(input["limit"])
-        if input.get("offset"):
-            query = query.offset(input["offset"])
-
-        with self.engine.begin() as c:
-            rows = c.execute(query).fetchall()
-
-        infos: List[WorkflowStatus] = []
-        for row in rows:
-            info = WorkflowStatus()
-            info.workflow_id = row[0]
-            info.status = row[1]
-            info.name = row[2]
-            info.recovery_attempts = row[3]
-            info.config_name = row[4]
-            info.class_name = row[5]
-            info.authenticated_user = row[6]
-            info.authenticated_roles = (
-                json.loads(row[7]) if row[7] is not None else None
-            )
-            info.assumed_role = row[8]
-            info.queue_name = row[9]
-            info.executor_id = row[10]
-            info.created_at = row[11]
-            info.updated_at = row[12]
-            info.app_version = row[13]
-            info.app_id = row[14]
-            info.workflow_deadline_epoch_ms = row[15]
-            info.workflow_timeout_ms = row[16]
-
-            raw_input = row[17] if load_input else None
-
-            # Error and Output are not loaded because they should always be None for queued workflows.
-            inputs, output, exception = safe_deserialize(
-                self.serializer,
-                info.workflow_id,
-                serialized_input=raw_input,
-                serialized_output=None,
-                serialized_exception=None,
-            )
-            info.input = inputs
-            info.output = output
-            info.error = exception
-
-            infos.append(info)
-
-        return infos
-
     def get_pending_workflows(
         self, executor_id: str, app_version: str
     ) -> list[GetPendingWorkflowsOutput]:
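
`get_queued_workflows` is removed outright: its fixed `queue_name IS NOT NULL AND status IN ('ENQUEUED', 'PENDING')` predicate now hangs off `input.queues_only` in the unified query above, and its remaining filters map onto the corresponding `GetWorkflowsInput` attributes. A migration sketch (the surviving method's name is assumed):

```python
# Before (2.2.0), with the now-deleted TypedDict input:
# queued = sys_db.get_queued_workflows(
#     {"queue_name": "example_queue", "status": ["ENQUEUED"], "start_time": None,
#      "end_time": None, "limit": 10, "offset": None, "name": None, "sort_desc": True}
# )

# After (2.4.0a5): the same listing through the consolidated input.
query = GetWorkflowsInput()
query.queue_name = "example_queue"
query.status = ["ENQUEUED"]
query.queues_only = True  # applies the old fixed queue predicate
query.sort_desc = True
query.limit = 10
queued = sys_db.get_workflows(query)
```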
@@ -1121,6 +1042,8 @@
                 SystemSchema.operation_outputs.c.output,
                 SystemSchema.operation_outputs.c.error,
                 SystemSchema.operation_outputs.c.child_workflow_id,
+                SystemSchema.operation_outputs.c.started_at_epoch_ms,
+                SystemSchema.operation_outputs.c.completed_at_epoch_ms,
             ).where(SystemSchema.operation_outputs.c.workflow_uuid == workflow_id)
         ).fetchall()
         steps = []
@@ -1138,6 +1061,8 @@
                 output=output,
                 error=exception,
                 child_workflow_id=row[4],
+                started_at_epoch_ms=row[5],
+                completed_at_epoch_ms=row[6],
             )
             steps.append(step)
         return steps
@@ -1150,10 +1075,33 @@
         error = result["error"]
         output = result["output"]
         assert error is None or output is None, "Only one of error or output can be set"
+        wf_executor_id_row = conn.execute(
+            sa.select(
+                SystemSchema.workflow_status.c.executor_id,
+            ).where(
+                SystemSchema.workflow_status.c.workflow_uuid == result["workflow_uuid"]
+            )
+        ).fetchone()
+        assert wf_executor_id_row is not None
+        wf_executor_id = wf_executor_id_row[0]
+        if self.executor_id is not None and wf_executor_id != self.executor_id:
+            dbos_logger.debug(
+                f'Resetting executor_id from {wf_executor_id} to {self.executor_id} for workflow {result["workflow_uuid"]}'
+            )
+            conn.execute(
+                sa.update(SystemSchema.workflow_status)
+                .values(executor_id=self.executor_id)
+                .where(
+                    SystemSchema.workflow_status.c.workflow_uuid
+                    == result["workflow_uuid"]
+                )
+            )
         sql = sa.insert(SystemSchema.operation_outputs).values(
             workflow_uuid=result["workflow_uuid"],
             function_id=result["function_id"],
             function_name=result["function_name"],
+            started_at_epoch_ms=result["started_at_epoch_ms"],
+            completed_at_epoch_ms=int(time.time() * 1000),
             output=output,
             error=error,
         )
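
The practical effect of the new executor check: when a workflow started on one executor has a step recorded by another (typically after recovery), the status row is re-stamped so `executor_id` follows the workflow's current owner. A sketch of observing this through the public handle API; the executor names are illustrative:

```python
# Workflow began on executor "worker-1"; executor "worker-2" recovered it.
# Once "worker-2" records any step result, the stored owner flips.
status = DBOS.retrieve_workflow(workflow_id).get_status()
print(status.executor_id)  # "worker-2" after recovery makes progress
```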
@@ -1340,6 +1288,7 @@
         topic: Optional[str] = None,
     ) -> None:
         function_name = "DBOS.send"
+        start_time = int(time.time() * 1000)
         topic = topic if topic is not None else _dbos_null_topic
         with self.engine.begin() as c:
             recorded_output = self._check_operation_execution_txn(
@@ -1376,6 +1325,7 @@
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": None,
             "error": None,
         }
@@ -1391,6 +1341,7 @@
         timeout_seconds: float = 60,
     ) -> Any:
         function_name = "DBOS.recv"
+        start_time = int(time.time() * 1000)
         topic = topic if topic is not None else _dbos_null_topic

         # First, check for previous executions.
@@ -1475,6 +1426,7 @@
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": self.serializer.serialize(
                 message
             ),  # None will be serialized to 'null'
@@ -1510,6 +1462,7 @@
         skip_sleep: bool = False,
     ) -> float:
         function_name = "DBOS.sleep"
+        start_time = int(time.time() * 1000)
         recorded_output = self.check_operation_execution(
             workflow_uuid, function_id, function_name
         )
@@ -1530,6 +1483,7 @@
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": self.serializer.serialize(end_time),
             "error": None,
         }
@@ -1550,6 +1504,7 @@
         message: Any,
     ) -> None:
         function_name = "DBOS.setEvent"
+        start_time = int(time.time() * 1000)
        with self.engine.begin() as c:
             recorded_output = self._check_operation_execution_txn(
                 workflow_uuid, function_id, function_name, conn=c
@@ -1579,6 +1534,7 @@
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": None,
             "error": None,
         }
@@ -1639,6 +1595,7 @@
         caller_ctx: Optional[GetEventWorkflowContext] = None,
     ) -> Any:
         function_name = "DBOS.getEvent"
+        start_time = int(time.time() * 1000)
         get_sql = sa.select(
             SystemSchema.workflow_events.c.value,
         ).where(
@@ -1713,6 +1670,7 @@
             "workflow_uuid": caller_ctx["workflow_uuid"],
             "function_id": caller_ctx["function_id"],
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": self.serializer.serialize(
                 value
             ),  # None will be serialized to 'null'
@@ -1951,6 +1909,7 @@

     def call_function_as_step(self, fn: Callable[[], T], function_name: str) -> T:
         ctx = get_local_dbos_context()
+        start_time = int(time.time() * 1000)
         if ctx and ctx.is_transaction():
             raise Exception(f"Invalid call to `{function_name}` inside a transaction")
         if ctx and ctx.is_workflow():
@@ -1978,6 +1937,7 @@
             "workflow_uuid": ctx.workflow_id,
             "function_id": ctx.function_id,
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": self.serializer.serialize(result),
             "error": None,
         }
@@ -2056,6 +2016,7 @@
             if value == _dbos_stream_closed_sentinel
             else "DBOS.writeStream"
         )
+        start_time = int(time.time() * 1000)

         with self.engine.begin() as c:

@@ -2102,6 +2063,7 @@
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,
             "function_name": function_name,
+            "started_at_epoch_ms": start_time,
             "output": None,
             "error": None,
         }
dbos/_sys_db_postgres.py CHANGED
@@ -41,7 +41,7 @@ class PostgresSystemDatabase(SystemDatabase):
                 parameters={"db_name": sysdb_name},
             ).scalar():
                 dbos_logger.info(f"Creating system database {sysdb_name}")
-                conn.execute(sa.text(f"CREATE DATABASE {sysdb_name}"))
+                conn.execute(sa.text(f'CREATE DATABASE "{sysdb_name}"'))
             engine.dispose()
         else:
             # If we were provided an engine, validate it can connect
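
The quoting fix above matters because unquoted Postgres identifiers are down-cased and cannot contain characters such as hyphens, so system database names derived from hyphenated app names would previously fail. An illustration; the database name is hypothetical:

```python
import sqlalchemy as sa

sysdb_name = "My-App_dbos_sys"  # hyphen and capitals: invalid unless quoted

# 2.2.0 built this statement, which Postgres rejects (and which would
# silently case-fold otherwise-valid names):
unquoted = sa.text(f"CREATE DATABASE {sysdb_name}")

# 2.4.0a5 quotes the identifier, so it is taken verbatim:
quoted = sa.text(f'CREATE DATABASE "{sysdb_name}"')
```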
dbos/_tracer.py CHANGED
@@ -25,6 +25,10 @@ class DBOSTracer:
     def config(self, config: ConfigFile) -> None:
         self.otlp_attributes = config.get("telemetry", {}).get("otlp_attributes", {})  # type: ignore
         self.disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore
+        otlp_traces_endpoints = (
+            config.get("telemetry", {}).get("OTLPExporter", {}).get("tracesEndpoint")  # type: ignore
+        )
+
         if not self.disable_otlp:
             from opentelemetry import trace
             from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
@@ -38,25 +42,26 @@ class DBOSTracer:
             )
             from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME

-            if not isinstance(trace.get_tracer_provider(), TracerProvider):
-                resource = Resource(
-                    attributes={
-                        SERVICE_NAME: config["name"],
-                    }
-                )
-
-                provider = TracerProvider(resource=resource)
-                if os.environ.get("DBOS__CONSOLE_TRACES", None) is not None:
-                    processor = BatchSpanProcessor(ConsoleSpanExporter())
-                    provider.add_span_processor(processor)
-                otlp_traces_endpoints = (
-                    config.get("telemetry", {}).get("OTLPExporter", {}).get("tracesEndpoint")  # type: ignore
-                )
-                if otlp_traces_endpoints:
-                    for e in otlp_traces_endpoints:
-                        processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=e))
-                        provider.add_span_processor(processor)
-                trace.set_tracer_provider(provider)
+            tracer_provider = trace.get_tracer_provider()
+
+            # Only set up OTLP provider and exporter if endpoints are provided
+            if otlp_traces_endpoints is not None:
+                if not isinstance(tracer_provider, TracerProvider):
+                    resource = Resource(
+                        attributes={
+                            SERVICE_NAME: config["name"],
+                        }
+                    )
+
+                    tracer_provider = TracerProvider(resource=resource)
+                    if os.environ.get("DBOS__CONSOLE_TRACES", None) is not None:
+                        processor = BatchSpanProcessor(ConsoleSpanExporter())
+                        tracer_provider.add_span_processor(processor)
+                    trace.set_tracer_provider(tracer_provider)
+
+                for e in otlp_traces_endpoints:
+                    processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=e))
+                    tracer_provider.add_span_processor(processor)

     def set_provider(self, provider: "Optional[TracerProvider]") -> None:
         self.provider = provider
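
Net behavior change in the tracer: a `TracerProvider` and OTLP exporters are now registered only when a traces endpoint is actually configured, rather than whenever no provider exists yet. A hedged sketch of the config shape `config()` reads; since the code iterates over `tracesEndpoint`, it is expected to be a list:

```python
# Hypothetical ConfigFile dict; only the keys read by config() are shown.
config = {
    "name": "my-app",
    "telemetry": {
        "otlp_attributes": {},
        "disable_otlp": False,
        "OTLPExporter": {
            "tracesEndpoint": ["http://localhost:4318/v1/traces"],
        },
    },
}
# With "tracesEndpoint" absent or None, no provider or exporter is installed,
# and any tracer provider configured elsewhere is left untouched.
```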