dbos 2.2.0__py3-none-any.whl → 2.4.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_admin_server.py CHANGED
@@ -338,6 +338,7 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
                 end_time=filters.get("end_time"),
                 status=filters.get("status"),
                 app_version=filters.get("application_version"),
+                forked_from=filters.get("forked_from"),
                 name=filters.get("workflow_name"),
                 limit=filters.get("limit"),
                 offset=filters.get("offset"),
@@ -364,6 +365,7 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
                 start_time=filters.get("start_time"),
                 end_time=filters.get("end_time"),
                 status=filters.get("status"),
+                forked_from=filters.get("forked_from"),
                 name=filters.get("workflow_name"),
                 limit=filters.get("limit"),
                 offset=filters.get("offset"),
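Both listing endpoints above now pass a `forked_from` filter through to the workflow queries. A sketch of calling the admin API with the new filter, assuming the admin server is enabled on its default port 3001 and that the POST /workflows endpoint handled above accepts a JSON body of the filter keys shown in the hunk:

```python
import json
from urllib.request import Request, urlopen

# Filter keys mirror the filters.get(...) calls above; values are examples.
filters = {"forked_from": "original-workflow-id", "limit": 20}
req = Request(
    "http://localhost:3001/workflows",
    data=json.dumps(filters).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urlopen(req) as resp:
    for wf in json.loads(resp.read()):
        print(wf)
```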
dbos/_app_db.py CHANGED
@@ -201,6 +201,8 @@ class ApplicationDatabase(ABC):
                     else row[3]
                 ),
                 child_workflow_id=None,
+                started_at_epoch_ms=None,
+                completed_at_epoch_ms=None,
             )
             for row in rows
         ]
dbos/_client.py CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+import json
 import time
 import uuid
 from typing import (
@@ -63,6 +64,8 @@ class EnqueueOptions(_EnqueueOptionsRequired, total=False):
     priority: int
     max_recovery_attempts: int
     queue_partition_key: str
+    authenticated_user: str
+    authenticated_roles: list[str]


 def validate_enqueue_options(options: EnqueueOptions) -> None:
@@ -146,22 +149,27 @@ class DBOSClient:
         self._sys_db = SystemDatabase.create(
             system_database_url=system_database_url,
             engine_kwargs={
+                "connect_args": {"application_name": "dbos_transact_client"},
                 "pool_timeout": 30,
                 "max_overflow": 0,
                 "pool_size": 2,
+                "pool_pre_ping": True,
             },
             engine=system_database_engine,
             schema=dbos_system_schema,
             serializer=serializer,
+            executor_id=None,
         )
         self._sys_db.check_connection()
         if application_database_url:
             self._app_db = ApplicationDatabase.create(
                 database_url=application_database_url,
                 engine_kwargs={
+                    "connect_args": {"application_name": "dbos_transact_client"},
                     "pool_timeout": 30,
                     "max_overflow": 0,
                     "pool_size": 2,
+                    "pool_pre_ping": True,
                 },
                 schema=dbos_system_schema,
                 serializer=serializer,
@@ -189,6 +197,13 @@ class DBOSClient:
             "queue_partition_key": options.get("queue_partition_key"),
         }

+        authenticated_user = options.get("authenticated_user")
+        authenticated_roles = (
+            json.dumps(options.get("authenticated_roles"))
+            if options.get("authenticated_roles")
+            else None
+        )
+
         inputs: WorkflowInputs = {
             "args": args,
             "kwargs": kwargs,
@@ -202,9 +217,9 @@ class DBOSClient:
             "queue_name": queue_name,
             "app_version": enqueue_options_internal["app_version"],
             "config_name": None,
-            "authenticated_user": None,
+            "authenticated_user": authenticated_user,
             "assumed_role": None,
-            "authenticated_roles": None,
+            "authenticated_roles": authenticated_roles,
             "output": None,
             "error": None,
             "created_at": None,
@@ -224,6 +239,7 @@ class DBOSClient:
             ),
             "inputs": self._serializer.serialize(inputs),
             "queue_partition_key": enqueue_options_internal["queue_partition_key"],
+            "forked_from": None,
         }

         self._sys_db.init_workflow(
@@ -290,6 +306,7 @@ class DBOSClient:
             "priority": 0,
             "inputs": self._serializer.serialize({"args": (), "kwargs": {}}),
             "queue_partition_key": None,
+            "forked_from": None,
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
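The new `authenticated_user` and `authenticated_roles` options let a client stamp an identity onto workflows it enqueues; previously both columns were always written as NULL. A minimal sketch (the queue name, workflow name, and connection URL are hypothetical):

```python
from dbos import DBOSClient, EnqueueOptions

# Hypothetical connection string; any Postgres URL reachable by the client works.
client = DBOSClient("postgresql://postgres:dbos@localhost:5432/app_db")

options: EnqueueOptions = {
    "queue_name": "task_queue",       # must match a Queue defined in the app
    "workflow_name": "process_task",  # name of a workflow registered in the app
    # New in this release: identity is recorded on the enqueued workflow.
    "authenticated_user": "alice",
    "authenticated_roles": ["admin", "operator"],
}

handle = client.enqueue(options, "task-argument")
print(handle.get_result())
```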
dbos/_conductor/conductor.py CHANGED
@@ -223,22 +223,21 @@ class ConductorWebsocket(threading.Thread):
             body = list_workflows_message.body
             infos = []
             try:
-                load_input = body.get("load_input", False)
-                load_output = body.get("load_output", False)
                 infos = list_workflows(
                     self.dbos._sys_db,
-                    workflow_ids=body["workflow_uuids"],
-                    user=body["authenticated_user"],
-                    start_time=body["start_time"],
-                    end_time=body["end_time"],
-                    status=body["status"],
-                    app_version=body["application_version"],
-                    name=body["workflow_name"],
-                    limit=body["limit"],
-                    offset=body["offset"],
-                    sort_desc=body["sort_desc"],
-                    load_input=load_input,
-                    load_output=load_output,
+                    workflow_ids=body.get("workflow_uuids", None),
+                    user=body.get("authenticated_user", None),
+                    start_time=body.get("start_time", None),
+                    end_time=body.get("end_time", None),
+                    status=body.get("status", None),
+                    app_version=body.get("application_version", None),
+                    forked_from=body.get("forked_from", None),
+                    name=body.get("workflow_name", None),
+                    limit=body.get("limit", None),
+                    offset=body.get("offset", None),
+                    sort_desc=body.get("sort_desc", False),
+                    load_input=body.get("load_input", False),
+                    load_output=body.get("load_output", False),
                 )
             except Exception as e:
                 error_message = f"Exception encountered when listing workflows: {traceback.format_exc()}"
@@ -261,18 +260,18 @@ class ConductorWebsocket(threading.Thread):
             q_body = list_queued_workflows_message.body
             infos = []
             try:
-                q_load_input = q_body.get("load_input", False)
                 infos = list_queued_workflows(
                     self.dbos._sys_db,
-                    start_time=q_body["start_time"],
-                    end_time=q_body["end_time"],
-                    status=q_body["status"],
-                    name=q_body["workflow_name"],
-                    limit=q_body["limit"],
-                    offset=q_body["offset"],
-                    queue_name=q_body["queue_name"],
-                    sort_desc=q_body["sort_desc"],
-                    load_input=q_load_input,
+                    start_time=q_body.get("start_time", None),
+                    end_time=q_body.get("end_time", None),
+                    status=q_body.get("status", None),
+                    forked_from=q_body.get("forked_from", None),
+                    name=q_body.get("workflow_name", None),
+                    limit=q_body.get("limit", None),
+                    offset=q_body.get("offset", None),
+                    queue_name=q_body.get("queue_name", None),
+                    sort_desc=q_body.get("sort_desc", False),
+                    load_input=q_body.get("load_input", False),
                 )
             except Exception as e:
                 error_message = f"Exception encountered when listing queued workflows: {traceback.format_exc()}"
dbos/_conductor/protocol.py CHANGED
@@ -118,6 +118,7 @@ class ListWorkflowsBody(TypedDict, total=False):
     end_time: Optional[str]
     status: Optional[str]
     application_version: Optional[str]
+    forked_from: Optional[str]
     limit: Optional[int]
     offset: Optional[int]
     sort_desc: bool
@@ -143,6 +144,12 @@ class WorkflowsOutput:
     QueueName: Optional[str]
     ApplicationVersion: Optional[str]
     ExecutorID: Optional[str]
+    WorkflowTimeoutMS: Optional[str]
+    WorkflowDeadlineEpochMS: Optional[str]
+    DeduplicationID: Optional[str]
+    Priority: Optional[str]
+    QueuePartitionKey: Optional[str]
+    ForkedFrom: Optional[str]

     @classmethod
     def from_workflow_information(cls, info: WorkflowStatus) -> "WorkflowsOutput":
@@ -152,12 +159,22 @@ class WorkflowsOutput:
         inputs_str = str(info.input) if info.input is not None else None
         outputs_str = str(info.output) if info.output is not None else None
         error_str = str(info.error) if info.error is not None else None
-        request_str = None
         roles_str = (
             str(info.authenticated_roles)
             if info.authenticated_roles is not None
             else None
         )
+        workflow_timeout_ms_str = (
+            str(info.workflow_timeout_ms)
+            if info.workflow_timeout_ms is not None
+            else None
+        )
+        workflow_deadline_epoch_ms_str = (
+            str(info.workflow_deadline_epoch_ms)
+            if info.workflow_deadline_epoch_ms is not None
+            else None
+        )
+        priority_str = str(info.priority) if info.priority is not None else None

         return cls(
             WorkflowUUID=info.workflow_id,
@@ -176,6 +193,12 @@ class WorkflowsOutput:
             QueueName=info.queue_name,
             ApplicationVersion=info.app_version,
             ExecutorID=info.executor_id,
+            WorkflowTimeoutMS=workflow_timeout_ms_str,
+            WorkflowDeadlineEpochMS=workflow_deadline_epoch_ms_str,
+            DeduplicationID=info.deduplication_id,
+            Priority=priority_str,
+            QueuePartitionKey=info.queue_partition_key,
+            ForkedFrom=info.forked_from,
         )


@@ -186,14 +209,28 @@ class WorkflowSteps:
     output: Optional[str]
     error: Optional[str]
     child_workflow_id: Optional[str]
+    started_at_epoch_ms: Optional[str]
+    completed_at_epoch_ms: Optional[str]

     @classmethod
     def from_step_info(cls, info: StepInfo) -> "WorkflowSteps":
         output_str = str(info["output"]) if info["output"] is not None else None
         error_str = str(info["error"]) if info["error"] is not None else None
+        started_at_str = (
+            str(info["started_at_epoch_ms"])
+            if info["started_at_epoch_ms"] is not None
+            else None
+        )
+        completed_at_str = (
+            str(info["completed_at_epoch_ms"])
+            if info["completed_at_epoch_ms"] is not None
+            else None
+        )
         return cls(
             function_id=info["function_id"],
             function_name=info["function_name"],
+            started_at_epoch_ms=started_at_str,
+            completed_at_epoch_ms=completed_at_str,
             output=output_str,
             error=error_str,
             child_workflow_id=info["child_workflow_id"],
@@ -216,6 +253,7 @@ class ListQueuedWorkflowsBody(TypedDict, total=False):
     start_time: Optional[str]
     end_time: Optional[str]
     status: Optional[str]
+    forked_from: Optional[str]
     queue_name: Optional[str]
     limit: Optional[int]
     offset: Optional[int]
dbos/_core.py CHANGED
@@ -93,14 +93,6 @@ TEMP_SEND_WF_NAME = "<temp>.temp_send_workflow"
 DEBOUNCER_WORKFLOW_NAME = "_dbos_debouncer_workflow"


-def check_is_in_coroutine() -> bool:
-    try:
-        asyncio.get_running_loop()
-        return True
-    except RuntimeError:
-        return False
-
-
 class WorkflowHandleFuture(Generic[R]):

     def __init__(self, workflow_id: str, future: Future[R], dbos: "DBOS"):
@@ -308,6 +300,7 @@ def _init_workflow(
             if enqueue_options is not None
             else None
         ),
+        "forked_from": None,
    }

     # Synchronously record the status and inputs for workflows
@@ -324,6 +317,7 @@ def _init_workflow(
                 "function_name": wf_name,
                 "output": None,
                 "error": dbos._serializer.serialize(e),
+                "started_at_epoch_ms": int(time.time() * 1000),
             }
             dbos._sys_db.record_operation_result(result)
             raise
@@ -856,11 +850,6 @@ def workflow_wrapper(
                 dbos._sys_db.record_get_result(workflow_id, serialized_r, None)
             return r

-        if check_is_in_coroutine() and not inspect.iscoroutinefunction(func):
-            dbos_logger.warning(
-                f"Sync workflow ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Define it as async or use asyncio.to_thread instead."
-            )
-
         outcome = (
             wfOutcome.wrap(init_wf, dbos=dbos)
             .also(DBOSAssumeRole(rr))
@@ -1046,10 +1035,6 @@ def decorate_transaction(
             assert (
                 ctx.is_workflow()
             ), "Transactions must be called from within workflows"
-            if check_is_in_coroutine():
-                dbos_logger.warning(
-                    f"Transaction function ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Use asyncio.to_thread instead."
-                )
             with DBOSAssumeRole(rr):
                 return invoke_tx(*args, **kwargs)
         else:
@@ -1135,6 +1120,7 @@ def decorate_step(
             "function_name": step_name,
             "output": None,
             "error": None,
+            "started_at_epoch_ms": int(time.time() * 1000),
         }

         try:
@@ -1194,10 +1180,6 @@ def decorate_step(

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
-        if check_is_in_coroutine() and not inspect.iscoroutinefunction(func):
-            dbos_logger.warning(
-                f"Sync step ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Define it as async or use asyncio.to_thread instead."
-            )
         # If the step is called from a workflow, run it as a step.
         # Otherwise, run it as a normal function.
         ctx = get_local_dbos_context()
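The deleted `check_is_in_coroutine` guard warned whenever a synchronous workflow, step, or transaction ran inside a running event loop; 2.4.0a5 drops the warning, but the pattern it recommended still applies when you call blocking DBOS functions from async code. A sketch of that pattern (`my_sync_step` is a hypothetical step):

```python
import asyncio

from dbos import DBOS


@DBOS.step()
def my_sync_step(x: int) -> int:
    # Ordinary blocking step; fine to call directly from sync workflows.
    return x * 2


@DBOS.workflow()
async def my_async_workflow(x: int) -> int:
    # Calling my_sync_step(x) directly here would block the event loop.
    # The removed warning suggested dispatching it to a worker thread:
    return await asyncio.to_thread(my_sync_step, x)
```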
dbos/_dbos.py CHANGED
@@ -460,6 +460,7 @@ class DBOS:
             debug_mode=debug_mode,
             schema=schema,
             serializer=self._serializer,
+            executor_id=GlobalParams.executor_id,
         )
         assert self._config["database"]["db_engine_kwargs"] is not None
         if self._config["database_url"]:
@@ -1127,7 +1128,9 @@ class DBOS:
         end_time: Optional[str] = None,
         name: Optional[str] = None,
         app_version: Optional[str] = None,
+        forked_from: Optional[str] = None,
         user: Optional[str] = None,
+        queue_name: Optional[str] = None,
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         sort_desc: bool = False,
@@ -1144,6 +1147,7 @@ class DBOS:
                 end_time=end_time,
                 name=name,
                 app_version=app_version,
+                forked_from=forked_from,
                 user=user,
                 limit=limit,
                 offset=offset,
@@ -1151,6 +1155,7 @@ class DBOS:
                 workflow_id_prefix=workflow_id_prefix,
                 load_input=load_input,
                 load_output=load_output,
+                queue_name=queue_name,
             )

         return _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1167,6 +1172,7 @@ class DBOS:
         end_time: Optional[str] = None,
         name: Optional[str] = None,
         app_version: Optional[str] = None,
+        forked_from: Optional[str] = None,
         user: Optional[str] = None,
         limit: Optional[int] = None,
         offset: Optional[int] = None,
@@ -1184,6 +1190,7 @@ class DBOS:
                 end_time=end_time,
                 name=name,
                 app_version=app_version,
+                forked_from=forked_from,
                 user=user,
                 limit=limit,
                 offset=offset,
@@ -1199,6 +1206,7 @@ class DBOS:
         *,
         queue_name: Optional[str] = None,
         status: Optional[Union[str, List[str]]] = None,
+        forked_from: Optional[str] = None,
         start_time: Optional[str] = None,
         end_time: Optional[str] = None,
         name: Optional[str] = None,
@@ -1212,6 +1220,7 @@ class DBOS:
             _get_dbos_instance()._sys_db,
             queue_name=queue_name,
             status=status,
+            forked_from=forked_from,
             start_time=start_time,
             end_time=end_time,
             name=name,
@@ -1231,6 +1240,7 @@ class DBOS:
         *,
         queue_name: Optional[str] = None,
         status: Optional[Union[str, List[str]]] = None,
+        forked_from: Optional[str] = None,
         start_time: Optional[str] = None,
         end_time: Optional[str] = None,
         name: Optional[str] = None,
@@ -1244,6 +1254,7 @@ class DBOS:
             cls.list_queued_workflows,
             queue_name=queue_name,
             status=status,
+            forked_from=forked_from,
             start_time=start_time,
             end_time=end_time,
             name=name,
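`DBOS.list_workflows` gains `forked_from` and `queue_name` filters, and `list_queued_workflows` gains `forked_from`. A short sketch, assuming some workflow has been forked from the hypothetical ID `original-id`:

```python
from dbos import DBOS

# Find every workflow that was forked from a given original workflow.
forks = DBOS.list_workflows(forked_from="original-id")
for wf in forks:
    print(wf.workflow_id, wf.status, wf.forked_from)

# The new queue_name parameter narrows results to a single queue.
queued = DBOS.list_workflows(queue_name="task_queue", status="ENQUEUED")
```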
dbos/_dbos_config.py CHANGED
@@ -444,6 +444,7 @@ def configure_db_engine_parameters(

     # Configure user database engine parameters
     app_engine_kwargs: dict[str, Any] = {
+        "connect_args": {"application_name": "dbos_transact"},
         "pool_timeout": 30,
         "max_overflow": 0,
         "pool_size": 20,
@@ -477,8 +478,6 @@ def is_valid_database_url(database_url: str) -> bool:
         return True
     url = make_url(database_url)
     required_fields = [
-        ("username", "Username must be specified in the connection URL"),
-        ("host", "Host must be specified in the connection URL"),
         ("database", "Database name must be specified in the connection URL"),
     ]
     for field_name, error_message in required_fields:
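Dropping the username and host checks means connection URLs that omit them, such as Unix-socket or trusted-local connections, now validate; only the database name is still required. For illustration (the URLs are examples):

```python
from sqlalchemy.engine import make_url

# Accepted in 2.4.0a5: no username or host, e.g. a local socket connection
# that authenticates via the operating-system user.
url = make_url("postgresql:///app_db")
assert url.database == "app_db"

# Still rejected by is_valid_database_url: a URL with no database name,
# e.g. "postgresql://user@localhost:5432/".
```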
dbos/_kafka.py CHANGED
@@ -1,6 +1,6 @@
 import re
 import threading
-from typing import TYPE_CHECKING, Any, Callable, NoReturn
+from typing import TYPE_CHECKING, Any, Callable, Coroutine, NoReturn

 from confluent_kafka import Consumer, KafkaError, KafkaException

@@ -15,7 +15,9 @@ from ._kafka_message import KafkaMessage
 from ._logger import dbos_logger
 from ._registrations import get_dbos_func_name

-_KafkaConsumerWorkflow = Callable[[KafkaMessage], None]
+_KafkaConsumerWorkflow = (
+    Callable[[KafkaMessage], None] | Callable[[KafkaMessage], Coroutine[Any, Any, None]]
+)

 _kafka_queue: Queue
 _in_order_kafka_queues: dict[str, Queue] = {}
@@ -37,8 +39,8 @@ def _kafka_consumer_loop(
     in_order: bool,
 ) -> None:

-    def on_error(err: KafkaError) -> NoReturn:
-        raise KafkaException(err)
+    def on_error(err: KafkaError) -> None:
+        dbos_logger.error(f"Exception in Kafka consumer: {err}")

     config["error_cb"] = on_error
     if "auto.offset.reset" not in config:
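Widening `_KafkaConsumerWorkflow` to a union with a coroutine type means async workflows can now be registered as Kafka consumers, and broker errors are logged rather than raised out of the polling loop. A sketch, assuming DBOS's `kafka_consumer` decorator and an example broker configuration:

```python
from dbos import DBOS, KafkaMessage

# Broker address, group ID, and topic are example values.
kafka_config = {"bootstrap.servers": "localhost:9092", "group.id": "dbos-app"}


@DBOS.kafka_consumer(kafka_config, ["orders"])
@DBOS.workflow()
async def handle_order(msg: KafkaMessage) -> None:
    # Each Kafka message starts one durable workflow execution.
    DBOS.logger.info(f"Received message on {msg.topic}: {msg.value!r}")
```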
dbos/_logger.py CHANGED
@@ -68,30 +68,37 @@ def config_logger(config: "ConfigFile") -> None:
     )
     disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore

-    if not disable_otlp and otlp_logs_endpoints:
+    if not disable_otlp:

-        from opentelemetry._logs import set_logger_provider
+        from opentelemetry._logs import get_logger_provider, set_logger_provider
         from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
         from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
         from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
         from opentelemetry.sdk.resources import Resource
         from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME

-        log_provider = LoggerProvider(
-            Resource.create(
-                attributes={
-                    SERVICE_NAME: config["name"],
-                }
-            )
-        )
-        set_logger_provider(log_provider)
-        for e in otlp_logs_endpoints:
-            log_provider.add_log_record_processor(
-                BatchLogRecordProcessor(
-                    OTLPLogExporter(endpoint=e),
-                    export_timeout_millis=5000,
+        # Only set up OTLP provider and exporter if endpoints are provided
+        log_provider = get_logger_provider()
+        if otlp_logs_endpoints is not None:
+            if not isinstance(log_provider, LoggerProvider):
+                log_provider = LoggerProvider(
+                    Resource.create(
+                        attributes={
+                            SERVICE_NAME: config["name"],
+                        }
+                    )
+                )
+                set_logger_provider(log_provider)
+
+            for e in otlp_logs_endpoints:
+                log_provider.add_log_record_processor(
+                    BatchLogRecordProcessor(
+                        OTLPLogExporter(endpoint=e),
+                        export_timeout_millis=5000,
+                    )
                 )
-            )
+
+        # Even if no endpoints are provided, we still need a LoggerProvider to create the LoggingHandler
         global _otlp_handler
         _otlp_handler = LoggingHandler(logger_provider=log_provider)
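The rewritten block reuses a globally registered `LoggerProvider` instead of unconditionally installing its own, and it attaches the DBOS `LoggingHandler` even when no OTLP endpoint is configured. A sketch of the reuse path, for an application that sets up OpenTelemetry before DBOS starts:

```python
from opentelemetry._logs import set_logger_provider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk.resources import Resource

# The application installs its own provider first...
provider = LoggerProvider(Resource.create({"service.name": "my-app"}))
set_logger_provider(provider)

# ...and when DBOS configures logging, get_logger_provider() returns this
# instance, so DBOS adds its exporters to it rather than replacing it.
```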
dbos/_migration.py CHANGED
@@ -209,8 +209,33 @@ ALTER TABLE \"{schema}\".workflow_status ADD COLUMN queue_partition_key TEXT;
 """


+def get_dbos_migration_three(schema: str) -> str:
+    return f"""
+create index "idx_workflow_status_queue_status_started" on \"{schema}\"."workflow_status" ("queue_name", "status", "started_at_epoch_ms")
+"""
+
+
+def get_dbos_migration_four(schema: str) -> str:
+    return f"""
+ALTER TABLE \"{schema}\".workflow_status ADD COLUMN forked_from TEXT;
+CREATE INDEX "idx_workflow_status_forked_from" ON \"{schema}\"."workflow_status" ("forked_from")
+"""
+
+
+def get_dbos_migration_five(schema: str) -> str:
+    return f"""
+ALTER TABLE \"{schema}\".operation_outputs ADD COLUMN started_at_epoch_ms BIGINT, ADD COLUMN completed_at_epoch_ms BIGINT;
+"""
+
+
 def get_dbos_migrations(schema: str) -> list[str]:
-    return [get_dbos_migration_one(schema), get_dbos_migration_two(schema)]
+    return [
+        get_dbos_migration_one(schema),
+        get_dbos_migration_two(schema),
+        get_dbos_migration_three(schema),
+        get_dbos_migration_four(schema),
+        get_dbos_migration_five(schema),
+    ]


 def get_sqlite_timestamp_expr() -> str:
@@ -303,4 +328,26 @@ sqlite_migration_two = """
 ALTER TABLE workflow_status ADD COLUMN queue_partition_key TEXT;
 """

-sqlite_migrations = [sqlite_migration_one, sqlite_migration_two]
+sqlite_migration_three = """
+CREATE INDEX "idx_workflow_status_queue_status_started"
+ON "workflow_status" ("queue_name", "status", "started_at_epoch_ms")
+"""
+
+sqlite_migration_four = """
+ALTER TABLE workflow_status ADD COLUMN forked_from TEXT;
+CREATE INDEX "idx_workflow_status_forked_from" ON "workflow_status" ("forked_from")
+"""
+
+sqlite_migration_five = """
+ALTER TABLE operation_outputs ADD COLUMN started_at_epoch_ms BIGINT;
+ALTER TABLE operation_outputs ADD COLUMN completed_at_epoch_ms BIGINT;
+"""
+
+
+sqlite_migrations = [
+    sqlite_migration_one,
+    sqlite_migration_two,
+    sqlite_migration_three,
+    sqlite_migration_four,
+    sqlite_migration_five,
+]
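Note that migration five is a single statement on Postgres but two statements on SQLite: Postgres accepts multiple comma-separated ADD COLUMN actions in one ALTER TABLE, while SQLite allows only one column per statement. A quick illustration of the SQLite constraint:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE operation_outputs (workflow_uuid TEXT, function_id INT)")

# Works: one column per ALTER TABLE statement.
conn.execute("ALTER TABLE operation_outputs ADD COLUMN started_at_epoch_ms BIGINT")
conn.execute("ALTER TABLE operation_outputs ADD COLUMN completed_at_epoch_ms BIGINT")

# Fails: SQLite rejects multiple ADD COLUMN clauses in one statement.
try:
    conn.execute("ALTER TABLE operation_outputs ADD COLUMN a BIGINT, ADD COLUMN b BIGINT")
except sqlite3.OperationalError as e:
    print(f"rejected as expected: {e}")
```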
dbos/_scheduler.py CHANGED
@@ -2,7 +2,7 @@ import random
 import threading
 import traceback
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Any, Callable, Coroutine

 from ._logger import dbos_logger
 from ._queue import Queue
@@ -14,7 +14,10 @@ from ._context import SetWorkflowID
 from ._croniter import croniter  # type: ignore
 from ._registrations import get_dbos_func_name

-ScheduledWorkflow = Callable[[datetime, datetime], None]
+ScheduledWorkflow = (
+    Callable[[datetime, datetime], None]
+    | Callable[[datetime, datetime], Coroutine[Any, Any, None]]
+)


 def scheduler_loop(
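As with the Kafka change, `ScheduledWorkflow` now admits coroutine functions, so scheduled workflows may be async. A sketch (the cron expression is an example):

```python
from datetime import datetime

from dbos import DBOS


@DBOS.scheduled("* * * * *")  # run once per minute (example schedule)
@DBOS.workflow()
async def cleanup(scheduled_time: datetime, actual_time: datetime) -> None:
    # Async scheduled workflows are now valid ScheduledWorkflow values.
    DBOS.logger.info(f"Scheduled for {scheduled_time}, started at {actual_time}")
```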
dbos/_schemas/system_database.py CHANGED
@@ -78,6 +78,7 @@ class SystemSchema:
         Column("inputs", Text()),
         Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
         Column("queue_partition_key", Text()),
+        Column("forked_from", Text()),
         Index("workflow_status_created_at_index", "created_at"),
         Index("workflow_status_executor_id_index", "executor_id"),
         Index("workflow_status_status_index", "status"),
@@ -104,6 +105,8 @@ class SystemSchema:
         Column("output", Text, nullable=True),
         Column("error", Text, nullable=True),
         Column("child_workflow_id", Text, nullable=True),
+        Column("started_at_epoch_ms", BigInteger, nullable=True),
+        Column("completed_at_epoch_ms", BigInteger, nullable=True),
         PrimaryKeyConstraint("workflow_uuid", "function_id"),
     )
dbos/_serialization.py CHANGED
@@ -25,9 +25,13 @@ class Serializer(ABC):
 class DefaultSerializer(Serializer):

     def serialize(self, data: Any) -> str:
-        pickled_data: bytes = pickle.dumps(data)
-        encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
-        return encoded_data
+        try:
+            pickled_data: bytes = pickle.dumps(data)
+            encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
+            return encoded_data
+        except Exception as e:
+            dbos_logger.error(f"Error serializing object: {data}", exc_info=e)
+            raise

     def deserialize(cls, serialized_data: str) -> Any:
         pickled_data: bytes = base64.b64decode(serialized_data)
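With the added try/except, serialization failures now log the offending object before re-raising, which makes unpicklable inputs (lambdas, open sockets, and the like) easier to diagnose. An illustration against the internal `DefaultSerializer`:

```python
# Illustration only: DefaultSerializer lives in the internal module shown above.
from dbos._serialization import DefaultSerializer

serializer = DefaultSerializer()

# Round-trips anything pickle can handle.
blob = serializer.serialize({"answer": 42})
print(len(blob))

# Lambdas can't be pickled, so this now logs
# "Error serializing object: <function <lambda> ...>" before re-raising.
try:
    serializer.serialize(lambda x: x + 1)
except Exception:
    print("serialization failed; see the error log for the offending object")
```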