dbos 2.4.0a1__py3-none-any.whl → 2.4.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
dbos/_admin_server.py CHANGED
@@ -338,6 +338,7 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
                 end_time=filters.get("end_time"),
                 status=filters.get("status"),
                 app_version=filters.get("application_version"),
+                forked_from=filters.get("forked_from"),
                 name=filters.get("workflow_name"),
                 limit=filters.get("limit"),
                 offset=filters.get("offset"),
@@ -364,6 +365,7 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
                 start_time=filters.get("start_time"),
                 end_time=filters.get("end_time"),
                 status=filters.get("status"),
+                forked_from=filters.get("forked_from"),
                 name=filters.get("workflow_name"),
                 limit=filters.get("limit"),
                 offset=filters.get("offset"),
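Both hunks thread the new optional forked_from filter from the admin API's request body into the workflow-listing calls. A minimal sketch of exercising the filter over HTTP, assuming the admin server is reachable locally on port 3001 and that the listing endpoint is POST /workflows taking these filters as a JSON body (the path, port, and workflow ID are assumptions for illustration, not confirmed by this diff):

import requests

# Hypothetical call to the admin server's workflow-listing endpoint.
# Only "forked_from" is new in this release; the other filters already existed.
resp = requests.post(
    "http://localhost:3001/workflows",  # assumed endpoint path and port
    json={
        "forked_from": "original-workflow-id",  # list only workflows forked from this ID
        "limit": 20,
    },
    timeout=10,
)
for workflow in resp.json():
    print(workflow)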
dbos/_app_db.py CHANGED
@@ -201,6 +201,8 @@ class ApplicationDatabase(ABC):
                     else row[3]
                 ),
                 child_workflow_id=None,
+                started_at_epoch_ms=None,
+                completed_at_epoch_ms=None,
             )
             for row in rows
         ]
dbos/_client.py CHANGED
@@ -149,9 +149,11 @@ class DBOSClient:
         self._sys_db = SystemDatabase.create(
             system_database_url=system_database_url,
             engine_kwargs={
+                "connect_args": {"application_name": "dbos_transact_client"},
                 "pool_timeout": 30,
                 "max_overflow": 0,
                 "pool_size": 2,
+                "pool_pre_ping": True,
             },
             engine=system_database_engine,
             schema=dbos_system_schema,
@@ -162,9 +164,11 @@ class DBOSClient:
         self._app_db = ApplicationDatabase.create(
             database_url=application_database_url,
             engine_kwargs={
+                "connect_args": {"application_name": "dbos_transact_client"},
                 "pool_timeout": 30,
                 "max_overflow": 0,
                 "pool_size": 2,
+                "pool_pre_ping": True,
             },
             schema=dbos_system_schema,
             serializer=serializer,
@@ -234,6 +238,7 @@ class DBOSClient:
             ),
             "inputs": self._serializer.serialize(inputs),
             "queue_partition_key": enqueue_options_internal["queue_partition_key"],
+            "forked_from": None,
         }

         self._sys_db.init_workflow(
@@ -300,6 +305,7 @@ class DBOSClient:
             "priority": 0,
             "inputs": self._serializer.serialize({"args": (), "kwargs": {}}),
             "queue_partition_key": None,
+            "forked_from": None,
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
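The client's engines now identify themselves to Postgres and validate pooled connections before reuse; the two dictionary hunks also initialize the new forked_from status field to None for client-created workflows. For reference, a standalone sketch of what these engine_kwargs mean when handed to SQLAlchemy's create_engine (the database URL is a placeholder):

from sqlalchemy import create_engine

engine = create_engine(
    "postgresql+psycopg://user:pass@localhost/dbos",  # placeholder URL
    connect_args={"application_name": "dbos_transact_client"},  # visible in pg_stat_activity
    pool_timeout=30,     # seconds to wait for a free pooled connection
    max_overflow=0,      # never grow beyond pool_size
    pool_size=2,         # small fixed pool for a lightweight client
    pool_pre_ping=True,  # test each pooled connection before handing it out
)

pool_pre_ping trades a cheap liveness check per checkout for resilience against connections silently dropped by the server or an intervening proxy.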
dbos/_conductor/conductor.py CHANGED
@@ -233,6 +233,7 @@ class ConductorWebsocket(threading.Thread):
                 end_time=body["end_time"],
                 status=body["status"],
                 app_version=body["application_version"],
+                forked_from=body["forked_from"],
                 name=body["workflow_name"],
                 limit=body["limit"],
                 offset=body["offset"],
@@ -267,6 +268,7 @@ class ConductorWebsocket(threading.Thread):
                 start_time=q_body["start_time"],
                 end_time=q_body["end_time"],
                 status=q_body["status"],
+                forked_from=q_body["forked_from"],
                 name=q_body["workflow_name"],
                 limit=q_body["limit"],
                 offset=q_body["offset"],
dbos/_conductor/protocol.py CHANGED
@@ -118,6 +118,7 @@ class ListWorkflowsBody(TypedDict, total=False):
    end_time: Optional[str]
    status: Optional[str]
    application_version: Optional[str]
+    forked_from: Optional[str]
    limit: Optional[int]
    offset: Optional[int]
    sort_desc: bool
@@ -143,6 +144,12 @@ class WorkflowsOutput:
    QueueName: Optional[str]
    ApplicationVersion: Optional[str]
    ExecutorID: Optional[str]
+    WorkflowTimeoutMS: Optional[str]
+    WorkflowDeadlineEpochMS: Optional[str]
+    DeduplicationID: Optional[str]
+    Priority: Optional[str]
+    QueuePartitionKey: Optional[str]
+    ForkedFrom: Optional[str]

    @classmethod
    def from_workflow_information(cls, info: WorkflowStatus) -> "WorkflowsOutput":
@@ -152,12 +159,22 @@ class WorkflowsOutput:
        inputs_str = str(info.input) if info.input is not None else None
        outputs_str = str(info.output) if info.output is not None else None
        error_str = str(info.error) if info.error is not None else None
-        request_str = None
        roles_str = (
            str(info.authenticated_roles)
            if info.authenticated_roles is not None
            else None
        )
+        workflow_timeout_ms_str = (
+            str(info.workflow_timeout_ms)
+            if info.workflow_timeout_ms is not None
+            else None
+        )
+        workflow_deadline_epoch_ms_str = (
+            str(info.workflow_deadline_epoch_ms)
+            if info.workflow_deadline_epoch_ms is not None
+            else None
+        )
+        priority_str = str(info.priority) if info.priority is not None else None

        return cls(
            WorkflowUUID=info.workflow_id,
@@ -176,6 +193,12 @@ class WorkflowsOutput:
            QueueName=info.queue_name,
            ApplicationVersion=info.app_version,
            ExecutorID=info.executor_id,
+            WorkflowTimeoutMS=workflow_timeout_ms_str,
+            WorkflowDeadlineEpochMS=workflow_deadline_epoch_ms_str,
+            DeduplicationID=info.deduplication_id,
+            Priority=priority_str,
+            QueuePartitionKey=info.queue_partition_key,
+            ForkedFrom=info.forked_from,
        )


@@ -186,14 +209,28 @@ class WorkflowSteps:
    output: Optional[str]
    error: Optional[str]
    child_workflow_id: Optional[str]
+    started_at_epoch_ms: Optional[str]
+    completed_at_epoch_ms: Optional[str]

    @classmethod
    def from_step_info(cls, info: StepInfo) -> "WorkflowSteps":
        output_str = str(info["output"]) if info["output"] is not None else None
        error_str = str(info["error"]) if info["error"] is not None else None
+        started_at_str = (
+            str(info["started_at_epoch_ms"])
+            if info["started_at_epoch_ms"] is not None
+            else None
+        )
+        completed_at_str = (
+            str(info["completed_at_epoch_ms"])
+            if info["completed_at_epoch_ms"] is not None
+            else None
+        )
        return cls(
            function_id=info["function_id"],
            function_name=info["function_name"],
+            started_at_epoch_ms=started_at_str,
+            completed_at_epoch_ms=completed_at_str,
            output=output_str,
            error=error_str,
            child_workflow_id=info["child_workflow_id"],
@@ -216,6 +253,7 @@ class ListQueuedWorkflowsBody(TypedDict, total=False):
    start_time: Optional[str]
    end_time: Optional[str]
    status: Optional[str]
+    forked_from: Optional[str]
    queue_name: Optional[str]
    limit: Optional[int]
    offset: Optional[int]
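Every new protocol field is an optional string, so the conductor receives integer timestamps stringified. A small sketch of converting a WorkflowSteps-style epoch-millisecond string back to a datetime on the consuming side (the payload dict is illustrative):

from datetime import datetime, timezone
from typing import Optional

def parse_epoch_ms(value: Optional[str]) -> Optional[datetime]:
    # The protocol stringifies epoch-ms integers (or sends None); undo that here.
    if value is None:
        return None
    return datetime.fromtimestamp(int(value) / 1000, tz=timezone.utc)

step = {"started_at_epoch_ms": "1717000000000", "completed_at_epoch_ms": None}
print(parse_epoch_ms(step["started_at_epoch_ms"]))    # 2024-05-29 ... +00:00
print(parse_epoch_ms(step["completed_at_epoch_ms"]))  # None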
dbos/_core.py CHANGED
@@ -300,6 +300,7 @@ def _init_workflow(
            if enqueue_options is not None
            else None
        ),
+        "forked_from": None,
    }

    # Synchronously record the status and inputs for workflows
@@ -316,6 +317,7 @@ def _init_workflow(
                "function_name": wf_name,
                "output": None,
                "error": dbos._serializer.serialize(e),
+                "started_at_epoch_ms": int(time.time() * 1000),
            }
            dbos._sys_db.record_operation_result(result)
            raise
@@ -1118,6 +1120,7 @@ def decorate_step(
            "function_name": step_name,
            "output": None,
            "error": None,
+            "started_at_epoch_ms": int(time.time() * 1000),
        }

        try:
dbos/_dbos.py CHANGED
@@ -1127,7 +1127,9 @@ class DBOS:
        end_time: Optional[str] = None,
        name: Optional[str] = None,
        app_version: Optional[str] = None,
+        forked_from: Optional[str] = None,
        user: Optional[str] = None,
+        queue_name: Optional[str] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        sort_desc: bool = False,
@@ -1144,6 +1146,7 @@ class DBOS:
            end_time=end_time,
            name=name,
            app_version=app_version,
+            forked_from=forked_from,
            user=user,
            limit=limit,
            offset=offset,
@@ -1151,6 +1154,7 @@ class DBOS:
            workflow_id_prefix=workflow_id_prefix,
            load_input=load_input,
            load_output=load_output,
+            queue_name=queue_name,
        )

        return _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1167,6 +1171,7 @@ class DBOS:
        end_time: Optional[str] = None,
        name: Optional[str] = None,
        app_version: Optional[str] = None,
+        forked_from: Optional[str] = None,
        user: Optional[str] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
@@ -1184,6 +1189,7 @@ class DBOS:
            end_time=end_time,
            name=name,
            app_version=app_version,
+            forked_from=forked_from,
            user=user,
            limit=limit,
            offset=offset,
@@ -1199,6 +1205,7 @@ class DBOS:
        *,
        queue_name: Optional[str] = None,
        status: Optional[Union[str, List[str]]] = None,
+        forked_from: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        name: Optional[str] = None,
@@ -1212,6 +1219,7 @@ class DBOS:
            _get_dbos_instance()._sys_db,
            queue_name=queue_name,
            status=status,
+            forked_from=forked_from,
            start_time=start_time,
            end_time=end_time,
            name=name,
@@ -1231,6 +1239,7 @@ class DBOS:
        *,
        queue_name: Optional[str] = None,
        status: Optional[Union[str, List[str]]] = None,
+        forked_from: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        name: Optional[str] = None,
@@ -1244,6 +1253,7 @@ class DBOS:
            cls.list_queued_workflows,
            queue_name=queue_name,
            status=status,
+            forked_from=forked_from,
            start_time=start_time,
            end_time=end_time,
            name=name,
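With these signatures in place, both listing APIs take the new filters directly. A usage sketch based only on the parameters added above (the workflow ID and queue name are placeholders):

from dbos import DBOS

# List workflows forked from a given workflow, restricted to one queue.
forks = DBOS.list_workflows(
    forked_from="original-workflow-id",  # placeholder ID
    queue_name="example_queue",          # placeholder queue name
)

# The queued-workflow listing accepts the same forked_from filter.
queued_forks = DBOS.list_queued_workflows(
    queue_name="example_queue",
    forked_from="original-workflow-id",
)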
dbos/_migration.py CHANGED
@@ -215,11 +215,26 @@ create index "idx_workflow_status_queue_status_started" on \"{schema}\"."workflo
 """


+def get_dbos_migration_four(schema: str) -> str:
+    return f"""
+    ALTER TABLE \"{schema}\".workflow_status ADD COLUMN forked_from TEXT;
+    CREATE INDEX "idx_workflow_status_forked_from" ON \"{schema}\"."workflow_status" ("forked_from")
+    """
+
+
+def get_dbos_migration_five(schema: str) -> str:
+    return f"""
+    ALTER TABLE \"{schema}\".operation_outputs ADD COLUMN started_at_epoch_ms BIGINT, ADD COLUMN completed_at_epoch_ms BIGINT;
+    """
+
+
 def get_dbos_migrations(schema: str) -> list[str]:
     return [
         get_dbos_migration_one(schema),
         get_dbos_migration_two(schema),
         get_dbos_migration_three(schema),
+        get_dbos_migration_four(schema),
+        get_dbos_migration_five(schema),
     ]


@@ -318,4 +333,21 @@ CREATE INDEX "idx_workflow_status_queue_status_started"
 ON "workflow_status" ("queue_name", "status", "started_at_epoch_ms")
 """

-sqlite_migrations = [sqlite_migration_one, sqlite_migration_two, sqlite_migration_three]
+sqlite_migration_four = """
+ALTER TABLE workflow_status ADD COLUMN forked_from TEXT;
+CREATE INDEX "idx_workflow_status_forked_from" ON "workflow_status" ("forked_from")
+"""
+
+sqlite_migration_five = """
+ALTER TABLE operation_outputs ADD COLUMN started_at_epoch_ms BIGINT;
+ALTER TABLE operation_outputs ADD COLUMN completed_at_epoch_ms BIGINT;
+"""
+
+
+sqlite_migrations = [
+    sqlite_migration_one,
+    sqlite_migration_two,
+    sqlite_migration_three,
+    sqlite_migration_four,
+    sqlite_migration_five,
+]
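Migration four adds the forked_from column and its index; migration five adds the per-step timestamp columns. A self-contained sketch that applies the SQLite variants from this diff to an in-memory database (the minimal CREATE TABLE statements are stand-ins for the real schema):

import sqlite3

# Stand-in tables so the ALTER statements have something to modify.
setup = """
CREATE TABLE workflow_status (workflow_uuid TEXT PRIMARY KEY);
CREATE TABLE operation_outputs (workflow_uuid TEXT, function_id INTEGER);
"""

migration_four = """
ALTER TABLE workflow_status ADD COLUMN forked_from TEXT;
CREATE INDEX "idx_workflow_status_forked_from" ON "workflow_status" ("forked_from");
"""

migration_five = """
ALTER TABLE operation_outputs ADD COLUMN started_at_epoch_ms BIGINT;
ALTER TABLE operation_outputs ADD COLUMN completed_at_epoch_ms BIGINT;
"""

conn = sqlite3.connect(":memory:")
for script in (setup, migration_four, migration_five):
    conn.executescript(script)  # executescript handles multi-statement SQL
print([row[1] for row in conn.execute("PRAGMA table_info(workflow_status)")])
# ['workflow_uuid', 'forked_from']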
dbos/_schemas/system_database.py CHANGED
@@ -78,6 +78,7 @@ class SystemSchema:
        Column("inputs", Text()),
        Column("priority", Integer(), nullable=False, server_default=text("'0'::int")),
        Column("queue_partition_key", Text()),
+        Column("forked_from", Text()),
        Index("workflow_status_created_at_index", "created_at"),
        Index("workflow_status_executor_id_index", "executor_id"),
        Index("workflow_status_status_index", "status"),
@@ -104,6 +105,8 @@ class SystemSchema:
        Column("output", Text, nullable=True),
        Column("error", Text, nullable=True),
        Column("child_workflow_id", Text, nullable=True),
+        Column("started_at_epoch_ms", BigInteger, nullable=True),
+        Column("completed_at_epoch_ms", BigInteger, nullable=True),
        PrimaryKeyConstraint("workflow_uuid", "function_id"),
    )

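These Column definitions mirror the migrations above, so SQLAlchemy Core code can reference the new attributes directly. A brief sketch, assuming the two tables join on workflow_uuid as their columns suggest (engine setup is omitted; printing a select renders its SQL without executing it):

import sqlalchemy as sa
from dbos._schemas.system_database import SystemSchema

query = sa.select(
    SystemSchema.workflow_status.c.workflow_uuid,
    SystemSchema.workflow_status.c.forked_from,              # new in this release
    SystemSchema.operation_outputs.c.started_at_epoch_ms,    # new in this release
    SystemSchema.operation_outputs.c.completed_at_epoch_ms,  # new in this release
).select_from(
    SystemSchema.workflow_status.join(
        SystemSchema.operation_outputs,
        SystemSchema.workflow_status.c.workflow_uuid
        == SystemSchema.operation_outputs.c.workflow_uuid,
    )
)
print(query)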
dbos/_sys_db.py CHANGED
@@ -106,7 +106,7 @@ class WorkflowStatus:
    updated_at: Optional[int]
    # If this workflow was enqueued, on which queue
    queue_name: Optional[str]
-    # The executor to most recently executed this workflow
+    # The executor to most recently execute this workflow
    executor_id: Optional[str]
    # The application version on which this workflow was started
    app_version: Optional[str]
@@ -114,6 +114,14 @@ class WorkflowStatus:
    workflow_timeout_ms: Optional[int]
    # The deadline of a workflow, computed by adding its timeout to its start time.
    workflow_deadline_epoch_ms: Optional[int]
+    # Unique ID for deduplication on a queue
+    deduplication_id: Optional[str]
+    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
+    priority: Optional[int]
+    # If this workflow is enqueued on a partitioned queue, its partition key
+    queue_partition_key: Optional[str]
+    # If this workflow was forked from another, that workflow's ID.
+    forked_from: Optional[str]


    # INTERNAL FIELDS
@@ -141,19 +149,13 @@ class WorkflowStatusInternal(TypedDict):
    app_version: Optional[str]
    app_id: Optional[str]
    recovery_attempts: Optional[int]
-    # The start-to-close timeout of the workflow in ms
    workflow_timeout_ms: Optional[int]
-    # The deadline of a workflow, computed by adding its timeout to its start time.
-    # Deadlines propagate to children. When the deadline is reached, the workflow is cancelled.
    workflow_deadline_epoch_ms: Optional[int]
-    # Unique ID for deduplication on a queue
    deduplication_id: Optional[str]
-    # Priority of the workflow on the queue, starting from 1 ~ 2,147,483,647. Default 0 (highest priority).
    priority: int
-    # Serialized workflow inputs
    inputs: str
-    # If this workflow is enqueued on a partitioned queue, its partition key
    queue_partition_key: Optional[str]
+    forked_from: Optional[str]


class EnqueueOptionsInternal(TypedDict):
@@ -178,6 +180,7 @@ class OperationResultInternal(TypedDict):
    function_name: str
    output: Optional[str]  # JSON (jsonpickle)
    error: Optional[str]  # JSON (jsonpickle)
+    started_at_epoch_ms: int


class GetEventWorkflowContext(TypedDict):
@@ -194,42 +197,34 @@ class GetWorkflowsInput:
    """

    def __init__(self) -> None:
-        self.workflow_ids: Optional[List[str]] = (
-            None  # Search only in these workflow IDs
-        )
-        self.name: Optional[str] = None  # The name of the workflow function
-        self.authenticated_user: Optional[str] = None  # The user who ran the workflow.
-        self.start_time: Optional[str] = None  # Timestamp in ISO 8601 format
-        self.end_time: Optional[str] = None  # Timestamp in ISO 8601 format
-        self.status: Optional[List[str]] = (
-            None  # Get workflows with one of these statuses
-        )
-        self.application_version: Optional[str] = (
-            None  # The application version that ran this workflow. = None
-        )
-        self.limit: Optional[int] = (
-            None  # Return up to this many workflows IDs. IDs are ordered by workflow creation time.
-        )
-        self.offset: Optional[int] = (
-            None  # Offset into the matching records for pagination
-        )
-        self.sort_desc: bool = (
-            False  # If true, sort by created_at in DESC order. Default false (in ASC order).
-        )
-        self.workflow_id_prefix: Optional[str] = (
-            None  # If set, search for workflow IDs starting with this string
-        )
-
-
-class GetQueuedWorkflowsInput(TypedDict):
-    queue_name: Optional[str]  # Get workflows belonging to this queue
-    status: Optional[list[str]]  # Get workflows with one of these statuses
-    start_time: Optional[str]  # Timestamp in ISO 8601 format
-    end_time: Optional[str]  # Timestamp in ISO 8601 format
-    limit: Optional[int]  # Return up to this many workflows IDs.
-    offset: Optional[int]  # Offset into the matching records for pagination
-    name: Optional[str]  # The name of the workflow function
-    sort_desc: Optional[bool]  # Sort by created_at in DESC or ASC order
+        # Search only in these workflow IDs
+        self.workflow_ids: Optional[List[str]] = None
+        # The name of the workflow function
+        self.name: Optional[str] = None
+        # The user who ran the workflow.
+        self.authenticated_user: Optional[str] = None
+        # Timestamp in ISO 8601 format
+        self.start_time: Optional[str] = None
+        # Timestamp in ISO 8601 format
+        self.end_time: Optional[str] = None
+        # Get workflows with one of these statuses
+        self.status: Optional[List[str]] = None
+        # The application version that ran this workflow.
+        self.application_version: Optional[str] = None
+        # Get workflows forked from this workflow ID.
+        self.forked_from: Optional[str] = None
+        # Return up to this many workflows IDs. IDs are ordered by workflow creation time.
+        self.limit: Optional[int] = None
+        # Offset into the matching records for pagination
+        self.offset: Optional[int] = None
+        # If true, sort by created_at in DESC order. Default false (in ASC order).
+        self.sort_desc: bool = False
+        # Search only for workflow IDs starting with this string
+        self.workflow_id_prefix: Optional[str] = None
+        # Search only for workflows enqueued on this queue
+        self.queue_name: Optional[str] = None
+        # Search only currently enqueued workflows
+        self.queues_only: bool = False


class GetPendingWorkflowsOutput:
@@ -249,6 +244,10 @@ class StepInfo(TypedDict):
    error: Optional[Exception]
    # If the step starts or retrieves the result of a workflow, its ID
    child_workflow_id: Optional[str]
+    # The Unix epoch timestamp at which this step started
+    started_at_epoch_ms: Optional[int]
+    # The Unix epoch timestamp at which this step completed
+    completed_at_epoch_ms: Optional[int]


_dbos_null_topic = "__null__topic__"
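With both timestamps recorded per step, a step's wall-clock duration falls out by subtraction. A minimal sketch over a StepInfo-shaped dict (the values are illustrative):

from typing import Optional

def step_duration_ms(started: Optional[int], completed: Optional[int]) -> Optional[int]:
    # Either timestamp may be None, e.g. for rows written before this migration
    # or for steps that started but have not yet completed.
    if started is None or completed is None:
        return None
    return completed - started

step = {"started_at_epoch_ms": 1717000000000, "completed_at_epoch_ms": 1717000000250}
print(step_duration_ms(step["started_at_epoch_ms"], step["completed_at_epoch_ms"]))  # 250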
@@ -706,6 +705,7 @@ class SystemDatabase(ABC):
                    assumed_role=status["assumed_role"],
                    queue_name=INTERNAL_QUEUE_NAME,
                    inputs=status["inputs"],
+                    forked_from=original_workflow_id,
                )
            )

@@ -767,6 +767,7 @@ class SystemDatabase(ABC):
                    SystemSchema.workflow_status.c.priority,
                    SystemSchema.workflow_status.c.inputs,
                    SystemSchema.workflow_status.c.queue_partition_key,
+                    SystemSchema.workflow_status.c.forked_from,
                ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
            ).fetchone()
            if row is None:
@@ -795,6 +796,7 @@ class SystemDatabase(ABC):
                "priority": row[17],
                "inputs": row[18],
                "queue_partition_key": row[19],
+                "forked_from": row[20],
            }
            return status

@@ -881,6 +883,10 @@ class SystemDatabase(ABC):
            SystemSchema.workflow_status.c.application_id,
            SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
            SystemSchema.workflow_status.c.workflow_timeout_ms,
+            SystemSchema.workflow_status.c.deduplication_id,
+            SystemSchema.workflow_status.c.priority,
+            SystemSchema.workflow_status.c.queue_partition_key,
+            SystemSchema.workflow_status.c.forked_from,
        ]
        if load_input:
            load_columns.append(SystemSchema.workflow_status.c.inputs)
@@ -888,7 +894,15 @@ class SystemDatabase(ABC):
            load_columns.append(SystemSchema.workflow_status.c.output)
            load_columns.append(SystemSchema.workflow_status.c.error)

-        query = sa.select(*load_columns)
+        if input.queues_only:
+            query = sa.select(*load_columns).where(
+                sa.and_(
+                    SystemSchema.workflow_status.c.queue_name.isnot(None),
+                    SystemSchema.workflow_status.c.status.in_(["ENQUEUED", "PENDING"]),
+                )
+            )
+        else:
+            query = sa.select(*load_columns)
        if input.sort_desc:
            query = query.order_by(SystemSchema.workflow_status.c.created_at.desc())
        else:
@@ -917,6 +931,10 @@ class SystemDatabase(ABC):
                SystemSchema.workflow_status.c.application_version
                == input.application_version
            )
+        if input.forked_from:
+            query = query.where(
+                SystemSchema.workflow_status.c.forked_from == input.forked_from
+            )
        if input.workflow_ids:
            query = query.where(
                SystemSchema.workflow_status.c.workflow_uuid.in_(input.workflow_ids)
@@ -927,6 +945,10 @@ class SystemDatabase(ABC):
                    input.workflow_id_prefix
                )
            )
+        if input.queue_name:
+            query = query.where(
+                SystemSchema.workflow_status.c.queue_name == input.queue_name
+            )
        if input.limit:
            query = query.limit(input.limit)
        if input.offset:
@@ -936,6 +958,7 @@ class SystemDatabase(ABC):
            rows = c.execute(query).fetchall()

        infos: List[WorkflowStatus] = []
+        workflow_ids: List[str] = []
        for row in rows:
            info = WorkflowStatus()
            info.workflow_id = row[0]
@@ -957,10 +980,14 @@ class SystemDatabase(ABC):
            info.app_id = row[14]
            info.workflow_deadline_epoch_ms = row[15]
            info.workflow_timeout_ms = row[16]
-
-            raw_input = row[17] if load_input else None
-            raw_output = row[18] if load_output else None
-            raw_error = row[19] if load_output else None
+            info.deduplication_id = row[17]
+            info.priority = row[18]
+            info.queue_partition_key = row[19]
+            info.forked_from = row[20]
+
+            raw_input = row[21] if load_input else None
+            raw_output = row[22] if load_output else None
+            raw_error = row[23] if load_output else None
            inputs, output, exception = safe_deserialize(
                self.serializer,
                info.workflow_id,
@@ -972,122 +999,10 @@ class SystemDatabase(ABC):
            info.output = output
            info.error = exception

+            workflow_ids.append(info.workflow_id)
            infos.append(info)
        return infos

-    def get_queued_workflows(
-        self,
-        input: GetQueuedWorkflowsInput,
-        *,
-        load_input: bool = True,
-    ) -> List[WorkflowStatus]:
-        """
-        Retrieve a list of queued workflows result and inputs based on the input criteria. The result is a list of external-facing workflow status objects.
-        """
-        load_columns = [
-            SystemSchema.workflow_status.c.workflow_uuid,
-            SystemSchema.workflow_status.c.status,
-            SystemSchema.workflow_status.c.name,
-            SystemSchema.workflow_status.c.recovery_attempts,
-            SystemSchema.workflow_status.c.config_name,
-            SystemSchema.workflow_status.c.class_name,
-            SystemSchema.workflow_status.c.authenticated_user,
-            SystemSchema.workflow_status.c.authenticated_roles,
-            SystemSchema.workflow_status.c.assumed_role,
-            SystemSchema.workflow_status.c.queue_name,
-            SystemSchema.workflow_status.c.executor_id,
-            SystemSchema.workflow_status.c.created_at,
-            SystemSchema.workflow_status.c.updated_at,
-            SystemSchema.workflow_status.c.application_version,
-            SystemSchema.workflow_status.c.application_id,
-            SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
-            SystemSchema.workflow_status.c.workflow_timeout_ms,
-        ]
-        if load_input:
-            load_columns.append(SystemSchema.workflow_status.c.inputs)
-
-        query = sa.select(*load_columns).where(
-            sa.and_(
-                SystemSchema.workflow_status.c.queue_name.isnot(None),
-                SystemSchema.workflow_status.c.status.in_(["ENQUEUED", "PENDING"]),
-            )
-        )
-        if input["sort_desc"]:
-            query = query.order_by(SystemSchema.workflow_status.c.created_at.desc())
-        else:
-            query = query.order_by(SystemSchema.workflow_status.c.created_at.asc())
-
-        if input.get("name"):
-            query = query.where(SystemSchema.workflow_status.c.name == input["name"])
-
-        if input.get("queue_name"):
-            query = query.where(
-                SystemSchema.workflow_status.c.queue_name == input["queue_name"]
-            )
-
-        status = input.get("status", None)
-        if status:
-            query = query.where(SystemSchema.workflow_status.c.status.in_(status))
-        if "start_time" in input and input["start_time"] is not None:
-            query = query.where(
-                SystemSchema.workflow_status.c.created_at
-                >= datetime.datetime.fromisoformat(input["start_time"]).timestamp()
-                * 1000
-            )
-        if "end_time" in input and input["end_time"] is not None:
-            query = query.where(
-                SystemSchema.workflow_status.c.created_at
-                <= datetime.datetime.fromisoformat(input["end_time"]).timestamp() * 1000
-            )
-        if input.get("limit"):
-            query = query.limit(input["limit"])
-        if input.get("offset"):
-            query = query.offset(input["offset"])
-
-        with self.engine.begin() as c:
-            rows = c.execute(query).fetchall()
-
-        infos: List[WorkflowStatus] = []
-        for row in rows:
-            info = WorkflowStatus()
-            info.workflow_id = row[0]
-            info.status = row[1]
-            info.name = row[2]
-            info.recovery_attempts = row[3]
-            info.config_name = row[4]
-            info.class_name = row[5]
-            info.authenticated_user = row[6]
-            info.authenticated_roles = (
-                json.loads(row[7]) if row[7] is not None else None
-            )
-            info.assumed_role = row[8]
-            info.queue_name = row[9]
-            info.executor_id = row[10]
-            info.created_at = row[11]
-            info.updated_at = row[12]
-            info.app_version = row[13]
-            info.app_id = row[14]
-            info.workflow_deadline_epoch_ms = row[15]
-            info.workflow_timeout_ms = row[16]
-
-            raw_input = row[17] if load_input else None
-
-            # Error and Output are not loaded because they should always be None for queued workflows.
-            inputs, output, exception = safe_deserialize(
-                self.serializer,
-                info.workflow_id,
-                serialized_input=raw_input,
-                serialized_output=None,
-                serialized_exception=None,
-            )
-            info.input = inputs
-            info.output = output
-            info.error = exception
-
-            infos.append(info)
-
-        return infos
-
    def get_pending_workflows(
        self, executor_id: str, app_version: str
    ) -> list[GetPendingWorkflowsOutput]:
@@ -1121,6 +1036,8 @@ class SystemDatabase(ABC):
                    SystemSchema.operation_outputs.c.output,
                    SystemSchema.operation_outputs.c.error,
                    SystemSchema.operation_outputs.c.child_workflow_id,
+                    SystemSchema.operation_outputs.c.started_at_epoch_ms,
+                    SystemSchema.operation_outputs.c.completed_at_epoch_ms,
                ).where(SystemSchema.operation_outputs.c.workflow_uuid == workflow_id)
            ).fetchall()
            steps = []
@@ -1138,6 +1055,8 @@ class SystemDatabase(ABC):
                    output=output,
                    error=exception,
                    child_workflow_id=row[4],
+                    started_at_epoch_ms=row[5],
+                    completed_at_epoch_ms=row[6],
                )
                steps.append(step)
            return steps
@@ -1154,6 +1073,8 @@ class SystemDatabase(ABC):
            workflow_uuid=result["workflow_uuid"],
            function_id=result["function_id"],
            function_name=result["function_name"],
+            started_at_epoch_ms=result["started_at_epoch_ms"],
+            completed_at_epoch_ms=int(time.time() * 1000),
            output=output,
            error=error,
        )
@@ -1340,6 +1261,7 @@ class SystemDatabase(ABC):
        topic: Optional[str] = None,
    ) -> None:
        function_name = "DBOS.send"
+        start_time = int(time.time() * 1000)
        topic = topic if topic is not None else _dbos_null_topic
        with self.engine.begin() as c:
            recorded_output = self._check_operation_execution_txn(
@@ -1376,6 +1298,7 @@ class SystemDatabase(ABC):
                "workflow_uuid": workflow_uuid,
                "function_id": function_id,
                "function_name": function_name,
+                "started_at_epoch_ms": start_time,
                "output": None,
                "error": None,
            }
@@ -1391,6 +1314,7 @@ class SystemDatabase(ABC):
        timeout_seconds: float = 60,
    ) -> Any:
        function_name = "DBOS.recv"
+        start_time = int(time.time() * 1000)
        topic = topic if topic is not None else _dbos_null_topic

        # First, check for previous executions.
@@ -1475,6 +1399,7 @@ class SystemDatabase(ABC):
            "workflow_uuid": workflow_uuid,
            "function_id": function_id,
            "function_name": function_name,
+            "started_at_epoch_ms": start_time,
            "output": self.serializer.serialize(
                message
            ),  # None will be serialized to 'null'
@@ -1510,6 +1435,7 @@ class SystemDatabase(ABC):
        skip_sleep: bool = False,
    ) -> float:
        function_name = "DBOS.sleep"
+        start_time = int(time.time() * 1000)
        recorded_output = self.check_operation_execution(
            workflow_uuid, function_id, function_name
        )
@@ -1530,6 +1456,7 @@ class SystemDatabase(ABC):
            "workflow_uuid": workflow_uuid,
            "function_id": function_id,
            "function_name": function_name,
+            "started_at_epoch_ms": start_time,
            "output": self.serializer.serialize(end_time),
            "error": None,
        }
@@ -1550,6 +1477,7 @@ class SystemDatabase(ABC):
        message: Any,
    ) -> None:
        function_name = "DBOS.setEvent"
+        start_time = int(time.time() * 1000)
        with self.engine.begin() as c:
            recorded_output = self._check_operation_execution_txn(
                workflow_uuid, function_id, function_name, conn=c
@@ -1579,6 +1507,7 @@ class SystemDatabase(ABC):
                "workflow_uuid": workflow_uuid,
                "function_id": function_id,
                "function_name": function_name,
+                "started_at_epoch_ms": start_time,
                "output": None,
                "error": None,
            }
@@ -1639,6 +1568,7 @@ class SystemDatabase(ABC):
        caller_ctx: Optional[GetEventWorkflowContext] = None,
    ) -> Any:
        function_name = "DBOS.getEvent"
+        start_time = int(time.time() * 1000)
        get_sql = sa.select(
            SystemSchema.workflow_events.c.value,
        ).where(
@@ -1713,6 +1643,7 @@ class SystemDatabase(ABC):
                "workflow_uuid": caller_ctx["workflow_uuid"],
                "function_id": caller_ctx["function_id"],
                "function_name": function_name,
+                "started_at_epoch_ms": start_time,
                "output": self.serializer.serialize(
                    value
                ),  # None will be serialized to 'null'
@@ -1951,6 +1882,7 @@ class SystemDatabase(ABC):

    def call_function_as_step(self, fn: Callable[[], T], function_name: str) -> T:
        ctx = get_local_dbos_context()
+        start_time = int(time.time() * 1000)
        if ctx and ctx.is_transaction():
            raise Exception(f"Invalid call to `{function_name}` inside a transaction")
        if ctx and ctx.is_workflow():
@@ -1978,6 +1910,7 @@ class SystemDatabase(ABC):
                "workflow_uuid": ctx.workflow_id,
                "function_id": ctx.function_id,
                "function_name": function_name,
+                "started_at_epoch_ms": start_time,
                "output": self.serializer.serialize(result),
                "error": None,
            }
@@ -2056,6 +1989,7 @@ class SystemDatabase(ABC):
            if value == _dbos_stream_closed_sentinel
            else "DBOS.writeStream"
        )
+        start_time = int(time.time() * 1000)

        with self.engine.begin() as c:

@@ -2102,6 +2036,7 @@ class SystemDatabase(ABC):
            "workflow_uuid": workflow_uuid,
            "function_id": function_id,
            "function_name": function_name,
+            "started_at_epoch_ms": start_time,
            "output": None,
            "error": None,
        }
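Every operation recorded above follows the same bracketing pattern: capture start_time before the work begins, then record_operation_result stamps completed_at_epoch_ms when the row is written. A distilled sketch of that pattern (record_result here is a stand-in for the real method):

import time

def record_result(result: dict) -> None:
    # Stand-in for SystemDatabase.record_operation_result: the completion
    # timestamp is taken at write time, not when the operation began.
    result["completed_at_epoch_ms"] = int(time.time() * 1000)
    print(result)

start_time = int(time.time() * 1000)  # captured before the operation runs
# ... perform the operation (send, recv, sleep, setEvent, ...) ...
record_result({
    "function_name": "DBOS.send",
    "started_at_epoch_ms": start_time,
    "output": None,
    "error": None,
})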
dbos/_workflow_commands.py CHANGED
@@ -1,4 +1,3 @@
-import time
 import uuid
 from datetime import datetime
 from typing import TYPE_CHECKING, List, Optional, Union
@@ -7,7 +6,6 @@ from dbos._context import get_local_dbos_context

 from ._app_db import ApplicationDatabase
 from ._sys_db import (
-    GetQueuedWorkflowsInput,
     GetWorkflowsInput,
     StepInfo,
     SystemDatabase,
@@ -28,7 +26,9 @@ def list_workflows(
    end_time: Optional[str] = None,
    name: Optional[str] = None,
    app_version: Optional[str] = None,
+    forked_from: Optional[str] = None,
    user: Optional[str] = None,
+    queue_name: Optional[str] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
    sort_desc: bool = False,
@@ -43,6 +43,8 @@ def list_workflows(
    input.end_time = end_time
    input.status = status if status is None or isinstance(status, list) else [status]
    input.application_version = app_version
+    input.forked_from = forked_from
+    input.queue_name = queue_name
    input.limit = limit
    input.name = name
    input.offset = offset
@@ -61,6 +63,7 @@ def list_queued_workflows(
    *,
    queue_name: Optional[str] = None,
    status: Optional[Union[str, List[str]]] = None,
+    forked_from: Optional[str] = None,
    start_time: Optional[str] = None,
    end_time: Optional[str] = None,
    name: Optional[str] = None,
@@ -69,19 +72,20 @@ def list_queued_workflows(
    sort_desc: bool = False,
    load_input: bool = True,
) -> List[WorkflowStatus]:
-    input: GetQueuedWorkflowsInput = {
-        "queue_name": queue_name,
-        "start_time": start_time,
-        "end_time": end_time,
-        "status": status if status is None or isinstance(status, list) else [status],
-        "limit": limit,
-        "name": name,
-        "offset": offset,
-        "sort_desc": sort_desc,
-    }
-
-    infos: List[WorkflowStatus] = sys_db.get_queued_workflows(
-        input, load_input=load_input
+    input = GetWorkflowsInput()
+    input.start_time = start_time
+    input.end_time = end_time
+    input.status = status if status is None or isinstance(status, list) else [status]
+    input.forked_from = forked_from
+    input.limit = limit
+    input.name = name
+    input.offset = offset
+    input.sort_desc = sort_desc
+    input.queues_only = True
+    input.queue_name = queue_name
+
+    infos: List[WorkflowStatus] = sys_db.get_workflows(
+        input, load_input=load_input, load_output=False
    )
    return infos

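After this refactor there is a single query path: list_queued_workflows builds the same GetWorkflowsInput as list_workflows, with queues_only=True so that get_workflows adds the ENQUEUED/PENDING predicate itself. A condensed sketch of what the function now does internally (sys_db would be a connected SystemDatabase; names follow the diff):

from dbos._sys_db import GetWorkflowsInput

input = GetWorkflowsInput()
input.queue_name = "example_queue"  # placeholder queue name
input.status = ["ENQUEUED"]
input.queues_only = True  # restrict to queued workflows in ENQUEUED/PENDING

# Outputs are skipped because a still-queued workflow cannot have completed:
# infos = sys_db.get_workflows(input, load_input=True, load_output=False)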
dbos/cli/cli.py CHANGED
@@ -145,7 +145,7 @@ def start() -> None:
            if process.poll() is None:
                os.killpg(os.getpgid(process.pid), signum)

-             # Exit
+            # Exit
        os._exit(process.returncode if process.returncode is not None else 1)

    # Configure the single handler only on Unix-like systems.
dbos-2.4.0a3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 2.4.0a1
+Version: 2.4.0a3
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
dbos-2.4.0a3.dist-info/RECORD CHANGED
@@ -1,19 +1,19 @@
-dbos-2.4.0a1.dist-info/METADATA,sha256=wAuuiW_zghWbm2aYpSvKd_HR4ERFfu-HbRvOt0eTlhc,14532
-dbos-2.4.0a1.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
-dbos-2.4.0a1.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
-dbos-2.4.0a1.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+dbos-2.4.0a3.dist-info/METADATA,sha256=_nEbIimxVj4IDZxqDRCNV9H9LdN7EXbc-sP2bLKrd7w,14532
+dbos-2.4.0a3.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+dbos-2.4.0a3.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-2.4.0a3.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=M7FdFSBGhcvaLIXrNw_0eR68ijwMWV7_UEyimHMP_F4,1039
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
-dbos/_admin_server.py,sha256=hubQJw5T8zGKCPNS6FQTXy8jQ8GTJxoYQaDTMlICl9k,16267
-dbos/_app_db.py,sha256=mvWQ66ebdbiD9fpGKHZBWNVEza6Ulo1D-3UoTB_LwRc,16378
+dbos/_admin_server.py,sha256=Kce_Cv6JXZBABzfOcNJdVHOwEukmp7SvO24TSa-gLIM,16371
+dbos/_app_db.py,sha256=3XHvTePe1JaAI42rO3waWGoEeDyXkFKGzTFwJxQHUmo,16464
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=0VR9oWBn0i-34jNWHqkgeImKdg5aBefMWu2jaqRLH8Q,19658
-dbos/_conductor/conductor.py,sha256=3E_hL3c9g9yWqKZkvI6KA0-ZzPMPRo06TOzT1esMiek,24114
-dbos/_conductor/protocol.py,sha256=q3rgLxINFtWFigdOONc-4gX4vn66UmMlJQD6Kj8LnL4,7420
+dbos/_client.py,sha256=8yrIqO5Hg-TdYS6P5sxxVWz_iusarS9Is8DU3WezoUQ,19966
+dbos/_conductor/conductor.py,sha256=TB9hF7cQupiv8yJb3BZuIUYJkxQ7aJY7d0Apxkn0xss,24254
+dbos/_conductor/protocol.py,sha256=nVjpcSw_OPoCM7NBU_IRWnk9dFQjOgAkg0ufhj8lFzI,8901
 dbos/_context.py,sha256=XKllmsDR_oMcWOuZnoe1X4yv2JeOi_vsAuyWC-mWs_o,28164
-dbos/_core.py,sha256=e-pKDbrvpN6BzcfyIZx4Nsb8wnMiGxLNzdpgtlRI-0I,50096
+dbos/_core.py,sha256=FCspRQFRMFyHpkl4vqR8IEw3aitD-VWB77CMVQrlyy8,50257
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=dr32Z_NT36JkUxWGyYVX7xkl3bYJmgsxVMOX8H9_mpM,59394
+dbos/_dbos.py,sha256=jjbnAKHBMzn0cmaDdF-U5rNeFZEWwH99RSIvtFRkrEQ,59803
 dbos/_dbos_config.py,sha256=mfajyeyeV1ZHaAg2GU3dxwvp_19wZtY2prNdVrXgPb8,24846
 dbos/_debouncer.py,sha256=qNjIVmWqTPp64M2cEbLnpgGmlKVdCaAKysD1BPJgWh4,15297
 dbos/_debug.py,sha256=0MfgNqutCUhI4PEmmra9x7f3DiFE_0nscfUCHdLimEY,1415
@@ -25,7 +25,7 @@ dbos/_flask.py,sha256=Npnakt-a3W5OykONFRkDRnumaDhTQmA0NPdUCGRYKXE,1652
 dbos/_kafka.py,sha256=cA3hXyT-FR4LQZnaBMVLTZn7oko76rcTUC_kOo6aSis,4352
 dbos/_kafka_message.py,sha256=NYvOXNG3Qn7bghn1pv3fg4Pbs86ILZGcK4IB-MLUNu0,409
 dbos/_logger.py,sha256=ByGkkGwEWaqE9z6E2VNDFOgu_z4LNe7_SxsVgAXzoT0,5081
-dbos/_migration.py,sha256=wxkdsWoT0nMixEP99MALPdPz2-Sucxy1mVPXZTuqRU0,10770
+dbos/_migration.py,sha256=GJdxHhMUnsr3pjrGwi1f4PT76ABnn9kFUyqWp4Hakmw,11701
 dbos/_outcome.py,sha256=7HvosMfEHTh1U5P6xok7kFTGLwa2lPaul0YApb3UnN4,8191
 dbos/_queue.py,sha256=GmqZHl9smES1KSmpauhSdsnZFJHDyfvRArmC-jBibhw,6228
 dbos/_recovery.py,sha256=K-wlFhdf4yGRm6cUzyhcTjQUS0xp2T5rdNMLiiBErYg,2882
@@ -34,9 +34,9 @@ dbos/_roles.py,sha256=kCuhhg8XLtrHCgKgm44I0abIRTGHltf88OwjEKAUggk,2317
 dbos/_scheduler.py,sha256=PLiCSUujlfEfojTnHwzY-P_AEOVEx7bvWvU5BuMgLPY,2708
 dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
-dbos/_schemas/system_database.py,sha256=mNsBV0ttlqJArvOqGPY60WvtuiWrHCpYnVxtvMfe2LI,5544
+dbos/_schemas/system_database.py,sha256=tQAFCnEyZ7bEXZm3FbGIYk5SNGk-AHA3R_vuR0hfH8s,5717
 dbos/_serialization.py,sha256=ZGrkN5UclSLOqMVZgYpT72pw1l888ZXRoYuu3pIg3PA,2957
-dbos/_sys_db.py,sha256=FDboSk58CyQCAFjOF_KMLnRtIw05OL3IpJHT1qwKEKo,87596
+dbos/_sys_db.py,sha256=67R1EhifDJofDzUG9DZiX9UTAHshyuPNGQ7k3P3z004,85149
 dbos/_sys_db_postgres.py,sha256=_3m3hF6Pc23iZfUlIFYtDuC1Tw6KsjYqnDQE0HZpjt4,6965
 dbos/_sys_db_sqlite.py,sha256=ifjKdy-Z9vlVIBf5L6XnSaNjiBdvqPE73asVHim4A5Q,6998
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
@@ -48,12 +48,12 @@ dbos/_templates/dbos-db-starter/migrations/create_table.py.dbos,sha256=pVm2Q0Asx
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=jTlTkb5vUr_Ai5W9JIJf6FpYjAL0IWL52EWM_HXsi54,3958
 dbos/_utils.py,sha256=ZdoM1MDbHnlJrh31zfhp3iX62bAxK1kyvMwXnltC_84,1779
-dbos/_workflow_commands.py,sha256=k-i1bCfNrux43BHLT8wQ-l-MVZX3D6LGZLH7-uuiDRo,4951
+dbos/_workflow_commands.py,sha256=ueTQXri2H4np6Sb2Citz3XEqG3a5jg_Q5NEFAx4YHmg,5160
 dbos/cli/_github_init.py,sha256=R_94Fnn40CAmPy-zM00lwHi0ndyfv57TmIooADjmag4,3378
 dbos/cli/_template_init.py,sha256=AltKk256VocgvxLpuTxpjJyACrdHFjbGoqYhHzeLae4,2649
-dbos/cli/cli.py,sha256=hPZJmrQZWn8mcXou7DHaHl8luSEQTEWaYlnIsLw8WY4,27150
+dbos/cli/cli.py,sha256=AHz_JJj_qWCTRV8yT1RSA-hISFVIJrE9eUalApw9sxg,27149
 dbos/cli/migration.py,sha256=I0_0ngWTuCPQf6Symbpd0lizaxWUKe3uTYEmuCmsrdU,3775
 dbos/dbos-config.schema.json,sha256=47wofTZ5jlFynec7bG0L369tAXbRQQ2euBxBXvg4m9c,1730
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-2.4.0a1.dist-info/RECORD,,
+dbos-2.4.0a3.dist-info/RECORD,,