dbos 0.26.0a14__py3-none-any.whl → 0.26.0a18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_admin_server.py CHANGED
@@ -7,6 +7,7 @@ from functools import partial
  from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
  from typing import TYPE_CHECKING, Any, List, TypedDict

+ from ._error import DBOSException
  from ._logger import dbos_logger
  from ._recovery import recover_pending_workflows
  from ._utils import GlobalParams
@@ -22,6 +23,7 @@ _workflow_queues_metadata_path = "/dbos-workflow-queues-metadata"
  # /workflows/:workflow_id/resume
  # /workflows/:workflow_id/restart
  # /workflows/:workflow_id/steps
+ # /workflows/:workflow_id/fork


  class AdminServer:
@@ -123,6 +125,9 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
  restart_match = re.match(
  r"^/workflows/(?P<workflow_id>[^/]+)/restart$", self.path
  )
+ fork_match = re.match(
+ r"^/workflows/(?P<workflow_id>[^/]+)/fork$", self.path
+ )
  resume_match = re.match(
  r"^/workflows/(?P<workflow_id>[^/]+)/resume$", self.path
  )
@@ -130,7 +135,23 @@
  r"^/workflows/(?P<workflow_id>[^/]+)/cancel$", self.path
  )

- if restart_match:
+ if fork_match:
+ workflow_id = fork_match.group("workflow_id")
+ try:
+ data = json.loads(post_data.decode("utf-8"))
+ start_step: int = data.get("start_step", 1)
+ self._handle_fork(workflow_id, start_step)
+ except (json.JSONDecodeError, AttributeError) as e:
+ self.send_response(500)
+ self.send_header("Content-Type", "application/json")
+ self.end_headers()
+ self.wfile.write(
+ json.dumps({"error": f"Invalid JSON input: {str(e)}"}).encode(
+ "utf-8"
+ )
+ )
+ return
+ elif restart_match:
  workflow_id = restart_match.group("workflow_id")
  self._handle_restart(workflow_id)
  elif resume_match:
@@ -147,10 +168,33 @@
  return # Disable admin server request logging

  def _handle_restart(self, workflow_id: str) -> None:
- self.dbos.restart_workflow(workflow_id)
- print("Restarting workflow", workflow_id)
- self.send_response(204)
- self._end_headers()
+ try:
+ print(f"Restarting workflow {workflow_id}")
+ self.dbos.restart_workflow(workflow_id)
+ self.send_response(204)
+ self._end_headers()
+ except DBOSException as e:
+ print(f"Error restarting workflow: {e}")
+ self.send_response(500)
+ response_body = json.dumps({"error": str(e)}).encode("utf-8")
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(response_body)))
+ self.end_headers()
+ self.wfile.write(response_body)
+
+ def _handle_fork(self, workflow_id: str, start_step: int) -> None:
+ try:
+ self.dbos.fork_workflow(workflow_id, start_step)
+ self.send_response(204)
+ self._end_headers()
+ except DBOSException as e:
+ print(f"Error forking workflow: {e}")
+ self.send_response(500)
+ response_body = json.dumps({"error": str(e)}).encode("utf-8")
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(response_body)))
+ self.end_headers()
+ self.wfile.write(response_body)

  def _handle_resume(self, workflow_id: str) -> None:
  print("Resuming workflow", workflow_id)
dbos/_app_db.py CHANGED
@@ -228,3 +228,59 @@ class ApplicationDatabase:
  )
  for row in rows
  ]
+
+ def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
+ with self.engine.begin() as conn:
+ max_function_id_row = conn.execute(
+ sa.select(
+ sa.func.max(ApplicationSchema.transaction_outputs.c.function_id)
+ ).where(
+ ApplicationSchema.transaction_outputs.c.workflow_uuid
+ == workflow_uuid
+ )
+ ).fetchone()
+
+ max_function_id = max_function_id_row[0] if max_function_id_row else None
+
+ return max_function_id
+
+ def clone_workflow_transactions(
+ self, src_workflow_id: str, forked_workflow_id: str, start_step: int
+ ) -> None:
+ """
+ Copies all steps from dbos.transctions_outputs where function_id < input function_id
+ into a new workflow_uuid. Returns the new workflow_uuid.
+ """
+
+ with self.engine.begin() as conn:
+
+ insert_stmt = sa.insert(ApplicationSchema.transaction_outputs).from_select(
+ [
+ "workflow_uuid",
+ "function_id",
+ "output",
+ "error",
+ "txn_id",
+ "txn_snapshot",
+ "executor_id",
+ "function_name",
+ ],
+ sa.select(
+ sa.literal(forked_workflow_id).label("workflow_uuid"),
+ ApplicationSchema.transaction_outputs.c.function_id,
+ ApplicationSchema.transaction_outputs.c.output,
+ ApplicationSchema.transaction_outputs.c.error,
+ ApplicationSchema.transaction_outputs.c.txn_id,
+ ApplicationSchema.transaction_outputs.c.txn_snapshot,
+ ApplicationSchema.transaction_outputs.c.executor_id,
+ ApplicationSchema.transaction_outputs.c.function_name,
+ ).where(
+ (
+ ApplicationSchema.transaction_outputs.c.workflow_uuid
+ == src_workflow_id
+ )
+ & (ApplicationSchema.transaction_outputs.c.function_id < start_step)
+ ),
+ )
+
+ conn.execute(insert_stmt)
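The clone is a single INSERT ... SELECT that re-keys rows under the forked workflow ID entirely inside the database, so transaction outputs never round-trip through the client. A runnable sketch of the same SQLAlchemy from_select pattern against a throwaway SQLite table (the table and values are illustrative, not the real DBOS schema):

    import sqlalchemy as sa

    # Toy stand-in for dbos.transaction_outputs.
    metadata = sa.MetaData()
    outputs = sa.Table(
        "outputs", metadata,
        sa.Column("workflow_uuid", sa.String, primary_key=True),
        sa.Column("function_id", sa.Integer, primary_key=True),
        sa.Column("output", sa.String),
    )

    engine = sa.create_engine("sqlite://")
    metadata.create_all(engine)

    with engine.begin() as conn:
        conn.execute(sa.insert(outputs), [
            {"workflow_uuid": "src", "function_id": 1, "output": "a"},
            {"workflow_uuid": "src", "function_id": 2, "output": "b"},
            {"workflow_uuid": "src", "function_id": 3, "output": "c"},
        ])
        start_step = 3
        # INSERT ... SELECT: copy rows below start_step under the forked ID,
        # substituting the new key via a labeled literal, as above.
        conn.execute(
            sa.insert(outputs).from_select(
                ["workflow_uuid", "function_id", "output"],
                sa.select(
                    sa.literal("forked").label("workflow_uuid"),
                    outputs.c.function_id,
                    outputs.c.output,
                ).where(
                    (outputs.c.workflow_uuid == "src")
                    & (outputs.c.function_id < start_step)
                ),
            )
        )
        copied = conn.execute(
            sa.select(outputs).where(outputs.c.workflow_uuid == "forked")
        ).fetchall()
    print(copied)  # rows for function_id 1 and 2 only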
dbos/_client.py CHANGED
@@ -124,12 +124,7 @@ class DBOSClient:
  "kwargs": kwargs,
  }

- wf_status = self._sys_db.insert_workflow_status(status)
- self._sys_db.update_workflow_inputs(
- workflow_id, _serialization.serialize_args(inputs)
- )
- if wf_status == WorkflowStatusString.ENQUEUED.value:
- self._sys_db.enqueue(workflow_id, queue_name)
+ self._sys_db.init_workflow(status, _serialization.serialize_args(inputs))
  return workflow_id

  def enqueue(
@@ -184,7 +179,8 @@
  "app_id": None,
  "app_version": None,
  }
- self._sys_db.insert_workflow_status(status)
+ with self._sys_db.engine.begin() as conn:
+ self._sys_db.insert_workflow_status(status, conn)
  self._sys_db.send(status["workflow_uuid"], 0, destination_id, message, topic)

  async def send_async(
@@ -233,6 +229,7 @@
  limit: Optional[int] = None,
  offset: Optional[int] = None,
  sort_desc: bool = False,
+ workflow_id_prefix: Optional[str] = None,
  ) -> List[WorkflowStatus]:
  return list_workflows(
  self._sys_db,
@@ -246,6 +243,7 @@
  limit=limit,
  offset=offset,
  sort_desc=sort_desc,
+ workflow_id_prefix=workflow_id_prefix,
  )

  async def list_workflows_async(
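The new workflow_id_prefix filter composes with the existing ones. A usage sketch, assuming an already-constructed DBOSClient (construction elided) and a hypothetical convention of prefixing workflow IDs with "etl-":

    # List recent successful workflows whose IDs start with "etl-".
    workflows = client.list_workflows(
        workflow_id_prefix="etl-",
        status="SUCCESS",
        sort_desc=True,
        limit=20,
    )
    for wf in workflows:
        print(wf.workflow_id, wf.status)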
dbos/_core.py CHANGED
@@ -280,18 +280,12 @@ def _init_workflow(
  raise DBOSNonExistentWorkflowError(wfid)
  wf_status = get_status_result["status"]
  else:
- # Synchronously record the status and inputs for workflows
- # TODO: Make this transactional (and with the queue step below)
- wf_status = dbos._sys_db.insert_workflow_status(
- status, max_recovery_attempts=max_recovery_attempts
+ wf_status = dbos._sys_db.init_workflow(
+ status,
+ _serialization.serialize_args(inputs),
+ max_recovery_attempts=max_recovery_attempts,
  )

- # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
- dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
-
- if queue is not None and wf_status == WorkflowStatusString.ENQUEUED.value:
- dbos._sys_db.enqueue(wfid, queue)
-
  status["status"] = wf_status
  return status

dbos/_dbos.py CHANGED
@@ -363,13 +363,13 @@ class DBOS:
  check_config_consistency(name=unvalidated_config["name"])

  if unvalidated_config is not None:
- self.config: ConfigFile = process_config(data=unvalidated_config)
+ self._config: ConfigFile = process_config(data=unvalidated_config)
  else:
  raise ValueError("No valid configuration was loaded.")

- set_env_vars(self.config)
- config_logger(self.config)
- dbos_tracer.config(self.config)
+ set_env_vars(self._config)
+ config_logger(self._config)
+ dbos_tracer.config(self._config)
  dbos_logger.info("Initializing DBOS")

  # If using FastAPI, set up middleware and lifecycle events
@@ -453,19 +453,19 @@
  self._executor_field = ThreadPoolExecutor(max_workers=64)
  self._background_event_loop.start()
  self._sys_db_field = SystemDatabase(
- self.config["database"], debug_mode=debug_mode
+ self._config["database"], debug_mode=debug_mode
  )
  self._app_db_field = ApplicationDatabase(
- self.config["database"], debug_mode=debug_mode
+ self._config["database"], debug_mode=debug_mode
  )

  if debug_mode:
  return

- admin_port = self.config.get("runtimeConfig", {}).get("admin_port")
+ admin_port = self._config.get("runtimeConfig", {}).get("admin_port")
  if admin_port is None:
  admin_port = 3001
- run_admin_server = self.config.get("runtimeConfig", {}).get(
+ run_admin_server = self._config.get("runtimeConfig", {}).get(
  "run_admin_server"
  )
  if run_admin_server:
@@ -563,7 +563,7 @@
  assert (
  not self._launched
  ), "The system database cannot be reset after DBOS is launched. Resetting the system database is a destructive operation that should only be used in a test environment."
- reset_system_database(self.config)
+ reset_system_database(self._config)

  def _destroy(self) -> None:
  self._initialized = False
@@ -960,14 +960,45 @@
  def restart_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
  """Restart a workflow with a new workflow ID"""

+ return cls.fork_workflow(workflow_id, 1)
+
+ @classmethod
+ def fork_workflow(
+ cls, workflow_id: str, start_step: int = 1
+ ) -> WorkflowHandle[Any]:
+ """Restart a workflow with a new workflow ID"""
+
+ def get_max_function_id(workflow_uuid: str) -> int:
+ max_transactions = (
+ _get_dbos_instance()._app_db.get_max_function_id(workflow_uuid) or 0
+ )
+ max_operations = (
+ _get_dbos_instance()._sys_db.get_max_function_id(workflow_uuid) or 0
+ )
+ return max(max_transactions, max_operations)
+
+ max_function_id = get_max_function_id(workflow_id)
+ if max_function_id > 0 and start_step > max_function_id:
+ raise DBOSException(
+ f"Cannot fork workflow {workflow_id} at step {start_step}. The workflow has {max_function_id} steps."
+ )
+
  def fn() -> str:
- dbos_logger.info(f"Restarting workflow: {workflow_id}")
- return _get_dbos_instance()._sys_db.fork_workflow(workflow_id)
+ forked_workflow_id = str(uuid.uuid4())
+ dbos_logger.info(f"Forking workflow: {workflow_id} from step {start_step}")
+
+ _get_dbos_instance()._app_db.clone_workflow_transactions(
+ workflow_id, forked_workflow_id, start_step
+ )
+
+ return _get_dbos_instance()._sys_db.fork_workflow(
+ workflow_id, forked_workflow_id, start_step
+ )

- forked_workflow_id = _get_dbos_instance()._sys_db.call_function_as_step(
- fn, "DBOS.restartWorkflow"
+ new_id = _get_dbos_instance()._sys_db.call_function_as_step(
+ fn, "DBOS.forkWorkflow"
  )
- return cls.retrieve_workflow(forked_workflow_id)
+ return cls.retrieve_workflow(new_id)

  @classmethod
  def list_workflows(
@@ -983,6 +1014,7 @@
  limit: Optional[int] = None,
  offset: Optional[int] = None,
  sort_desc: bool = False,
+ workflow_id_prefix: Optional[str] = None,
  ) -> List[WorkflowStatus]:
  def fn() -> List[WorkflowStatus]:
  return list_workflows(
@@ -997,6 +1029,7 @@
  limit=limit,
  offset=offset,
  sort_desc=sort_desc,
+ workflow_id_prefix=workflow_id_prefix,
  )

  return _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1043,15 +1076,15 @@
  """Return the DBOS `ConfigFile` for the current context."""
  global _dbos_global_instance
  if _dbos_global_instance is not None:
- return _dbos_global_instance.config
+ return _dbos_global_instance._config
  reg = _get_or_create_dbos_registry()
  if reg.config is not None:
  return reg.config
- config = (
+ loaded_config = (
  load_config()
  ) # This will return the processed & validated config (with defaults)
- reg.config = config
- return config
+ reg.config = loaded_config
+ return loaded_config

  @classproperty
  def sql_session(cls) -> Session:
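With this change, restart_workflow is just fork_workflow pinned to step 1, and the requested step is validated against the highest recorded function ID across both databases. A usage sketch, assuming a launched DBOS app and a hypothetical completed workflow "wf-123":

    from dbos import DBOS
    from dbos._error import DBOSException

    try:
        # Re-execute from step 3; earlier steps replay from the copied outputs.
        handle = DBOS.fork_workflow("wf-123", start_step=3)
        print("forked workflow:", handle.get_workflow_id())
        print("result:", handle.get_result())
    except DBOSException as e:
        # Raised when start_step exceeds the workflow's recorded step count.
        print("fork rejected:", e)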
dbos/_sys_db.py CHANGED
@@ -132,6 +132,9 @@ class GetWorkflowsInput:
  self.sort_desc: bool = (
  False # If true, sort by created_at in DESC order. Default false (in ASC order).
  )
+ self.workflow_id_prefix: Optional[str] = (
+ None # If set, search for workflow IDs starting with this string
+ )


  class GetQueuedWorkflowsInput(TypedDict):
@@ -282,6 +285,7 @@ class SystemDatabase:
  def insert_workflow_status(
  self,
  status: WorkflowStatusInternal,
+ conn: sa.Connection,
  *,
  max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
  ) -> WorkflowStatuses:
@@ -325,8 +329,7 @@ class SystemDatabase:

  cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name) # type: ignore

- with self.engine.begin() as c:
- results = c.execute(cmd)
+ results = conn.execute(cmd)
  row = results.fetchone()
  if row is not None:
@@ -352,28 +355,30 @@ class SystemDatabase:
  # Every time we start executing a workflow (and thus attempt to insert its status), we increment `recovery_attempts` by 1.
  # When this number becomes equal to `maxRetries + 1`, we mark the workflow as `RETRIES_EXCEEDED`.
  if recovery_attempts > max_recovery_attempts + 1:
- with self.engine.begin() as c:
- c.execute(
- sa.delete(SystemSchema.workflow_queue).where(
- SystemSchema.workflow_queue.c.workflow_uuid
- == status["workflow_uuid"]
- )
+ delete_cmd = sa.delete(SystemSchema.workflow_queue).where(
+ SystemSchema.workflow_queue.c.workflow_uuid
+ == status["workflow_uuid"]
+ )
+ conn.execute(delete_cmd)
+
+ dlq_cmd = (
+ sa.update(SystemSchema.workflow_status)
+ .where(
+ SystemSchema.workflow_status.c.workflow_uuid
+ == status["workflow_uuid"]
  )
- c.execute(
- sa.update(SystemSchema.workflow_status)
- .where(
- SystemSchema.workflow_status.c.workflow_uuid
- == status["workflow_uuid"]
- )
- .where(
- SystemSchema.workflow_status.c.status
- == WorkflowStatusString.PENDING.value
- )
- .values(
- status=WorkflowStatusString.RETRIES_EXCEEDED.value,
- queue_name=None,
- )
+ .where(
+ SystemSchema.workflow_status.c.status
+ == WorkflowStatusString.PENDING.value
+ )
+ .values(
+ status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+ queue_name=None,
  )
+ )
+ conn.execute(dlq_cmd)
+ # Need to commit here because we're throwing an exception
+ conn.commit()
  raise DBOSDeadLetterQueueError(
  status["workflow_uuid"], max_recovery_attempts
  )
@@ -489,15 +494,29 @@
  .values(status=WorkflowStatusString.ENQUEUED.value, recovery_attempts=0)
  )

- def fork_workflow(self, original_workflow_id: str) -> str:
+ def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
+ with self.engine.begin() as conn:
+ max_function_id_row = conn.execute(
+ sa.select(
+ sa.func.max(SystemSchema.operation_outputs.c.function_id)
+ ).where(SystemSchema.operation_outputs.c.workflow_uuid == workflow_uuid)
+ ).fetchone()
+
+ max_function_id = max_function_id_row[0] if max_function_id_row else None
+
+ return max_function_id
+
+ def fork_workflow(
+ self, original_workflow_id: str, forked_workflow_id: str, start_step: int = 1
+ ) -> str:
+
  status = self.get_workflow_status(original_workflow_id)
  if status is None:
  raise Exception(f"Workflow {original_workflow_id} not found")
  inputs = self.get_workflow_inputs(original_workflow_id)
  if inputs is None:
  raise Exception(f"Workflow {original_workflow_id} not found")
- # Generate a random ID for the forked workflow
- forked_workflow_id = str(uuid.uuid4())
+
  with self.engine.begin() as c:
  # Create an entry for the forked workflow with the same
  # initial values as the original.
@@ -524,6 +543,37 @@
  inputs=_serialization.serialize_args(inputs),
  )
  )
+
+ if start_step > 1:
+
+ # Copy the original workflow's outputs into the forked workflow
+ insert_stmt = sa.insert(SystemSchema.operation_outputs).from_select(
+ [
+ "workflow_uuid",
+ "function_id",
+ "output",
+ "error",
+ "function_name",
+ "child_workflow_id",
+ ],
+ sa.select(
+ sa.literal(forked_workflow_id).label("workflow_uuid"),
+ SystemSchema.operation_outputs.c.function_id,
+ SystemSchema.operation_outputs.c.output,
+ SystemSchema.operation_outputs.c.error,
+ SystemSchema.operation_outputs.c.function_name,
+ SystemSchema.operation_outputs.c.child_workflow_id,
+ ).where(
+ (
+ SystemSchema.operation_outputs.c.workflow_uuid
+ == original_workflow_id
+ )
+ & (SystemSchema.operation_outputs.c.function_id < start_step)
+ ),
+ )
+
+ c.execute(insert_stmt)
+
  # Enqueue the forked workflow on the internal queue
  c.execute(
  pg.insert(SystemSchema.workflow_queue).values(
@@ -607,7 +657,7 @@
  time.sleep(1)

  def update_workflow_inputs(
- self, workflow_uuid: str, inputs: str, conn: Optional[sa.Connection] = None
+ self, workflow_uuid: str, inputs: str, conn: sa.Connection
  ) -> None:
  if self._debug_mode:
  raise Exception("called update_workflow_inputs in debug mode")
@@ -624,11 +674,8 @@
  )
  .returning(SystemSchema.workflow_inputs.c.inputs)
  )
- if conn is not None:
- row = conn.execute(cmd).fetchone()
- else:
- with self.engine.begin() as c:
- row = c.execute(cmd).fetchone()
+
+ row = conn.execute(cmd).fetchone()
  if row is not None and row[0] != inputs:
  # In a distributed environment, scheduled workflows are enqueued multiple times with slightly different timestamps
  if not workflow_uuid.startswith("sched-"):
@@ -689,6 +736,12 @@
  query = query.where(
  SystemSchema.workflow_status.c.workflow_uuid.in_(input.workflow_ids)
  )
+ if input.workflow_id_prefix:
+ query = query.where(
+ SystemSchema.workflow_status.c.workflow_uuid.startswith(
+ input.workflow_id_prefix
+ )
+ )
  if input.limit:
  query = query.limit(input.limit)
  if input.offset:
@@ -1335,18 +1388,17 @@
  )
  return value

- def enqueue(self, workflow_id: str, queue_name: str) -> None:
+ def enqueue(self, workflow_id: str, queue_name: str, conn: sa.Connection) -> None:
  if self._debug_mode:
  raise Exception("called enqueue in debug mode")
- with self.engine.begin() as c:
- c.execute(
- pg.insert(SystemSchema.workflow_queue)
- .values(
- workflow_uuid=workflow_id,
- queue_name=queue_name,
- )
- .on_conflict_do_nothing()
+ conn.execute(
+ pg.insert(SystemSchema.workflow_queue)
+ .values(
+ workflow_uuid=workflow_id,
+ queue_name=queue_name,
  )
+ .on_conflict_do_nothing()
+ )

  def start_queued_workflows(
  self, queue: "Queue", executor_id: str, app_version: str
@@ -1601,6 +1653,30 @@
  )
  return result

+ def init_workflow(
+ self,
+ status: WorkflowStatusInternal,
+ inputs: str,
+ *,
+ max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+ ) -> WorkflowStatuses:
+ """
+ Synchronously record the status and inputs for workflows in a single transaction
+ """
+ with self.engine.begin() as conn:
+ wf_status = self.insert_workflow_status(
+ status, conn, max_recovery_attempts=max_recovery_attempts
+ )
+ # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
+ self.update_workflow_inputs(status["workflow_uuid"], inputs, conn)
+
+ if (
+ status["queue_name"] is not None
+ and wf_status == WorkflowStatusString.ENQUEUED.value
+ ):
+ self.enqueue(status["workflow_uuid"], status["queue_name"], conn)
+ return wf_status
+

  def reset_system_database(config: ConfigFile) -> None:
  sysdb_name = (
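The thread running through this file: insert_workflow_status, update_workflow_inputs, and enqueue now take the caller's connection instead of opening their own, and init_workflow owns the transaction, closing the old "TODO: Make this transactional" gap. A generic, runnable sketch of that connection-threading pattern (toy tables and helpers, not the DBOS schema):

    import sqlalchemy as sa

    metadata = sa.MetaData()
    status = sa.Table(
        "status", metadata,
        sa.Column("id", sa.String, primary_key=True),
        sa.Column("state", sa.String),
    )
    queue = sa.Table(
        "queue", metadata,
        sa.Column("id", sa.String, primary_key=True),
    )

    # Each helper takes the caller's connection rather than opening its own
    # engine.begin() block, so the caller decides the transaction boundary.
    def insert_status(conn: sa.Connection, wf_id: str) -> None:
        conn.execute(sa.insert(status).values(id=wf_id, state="ENQUEUED"))

    def enqueue(conn: sa.Connection, wf_id: str) -> None:
        conn.execute(sa.insert(queue).values(id=wf_id))

    def init_workflow(engine: sa.Engine, wf_id: str) -> None:
        # One transaction: either both rows commit or neither does.
        with engine.begin() as conn:
            insert_status(conn, wf_id)
            enqueue(conn, wf_id)

    engine = sa.create_engine("sqlite://")
    metadata.create_all(engine)
    init_workflow(engine, "wf-1")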
dbos/_workflow_commands.py CHANGED
@@ -1,4 +1,5 @@
  import json
+ import uuid
  from typing import Any, List, Optional

  from . import _serialization
@@ -70,6 +71,7 @@ def list_workflows(
  offset: Optional[int] = None,
  sort_desc: bool = False,
  request: bool = False,
+ workflow_id_prefix: Optional[str] = None,
  ) -> List[WorkflowStatus]:
  input = GetWorkflowsInput()
  input.workflow_ids = workflow_ids
@@ -82,6 +84,7 @@ def list_workflows(
  input.name = name
  input.offset = offset
  input.sort_desc = sort_desc
+ input.workflow_id_prefix = workflow_id_prefix

  output: GetWorkflowsOutput = sys_db.get_workflows(input)
  infos: List[WorkflowStatus] = []
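The prefix threads through GetWorkflowsInput into the query built in _sys_db.get_workflows, where SQLAlchemy's startswith renders as a LIKE against the ID column. A small illustration with a stand-in table:

    import sqlalchemy as sa

    metadata = sa.MetaData()
    t = sa.Table("workflow_status", metadata, sa.Column("workflow_uuid", sa.String))

    # .startswith(...) compiles to roughly:
    #   workflow_uuid LIKE :workflow_uuid_1 || '%'
    stmt = sa.select(t).where(t.c.workflow_uuid.startswith("etl-"))
    print(stmt)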
dbos/cli/cli.py CHANGED
@@ -433,13 +433,60 @@ def restart(
  ] = 3001,
  ) -> None:
  response = requests.post(
- f"http://{host}:{port}/workflows/{uuid}/restart", json=[], timeout=5
+ f"http://{host}:{port}/workflows/{uuid}/restart",
+ json=[],
+ timeout=5,
  )

  if response.status_code == 204:
  print(f"Workflow {uuid} has been restarted")
  else:
- print(f"Failed to resume workflow {uuid}. Status code: {response.status_code}")
+ error_message = response.json().get("error", "Unknown error")
+ print(
+ f"Failed to restart workflow {uuid}. "
+ f"Status code: {response.status_code}. "
+ f"Error: {error_message}"
+ )
+
+
+ @workflow.command(
+ help="fork a workflow from the beginning with a new id and from a step"
+ )
+ def fork(
+ uuid: Annotated[str, typer.Argument()],
+ host: Annotated[
+ typing.Optional[str],
+ typer.Option("--host", "-H", help="Specify the admin host"),
+ ] = "localhost",
+ port: Annotated[
+ typing.Optional[int],
+ typer.Option("--port", "-p", help="Specify the admin port"),
+ ] = 3001,
+ step: Annotated[
+ typing.Optional[int],
+ typer.Option(
+ "--step",
+ "-s",
+ help="Restart from this step (default: first step)",
+ ),
+ ] = 1,
+ ) -> None:
+ print(f"Forking workflow {uuid} from step {step}")
+ response = requests.post(
+ f"http://{host}:{port}/workflows/{uuid}/fork",
+ json={"start_step": step},
+ timeout=5,
+ )
+
+ if response.status_code == 204:
+ print(f"Workflow {uuid} has been forked")
+ else:
+ error_message = response.json().get("error", "Unknown error")
+ print(
+ f"Failed to fork workflow {uuid}. "
+ f"Status code: {response.status_code}. "
+ f"Error: {error_message}"
+ )


  @queue.command(name="list", help="List enqueued functions for your application")
dbos-0.26.0a18.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: dbos
- Version: 0.26.0a14
+ Version: 0.26.0a18
  Summary: Ultra-lightweight durable execution in Python
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
  License: MIT
dbos-0.26.0a18.dist-info/RECORD CHANGED
@@ -1,19 +1,19 @@
- dbos-0.26.0a14.dist-info/METADATA,sha256=KmqNCgW2bcxs1qddgonsP0MDPe1tt5tlUvs07bN-XDY,5554
- dbos-0.26.0a14.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
- dbos-0.26.0a14.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
- dbos-0.26.0a14.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
+ dbos-0.26.0a18.dist-info/METADATA,sha256=QG3XkqovR0FvEIL1_sHK6K80-comGqhpyC1LCWUDEzA,5554
+ dbos-0.26.0a18.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+ dbos-0.26.0a18.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+ dbos-0.26.0a18.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
  dbos/__init__.py,sha256=3NQfGlBiiUSM_v88STdVP3rNZvGkUL_9WbSotKb8Voo,873
  dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
- dbos/_admin_server.py,sha256=vxPG_YJ6lYrkfPCSp42FiATVLBOij7Fm52Yngg5Z_tE,7027
- dbos/_app_db.py,sha256=IwnNlHEQYp2bl5BM66vVPFa40h8DOtvRgUWTJ1dz20A,8963
+ dbos/_admin_server.py,sha256=RrbABfR1D3p9c_QLrCSrgFuYce6FKi0fjMRIYLjO_Y8,9038
+ dbos/_app_db.py,sha256=Q9lEyCJFoZMTlnjMO8Pj8bczVmVWyDOP8qPQ6l5PpEU,11241
  dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
- dbos/_client.py,sha256=5iaoFsu5wAqwjjj3EWusZ1eDbBAW8FwYazhokdCJ9h4,10964
+ dbos/_client.py,sha256=QiIR-mwRYb1ffgwGR96ICQgFORki2QpR5najtVJ2WsA,10906
  dbos/_conductor/conductor.py,sha256=HYzVL29IMMrs2Mnms_7cHJynCnmmEN5SDQOMjzn3UoU,16840
  dbos/_conductor/protocol.py,sha256=xN7pmooyF1pqbH1b6WhllU5718P7zSb_b0KCwA6bzcs,6716
  dbos/_context.py,sha256=I8sLkdKTTkZEz7wG-MjynaQB6XEF2bLXuwNksiauP7w,19430
- dbos/_core.py,sha256=de8GecFmW5DNf5dYfnpSX3IDO24Wc6pBpCC1VZ1iVyI,45505
+ dbos/_core.py,sha256=uxDIJui4WS_2V1k2np0Ifue_IRzLTyq-c52bgZSQYn4,45118
  dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
- dbos/_dbos.py,sha256=byXhhiG14nS3iU85NphvQ26vvnJ-gu1tMwTIoUc3dYc,46239
+ dbos/_dbos.py,sha256=jtvBQOvwdXFfknx9pDHgKC4DuiH58ICAs_0NoJQMI4w,47526
  dbos/_dbos_config.py,sha256=m05IFjM0jSwZBsnFMF_4qP2JkjVFc0gqyM2tnotXq20,20636
  dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
  dbos/_docker_pg_helper.py,sha256=NmcgqmR5rQA_4igfeqh8ugNT2z3YmoOvuep_MEtxTiY,5854
@@ -45,7 +45,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
  dbos/_schemas/system_database.py,sha256=W9eSpL7SZzQkxcEZ4W07BOcwkkDr35b9oCjUOgfHWek,5336
  dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
- dbos/_sys_db.py,sha256=vQzr6qACjtmjVS151OJ45BlhOeB1tBBZemyEzOsy5nc,68343
+ dbos/_sys_db.py,sha256=kfNR9R7rQ6MTqBuPt4OI5nZElIJNXlGuUjG_ypGKHWI,71195
  dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
  dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dbos/_templates/dbos-db-starter/__package/main.py,sha256=nJMN3ZD2lmwg4Dcgmiwqc-tQGuCJuJal2Xl85iA277U,2453
@@ -58,11 +58,11 @@ dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sh
  dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
  dbos/_tracer.py,sha256=dFDSFlta-rfA3-ahIRLYwnnoAOmlavdxAGllqwFgnCA,2440
  dbos/_utils.py,sha256=nFRUHzVjXG5AusF85AlYHikj63Tzi-kQm992ihsrAxA,201
- dbos/_workflow_commands.py,sha256=Tf7_hZQoPgP90KHQjMNlBggCNrLLCNRJxHtAJLvarc4,6153
+ dbos/_workflow_commands.py,sha256=hHNcW4zopgxVXWfg3flHwqZEFGYpYp8ZAfUXmqiULUk,6261
  dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
  dbos/cli/_template_init.py,sha256=-WW3kbq0W_Tq4WbMqb1UGJG3xvJb3woEY5VspG95Srk,2857
- dbos/cli/cli.py,sha256=Lb_RYmXoT5KH0xDbwaYpROE4c-svZ0eCq2Kxg7cAxTw,16537
+ dbos/cli/cli.py,sha256=1qCTs__A9LOEfU44XZ6TufwmRwe68ZEwbWEPli3vnVM,17873
  dbos/dbos-config.schema.json,sha256=i7jcxXqByKq0Jzv3nAUavONtj03vTwj6vWP4ylmBr8o,5694
  dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
  version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
- dbos-0.26.0a14.dist-info/RECORD,,
+ dbos-0.26.0a18.dist-info/RECORD,,