dbos 0.21.0a3__tar.gz → 0.21.0a4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbos might be problematic; see the release notes for details.

Files changed (90)
  1. {dbos-0.21.0a3 → dbos-0.21.0a4}/PKG-INFO +1 -1
  2. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_dbos.py +4 -5
  3. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_sys_db.py +42 -7
  4. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_workflow_commands.py +8 -26
  5. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/cli/cli.py +4 -4
  6. {dbos-0.21.0a3 → dbos-0.21.0a4}/pyproject.toml +1 -1
  7. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_admin_server.py +35 -30
  8. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_failures.py +21 -40
  9. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_queue.py +188 -0
  10. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_workflow_cmds.py +14 -14
  11. {dbos-0.21.0a3 → dbos-0.21.0a4}/LICENSE +0 -0
  12. {dbos-0.21.0a3 → dbos-0.21.0a4}/README.md +0 -0
  13. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/__init__.py +0 -0
  14. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_admin_server.py +0 -0
  15. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_app_db.py +0 -0
  16. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_classproperty.py +0 -0
  17. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_cloudutils/authentication.py +0 -0
  18. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_cloudutils/cloudutils.py +0 -0
  19. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_cloudutils/databases.py +0 -0
  20. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_context.py +0 -0
  21. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_core.py +0 -0
  22. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_croniter.py +0 -0
  23. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_db_wizard.py +0 -0
  24. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_dbos_config.py +0 -0
  25. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_error.py +0 -0
  26. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_fastapi.py +0 -0
  27. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_flask.py +0 -0
  28. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_kafka.py +0 -0
  29. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_kafka_message.py +0 -0
  30. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_logger.py +0 -0
  31. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/env.py +0 -0
  32. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/script.py.mako +0 -0
  33. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  34. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  35. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  36. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  37. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  38. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  39. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  40. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_outcome.py +0 -0
  41. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_queue.py +0 -0
  42. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_recovery.py +0 -0
  43. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_registrations.py +0 -0
  44. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_request.py +0 -0
  45. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_roles.py +0 -0
  46. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_scheduler.py +0 -0
  47. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_schemas/__init__.py +0 -0
  48. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_schemas/application_database.py +0 -0
  49. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_schemas/system_database.py +0 -0
  50. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_serialization.py +0 -0
  51. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/README.md +0 -0
  52. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  53. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/__package/main.py +0 -0
  54. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  55. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
  56. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  57. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
  58. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
  59. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
  60. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  61. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/_tracer.py +0 -0
  62. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/cli/_github_init.py +0 -0
  63. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/cli/_template_init.py +0 -0
  64. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/dbos-config.schema.json +0 -0
  65. {dbos-0.21.0a3 → dbos-0.21.0a4}/dbos/py.typed +0 -0
  66. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/__init__.py +0 -0
  67. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/atexit_no_ctor.py +0 -0
  68. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/atexit_no_launch.py +0 -0
  69. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/classdefs.py +0 -0
  70. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/conftest.py +0 -0
  71. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/more_classdefs.py +0 -0
  72. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/queuedworkflow.py +0 -0
  73. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_async.py +0 -0
  74. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_classdecorators.py +0 -0
  75. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_concurrency.py +0 -0
  76. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_config.py +0 -0
  77. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_croniter.py +0 -0
  78. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_dbos.py +0 -0
  79. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_fastapi.py +0 -0
  80. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_fastapi_roles.py +0 -0
  81. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_flask.py +0 -0
  82. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_kafka.py +0 -0
  83. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_outcome.py +0 -0
  84. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_package.py +0 -0
  85. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_scheduler.py +0 -0
  86. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_schema_migration.py +0 -0
  87. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_singleton.py +0 -0
  88. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_spans.py +0 -0
  89. {dbos-0.21.0a3 → dbos-0.21.0a4}/tests/test_sqlalchemy.py +0 -0
  90. {dbos-0.21.0a3 → dbos-0.21.0a4}/version/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dbos
3
- Version: 0.21.0a3
3
+ Version: 0.21.0a4
4
4
  Summary: Ultra-lightweight durable execution in Python
5
5
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
6
6
  License: MIT
@@ -800,14 +800,13 @@ class DBOS:
800
800
  @classmethod
801
801
  def cancel_workflow(cls, workflow_id: str) -> None:
802
802
  """Cancel a workflow by ID."""
803
- _get_dbos_instance()._sys_db.set_workflow_status(
804
- workflow_id, WorkflowStatusString.CANCELLED
805
- )
803
+ _get_dbos_instance()._sys_db.cancel_workflow(workflow_id)
806
804
 
807
805
  @classmethod
808
- def resume_workflow(cls, workflow_id: str) -> None:
806
+ def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
809
807
  """Resume a workflow by ID."""
810
- execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
808
+ _get_dbos_instance()._sys_db.resume_workflow(workflow_id)
809
+ return execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
811
810
 
812
811
  @classproperty
813
812
  def logger(cls) -> Logger:
@@ -390,20 +390,55 @@ class SystemDatabase:
390
390
  if status["workflow_uuid"] in self._temp_txn_wf_ids:
391
391
  self._exported_temp_txn_wf_status.add(status["workflow_uuid"])
392
392
 
393
- def set_workflow_status(
393
+ def cancel_workflow(
394
394
  self,
395
- workflow_uuid: str,
396
- status: WorkflowStatusString,
395
+ workflow_id: str,
397
396
  ) -> None:
398
397
  with self.engine.begin() as c:
399
- stmt = (
398
+ # Remove the workflow from the queues table so it does not block the table
399
+ c.execute(
400
+ sa.delete(SystemSchema.workflow_queue).where(
401
+ SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
402
+ )
403
+ )
404
+ # Set the workflow's status to CANCELLED
405
+ c.execute(
400
406
  sa.update(SystemSchema.workflow_status)
401
- .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
407
+ .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
402
408
  .values(
403
- status=status,
409
+ status=WorkflowStatusString.CANCELLED.value,
410
+ )
411
+ )
412
+
413
+ def resume_workflow(
414
+ self,
415
+ workflow_id: str,
416
+ ) -> None:
417
+ with self.engine.begin() as c:
418
+ # Check the status of the workflow. If it is complete, do nothing.
419
+ row = c.execute(
420
+ sa.select(
421
+ SystemSchema.workflow_status.c.status,
422
+ ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
423
+ ).fetchone()
424
+ if (
425
+ row is None
426
+ or row[0] == WorkflowStatusString.SUCCESS.value
427
+ or row[0] == WorkflowStatusString.ERROR.value
428
+ ):
429
+ return
430
+ # Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
431
+ c.execute(
432
+ sa.delete(SystemSchema.workflow_queue).where(
433
+ SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
404
434
  )
405
435
  )
406
- c.execute(stmt)
436
+ # Set the workflow's status to PENDING and clear its recovery attempts.
437
+ c.execute(
438
+ sa.update(SystemSchema.workflow_status)
439
+ .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
440
+ .values(status=WorkflowStatusString.PENDING.value, recovery_attempts=0)
441
+ )
407
442
 
408
443
  def get_workflow_status(
409
444
  self, workflow_uuid: str
@@ -1,23 +1,14 @@
1
- import importlib
2
- import os
3
- import sys
4
- from typing import Any, List, Optional, cast
1
+ from typing import List, Optional, cast
5
2
 
6
3
  import typer
7
- from rich import print
8
4
 
9
- from dbos import DBOS
10
-
11
- from . import _serialization, load_config
12
- from ._core import execute_workflow_by_id
13
- from ._dbos_config import ConfigFile, _is_valid_app_name
5
+ from . import _serialization
6
+ from ._dbos_config import ConfigFile
14
7
  from ._sys_db import (
15
8
  GetWorkflowsInput,
16
9
  GetWorkflowsOutput,
17
10
  SystemDatabase,
18
11
  WorkflowStatuses,
19
- WorkflowStatusInternal,
20
- WorkflowStatusString,
21
12
  )
22
13
 
23
14
 
@@ -41,7 +32,7 @@ class WorkflowInformation:
41
32
  queue_name: Optional[str]
42
33
 
43
34
 
44
- def _list_workflows(
35
+ def list_workflows(
45
36
  config: ConfigFile,
46
37
  li: int,
47
38
  user: Optional[str],
@@ -91,17 +82,13 @@ def _list_workflows(
91
82
  sys_db.destroy()
92
83
 
93
84
 
94
- def _get_workflow(
85
+ def get_workflow(
95
86
  config: ConfigFile, uuid: str, request: bool
96
87
  ) -> Optional[WorkflowInformation]:
97
- sys_db = None
98
-
99
88
  try:
100
89
  sys_db = SystemDatabase(config)
101
-
102
90
  info = _get_workflow_info(sys_db, uuid, request)
103
91
  return info
104
-
105
92
  except Exception as e:
106
93
  typer.echo(f"Error getting workflow: {e}")
107
94
  return None
@@ -110,18 +97,13 @@ def _get_workflow(
110
97
  sys_db.destroy()
111
98
 
112
99
 
113
- def _cancel_workflow(config: ConfigFile, uuid: str) -> None:
114
- # config = load_config()
115
- sys_db = None
116
-
100
+ def cancel_workflow(config: ConfigFile, uuid: str) -> None:
117
101
  try:
118
102
  sys_db = SystemDatabase(config)
119
- sys_db.set_workflow_status(uuid, WorkflowStatusString.CANCELLED)
120
- return
121
-
103
+ sys_db.cancel_workflow(uuid)
122
104
  except Exception as e:
123
105
  typer.echo(f"Failed to connect to DBOS system database: {e}")
124
- return None
106
+ raise e
125
107
  finally:
126
108
  if sys_db:
127
109
  sys_db.destroy()
@@ -19,7 +19,7 @@ from .. import load_config
19
19
  from .._app_db import ApplicationDatabase
20
20
  from .._dbos_config import _is_valid_app_name
21
21
  from .._sys_db import SystemDatabase, reset_system_database
22
- from .._workflow_commands import _cancel_workflow, _get_workflow, _list_workflows
22
+ from .._workflow_commands import cancel_workflow, get_workflow, list_workflows
23
23
  from ..cli._github_init import create_template_from_github
24
24
  from ._template_init import copy_template, get_project_name, get_templates_directory
25
25
 
@@ -282,7 +282,7 @@ def list(
282
282
  ] = None,
283
283
  ) -> None:
284
284
  config = load_config()
285
- workflows = _list_workflows(
285
+ workflows = list_workflows(
286
286
  config, limit, user, starttime, endtime, status, request, appversion
287
287
  )
288
288
  print(jsonpickle.encode(workflows, unpicklable=False))
@@ -301,7 +301,7 @@ def get(
301
301
  ] = True,
302
302
  ) -> None:
303
303
  config = load_config()
304
- print(jsonpickle.encode(_get_workflow(config, uuid, request), unpicklable=False))
304
+ print(jsonpickle.encode(get_workflow(config, uuid, request), unpicklable=False))
305
305
 
306
306
 
307
307
  @workflow.command(
@@ -315,7 +315,7 @@ def cancel(
315
315
  ] = None,
316
316
  ) -> None:
317
317
  config = load_config()
318
- _cancel_workflow(config, uuid)
318
+ cancel_workflow(config, uuid)
319
319
  print(f"Workflow {uuid} has been cancelled")
320
320
 
321
321
 
@@ -27,7 +27,7 @@ dependencies = [
27
27
  ]
28
28
  requires-python = ">=3.9"
29
29
  readme = "README.md"
30
- version = "0.21.0a3"
30
+ version = "0.21.0a4"
31
31
 
32
32
  [project.license]
33
33
  text = "MIT"
@@ -151,54 +151,59 @@ runtimeConfig:
151
151
 
152
152
 
153
153
  def test_admin_workflow_resume(dbos: DBOS, config: ConfigFile) -> None:
154
+ counter: int = 0
154
155
 
155
156
  @DBOS.workflow()
156
157
  def simple_workflow() -> None:
157
- print("Executed Simple workflow")
158
- return
158
+ nonlocal counter
159
+ counter += 1
159
160
 
160
- # run the workflow
161
+ # Run the workflow and flush its results
161
162
  simple_workflow()
162
- time.sleep(1)
163
+ assert counter == 1
164
+ dbos._sys_db.wait_for_buffer_flush()
163
165
 
164
- # get the workflow list
165
- output = _workflow_commands._list_workflows(
166
+ # Verify the workflow has succeeded
167
+ output = _workflow_commands.list_workflows(
166
168
  config, 10, None, None, None, None, False, None
167
169
  )
168
170
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
169
-
170
171
  assert output[0] != None, "Expected output to be not None"
171
-
172
172
  wfUuid = output[0].workflowUUID
173
-
174
- info = _workflow_commands._get_workflow(config, wfUuid, True)
173
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
175
174
  assert info is not None, "Expected output to be not None"
176
-
177
175
  assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
178
176
 
177
+ # Cancel the workflow. Verify it was cancelled
179
178
  response = requests.post(
180
179
  f"http://localhost:3001/workflows/{wfUuid}/cancel", json=[], timeout=5
181
180
  )
182
181
  assert response.status_code == 204
182
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
183
+ assert info is not None
184
+ assert info.status == "CANCELLED", f"Expected status to be CANCELLED"
183
185
 
184
- info = _workflow_commands._get_workflow(config, wfUuid, True)
185
- if info is not None:
186
- assert info.status == "CANCELLED", f"Expected status to be CANCELLED"
187
- else:
188
- assert False, "Expected info to be not None"
189
-
186
+ # Resume the workflow. Verify that it succeeds again.
190
187
  response = requests.post(
191
188
  f"http://localhost:3001/workflows/{wfUuid}/resume", json=[], timeout=5
192
189
  )
193
190
  assert response.status_code == 204
191
+ dbos._sys_db.wait_for_buffer_flush()
192
+ assert counter == 2
193
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
194
+ assert info is not None
195
+ assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
194
196
 
195
- time.sleep(1)
196
-
197
- info = _workflow_commands._get_workflow(config, wfUuid, True)
198
- if info is not None:
199
- assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
200
- else:
201
- assert False, "Expected info to be not None"
197
+ # Resume the workflow. Verify it does not run and status remains SUCCESS
198
+ response = requests.post(
199
+ f"http://localhost:3001/workflows/{wfUuid}/resume", json=[], timeout=5
200
+ )
201
+ assert response.status_code == 204
202
+ dbos._sys_db.wait_for_buffer_flush()
203
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
204
+ assert info is not None
205
+ assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
206
+ assert counter == 2
202
207
 
203
208
 
204
209
  def test_admin_workflow_restart(dbos: DBOS, config: ConfigFile) -> None:
@@ -213,7 +218,7 @@ def test_admin_workflow_restart(dbos: DBOS, config: ConfigFile) -> None:
213
218
  time.sleep(1)
214
219
 
215
220
  # get the workflow list
216
- output = _workflow_commands._list_workflows(
221
+ output = _workflow_commands.list_workflows(
217
222
  config, 10, None, None, None, None, False, None
218
223
  )
219
224
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
@@ -222,7 +227,7 @@ def test_admin_workflow_restart(dbos: DBOS, config: ConfigFile) -> None:
222
227
 
223
228
  wfUuid = output[0].workflowUUID
224
229
 
225
- info = _workflow_commands._get_workflow(config, wfUuid, True)
230
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
226
231
  assert info is not None, "Expected output to be not None"
227
232
 
228
233
  assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
@@ -232,7 +237,7 @@ def test_admin_workflow_restart(dbos: DBOS, config: ConfigFile) -> None:
232
237
  )
233
238
  assert response.status_code == 204
234
239
 
235
- info = _workflow_commands._get_workflow(config, wfUuid, True)
240
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
236
241
  if info is not None:
237
242
  assert info.status == "CANCELLED", f"Expected status to be CANCELLED"
238
243
  else:
@@ -245,13 +250,13 @@ def test_admin_workflow_restart(dbos: DBOS, config: ConfigFile) -> None:
245
250
 
246
251
  time.sleep(1)
247
252
 
248
- info = _workflow_commands._get_workflow(config, wfUuid, True)
253
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
249
254
  if info is not None:
250
255
  assert info.status == "CANCELLED", f"Expected status to be CANCELLED"
251
256
  else:
252
257
  assert False, "Expected info to be not None"
253
258
 
254
- output = _workflow_commands._list_workflows(
259
+ output = _workflow_commands.list_workflows(
255
260
  config, 10, None, None, None, None, False, None
256
261
  )
257
262
  assert len(output) == 2, f"Expected list length to be 2, but got {len(output)}"
@@ -261,7 +266,7 @@ def test_admin_workflow_restart(dbos: DBOS, config: ConfigFile) -> None:
261
266
  else:
262
267
  new_wfUuid = output[0].workflowUUID
263
268
 
264
- info = _workflow_commands._get_workflow(config, new_wfUuid, True)
269
+ info = _workflow_commands.get_workflow(config, new_wfUuid, True)
265
270
  if info is not None:
266
271
  assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
267
272
  else:
@@ -189,60 +189,41 @@ def test_dead_letter_queue(dbos: DBOS) -> None:
189
189
  recovery_count += 1
190
190
  event.wait()
191
191
 
192
- handle = DBOS.start_workflow(dead_letter_workflow)
192
+ # Start a workflow that blocks forever
193
+ wfid = str(uuid.uuid4())
194
+ with SetWorkflowID(wfid):
195
+ handle = DBOS.start_workflow(dead_letter_workflow)
193
196
 
197
+ # Attempt to recover the blocked workflow the maximum number of times
194
198
  for i in range(max_recovery_attempts):
195
199
  DBOS.recover_pending_workflows()
196
200
  assert recovery_count == i + 2
197
201
 
202
+ # Verify an additional attempt (either through recovery or through a direct call) throws a DLQ error
203
+ # and puts the workflow in the DLQ status.
198
204
  with pytest.raises(Exception) as exc_info:
199
205
  DBOS.recover_pending_workflows()
200
206
  assert exc_info.errisinstance(DBOSDeadLetterQueueError)
201
207
  assert handle.get_status().status == WorkflowStatusString.RETRIES_EXCEEDED.value
202
-
203
- event.set()
204
- assert handle.get_result() == None
205
- dbos._sys_db.wait_for_buffer_flush()
206
- assert handle.get_status().status == WorkflowStatusString.SUCCESS.value
207
-
208
-
209
- def test_enqueued_dead_letter_queue(dbos: DBOS) -> None:
210
- function_started_event = threading.Event()
211
- event = threading.Event()
212
- max_concurrency = 1
213
- max_recovery_attempts = 10
214
- recovery_count = 0
215
-
216
- @DBOS.workflow(max_recovery_attempts=max_recovery_attempts)
217
- def dead_letter_workflow() -> None:
218
- function_started_event.set()
219
- nonlocal recovery_count
220
- recovery_count += 1
221
- event.wait()
222
-
223
- @DBOS.workflow()
224
- def regular_workflow() -> None:
225
- return
226
-
227
- queue = Queue("test_queue", concurrency=max_concurrency)
228
- handle = queue.enqueue(dead_letter_workflow)
229
- function_started_event.wait()
230
-
231
- for i in range(max_recovery_attempts):
232
- DBOS.recover_pending_workflows()
233
- assert recovery_count == i + 2
234
-
235
- regular_handle = queue.enqueue(regular_workflow)
236
-
237
208
  with pytest.raises(Exception) as exc_info:
238
- DBOS.recover_pending_workflows()
209
+ with SetWorkflowID(wfid):
210
+ dead_letter_workflow()
239
211
  assert exc_info.errisinstance(DBOSDeadLetterQueueError)
240
- assert handle.get_status().status == WorkflowStatusString.RETRIES_EXCEEDED.value
241
212
 
242
- assert regular_handle.get_result() == None
213
+ # Resume the workflow. Verify it returns to PENDING status without error.
214
+ resumed_handle = dbos.resume_workflow(wfid)
215
+ assert (
216
+ handle.get_status().status
217
+ == resumed_handle.get_status().status
218
+ == WorkflowStatusString.PENDING.value
219
+ )
220
+
221
+ # Verify the workflow can recover again without error.
222
+ DBOS.recover_pending_workflows()
243
223
 
224
+ # Complete the blocked workflow
244
225
  event.set()
245
- assert handle.get_result() == None
226
+ assert handle.get_result() == resumed_handle.get_result() == None
246
227
  dbos._sys_db.wait_for_buffer_flush()
247
228
  assert handle.get_status().status == WorkflowStatusString.SUCCESS.value
248
229
 
@@ -17,6 +17,7 @@ from dbos import (
17
17
  SetWorkflowID,
18
18
  WorkflowHandle,
19
19
  )
20
+ from dbos._error import DBOSDeadLetterQueueError
20
21
  from dbos._schemas.system_database import SystemSchema
21
22
  from dbos._sys_db import WorkflowStatusString
22
23
  from tests.conftest import default_config
@@ -581,3 +582,190 @@ def test_duplicate_workflow_id(dbos: DBOS, caplog: pytest.LogCaptureFixture) ->
581
582
 
582
583
  # Reset logging
583
584
  logging.getLogger("dbos").propagate = original_propagate
585
+
586
+
587
+ def test_queue_recovery(dbos: DBOS) -> None:
588
+ step_counter: int = 0
589
+ queued_steps = 5
590
+
591
+ wfid = str(uuid.uuid4())
592
+ queue = Queue("test_queue")
593
+ step_events = [threading.Event() for _ in range(queued_steps)]
594
+ event = threading.Event()
595
+
596
+ @DBOS.workflow()
597
+ def test_workflow() -> list[int]:
598
+ assert DBOS.workflow_id == wfid
599
+ handles = []
600
+ for i in range(queued_steps):
601
+ h = queue.enqueue(test_step, i)
602
+ handles.append(h)
603
+ return [h.get_result() for h in handles]
604
+
605
+ @DBOS.step()
606
+ def test_step(i: int) -> int:
607
+ nonlocal step_counter
608
+ step_counter += 1
609
+ step_events[i].set()
610
+ event.wait()
611
+ return i
612
+
613
+ # Start the workflow. Wait for all five steps to start. Verify that they started.
614
+ with SetWorkflowID(wfid):
615
+ original_handle = DBOS.start_workflow(test_workflow)
616
+ for e in step_events:
617
+ e.wait()
618
+ assert step_counter == 5
619
+
620
+ # Recover the workflow, then resume it.
621
+ recovery_handles = DBOS.recover_pending_workflows()
622
+ event.set()
623
+ # There should be one handle for the workflow and another for each queued step.
624
+ assert len(recovery_handles) == queued_steps + 1
625
+ # Verify that both the recovered and original workflows complete correctly.
626
+ for h in recovery_handles:
627
+ if h.get_workflow_id() == wfid:
628
+ assert h.get_result() == [0, 1, 2, 3, 4]
629
+ assert original_handle.get_result() == [0, 1, 2, 3, 4]
630
+ # Each step should start twice, once originally and once in recovery.
631
+ assert step_counter == 10
632
+
633
+ # Rerun the workflow. Because each step is complete, none should start again.
634
+ with SetWorkflowID(wfid):
635
+ assert test_workflow() == [0, 1, 2, 3, 4]
636
+ assert step_counter == 10
637
+
638
+ # Verify all queue entries eventually get cleaned up.
639
+ assert queue_entries_are_cleaned_up(dbos)
640
+
641
+
642
+ def test_cancelling_queued_workflows(dbos: DBOS) -> None:
643
+ start_event = threading.Event()
644
+ blocking_event = threading.Event()
645
+
646
+ @DBOS.workflow()
647
+ def stuck_workflow() -> None:
648
+ start_event.set()
649
+ blocking_event.wait()
650
+
651
+ @DBOS.workflow()
652
+ def regular_workflow() -> None:
653
+ return
654
+
655
+ # Enqueue both the blocked workflow and a regular workflow on a queue with concurrency 1
656
+ queue = Queue("test_queue", concurrency=1)
657
+ wfid = str(uuid.uuid4())
658
+ with SetWorkflowID(wfid):
659
+ blocked_handle = queue.enqueue(stuck_workflow)
660
+ regular_handle = queue.enqueue(regular_workflow)
661
+
662
+ # Verify that the blocked workflow starts and is PENDING while the regular workflow remains ENQUEUED.
663
+ start_event.wait()
664
+ assert blocked_handle.get_status().status == WorkflowStatusString.PENDING.value
665
+ assert regular_handle.get_status().status == WorkflowStatusString.ENQUEUED.value
666
+
667
+ # Cancel the blocked workflow. Verify this lets the regular workflow run.
668
+ dbos.cancel_workflow(wfid)
669
+ assert blocked_handle.get_status().status == WorkflowStatusString.CANCELLED.value
670
+ assert regular_handle.get_result() == None
671
+
672
+ # Complete the blocked workflow
673
+ blocking_event.set()
674
+ assert blocked_handle.get_result() == None
675
+
676
+ # Verify all queue entries eventually get cleaned up.
677
+ assert queue_entries_are_cleaned_up(dbos)
678
+
679
+
680
+ def test_resuming_queued_workflows(dbos: DBOS) -> None:
681
+ start_event = threading.Event()
682
+ blocking_event = threading.Event()
683
+
684
+ @DBOS.workflow()
685
+ def stuck_workflow() -> None:
686
+ start_event.set()
687
+ blocking_event.wait()
688
+
689
+ @DBOS.workflow()
690
+ def regular_workflow() -> None:
691
+ return
692
+
693
+ # Enqueue a blocked workflow and two regular workflows on a queue with concurrency 1
694
+ queue = Queue("test_queue", concurrency=1)
695
+ wfid = str(uuid.uuid4())
696
+ blocked_handle = queue.enqueue(stuck_workflow)
697
+ with SetWorkflowID(wfid):
698
+ regular_handle_1 = queue.enqueue(regular_workflow)
699
+ regular_handle_2 = queue.enqueue(regular_workflow)
700
+
701
+ # Verify that the blocked workflow starts and is PENDING while the regular workflows remain ENQUEUED.
702
+ start_event.wait()
703
+ assert blocked_handle.get_status().status == WorkflowStatusString.PENDING.value
704
+ assert regular_handle_1.get_status().status == WorkflowStatusString.ENQUEUED.value
705
+ assert regular_handle_2.get_status().status == WorkflowStatusString.ENQUEUED.value
706
+
707
+ # Resume a regular workflow. Verify it completes.
708
+ dbos.resume_workflow(wfid)
709
+ assert regular_handle_1.get_result() == None
710
+
711
+ # Complete the blocked workflow. Verify the second regular workflow also completes.
712
+ blocking_event.set()
713
+ assert blocked_handle.get_result() == None
714
+ assert regular_handle_2.get_result() == None
715
+
716
+ # Verify all queue entries eventually get cleaned up.
717
+ assert queue_entries_are_cleaned_up(dbos)
718
+
719
+
720
+ def test_dlq_enqueued_workflows(dbos: DBOS) -> None:
721
+ start_event = threading.Event()
722
+ blocking_event = threading.Event()
723
+ max_recovery_attempts = 10
724
+ recovery_count = 0
725
+
726
+ @DBOS.workflow(max_recovery_attempts=max_recovery_attempts)
727
+ def blocked_workflow() -> None:
728
+ start_event.set()
729
+ nonlocal recovery_count
730
+ recovery_count += 1
731
+ blocking_event.wait()
732
+
733
+ @DBOS.workflow()
734
+ def regular_workflow() -> None:
735
+ return
736
+
737
+ # Enqueue both the blocked workflow and a regular workflow on a queue with concurrency 1
738
+ queue = Queue("test_queue", concurrency=1)
739
+ blocked_handle = queue.enqueue(blocked_workflow)
740
+ regular_handle = queue.enqueue(regular_workflow)
741
+
742
+ # Verify that the blocked workflow starts and is PENDING while the regular workflow remains ENQUEUED.
743
+ start_event.wait()
744
+ assert blocked_handle.get_status().status == WorkflowStatusString.PENDING.value
745
+ assert regular_handle.get_status().status == WorkflowStatusString.ENQUEUED.value
746
+
747
+ # Attempt to recover the blocked workflow the maximum number of times
748
+ for i in range(max_recovery_attempts):
749
+ DBOS.recover_pending_workflows()
750
+ assert recovery_count == i + 2
751
+
752
+ # Verify an additional recovery throws a DLQ error and puts the workflow in the DLQ status.
753
+ with pytest.raises(Exception) as exc_info:
754
+ DBOS.recover_pending_workflows()
755
+ assert exc_info.errisinstance(DBOSDeadLetterQueueError)
756
+ assert (
757
+ blocked_handle.get_status().status
758
+ == WorkflowStatusString.RETRIES_EXCEEDED.value
759
+ )
760
+
761
+ # Verify the blocked workflow entering the DLQ lets the regular workflow run
762
+ assert regular_handle.get_result() == None
763
+
764
+ # Complete the blocked workflow
765
+ blocking_event.set()
766
+ assert blocked_handle.get_result() == None
767
+ dbos._sys_db.wait_for_buffer_flush()
768
+ assert blocked_handle.get_status().status == WorkflowStatusString.SUCCESS.value
769
+
770
+ # Verify all queue entries eventually get cleaned up.
771
+ assert queue_entries_are_cleaned_up(dbos)
@@ -31,7 +31,7 @@ def test_list_workflow(dbos: DBOS, config: ConfigFile) -> None:
31
31
  simple_workflow()
32
32
  time.sleep(1) # wait for the workflow to complete
33
33
  # get the workflow list
34
- output = _workflow_commands._list_workflows(
34
+ output = _workflow_commands.list_workflows(
35
35
  config, 10, None, None, None, None, False, None
36
36
  )
37
37
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
@@ -54,7 +54,7 @@ def test_list_workflow_limit(dbos: DBOS, config: ConfigFile) -> None:
54
54
  simple_workflow()
55
55
  time.sleep(1) # wait for the workflow to complete
56
56
  # get the workflow list
57
- output = _workflow_commands._list_workflows(
57
+ output = _workflow_commands.list_workflows(
58
58
  config, 2, None, None, None, None, False, None
59
59
  )
60
60
  assert len(output) == 2, f"Expected list length to be 1, but got {len(output)}"
@@ -72,12 +72,12 @@ def test_list_workflow_status(dbos: DBOS, config: ConfigFile) -> None:
72
72
  simple_workflow()
73
73
  time.sleep(1) # wait for the workflow to complete
74
74
  # get the workflow list
75
- output = _workflow_commands._list_workflows(
75
+ output = _workflow_commands.list_workflows(
76
76
  config, 10, None, None, None, "PENDING", False, None
77
77
  )
78
78
  assert len(output) == 0, f"Expected list length to be 0, but got {len(output)}"
79
79
 
80
- output = _workflow_commands._list_workflows(
80
+ output = _workflow_commands.list_workflows(
81
81
  config, 10, None, None, None, "SUCCESS", False, None
82
82
  )
83
83
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
@@ -102,7 +102,7 @@ def test_list_workflow_start_end_times(dbos: DBOS, config: ConfigFile) -> None:
102
102
  endtime = datetime.now().isoformat()
103
103
  print(endtime)
104
104
 
105
- output = _workflow_commands._list_workflows(
105
+ output = _workflow_commands.list_workflows(
106
106
  config, 10, None, starttime, endtime, None, False, None
107
107
  )
108
108
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
@@ -110,7 +110,7 @@ def test_list_workflow_start_end_times(dbos: DBOS, config: ConfigFile) -> None:
110
110
  newstarttime = (now - timedelta(seconds=30)).isoformat()
111
111
  newendtime = starttime
112
112
 
113
- output = _workflow_commands._list_workflows(
113
+ output = _workflow_commands.list_workflows(
114
114
  config, 10, None, newstarttime, newendtime, None, False, None
115
115
  )
116
116
  assert len(output) == 0, f"Expected list length to be 0, but got {len(output)}"
@@ -141,19 +141,19 @@ def test_list_workflow_end_times_positive(dbos: DBOS, config: ConfigFile) -> Non
141
141
  # get the workflow list
142
142
  time_3 = datetime.now().isoformat()
143
143
 
144
- output = _workflow_commands._list_workflows(
144
+ output = _workflow_commands.list_workflows(
145
145
  config, 10, None, time_0, time_1, None, False, None
146
146
  )
147
147
 
148
148
  assert len(output) == 0, f"Expected list length to be 0, but got {len(output)}"
149
149
 
150
- output = _workflow_commands._list_workflows(
150
+ output = _workflow_commands.list_workflows(
151
151
  config, 10, None, time_1, time_2, None, False, None
152
152
  )
153
153
 
154
154
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
155
155
 
156
- output = _workflow_commands._list_workflows(
156
+ output = _workflow_commands.list_workflows(
157
157
  config, 10, None, time_1, time_3, None, False, None
158
158
  )
159
159
  assert len(output) == 2, f"Expected list length to be 2, but got {len(output)}"
@@ -171,7 +171,7 @@ def test_get_workflow(dbos: DBOS, config: ConfigFile) -> None:
171
171
  simple_workflow()
172
172
  time.sleep(1) # wait for the workflow to complete
173
173
  # get the workflow list
174
- output = _workflow_commands._list_workflows(
174
+ output = _workflow_commands.list_workflows(
175
175
  config, 10, None, None, None, None, False, None
176
176
  )
177
177
  assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
@@ -180,7 +180,7 @@ def test_get_workflow(dbos: DBOS, config: ConfigFile) -> None:
180
180
 
181
181
  wfUuid = output[0].workflowUUID
182
182
 
183
- info = _workflow_commands._get_workflow(config, wfUuid, True)
183
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
184
184
  assert info is not None, "Expected output to be not None"
185
185
 
186
186
  if info is not None:
@@ -199,7 +199,7 @@ def test_cancel_workflow(dbos: DBOS, config: ConfigFile) -> None:
199
199
  # run the workflow
200
200
  simple_workflow()
201
201
  # get the workflow list
202
- output = _workflow_commands._list_workflows(
202
+ output = _workflow_commands.list_workflows(
203
203
  config, 10, None, None, None, None, False, None
204
204
  )
205
205
  # assert len(output) == 1, f"Expected list length to be 1, but got {len(output)}"
@@ -208,9 +208,9 @@ def test_cancel_workflow(dbos: DBOS, config: ConfigFile) -> None:
208
208
  assert output[0] != None, "Expected output to be not None"
209
209
  wfUuid = output[0].workflowUUID
210
210
 
211
- _workflow_commands._cancel_workflow(config, wfUuid)
211
+ _workflow_commands.cancel_workflow(config, wfUuid)
212
212
 
213
- info = _workflow_commands._get_workflow(config, wfUuid, True)
213
+ info = _workflow_commands.get_workflow(config, wfUuid, True)
214
214
  assert info is not None, "Expected info to be not None"
215
215
  if info is not None:
216
216
  assert info.status == "CANCELLED", f"Expected status to be CANCELLED"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes