dbos 1.4.1__tar.gz → 1.5.0a2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. {dbos-1.4.1 → dbos-1.5.0a2}/PKG-INFO +1 -1
  2. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_admin_server.py +21 -0
  3. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_app_db.py +18 -0
  4. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_sys_db.py +56 -0
  5. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_workflow_commands.py +36 -2
  6. {dbos-1.4.1 → dbos-1.5.0a2}/pyproject.toml +1 -1
  7. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_admin_server.py +40 -0
  8. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_workflow_management.py +122 -1
  9. {dbos-1.4.1 → dbos-1.5.0a2}/LICENSE +0 -0
  10. {dbos-1.4.1 → dbos-1.5.0a2}/README.md +0 -0
  11. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/__init__.py +0 -0
  12. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/__main__.py +0 -0
  13. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_classproperty.py +0 -0
  14. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_client.py +0 -0
  15. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_conductor/conductor.py +0 -0
  16. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_conductor/protocol.py +0 -0
  17. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_context.py +0 -0
  18. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_core.py +0 -0
  19. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_croniter.py +0 -0
  20. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_dbos.py +0 -0
  21. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_dbos_config.py +0 -0
  22. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_debug.py +0 -0
  23. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_docker_pg_helper.py +0 -0
  24. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_error.py +0 -0
  25. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_event_loop.py +0 -0
  26. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_fastapi.py +0 -0
  27. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_flask.py +0 -0
  28. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_kafka.py +0 -0
  29. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_kafka_message.py +0 -0
  30. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_logger.py +0 -0
  31. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/env.py +0 -0
  32. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/script.py.mako +0 -0
  33. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  34. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
  35. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  36. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  37. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/66478e1b95e5_consolidate_queues.py +0 -0
  38. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -0
  39. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py +0 -0
  40. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  41. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  42. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  43. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/d994145b47b6_consolidate_inputs.py +0 -0
  44. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  45. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
  46. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_outcome.py +0 -0
  47. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_queue.py +0 -0
  48. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_recovery.py +0 -0
  49. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_registrations.py +0 -0
  50. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_roles.py +0 -0
  51. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_scheduler.py +0 -0
  52. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_schemas/__init__.py +0 -0
  53. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_schemas/application_database.py +0 -0
  54. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_schemas/system_database.py +0 -0
  55. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_serialization.py +0 -0
  56. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/README.md +0 -0
  57. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  58. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
  59. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  60. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
  61. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  62. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
  63. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
  64. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
  65. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  66. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_tracer.py +0 -0
  67. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/_utils.py +0 -0
  68. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/cli/_github_init.py +0 -0
  69. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/cli/_template_init.py +0 -0
  70. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/cli/cli.py +0 -0
  71. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/dbos-config.schema.json +0 -0
  72. {dbos-1.4.1 → dbos-1.5.0a2}/dbos/py.typed +0 -0
  73. {dbos-1.4.1 → dbos-1.5.0a2}/tests/__init__.py +0 -0
  74. {dbos-1.4.1 → dbos-1.5.0a2}/tests/atexit_no_ctor.py +0 -0
  75. {dbos-1.4.1 → dbos-1.5.0a2}/tests/atexit_no_launch.py +0 -0
  76. {dbos-1.4.1 → dbos-1.5.0a2}/tests/classdefs.py +0 -0
  77. {dbos-1.4.1 → dbos-1.5.0a2}/tests/client_collateral.py +0 -0
  78. {dbos-1.4.1 → dbos-1.5.0a2}/tests/client_worker.py +0 -0
  79. {dbos-1.4.1 → dbos-1.5.0a2}/tests/conftest.py +0 -0
  80. {dbos-1.4.1 → dbos-1.5.0a2}/tests/dupname_classdefs1.py +0 -0
  81. {dbos-1.4.1 → dbos-1.5.0a2}/tests/dupname_classdefsa.py +0 -0
  82. {dbos-1.4.1 → dbos-1.5.0a2}/tests/more_classdefs.py +0 -0
  83. {dbos-1.4.1 → dbos-1.5.0a2}/tests/queuedworkflow.py +0 -0
  84. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_async.py +0 -0
  85. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_classdecorators.py +0 -0
  86. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_cli.py +0 -0
  87. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_client.py +0 -0
  88. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_concurrency.py +0 -0
  89. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_config.py +0 -0
  90. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_croniter.py +0 -0
  91. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_dbos.py +0 -0
  92. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_debug.py +0 -0
  93. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_docker_secrets.py +0 -0
  94. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_failures.py +0 -0
  95. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_fastapi.py +0 -0
  96. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_fastapi_roles.py +0 -0
  97. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_flask.py +0 -0
  98. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_kafka.py +0 -0
  99. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_outcome.py +0 -0
  100. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_package.py +0 -0
  101. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_queue.py +0 -0
  102. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_scheduler.py +0 -0
  103. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_schema_migration.py +0 -0
  104. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_singleton.py +0 -0
  105. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_spans.py +0 -0
  106. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_sqlalchemy.py +0 -0
  107. {dbos-1.4.1 → dbos-1.5.0a2}/tests/test_workflow_introspection.py +0 -0
  108. {dbos-1.4.1 → dbos-1.5.0a2}/version/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dbos
3
- Version: 1.4.1
3
+ Version: 1.5.0a2
4
4
  Summary: Ultra-lightweight durable execution in Python
5
5
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
6
6
  License: MIT
@@ -7,6 +7,8 @@ from functools import partial
7
7
  from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
8
8
  from typing import TYPE_CHECKING, Any, List, Optional, TypedDict
9
9
 
10
+ from dbos._workflow_commands import garbage_collect, global_timeout
11
+
10
12
  from ._context import SetWorkflowID
11
13
  from ._error import DBOSException
12
14
  from ._logger import dbos_logger
@@ -20,6 +22,8 @@ _health_check_path = "/dbos-healthz"
20
22
  _workflow_recovery_path = "/dbos-workflow-recovery"
21
23
  _deactivate_path = "/deactivate"
22
24
  _workflow_queues_metadata_path = "/dbos-workflow-queues-metadata"
25
+ _garbage_collect_path = "/dbos-garbage-collect"
26
+ _global_timeout_path = "/dbos-global-timeout"
23
27
  # /workflows/:workflow_id/cancel
24
28
  # /workflows/:workflow_id/resume
25
29
  # /workflows/:workflow_id/restart
@@ -122,6 +126,23 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
122
126
  self.send_response(200)
123
127
  self._end_headers()
124
128
  self.wfile.write(json.dumps(workflow_ids).encode("utf-8"))
129
+ elif self.path == _garbage_collect_path:
130
+ inputs = json.loads(post_data.decode("utf-8"))
131
+ cutoff_epoch_timestamp_ms = inputs.get("cutoff_epoch_timestamp_ms", None)
132
+ rows_threshold = inputs.get("rows_threshold", None)
133
+ garbage_collect(
134
+ self.dbos,
135
+ cutoff_epoch_timestamp_ms=cutoff_epoch_timestamp_ms,
136
+ rows_threshold=rows_threshold,
137
+ )
138
+ self.send_response(204)
139
+ self._end_headers()
140
+ elif self.path == _global_timeout_path:
141
+ inputs = json.loads(post_data.decode("utf-8"))
142
+ timeout_ms = inputs.get("timeout_ms", None)
143
+ global_timeout(self.dbos, timeout_ms)
144
+ self.send_response(204)
145
+ self._end_headers()
125
146
  else:
126
147
 
127
148
  restart_match = re.match(
@@ -256,3 +256,21 @@ class ApplicationDatabase:
256
256
  )
257
257
 
258
258
  conn.execute(insert_stmt)
259
+
260
+ def garbage_collect(
261
+ self, cutoff_epoch_timestamp_ms: int, pending_workflow_ids: list[str]
262
+ ) -> None:
263
+ with self.engine.begin() as c:
264
+ delete_query = sa.delete(ApplicationSchema.transaction_outputs).where(
265
+ ApplicationSchema.transaction_outputs.c.created_at
266
+ < cutoff_epoch_timestamp_ms
267
+ )
268
+
269
+ if len(pending_workflow_ids) > 0:
270
+ delete_query = delete_query.where(
271
+ ~ApplicationSchema.transaction_outputs.c.workflow_uuid.in_(
272
+ pending_workflow_ids
273
+ )
274
+ )
275
+
276
+ c.execute(delete_query)
@@ -1852,6 +1852,62 @@ class SystemDatabase:
1852
1852
  dbos_logger.error(f"Error connecting to the DBOS system database: {e}")
1853
1853
  raise
1854
1854
 
1855
+ def garbage_collect(
1856
+ self, cutoff_epoch_timestamp_ms: Optional[int], rows_threshold: Optional[int]
1857
+ ) -> Optional[tuple[int, list[str]]]:
1858
+ if rows_threshold is not None:
1859
+ with self.engine.begin() as c:
1860
+ # Get the created_at timestamp of the rows_threshold-th newest row
1861
+ result = c.execute(
1862
+ sa.select(SystemSchema.workflow_status.c.created_at)
1863
+ .order_by(SystemSchema.workflow_status.c.created_at.desc())
1864
+ .limit(1)
1865
+ .offset(rows_threshold - 1)
1866
+ ).fetchone()
1867
+
1868
+ if result is not None:
1869
+ rows_based_cutoff = result[0]
1870
+ # Use the more restrictive cutoff (higher timestamp = more recent = more deletion)
1871
+ if (
1872
+ cutoff_epoch_timestamp_ms is None
1873
+ or rows_based_cutoff > cutoff_epoch_timestamp_ms
1874
+ ):
1875
+ cutoff_epoch_timestamp_ms = rows_based_cutoff
1876
+
1877
+ if cutoff_epoch_timestamp_ms is None:
1878
+ return None
1879
+
1880
+ with self.engine.begin() as c:
1881
+ # Delete all workflows older than cutoff that are NOT PENDING or ENQUEUED
1882
+ c.execute(
1883
+ sa.delete(SystemSchema.workflow_status)
1884
+ .where(
1885
+ SystemSchema.workflow_status.c.created_at
1886
+ < cutoff_epoch_timestamp_ms
1887
+ )
1888
+ .where(
1889
+ ~SystemSchema.workflow_status.c.status.in_(
1890
+ [
1891
+ WorkflowStatusString.PENDING.value,
1892
+ WorkflowStatusString.ENQUEUED.value,
1893
+ ]
1894
+ )
1895
+ )
1896
+ )
1897
+
1898
+ # Then, get the IDs of all remaining old workflows
1899
+ pending_enqueued_result = c.execute(
1900
+ sa.select(SystemSchema.workflow_status.c.workflow_uuid).where(
1901
+ SystemSchema.workflow_status.c.created_at
1902
+ < cutoff_epoch_timestamp_ms
1903
+ )
1904
+ ).fetchall()
1905
+
1906
+ # Return the final cutoff and workflow IDs
1907
+ return cutoff_epoch_timestamp_ms, [
1908
+ row[0] for row in pending_enqueued_result
1909
+ ]
1910
+
1855
1911
 
1856
1912
  def reset_system_database(postgres_db_url: sa.URL, sysdb_name: str) -> None:
1857
1913
  try:
@@ -1,8 +1,9 @@
1
+ import time
1
2
  import uuid
2
- from typing import List, Optional
3
+ from datetime import datetime
4
+ from typing import TYPE_CHECKING, List, Optional
3
5
 
4
6
  from dbos._context import get_local_dbos_context
5
- from dbos._error import DBOSException
6
7
 
7
8
  from ._app_db import ApplicationDatabase
8
9
  from ._sys_db import (
@@ -11,8 +12,12 @@ from ._sys_db import (
11
12
  StepInfo,
12
13
  SystemDatabase,
13
14
  WorkflowStatus,
15
+ WorkflowStatusString,
14
16
  )
15
17
 
18
+ if TYPE_CHECKING:
19
+ from ._dbos import DBOS
20
+
16
21
 
17
22
  def list_workflows(
18
23
  sys_db: SystemDatabase,
@@ -118,3 +123,32 @@ def fork_workflow(
118
123
  application_version=application_version,
119
124
  )
120
125
  return forked_workflow_id
126
+
127
+
128
+ def garbage_collect(
129
+ dbos: "DBOS",
130
+ cutoff_epoch_timestamp_ms: Optional[int],
131
+ rows_threshold: Optional[int],
132
+ ) -> None:
133
+ if cutoff_epoch_timestamp_ms is None and rows_threshold is None:
134
+ return
135
+ result = dbos._sys_db.garbage_collect(
136
+ cutoff_epoch_timestamp_ms=cutoff_epoch_timestamp_ms,
137
+ rows_threshold=rows_threshold,
138
+ )
139
+ if result is not None:
140
+ cutoff_epoch_timestamp_ms, pending_workflow_ids = result
141
+ dbos._app_db.garbage_collect(cutoff_epoch_timestamp_ms, pending_workflow_ids)
142
+
143
+
144
+ def global_timeout(dbos: "DBOS", timeout_ms: int) -> None:
145
+ cutoff_epoch_timestamp_ms = int(time.time() * 1000) - timeout_ms
146
+ cutoff_iso = datetime.fromtimestamp(cutoff_epoch_timestamp_ms / 1000).isoformat()
147
+ for workflow in dbos.list_workflows(
148
+ status=WorkflowStatusString.PENDING.value, end_time=cutoff_iso
149
+ ):
150
+ dbos.cancel_workflow(workflow.workflow_id)
151
+ for workflow in dbos.list_workflows(
152
+ status=WorkflowStatusString.ENQUEUED.value, end_time=cutoff_iso
153
+ ):
154
+ dbos.cancel_workflow(workflow.workflow_id)
@@ -27,7 +27,7 @@ dependencies = [
27
27
  ]
28
28
  requires-python = ">=3.9"
29
29
  readme = "README.md"
30
- version = "1.4.1"
30
+ version = "1.5.0a2"
31
31
 
32
32
  [project.license]
33
33
  text = "MIT"
@@ -453,3 +453,43 @@ def test_admin_workflow_fork(dbos: DBOS, sys_db: SystemDatabase) -> None:
453
453
  ), f"Expected application version to be {new_version}, but got {handle.get_status().app_version}"
454
454
 
455
455
  assert worked, "Workflow did not finish successfully"
456
+
457
+
458
+ def test_admin_garbage_collect(dbos: DBOS) -> None:
459
+
460
+ @DBOS.workflow()
461
+ def workflow() -> str:
462
+ return DBOS.workflow_id
463
+
464
+ workflow()
465
+
466
+ assert len(DBOS.list_workflows()) == 1
467
+
468
+ response = requests.post(
469
+ f"http://localhost:3001/dbos-garbage-collect",
470
+ json={"cutoff_epoch_timestamp_ms": int(time.time() * 1000)},
471
+ timeout=5,
472
+ )
473
+ response.raise_for_status()
474
+
475
+ assert len(DBOS.list_workflows()) == 0
476
+
477
+
478
+ def test_admin_global_timeout(dbos: DBOS) -> None:
479
+
480
+ @DBOS.workflow()
481
+ def workflow() -> None:
482
+ while True:
483
+ DBOS.sleep(0.1)
484
+
485
+ handle = DBOS.start_workflow(workflow)
486
+ time.sleep(1)
487
+
488
+ response = requests.post(
489
+ f"http://localhost:3001/dbos-global-timeout",
490
+ json={"timeout_ms": 1000},
491
+ timeout=5,
492
+ )
493
+ response.raise_for_status()
494
+ with pytest.raises(DBOSWorkflowCancelledError):
495
+ handle.get_result()
@@ -1,14 +1,18 @@
1
1
  import threading
2
+ import time
2
3
  import uuid
3
4
  from typing import Callable
4
5
 
5
6
  import pytest
7
+ import sqlalchemy as sa
6
8
 
7
9
  # Public API
8
10
  from dbos import DBOS, Queue, SetWorkflowID
9
11
  from dbos._dbos import DBOSConfiguredInstance
10
- from dbos._error import DBOSException, DBOSWorkflowCancelledError
12
+ from dbos._error import DBOSWorkflowCancelledError
13
+ from dbos._schemas.application_database import ApplicationSchema
11
14
  from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
15
+ from dbos._workflow_commands import garbage_collect, global_timeout
12
16
  from tests.conftest import queue_entries_are_cleaned_up
13
17
 
14
18
 
@@ -624,3 +628,120 @@ def test_fork_version(
624
628
  GlobalParams.app_version = new_version
625
629
  assert handle.get_result() == output
626
630
  assert queue_entries_are_cleaned_up(dbos)
631
+
632
+
633
+ def test_garbage_collection(dbos: DBOS) -> None:
634
+ event = threading.Event()
635
+
636
+ @DBOS.step()
637
+ def step(x: int) -> int:
638
+ return x
639
+
640
+ @DBOS.transaction()
641
+ def txn(x: int) -> int:
642
+ DBOS.sql_session.execute(sa.text("SELECT 1")).fetchall()
643
+ return x
644
+
645
+ @DBOS.workflow()
646
+ def workflow(x: int) -> int:
647
+ step(x)
648
+ txn(x)
649
+ return x
650
+
651
+ @DBOS.workflow()
652
+ def blocked_workflow() -> str:
653
+ txn(0)
654
+ event.wait()
655
+ return DBOS.workflow_id
656
+
657
+ num_workflows = 10
658
+
659
+ handle = DBOS.start_workflow(blocked_workflow)
660
+ for i in range(num_workflows):
661
+ assert workflow(i) == i
662
+
663
+ # Garbage collect all but one workflow
664
+ garbage_collect(dbos, cutoff_epoch_timestamp_ms=None, rows_threshold=1)
665
+ # Verify two workflows remain: the newest and the blocked workflow
666
+ workflows = DBOS.list_workflows()
667
+ assert len(workflows) == 2
668
+ assert workflows[0].workflow_id == handle.workflow_id
669
+ # Verify txn outputs are preserved only for the remaining workflows
670
+ with dbos._app_db.engine.begin() as c:
671
+ rows = c.execute(
672
+ sa.select(
673
+ ApplicationSchema.transaction_outputs.c.workflow_uuid,
674
+ )
675
+ ).all()
676
+ assert len(rows) == 2
677
+
678
+ # Garbage collect all previous workflows
679
+ garbage_collect(
680
+ dbos, cutoff_epoch_timestamp_ms=int(time.time() * 1000), rows_threshold=None
681
+ )
682
+ # Verify only the blocked workflow remains
683
+ workflows = DBOS.list_workflows()
684
+ assert len(workflows) == 1
685
+ assert workflows[0].workflow_id == handle.workflow_id
686
+ # Verify txn outputs are preserved only for the remaining workflow
687
+ with dbos._app_db.engine.begin() as c:
688
+ rows = c.execute(
689
+ sa.select(
690
+ ApplicationSchema.transaction_outputs.c.workflow_uuid,
691
+ )
692
+ ).all()
693
+ assert len(rows) == 1
694
+
695
+ # Finish the blocked workflow, garbage collect everything
696
+ event.set()
697
+ assert handle.get_result() is not None
698
+ garbage_collect(
699
+ dbos, cutoff_epoch_timestamp_ms=int(time.time() * 1000), rows_threshold=None
700
+ )
701
+ # Verify no workflows remain
702
+ workflows = DBOS.list_workflows()
703
+ assert len(workflows) == 0
704
+
705
+ # Verify GC runs without error on a blank table
706
+ garbage_collect(dbos, cutoff_epoch_timestamp_ms=None, rows_threshold=1)
707
+
708
+ # Run workflows, wait, run them again
709
+ for i in range(num_workflows):
710
+ assert workflow(i) == i
711
+ time.sleep(1)
712
+ for i in range(num_workflows):
713
+ assert workflow(i) == i
714
+
715
+ # GC the first half, verify only half were GC'ed
716
+ garbage_collect(
717
+ dbos,
718
+ cutoff_epoch_timestamp_ms=int(time.time() * 1000) - 1000,
719
+ rows_threshold=None,
720
+ )
721
+ workflows = DBOS.list_workflows()
722
+ assert len(workflows) == num_workflows
723
+
724
+
725
+ def test_global_timeout(dbos: DBOS) -> None:
726
+ event = threading.Event()
727
+
728
+ @DBOS.workflow()
729
+ def blocked_workflow() -> str:
730
+ while not event.wait(0):
731
+ DBOS.sleep(0.1)
732
+ return DBOS.workflow_id
733
+
734
+ num_workflows = 10
735
+ handles = [DBOS.start_workflow(blocked_workflow) for _ in range(num_workflows)]
736
+
737
+ # Wait one second, start one final workflow, then timeout all workflows started more than one second ago
738
+ time.sleep(1)
739
+ final_handle = DBOS.start_workflow(blocked_workflow)
740
+ global_timeout(dbos, 1000)
741
+
742
+ # Verify all workflows started before the global timeout are cancelled
743
+ for handle in handles:
744
+ with pytest.raises(DBOSWorkflowCancelledError):
745
+ handle.get_result()
746
+ event.set()
747
+ final_handle.get_result() is not None
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes