dbos 0.22.0a2__tar.gz → 0.22.0a5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (90)
  1. {dbos-0.22.0a2 → dbos-0.22.0a5}/PKG-INFO +1 -1
  2. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_db_wizard.py +9 -1
  3. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_dbos.py +4 -0
  4. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_queue.py +3 -1
  5. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_sys_db.py +69 -16
  6. {dbos-0.22.0a2 → dbos-0.22.0a5}/pyproject.toml +1 -1
  7. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_config.py +23 -0
  8. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_queue.py +139 -23
  9. {dbos-0.22.0a2 → dbos-0.22.0a5}/LICENSE +0 -0
  10. {dbos-0.22.0a2 → dbos-0.22.0a5}/README.md +0 -0
  11. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/__init__.py +0 -0
  12. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_admin_server.py +0 -0
  13. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_app_db.py +0 -0
  14. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_classproperty.py +0 -0
  15. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_cloudutils/authentication.py +0 -0
  16. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_cloudutils/cloudutils.py +0 -0
  17. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_cloudutils/databases.py +0 -0
  18. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_context.py +0 -0
  19. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_core.py +0 -0
  20. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_croniter.py +0 -0
  21. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_dbos_config.py +0 -0
  22. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_error.py +0 -0
  23. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_fastapi.py +0 -0
  24. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_flask.py +0 -0
  25. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_kafka.py +0 -0
  26. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_kafka_message.py +0 -0
  27. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_logger.py +0 -0
  28. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/env.py +0 -0
  29. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/script.py.mako +0 -0
  30. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  31. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  32. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  33. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  34. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  35. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  36. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  37. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_outcome.py +0 -0
  38. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_recovery.py +0 -0
  39. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_registrations.py +0 -0
  40. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_request.py +0 -0
  41. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_roles.py +0 -0
  42. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_scheduler.py +0 -0
  43. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_schemas/__init__.py +0 -0
  44. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_schemas/application_database.py +0 -0
  45. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_schemas/system_database.py +0 -0
  46. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_serialization.py +0 -0
  47. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/README.md +0 -0
  48. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  49. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/__package/main.py +0 -0
  50. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  51. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
  52. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  53. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
  54. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
  55. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
  56. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  57. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_tracer.py +0 -0
  58. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/_workflow_commands.py +0 -0
  59. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/cli/_github_init.py +0 -0
  60. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/cli/_template_init.py +0 -0
  61. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/cli/cli.py +0 -0
  62. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/dbos-config.schema.json +0 -0
  63. {dbos-0.22.0a2 → dbos-0.22.0a5}/dbos/py.typed +0 -0
  64. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/__init__.py +0 -0
  65. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/atexit_no_ctor.py +0 -0
  66. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/atexit_no_launch.py +0 -0
  67. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/classdefs.py +0 -0
  68. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/conftest.py +0 -0
  69. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/more_classdefs.py +0 -0
  70. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/queuedworkflow.py +0 -0
  71. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_admin_server.py +0 -0
  72. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_async.py +0 -0
  73. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_classdecorators.py +0 -0
  74. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_concurrency.py +0 -0
  75. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_croniter.py +0 -0
  76. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_dbos.py +0 -0
  77. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_failures.py +0 -0
  78. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_fastapi.py +0 -0
  79. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_fastapi_roles.py +0 -0
  80. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_flask.py +0 -0
  81. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_kafka.py +0 -0
  82. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_outcome.py +0 -0
  83. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_package.py +0 -0
  84. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_scheduler.py +0 -0
  85. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_schema_migration.py +0 -0
  86. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_singleton.py +0 -0
  87. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_spans.py +0 -0
  88. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_sqlalchemy.py +0 -0
  89. {dbos-0.22.0a2 → dbos-0.22.0a5}/tests/test_workflow_cmds.py +0 -0
  90. {dbos-0.22.0a2 → dbos-0.22.0a5}/version/__init__.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.22.0a2
+Version: 0.22.0a5
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
dbos/_db_wizard.py
@@ -45,8 +45,16 @@ def db_wizard(config: "ConfigFile", config_file_path: str) -> "ConfigFile":
             f"Could not connect to Postgres: password authentication failed: {db_connection_error}"
         )
     db_config = config["database"]
+
+    # Read the config file and check if the database hostname/port/username are set. If so, skip the wizard.
+    with open(config_file_path, "r") as file:
+        content = file.read()
+        local_config = yaml.safe_load(content)
     if (
-        db_config["hostname"] != "localhost"
+        local_config["database"]["hostname"]
+        or local_config["database"]["port"]
+        or local_config["database"]["username"]
+        or db_config["hostname"] != "localhost"
         or db_config["port"] != 5432
         or db_config["username"] != "postgres"
     ):
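The point of re-reading the file: the parsed in-memory config may already have connection defaults filled in, so `db_config` alone cannot distinguish "the user explicitly wrote localhost:5432" from "nothing was set". A minimal sketch of the check in isolation (the helper name is illustrative, not part of the package):

    import yaml

    def user_pinned_connection(config_file_path: str) -> bool:
        # Only the raw YAML on disk shows whether the user actually wrote
        # hostname/port/username; the loaded config may carry defaults
        # (localhost/5432/postgres) that look identical to explicit values.
        with open(config_file_path, "r") as file:
            local_config = yaml.safe_load(file.read())
        db = local_config.get("database") or {}
        return bool(db.get("hostname") or db.get("port") or db.get("username"))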
dbos/_dbos.py
@@ -997,6 +997,10 @@ def _dbos_exit_hook() -> None:
         )
         return
     if not _dbos_global_instance._launched:
+        if _dbos_global_instance.fastapi is not None:
+            # FastAPI lifespan middleware will call launch/destroy, so we can ignore this.
+            # This is likely to happen during fastapi dev runs, where the reloader loads the module multiple times.
+            return
         print("DBOS exiting; DBOS exists but launch() was not called")
         dbos_logger.warning("DBOS exiting; DBOS exists but launch() was not called")
         return
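Context for the guard: when DBOS is attached to a FastAPI app, launch() and destroy() are driven by the app's lifespan rather than by user code, so an exit hook firing in a process that never served requests (for example the `fastapi dev` reloader parent) is expected, not a bug. A minimal sketch of that setup, assuming the `fastapi=` constructor argument the hook inspects:

    from fastapi import FastAPI
    from dbos import DBOS

    app = FastAPI()
    # The lifespan middleware installed here calls DBOS.launch() on startup;
    # in a reloader parent process that never starts the app, launch() never
    # runs, which is exactly the case the exit hook now ignores silently.
    DBOS(fastapi=app)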
dbos/_queue.py
@@ -76,7 +76,9 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
             execute_workflow_by_id(dbos, id)
         except OperationalError as e:
             # Ignore serialization error
-            if not isinstance(e.orig, errors.SerializationFailure):
+            if not isinstance(
+                e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
+            ):
                 dbos.logger.warning(
                     f"Exception encountered in queue thread: {traceback.format_exc()}"
                 )
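`LockNotAvailable` becomes routine in this release because the dequeue query in `_sys_db.py` below now uses `SELECT ... FOR UPDATE NOWAIT`: a worker that loses the row-lock race errors out immediately instead of blocking, and simply retries on its next poll. A sketch of the pattern, assuming the psycopg driver that SQLAlchemy wraps here (`dequeue_transaction` is a stand-in, not a dbos function):

    from psycopg import errors
    from sqlalchemy.exc import OperationalError

    def poll_once(dequeue_transaction) -> None:
        try:
            dequeue_transaction()  # runs SELECT ... FOR UPDATE NOWAIT internally
        except OperationalError as e:
            # Both errors mean "another worker got there first": benign, retry
            # on the next queue-thread iteration rather than logging a warning.
            if not isinstance(
                e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
            ):
                raise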
dbos/_sys_db.py
@@ -189,6 +189,10 @@ class SystemDatabase:
             host=config["database"]["hostname"],
             port=config["database"]["port"],
             database="postgres",
+            # fills the "application_name" column in pg_stat_activity
+            query={
+                "application_name": f"dbos_transact_{os.environ.get('DBOS__VMID', 'local')}_{os.environ.get('DBOS__APPVERSION', '')}"
+            },
         )
         engine = sa.create_engine(postgres_db_url)
         with engine.connect() as conn:
@@ -207,6 +211,10 @@ class SystemDatabase:
             host=config["database"]["hostname"],
             port=config["database"]["port"],
             database=sysdb_name,
+            # fills the "application_name" column in pg_stat_activity
+            query={
+                "application_name": f"dbos_transact_{os.environ.get('DBOS__VMID', 'local')}_{os.environ.get('DBOS__APPVERSION', '')}"
+            },
         )

         # Create a connection pool for the system database
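Both connection URLs now tag their sessions with an executor- and version-specific `application_name`, so DBOS connections can be told apart from everything else in `pg_stat_activity`. A standalone sketch of the technique with plain SQLAlchemy (credentials and the name string here are illustrative):

    import sqlalchemy as sa

    url = sa.engine.URL.create(
        "postgresql+psycopg",
        username="postgres",
        password="dbos",
        host="localhost",
        port=5432,
        database="postgres",
        # Extra query parameters become libpq startup options; this one
        # populates pg_stat_activity.application_name for every connection.
        query={"application_name": "dbos_transact_local_"},
    )
    engine = sa.create_engine(url)
    with engine.connect() as conn:
        for pid, app in conn.execute(
            sa.text("SELECT pid, application_name FROM pg_stat_activity")
        ):
            print(pid, app)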
@@ -1307,6 +1315,55 @@ class SystemDatabase:
         # Dequeue functions eligible for this worker and ordered by the time at which they were enqueued.
         # If there is a global or local concurrency limit N, select only the N oldest enqueued
         # functions, else select all of them.
+
+        # First lets figure out how many tasks the worker can dequeue
+        running_tasks_query = (
+            sa.select(
+                SystemSchema.workflow_queue.c.executor_id,
+                sa.func.count().label("task_count"),
+            )
+            .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+            .where(
+                SystemSchema.workflow_queue.c.executor_id.isnot(
+                    None
+                )  # Task is dequeued
+            )
+            .where(
+                SystemSchema.workflow_queue.c.completed_at_epoch_ms.is_(
+                    None
+                )  # Task is not completed
+            )
+            .group_by(SystemSchema.workflow_queue.c.executor_id)
+        )
+        running_tasks_result = c.execute(running_tasks_query).fetchall()
+        running_tasks_result_dict = {row[0]: row[1] for row in running_tasks_result}
+        running_tasks_for_this_worker = running_tasks_result_dict.get(
+            executor_id, 0
+        )  # Get count for current executor
+
+        max_tasks = float("inf")
+        if queue.worker_concurrency is not None:
+            # Worker local concurrency limit should always be >= running_tasks_for_this_worker
+            # This should never happen but a check + warning doesn't hurt
+            if running_tasks_for_this_worker > queue.worker_concurrency:
+                dbos_logger.warning(
+                    f"Number of tasks on this worker ({running_tasks_for_this_worker}) exceeds the worker concurrency limit ({queue.worker_concurrency})"
+                )
+            max_tasks = max(
+                0, queue.worker_concurrency - running_tasks_for_this_worker
+            )
+        if queue.concurrency is not None:
+            total_running_tasks = sum(running_tasks_result_dict.values())
+            # Queue global concurrency limit should always be >= running_tasks_count
+            # This should never happen but a check + warning doesn't hurt
+            if total_running_tasks > queue.concurrency:
+                dbos_logger.warning(
+                    f"Total running tasks ({total_running_tasks}) exceeds the global concurrency limit ({queue.concurrency})"
+                )
+            available_tasks = max(0, queue.concurrency - total_running_tasks)
+            max_tasks = min(max_tasks, available_tasks)
+
+        # Lookup tasks
         query = (
             sa.select(
                 SystemSchema.workflow_queue.c.workflow_uuid,
@@ -1315,29 +1372,25 @@ class SystemDatabase:
             )
             .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
             .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
-            .where(
-                # Only select functions that have not been started yet or have been started by this worker
-                or_(
-                    SystemSchema.workflow_queue.c.executor_id == None,
-                    SystemSchema.workflow_queue.c.executor_id == executor_id,
-                )
-            )
+            .where(SystemSchema.workflow_queue.c.executor_id == None)
             .order_by(SystemSchema.workflow_queue.c.created_at_epoch_ms.asc())
+            .with_for_update(nowait=True)  # Error out early
         )
-        # Set a dequeue limit if necessary
-        if queue.worker_concurrency is not None:
-            query = query.limit(queue.worker_concurrency)
-        elif queue.concurrency is not None:
-            query = query.limit(queue.concurrency)
+        # Apply limit only if max_tasks is finite
+        if max_tasks != float("inf"):
+            query = query.limit(int(max_tasks))

         rows = c.execute(query).fetchall()

-        # Now, get the workflow IDs of functions that have not yet been started
-        dequeued_ids: List[str] = [row[0] for row in rows if row[1] is None]
+        # Get the workflow IDs
+        dequeued_ids: List[str] = [row[0] for row in rows]
+        if len(dequeued_ids) > 0:
+            dbos_logger.debug(
+                f"[{queue.name}] dequeueing {len(dequeued_ids)} task(s)"
+            )
         ret_ids: list[str] = []
-        dbos_logger.debug(f"[{queue.name}] dequeueing {len(dequeued_ids)} task(s)")
-        for id in dequeued_ids:

+        for id in dequeued_ids:
             # If we have a limiter, stop starting functions when the number
             # of functions started this period exceeds the limit.
             if queue.limiter is not None:
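Taken together, these two hunks change dequeue from "select anything unstarted or already mine, capped by the raw limit" to "select only unstarted rows, capped by a budget that subtracts in-flight work, under FOR UPDATE NOWAIT". A worked example of the budget arithmetic, with hypothetical numbers:

    # Hypothetical snapshot: two executors on one queue, worker_concurrency=5,
    # global concurrency=10, and executor-b is polling for work.
    running = {"executor-a": 5, "executor-b": 3}  # dequeued but not completed
    worker_concurrency, concurrency = 5, 10
    executor_id = "executor-b"

    max_tasks = float("inf")
    if worker_concurrency is not None:
        # Local budget: this worker may hold at most 5, already holds 3 -> 2 more
        max_tasks = max(0, worker_concurrency - running.get(executor_id, 0))
    if concurrency is not None:
        # Global budget: 10 allowed in flight across all workers, 8 running -> 2 more
        max_tasks = min(max_tasks, max(0, concurrency - sum(running.values())))
    assert max_tasks == 2  # becomes LIMIT 2 on the FOR UPDATE NOWAIT select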
pyproject.toml
@@ -27,7 +27,7 @@ dependencies = [
 ]
 requires-python = ">=3.9"
 readme = "README.md"
-version = "0.22.0a2"
+version = "0.22.0a5"

 [project.license]
 text = "MIT"
tests/test_config.py
@@ -399,3 +399,26 @@ def test_db_connect_failed(mocker):
         load_config(mock_filename)

     assert "Could not connect to the database" in str(exc_info.value)
+
+
+def test_no_db_wizard(mocker):
+    mock_config = """
+        name: "some-app"
+        language: "python"
+        runtimeConfig:
+            start:
+                - "python3 main.py"
+        database:
+            hostname: 'localhost'
+            port: 5432
+            username: 'postgres'
+            password: 'somerandom'
+
+    """
+    mocker.patch(
+        "builtins.open", side_effect=generate_mock_open(mock_filename, mock_config)
+    )
+
+    with pytest.raises(DBOSInitializationError) as exc_info:
+        load_config(mock_filename)
+    assert "Could not connect" in str(exc_info.value)
tests/test_queue.py
@@ -1,10 +1,11 @@
 import logging
+import multiprocessing
+import multiprocessing.synchronize
 import os
 import subprocess
 import threading
 import time
 import uuid
-from multiprocessing import Process

 import pytest
 import sqlalchemy as sa
@@ -362,11 +363,6 @@ def test_queue_workflow_in_recovered_workflow(dbos: DBOS) -> None:
     return


-###########################
-# TEST WORKER CONCURRENCY #
-###########################
-
-
 def test_one_at_a_time_with_worker_concurrency(dbos: DBOS) -> None:
     wf_counter = 0
     flag = False
@@ -406,12 +402,25 @@ def test_one_at_a_time_with_worker_concurrency(dbos: DBOS) -> None:


 # Declare a workflow globally (we need it to be registered across process under a known name)
+start_event = threading.Event()
+end_event = threading.Event()
+
+
 @DBOS.workflow()
 def worker_concurrency_test_workflow() -> None:
-    pass
+    start_event.set()
+    end_event.wait()


-def run_dbos_test_in_process(i: int) -> None:
+local_concurrency_limit: int = 5
+global_concurrency_limit: int = local_concurrency_limit * 2
+
+
+def run_dbos_test_in_process(
+    i: int,
+    start_signal: multiprocessing.synchronize.Event,
+    end_signal: multiprocessing.synchronize.Event,
+) -> None:
     dbos_config: ConfigFile = {
         "name": "test-app",
         "language": "python",
@@ -428,39 +437,144 @@ def run_dbos_test_in_process(i: int) -> None:
         },
         "telemetry": {},
         "env": {},
+        "application": {},
     }
     dbos = DBOS(config=dbos_config)
     DBOS.launch()

-    Queue("test_queue", worker_concurrency=1)
-    time.sleep(
-        2
-    )  # Give some time for the parent worker to enqueue and for this worker to dequeue
-
+    Queue(
+        "test_queue",
+        worker_concurrency=local_concurrency_limit,
+        concurrency=global_concurrency_limit,
+    )
+    # Wait to dequeue as many tasks as we can locally
+    for _ in range(0, local_concurrency_limit):
+        start_event.wait()
+        start_event.clear()
+    # Signal the parent process we've dequeued
+    start_signal.set()
+    # Wait for the parent process to signal we can move on
+    end_signal.wait()
+    # Complete the task. 1 set should unblock them all
+    end_event.set()
+
+    # Now whatever is in the queue should be cleared up fast (start/end events are already set)
     queue_entries_are_cleaned_up(dbos)

-    DBOS.destroy()
-

+# Test global concurrency and worker utilization by carefully filling the queue up to 1) the local limit 2) the global limit
+# For the global limit, we fill the queue in 2 steps, ensuring that the 2nd worker is able to cap its local utilization even
+# after having dequeued some tasks already
 def test_worker_concurrency_with_n_dbos_instances(dbos: DBOS) -> None:
+    # Ensure children processes do not share global variables (including DBOS instance) with the parent
+    multiprocessing.set_start_method("spawn")
+
+    queue = Queue(
+        "test_queue", limiter={"limit": 0, "period": 1}
+    )  # This process cannot dequeue tasks

-    # Start N proccesses to dequeue
+    # First, start local concurrency limit tasks
+    handles = []
+    for _ in range(0, local_concurrency_limit):
+        handles.append(queue.enqueue(worker_concurrency_test_workflow))
+
+    # Start 2 workers
     processes = []
-    for i in range(0, 10):
+    start_signals = []
+    end_signals = []
+    manager = multiprocessing.Manager()
+    for i in range(0, 2):
         os.environ["DBOS__VMID"] = f"test-executor-{i}"
-        process = Process(target=run_dbos_test_in_process, args=(i,))
+        start_signal = manager.Event()
+        start_signals.append(start_signal)
+        end_signal = manager.Event()
+        end_signals.append(end_signal)
+        process = multiprocessing.Process(
+            target=run_dbos_test_in_process, args=(i, start_signal, end_signal)
+        )
         process.start()
         processes.append(process)
+        del os.environ["DBOS__VMID"]
+
+    # Check that a single worker was able to acquire all the tasks
+    loop = True
+    while loop:
+        for signal in start_signals:
+            signal.wait(timeout=1)
+            if signal.is_set():
+                loop = False
+    executors = []
+    for handle in handles:
+        status = handle.get_status()
+        assert status.status == WorkflowStatusString.PENDING.value
+        executors.append(status.executor_id)
+    assert len(set(executors)) == 1
+
+    # Now enqueue less than the local concurrency limit. Check that the 2nd worker acquired them. We won't have a signal set from the worker so we need to sleep a little.
+    handles = []
+    for _ in range(0, local_concurrency_limit - 1):
+        handles.append(queue.enqueue(worker_concurrency_test_workflow))
+    time.sleep(2)
+    executors = []
+    for handle in handles:
+        status = handle.get_status()
+        assert status.status == WorkflowStatusString.PENDING.value
+        executors.append(status.executor_id)
+    assert len(set(executors)) == 1
+
+    # Now, enqueue two more tasks. This means qlen > local concurrency limit * 2 and qlen > global concurrency limit
+    # We should have 1 tasks PENDING and 1 ENQUEUED, thus meeting both local and global concurrency limits
+    handles = []
+    for _ in range(0, 2):
+        handles.append(queue.enqueue(worker_concurrency_test_workflow))
+    # we can check the signal because the 2nd executor will set it
+    num_dequeued = 0
+    while num_dequeued < 2:
+        for signal in start_signals:
+            signal.wait(timeout=1)
+            if signal.is_set():
+                num_dequeued += 1
+    executors = []
+    statuses = []
+    for handle in handles:
+        status = handle.get_status()
+        statuses.append(status.status)
+        executors.append(status.executor_id)
+    assert set(statuses) == {
+        WorkflowStatusString.PENDING.value,
+        WorkflowStatusString.ENQUEUED.value,
+    }
+    assert len(set(executors)) == 2
+    assert "local" in executors
+
+    # Now check in the DB that global concurrency is met
+    with dbos._sys_db.engine.begin() as conn:
+        query = (
+            sa.select(sa.func.count())
+            .select_from(SystemSchema.workflow_status)
+            .where(
+                SystemSchema.workflow_status.c.status
+                == WorkflowStatusString.PENDING.value
+            )
+        )
+        row = conn.execute(query).fetchone()

-    # Enqueue N tasks but ensure this worker cannot dequeue
+    assert row is not None, "Query returned no results"
+    count = row[0]
+    assert (
+        count == global_concurrency_limit
+    ), f"Expected {global_concurrency_limit} workflows, found {count}"

-    queue = Queue("test_queue", limiter={"limit": 0, "period": 1})
-    for i in range(0, 10):
-        queue.enqueue(worker_concurrency_test_workflow)
+    # Signal the workers they can move on
+    for signal in end_signals:
+        signal.set()

     for process in processes:
         process.join()

+    # Verify all queue entries eventually get cleaned up.
+    assert queue_entries_are_cleaned_up(dbos)
+

 # Test error cases where we have duplicated workflows starting with the same workflow ID.
 def test_duplicate_workflow_id(dbos: DBOS, caplog: pytest.LogCaptureFixture) -> None:
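The reworked test coordinates parent and child processes with manager-backed events rather than sleeps: each worker signals when it has dequeued its share, and the parent releases all workers at once. A self-contained sketch of that handshake pattern, assuming the spawn start method (names here are hypothetical, not test code):

    import multiprocessing

    def worker(start_signal, end_signal) -> None:
        start_signal.set()  # tell the parent this checkpoint was reached
        end_signal.wait()   # block until the parent releases us

    if __name__ == "__main__":
        # spawn gives children a fresh interpreter, so no globals leak from the parent
        multiprocessing.set_start_method("spawn")
        manager = multiprocessing.Manager()
        start, end = manager.Event(), manager.Event()
        p = multiprocessing.Process(target=worker, args=(start, end))
        p.start()
        start.wait(timeout=5)
        assert start.is_set()  # parent observed the checkpoint
        end.set()              # release the worker
        p.join()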
@@ -644,7 +758,9 @@ def test_queue_concurrency_under_recovery(dbos: DBOS) -> None:
     def noop() -> None:
         pass

-    queue = Queue("test_queue", concurrency=2)
+    queue = Queue(
+        "test_queue", worker_concurrency=2
+    )  # covers global concurrency limit because we have a single process
     handle1 = queue.enqueue(blocked_workflow, 0)
     handle2 = queue.enqueue(blocked_workflow, 1)
     handle3 = queue.enqueue(noop)