dbos 1.1.0a4.tar.gz → 1.2.0a4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. {dbos-1.1.0a4 → dbos-1.2.0a4}/PKG-INFO +1 -1
  2. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_admin_server.py +24 -4
  3. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_app_db.py +0 -15
  4. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_core.py +0 -1
  5. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_dbos.py +22 -2
  6. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_dbos_config.py +0 -23
  7. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_event_loop.py +10 -7
  8. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_queue.py +3 -0
  9. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_sys_db.py +37 -61
  10. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_workflow_commands.py +1 -10
  11. {dbos-1.1.0a4 → dbos-1.2.0a4}/pyproject.toml +1 -1
  12. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_admin_server.py +28 -1
  13. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_async.py +42 -1
  14. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_config.py +0 -36
  15. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_workflow_management.py +0 -11
  16. {dbos-1.1.0a4 → dbos-1.2.0a4}/LICENSE +0 -0
  17. {dbos-1.1.0a4 → dbos-1.2.0a4}/README.md +0 -0
  18. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/__init__.py +0 -0
  19. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/__main__.py +0 -0
  20. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_classproperty.py +0 -0
  21. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_client.py +0 -0
  22. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_conductor/conductor.py +0 -0
  23. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_conductor/protocol.py +0 -0
  24. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_context.py +0 -0
  25. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_croniter.py +0 -0
  26. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_debug.py +0 -0
  27. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_docker_pg_helper.py +0 -0
  28. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_error.py +0 -0
  29. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_fastapi.py +0 -0
  30. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_flask.py +0 -0
  31. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_kafka.py +0 -0
  32. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_kafka_message.py +0 -0
  33. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_logger.py +0 -0
  34. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/env.py +0 -0
  35. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/script.py.mako +0 -0
  36. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  37. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
  38. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  39. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  40. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -0
  41. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/933e86bdac6a_add_queue_priority.py +0 -0
  42. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  43. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  44. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  45. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  46. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
  47. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_outcome.py +0 -0
  48. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_recovery.py +0 -0
  49. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_registrations.py +0 -0
  50. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_roles.py +0 -0
  51. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_scheduler.py +0 -0
  52. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_schemas/__init__.py +0 -0
  53. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_schemas/application_database.py +0 -0
  54. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_schemas/system_database.py +0 -0
  55. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_serialization.py +0 -0
  56. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/README.md +0 -0
  57. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  58. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
  59. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  60. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
  61. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  62. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
  63. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
  64. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
  65. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  66. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_tracer.py +0 -0
  67. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_utils.py +0 -0
  68. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/cli/_github_init.py +0 -0
  69. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/cli/_template_init.py +0 -0
  70. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/cli/cli.py +0 -0
  71. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/dbos-config.schema.json +0 -0
  72. {dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/py.typed +0 -0
  73. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/__init__.py +0 -0
  74. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/atexit_no_ctor.py +0 -0
  75. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/atexit_no_launch.py +0 -0
  76. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/classdefs.py +0 -0
  77. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/client_collateral.py +0 -0
  78. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/client_worker.py +0 -0
  79. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/conftest.py +0 -0
  80. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/dupname_classdefs1.py +0 -0
  81. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/dupname_classdefsa.py +0 -0
  82. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/more_classdefs.py +0 -0
  83. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/queuedworkflow.py +0 -0
  84. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_classdecorators.py +0 -0
  85. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_cli.py +0 -0
  86. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_client.py +0 -0
  87. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_concurrency.py +0 -0
  88. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_croniter.py +0 -0
  89. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_dbos.py +0 -0
  90. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_debug.py +0 -0
  91. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_docker_secrets.py +0 -0
  92. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_failures.py +0 -0
  93. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_fastapi.py +0 -0
  94. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_fastapi_roles.py +0 -0
  95. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_flask.py +0 -0
  96. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_kafka.py +0 -0
  97. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_outcome.py +0 -0
  98. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_package.py +0 -0
  99. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_queue.py +0 -0
  100. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_scheduler.py +0 -0
  101. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_schema_migration.py +0 -0
  102. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_singleton.py +0 -0
  103. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_spans.py +0 -0
  104. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_sqlalchemy.py +0 -0
  105. {dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_workflow_introspection.py +0 -0
  106. {dbos-1.1.0a4 → dbos-1.2.0a4}/version/__init__.py +0 -0
{dbos-1.1.0a4 → dbos-1.2.0a4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 1.1.0a4
+Version: 1.2.0a4
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_admin_server.py
@@ -5,8 +5,9 @@ import re
 import threading
 from functools import partial
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
-from typing import TYPE_CHECKING, Any, List, TypedDict
+from typing import TYPE_CHECKING, Any, List, Optional, TypedDict

+from ._context import SetWorkflowID
 from ._error import DBOSException
 from ._logger import dbos_logger
 from ._recovery import recover_pending_workflows
@@ -141,7 +142,11 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
             try:
                 data = json.loads(post_data.decode("utf-8"))
                 start_step: int = data.get("start_step", 1)
-                self._handle_fork(workflow_id, start_step)
+                new_workflow_id: Optional[str] = data.get("new_workflow_id")
+                application_version: Optional[str] = data.get("application_version")
+                self._handle_fork(
+                    workflow_id, start_step, new_workflow_id, application_version
+                )
             except (json.JSONDecodeError, AttributeError) as e:
                 self.send_response(500)
                 self.send_header("Content-Type", "application/json")
@@ -191,9 +196,24 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
         self.end_headers()
         self.wfile.write(response_body)

-    def _handle_fork(self, workflow_id: str, start_step: int) -> None:
+    def _handle_fork(
+        self,
+        workflow_id: str,
+        start_step: int,
+        new_workflow_id: Optional[str],
+        application_version: Optional[str],
+    ) -> None:
         try:
-            handle = self.dbos.fork_workflow(workflow_id, start_step)
+            print(f"Forking workflow {workflow_id} from step {start_step}")
+            if new_workflow_id is not None:
+                with SetWorkflowID(new_workflow_id):
+                    handle = self.dbos.fork_workflow(
+                        workflow_id, start_step, application_version=application_version
+                    )
+            else:
+                handle = self.dbos.fork_workflow(
+                    workflow_id, start_step, application_version=application_version
+                )
             response_body = json.dumps(
                 {
                     "workflow_id": handle.workflow_id,
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_app_db.py
@@ -216,21 +216,6 @@ class ApplicationDatabase:
             for row in rows
         ]

-    def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
-        with self.engine.begin() as conn:
-            max_function_id_row = conn.execute(
-                sa.select(
-                    sa.func.max(ApplicationSchema.transaction_outputs.c.function_id)
-                ).where(
-                    ApplicationSchema.transaction_outputs.c.workflow_uuid
-                    == workflow_uuid
-                )
-            ).fetchone()
-
-            max_function_id = max_function_id_row[0] if max_function_id_row else None
-
-            return max_function_id
-
     def clone_workflow_transactions(
         self, src_workflow_id: str, forked_workflow_id: str, start_step: int
     ) -> None:
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_core.py
@@ -602,7 +602,6 @@ async def start_workflow_async(
     *args: P.args,
     **kwargs: P.kwargs,
 ) -> "WorkflowHandleAsync[R]":
-
     # If the function has a class, add the class object as its first argument
     fself: Optional[object] = None
     if hasattr(func, "__self__"):
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_dbos.py
@@ -90,7 +90,6 @@ from ._context import (
 from ._dbos_config import (
     ConfigFile,
     DBOSConfig,
-    check_config_consistency,
     overwrite_config,
     process_config,
     set_env_vars,
@@ -299,6 +298,7 @@ class DBOS:

         self._launched: bool = False
         self._debug_mode: bool = False
+        self._configured_threadpool: bool = False
         self._sys_db_field: Optional[SystemDatabase] = None
         self._app_db_field: Optional[ApplicationDatabase] = None
         self._registry: DBOSRegistry = _get_or_create_dbos_registry()
@@ -323,7 +323,6 @@ class DBOS:
         unvalidated_config = translate_dbos_config_to_config_file(config)
         if os.environ.get("DBOS__CLOUD") == "true":
             unvalidated_config = overwrite_config(unvalidated_config)
-            check_config_consistency(name=unvalidated_config["name"])

         if unvalidated_config is not None:
             self._config: ConfigFile = process_config(data=unvalidated_config)
@@ -719,6 +718,7 @@ class DBOS:
         **kwargs: P.kwargs,
     ) -> WorkflowHandleAsync[R]:
         """Invoke a workflow function on the event loop, returning a handle to the ongoing execution."""
+        await cls._configure_asyncio_thread_pool()
         return await start_workflow_async(
             _get_dbos_instance(), func, None, True, *args, **kwargs
         )
@@ -736,6 +736,7 @@ class DBOS:
     async def get_workflow_status_async(
         cls, workflow_id: str
     ) -> Optional[WorkflowStatus]:
+        await cls._configure_asyncio_thread_pool()
         """Return the status of a workflow execution."""
         return await asyncio.to_thread(cls.get_workflow_status, workflow_id)

@@ -757,6 +758,7 @@ class DBOS:
     ) -> WorkflowHandleAsync[R]:
         """Return a `WorkflowHandle` for a workflow execution."""
         dbos = _get_dbos_instance()
+        await cls._configure_asyncio_thread_pool()
         if existing_workflow:
             stat = await dbos.get_workflow_status_async(workflow_id)
             if stat is None:
@@ -775,6 +777,7 @@ class DBOS:
         cls, destination_id: str, message: Any, topic: Optional[str] = None
     ) -> None:
         """Send a message to a workflow execution."""
+        await cls._configure_asyncio_thread_pool()
         await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))

     @classmethod
@@ -797,6 +800,7 @@ class DBOS:
         This function is to be called from within a workflow.
         `recv_async` will return the message sent on `topic`, asyncronously waiting if necessary.
         """
+        await cls._configure_asyncio_thread_pool()
         return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))

     @classmethod
@@ -835,6 +839,7 @@ class DBOS:
         It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
         as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
         """
+        await cls._configure_asyncio_thread_pool()
         await asyncio.to_thread(lambda: DBOS.sleep(seconds))

     @classmethod
@@ -869,6 +874,7 @@ class DBOS:
         value(Any): A serializable value to associate with the key

         """
+        await cls._configure_asyncio_thread_pool()
         await asyncio.to_thread(lambda: DBOS.set_event(key, value))

     @classmethod
@@ -901,6 +907,7 @@ class DBOS:
         timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called byt the workflow

         """
+        await cls._configure_asyncio_thread_pool()
         return await asyncio.to_thread(
             lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
         )
@@ -929,6 +936,19 @@ class DBOS:
             fn, "DBOS.cancelWorkflow"
         )

+    @classmethod
+    async def _configure_asyncio_thread_pool(cls) -> None:
+        """
+        Configure the thread pool for asyncio.to_thread.
+
+        This function is called before the first call to asyncio.to_thread.
+        """
+        if _get_dbos_instance()._configured_threadpool:
+            return
+        loop = asyncio.get_running_loop()
+        loop.set_default_executor(_get_dbos_instance()._executor)
+        _get_dbos_instance()._configured_threadpool = True
+
     @classmethod
     def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
         """Resume a workflow by ID."""
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_dbos_config.py
@@ -529,26 +529,3 @@ def overwrite_config(provided_config: ConfigFile) -> ConfigFile:
         del provided_config["env"]

     return provided_config
-
-
-def check_config_consistency(
-    *,
-    name: str,
-    config_file_path: str = DBOS_CONFIG_PATH,
-) -> None:
-    # First load the config file and check whether it is present
-    try:
-        config = load_config(config_file_path, silent=True, run_process_config=False)
-    except FileNotFoundError:
-        dbos_logger.debug(
-            f"No configuration file {config_file_path} found. Skipping consistency check with provided config."
-        )
-        return
-    except Exception as e:
-        raise e
-
-    # Check the name
-    if name != config["name"]:
-        raise DBOSInitializationError(
-            f"Provided app name '{name}' does not match the app name '{config['name']}' in {config_file_path}."
-        )
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_event_loop.py
@@ -1,5 +1,6 @@
 import asyncio
 import threading
+from concurrent.futures import ThreadPoolExecutor
 from typing import Any, Coroutine, Optional, TypeVar


@@ -33,15 +34,17 @@ class BackgroundEventLoop:

     def _run_event_loop(self) -> None:
         self._loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(self._loop)
+        with ThreadPoolExecutor(max_workers=64) as thread_pool:
+            self._loop.set_default_executor(thread_pool)
+            asyncio.set_event_loop(self._loop)

-        self._running = True
-        self._ready.set()  # Signal that the loop is ready
+            self._running = True
+            self._ready.set()  # Signal that the loop is ready

-        try:
-            self._loop.run_forever()
-        finally:
-            self._loop.close()
+            try:
+                self._loop.run_forever()
+            finally:
+                self._loop.close()

     async def _shutdown(self) -> None:
         if self._loop is None:
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_queue.py
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypedDict
 from psycopg import errors
 from sqlalchemy.exc import OperationalError

+from dbos._logger import dbos_logger
 from dbos._utils import GlobalParams

 from ._core import P, R, execute_workflow_by_id, start_workflow, start_workflow_async

@@ -56,6 +57,8 @@ class Queue:
         from ._dbos import _get_or_create_dbos_registry

         registry = _get_or_create_dbos_registry()
+        if self.name in registry.queue_info_map:
+            dbos_logger.warning(f"Queue {name} has already been declared")
         registry.queue_info_map[self.name] = self

     def enqueue(
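
The duplicate-name check only warns and then overwrites the registry entry, so the last declaration wins. A minimal sketch of what now triggers the warning (the queue name is illustrative):

from dbos import Queue

first = Queue("reports")
second = Queue("reports")  # logs: Queue reports has already been declared
# The registry now maps "reports" to `second`; `first` is no longer registered.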
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_sys_db.py
@@ -601,18 +601,6 @@ class SystemDatabase:
             )
         )

-    def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
-        with self.engine.begin() as conn:
-            max_function_id_row = conn.execute(
-                sa.select(
-                    sa.func.max(SystemSchema.operation_outputs.c.function_id)
-                ).where(SystemSchema.operation_outputs.c.workflow_uuid == workflow_uuid)
-            ).fetchone()
-
-            max_function_id = max_function_id_row[0] if max_function_id_row else None
-
-            return max_function_id
-
     def fork_workflow(
         self,
         original_workflow_id: str,
@@ -1722,13 +1710,8 @@ class SystemDatabase:
         if num_recent_queries >= queue.limiter["limit"]:
             return []

-        # Dequeue functions eligible for this worker and ordered by the time at which they were enqueued.
-        # If there is a global or local concurrency limit N, select only the N oldest enqueued
-        # functions, else select all of them.
-
-        # First lets figure out how many tasks are eligible for dequeue.
-        # This means figuring out how many unstarted tasks are within the local and global concurrency limits
-        running_tasks_query = (
+        # Count how many workflows on this queue are currently PENDING both locally and globally.
+        pending_tasks_query = (
             sa.select(
                 SystemSchema.workflow_status.c.executor_id,
                 sa.func.count().label("task_count"),
@@ -1742,41 +1725,37 @@ class SystemDatabase:
             )
             .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
             .where(
-                SystemSchema.workflow_queue.c.started_at_epoch_ms.isnot(
-                    None
-                )  # Task is started
-            )
-            .where(
-                SystemSchema.workflow_queue.c.completed_at_epoch_ms.is_(
-                    None
-                )  # Task is not completed.
+                SystemSchema.workflow_status.c.status
+                == WorkflowStatusString.PENDING.value
             )
             .group_by(SystemSchema.workflow_status.c.executor_id)
         )
-        running_tasks_result = c.execute(running_tasks_query).fetchall()
-        running_tasks_result_dict = {row[0]: row[1] for row in running_tasks_result}
-        running_tasks_for_this_worker = running_tasks_result_dict.get(
-            executor_id, 0
-        )  # Get count for current executor
+        pending_workflows = c.execute(pending_tasks_query).fetchall()
+        pending_workflows_dict = {row[0]: row[1] for row in pending_workflows}
+        local_pending_workflows = pending_workflows_dict.get(executor_id, 0)

+        # Compute max_tasks, the number of workflows that can be dequeued given local and global concurrency limits,
         max_tasks = float("inf")
         if queue.worker_concurrency is not None:
-            max_tasks = max(
-                0, queue.worker_concurrency - running_tasks_for_this_worker
-            )
+            # Print a warning if the local concurrency limit is violated
+            if local_pending_workflows > queue.worker_concurrency:
+                dbos_logger.warning(
+                    f"The number of local pending workflows ({local_pending_workflows}) on queue {queue.name} exceeds the local concurrency limit ({queue.worker_concurrency})"
+                )
+            max_tasks = max(0, queue.worker_concurrency - local_pending_workflows)
+
         if queue.concurrency is not None:
-            total_running_tasks = sum(running_tasks_result_dict.values())
-            # Queue global concurrency limit should always be >= running_tasks_count
-            # This should never happen but a check + warning doesn't hurt
-            if total_running_tasks > queue.concurrency:
+            global_pending_workflows = sum(pending_workflows_dict.values())
+            # Print a warning if the global concurrency limit is violated
+            if global_pending_workflows > queue.concurrency:
                 dbos_logger.warning(
-                    f"Total running tasks ({total_running_tasks}) exceeds the global concurrency limit ({queue.concurrency})"
+                    f"The total number of pending workflows ({global_pending_workflows}) on queue {queue.name} exceeds the global concurrency limit ({queue.concurrency})"
                 )
-            available_tasks = max(0, queue.concurrency - total_running_tasks)
+            available_tasks = max(0, queue.concurrency - global_pending_workflows)
             max_tasks = min(max_tasks, available_tasks)

         # Retrieve the first max_tasks workflows in the queue.
-        # Only retrieve workflows of the appropriate version (or without version set)
+        # Only retrieve workflows of the local version (or without version set)
         query = (
             sa.select(
                 SystemSchema.workflow_queue.c.workflow_uuid,
@@ -1789,8 +1768,10 @@ class SystemDatabase:
                 )
             )
             .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
-            .where(SystemSchema.workflow_queue.c.started_at_epoch_ms == None)
-            .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
+            .where(
+                SystemSchema.workflow_status.c.status
+                == WorkflowStatusString.ENQUEUED.value
+            )
             .where(
                 sa.or_(
                     SystemSchema.workflow_status.c.application_version
@@ -1819,20 +1800,16 @@ class SystemDatabase:
         ret_ids: list[str] = []

         for id in dequeued_ids:
-            # If we have a limiter, stop starting functions when the number
-            # of functions started this period exceeds the limit.
+            # If we have a limiter, stop dequeueing workflows when the number
+            # of workflows started this period exceeds the limit.
             if queue.limiter is not None:
                 if len(ret_ids) + num_recent_queries >= queue.limiter["limit"]:
                     break

-            # To start a function, first set its status to PENDING and update its executor ID
-            res = c.execute(
+            # To start a workflow, first set its status to PENDING and update its executor ID
+            c.execute(
                 SystemSchema.workflow_status.update()
                 .where(SystemSchema.workflow_status.c.workflow_uuid == id)
-                .where(
-                    SystemSchema.workflow_status.c.status
-                    == WorkflowStatusString.ENQUEUED.value
-                )
                 .values(
                     status=WorkflowStatusString.PENDING.value,
                     application_version=app_version,
@@ -1855,16 +1832,15 @@ class SystemDatabase:
                     ),
                 )
             )
-            if res.rowcount > 0:
-                # Then give it a start time and assign the executor ID
-                c.execute(
-                    SystemSchema.workflow_queue.update()
-                    .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
-                    .values(started_at_epoch_ms=start_time_ms)
-                )
-                ret_ids.append(id)
+            # Then give it a start time
+            c.execute(
+                SystemSchema.workflow_queue.update()
+                .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
+                .values(started_at_epoch_ms=start_time_ms)
+            )
+            ret_ids.append(id)

-        # If we have a limiter, garbage-collect all completed functions started
+        # If we have a limiter, garbage-collect all completed workflows started
         # before the period. If there's no limiter, there's no need--they were
         # deleted on completion.
         if queue.limiter is not None:
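
Taken together, these hunks move the dequeue bookkeeping from the queue table's started_at/completed_at timestamps to the workflow status itself (ENQUEUED → PENDING). The concurrency arithmetic reduces to a small pure function; a sketch with illustrative names, not the library's API:

from typing import Dict, Optional

def max_dequeueable(
    pending_by_executor: Dict[str, int],  # PENDING workflow counts, keyed by executor ID
    executor_id: str,
    worker_concurrency: Optional[int],    # per-worker (local) limit, None = unlimited
    concurrency: Optional[int],           # queue-wide (global) limit, None = unlimited
) -> float:
    """How many workflows this worker may dequeue right now."""
    max_tasks = float("inf")
    if worker_concurrency is not None:
        local = pending_by_executor.get(executor_id, 0)
        max_tasks = max(0, worker_concurrency - local)
    if concurrency is not None:
        global_pending = sum(pending_by_executor.values())
        max_tasks = min(max_tasks, max(0, concurrency - global_pending))
    return max_tasks

# Two executors with 3 and 4 PENDING workflows, local limit 5, global limit 10:
assert max_dequeueable({"a": 3, "b": 4}, "a", 5, 10) == 2  # min(5 - 3, 10 - 7)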
{dbos-1.1.0a4 → dbos-1.2.0a4}/dbos/_workflow_commands.py
@@ -103,16 +103,7 @@ def fork_workflow(
     *,
     application_version: Optional[str],
 ) -> str:
-    def get_max_function_id(workflow_uuid: str) -> int:
-        max_transactions = app_db.get_max_function_id(workflow_uuid) or 0
-        max_operations = sys_db.get_max_function_id(workflow_uuid) or 0
-        return max(max_transactions, max_operations)
-
-    max_function_id = get_max_function_id(workflow_id)
-    if max_function_id > 0 and start_step > max_function_id:
-        raise DBOSException(
-            f"Cannot fork workflow {workflow_id} from step {start_step}. The workflow has {max_function_id} steps."
-        )
+
     ctx = get_local_dbos_context()
     if ctx is not None and len(ctx.id_assigned_for_next_workflow) > 0:
         forked_workflow_id = ctx.id_assigned_for_next_workflow
{dbos-1.1.0a4 → dbos-1.2.0a4}/pyproject.toml
@@ -27,7 +27,7 @@ dependencies = [
 ]
 requires-python = ">=3.9"
 readme = "README.md"
-version = "1.1.0a4"
+version = "1.2.0a4"

 [project.license]
 text = "MIT"
{dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_admin_server.py
@@ -11,7 +11,14 @@ import sqlalchemy as sa
 from requests.exceptions import ConnectionError

 # Public API
-from dbos import DBOS, DBOSConfig, Queue, SetWorkflowID, _workflow_commands
+from dbos import (
+    DBOS,
+    DBOSConfig,
+    Queue,
+    SetWorkflowID,
+    WorkflowHandle,
+    _workflow_commands,
+)
 from dbos._error import DBOSWorkflowCancelledError
 from dbos._schemas.system_database import SystemSchema
 from dbos._sys_db import SystemDatabase, WorkflowStatusString
@@ -425,4 +432,24 @@ def test_admin_workflow_fork(dbos: DBOS, sys_db: SystemDatabase) -> None:
         time.sleep(1)
         count += 1

+    # test for new_workflow_id and app version
+
+    new_version = "my_new_version"
+    GlobalParams.app_version = new_version
+
+    response = requests.post(
+        f"http://localhost:3001/workflows/{wfUuid}/fork",
+        json={"new_workflow_id": "123456", "application_version": new_version},
+        timeout=5,
+    )
+    assert response.status_code == 200
+
+    new_workflow_id = response.json().get("workflow_id")
+    assert new_workflow_id == "123456", "Expected new workflow ID is not 123456"
+
+    handle: WorkflowHandle[None] = dbos.retrieve_workflow(new_workflow_id)
+    assert (
+        handle.get_status().app_version == new_version
+    ), f"Expected application version to be {new_version}, but got {handle.get_status().app_version}"
+
     assert worked, "Workflow did not finish successfully"
{dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_async.py
@@ -1,7 +1,7 @@
 import asyncio
 import time
 import uuid
-from typing import Optional
+from typing import List, Optional

 import pytest
 import sqlalchemy as sa
@@ -523,3 +523,44 @@ async def test_workflow_timeout_async(dbos: DBOS) -> None:
     with pytest.raises(Exception) as exc_info:
         await (await DBOS.retrieve_workflow_async(direct_child)).get_result()
     assert "was cancelled" in str(exc_info.value)
+
+
+@pytest.mark.asyncio
+async def test_max_parallel_workflows(dbos: DBOS) -> None:
+    queue = Queue("parallel_queue")
+
+    @DBOS.workflow()
+    async def test_workflow(i: int) -> int:
+        await DBOS.sleep_async(5)
+        return i
+
+    begin_time = time.time()
+
+    tasks: List[WorkflowHandleAsync[int]] = []
+    for i in range(50):
+        tasks.append(await DBOS.start_workflow_async(test_workflow, i))
+
+    # Wait for all tasks to complete
+    for i in range(50):
+        assert (await tasks[i].get_result()) == i, f"Task {i} should return {i}"
+
+    end_time = time.time()
+    assert (
+        end_time - begin_time < 10
+    ), "All tasks should complete in less than 10 seconds"
+
+    # Test enqueues
+    begin_time = time.time()
+    tasks = []
+
+    for i in range(50):
+        tasks.append(await queue.enqueue_async(test_workflow, i))
+
+    # Wait for all tasks to complete
+    for i in range(50):
+        assert (await tasks[i].get_result()) == i, f"Task {i} should return {i}"
+
+    end_time = time.time()
+    assert (
+        end_time - begin_time < 10
+    ), "All enqueued tasks should complete in less than 10 seconds"
{dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_config.py
@@ -14,7 +14,6 @@ from dbos import DBOS
 from dbos._dbos_config import (
     ConfigFile,
     DBOSConfig,
-    check_config_consistency,
     configure_db_engine_parameters,
     load_config,
     overwrite_config,
@@ -1096,41 +1095,6 @@ def test_overwrite_config_missing_dbos_database_url(mocker):
     )


-####################
-# PROVIDED CONFIGS vs CONFIG FILE
-####################
-
-
-def test_no_discrepancy(mocker):
-    mock_config = """
-        name: "stock-prices" \
-    """
-    mocker.patch(
-        "builtins.open", side_effect=generate_mock_open("dbos-config.yaml", mock_config)
-    )
-    check_config_consistency(name="stock-prices")
-
-
-def test_name_does_no_match(mocker):
-    mock_config = """
-        name: "stock-prices" \
-    """
-    mocker.patch(
-        "builtins.open", side_effect=generate_mock_open("dbos-config.yaml", mock_config)
-    )
-    with pytest.raises(DBOSInitializationError) as exc_info:
-        check_config_consistency(name="stock-prices-wrong")
-    assert (
-        "Provided app name 'stock-prices-wrong' does not match the app name 'stock-prices' in dbos-config.yaml"
-        in str(exc_info.value)
-    )
-
-
-def test_no_config_file():
-    # Handles FileNotFoundError
-    check_config_consistency(name="stock-prices")
-
-
 ####################
 # DATABASES CONNECTION POOLS
 ####################
{dbos-1.1.0a4 → dbos-1.2.0a4}/tests/test_workflow_management.py
@@ -482,17 +482,6 @@ def test_restart_fromsteps_steps_tr(
     assert stepFourCount == 2
     assert trFiveCount == 3

-    # invalid step
-    try:
-        forked_handle = DBOS.fork_workflow(wfid, 7)
-        assert forked_handle.workflow_id != wfid
-        forked_handle.get_result()
-    except Exception as e:
-        print(f"Exception: {e}")
-        assert isinstance(e, DBOSException)
-        assert "Cannot fork workflow" in str(e)
-    assert trOneCount == 1
-
     # invalid < 1 will default to 1
     forked_handle = DBOS.fork_workflow(wfid, -1)
     assert forked_handle.workflow_id != wfid