dbos 0.8.0a7__tar.gz → 0.9.0a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. {dbos-0.8.0a7 → dbos-0.9.0a0}/PKG-INFO +1 -1
  2. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/dbos.py +3 -0
  3. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py +2 -1
  4. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/versions/d76646551a6b_job_queue_limiter.py +2 -1
  5. dbos-0.9.0a0/dbos/migrations/versions/d76646551a6c_workflow_queue.py +28 -0
  6. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/versions/eab0cc1d9a14_job_queue.py +2 -1
  7. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/queue.py +16 -6
  8. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/request.py +1 -0
  9. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/scheduler/scheduler.py +7 -1
  10. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/schemas/system_database.py +2 -2
  11. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/system_database.py +23 -19
  12. {dbos-0.8.0a7 → dbos-0.9.0a0}/pyproject.toml +1 -1
  13. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_queue.py +118 -3
  14. {dbos-0.8.0a7 → dbos-0.9.0a0}/LICENSE +0 -0
  15. {dbos-0.8.0a7 → dbos-0.9.0a0}/README.md +0 -0
  16. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/__init__.py +0 -0
  17. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/admin_sever.py +0 -0
  18. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/application_database.py +0 -0
  19. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/cli.py +0 -0
  20. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/context.py +0 -0
  21. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/core.py +0 -0
  22. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/dbos-config.schema.json +0 -0
  23. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/dbos_config.py +0 -0
  24. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/decorators.py +0 -0
  25. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/error.py +0 -0
  26. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/fastapi.py +0 -0
  27. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/flask.py +0 -0
  28. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/kafka.py +0 -0
  29. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/kafka_message.py +0 -0
  30. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/logger.py +0 -0
  31. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/env.py +0 -0
  32. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/script.py.mako +0 -0
  33. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  34. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  35. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/py.typed +0 -0
  36. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/recovery.py +0 -0
  37. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/registrations.py +0 -0
  38. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/roles.py +0 -0
  39. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/scheduler/croniter.py +0 -0
  40. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/schemas/__init__.py +0 -0
  41. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/schemas/application_database.py +0 -0
  42. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/README.md +0 -0
  43. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/__package/__init__.py +0 -0
  44. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/__package/main.py +0 -0
  45. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/__package/schema.py +0 -0
  46. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/alembic.ini +0 -0
  47. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/dbos-config.yaml.dbos +0 -0
  48. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/migrations/env.py.dbos +0 -0
  49. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/migrations/script.py.mako +0 -0
  50. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
  51. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/templates/hello/start_postgres_docker.py +0 -0
  52. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/tracer.py +0 -0
  53. {dbos-0.8.0a7 → dbos-0.9.0a0}/dbos/utils.py +0 -0
  54. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/__init__.py +0 -0
  55. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/atexit_no_ctor.py +0 -0
  56. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/atexit_no_launch.py +0 -0
  57. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/classdefs.py +0 -0
  58. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/conftest.py +0 -0
  59. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/more_classdefs.py +0 -0
  60. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/scheduler/test_croniter.py +0 -0
  61. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/scheduler/test_scheduler.py +0 -0
  62. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_admin_server.py +0 -0
  63. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_classdecorators.py +0 -0
  64. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_concurrency.py +0 -0
  65. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_config.py +0 -0
  66. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_dbos.py +0 -0
  67. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_failures.py +0 -0
  68. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_fastapi.py +0 -0
  69. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_fastapi_roles.py +0 -0
  70. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_flask.py +0 -0
  71. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_kafka.py +0 -0
  72. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_package.py +0 -0
  73. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_schema_migration.py +0 -0
  74. {dbos-0.8.0a7 → dbos-0.9.0a0}/tests/test_singleton.py +0 -0
  75. {dbos-0.8.0a7 → dbos-0.9.0a0}/version/__init__.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: dbos
- Version: 0.8.0a7
+ Version: 0.9.0a0
  Summary: Ultra-lightweight durable execution in Python
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
  License: MIT
dbos/dbos.py
@@ -550,6 +550,7 @@ class DBOS:
              recovery_attempts=stat["recovery_attempts"],
              class_name=stat["class_name"],
              config_name=stat["config_name"],
+             queue_name=stat["queue_name"],
              authenticated_user=stat["authenticated_user"],
              assumed_role=stat["assumed_role"],
              authenticated_roles=(
@@ -756,6 +757,7 @@ class WorkflowStatus:
      name(str): The workflow function name
      class_name(str): For member functions, the name of the class containing the workflow function
      config_name(str): For instance member functions, the name of the class instance for the execution
+     queue_name(str): For workflows that are or were queued, the queue name
      authenticated_user(str): The user who invoked the workflow
      assumed_role(str): The access role used by the user to allow access to the workflow function
      authenticated_roles(List[str]): List of all access roles available to the authenticated user
@@ -768,6 +770,7 @@ class WorkflowStatus:
      name: str
      class_name: Optional[str]
      config_name: Optional[str]
+     queue_name: Optional[str]
      authenticated_user: Optional[str]
      assumed_role: Optional[str]
      authenticated_roles: Optional[List[str]]
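
The three dbos.py hunks above surface the queue name on WorkflowStatus. A minimal sketch of reading it, assuming a configured and launched DBOS app and the top-level imports the package's tests use; the queue and workflow names here are illustrative, while the enqueue/get_status calls mirror tests/test_queue.py below:

# Illustrative sketch: reading the new queue_name field from a handle's status.
from dbos import DBOS, Queue

queue = Queue("example_queue")

@DBOS.workflow()
def greet(name: str) -> str:
    return f"Hello, {name}!"

# After DBOS.launch(), enqueue the workflow and inspect its status:
handle = queue.enqueue(greet, "DBOS")
assert handle.get_status().queue_name == "example_queue"  # None for workflows never queued
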
dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py
@@ -1,4 +1,5 @@
- """fix_job_queue
+ """
+ Fix job queue PK.
  
  Revision ID: 50f3227f0b4b
  Revises: eab0cc1d9a14
dbos/migrations/versions/d76646551a6b_job_queue_limiter.py
@@ -1,4 +1,5 @@
- """job_queue_limiter
+ """
+ Adjust workflow queue to add columns for rate limiter.
  
  Revision ID: d76646551a6b
  Revises: 50f3227f0b4b
dbos/migrations/versions/d76646551a6c_workflow_queue.py (new file)
@@ -0,0 +1,28 @@
+ """workflow_queue
+
+ Revision ID: d76646551a6c
+ Revises: d76646551a6b
+ Create Date: 2024-09-27 12:00:00.0
+
+ """
+
+ from typing import Sequence, Union
+
+ import sqlalchemy as sa
+ from alembic import op
+
+ # revision identifiers, used by Alembic.
+ revision: str = "d76646551a6c"
+ down_revision: Union[str, None] = "d76646551a6b"
+ branch_labels: Union[str, Sequence[str], None] = None
+ depends_on: Union[str, Sequence[str], None] = None
+
+
+ def upgrade() -> None:
+     op.rename_table("job_queue", "workflow_queue", schema="dbos")
+     op.execute("CREATE VIEW dbos.job_queue AS SELECT * FROM dbos.workflow_queue;")
+
+
+ def downgrade() -> None:
+     op.execute("DROP VIEW dbos.job_queue;")
+     op.rename_table("workflow_queue", "job_queue", schema="dbos")
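
This migration renames dbos.job_queue to dbos.workflow_queue and leaves a pass-through view under the old name, so SQL written against dbos.job_queue should keep returning rows from the renamed table. A hedged sanity check, assuming a plain SQLAlchemy connection; the connection URL is a placeholder for your system database:

# Illustrative sketch: the compatibility view mirrors the renamed table.
import sqlalchemy as sa

engine = sa.create_engine("postgresql://localhost/my_app_dbos_sys")  # placeholder URL
with engine.connect() as c:
    via_view = c.execute(sa.text("SELECT count(*) FROM dbos.job_queue")).scalar()
    via_table = c.execute(sa.text("SELECT count(*) FROM dbos.workflow_queue")).scalar()
    assert via_view == via_table  # the view is just SELECT * over workflow_queue
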
dbos/migrations/versions/eab0cc1d9a14_job_queue.py
@@ -1,4 +1,5 @@
- """job_queue
+ """
+ Add workflow queue table.
  
  Revision ID: eab0cc1d9a14
  Revises: a3b18ad34abe
dbos/queue.py
@@ -8,21 +8,31 @@ if TYPE_CHECKING:
      from dbos.dbos import DBOS, Workflow, WorkflowHandle
  
  
- # Limit the maximum number of functions from this queue
- # that can be started in a given period. If the limit is 5
- # and the period is 10, no more than 5 functions can be
- # started per 10 seconds.
- class Limiter(TypedDict):
+ class QueueRateLimit(TypedDict):
+     """
+     Limit the maximum number of workflows from this queue that can be started in a given period.
+
+     If the limit is 5 and the period is 10, no more than 5 functions can be
+     started per 10 seconds.
+     """
+
      limit: int
      period: float
  
  
  class Queue:
+     """
+     Workflow queue.
+
+     Workflow queues allow workflows to be started at a later time, based on concurrency and
+     rate limits.
+     """
+
      def __init__(
          self,
          name: str,
          concurrency: Optional[int] = None,
-         limiter: Optional[Limiter] = None,
+         limiter: Optional[QueueRateLimit] = None,
      ) -> None:
          self.name = name
          self.concurrency = concurrency
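
To see the renamed TypedDict in context: a short sketch of declaring queues with a concurrency cap and a rate limit, following the constructor above. The queue names and the workflow are illustrative; the constructor arguments come straight from this hunk and from tests/test_queue.py below.

# Illustrative sketch: queues with a concurrency cap and a rate limit.
from dbos import DBOS, Queue

serial_queue = Queue("serial", concurrency=1)  # at most one workflow runs at a time
burst_queue = Queue("burst", limiter={"limit": 5, "period": 10.0})  # <= 5 starts per 10 s

@DBOS.workflow()
def task(n: int) -> int:
    return n * 2

# After DBOS.launch(): 15 tasks drain in ~3 ten-second rate-limit windows.
handles = [burst_queue.enqueue(task, n) for n in range(15)]
results = [h.get_result() for h in handles]
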
dbos/request.py
@@ -13,6 +13,7 @@ class Address(NamedTuple):
  class Request:
      """
      Serializable HTTP Request object.
+
      Attributes:
          base_url(str): Base of URL requested, as in application code
          client(Optional[Address]): HTTP Client
dbos/scheduler/scheduler.py
@@ -2,6 +2,7 @@ import threading
  from datetime import datetime, timezone
  from typing import TYPE_CHECKING, Callable
  
+ from dbos.logger import dbos_logger
  from dbos.queue import Queue
  
  if TYPE_CHECKING:
@@ -18,7 +19,12 @@ scheduler_queue: Queue
  def scheduler_loop(
      func: ScheduledWorkflow, cron: str, stop_event: threading.Event
  ) -> None:
-     iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
+     try:
+         iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
+     except Exception as e:
+         dbos_logger.error(
+             f'Cannot run scheduled function {func.__name__}. Invalid crontab "{cron}"'
+         )
      while not stop_event.is_set():
          nextExecTime = iter.get_next(datetime)
          sleepTime = nextExecTime - datetime.now(timezone.utc)
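
Since second_at_beginning=True, the crontab validated here is the six-field form with a leading seconds field; an invalid string is now reported via the error log added above. A sketch of a schedule that parses cleanly, assuming the package's @DBOS.scheduled decorator and its (scheduled_time, actual_time) workflow signature, which are not shown in this diff:

# Illustrative sketch: a six-field crontab (seconds first), assuming @DBOS.scheduled.
from datetime import datetime
from dbos import DBOS

@DBOS.scheduled("*/30 * * * * *")  # every 30 seconds
@DBOS.workflow()
def heartbeat(scheduled_time: datetime, actual_time: datetime) -> None:
    DBOS.logger.info(f"scheduled={scheduled_time} actual={actual_time}")
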
dbos/schemas/system_database.py
@@ -142,8 +142,8 @@ class SystemSchema:
          Column("last_run_time", BigInteger, nullable=False),
      )
  
-     job_queue = Table(
-         "job_queue",
+     workflow_queue = Table(
+         "workflow_queue",
          metadata_obj,
          Column(
              "workflow_uuid",
dbos/system_database.py
@@ -983,7 +983,7 @@ class SystemDatabase:
          return value
  
      def _flush_workflow_status_buffer(self) -> None:
-         """Export the workflow status buffer to the database, up to the batch size"""
+         """Export the workflow status buffer to the database, up to the batch size."""
          if len(self._workflow_status_buffer) == 0:
              return
  
@@ -1079,7 +1079,7 @@ class SystemDatabase:
      def enqueue(self, workflow_id: str, queue_name: str) -> None:
          with self.engine.begin() as c:
              c.execute(
-                 pg.insert(SystemSchema.job_queue)
+                 pg.insert(SystemSchema.workflow_queue)
                  .values(
                      workflow_uuid=workflow_id,
                      queue_name=queue_name,
@@ -1099,10 +1099,13 @@
          if queue.limiter is not None:
              query = (
                  sa.select(sa.func.count())
-                 .select_from(SystemSchema.job_queue)
-                 .where(SystemSchema.job_queue.c.started_at_epoch_ms.isnot(None))
+                 .select_from(SystemSchema.workflow_queue)
+                 .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
                  .where(
-                     SystemSchema.job_queue.c.started_at_epoch_ms
+                     SystemSchema.workflow_queue.c.started_at_epoch_ms.isnot(None)
+                 )
+                 .where(
+                     SystemSchema.workflow_queue.c.started_at_epoch_ms
                      > start_time_ms - limiter_period_ms
                  )
              )
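
The reworked count query implements a sliding window: it now also filters by queue_name, counting only entries on this queue whose started_at_epoch_ms falls within the last limiter period. A plain-Python sketch of the same window arithmetic, assuming the dequeue allowance is the limit minus that count; the function and variable names are illustrative, not from the package:

# Illustrative sketch: sliding-window arithmetic behind the limiter query.
import time
from typing import List, Optional

def dequeue_allowance(
    started_at_ms: List[Optional[int]], limit: int, period_s: float
) -> int:
    start_time_ms = int(time.time() * 1000)
    limiter_period_ms = int(period_s * 1000)
    # Count workflows already started inside the current window,
    # as the started_at_epoch_ms filters above do in SQL.
    recent = sum(
        1
        for t in started_at_ms
        if t is not None and t > start_time_ms - limiter_period_ms
    )
    return max(0, limit - recent)

assert dequeue_allowance([], limit=5, period_s=10.0) == 5  # empty window: full budget
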
@@ -1116,12 +1119,12 @@
          # functions, else select all of them.
          query = (
              sa.select(
-                 SystemSchema.job_queue.c.workflow_uuid,
-                 SystemSchema.job_queue.c.started_at_epoch_ms,
+                 SystemSchema.workflow_queue.c.workflow_uuid,
+                 SystemSchema.workflow_queue.c.started_at_epoch_ms,
              )
-             .where(SystemSchema.job_queue.c.queue_name == queue.name)
-             .where(SystemSchema.job_queue.c.completed_at_epoch_ms == None)
-             .order_by(SystemSchema.job_queue.c.created_at_epoch_ms.asc())
+             .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
+             .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms == None)
+             .order_by(SystemSchema.workflow_queue.c.created_at_epoch_ms.asc())
          )
          if queue.concurrency is not None:
              query = query.limit(queue.concurrency)
@@ -1152,8 +1155,8 @@
  
              # Then give it a start time
              c.execute(
-                 SystemSchema.job_queue.update()
-                 .where(SystemSchema.job_queue.c.workflow_uuid == id)
+                 SystemSchema.workflow_queue.update()
+                 .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
                  .values(started_at_epoch_ms=start_time_ms)
              )
              ret_ids.append(id)
@@ -1163,10 +1166,11 @@
          # deleted on completion.
          if queue.limiter is not None:
              c.execute(
-                 sa.delete(SystemSchema.job_queue)
-                 .where(SystemSchema.job_queue.c.completed_at_epoch_ms != None)
+                 sa.delete(SystemSchema.workflow_queue)
+                 .where(SystemSchema.workflow_queue.c.completed_at_epoch_ms != None)
+                 .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
                  .where(
-                     SystemSchema.job_queue.c.started_at_epoch_ms
+                     SystemSchema.workflow_queue.c.started_at_epoch_ms
                      < start_time_ms - limiter_period_ms
                  )
              )
@@ -1178,13 +1182,13 @@
          with self.engine.begin() as c:
              if queue.limiter is None:
                  c.execute(
-                     sa.delete(SystemSchema.job_queue).where(
-                         SystemSchema.job_queue.c.workflow_uuid == workflow_id
+                     sa.delete(SystemSchema.workflow_queue).where(
+                         SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
                      )
                  )
              else:
                  c.execute(
-                     sa.update(SystemSchema.job_queue)
-                     .where(SystemSchema.job_queue.c.workflow_uuid == workflow_id)
+                     sa.update(SystemSchema.workflow_queue)
+                     .where(SystemSchema.workflow_queue.c.workflow_uuid == workflow_id)
                      .values(completed_at_epoch_ms=int(time.time() * 1000))
                  )
pyproject.toml
@@ -23,7 +23,7 @@ dependencies = [
  ]
  requires-python = ">=3.9"
  readme = "README.md"
- version = "0.8.0a7"
+ version = "0.9.0a0"
  
  [project.license]
  text = "MIT"
tests/test_queue.py
@@ -15,7 +15,7 @@ def queue_entries_are_cleaned_up(dbos: DBOS) -> bool:
      success = False
      for i in range(max_tries):
          with dbos._sys_db.engine.begin() as c:
-             query = sa.select(sa.func.count()).select_from(SystemSchema.job_queue)
+             query = sa.select(sa.func.count()).select_from(SystemSchema.workflow_queue)
              row = c.execute(query).fetchone()
              assert row is not None
              count = row[0]
@@ -77,6 +77,7 @@ def test_one_at_a_time(dbos: DBOS) -> None:
  
      queue = Queue("test_queue", 1)
      handle1 = queue.enqueue(workflow_one)
+     assert handle1.get_status().queue_name == "test_queue"
      handle2 = queue.enqueue(workflow_two)
  
      main_thread_event.wait()
@@ -120,6 +121,40 @@ def test_one_at_a_time_with_limiter(dbos: DBOS) -> None:
      assert handle2.get_result() == None
      assert flag
      assert wf_counter == 1
+     assert queue_entries_are_cleaned_up(dbos)
+
+
+ def test_queue_childwf(dbos: DBOS) -> None:
+     queue = Queue("child_queue", 3)
+
+     @DBOS.workflow()
+     def test_child_wf(val: str) -> str:
+         DBOS.recv("release", 30)
+         return val + "d"
+
+     @DBOS.workflow()
+     def test_workflow(var1: str, var2: str) -> str:
+         wfh1 = queue.enqueue(test_child_wf, var1)
+         wfh2 = queue.enqueue(test_child_wf, var2)
+         wfh3 = queue.enqueue(test_child_wf, var1)
+         wfh4 = queue.enqueue(test_child_wf, var2)
+
+         DBOS.sleep(1)
+         assert wfh4.get_status().status == "ENQUEUED"
+
+         DBOS.send(wfh1.get_workflow_id(), "go", "release")
+         DBOS.send(wfh2.get_workflow_id(), "go", "release")
+         DBOS.send(wfh3.get_workflow_id(), "go", "release")
+         DBOS.send(wfh4.get_workflow_id(), "go", "release")
+
+         return (
+             wfh1.get_result()
+             + wfh2.get_result()
+             + wfh3.get_result()
+             + wfh4.get_result()
+         )
+
+     assert test_workflow("a", "b") == "adbdadbd"
  
  
  def test_queue_step(dbos: DBOS) -> None:
@@ -192,16 +227,96 @@ def test_limiter(dbos: DBOS) -> None:
      # Verify that each "wave" of tasks started at the ~same time.
      for wave in range(num_waves):
          for i in range(wave * limit, (wave + 1) * limit - 1):
-             assert times[i + 1] - times[i] < 0.1
+             assert times[i + 1] - times[i] < 0.2
+
+     # Verify that the gap between "waves" is ~equal to the period
+     for wave in range(num_waves - 1):
+         assert times[limit * (wave + 1)] - times[limit * wave] > period - 0.2
+         assert times[limit * (wave + 1)] - times[limit * wave] < period + 0.2
+
+     # Verify all workflows get the SUCCESS status eventually
+     dbos._sys_db.wait_for_buffer_flush()
+     for h in handles:
+         assert h.get_status().status == WorkflowStatusString.SUCCESS.value
+
+     # Verify all queue entries eventually get cleaned up.
+     assert queue_entries_are_cleaned_up(dbos)
+
+
+ def test_multiple_queues(dbos: DBOS) -> None:
+
+     wf_counter = 0
+     flag = False
+     workflow_event = threading.Event()
+     main_thread_event = threading.Event()
+
+     @DBOS.workflow()
+     def workflow_one() -> None:
+         nonlocal wf_counter
+         wf_counter += 1
+         main_thread_event.set()
+         workflow_event.wait()
+
+     @DBOS.workflow()
+     def workflow_two() -> None:
+         nonlocal flag
+         flag = True
+
+     concurrency_queue = Queue("test_concurrency_queue", 1)
+     handle1 = concurrency_queue.enqueue(workflow_one)
+     assert handle1.get_status().queue_name == "test_concurrency_queue"
+     handle2 = concurrency_queue.enqueue(workflow_two)
+
+     @DBOS.workflow()
+     def limited_workflow(var1: str, var2: str) -> float:
+         assert var1 == "abc" and var2 == "123"
+         return time.time()
+
+     limit = 5
+     period = 2
+     limiter_queue = Queue(
+         "test_limit_queue", limiter={"limit": limit, "period": period}
+     )
+
+     handles: list[WorkflowHandle[float]] = []
+     times: list[float] = []
+
+     # Launch a number of tasks equal to three times the limit.
+     # This should lead to three "waves" of the limit tasks being
+     # executed simultaneously, followed by a wait of the period,
+     # followed by the next wave.
+     num_waves = 3
+     for _ in range(limit * num_waves):
+         h = limiter_queue.enqueue(limited_workflow, "abc", "123")
+         handles.append(h)
+     for h in handles:
+         times.append(h.get_result())
+
+     # Verify that each "wave" of tasks started at the ~same time.
+     for wave in range(num_waves):
+         for i in range(wave * limit, (wave + 1) * limit - 1):
+             assert times[i + 1] - times[i] < 0.2
  
      # Verify that the gap between "waves" is ~equal to the period
      for wave in range(num_waves - 1):
-         assert times[limit * wave] - times[limit * wave - 1] < period + 0.1
+         assert times[limit * (wave + 1)] - times[limit * wave] > period - 0.2
+         assert times[limit * (wave + 1)] - times[limit * wave] < period + 0.2
  
      # Verify all workflows get the SUCCESS status eventually
      dbos._sys_db.wait_for_buffer_flush()
      for h in handles:
          assert h.get_status().status == WorkflowStatusString.SUCCESS.value
  
+     # Verify that during all this time, the second task
+     # was not launched on the concurrency-limited queue.
+     # Then, finish the first task and verify the second
+     # task runs on schedule.
+     assert not flag
+     workflow_event.set()
+     assert handle1.get_result() == None
+     assert handle2.get_result() == None
+     assert flag
+     assert wf_counter == 1
+
      # Verify all queue entries eventually get cleaned up.
      assert queue_entries_are_cleaned_up(dbos)