dbos 2.4.0a3__py3-none-any.whl → 2.6.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbos has been flagged as potentially problematic; consult the registry's advisory page for this release for more details.

dbos/_queue.py CHANGED
@@ -44,6 +44,7 @@ class Queue:
44
44
  worker_concurrency: Optional[int] = None,
45
45
  priority_enabled: bool = False,
46
46
  partition_queue: bool = False,
47
+ polling_interval_sec: float = 1.0,
47
48
  ) -> None:
48
49
  if (
49
50
  worker_concurrency is not None
@@ -53,12 +54,15 @@ class Queue:
53
54
  raise ValueError(
54
55
  "worker_concurrency must be less than or equal to concurrency"
55
56
  )
57
+ if polling_interval_sec <= 0.0:
58
+ raise ValueError("polling_interval_sec must be positive")
56
59
  self.name = name
57
60
  self.concurrency = concurrency
58
61
  self.worker_concurrency = worker_concurrency
59
62
  self.limiter = limiter
60
63
  self.priority_enabled = priority_enabled
61
64
  self.partition_queue = partition_queue
65
+ self.polling_interval_sec = polling_interval_sec
62
66
  from ._dbos import _get_or_create_dbos_registry
63
67
 
64
68
  registry = _get_or_create_dbos_registry()
@@ -108,50 +112,103 @@ class Queue:
108
112
  return await start_workflow_async(dbos, func, self.name, False, *args, **kwargs)
109
113
 
110
114
 
111
- def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
112
- polling_interval = 1.0
113
- min_polling_interval = 1.0
114
- max_polling_interval = 120.0
115
+ def queue_worker_thread(
116
+ stop_event: threading.Event, dbos: "DBOS", queue: Queue
117
+ ) -> None:
118
+ """Worker thread for processing a single queue."""
119
+ polling_interval = queue.polling_interval_sec
120
+ min_polling_interval = queue.polling_interval_sec
121
+ max_polling_interval = max(queue.polling_interval_sec, 120.0)
122
+
115
123
  while not stop_event.is_set():
116
124
  # Wait for the polling interval with jitter
117
125
  if stop_event.wait(timeout=polling_interval * random.uniform(0.95, 1.05)):
118
126
  return
119
- queues = dict(dbos._registry.queue_info_map)
120
- for _, queue in queues.items():
121
- try:
122
- if queue.partition_queue:
123
- dequeued_workflows = []
124
- queue_partition_keys = dbos._sys_db.get_queue_partitions(queue.name)
125
- for key in queue_partition_keys:
126
- dequeued_workflows += dbos._sys_db.start_queued_workflows(
127
- queue,
128
- GlobalParams.executor_id,
129
- GlobalParams.app_version,
130
- key,
131
- )
132
- else:
127
+
128
+ try:
129
+ if queue.partition_queue:
130
+ queue_partition_keys = dbos._sys_db.get_queue_partitions(queue.name)
131
+ for key in queue_partition_keys:
133
132
  dequeued_workflows = dbos._sys_db.start_queued_workflows(
134
- queue, GlobalParams.executor_id, GlobalParams.app_version, None
133
+ queue,
134
+ GlobalParams.executor_id,
135
+ GlobalParams.app_version,
136
+ key,
135
137
  )
138
+ for id in dequeued_workflows:
139
+ execute_workflow_by_id(dbos, id)
140
+ else:
141
+ dequeued_workflows = dbos._sys_db.start_queued_workflows(
142
+ queue, GlobalParams.executor_id, GlobalParams.app_version, None
143
+ )
136
144
  for id in dequeued_workflows:
137
145
  execute_workflow_by_id(dbos, id)
138
- except OperationalError as e:
139
- if isinstance(
140
- e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
141
- ):
142
- # If a serialization error is encountered, increase the polling interval
143
- polling_interval = min(
144
- max_polling_interval,
145
- polling_interval * 2.0,
146
- )
147
- dbos.logger.warning(
148
- f"Contention detected in queue thread for {queue.name}. Increasing polling interval to {polling_interval:.2f}."
149
- )
150
- else:
151
- dbos.logger.warning(f"Exception encountered in queue thread: {e}")
152
- except Exception as e:
153
- if not stop_event.is_set():
154
- # Only print the error if the thread is not stopping
155
- dbos.logger.warning(f"Exception encountered in queue thread: {e}")
146
+ except OperationalError as e:
147
+ if isinstance(
148
+ e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
149
+ ):
150
+ # If a serialization error is encountered, increase the polling interval
151
+ polling_interval = min(
152
+ max_polling_interval,
153
+ polling_interval * 2.0,
154
+ )
155
+ dbos.logger.warning(
156
+ f"Contention detected in queue thread for {queue.name}. Increasing polling interval to {polling_interval:.2f}."
157
+ )
158
+ else:
159
+ dbos.logger.warning(
160
+ f"Exception encountered in queue thread for {queue.name}: {e}"
161
+ )
162
+ except Exception as e:
163
+ if not stop_event.is_set():
164
+ # Only print the error if the thread is not stopping
165
+ dbos.logger.warning(
166
+ f"Exception encountered in queue thread for {queue.name}: {e}"
167
+ )
168
+
156
169
  # Attempt to scale back the polling interval on each iteration
157
170
  polling_interval = max(min_polling_interval, polling_interval * 0.9)
171
+
172
+
173
+ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
174
+ """Main queue manager thread that spawns and monitors worker threads for each queue."""
175
+ queue_threads: dict[str, threading.Thread] = {}
176
+ check_interval = 1.0 # Check for new queues every second
177
+
178
+ while not stop_event.is_set():
179
+ # Check for new queues
180
+ current_queues = dict(dbos._registry.queue_info_map)
181
+
182
+ # Start threads for new queues
183
+ for queue_name, queue in current_queues.items():
184
+ if (
185
+ queue_name not in queue_threads
186
+ or not queue_threads[queue_name].is_alive()
187
+ ):
188
+ thread = threading.Thread(
189
+ target=queue_worker_thread,
190
+ args=(stop_event, dbos, queue),
191
+ name=f"queue-worker-{queue_name}",
192
+ daemon=True,
193
+ )
194
+ thread.start()
195
+ queue_threads[queue_name] = thread
196
+ dbos.logger.debug(f"Started worker thread for queue: {queue_name}")
197
+
198
+ # Wait for the check interval or stop event
199
+ if stop_event.wait(timeout=check_interval):
200
+ break
201
+
202
+ # Join all queue worker threads
203
+ dbos.logger.info("Stopping queue manager, joining all worker threads...")
204
+ for queue_name, thread in queue_threads.items():
205
+ if thread.is_alive():
206
+ thread.join(timeout=10.0) # Give each thread 10 seconds to finish
207
+ if thread.is_alive():
208
+ dbos.logger.debug(
209
+ f"Queue worker thread for {queue_name} did not stop in time"
210
+ )
211
+ else:
212
+ dbos.logger.debug(
213
+ f"Queue worker thread for {queue_name} stopped successfully"
214
+ )
@@ -35,6 +35,7 @@ class SystemSchema:
35
35
  cls.notifications.schema = schema_name
36
36
  cls.workflow_events.schema = schema_name
37
37
  cls.streams.schema = schema_name
38
+ cls.workflow_events_history.schema = schema_name
38
39
 
39
40
  workflow_status = Table(
40
41
  "workflow_status",
@@ -154,6 +155,24 @@ class SystemSchema:
154
155
  PrimaryKeyConstraint("workflow_uuid", "key"),
155
156
  )
156
157
 
158
+ # This is an immutable version of workflow_events. Two tables are needed for backwards compatibility.
159
+ workflow_events_history = Table(
160
+ "workflow_events_history",
161
+ metadata_obj,
162
+ Column(
163
+ "workflow_uuid",
164
+ Text,
165
+ ForeignKey(
166
+ "workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
167
+ ),
168
+ nullable=False,
169
+ ),
170
+ Column("key", Text, nullable=False),
171
+ Column("value", Text, nullable=False),
172
+ Column("function_id", Integer, nullable=False, server_default=text("'0'::int")),
173
+ PrimaryKeyConstraint("workflow_uuid", "key", "function_id"),
174
+ )
175
+
157
176
  streams = Table(
158
177
  "streams",
159
178
  metadata_obj,
@@ -168,5 +187,6 @@ class SystemSchema:
168
187
  Column("key", Text, nullable=False),
169
188
  Column("value", Text, nullable=False),
170
189
  Column("offset", Integer, nullable=False),
190
+ Column("function_id", Integer, nullable=False, server_default=text("'0'::int")),
171
191
  PrimaryKeyConstraint("workflow_uuid", "key", "offset"),
172
192
  )