dbos 0.8.0a3__tar.gz → 0.8.0a7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbos might be problematic.

Files changed (75)
  1. {dbos-0.8.0a3 → dbos-0.8.0a7}/PKG-INFO +1 -1
  2. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/core.py +39 -7
  3. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/dbos.py +44 -36
  4. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/error.py +11 -0
  5. dbos-0.8.0a7/dbos/migrations/versions/d76646551a6b_job_queue_limiter.py +42 -0
  6. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/queue.py +21 -8
  7. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/registrations.py +3 -0
  8. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/schemas/system_database.py +8 -0
  9. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/system_database.py +145 -22
  10. {dbos-0.8.0a3 → dbos-0.8.0a7}/pyproject.toml +1 -1
  11. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_dbos.py +81 -0
  12. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_failures.py +35 -1
  13. dbos-0.8.0a7/tests/test_queue.py +207 -0
  14. dbos-0.8.0a3/tests/test_queue.py +0 -110
  15. {dbos-0.8.0a3 → dbos-0.8.0a7}/LICENSE +0 -0
  16. {dbos-0.8.0a3 → dbos-0.8.0a7}/README.md +0 -0
  17. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/__init__.py +0 -0
  18. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/admin_sever.py +0 -0
  19. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/application_database.py +0 -0
  20. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/cli.py +0 -0
  21. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/context.py +0 -0
  22. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/dbos-config.schema.json +0 -0
  23. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/dbos_config.py +0 -0
  24. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/decorators.py +0 -0
  25. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/fastapi.py +0 -0
  26. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/flask.py +0 -0
  27. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/kafka.py +0 -0
  28. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/kafka_message.py +0 -0
  29. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/logger.py +0 -0
  30. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/migrations/env.py +0 -0
  31. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/migrations/script.py.mako +0 -0
  32. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  33. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  34. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  35. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  36. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/py.typed +0 -0
  37. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/recovery.py +0 -0
  38. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/request.py +0 -0
  39. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/roles.py +0 -0
  40. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/scheduler/croniter.py +0 -0
  41. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/scheduler/scheduler.py +0 -0
  42. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/schemas/__init__.py +0 -0
  43. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/schemas/application_database.py +0 -0
  44. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/README.md +0 -0
  45. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/__package/__init__.py +0 -0
  46. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/__package/main.py +0 -0
  47. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/__package/schema.py +0 -0
  48. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/alembic.ini +0 -0
  49. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/dbos-config.yaml.dbos +0 -0
  50. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/migrations/env.py.dbos +0 -0
  51. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/migrations/script.py.mako +0 -0
  52. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
  53. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/templates/hello/start_postgres_docker.py +0 -0
  54. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/tracer.py +0 -0
  55. {dbos-0.8.0a3 → dbos-0.8.0a7}/dbos/utils.py +0 -0
  56. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/__init__.py +0 -0
  57. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/atexit_no_ctor.py +0 -0
  58. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/atexit_no_launch.py +0 -0
  59. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/classdefs.py +0 -0
  60. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/conftest.py +0 -0
  61. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/more_classdefs.py +0 -0
  62. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/scheduler/test_croniter.py +0 -0
  63. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/scheduler/test_scheduler.py +0 -0
  64. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_admin_server.py +0 -0
  65. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_classdecorators.py +0 -0
  66. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_concurrency.py +0 -0
  67. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_config.py +0 -0
  68. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_fastapi.py +0 -0
  69. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_fastapi_roles.py +0 -0
  70. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_flask.py +0 -0
  71. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_kafka.py +0 -0
  72. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_package.py +0 -0
  73. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_schema_migration.py +0 -0
  74. {dbos-0.8.0a3 → dbos-0.8.0a7}/tests/test_singleton.py +0 -0
  75. {dbos-0.8.0a3 → dbos-0.8.0a7}/version/__init__.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.8.0a3
+Version: 0.8.0a7
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
dbos/core.py
@@ -38,6 +38,7 @@ from dbos.error import (
     DBOSWorkflowFunctionNotFoundError,
 )
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     get_config_name,
     get_dbos_class_name,
     get_dbos_func_name,
@@ -118,6 +119,7 @@ def _init_workflow(
     config_name: Optional[str],
     temp_wf_type: Optional[str],
     queue: Optional[str] = None,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -157,7 +159,9 @@ def _init_workflow(
         # Synchronously record the status and inputs for workflows and single-step workflows
         # We also have to do this for single-step workflows because of the foreign key constraint on the operation outputs table
         # TODO: Make this transactional (and with the queue step below)
-        dbos._sys_db.update_workflow_status(status, False, ctx.in_recovery)
+        dbos._sys_db.update_workflow_status(
+            status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
+        )
         dbos._sys_db.update_workflow_inputs(wfid, utils.serialize_args(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
@@ -181,7 +185,8 @@ def _execute_workflow(
         status["status"] = "SUCCESS"
         status["output"] = utils.serialize(output)
         if status["queue_name"] is not None:
-            dbos._sys_db.remove_from_queue(status["workflow_uuid"])
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.buffer_workflow_status(status)
     except DBOSWorkflowConflictIDError:
         # Retrieve the workflow handle and wait for the result.
@@ -195,7 +200,8 @@ def _execute_workflow(
         status["status"] = "ERROR"
         status["error"] = utils.serialize_exception(error)
         if status["queue_name"] is not None:
-            dbos._sys_db.remove_from_queue(status["workflow_uuid"])
+            queue = dbos._registry.queue_info_map[status["queue_name"]]
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
         dbos._sys_db.update_workflow_status(status)
         raise

@@ -218,7 +224,7 @@ def _execute_workflow_wthread(
     with EnterDBOSWorkflow(attributes):
         try:
             return _execute_workflow(dbos, status, func, *args, **kwargs)
-        except Exception as e:
+        except Exception:
             dbos.logger.error(
                 f"Exception encountered in asynchronous workflow: {traceback.format_exc()}"
             )
@@ -289,10 +295,15 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]
     )


-def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
+def _workflow_wrapper(
+    dbosreg: "_DBOSRegistry",
+    func: F,
+    max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+) -> F:
     func.__orig_func = func # type: ignore

     fi = get_or_create_func_info(func)
+    fi.max_recovery_attempts = max_recovery_attempts

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
@@ -325,17 +336,21 @@ def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
                class_name=get_dbos_class_name(fi, func, args),
                config_name=get_config_name(fi, func, args),
                temp_wf_type=get_temp_workflow_type(func),
+                max_recovery_attempts=max_recovery_attempts,
            )

+            dbos.logger.debug(
+                f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
+            )
            return _execute_workflow(dbos, status, func, *args, **kwargs)

    wrapped_func = cast(F, wrapper)
    return wrapped_func


-def _workflow(reg: "_DBOSRegistry") -> Callable[[F], F]:
+def _workflow(reg: "_DBOSRegistry", max_recovery_attempts: int) -> Callable[[F], F]:
    def _workflow_decorator(func: F) -> F:
-        wrapped_func = _workflow_wrapper(reg, func)
+        wrapped_func = _workflow_wrapper(reg, func, max_recovery_attempts)
        reg.register_wf_function(func.__qualname__, wrapped_func)
        return wrapped_func

@@ -401,6 +416,7 @@ def _start_workflow(
         config_name=get_config_name(fi, func, gin_args),
         temp_wf_type=get_temp_workflow_type(func),
         queue=queue_name,
+        max_recovery_attempts=fi.max_recovery_attempts,
     )

     if not execute_workflow:
@@ -477,6 +493,9 @@ def _transaction(
                    )
                )
                if recorded_output:
+                    dbos.logger.debug(
+                        f"Replaying transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                    )
                    if recorded_output["error"]:
                        deserialized_error = (
                            utils.deserialize_exception(
@@ -493,6 +512,11 @@ def _transaction(
                        raise Exception(
                            "Output and error are both None"
                        )
+                else:
+                    dbos.logger.debug(
+                        f"Running transaction, id: {ctx.function_id}, name: {attributes['name']}"
+                    )
+
                output = func(*args, **kwargs)
                txn_output["output"] = utils.serialize(output)
                assert (
@@ -590,6 +614,9 @@ def _step(
                ctx.workflow_id, ctx.function_id
            )
            if recorded_output:
+                dbos.logger.debug(
+                    f"Replaying step, id: {ctx.function_id}, name: {attributes['name']}"
+                )
                if recorded_output["error"] is not None:
                    deserialized_error = utils.deserialize_exception(
                        recorded_output["error"]
@@ -599,6 +626,11 @@ def _step(
                    return utils.deserialize(recorded_output["output"])
                else:
                    raise Exception("Output and error are both None")
+            else:
+                dbos.logger.debug(
+                    f"Running step, id: {ctx.function_id}, name: {attributes['name']}"
+                )
+
            output = None
            error = None
            local_max_attempts = max_attempts if retries_allowed else 1
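The debug statements added to core.py distinguish a fresh execution ("Running ...") from a deterministic replay of a previously recorded result ("Replaying ...") for workflows, transactions, and steps. A minimal sketch for surfacing them, assuming the library logger is registered under the name "dbos" (an assumption; the logger name is not shown in this diff):

    import logging

    # Raise the assumed "dbos" logger to DEBUG to see the new
    # "Running ..." / "Replaying ..." messages during execution and recovery.
    logging.getLogger("dbos").setLevel(logging.DEBUG)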
dbos/dbos.py
@@ -5,6 +5,7 @@ import json
 import os
 import sys
 import threading
+import traceback
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
 from logging import Logger
@@ -42,6 +43,7 @@ from dbos.decorators import classproperty
 from dbos.queue import Queue, queue_thread
 from dbos.recovery import _recover_pending_workflows, _startup_recovery_thread
 from dbos.registrations import (
+    DEFAULT_MAX_RECOVERY_ATTEMPTS,
     DBOSClassInfo,
     get_or_create_class_info,
     set_dbos_func_name,
@@ -338,43 +340,47 @@ class DBOS:
         _dbos_global_instance._launch()

     def _launch(self) -> None:
-        if self._launched:
-            dbos_logger.warning(f"DBOS was already launched")
-            return
-        self._launched = True
-        self._executor_field = ThreadPoolExecutor(max_workers=64)
-        self._sys_db_field = SystemDatabase(self.config)
-        self._app_db_field = ApplicationDatabase(self.config)
-        self._admin_server_field = AdminServer(dbos=self)
-
-        if not os.environ.get("DBOS__VMID"):
-            workflow_ids = self._sys_db.get_pending_workflows("local")
-            self._executor.submit(_startup_recovery_thread, self, workflow_ids)
-
-        # Listen to notifications
-        self._executor.submit(self._sys_db._notification_listener)
-
-        # Start flush workflow buffers thread
-        self._executor.submit(self._sys_db.flush_workflow_buffers)
-
-        # Start the queue thread
-        evt = threading.Event()
-        self.stop_events.append(evt)
-        self._executor.submit(queue_thread, evt, self)
-
-        # Grab any pollers that were deferred and start them
-        for evt, func, args, kwargs in self._registry.pollers:
+        try:
+            if self._launched:
+                dbos_logger.warning(f"DBOS was already launched")
+                return
+            self._launched = True
+            self._executor_field = ThreadPoolExecutor(max_workers=64)
+            self._sys_db_field = SystemDatabase(self.config)
+            self._app_db_field = ApplicationDatabase(self.config)
+            self._admin_server_field = AdminServer(dbos=self)
+
+            if not os.environ.get("DBOS__VMID"):
+                workflow_ids = self._sys_db.get_pending_workflows("local")
+                self._executor.submit(_startup_recovery_thread, self, workflow_ids)
+
+            # Listen to notifications
+            self._executor.submit(self._sys_db._notification_listener)
+
+            # Start flush workflow buffers thread
+            self._executor.submit(self._sys_db.flush_workflow_buffers)
+
+            # Start the queue thread
+            evt = threading.Event()
             self.stop_events.append(evt)
-            self._executor.submit(func, *args, **kwargs)
-        self._registry.pollers = []
+            self._executor.submit(queue_thread, evt, self)

-        dbos_logger.info("DBOS launched")
+            # Grab any pollers that were deferred and start them
+            for evt, func, args, kwargs in self._registry.pollers:
+                self.stop_events.append(evt)
+                self._executor.submit(func, *args, **kwargs)
+            self._registry.pollers = []

-        # Flush handlers and add OTLP to all loggers if enabled
-        # to enable their export in DBOS Cloud
-        for handler in dbos_logger.handlers:
-            handler.flush()
-        add_otlp_to_all_loggers()
+            dbos_logger.info("DBOS launched")
+
+            # Flush handlers and add OTLP to all loggers if enabled
+            # to enable their export in DBOS Cloud
+            for handler in dbos_logger.handlers:
+                handler.flush()
+            add_otlp_to_all_loggers()
+        except Exception:
+            dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
+            raise

     def _destroy(self) -> None:
         self._initialized = False
@@ -401,9 +407,11 @@ class DBOS:

     # Decorators for DBOS functionality
     @classmethod
-    def workflow(cls) -> Callable[[F], F]:
+    def workflow(
+        cls, *, max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS
+    ) -> Callable[[F], F]:
         """Decorate a function for use as a DBOS workflow."""
-        return _workflow(_get_or_create_dbos_registry())
+        return _workflow(_get_or_create_dbos_registry(), max_recovery_attempts)

     @classmethod
     def transaction(
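Together with DEFAULT_MAX_RECOVERY_ATTEMPTS = 50 in registrations.py (see below), this makes the recovery cap configurable per workflow. A minimal sketch of the new decorator parameter, using a hypothetical workflow function (running it still requires the usual DBOS configuration and launch):

    from dbos import DBOS

    # Allow at most 10 recovery attempts before the workflow is moved
    # to the dead-letter queue; the default is 50.
    @DBOS.workflow(max_recovery_attempts=10)
    def example_workflow(x: int) -> int:  # hypothetical function
        return x * 2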
dbos/error.py
@@ -32,6 +32,7 @@ class DBOSErrorCode(Enum):
     InitializationError = 3
     WorkflowFunctionNotFound = 4
     NonExistentWorkflowError = 5
+    DeadLetterQueueError = 6
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8

@@ -86,6 +87,16 @@ class DBOSNonExistentWorkflowError(DBOSException):
         )


+class DBOSDeadLetterQueueError(DBOSException):
+    """Exception raised when a workflow database record does not exist for a given ID."""
+
+    def __init__(self, wf_id: str, max_retries: int):
+        super().__init__(
+            f"Workflow {wf_id} has been moved to the dead-letter queue after exceeding the maximum of ${max_retries} retries",
+            dbos_error_code=DBOSErrorCode.DeadLetterQueueError.value,
+        )
+
+
 class DBOSNotAuthorizedError(DBOSException):
     """Exception raised by DBOS role-based security when the user is not authorized to access a function."""

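How this error is raised is not part of this view (the logic sits in system_database.py, whose hunks are not shown), but application code can import it from dbos.error. A hedged sketch, assuming the error propagates to the caller that starts or recovers the workflow:

    from dbos.error import DBOSDeadLetterQueueError

    try:
        example_workflow(1)  # hypothetical workflow from the sketch above
    except DBOSDeadLetterQueueError as e:
        # Raised once the workflow has exceeded its max_recovery_attempts.
        print(e)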
dbos/migrations/versions/d76646551a6b_job_queue_limiter.py
@@ -0,0 +1,42 @@
+"""job_queue_limiter
+
+Revision ID: d76646551a6b
+Revises: 50f3227f0b4b
+Create Date: 2024-09-25 14:48:10.218015
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "d76646551a6b"
+down_revision: Union[str, None] = "50f3227f0b4b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "started_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+    op.add_column(
+        "job_queue",
+        sa.Column(
+            "completed_at_epoch_ms",
+            sa.BigInteger(),
+        ),
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_column("job_queue", "started_at_epoch_ms", schema="dbos")
+    op.drop_column("job_queue", "completed_at_epoch_ms", schema="dbos")
dbos/queue.py
@@ -1,7 +1,6 @@
 import threading
-import time
 import traceback
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, TypedDict

 from dbos.core import P, R, _execute_workflow_id, _start_workflow

@@ -9,10 +8,25 @@ if TYPE_CHECKING:
     from dbos.dbos import DBOS, Workflow, WorkflowHandle


+# Limit the maximum number of functions from this queue
+# that can be started in a given period. If the limit is 5
+# and the period is 10, no more than 5 functions can be
+# started per 10 seconds.
+class Limiter(TypedDict):
+    limit: int
+    period: float
+
+
 class Queue:
-    def __init__(self, name: str, concurrency: Optional[int] = None) -> None:
+    def __init__(
+        self,
+        name: str,
+        concurrency: Optional[int] = None,
+        limiter: Optional[Limiter] = None,
+    ) -> None:
         self.name = name
         self.concurrency = concurrency
+        self.limiter = limiter
         from dbos.dbos import _get_or_create_dbos_registry
         registry = _get_or_create_dbos_registry()

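Putting the new constructor together, a minimal usage sketch; the queue name and workflow are hypothetical, and the enqueue and handle calls are assumed from the pre-existing Queue and WorkflowHandle APIs that this diff does not show:

    from dbos import DBOS
    from dbos.queue import Queue

    # Start at most 5 workflows per 10-second window from this queue,
    # with at most 2 of them running concurrently.
    queue = Queue("example_queue", concurrency=2, limiter={"limit": 5, "period": 10})

    @DBOS.workflow()
    def task(n: int) -> int:  # hypothetical workflow
        return n + 1

    handle = queue.enqueue(task, 42)  # enqueue assumed from the existing Queue API
    result = handle.get_result()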
@@ -29,12 +43,11 @@

 def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
     while not stop_event.is_set():
-        time.sleep(1)
-        for queue_name, queue in dbos._registry.queue_info_map.items():
+        if stop_event.wait(timeout=1):
+            return
+        for _, queue in dbos._registry.queue_info_map.items():
             try:
-                wf_ids = dbos._sys_db.start_queued_workflows(
-                    queue_name, queue.concurrency
-                )
+                wf_ids = dbos._sys_db.start_queued_workflows(queue)
                 for id in wf_ids:
                     _execute_workflow_id(dbos, id)
             except Exception:
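The loop now sleeps with stop_event.wait(timeout=1) instead of time.sleep(1), so a shutdown request is observed immediately rather than after the remaining sleep. A self-contained sketch of the pattern with generic names (not dbos code):

    import threading

    def poller(stop_event: threading.Event) -> None:
        while not stop_event.is_set():
            # wait() returns True as soon as the event is set, so the
            # loop exits without waiting out the full timeout.
            if stop_event.wait(timeout=1):
                return
            ...  # poll work here

    stop = threading.Event()
    t = threading.Thread(target=poller, args=(stop,))
    t.start()
    stop.set()  # the poller returns promptly
    t.join()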
dbos/registrations.py
@@ -3,6 +3,8 @@ from enum import Enum
 from types import FunctionType
 from typing import Any, Callable, List, Literal, Optional, Tuple, Type, cast

+DEFAULT_MAX_RECOVERY_ATTEMPTS = 50
+

 def get_dbos_func_name(f: Any) -> str:
     if hasattr(f, "dbos_function_name"):
@@ -47,6 +49,7 @@ class DBOSFuncInfo:
         self.class_info: Optional[DBOSClassInfo] = None
         self.func_type: DBOSFuncType = DBOSFuncType.Unknown
         self.required_roles: Optional[List[str]] = None
+        self.max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS


 def get_or_create_class_info(cls: Type[Any]) -> DBOSClassInfo:
dbos/schemas/system_database.py
@@ -161,4 +161,12 @@ class SystemSchema:
             nullable=False,
             server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
         ),
+        Column(
+            "started_at_epoch_ms",
+            BigInteger(),
+        ),
+        Column(
+            "completed_at_epoch_ms",
+            BigInteger(),
+        ),
     )
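These columns record when each queued job started and completed, which is the bookkeeping a rate limiter needs to count starts inside the current window. A hedged sketch of the kind of query they enable; the real logic lives in start_queued_workflows in system_database.py, whose hunks are not included in this view, so the function below is purely illustrative:

    import time

    import sqlalchemy as sa

    def starts_in_window(conn: sa.engine.Connection, period_s: float) -> int:
        # Count jobs started within the last `period_s` seconds; if this count
        # has reached the queue's limit, no further jobs should be dequeued yet.
        cutoff_ms = int((time.time() - period_s) * 1000)
        return conn.execute(
            sa.text(
                "SELECT count(*) FROM dbos.job_queue "
                "WHERE started_at_epoch_ms IS NOT NULL AND started_at_epoch_ms > :cutoff"
            ),
            {"cutoff": cutoff_ms},
        ).scalar_one()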