dbos 2.2.0a3__tar.gz → 2.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. {dbos-2.2.0a3 → dbos-2.3.0}/PKG-INFO +1 -1
  2. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_client.py +12 -2
  3. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_core.py +0 -21
  4. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_dbos_config.py +1 -2
  5. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_kafka.py +6 -4
  6. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_logger.py +23 -16
  7. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_scheduler.py +5 -2
  8. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_serialization.py +7 -3
  9. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_sys_db_postgres.py +1 -1
  10. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_tracer.py +24 -19
  11. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/cli/cli.py +1 -15
  12. {dbos-2.2.0a3 → dbos-2.3.0}/pyproject.toml +1 -1
  13. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_client.py +32 -0
  14. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_config.py +29 -35
  15. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_dbos.py +60 -0
  16. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_failures.py +14 -1
  17. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_kafka.py +50 -17
  18. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_queue.py +1 -1
  19. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_scheduler.py +13 -0
  20. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_spans.py +1 -5
  21. {dbos-2.2.0a3 → dbos-2.3.0}/LICENSE +0 -0
  22. {dbos-2.2.0a3 → dbos-2.3.0}/README.md +0 -0
  23. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/__init__.py +0 -0
  24. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/__main__.py +0 -0
  25. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_admin_server.py +0 -0
  26. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_app_db.py +0 -0
  27. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_classproperty.py +0 -0
  28. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_conductor/conductor.py +0 -0
  29. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_conductor/protocol.py +0 -0
  30. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_context.py +0 -0
  31. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_croniter.py +0 -0
  32. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_dbos.py +0 -0
  33. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_debouncer.py +0 -0
  34. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_debug.py +0 -0
  35. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_docker_pg_helper.py +0 -0
  36. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_error.py +0 -0
  37. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_event_loop.py +0 -0
  38. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_fastapi.py +0 -0
  39. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_flask.py +0 -0
  40. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_kafka_message.py +0 -0
  41. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_migration.py +0 -0
  42. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_outcome.py +0 -0
  43. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_queue.py +0 -0
  44. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_recovery.py +0 -0
  45. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_registrations.py +0 -0
  46. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_roles.py +0 -0
  47. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_schemas/__init__.py +0 -0
  48. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_schemas/application_database.py +0 -0
  49. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_schemas/system_database.py +0 -0
  50. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_sys_db.py +0 -0
  51. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_sys_db_sqlite.py +0 -0
  52. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/README.md +0 -0
  53. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
  54. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
  55. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
  56. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
  57. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/migrations/create_table.py.dbos +0 -0
  58. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
  59. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_utils.py +0 -0
  60. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/_workflow_commands.py +0 -0
  61. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/cli/_github_init.py +0 -0
  62. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/cli/_template_init.py +0 -0
  63. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/cli/migration.py +0 -0
  64. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/dbos-config.schema.json +0 -0
  65. {dbos-2.2.0a3 → dbos-2.3.0}/dbos/py.typed +0 -0
  66. {dbos-2.2.0a3 → dbos-2.3.0}/tests/__init__.py +0 -0
  67. {dbos-2.2.0a3 → dbos-2.3.0}/tests/atexit_no_ctor.py +0 -0
  68. {dbos-2.2.0a3 → dbos-2.3.0}/tests/atexit_no_launch.py +0 -0
  69. {dbos-2.2.0a3 → dbos-2.3.0}/tests/classdefs.py +0 -0
  70. {dbos-2.2.0a3 → dbos-2.3.0}/tests/client_collateral.py +0 -0
  71. {dbos-2.2.0a3 → dbos-2.3.0}/tests/client_worker.py +0 -0
  72. {dbos-2.2.0a3 → dbos-2.3.0}/tests/conftest.py +0 -0
  73. {dbos-2.2.0a3 → dbos-2.3.0}/tests/dupname_classdefs1.py +0 -0
  74. {dbos-2.2.0a3 → dbos-2.3.0}/tests/dupname_classdefsa.py +0 -0
  75. {dbos-2.2.0a3 → dbos-2.3.0}/tests/more_classdefs.py +0 -0
  76. {dbos-2.2.0a3 → dbos-2.3.0}/tests/queuedworkflow.py +0 -0
  77. {dbos-2.2.0a3 → dbos-2.3.0}/tests/script_without_fastapi.py +0 -0
  78. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_admin_server.py +0 -0
  79. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_async.py +0 -0
  80. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_async_workflow_management.py +0 -0
  81. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_classdecorators.py +0 -0
  82. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_cli.py +0 -0
  83. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_concurrency.py +0 -0
  84. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_croniter.py +0 -0
  85. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_debouncer.py +0 -0
  86. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_debug.py +0 -0
  87. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_docker_secrets.py +0 -0
  88. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_fastapi.py +0 -0
  89. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_fastapi_roles.py +0 -0
  90. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_flask.py +0 -0
  91. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_outcome.py +0 -0
  92. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_package.py +0 -0
  93. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_schema_migration.py +0 -0
  94. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_singleton.py +0 -0
  95. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_sqlalchemy.py +0 -0
  96. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_streaming.py +0 -0
  97. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_workflow_introspection.py +0 -0
  98. {dbos-2.2.0a3 → dbos-2.3.0}/tests/test_workflow_management.py +0 -0
  99. {dbos-2.2.0a3 → dbos-2.3.0}/version/__init__.py +0 -0
{dbos-2.2.0a3 → dbos-2.3.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 2.2.0a3
+Version: 2.3.0
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_client.py
@@ -1,4 +1,5 @@
 import asyncio
+import json
 import time
 import uuid
 from typing import (
@@ -63,6 +64,8 @@ class EnqueueOptions(_EnqueueOptionsRequired, total=False):
     priority: int
     max_recovery_attempts: int
     queue_partition_key: str
+    authenticated_user: str
+    authenticated_roles: list[str]


 def validate_enqueue_options(options: EnqueueOptions) -> None:
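These two new fields let a `DBOSClient` attach an authentication context to enqueued workflows, which were previously always recorded with `authenticated_user` and `authenticated_roles` of `None`. A minimal usage sketch, assuming `DBOSClient` and `EnqueueOptions` are importable from `dbos` as in this package's tests; the connection URL, queue name, and `enqueue_test` workflow mirror the test collateral and are illustrative:

```python
from dbos import DBOSClient, EnqueueOptions

# Hypothetical system database URL; substitute your own.
client = DBOSClient("postgresql://postgres:dbos@localhost:5432/app")

options: EnqueueOptions = {
    "queue_name": "test_queue",       # assumed queue from the test collateral
    "workflow_name": "enqueue_test",  # assumed registered workflow
    "authenticated_user": "testuser",
    "authenticated_roles": ["role1", "role2"],
}
# Enqueue with positional workflow arguments, then block on the result.
handle = client.enqueue(options, 42, "test", {"first": "John", "last": "Doe", "age": 30})
print(handle.get_result())
```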
@@ -189,6 +192,13 @@ class DBOSClient:
             "queue_partition_key": options.get("queue_partition_key"),
         }

+        authenticated_user = options.get("authenticated_user")
+        authenticated_roles = (
+            json.dumps(options.get("authenticated_roles"))
+            if options.get("authenticated_roles")
+            else None
+        )
+
         inputs: WorkflowInputs = {
             "args": args,
             "kwargs": kwargs,
@@ -202,9 +212,9 @@ class DBOSClient:
             "queue_name": queue_name,
             "app_version": enqueue_options_internal["app_version"],
             "config_name": None,
-            "authenticated_user": None,
+            "authenticated_user": authenticated_user,
             "assumed_role": None,
-            "authenticated_roles": None,
+            "authenticated_roles": authenticated_roles,
             "output": None,
             "error": None,
             "created_at": None,
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_core.py
@@ -93,14 +93,6 @@ TEMP_SEND_WF_NAME = "<temp>.temp_send_workflow"
 DEBOUNCER_WORKFLOW_NAME = "_dbos_debouncer_workflow"


-def check_is_in_coroutine() -> bool:
-    try:
-        asyncio.get_running_loop()
-        return True
-    except RuntimeError:
-        return False
-
-
 class WorkflowHandleFuture(Generic[R]):

     def __init__(self, workflow_id: str, future: Future[R], dbos: "DBOS"):
@@ -856,11 +848,6 @@ def workflow_wrapper(
             dbos._sys_db.record_get_result(workflow_id, serialized_r, None)
             return r

-    if check_is_in_coroutine() and not inspect.iscoroutinefunction(func):
-        dbos_logger.warning(
-            f"Sync workflow ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Define it as async or use asyncio.to_thread instead."
-        )
-
     outcome = (
         wfOutcome.wrap(init_wf, dbos=dbos)
         .also(DBOSAssumeRole(rr))
@@ -1046,10 +1033,6 @@ def decorate_transaction(
             assert (
                 ctx.is_workflow()
             ), "Transactions must be called from within workflows"
-            if check_is_in_coroutine():
-                dbos_logger.warning(
-                    f"Transaction function ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Use asyncio.to_thread instead."
-                )
             with DBOSAssumeRole(rr):
                 return invoke_tx(*args, **kwargs)
         else:
@@ -1194,10 +1177,6 @@ def decorate_step(

     @wraps(func)
     def wrapper(*args: Any, **kwargs: Any) -> Any:
-        if check_is_in_coroutine() and not inspect.iscoroutinefunction(func):
-            dbos_logger.warning(
-                f"Sync step ({get_dbos_func_name(func)}) shouldn't be invoked from within another async function. Define it as async or use asyncio.to_thread instead."
-            )
         # If the step is called from a workflow, run it as a step.
         # Otherwise, run it as a normal function.
         ctx = get_local_dbos_context()
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_dbos_config.py
@@ -444,6 +444,7 @@ def configure_db_engine_parameters(

     # Configure user database engine parameters
     app_engine_kwargs: dict[str, Any] = {
+        "connect_args": {"application_name": "dbos_transact"},
        "pool_timeout": 30,
        "max_overflow": 0,
        "pool_size": 20,
@@ -477,8 +478,6 @@ def is_valid_database_url(database_url: str) -> bool:
         return True
     url = make_url(database_url)
     required_fields = [
-        ("username", "Username must be specified in the connection URL"),
-        ("host", "Host must be specified in the connection URL"),
         ("database", "Database name must be specified in the connection URL"),
     ]
     for field_name, error_message in required_fields:
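A practical consequence of injecting `application_name` into `connect_args`: connections opened by DBOS-managed engines now identify themselves to Postgres, so they can be singled out in `pg_stat_activity`. A sketch, with a hypothetical connection URL:

```python
import sqlalchemy as sa

# Hypothetical URL; point this at the database your DBOS app uses.
engine = sa.create_engine("postgresql://postgres:dbos@localhost:5432/app")
with engine.connect() as conn:
    # List backends that identify as DBOS connections.
    rows = conn.execute(
        sa.text(
            "SELECT pid, state, query FROM pg_stat_activity "
            "WHERE application_name = 'dbos_transact'"
        )
    )
    for pid, state, query in rows:
        print(pid, state, query)
```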
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_kafka.py
@@ -1,6 +1,6 @@
 import re
 import threading
-from typing import TYPE_CHECKING, Any, Callable, NoReturn
+from typing import TYPE_CHECKING, Any, Callable, Coroutine, NoReturn

 from confluent_kafka import Consumer, KafkaError, KafkaException

@@ -15,7 +15,9 @@ from ._kafka_message import KafkaMessage
 from ._logger import dbos_logger
 from ._registrations import get_dbos_func_name

-_KafkaConsumerWorkflow = Callable[[KafkaMessage], None]
+_KafkaConsumerWorkflow = (
+    Callable[[KafkaMessage], None] | Callable[[KafkaMessage], Coroutine[Any, Any, None]]
+)

 _kafka_queue: Queue
 _in_order_kafka_queues: dict[str, Queue] = {}
@@ -37,8 +39,8 @@ def _kafka_consumer_loop(
     in_order: bool,
 ) -> None:

-    def on_error(err: KafkaError) -> NoReturn:
-        raise KafkaException(err)
+    def on_error(err: KafkaError) -> None:
+        dbos_logger.error(f"Exception in Kafka consumer: {err}")

     config["error_cb"] = on_error
     if "auto.offset.reset" not in config:
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_logger.py
@@ -68,30 +68,37 @@ def config_logger(config: "ConfigFile") -> None:
     )
     disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore

-    if not disable_otlp and otlp_logs_endpoints:
+    if not disable_otlp:

-        from opentelemetry._logs import set_logger_provider
+        from opentelemetry._logs import get_logger_provider, set_logger_provider
         from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
         from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
         from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
         from opentelemetry.sdk.resources import Resource
         from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME

-        log_provider = LoggerProvider(
-            Resource.create(
-                attributes={
-                    SERVICE_NAME: config["name"],
-                }
-            )
-        )
-        set_logger_provider(log_provider)
-        for e in otlp_logs_endpoints:
-            log_provider.add_log_record_processor(
-                BatchLogRecordProcessor(
-                    OTLPLogExporter(endpoint=e),
-                    export_timeout_millis=5000,
+        # Only set up OTLP provider and exporter if endpoints are provided
+        log_provider = get_logger_provider()
+        if otlp_logs_endpoints is not None:
+            if not isinstance(log_provider, LoggerProvider):
+                log_provider = LoggerProvider(
+                    Resource.create(
+                        attributes={
+                            SERVICE_NAME: config["name"],
+                        }
+                    )
+                )
+                set_logger_provider(log_provider)
+
+            for e in otlp_logs_endpoints:
+                log_provider.add_log_record_processor(
+                    BatchLogRecordProcessor(
+                        OTLPLogExporter(endpoint=e),
+                        export_timeout_millis=5000,
+                    )
                 )
-            )
+
+        # Even if no endpoints are provided, we still need a LoggerProvider to create the LoggingHandler
         global _otlp_handler
         _otlp_handler = LoggingHandler(logger_provider=log_provider)
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_scheduler.py
@@ -2,7 +2,7 @@ import random
 import threading
 import traceback
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Any, Callable, Coroutine

 from ._logger import dbos_logger
 from ._queue import Queue
@@ -14,7 +14,10 @@ from ._context import SetWorkflowID
 from ._croniter import croniter  # type: ignore
 from ._registrations import get_dbos_func_name

-ScheduledWorkflow = Callable[[datetime, datetime], None]
+ScheduledWorkflow = (
+    Callable[[datetime, datetime], None]
+    | Callable[[datetime, datetime], Coroutine[Any, Any, None]]
+)


 def scheduler_loop(
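With the widened `ScheduledWorkflow` alias, `@DBOS.scheduled` can now wrap an `async def` workflow, exercised by `test_async_scheduled_workflow` later in this diff. A minimal sketch; the cron string and function name are illustrative:

```python
import asyncio
from datetime import datetime

from dbos import DBOS

@DBOS.scheduled("* * * * *")  # every minute
@DBOS.workflow()
async def periodic_job(scheduled: datetime, actual: datetime) -> None:
    # Placeholder for real async work; the workflow may await freely.
    await asyncio.sleep(0)
    DBOS.logger.info(f"scheduled for {scheduled}, ran at {actual}")
```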
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_serialization.py
@@ -25,9 +25,13 @@ class Serializer(ABC):
 class DefaultSerializer(Serializer):

     def serialize(self, data: Any) -> str:
-        pickled_data: bytes = pickle.dumps(data)
-        encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
-        return encoded_data
+        try:
+            pickled_data: bytes = pickle.dumps(data)
+            encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
+            return encoded_data
+        except Exception as e:
+            dbos_logger.error(f"Error serializing object: {data}", exc_info=e)
+            raise

     def deserialize(cls, serialized_data: str) -> Any:
         pickled_data: bytes = base64.b64decode(serialized_data)
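For context: `pickle.dumps` fails on objects such as generators, and before this change the failure surfaced without identifying the offending object; the serializer now logs it before re-raising (see `test_nonserializable_return` later in this diff). A minimal illustration of the underlying failure mode with plain `pickle`:

```python
import pickle

def step():
    yield "val"  # a generator is not picklable

try:
    pickle.dumps(step())
except TypeError as e:
    # CPython raises TypeError: cannot pickle 'generator' object
    print(f"serialization fails: {e}")
```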
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_sys_db_postgres.py
@@ -41,7 +41,7 @@ class PostgresSystemDatabase(SystemDatabase):
             parameters={"db_name": sysdb_name},
         ).scalar():
             dbos_logger.info(f"Creating system database {sysdb_name}")
-            conn.execute(sa.text(f"CREATE DATABASE {sysdb_name}"))
+            conn.execute(sa.text(f'CREATE DATABASE "{sysdb_name}"'))
         engine.dispose()
     else:
         # If we were provided an engine, validate it can connect
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/_tracer.py
@@ -25,6 +25,10 @@ class DBOSTracer:
     def config(self, config: ConfigFile) -> None:
         self.otlp_attributes = config.get("telemetry", {}).get("otlp_attributes", {})  # type: ignore
         self.disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore
+        otlp_traces_endpoints = (
+            config.get("telemetry", {}).get("OTLPExporter", {}).get("tracesEndpoint")  # type: ignore
+        )
+
         if not self.disable_otlp:
             from opentelemetry import trace
             from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
@@ -38,25 +42,26 @@ class DBOSTracer:
             )
             from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME

-            if not isinstance(trace.get_tracer_provider(), TracerProvider):
-                resource = Resource(
-                    attributes={
-                        SERVICE_NAME: config["name"],
-                    }
-                )
-
-                provider = TracerProvider(resource=resource)
-                if os.environ.get("DBOS__CONSOLE_TRACES", None) is not None:
-                    processor = BatchSpanProcessor(ConsoleSpanExporter())
-                    provider.add_span_processor(processor)
-                otlp_traces_endpoints = (
-                    config.get("telemetry", {}).get("OTLPExporter", {}).get("tracesEndpoint")  # type: ignore
-                )
-                if otlp_traces_endpoints:
-                    for e in otlp_traces_endpoints:
-                        processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=e))
-                        provider.add_span_processor(processor)
-                trace.set_tracer_provider(provider)
+            tracer_provider = trace.get_tracer_provider()
+
+            # Only set up OTLP provider and exporter if endpoints are provided
+            if otlp_traces_endpoints is not None:
+                if not isinstance(tracer_provider, TracerProvider):
+                    resource = Resource(
+                        attributes={
+                            SERVICE_NAME: config["name"],
+                        }
+                    )
+
+                    tracer_provider = TracerProvider(resource=resource)
+                    if os.environ.get("DBOS__CONSOLE_TRACES", None) is not None:
+                        processor = BatchSpanProcessor(ConsoleSpanExporter())
+                        tracer_provider.add_span_processor(processor)
+                    trace.set_tracer_provider(tracer_provider)
+
+                for e in otlp_traces_endpoints:
+                    processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=e))
+                    tracer_provider.add_span_processor(processor)

     def set_provider(self, provider: "Optional[TracerProvider]") -> None:
         self.provider = provider
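The `_logger.py` and `_tracer.py` changes follow the same pattern: DBOS now reuses an already-installed global OpenTelemetry provider instead of unconditionally replacing it, and only attaches OTLP exporters when endpoints are configured. A sketch of what this permits, assuming an application that installs its own tracer provider before `DBOS.launch()`; the service name is illustrative:

```python
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider

# Install an application-owned provider before launching DBOS. With 2.3.0,
# DBOS finds it via trace.get_tracer_provider() and, when trace endpoints
# are configured, adds its span processors to it rather than installing
# a competing provider.
provider = TracerProvider(resource=Resource.create({"service.name": "my-app"}))
trace.set_tracer_provider(provider)
```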
{dbos-2.2.0a3 → dbos-2.3.0}/dbos/cli/cli.py
@@ -140,26 +140,12 @@ def start() -> None:
         Forward kill signals to children.

         When we receive a signal, send it to the entire process group of the child.
-        If that doesn't work, SIGKILL them then exit.
         """
         # Send the signal to the child's entire process group
         if process.poll() is None:
             os.killpg(os.getpgid(process.pid), signum)

-        # Give some time for the child to terminate
-        for _ in range(10):  # Wait up to 1 second
-            if process.poll() is not None:
-                break
-            time.sleep(0.1)
-
-        # If the child is still running, force kill it
-        if process.poll() is None:
-            try:
-                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-            except Exception:
-                pass
-
-        # Exit immediately
+        # Exit
         os._exit(process.returncode if process.returncode is not None else 1)

     # Configure the single handler only on Unix-like systems.
{dbos-2.2.0a3 → dbos-2.3.0}/pyproject.toml
@@ -34,7 +34,7 @@ classifiers = [
     "Topic :: Software Development :: Libraries :: Python Modules",
     "Framework :: AsyncIO",
 ]
-version = "2.2.0a3"
+version = "2.3.0"

 [project.license]
 text = "MIT"
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_client.py
@@ -593,3 +593,35 @@ def test_enqueue_with_priority(dbos: DBOS, client: DBOSClient) -> None:
 def test_client_bad_url() -> None:
     with pytest.raises(DBAPIError) as exc_info:
         DBOSClient("postgresql://postgres:fakepassword@localhost:5433/fake_database")
+
+
+def test_client_auth(dbos: DBOS, client: DBOSClient) -> None:
+    run_client_collateral()
+
+    johnDoe: Person = {"first": "John", "last": "Doe", "age": 30}
+    wfid = str(uuid.uuid4())
+
+    user = "testuser"
+    roles = ["role1", "role2"]
+
+    options: EnqueueOptions = {
+        "queue_name": "test_queue",
+        "workflow_name": "enqueue_test",
+        "workflow_id": wfid,
+        "authenticated_user": user,
+        "authenticated_roles": roles,
+    }
+
+    handle: WorkflowHandle[str] = client.enqueue(options, 42, "test", johnDoe)
+    result = handle.get_result()
+    assert result == '42-test-{"first": "John", "last": "Doe", "age": 30}'
+
+    list_results = client.list_workflows()
+    assert len(list_results) == 1
+    assert list_results[0].workflow_id == wfid
+    assert list_results[0].status == "SUCCESS"
+    assert list_results[0].output == result
+    assert list_results[0].input is not None
+    assert list_results[0].authenticated_user == user
+    assert list_results[0].authenticated_roles == roles
+    assert list_results[0].assumed_role is None
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_config.py
@@ -209,7 +209,7 @@ def test_process_config_full():
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 1},
+        "connect_args": {"connect_timeout": 1, "application_name": "dbos_transact"},
     }
     assert configFile["database"]["sys_db_engine_kwargs"] == {
         "key": "value",
@@ -217,7 +217,7 @@ def test_process_config_full():
         "max_overflow": 0,
         "pool_size": 27,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 1},
+        "connect_args": {"connect_timeout": 1, "application_name": "dbos_transact"},
     }
     assert configFile["runtimeConfig"]["start"] == ["python3 main.py"]
     assert configFile["runtimeConfig"]["admin_port"] == 8001
@@ -255,7 +255,7 @@ def test_process_config_system_database():
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 1},
+        "connect_args": {"connect_timeout": 1, "application_name": "dbos_transact"},
     }
     assert configFile["database"]["sys_db_engine_kwargs"] == {
         "key": "value",
@@ -263,7 +263,7 @@ def test_process_config_system_database():
         "max_overflow": 0,
         "pool_size": 27,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 1},
+        "connect_args": {"connect_timeout": 1, "application_name": "dbos_transact"},
     }


@@ -397,14 +397,14 @@ def test_configure_db_engine_parameters_defaults():
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }
     assert data["sys_db_engine_kwargs"] == {
         "pool_timeout": 30,
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }


@@ -419,14 +419,14 @@ def test_configure_db_engine_parameters_custom_sys_db_pool_sizes():
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }
     assert data["sys_db_engine_kwargs"] == {
         "pool_timeout": 30,
         "max_overflow": 0,
         "pool_size": 35,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }


@@ -440,7 +440,11 @@ def test_configure_db_engine_parameters_user_kwargs_override():
             "pool_pre_ping": True,
             "custom_param": "value",
             "pool_size": 50,
-            "connect_args": {"connect_timeout": 30, "key": "value"},
+            "connect_args": {
+                "connect_timeout": 30,
+                "key": "value",
+                "application_name": "dbos_transact",
+            },
         },
     }

@@ -453,7 +457,11 @@ def test_configure_db_engine_parameters_user_kwargs_override():
         "pool_pre_ping": True,
         "custom_param": "value",
         "pool_size": 50,
-        "connect_args": {"connect_timeout": 30, "key": "value"},
+        "connect_args": {
+            "connect_timeout": 30,
+            "key": "value",
+            "application_name": "dbos_transact",
+        },
     }

     # System engine kwargs should use system pool size but same user overrides
@@ -463,7 +471,11 @@ def test_configure_db_engine_parameters_user_kwargs_override():
         "pool_pre_ping": True,
         "custom_param": "value",
         "pool_size": 35,
-        "connect_args": {"connect_timeout": 30, "key": "value"},
+        "connect_args": {
+            "connect_timeout": 30,
+            "key": "value",
+            "application_name": "dbos_transact",
+        },
     }


@@ -487,7 +499,7 @@ def test_configure_db_engine_parameters_user_kwargs_and_db_url_connect_timeout()
         "pool_pre_ping": True,
         "custom_param": "value",
         "pool_size": 50,
-        "connect_args": {"connect_timeout": 22},
+        "connect_args": {"connect_timeout": 22, "application_name": "dbos_transact"},
     }

     # System engine kwargs should use system pool size but same user overrides
@@ -497,7 +509,7 @@ def test_configure_db_engine_parameters_user_kwargs_and_db_url_connect_timeout()
         "pool_pre_ping": True,
         "custom_param": "value",
         "pool_size": 50,
-        "connect_args": {"connect_timeout": 22},
+        "connect_args": {"connect_timeout": 22, "application_name": "dbos_transact"},
     }


@@ -556,7 +568,7 @@ def test_configure_db_engine_parameters_user_kwargs_mixed_params():
         "pool_pre_ping": True,
         "custom_param": "value",
         "pool_size": 50,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }

     # System engine kwargs should use system pool size but same user overrides
@@ -566,7 +578,7 @@ def test_configure_db_engine_parameters_user_kwargs_mixed_params():
         "pool_pre_ping": True,
         "custom_param": "value",
         "pool_size": 50,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }


@@ -581,14 +593,14 @@ def test_configure_db_engine_parameters_empty_user_kwargs():
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }
     assert data["sys_db_engine_kwargs"] == {
         "pool_timeout": 30,
         "max_overflow": 0,
         "pool_size": 20,
         "pool_pre_ping": True,
-        "connect_args": {"connect_timeout": 10},
+        "connect_args": {"connect_timeout": 10, "application_name": "dbos_transact"},
     }


@@ -598,24 +610,6 @@ def test_configure_db_engine_parameters_empty_user_kwargs():


 def test_process_config_with_wrong_db_url():
-    # Missing username
-    config: ConfigFile = {
-        "name": "some-app",
-        "database_url": "postgres://:password@h:1234/dbname",
-    }
-    with pytest.raises(DBOSInitializationError) as exc_info:
-        process_config(data=config)
-    assert "Username must be specified in the connection URL" in str(exc_info.value)
-
-    # Missing host
-    config: ConfigFile = {
-        "name": "some-app",
-        "database_url": "postgres://user:password@:1234/dbname",
-    }
-    with pytest.raises(DBOSInitializationError) as exc_info:
-        process_config(data=config)
-    assert "Host must be specified in the connection URL" in str(exc_info.value)
-
     # Missing dbname
     config: ConfigFile = {
         "name": "some-app",
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_dbos.py
@@ -37,6 +37,7 @@ from dbos._error import (
 from dbos._schemas.system_database import SystemSchema
 from dbos._sys_db import GetWorkflowsInput
 from dbos._utils import GlobalParams
+from tests.conftest import using_sqlite


 def test_simple_workflow(dbos: DBOS) -> None:
@@ -1796,6 +1797,65 @@ def test_without_appdb(config: DBOSConfig, cleanup_test_databases: None) -> None
         assert s["function_name"] == step.__qualname__


+def test_custom_database(
+    config: DBOSConfig, db_engine: sa.Engine, cleanup_test_databases: None
+) -> None:
+    DBOS.destroy(destroy_registry=True)
+    assert config["system_database_url"]
+    custom_database = "F8nny_dAtaB@s3@-n@m3.sqlite"
+    url = sa.make_url(config["system_database_url"])
+    url = url.set(database=custom_database)
+    config["system_database_url"] = url.render_as_string(hide_password=False)
+    # Destroy the database if it exists
+    if using_sqlite():
+        parsed_url = sa.make_url(config["system_database_url"])
+        db_path = parsed_url.database
+        assert db_path is not None
+        if os.path.exists(db_path):
+            os.remove(db_path)
+    else:
+        with db_engine.connect() as connection:
+            connection.execution_options(isolation_level="AUTOCOMMIT")
+            connection.execute(
+                sa.text(f'DROP DATABASE IF EXISTS "{custom_database}" WITH (FORCE)')
+            )
+    DBOS(config=config)
+    DBOS.launch()
+
+    key = "key"
+    val = "val"
+
+    @DBOS.transaction()
+    def transaction() -> None:
+        return
+
+    @DBOS.workflow()
+    def recv_workflow() -> Any:
+        transaction()
+        DBOS.set_event(key, val)
+        return DBOS.recv()
+
+    handle = DBOS.start_workflow(recv_workflow)
+    assert DBOS.get_event(handle.workflow_id, key) == val
+    DBOS.send(handle.workflow_id, val)
+    assert handle.get_result() == val
+    assert len(DBOS.list_workflows()) == 2
+    steps = DBOS.list_workflow_steps(handle.workflow_id)
+    assert len(steps) == 4
+    assert "transaction" in steps[0]["function_name"]
+    DBOS.destroy(destroy_registry=True)
+
+    # Test custom database with client
+    client = DBOSClient(
+        system_database_url=config["system_database_url"],
+        application_database_url=config["application_database_url"],
+    )
+    assert len(client.list_workflows()) == 2
+    steps = client.list_workflow_steps(handle.workflow_id)
+    assert len(steps) == 4
+    assert "transaction" in steps[0]["function_name"]
+
+
 def test_custom_schema(
     config: DBOSConfig, cleanup_test_databases: None, skip_with_sqlite: None
 ) -> None:
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_failures.py
@@ -1,7 +1,7 @@
 import threading
 import time
 import uuid
-from typing import cast
+from typing import Any, Generator, cast

 import pytest
 import sqlalchemy as sa
@@ -553,3 +553,16 @@ def test_unregistered_workflow(dbos: DBOS, config: DBOSConfig) -> None:

     with pytest.raises(DBOSWorkflowFunctionNotFoundError):
         DBOS._recover_pending_workflows()
+
+
+def test_nonserializable_return(dbos: DBOS) -> None:
+    @DBOS.step()
+    def step() -> Generator[str, Any, None]:
+        yield "val"
+
+    @DBOS.workflow()
+    def workflow() -> None:
+        step()
+
+    with pytest.raises(TypeError):
+        workflow()
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_kafka.py
@@ -12,24 +12,25 @@ from dbos import DBOS, KafkaMessage
 # Without it, they're automatically skipped.
 # Here's a docker-compose script you can use to set up local Kafka:

-# version: "3.7"
 # services:
 #   broker:
-#     image: bitnami/kafka:latest
+#     image: apache/kafka:latest
 #     hostname: broker
 #     container_name: broker
 #     ports:
 #       - '9092:9092'
 #     environment:
-#       KAFKA_CFG_NODE_ID: 1
-#       KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
-#       KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT_HOST://localhost:9092,PLAINTEXT://broker:19092'
-#       KAFKA_CFG_PROCESS_ROLES: 'broker,controller'
-#       KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: '1@broker:29093'
-#       KAFKA_CFG_LISTENERS: 'CONTROLLER://:29093,PLAINTEXT_HOST://:9092,PLAINTEXT://:19092'
-#       KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
-#       KAFKA_CFG_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
-
+#       KAFKA_NODE_ID: 1
+#       KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093
+#       KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092
+#       KAFKA_PROCESS_ROLES: broker,controller
+#       KAFKA_CONTROLLER_QUORUM_VOTERS: 1@localhost:9093
+#       KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
+#       KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT
+#       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+#       KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+#       KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+#       CLUSTER_ID: MkU3OEVBNTcwNTJENDM2Qk

 NUM_EVENTS = 3

@@ -81,12 +82,44 @@ def test_kafka(dbos: DBOS) -> None:
         assert b"test message key" in msg.key  # type: ignore
         assert b"test message value" in msg.value  # type: ignore
         print(msg)
-        if kafka_count == 3:
+        if kafka_count == NUM_EVENTS:
+            event.set()
+
+    wait = event.wait(timeout=10)
+    assert wait
+    assert kafka_count == NUM_EVENTS
+
+
+def test_kafka_async(dbos: DBOS) -> None:
+    event = threading.Event()
+    kafka_count = 0
+    server = "localhost:9092"
+    topic = f"dbos-kafka-{random.randrange(1_000_000_000)}"
+
+    if not send_test_messages(server, topic):
+        pytest.skip("Kafka not available")
+
+    @DBOS.kafka_consumer(
+        {
+            "bootstrap.servers": server,
+            "group.id": "dbos-test",
+            "auto.offset.reset": "earliest",
+        },
+        [topic],
+    )
+    @DBOS.workflow()
+    async def test_kafka_workflow(msg: KafkaMessage) -> None:
+        nonlocal kafka_count
+        kafka_count += 1
+        assert b"test message key" in msg.key  # type: ignore
+        assert b"test message value" in msg.value  # type: ignore
+        print(msg)
+        if kafka_count == NUM_EVENTS:
             event.set()

     wait = event.wait(timeout=10)
     assert wait
-    assert kafka_count == 3
+    assert kafka_count == NUM_EVENTS


 def test_kafka_in_order(dbos: DBOS) -> None:
@@ -114,12 +147,12 @@ def test_kafka_in_order(dbos: DBOS) -> None:
         kafka_count += 1
         assert f"test message key {kafka_count - 1}".encode() == msg.key
         print(msg)
-        if kafka_count == 3:
+        if kafka_count == NUM_EVENTS:
             event.set()

     wait = event.wait(timeout=15)
     assert wait
-    assert kafka_count == 3
+    assert kafka_count == NUM_EVENTS
     time.sleep(2)  # Wait for things to clean up


@@ -150,9 +183,9 @@ def test_kafka_no_groupid(dbos: DBOS) -> None:
         assert b"test message key" in msg.key  # type: ignore
         assert b"test message value" in msg.value  # type: ignore
         print(msg)
-        if kafka_count == 6:
+        if kafka_count == NUM_EVENTS * 2:
             event.set()

     wait = event.wait(timeout=10)
     assert wait
-    assert kafka_count == 6
+    assert kafka_count == NUM_EVENTS * 2
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_queue.py
@@ -1476,7 +1476,7 @@ def test_unsetting_timeout(dbos: DBOS) -> None:
         queue.enqueue(child)

     child_one, child_two = str(uuid.uuid4()), str(uuid.uuid4())
-    with SetWorkflowTimeout(1.0):
+    with SetWorkflowTimeout(2.0):
         queue.enqueue(parent, child_one, child_two).get_result()

     # Verify child one, which has a propagated timeout, is cancelled
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_scheduler.py
@@ -105,6 +105,19 @@ def test_scheduled_workflow(dbos: DBOS) -> None:
     assert wf_counter > 1 and wf_counter <= 5


+def test_async_scheduled_workflow(dbos: DBOS) -> None:
+    wf_counter: int = 0
+
+    @DBOS.scheduled("* * * * * *")
+    @DBOS.workflow()
+    async def test_workflow(scheduled: datetime, actual: datetime) -> None:
+        nonlocal wf_counter
+        wf_counter += 1
+
+    time.sleep(5)
+    assert wf_counter > 1 and wf_counter <= 5
+
+
 def test_appdb_downtime(dbos: DBOS, skip_with_sqlite: None) -> None:
     wf_counter: int = 0
{dbos-2.2.0a3 → dbos-2.3.0}/tests/test_spans.py
@@ -5,7 +5,6 @@ import pytest
 from fastapi import FastAPI
 from fastapi.testclient import TestClient
 from inline_snapshot import snapshot
-from opentelemetry._logs import set_logger_provider
 from opentelemetry.sdk import trace as tracesdk
 from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
 from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, InMemoryLogExporter
@@ -17,6 +16,7 @@ from dbos import DBOS, DBOSConfig
 from dbos._logger import dbos_logger
 from dbos._tracer import dbos_tracer
 from dbos._utils import GlobalParams
+from tests.conftest import default_config


 @dataclass
@@ -65,7 +65,6 @@ def test_spans(config: DBOSConfig) -> None:
     log_processor = BatchLogRecordProcessor(log_exporter)
     log_provider = LoggerProvider()
     log_provider.add_log_record_processor(log_processor)
-    set_logger_provider(log_provider)
     dbos_logger.addHandler(LoggingHandler(logger_provider=log_provider))

     test_workflow()
@@ -192,7 +191,6 @@ async def test_spans_async(dbos: DBOS) -> None:
     log_processor = BatchLogRecordProcessor(log_exporter)
     log_provider = LoggerProvider()
     log_provider.add_log_record_processor(log_processor)
-    set_logger_provider(log_provider)
     dbos_logger.addHandler(LoggingHandler(logger_provider=log_provider))

     await test_workflow()
@@ -303,7 +301,6 @@ def test_wf_fastapi(dbos_fastapi: Tuple[DBOS, FastAPI]) -> None:
     log_processor = BatchLogRecordProcessor(log_exporter)
     log_provider = LoggerProvider()
     log_provider.add_log_record_processor(log_processor)
-    set_logger_provider(log_provider)
     dbos_logger.addHandler(LoggingHandler(logger_provider=log_provider))

     client = TestClient(app)
@@ -378,7 +375,6 @@ def test_disable_otlp_no_spans(config: DBOSConfig) -> None:
     log_processor = BatchLogRecordProcessor(log_exporter)
     log_provider = LoggerProvider()
     log_provider.add_log_record_processor(log_processor)
-    set_logger_provider(log_provider)
     dbos_logger.addHandler(LoggingHandler(logger_provider=log_provider))

     test_workflow()