dbos 2.3.0a4__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dbos/_admin_server.py CHANGED
@@ -338,6 +338,7 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
                 end_time=filters.get("end_time"),
                 status=filters.get("status"),
                 app_version=filters.get("application_version"),
+                forked_from=filters.get("forked_from"),
                 name=filters.get("workflow_name"),
                 limit=filters.get("limit"),
                 offset=filters.get("offset"),
@@ -364,6 +365,7 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
                 start_time=filters.get("start_time"),
                 end_time=filters.get("end_time"),
                 status=filters.get("status"),
+                forked_from=filters.get("forked_from"),
                 name=filters.get("workflow_name"),
                 limit=filters.get("limit"),
                 offset=filters.get("offset"),
dbos/_app_db.py CHANGED
@@ -201,6 +201,8 @@ class ApplicationDatabase(ABC):
                     else row[3]
                 ),
                 child_workflow_id=None,
+                started_at_epoch_ms=None,
+                completed_at_epoch_ms=None,
             )
             for row in rows
         ]
dbos/_client.py CHANGED
@@ -1,7 +1,6 @@
 import asyncio
 import json
 import time
-import uuid
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -20,6 +19,7 @@ import sqlalchemy as sa
 from dbos._app_db import ApplicationDatabase
 from dbos._context import MaxPriority, MinPriority
 from dbos._sys_db import SystemDatabase
+from dbos._utils import generate_uuid
 
 if TYPE_CHECKING:
     from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
@@ -149,22 +149,27 @@ class DBOSClient:
         self._sys_db = SystemDatabase.create(
             system_database_url=system_database_url,
             engine_kwargs={
+                "connect_args": {"application_name": "dbos_transact_client"},
                 "pool_timeout": 30,
                 "max_overflow": 0,
                 "pool_size": 2,
+                "pool_pre_ping": True,
             },
             engine=system_database_engine,
             schema=dbos_system_schema,
             serializer=serializer,
+            executor_id=None,
         )
         self._sys_db.check_connection()
         if application_database_url:
             self._app_db = ApplicationDatabase.create(
                 database_url=application_database_url,
                 engine_kwargs={
+                    "connect_args": {"application_name": "dbos_transact_client"},
                     "pool_timeout": 30,
                     "max_overflow": 0,
                     "pool_size": 2,
+                    "pool_pre_ping": True,
                 },
                 schema=dbos_system_schema,
                 serializer=serializer,
@@ -183,7 +188,7 @@ class DBOSClient:
         max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS
         workflow_id = options.get("workflow_id")
         if workflow_id is None:
-            workflow_id = str(uuid.uuid4())
+            workflow_id = generate_uuid()
         workflow_timeout = options.get("workflow_timeout", None)
         enqueue_options_internal: EnqueueOptionsInternal = {
             "deduplication_id": options.get("deduplication_id"),
@@ -234,6 +239,7 @@ class DBOSClient:
            ),
            "inputs": self._serializer.serialize(inputs),
            "queue_partition_key": enqueue_options_internal["queue_partition_key"],
+           "forked_from": None,
        }
 
        self._sys_db.init_workflow(
@@ -275,7 +281,7 @@ class DBOSClient:
         topic: Optional[str] = None,
         idempotency_key: Optional[str] = None,
     ) -> None:
-        idempotency_key = idempotency_key if idempotency_key else str(uuid.uuid4())
+        idempotency_key = idempotency_key if idempotency_key else generate_uuid()
         status: WorkflowStatusInternal = {
             "workflow_uuid": f"{destination_id}-{idempotency_key}",
             "status": WorkflowStatusString.SUCCESS.value,
@@ -300,6 +306,7 @@ class DBOSClient:
             "priority": 0,
             "inputs": self._serializer.serialize({"args": (), "kwargs": {}}),
             "queue_partition_key": None,
+            "forked_from": None,
         }
         with self._sys_db.engine.begin() as conn:
             self._sys_db._insert_workflow_status(
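
The `generate_uuid` helper that this release imports from `dbos._utils` is not itself shown in the diff; given that it replaces every `str(uuid.uuid4())` call, it is presumably a thin wrapper around the standard library. A minimal sketch under that assumption:

import uuid


def generate_uuid() -> str:
    # Hypothetical stand-in for dbos._utils.generate_uuid, which this diff
    # imports but does not show. The only contract visible here is that it
    # returns a workflow-ID-ready string, like the str(uuid.uuid4()) calls
    # it replaces.
    return str(uuid.uuid4())
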
@@ -2,7 +2,6 @@ import socket
 import threading
 import time
 import traceback
-import uuid
 from importlib.metadata import version
 from typing import TYPE_CHECKING, Optional
 
@@ -11,7 +10,7 @@ from websockets.sync.client import connect
 from websockets.sync.connection import Connection
 
 from dbos._context import SetWorkflowID
-from dbos._utils import GlobalParams
+from dbos._utils import GlobalParams, generate_uuid
 from dbos._workflow_commands import (
     garbage_collect,
     get_workflow,
@@ -192,7 +191,7 @@ class ConductorWebsocket(threading.Thread):
                 fork_message = p.ForkWorkflowRequest.from_json(message)
                 new_workflow_id = fork_message.body["new_workflow_id"]
                 if new_workflow_id is None:
-                    new_workflow_id = str(uuid.uuid4())
+                    new_workflow_id = generate_uuid()
                 workflow_id = fork_message.body["workflow_id"]
                 start_step = fork_message.body["start_step"]
                 app_version = fork_message.body["application_version"]
@@ -223,22 +222,21 @@ class ConductorWebsocket(threading.Thread):
                 body = list_workflows_message.body
                 infos = []
                 try:
-                    load_input = body.get("load_input", False)
-                    load_output = body.get("load_output", False)
                     infos = list_workflows(
                         self.dbos._sys_db,
-                        workflow_ids=body["workflow_uuids"],
-                        user=body["authenticated_user"],
-                        start_time=body["start_time"],
-                        end_time=body["end_time"],
-                        status=body["status"],
-                        app_version=body["application_version"],
-                        name=body["workflow_name"],
-                        limit=body["limit"],
-                        offset=body["offset"],
-                        sort_desc=body["sort_desc"],
-                        load_input=load_input,
-                        load_output=load_output,
+                        workflow_ids=body.get("workflow_uuids", None),
+                        user=body.get("authenticated_user", None),
+                        start_time=body.get("start_time", None),
+                        end_time=body.get("end_time", None),
+                        status=body.get("status", None),
+                        app_version=body.get("application_version", None),
+                        forked_from=body.get("forked_from", None),
+                        name=body.get("workflow_name", None),
+                        limit=body.get("limit", None),
+                        offset=body.get("offset", None),
+                        sort_desc=body.get("sort_desc", False),
+                        load_input=body.get("load_input", False),
+                        load_output=body.get("load_output", False),
                     )
                 except Exception as e:
                     error_message = f"Exception encountered when listing workflows: {traceback.format_exc()}"
@@ -261,18 +259,18 @@ class ConductorWebsocket(threading.Thread):
                 q_body = list_queued_workflows_message.body
                 infos = []
                 try:
-                    q_load_input = q_body.get("load_input", False)
                     infos = list_queued_workflows(
                         self.dbos._sys_db,
-                        start_time=q_body["start_time"],
-                        end_time=q_body["end_time"],
-                        status=q_body["status"],
-                        name=q_body["workflow_name"],
-                        limit=q_body["limit"],
-                        offset=q_body["offset"],
-                        queue_name=q_body["queue_name"],
-                        sort_desc=q_body["sort_desc"],
-                        load_input=q_load_input,
+                        start_time=q_body.get("start_time", None),
+                        end_time=q_body.get("end_time", None),
+                        status=q_body.get("status", None),
+                        forked_from=q_body.get("forked_from", None),
+                        name=q_body.get("workflow_name", None),
+                        limit=q_body.get("limit", None),
+                        offset=q_body.get("offset", None),
+                        queue_name=q_body.get("queue_name", None),
+                        sort_desc=q_body.get("sort_desc", False),
+                        load_input=q_body.get("load_input", False),
                    )
                 except Exception as e:
                     error_message = f"Exception encountered when listing queued workflows: {traceback.format_exc()}"
@@ -118,6 +118,7 @@ class ListWorkflowsBody(TypedDict, total=False):
     end_time: Optional[str]
     status: Optional[str]
     application_version: Optional[str]
+    forked_from: Optional[str]
     limit: Optional[int]
     offset: Optional[int]
     sort_desc: bool
@@ -143,6 +144,12 @@ class WorkflowsOutput:
     QueueName: Optional[str]
     ApplicationVersion: Optional[str]
     ExecutorID: Optional[str]
+    WorkflowTimeoutMS: Optional[str]
+    WorkflowDeadlineEpochMS: Optional[str]
+    DeduplicationID: Optional[str]
+    Priority: Optional[str]
+    QueuePartitionKey: Optional[str]
+    ForkedFrom: Optional[str]
 
     @classmethod
     def from_workflow_information(cls, info: WorkflowStatus) -> "WorkflowsOutput":
@@ -152,12 +159,22 @@ class WorkflowsOutput:
         inputs_str = str(info.input) if info.input is not None else None
         outputs_str = str(info.output) if info.output is not None else None
         error_str = str(info.error) if info.error is not None else None
-        request_str = None
         roles_str = (
             str(info.authenticated_roles)
             if info.authenticated_roles is not None
             else None
         )
+        workflow_timeout_ms_str = (
+            str(info.workflow_timeout_ms)
+            if info.workflow_timeout_ms is not None
+            else None
+        )
+        workflow_deadline_epoch_ms_str = (
+            str(info.workflow_deadline_epoch_ms)
+            if info.workflow_deadline_epoch_ms is not None
+            else None
+        )
+        priority_str = str(info.priority) if info.priority is not None else None
 
         return cls(
             WorkflowUUID=info.workflow_id,
@@ -176,6 +193,12 @@ class WorkflowsOutput:
             QueueName=info.queue_name,
             ApplicationVersion=info.app_version,
             ExecutorID=info.executor_id,
+            WorkflowTimeoutMS=workflow_timeout_ms_str,
+            WorkflowDeadlineEpochMS=workflow_deadline_epoch_ms_str,
+            DeduplicationID=info.deduplication_id,
+            Priority=priority_str,
+            QueuePartitionKey=info.queue_partition_key,
+            ForkedFrom=info.forked_from,
         )
 
 
@@ -186,14 +209,28 @@ class WorkflowSteps:
     output: Optional[str]
     error: Optional[str]
     child_workflow_id: Optional[str]
+    started_at_epoch_ms: Optional[str]
+    completed_at_epoch_ms: Optional[str]
 
     @classmethod
     def from_step_info(cls, info: StepInfo) -> "WorkflowSteps":
         output_str = str(info["output"]) if info["output"] is not None else None
         error_str = str(info["error"]) if info["error"] is not None else None
+        started_at_str = (
+            str(info["started_at_epoch_ms"])
+            if info["started_at_epoch_ms"] is not None
+            else None
+        )
+        completed_at_str = (
+            str(info["completed_at_epoch_ms"])
+            if info["completed_at_epoch_ms"] is not None
+            else None
+        )
         return cls(
             function_id=info["function_id"],
             function_name=info["function_name"],
+            started_at_epoch_ms=started_at_str,
+            completed_at_epoch_ms=completed_at_str,
             output=output_str,
             error=error_str,
             child_workflow_id=info["child_workflow_id"],
@@ -216,6 +253,7 @@ class ListQueuedWorkflowsBody(TypedDict, total=False):
     start_time: Optional[str]
     end_time: Optional[str]
     status: Optional[str]
+    forked_from: Optional[str]
     queue_name: Optional[str]
     limit: Optional[int]
     offset: Optional[int]
dbos/_context.py CHANGED
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import json
 import os
-import uuid
 from contextlib import AbstractContextManager
 from contextvars import ContextVar
 from dataclasses import dataclass
@@ -15,7 +14,7 @@ if TYPE_CHECKING:
 
 from sqlalchemy.orm import Session
 
-from dbos._utils import GlobalParams
+from dbos._utils import GlobalParams, generate_uuid
 
 from ._logger import dbos_logger
 from ._tracer import dbos_tracer
@@ -151,7 +150,7 @@ class DBOSContext:
             self.logger.warning(
                 f"Multiple workflows started in the same SetWorkflowID block. Only the first workflow is assigned the specified workflow ID; subsequent workflows will use a generated workflow ID."
             )
-            wfid = str(uuid.uuid4())
+            wfid = generate_uuid()
         return wfid
 
     def start_workflow(
dbos/_core.py CHANGED
@@ -300,6 +300,7 @@ def _init_workflow(
             if enqueue_options is not None
             else None
         ),
+        "forked_from": None,
     }
 
     # Synchronously record the status and inputs for workflows
@@ -316,6 +317,7 @@ def _init_workflow(
             "function_name": wf_name,
             "output": None,
             "error": dbos._serializer.serialize(e),
+            "started_at_epoch_ms": int(time.time() * 1000),
         }
         dbos._sys_db.record_operation_result(result)
         raise
@@ -1118,6 +1120,7 @@ def decorate_step(
         "function_name": step_name,
         "output": None,
         "error": None,
+        "started_at_epoch_ms": int(time.time() * 1000),
    }
 
     try:
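
Because steps now record `started_at_epoch_ms` when they begin (and, per the protocol changes above, `StepInfo` also carries `completed_at_epoch_ms`), per-step wall-clock durations can be derived from a workflow's step listing. A rough sketch, assuming `DBOS.list_workflow_steps` returns `StepInfo` dicts carrying both fields and that both timestamps are populated:

from dbos import DBOS

# "my-workflow-id" is a placeholder; substitute a real workflow ID.
for step in DBOS.list_workflow_steps("my-workflow-id"):
    started = step.get("started_at_epoch_ms")
    completed = step.get("completed_at_epoch_ms")
    if started is not None and completed is not None:
        # Both values are Unix epoch milliseconds in this release's schema.
        print(f'{step["function_name"]}: {completed - started} ms')
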
dbos/_dbos.py CHANGED
@@ -7,7 +7,6 @@ import os
 import sys
 import threading
 import time
-import uuid
 from concurrent.futures import ThreadPoolExecutor
 from logging import Logger
 from typing import (
@@ -33,7 +32,7 @@ from dbos._conductor.conductor import ConductorWebsocket
 from dbos._debouncer import debouncer_workflow
 from dbos._serialization import DefaultSerializer, Serializer
 from dbos._sys_db import SystemDatabase, WorkflowStatus
-from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams
+from dbos._utils import INTERNAL_QUEUE_NAME, GlobalParams, generate_uuid
 from dbos._workflow_commands import fork_workflow, list_queued_workflows, list_workflows
 
 from ._classproperty import classproperty
@@ -444,7 +443,7 @@ class DBOS:
         if GlobalParams.app_version == "":
             GlobalParams.app_version = self._registry.compute_app_version()
         if self.conductor_key is not None:
-            GlobalParams.executor_id = str(uuid.uuid4())
+            GlobalParams.executor_id = generate_uuid()
         dbos_logger.info(f"Executor ID: {GlobalParams.executor_id}")
         dbos_logger.info(f"Application version: {GlobalParams.app_version}")
         self._executor_field = ThreadPoolExecutor(max_workers=sys.maxsize)
@@ -460,6 +459,7 @@ class DBOS:
             debug_mode=debug_mode,
             schema=schema,
             serializer=self._serializer,
+            executor_id=GlobalParams.executor_id,
         )
         assert self._config["database"]["db_engine_kwargs"] is not None
         if self._config["database_url"]:
@@ -495,20 +495,21 @@ class DBOS:
             except Exception as e:
                 dbos_logger.warning(f"Failed to start admin server: {e}")
 
-        dbos_logger.debug("Retrieving local pending workflows for recovery")
-        workflow_ids = self._sys_db.get_pending_workflows(
-            GlobalParams.executor_id, GlobalParams.app_version
-        )
-        if (len(workflow_ids)) > 0:
-            self.logger.info(
-                f"Recovering {len(workflow_ids)} workflows from application version {GlobalParams.app_version}"
-            )
-        else:
-            self.logger.info(
-                f"No workflows to recover from application version {GlobalParams.app_version}"
+        # Recover local workflows if not using a recovery service
+        if not self.conductor_key and not GlobalParams.dbos_cloud:
+            dbos_logger.debug("Retrieving local pending workflows for recovery")
+            workflow_ids = self._sys_db.get_pending_workflows(
+                GlobalParams.executor_id, GlobalParams.app_version
             )
-
-        self._executor.submit(startup_recovery_thread, self, workflow_ids)
+            if (len(workflow_ids)) > 0:
+                self.logger.info(
+                    f"Recovering {len(workflow_ids)} workflows from application version {GlobalParams.app_version}"
+                )
+            else:
+                self.logger.info(
+                    f"No workflows to recover from application version {GlobalParams.app_version}"
+                )
+            self._executor.submit(startup_recovery_thread, self, workflow_ids)
 
         # Listen to notifications
         dbos_logger.debug("Starting notifications listener thread")
@@ -1127,7 +1128,9 @@ class DBOS:
         end_time: Optional[str] = None,
         name: Optional[str] = None,
        app_version: Optional[str] = None,
+        forked_from: Optional[str] = None,
         user: Optional[str] = None,
+        queue_name: Optional[str] = None,
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         sort_desc: bool = False,
@@ -1144,6 +1147,7 @@ class DBOS:
             end_time=end_time,
             name=name,
             app_version=app_version,
+            forked_from=forked_from,
             user=user,
             limit=limit,
             offset=offset,
@@ -1151,6 +1155,7 @@ class DBOS:
             workflow_id_prefix=workflow_id_prefix,
             load_input=load_input,
             load_output=load_output,
+            queue_name=queue_name,
         )
 
         return _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1167,6 +1172,7 @@ class DBOS:
         end_time: Optional[str] = None,
         name: Optional[str] = None,
         app_version: Optional[str] = None,
+        forked_from: Optional[str] = None,
         user: Optional[str] = None,
         limit: Optional[int] = None,
         offset: Optional[int] = None,
@@ -1184,6 +1190,7 @@ class DBOS:
             end_time=end_time,
             name=name,
             app_version=app_version,
+            forked_from=forked_from,
             user=user,
             limit=limit,
             offset=offset,
@@ -1199,6 +1206,7 @@ class DBOS:
         *,
         queue_name: Optional[str] = None,
         status: Optional[Union[str, List[str]]] = None,
+        forked_from: Optional[str] = None,
         start_time: Optional[str] = None,
         end_time: Optional[str] = None,
         name: Optional[str] = None,
@@ -1212,6 +1220,7 @@ class DBOS:
             _get_dbos_instance()._sys_db,
             queue_name=queue_name,
             status=status,
+            forked_from=forked_from,
             start_time=start_time,
             end_time=end_time,
             name=name,
@@ -1231,6 +1240,7 @@ class DBOS:
         *,
         queue_name: Optional[str] = None,
         status: Optional[Union[str, List[str]]] = None,
+        forked_from: Optional[str] = None,
         start_time: Optional[str] = None,
         end_time: Optional[str] = None,
         name: Optional[str] = None,
@@ -1244,6 +1254,7 @@ class DBOS:
             cls.list_queued_workflows,
             queue_name=queue_name,
             status=status,
+            forked_from=forked_from,
             start_time=start_time,
             end_time=end_time,
             name=name,
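
The new `forked_from` parameter is threaded through `DBOS.list_workflows`, `DBOS.list_queued_workflows`, and their async variants. A hedged usage sketch, assuming the filter matches workflows whose recorded `forked_from` value equals the given original workflow ID:

from dbos import DBOS

# "original-workflow-id" is a placeholder for a workflow that has been forked.
forks = DBOS.list_workflows(forked_from="original-workflow-id")
for wf in forks:
    print(wf.workflow_id, wf.status)
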
dbos/_dbos_config.py CHANGED
@@ -264,8 +264,7 @@ def load_config(
             data["telemetry"]["OTLPExporter"]["tracesEndpoint"]
         ]
 
-    data = cast(ConfigFile, data)
-    return data  # type: ignore
+    return cast(ConfigFile, data)
 
 
 def process_config(
dbos/_debouncer.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import math
 import time
 import types
-import uuid
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -39,7 +38,7 @@ from dbos._error import DBOSQueueDeduplicatedError
 from dbos._queue import Queue
 from dbos._registrations import get_dbos_func_name
 from dbos._serialization import WorkflowInputs
-from dbos._utils import INTERNAL_QUEUE_NAME
+from dbos._utils import INTERNAL_QUEUE_NAME, generate_uuid
 
 if TYPE_CHECKING:
     from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
@@ -209,7 +208,7 @@ class Debouncer(Generic[P, R]):
 
         # Deterministically generate the user workflow ID and message ID
         def assign_debounce_ids() -> tuple[str, str]:
-            return str(uuid.uuid4()), ctx.assign_workflow_id()
+            return generate_uuid(), ctx.assign_workflow_id()
 
         message_id, user_workflow_id = dbos._sys_db.call_function_as_step(
             assign_debounce_ids, "DBOS.assign_debounce_ids"
@@ -320,14 +319,14 @@ class DebouncerClient:
             "workflow_id": (
                 self.workflow_options["workflow_id"]
                 if self.workflow_options.get("workflow_id")
-                else str(uuid.uuid4())
+                else generate_uuid()
             ),
             "app_version": self.workflow_options.get("app_version"),
             "deduplication_id": self.workflow_options.get("deduplication_id"),
             "priority": self.workflow_options.get("priority"),
             "workflow_timeout_sec": self.workflow_options.get("workflow_timeout"),
         }
-        message_id = str(uuid.uuid4())
+        message_id = generate_uuid()
         while True:
             try:
                 # Attempt to enqueue a debouncer for this workflow.
dbos/_fastapi.py CHANGED
@@ -1,4 +1,3 @@
-import uuid
 from typing import Any, Callable, MutableMapping, cast
 
 from fastapi import FastAPI
@@ -9,7 +8,7 @@ from starlette.types import ASGIApp, Receive, Scope, Send
 from . import DBOS
 from ._context import EnterDBOSHandler, OperationType, SetWorkflowID, TracedAttributes
 from ._error import DBOSException
-from ._utils import request_id_header
+from ._utils import generate_uuid, request_id_header
 
 
 def _get_or_generate_request_id(request: FastAPIRequest) -> str:
@@ -17,7 +16,7 @@ def _get_or_generate_request_id(request: FastAPIRequest) -> str:
     if request_id is not None:
         return request_id
     else:
-        return str(uuid.uuid4())
+        return generate_uuid()
 
 
 async def _dbos_error_handler(request: FastAPIRequest, gexc: Exception) -> JSONResponse:
dbos/_flask.py CHANGED
@@ -1,4 +1,3 @@
-import uuid
 from typing import Any
 from urllib.parse import urlparse
 
@@ -6,7 +5,7 @@ from flask import Flask
 from werkzeug.wrappers import Request as WRequest
 
 from ._context import EnterDBOSHandler, OperationType, SetWorkflowID, TracedAttributes
-from ._utils import request_id_header
+from ._utils import generate_uuid, request_id_header
 
 
 class FlaskMiddleware:
@@ -41,7 +40,7 @@ def _get_or_generate_request_id(request: WRequest) -> str:
     if request_id is not None:
         return request_id
     else:
-        return str(uuid.uuid4())
+        return generate_uuid()
 
 
 def setup_flask_middleware(app: Flask) -> None:
dbos/_logger.py CHANGED
@@ -68,35 +68,43 @@ def config_logger(config: "ConfigFile") -> None:
     )
     disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore
 
-    if not disable_otlp and otlp_logs_endpoints:
+    if not disable_otlp:
 
-        from opentelemetry._logs import set_logger_provider
+        from opentelemetry._logs import get_logger_provider, set_logger_provider
         from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
         from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
         from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
         from opentelemetry.sdk.resources import Resource
         from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME
 
-        log_provider = LoggerProvider(
-            Resource.create(
-                attributes={
-                    SERVICE_NAME: config["name"],
-                }
-            )
-        )
-        set_logger_provider(log_provider)
-        for e in otlp_logs_endpoints:
-            log_provider.add_log_record_processor(
-                BatchLogRecordProcessor(
-                    OTLPLogExporter(endpoint=e),
-                    export_timeout_millis=5000,
+        # Only set up OTLP provider and exporter if endpoints are provided
+        log_provider = get_logger_provider()
+        if otlp_logs_endpoints is not None and len(otlp_logs_endpoints) > 0:
+            if not isinstance(log_provider, LoggerProvider):
+                log_provider = LoggerProvider(
+                    Resource.create(
+                        attributes={
+                            SERVICE_NAME: config["name"],
+                        }
+                    )
+                )
+                set_logger_provider(log_provider)
+
+            for e in otlp_logs_endpoints:
+                log_provider.add_log_record_processor(
+                    BatchLogRecordProcessor(
+                        OTLPLogExporter(endpoint=e),
+                        export_timeout_millis=5000,
+                    )
                 )
-            )
+
+        # Even if no endpoints are provided, we still need a LoggerProvider to create the LoggingHandler
         global _otlp_handler
-        _otlp_handler = LoggingHandler(logger_provider=log_provider)
+        if _otlp_handler is None:
+            _otlp_handler = LoggingHandler(logger_provider=log_provider)
 
-        # Direct DBOS logs to OTLP
-        dbos_logger.addHandler(_otlp_handler)
+            # Direct DBOS logs to OTLP
+            dbos_logger.addHandler(_otlp_handler)
 
     # Attach DBOS-specific attributes to all log entries.
     global _dbos_log_transformer
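
The reworked `config_logger` reuses a logger provider the host application has already registered, creates and registers its own only when OTLP log endpoints are configured and no SDK `LoggerProvider` exists yet, and attaches the `LoggingHandler` whenever OTLP is not disabled. The underlying OpenTelemetry pattern, sketched outside of DBOS with a hypothetical application logger:

import logging

from opentelemetry._logs import get_logger_provider, set_logger_provider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler

# Reuse a provider the application already registered; otherwise create one.
provider = get_logger_provider()
if not isinstance(provider, LoggerProvider):
    provider = LoggerProvider()
    set_logger_provider(provider)

# Route a logger's records through whichever provider is active.
logging.getLogger("my-app").addHandler(LoggingHandler(logger_provider=provider))
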