dbos 1.14.0a8__py3-none-any.whl → 1.15.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dbos might be problematic.

Files changed (45)
  1. dbos/_client.py +13 -14
  2. dbos/_context.py +12 -6
  3. dbos/_core.py +2 -7
  4. dbos/_dbos.py +5 -13
  5. dbos/_dbos_config.py +17 -29
  6. dbos/_debouncer.py +13 -24
  7. dbos/_debug.py +0 -8
  8. dbos/_docker_pg_helper.py +93 -51
  9. dbos/_fastapi.py +5 -1
  10. dbos/_logger.py +18 -21
  11. dbos/_migration.py +4 -41
  12. dbos/_serialization.py +19 -30
  13. dbos/_sys_db_postgres.py +2 -9
  14. dbos/_templates/dbos-db-starter/migrations/create_table.py.dbos +34 -0
  15. dbos/_tracer.py +42 -31
  16. dbos/cli/_github_init.py +22 -16
  17. dbos/cli/_template_init.py +5 -16
  18. dbos/cli/cli.py +20 -28
  19. {dbos-1.14.0a8.dist-info → dbos-1.15.0a1.dist-info}/METADATA +8 -16
  20. dbos-1.15.0a1.dist-info/RECORD +59 -0
  21. dbos/_alembic_migrations/env.py +0 -62
  22. dbos/_alembic_migrations/script.py.mako +0 -26
  23. dbos/_alembic_migrations/versions/01ce9f07bd10_streaming.py +0 -42
  24. dbos/_alembic_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -34
  25. dbos/_alembic_migrations/versions/27ac6900c6ad_add_queue_dedup.py +0 -45
  26. dbos/_alembic_migrations/versions/471b60d64126_dbos_migrations.py +0 -35
  27. dbos/_alembic_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -35
  28. dbos/_alembic_migrations/versions/5c361fc04708_added_system_tables.py +0 -193
  29. dbos/_alembic_migrations/versions/66478e1b95e5_consolidate_queues.py +0 -71
  30. dbos/_alembic_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -44
  31. dbos/_alembic_migrations/versions/933e86bdac6a_add_queue_priority.py +0 -35
  32. dbos/_alembic_migrations/versions/a3b18ad34abe_added_triggers.py +0 -72
  33. dbos/_alembic_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -43
  34. dbos/_alembic_migrations/versions/d76646551a6c_workflow_queue.py +0 -28
  35. dbos/_alembic_migrations/versions/d994145b47b6_consolidate_inputs.py +0 -30
  36. dbos/_alembic_migrations/versions/eab0cc1d9a14_job_queue.py +0 -56
  37. dbos/_alembic_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -46
  38. dbos/_templates/dbos-db-starter/alembic.ini +0 -116
  39. dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -85
  40. dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -26
  41. dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -35
  42. dbos-1.14.0a8.dist-info/RECORD +0 -79
  43. {dbos-1.14.0a8.dist-info → dbos-1.15.0a1.dist-info}/WHEEL +0 -0
  44. {dbos-1.14.0a8.dist-info → dbos-1.15.0a1.dist-info}/entry_points.txt +0 -0
  45. {dbos-1.14.0a8.dist-info → dbos-1.15.0a1.dist-info}/licenses/LICENSE +0 -0
dbos/_client.py CHANGED
@@ -1,5 +1,4 @@
  import asyncio
- import sys
  import time
  import uuid
  from typing import (
@@ -15,17 +14,11 @@ from typing import (
      Union,
  )

+ from dbos import _serialization
  from dbos._app_db import ApplicationDatabase
  from dbos._context import MaxPriority, MinPriority
  from dbos._sys_db import SystemDatabase

- if sys.version_info < (3, 11):
-     from typing_extensions import NotRequired
- else:
-     from typing import NotRequired
-
- from dbos import _serialization
-
  if TYPE_CHECKING:
      from dbos._dbos import WorkflowHandle, WorkflowHandleAsync

@@ -58,14 +51,20 @@ from dbos._workflow_commands import (
  R = TypeVar("R", covariant=True)  # A generic type for workflow return values


- class EnqueueOptions(TypedDict):
+ # Required EnqueueOptions fields
+ class _EnqueueOptionsRequired(TypedDict):
      workflow_name: str
      queue_name: str
-     workflow_id: NotRequired[str]
-     app_version: NotRequired[str]
-     workflow_timeout: NotRequired[float]
-     deduplication_id: NotRequired[str]
-     priority: NotRequired[int]
+
+
+ # Optional EnqueueOptions fields
+ class EnqueueOptions(_EnqueueOptionsRequired, total=False):
+     workflow_id: str
+     app_version: str
+     workflow_timeout: float
+     deduplication_id: str
+     priority: int
+     max_recovery_attempts: int


  def validate_enqueue_options(options: EnqueueOptions) -> None:
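Splitting EnqueueOptions into a required base TypedDict plus a total=False subclass removes the need for the NotRequired backport (and the typing_extensions conditional import) while keeping the same shape at call sites, and adds max_recovery_attempts as a new optional field. A minimal sketch of how a caller might build these options — the workflow and queue names here are hypothetical:

    from dbos._client import EnqueueOptions

    # Only workflow_name and queue_name are required; the rest may be
    # omitted because the subclass is declared with total=False.
    options: EnqueueOptions = {
        "workflow_name": "process_order",  # hypothetical workflow
        "queue_name": "orders",            # hypothetical queue
        "deduplication_id": "order-1234",
        "priority": 5,
    }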
dbos/_context.py CHANGED
@@ -8,9 +8,11 @@ from contextvars import ContextVar
  from dataclasses import dataclass
  from enum import Enum
  from types import TracebackType
- from typing import List, Literal, Optional, Type, TypedDict
+ from typing import TYPE_CHECKING, List, Literal, Optional, Type, TypedDict
+
+ if TYPE_CHECKING:
+     from opentelemetry.trace import Span

- from opentelemetry.trace import Span, Status, StatusCode, use_span
  from sqlalchemy.orm import Session

  from dbos._utils import GlobalParams
@@ -78,8 +80,8 @@ class ContextSpan:
      context_manager: The context manager that is used to manage the span's lifecycle.
      """

-     span: Span
-     context_manager: AbstractContextManager[Span]
+     span: "Span"
+     context_manager: "AbstractContextManager[Span]"


  class DBOSContext:
@@ -217,19 +219,21 @@ class DBOSContext:

      """ Return the current DBOS span if any. It must be a span created by DBOS."""

-     def get_current_dbos_span(self) -> Optional[Span]:
+     def get_current_dbos_span(self) -> "Optional[Span]":
          if len(self.context_spans) > 0:
              return self.context_spans[-1].span
          return None

      """ Return the current active span if any. It might not be a DBOS span."""

-     def get_current_active_span(self) -> Optional[Span]:
+     def get_current_active_span(self) -> "Optional[Span]":
          return dbos_tracer.get_current_span()

      def _start_span(self, attributes: TracedAttributes) -> None:
          if dbos_tracer.disable_otlp:
              return
+         from opentelemetry.trace import use_span
+
          attributes["operationUUID"] = (
              self.workflow_id if len(self.workflow_id) > 0 else None
          )
@@ -257,6 +261,8 @@ class DBOSContext:
      def _end_span(self, exc_value: Optional[BaseException]) -> None:
          if dbos_tracer.disable_otlp:
              return
+         from opentelemetry.trace import Status, StatusCode
+
          context_span = self.context_spans.pop()
          if exc_value is None:
              context_span.span.set_status(Status(StatusCode.OK))
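Deferring the opentelemetry.trace imports to TYPE_CHECKING (with quoted annotations at use sites) and to the bodies of _start_span/_end_span means OpenTelemetry is only imported when a span is actually started or ended, so loading the module no longer pulls in OTLP dependencies when tracing is disabled. A minimal sketch of the same pattern, independent of DBOS:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Evaluated only by type checkers, never at runtime
        from opentelemetry.trace import Span

    def current_span() -> "Span":
        # The runtime import happens only when this is called
        from opentelemetry.trace import get_current_span
        return get_current_span()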
dbos/_core.py CHANGED
@@ -14,6 +14,7 @@ from typing import (
      Coroutine,
      Generic,
      Optional,
+     ParamSpec,
      TypeVar,
      Union,
      cast,
@@ -22,14 +23,8 @@ from typing import (
  from dbos._outcome import Immediate, NoResult, Outcome, Pending
  from dbos._utils import GlobalParams, retriable_postgres_exception

- from ._app_db import ApplicationDatabase, TransactionResultInternal
-
- if sys.version_info < (3, 10):
-     from typing_extensions import ParamSpec
- else:
-     from typing import ParamSpec
-
  from . import _serialization
+ from ._app_db import ApplicationDatabase, TransactionResultInternal
  from ._context import (
      DBOSAssumeRole,
      DBOSContext,
dbos/_dbos.py CHANGED
@@ -28,9 +28,6 @@ from typing import (
      Union,
  )

- from opentelemetry.trace import Span
- from rich import print
-
  from dbos._conductor.conductor import ConductorWebsocket
  from dbos._debouncer import debouncer_workflow
  from dbos._sys_db import SystemDatabase, WorkflowStatus
@@ -53,7 +50,6 @@ from ._core import (
      set_event,
      start_workflow,
      start_workflow_async,
-     workflow_wrapper,
  )
  from ._queue import Queue, queue_thread
  from ._recovery import recover_pending_workflows, startup_recovery_thread
@@ -62,8 +58,6 @@ from ._registrations import (
      DBOSClassInfo,
      _class_fqn,
      get_or_create_class_info,
-     set_dbos_func_name,
-     set_temp_workflow_type,
  )
  from ._roles import default_required_roles, required_roles
  from ._scheduler import ScheduledWorkflow, scheduled
@@ -80,13 +74,11 @@ if TYPE_CHECKING:
      from fastapi import FastAPI
      from ._kafka import _KafkaConsumerWorkflow
      from flask import Flask
+     from opentelemetry.trace import Span

-     from sqlalchemy.orm import Session
+     from typing import ParamSpec

-     if sys.version_info < (3, 10):
-         from typing_extensions import ParamSpec
-     else:
-         from typing import ParamSpec
+     from sqlalchemy.orm import Session

      from ._admin_server import AdminServer
      from ._app_db import ApplicationDatabase
@@ -558,7 +550,7 @@ class DBOS:
                  f"https://console.dbos.dev/self-host?appname={app_name}"
              )
              print(
-                 f"[bold]To view and manage workflows, connect to DBOS Conductor at:[/bold] [bold blue]{conductor_registration_url}[/bold blue]"
+                 f"To view and manage workflows, connect to DBOS Conductor at:{conductor_registration_url}"
              )

          # Flush handlers and add OTLP to all loggers if enabled
@@ -1297,7 +1289,7 @@ class DBOS:
          return ctx.parent_workflow_id

      @classproperty
-     def span(cls) -> Span:
+     def span(cls) -> "Span":
          """Return the tracing `Span` associated with the current context."""
          ctx = assert_current_dbos_context()
          span = ctx.get_current_active_span()
dbos/_dbos_config.py CHANGED
@@ -5,8 +5,6 @@ from importlib import resources
  from typing import Any, Dict, List, Optional, TypedDict, cast

  import yaml
- from jsonschema import ValidationError, validate
- from rich import print
  from sqlalchemy import make_url

  from ._error import DBOSInitializationError
@@ -36,7 +34,7 @@ class DBOSConfig(TypedDict, total=False):
          otlp_attributes (dict[str, str]): A set of custom attributes to apply OTLP-exported logs and traces
          application_version (str): Application version
          executor_id (str): Executor ID, used to identify the application instance in distributed environments
-         disable_otlp (bool): If True, disables OTLP tracing and logging. Defaults to False.
+         enable_otlp (bool): If True, enable built-in DBOS OTLP tracing and logging.
      """

      name: str
@@ -54,7 +52,7 @@ class DBOSConfig(TypedDict, total=False):
      otlp_attributes: Optional[dict[str, str]]
      application_version: Optional[str]
      executor_id: Optional[str]
-     disable_otlp: Optional[bool]
+     enable_otlp: Optional[bool]


  class RuntimeConfig(TypedDict, total=False):
@@ -97,7 +95,7 @@ class TelemetryConfig(TypedDict, total=False):
      logs: Optional[LoggerConfig]
      OTLPExporter: Optional[OTLPExporterConfig]
      otlp_attributes: Optional[dict[str, str]]
-     disable_otlp: Optional[bool]
+     disable_otlp: bool


  class ConfigFile(TypedDict, total=False):
@@ -165,10 +163,12 @@ def translate_dbos_config_to_config_file(config: DBOSConfig) -> ConfigFile:
      ]

      # Telemetry config
+     enable_otlp = config.get("enable_otlp", None)
+     disable_otlp = True if enable_otlp is None else not enable_otlp
      telemetry: TelemetryConfig = {
          "OTLPExporter": {"tracesEndpoint": [], "logsEndpoint": []},
          "otlp_attributes": config.get("otlp_attributes", {}),
-         "disable_otlp": config.get("disable_otlp", False),
+         "disable_otlp": disable_otlp,
      }
      # For mypy
      assert telemetry["OTLPExporter"] is not None
@@ -265,17 +265,6 @@ def load_config(
      )
      data = cast(Dict[str, Any], data)

-     # Load the JSON schema relative to the package root
-     schema_file = resources.files("dbos").joinpath("dbos-config.schema.json")
-     with schema_file.open("r") as f:
-         schema = json.load(f)
-
-     # Validate the data against the schema
-     try:
-         validate(instance=data, schema=schema)
-     except ValidationError as e:
-         raise DBOSInitializationError(f"Validation error: {e}")
-
      # Special case: convert logsEndpoint and tracesEndpoint from strings to lists of strings, if present
      if "telemetry" in data and "OTLPExporter" in data["telemetry"]:
          if "logsEndpoint" in data["telemetry"]["OTLPExporter"]:
@@ -441,17 +430,13 @@ def process_config(
      printable_sys_db_url = make_url(data["system_database_url"]).render_as_string(
          hide_password=True
      )
-     print(
-         f"[bold blue]DBOS system database URL: {printable_sys_db_url}[/bold blue]"
-     )
+     print(f"DBOS system database URL: {printable_sys_db_url}")
      if data["database_url"].startswith("sqlite"):
          print(
-             f"[bold blue]Using SQLite as a system database. The SQLite system database is for development and testing. PostgreSQL is recommended for production use.[/bold blue]"
+             f"Using SQLite as a system database. The SQLite system database is for development and testing. PostgreSQL is recommended for production use."
          )
      else:
-         print(
-             f"[bold blue]Database engine parameters: {data['database']['db_engine_kwargs']}[/bold blue]"
-         )
+         print(f"Database engine parameters: {data['database']['db_engine_kwargs']}")

      # Return data as ConfigFile type
      return data
@@ -563,12 +548,15 @@ def overwrite_config(provided_config: ConfigFile) -> ConfigFile:
      if "telemetry" not in provided_config or provided_config["telemetry"] is None:
          provided_config["telemetry"] = {
              "OTLPExporter": {"tracesEndpoint": [], "logsEndpoint": []},
+             "disable_otlp": False,
          }
-     elif "OTLPExporter" not in provided_config["telemetry"]:
-         provided_config["telemetry"]["OTLPExporter"] = {
-             "tracesEndpoint": [],
-             "logsEndpoint": [],
-         }
+     else:
+         provided_config["telemetry"]["disable_otlp"] = False
+         if "OTLPExporter" not in provided_config["telemetry"]:
+             provided_config["telemetry"]["OTLPExporter"] = {
+                 "tracesEndpoint": [],
+                 "logsEndpoint": [],
+             }

      # This is a super messy from a typing perspective.
      # Some of ConfigFile keys are optional -- but in practice they'll always be present in hosted environments
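The user-facing flag also flips polarity: DBOSConfig now takes enable_otlp, and the translator derives the internal disable_otlp from it, defaulting to disabled when the key is absent. A sketch of the mapping, eliding any other configuration the translator may require and using a hypothetical app name:

    from dbos._dbos_config import translate_dbos_config_to_config_file

    cf = translate_dbos_config_to_config_file({"name": "my-app", "enable_otlp": True})
    assert cf["telemetry"]["disable_otlp"] is False

    # Omitting enable_otlp now means OTLP stays off by default:
    cf = translate_dbos_config_to_config_file({"name": "my-app"})
    assert cf["telemetry"]["disable_otlp"] is True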
dbos/_debouncer.py CHANGED
@@ -1,6 +1,5 @@
  import asyncio
  import math
- import sys
  import time
  import types
  import uuid
@@ -12,17 +11,12 @@ from typing import (
      Dict,
      Generic,
      Optional,
+     ParamSpec,
      Tuple,
      TypedDict,
      TypeVar,
-     Union,
  )

- if sys.version_info < (3, 10):
-     from typing_extensions import ParamSpec
- else:
-     from typing import ParamSpec
-
  from dbos._client import (
      DBOSClient,
      EnqueueOptions,
@@ -147,7 +141,6 @@ class Debouncer(Generic[P, R]):
          self,
          workflow_name: str,
          *,
-         debounce_key: str,
          debounce_timeout_sec: Optional[float] = None,
          queue: Optional[Queue] = None,
      ):
@@ -157,13 +150,11 @@ class Debouncer(Generic[P, R]):
              "queue_name": queue.name if queue else None,
              "workflow_name": workflow_name,
          }
-         self.debounce_key = debounce_key

      @staticmethod
      def create(
          workflow: Callable[P, R],
          *,
-         debounce_key: str,
          debounce_timeout_sec: Optional[float] = None,
          queue: Optional[Queue] = None,
      ) -> "Debouncer[P, R]":
@@ -172,7 +163,6 @@ class Debouncer(Generic[P, R]):
              raise TypeError("Only workflow functions may be debounced, not methods")
          return Debouncer[P, R](
              get_dbos_func_name(workflow),
-             debounce_key=debounce_key,
              debounce_timeout_sec=debounce_timeout_sec,
              queue=queue,
          )
@@ -181,7 +171,6 @@ class Debouncer(Generic[P, R]):
      def create_async(
          workflow: Callable[P, Coroutine[Any, Any, R]],
          *,
-         debounce_key: str,
          debounce_timeout_sec: Optional[float] = None,
          queue: Optional[Queue] = None,
      ) -> "Debouncer[P, R]":
@@ -190,13 +179,16 @@ class Debouncer(Generic[P, R]):
              raise TypeError("Only workflow functions may be debounced, not methods")
          return Debouncer[P, R](
              get_dbos_func_name(workflow),
-             debounce_key=debounce_key,
              debounce_timeout_sec=debounce_timeout_sec,
              queue=queue,
          )

      def debounce(
-         self, debounce_period_sec: float, *args: P.args, **kwargs: P.kwargs
+         self,
+         debounce_key: str,
+         debounce_period_sec: float,
+         *args: P.args,
+         **kwargs: P.kwargs,
      ) -> "WorkflowHandle[R]":
          from dbos._dbos import DBOS, _get_dbos_instance

@@ -232,9 +224,7 @@ class Debouncer(Generic[P, R]):
          while True:
              try:
                  # Attempt to enqueue a debouncer for this workflow.
-                 deduplication_id = (
-                     f"{self.options['workflow_name']}-{self.debounce_key}"
-                 )
+                 deduplication_id = f"{self.options['workflow_name']}-{debounce_key}"
                  with SetEnqueueOptions(deduplication_id=deduplication_id):
                      with SetWorkflowTimeout(None):
                          internal_queue.enqueue(
@@ -284,6 +274,7 @@ class Debouncer(Generic[P, R]):

      async def debounce_async(
          self,
+         debounce_key: str,
          debounce_period_sec: float,
          *args: P.args,
          **kwargs: P.kwargs,
@@ -292,7 +283,7 @@ class Debouncer(Generic[P, R]):

          dbos = _get_dbos_instance()
          handle = await asyncio.to_thread(
-             self.debounce, debounce_period_sec, *args, **kwargs
+             self.debounce, debounce_key, debounce_period_sec, *args, **kwargs
          )
          return WorkflowHandleAsyncPolling(handle.workflow_id, dbos)

@@ -304,7 +295,6 @@ class DebouncerClient:
          client: DBOSClient,
          workflow_options: EnqueueOptions,
          *,
-         debounce_key: str,
          debounce_timeout_sec: Optional[float] = None,
          queue: Optional[Queue] = None,
      ):
@@ -314,11 +304,10 @@ class DebouncerClient:
              "queue_name": queue.name if queue else None,
              "workflow_name": workflow_options["workflow_name"],
          }
-         self.debounce_key = debounce_key
          self.client = client

      def debounce(
-         self, debounce_period_sec: float, *args: Any, **kwargs: Any
+         self, debounce_key: str, debounce_period_sec: float, *args: Any, **kwargs: Any
      ) -> "WorkflowHandle[R]":

          ctxOptions: ContextOptions = {
@@ -337,7 +326,7 @@ class DebouncerClient:
          try:
              # Attempt to enqueue a debouncer for this workflow.
              deduplication_id = (
-                 f"{self.debouncer_options['workflow_name']}-{self.debounce_key}"
+                 f"{self.debouncer_options['workflow_name']}-{debounce_key}"
              )
              debouncer_options: EnqueueOptions = {
                  "workflow_name": DEBOUNCER_WORKFLOW_NAME,
@@ -390,10 +379,10 @@ class DebouncerClient:
          )

      async def debounce_async(
-         self, debounce_period_sec: float, *args: Any, **kwargs: Any
+         self, deboucne_key: str, debounce_period_sec: float, *args: Any, **kwargs: Any
      ) -> "WorkflowHandleAsync[R]":
          handle: "WorkflowHandle[R]" = await asyncio.to_thread(
-             self.debounce, debounce_period_sec, *args, **kwargs
+             self.debounce, deboucne_key, debounce_period_sec, *args, **kwargs
          )
          return WorkflowHandleClientAsyncPolling[R](
              handle.workflow_id, self.client._sys_db
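The net effect on the Debouncer API: debounce_key is no longer fixed at construction but passed to each debounce()/debounce_async() call, so a single debouncer can coalesce calls under many keys. A sketch of the new call shape — the workflow and key are hypothetical:

    from dbos import DBOS
    from dbos._debouncer import Debouncer

    @DBOS.workflow()
    def process_input(value: str) -> str:  # hypothetical workflow
        return value

    debouncer = Debouncer.create(process_input, debounce_timeout_sec=60)

    # Calls sharing a key within the period are coalesced into one execution.
    handle = debouncer.debounce("user-42", 5.0, "latest value")
    result = handle.get_result()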
dbos/_debug.py CHANGED
@@ -4,8 +4,6 @@ import sys
  from pathlib import Path
  from typing import Union

- from fastapi_cli.discover import get_module_data_from_path
-
  from dbos import DBOS


@@ -34,12 +32,6 @@ def debug_workflow(workflow_id: str, entrypoint: Union[str, PythonModule]) -> None:


  def parse_start_command(command: str) -> Union[str, PythonModule]:
-     match = re.match(r"fastapi\s+run\s+(\.?[\w/]+\.py)", command)
-     if match:
-         # Mirror the logic in fastapi's run command by converting the path argument to a module
-         mod_data = get_module_data_from_path(Path(match.group(1)))
-         sys.path.insert(0, str(mod_data.extra_sys_path))
-         return PythonModule(mod_data.module_import_str)
      match = re.match(r"python3?\s+(\.?[\w/]+\.py)", command)
      if match:
          return match.group(1)
dbos/_docker_pg_helper.py CHANGED
@@ -1,11 +1,10 @@
+ import json
  import logging
  import os
  import subprocess
  import time

- import docker
  import psycopg
- from docker.errors import APIError, NotFound

  logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
  from typing import Any, Dict, Optional, Tuple
@@ -86,48 +85,71 @@ def start_docker_postgres(pool_config: Dict[str, Any]) -> bool:
      image_name = "pgvector/pgvector:pg16"

      try:
-         client = docker.from_env()
-
          # Check if the container already exists
          try:
-             container = client.containers.get(container_name)
-             if container.status == "running":
-                 logging.info(f"Container '{container_name}' is already running.")
-                 return True
-             elif container.status == "exited":
-                 container.start()
-                 logging.info(
-                     f"Container '{container_name}' was stopped and has been restarted."
-                 )
-                 return True
-         except NotFound:
-             # Container doesn't exist, proceed with creation
+             result = subprocess.run(
+                 f"docker inspect {container_name}",
+                 shell=True,
+                 text=True,
+                 capture_output=True,
+             )
+
+             if result.returncode == 0:
+                 # Container exists, check its status
+                 container_info = json.loads(result.stdout)
+                 status = container_info[0]["State"]["Status"]
+
+                 if status == "running":
+                     logging.info(f"Container '{container_name}' is already running.")
+                     return True
+                 elif status == "exited":
+                     subprocess.run(
+                         f"docker start {container_name}", shell=True, check=True
+                     )
+                     logging.info(
+                         f"Container '{container_name}' was stopped and has been restarted."
+                     )
+                     return True
+         except (
+             subprocess.CalledProcessError,
+             json.JSONDecodeError,
+             KeyError,
+             IndexError,
+         ):
+             # Container doesn't exist or error parsing, proceed with creation
              pass

-         # Pull the image if it doesn't exist
-         imgs = client.images.list(name=image_name)
-         if len(imgs) == 0:
+         # Check if the image exists locally
+         result = subprocess.run(
+             f"docker images -q {image_name}", shell=True, text=True, capture_output=True
+         )
+
+         if not result.stdout.strip():
              logging.info(f"Pulling Docker image {image_name}...")
-             client.images.pull(image_name)
+             subprocess.run(f"docker pull {image_name}", shell=True, check=True)

          # Create and start the container
-         container = client.containers.run(
-             image=image_name,
-             name=container_name,
-             detach=True,
-             environment={
-                 "POSTGRES_PASSWORD": pool_config["password"],
-                 "PGDATA": pg_data,
-             },
-             ports={"5432/tcp": pool_config["port"]},
-             volumes={pg_data: {"bind": pg_data, "mode": "rw"}},
-             remove=True,  # Equivalent to --rm
+         cmd = [
+             "docker run",
+             "-d",
+             f"--name {container_name}",
+             f"-e POSTGRES_PASSWORD={pool_config['password']}",
+             f"-e PGDATA={pg_data}",
+             f"-p {pool_config['port']}:5432",
+             f"-v {pg_data}:{pg_data}",
+             "--rm",
+             image_name,
+         ]
+
+         result = subprocess.run(
+             " ".join(cmd), shell=True, text=True, capture_output=True, check=True
          )

-         logging.info(f"Created container: {container.id}")
+         container_id = result.stdout.strip()
+         logging.info(f"Created container: {container_id}")

-     except APIError as e:
-         raise Exception(f"Docker API error: {str(e)}")
+     except subprocess.CalledProcessError as e:
+         raise Exception(f"Docker command error: {e.stderr if e.stderr else str(e)}")

      # Wait for PostgreSQL to be ready
      attempts = 30
@@ -148,15 +170,16 @@ def start_docker_postgres(pool_config: Dict[str, Any]) -> bool:

  def check_docker_installed() -> bool:
      """
-     Check if Docker is installed and running using the docker library.
+     Check if Docker is installed and running using the Docker CLI.

      Returns:
          bool: True if Docker is installed and running, False otherwise.
      """
      try:
-         client = docker.from_env()
-         client.ping()  # type: ignore
-         return True
+         result = subprocess.run(
+             "docker version --format json", shell=True, capture_output=True, text=True
+         )
+         return result.returncode == 0
      except Exception:
          return False

@@ -176,22 +199,41 @@ def stop_docker_pg() -> None:
      try:
          logger.info(f"Stopping Docker Postgres container {container_name}...")

-         client = docker.from_env()
-
-         try:
-             container = client.containers.get(container_name)
-
-             if container.status == "running":
-                 container.stop()
-                 logger.info(
-                     f"Successfully stopped Docker Postgres container {container_name}."
-                 )
-             else:
-                 logger.info(f"Container {container_name} exists but is not running.")
+         # Check if container exists
+         result = subprocess.run(
+             f"docker inspect {container_name}",
+             shell=True,
+             text=True,
+             capture_output=True,
+         )

-         except docker.errors.NotFound:
+         if result.returncode == 0:
+             # Container exists, check its status
+             try:
+                 container_info = json.loads(result.stdout)
+                 status = container_info[0]["State"]["Status"]
+
+                 if status == "running":
+                     subprocess.run(
+                         f"docker stop {container_name}", shell=True, check=True
+                     )
+                     logger.info(
+                         f"Successfully stopped Docker Postgres container {container_name}."
+                     )
+                 else:
+                     logger.info(
+                         f"Container {container_name} exists but is not running."
+                     )
+             except (json.JSONDecodeError, KeyError, IndexError) as e:
+                 logger.error(f"Error parsing container info: {e}")
+                 raise
+         else:
              logger.info(f"Container {container_name} does not exist.")

+     except subprocess.CalledProcessError as error:
+         error_message = error.stderr if error.stderr else str(error)
+         logger.error(f"Failed to stop Docker Postgres container: {error_message}")
+         raise
      except Exception as error:
          error_message = str(error)
          logger.error(f"Failed to stop Docker Postgres container: {error_message}")
dbos/_fastapi.py CHANGED
@@ -83,6 +83,10 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
              response = await call_next(request)
          else:
              response = await call_next(request)
-         if hasattr(response, "status_code"):
+         if (
+             dbos._config["telemetry"]
+             and not dbos._config["telemetry"]["disable_otlp"]
+             and hasattr(response, "status_code")
+         ):
              DBOS.span.set_attribute("responseCode", response.status_code)
          return response