dbos 1.14.0a9__py3-none-any.whl → 1.15.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbos might be problematic.

Files changed (47)
  1. dbos/_client.py +30 -35
  2. dbos/_context.py +12 -6
  3. dbos/_core.py +5 -8
  4. dbos/_dbos.py +15 -27
  5. dbos/_dbos_config.py +32 -42
  6. dbos/_debouncer.py +1 -7
  7. dbos/_debug.py +0 -8
  8. dbos/_docker_pg_helper.py +93 -51
  9. dbos/_fastapi.py +5 -1
  10. dbos/_logger.py +18 -21
  11. dbos/_migration.py +4 -41
  12. dbos/_serialization.py +19 -30
  13. dbos/_sys_db_postgres.py +2 -9
  14. dbos/_templates/dbos-db-starter/migrations/create_table.py.dbos +34 -0
  15. dbos/_tracer.py +42 -31
  16. dbos/_workflow_commands.py +9 -5
  17. dbos/cli/_github_init.py +22 -16
  18. dbos/cli/_template_init.py +5 -16
  19. dbos/cli/cli.py +27 -33
  20. dbos/cli/migration.py +15 -10
  21. {dbos-1.14.0a9.dist-info → dbos-1.15.0a2.dist-info}/METADATA +8 -16
  22. dbos-1.15.0a2.dist-info/RECORD +59 -0
  23. dbos/_alembic_migrations/env.py +0 -62
  24. dbos/_alembic_migrations/script.py.mako +0 -26
  25. dbos/_alembic_migrations/versions/01ce9f07bd10_streaming.py +0 -42
  26. dbos/_alembic_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -34
  27. dbos/_alembic_migrations/versions/27ac6900c6ad_add_queue_dedup.py +0 -45
  28. dbos/_alembic_migrations/versions/471b60d64126_dbos_migrations.py +0 -35
  29. dbos/_alembic_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -35
  30. dbos/_alembic_migrations/versions/5c361fc04708_added_system_tables.py +0 -193
  31. dbos/_alembic_migrations/versions/66478e1b95e5_consolidate_queues.py +0 -71
  32. dbos/_alembic_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -44
  33. dbos/_alembic_migrations/versions/933e86bdac6a_add_queue_priority.py +0 -35
  34. dbos/_alembic_migrations/versions/a3b18ad34abe_added_triggers.py +0 -72
  35. dbos/_alembic_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -43
  36. dbos/_alembic_migrations/versions/d76646551a6c_workflow_queue.py +0 -28
  37. dbos/_alembic_migrations/versions/d994145b47b6_consolidate_inputs.py +0 -30
  38. dbos/_alembic_migrations/versions/eab0cc1d9a14_job_queue.py +0 -56
  39. dbos/_alembic_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -46
  40. dbos/_templates/dbos-db-starter/alembic.ini +0 -116
  41. dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -85
  42. dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -26
  43. dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -35
  44. dbos-1.14.0a9.dist-info/RECORD +0 -79
  45. {dbos-1.14.0a9.dist-info → dbos-1.15.0a2.dist-info}/WHEEL +0 -0
  46. {dbos-1.14.0a9.dist-info → dbos-1.15.0a2.dist-info}/entry_points.txt +0 -0
  47. {dbos-1.14.0a9.dist-info → dbos-1.15.0a2.dist-info}/licenses/LICENSE +0 -0
dbos/_docker_pg_helper.py CHANGED
@@ -1,11 +1,10 @@
+ import json
  import logging
  import os
  import subprocess
  import time

- import docker
  import psycopg
- from docker.errors import APIError, NotFound

  logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
  from typing import Any, Dict, Optional, Tuple
@@ -86,48 +85,71 @@ def start_docker_postgres(pool_config: Dict[str, Any]) -> bool:
  image_name = "pgvector/pgvector:pg16"

  try:
- client = docker.from_env()
-
  # Check if the container already exists
  try:
- container = client.containers.get(container_name)
- if container.status == "running":
- logging.info(f"Container '{container_name}' is already running.")
- return True
- elif container.status == "exited":
- container.start()
- logging.info(
- f"Container '{container_name}' was stopped and has been restarted."
- )
- return True
- except NotFound:
- # Container doesn't exist, proceed with creation
+ result = subprocess.run(
+ f"docker inspect {container_name}",
+ shell=True,
+ text=True,
+ capture_output=True,
+ )
+
+ if result.returncode == 0:
+ # Container exists, check its status
+ container_info = json.loads(result.stdout)
+ status = container_info[0]["State"]["Status"]
+
+ if status == "running":
+ logging.info(f"Container '{container_name}' is already running.")
+ return True
+ elif status == "exited":
+ subprocess.run(
+ f"docker start {container_name}", shell=True, check=True
+ )
+ logging.info(
+ f"Container '{container_name}' was stopped and has been restarted."
+ )
+ return True
+ except (
+ subprocess.CalledProcessError,
+ json.JSONDecodeError,
+ KeyError,
+ IndexError,
+ ):
+ # Container doesn't exist or error parsing, proceed with creation
  pass

- # Pull the image if it doesn't exist
- imgs = client.images.list(name=image_name)
- if len(imgs) == 0:
+ # Check if the image exists locally
+ result = subprocess.run(
+ f"docker images -q {image_name}", shell=True, text=True, capture_output=True
+ )
+
+ if not result.stdout.strip():
  logging.info(f"Pulling Docker image {image_name}...")
- client.images.pull(image_name)
+ subprocess.run(f"docker pull {image_name}", shell=True, check=True)

  # Create and start the container
- container = client.containers.run(
- image=image_name,
- name=container_name,
- detach=True,
- environment={
- "POSTGRES_PASSWORD": pool_config["password"],
- "PGDATA": pg_data,
- },
- ports={"5432/tcp": pool_config["port"]},
- volumes={pg_data: {"bind": pg_data, "mode": "rw"}},
- remove=True, # Equivalent to --rm
+ cmd = [
+ "docker run",
+ "-d",
+ f"--name {container_name}",
+ f"-e POSTGRES_PASSWORD={pool_config['password']}",
+ f"-e PGDATA={pg_data}",
+ f"-p {pool_config['port']}:5432",
+ f"-v {pg_data}:{pg_data}",
+ "--rm",
+ image_name,
+ ]
+
+ result = subprocess.run(
+ " ".join(cmd), shell=True, text=True, capture_output=True, check=True
  )

- logging.info(f"Created container: {container.id}")
+ container_id = result.stdout.strip()
+ logging.info(f"Created container: {container_id}")

- except APIError as e:
- raise Exception(f"Docker API error: {str(e)}")
+ except subprocess.CalledProcessError as e:
+ raise Exception(f"Docker command error: {e.stderr if e.stderr else str(e)}")

  # Wait for PostgreSQL to be ready
  attempts = 30
@@ -148,15 +170,16 @@ def start_docker_postgres(pool_config: Dict[str, Any]) -> bool:

  def check_docker_installed() -> bool:
  """
- Check if Docker is installed and running using the docker library.
+ Check if Docker is installed and running using the Docker CLI.

  Returns:
  bool: True if Docker is installed and running, False otherwise.
  """
  try:
- client = docker.from_env()
- client.ping() # type: ignore
- return True
+ result = subprocess.run(
+ "docker version --format json", shell=True, capture_output=True, text=True
+ )
+ return result.returncode == 0
  except Exception:
  return False

@@ -176,22 +199,41 @@ def stop_docker_pg() -> None:
  try:
  logger.info(f"Stopping Docker Postgres container {container_name}...")

- client = docker.from_env()
-
- try:
- container = client.containers.get(container_name)
-
- if container.status == "running":
- container.stop()
- logger.info(
- f"Successfully stopped Docker Postgres container {container_name}."
- )
- else:
- logger.info(f"Container {container_name} exists but is not running.")
+ # Check if container exists
+ result = subprocess.run(
+ f"docker inspect {container_name}",
+ shell=True,
+ text=True,
+ capture_output=True,
+ )

- except docker.errors.NotFound:
+ if result.returncode == 0:
+ # Container exists, check its status
+ try:
+ container_info = json.loads(result.stdout)
+ status = container_info[0]["State"]["Status"]
+
+ if status == "running":
+ subprocess.run(
+ f"docker stop {container_name}", shell=True, check=True
+ )
+ logger.info(
+ f"Successfully stopped Docker Postgres container {container_name}."
+ )
+ else:
+ logger.info(
+ f"Container {container_name} exists but is not running."
+ )
+ except (json.JSONDecodeError, KeyError, IndexError) as e:
+ logger.error(f"Error parsing container info: {e}")
+ raise
+ else:
  logger.info(f"Container {container_name} does not exist.")

+ except subprocess.CalledProcessError as error:
+ error_message = error.stderr if error.stderr else str(error)
+ logger.error(f"Failed to stop Docker Postgres container: {error_message}")
+ raise
  except Exception as error:
  error_message = str(error)
  logger.error(f"Failed to stop Docker Postgres container: {error_message}")
dbos/_fastapi.py CHANGED
@@ -83,6 +83,10 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
  response = await call_next(request)
  else:
  response = await call_next(request)
- if hasattr(response, "status_code"):
+ if (
+ dbos._config["telemetry"]
+ and not dbos._config["telemetry"]["disable_otlp"]
+ and hasattr(response, "status_code")
+ ):
  DBOS.span.set_attribute("responseCode", response.status_code)
  return response
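The middleware now records the responseCode span attribute only when a telemetry section is configured and disable_otlp is false. A small sketch of how that guard evaluates, assuming the telemetry config is either None or a dict carrying a disable_otlp flag (shapes taken from the surrounding diff, names otherwise illustrative):

from typing import Any, Dict, Optional


def should_record_response_code(telemetry: Optional[Dict[str, Any]]) -> bool:
    # Mirrors the new condition: a telemetry section must exist and OTLP must not be disabled.
    return bool(telemetry) and not telemetry["disable_otlp"]


assert should_record_response_code(None) is False
assert should_record_response_code({"disable_otlp": True}) is False
assert should_record_response_code({"disable_otlp": False}) is True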
dbos/_logger.py CHANGED
@@ -2,14 +2,6 @@ import logging
  import os
  from typing import TYPE_CHECKING, Any

- from opentelemetry._logs import set_logger_provider
- from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
- from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
- from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
- from opentelemetry.sdk.resources import Resource
- from opentelemetry.semconv.resource import ResourceAttributes
- from opentelemetry.trace.span import format_trace_id
-
  from dbos._utils import GlobalParams

  if TYPE_CHECKING:
@@ -24,6 +16,7 @@ class DBOSLogTransformer(logging.Filter):
  super().__init__()
  self.app_id = os.environ.get("DBOS__APPID", "")
  self.otlp_attributes: dict[str, str] = config.get("telemetry", {}).get("otlp_attributes", {}) # type: ignore
+ self.disable_otlp = config.get("telemetry", {}).get("disable_otlp", True) # type: ignore

  def filter(self, record: Any) -> bool:
  record.applicationID = self.app_id
@@ -39,19 +32,15 @@ class DBOSLogTransformer(logging.Filter):
  if ctx:
  if ctx.is_within_workflow():
  record.operationUUID = ctx.workflow_id
- span = ctx.get_current_active_span()
- if span:
- trace_id = format_trace_id(span.get_span_context().trace_id)
- record.traceId = trace_id
-
- return True
+ if not self.disable_otlp:
+ from opentelemetry.trace.span import format_trace_id

+ span = ctx.get_current_active_span()
+ if span:
+ trace_id = format_trace_id(span.get_span_context().trace_id)
+ record.traceId = trace_id

- # Mitigation for https://github.com/open-telemetry/opentelemetry-python/issues/3193
- # Reduce the force flush timeout
- class PatchedOTLPLoggerProvider(LoggerProvider):
- def force_flush(self, timeout_millis: int = 5000) -> bool:
- return super().force_flush(timeout_millis)
+ return True


  def init_logger() -> None:
@@ -80,10 +69,18 @@ def config_logger(config: "ConfigFile") -> None:
  disable_otlp = config.get("telemetry", {}).get("disable_otlp", False) # type: ignore

  if not disable_otlp and otlp_logs_endpoints:
- log_provider = PatchedOTLPLoggerProvider(
+
+ from opentelemetry._logs import set_logger_provider
+ from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
+ from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+ from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+ from opentelemetry.sdk.resources import Resource
+ from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME
+
+ log_provider = LoggerProvider(
  Resource.create(
  attributes={
- ResourceAttributes.SERVICE_NAME: config["name"],
+ SERVICE_NAME: config["name"],
  }
  )
  )
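config_logger now imports the OpenTelemetry logging pieces inside the function and only when OTLP export is actually enabled, so the exporter packages become an optional dependency of the logger module. A sketch of that deferred-import wiring with a hypothetical configure_otlp_logging helper; the provider, processor, and handler calls follow the standard OpenTelemetry SDK pattern and are not necessarily the package's exact wiring:

import logging
from typing import List


def configure_otlp_logging(service_name: str, endpoints: List[str], disable_otlp: bool) -> None:
    if disable_otlp or not endpoints:
        return  # OpenTelemetry is never imported on this path

    # Deferred imports: only needed when log export is turned on.
    from opentelemetry._logs import set_logger_provider
    from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
    from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
    from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
    from opentelemetry.sdk.resources import Resource

    provider = LoggerProvider(Resource.create(attributes={"service.name": service_name}))
    set_logger_provider(provider)
    for endpoint in endpoints:
        provider.add_log_record_processor(BatchLogRecordProcessor(OTLPLogExporter(endpoint=endpoint)))
    logging.getLogger().addHandler(LoggingHandler(logger_provider=provider))

Note that the PatchedOTLPLoggerProvider workaround for the OpenTelemetry force-flush timeout is gone from this release; the stock LoggerProvider is used instead.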
dbos/_migration.py CHANGED
@@ -1,16 +1,11 @@
- import logging
- import os
- import re
  import sys

  import sqlalchemy as sa
- from alembic import command
- from alembic.config import Config

  from ._logger import dbos_logger


- def ensure_dbos_schema(engine: sa.Engine) -> bool:
+ def ensure_dbos_schema(engine: sa.Engine) -> None:
  """
  True if using DBOS migrations (DBOS schema and migrations table already exist or were created)
  False if using Alembic migrations (DBOS schema exists, but dbos_migrations table doesn't)
@@ -22,10 +17,10 @@ def ensure_dbos_schema(engine: sa.Engine) -> bool:
  "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'dbos'"
  )
  )
- schema_existed = schema_result.fetchone() is not None
+ schema_exists = schema_result.fetchone() is not None

  # Create schema if it doesn't exist
- if not schema_existed:
+ if not schema_exists:
  conn.execute(sa.text("CREATE SCHEMA dbos"))

  # Check if dbos_migrations table exists
@@ -36,44 +31,12 @@ def ensure_dbos_schema(engine: sa.Engine) -> bool:
  )
  table_exists = table_result.fetchone() is not None

- if table_exists:
- return True
- elif schema_existed:
- return False
- else:
+ if not table_exists:
  conn.execute(
  sa.text(
  "CREATE TABLE dbos.dbos_migrations (version BIGINT NOT NULL PRIMARY KEY)"
  )
  )
- return True
-
-
- def run_alembic_migrations(engine: sa.Engine) -> None:
- """Run system database schema migrations with Alembic.
- This is DEPRECATED in favor of DBOS-managed migrations.
- It is retained only for backwards compatibility and
- will be removed in the next major version."""
- # Run a schema migration for the system database
- migration_dir = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "_alembic_migrations"
- )
- alembic_cfg = Config()
- alembic_cfg.set_main_option("script_location", migration_dir)
- logging.getLogger("alembic").setLevel(logging.WARNING)
- # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
- escaped_conn_string = re.sub(
- r"%(?=[0-9A-Fa-f]{2})",
- "%%",
- engine.url.render_as_string(hide_password=False),
- )
- alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
- try:
- command.upgrade(alembic_cfg, "head")
- except Exception as e:
- dbos_logger.warning(
- f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
- )


  def run_dbos_migrations(engine: sa.Engine) -> None:
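ensure_dbos_schema is now a plain idempotent bootstrap: it creates the dbos schema and the dbos.dbos_migrations version table when they are missing, returns nothing, and the Alembic fallback path has been deleted. One way to sanity-check the resulting state against a local Postgres (the connection string is an assumption for illustration, not from the package):

import sqlalchemy as sa

engine = sa.create_engine("postgresql+psycopg://postgres:dbos@localhost:5432/my_app_dbos_sys")

with engine.connect() as conn:
    # Does the migrations table exist yet?
    table_exists = conn.execute(
        sa.text(
            "SELECT 1 FROM information_schema.tables "
            "WHERE table_schema = 'dbos' AND table_name = 'dbos_migrations'"
        )
    ).fetchone() is not None
    version = (
        conn.execute(sa.text("SELECT version FROM dbos.dbos_migrations")).scalar()
        if table_exists
        else None
    )
    print(f"dbos_migrations present: {table_exists}, version: {version}")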
dbos/_serialization.py CHANGED
@@ -1,8 +1,8 @@
+ import base64
+ import pickle
  import types
  from typing import Any, Dict, Optional, Tuple, TypedDict

- import jsonpickle # type: ignore
-
  from ._logger import dbos_logger


@@ -11,54 +11,43 @@ class WorkflowInputs(TypedDict):
  kwargs: Dict[str, Any]


- def _validate_item(data: Any) -> None:
- if isinstance(data, (types.MethodType)):
- raise TypeError("Serialized data item should not be a class method")
- if isinstance(data, (types.FunctionType)):
- if jsonpickle.decode(jsonpickle.encode(data, unpicklable=True)) is None:
- raise TypeError(
- "Serialized function should be defined at the top level of a module"
- )
-
-
  def serialize(data: Any) -> str:
- """Serialize an object to a JSON string using jsonpickle."""
- _validate_item(data)
- encoded_data: str = jsonpickle.encode(data, unpicklable=True)
+ pickled_data: bytes = pickle.dumps(data)
+ encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
  return encoded_data


  def serialize_args(data: WorkflowInputs) -> str:
- """Serialize args to a JSON string using jsonpickle."""
- arg: Any
- for arg in data["args"]:
- _validate_item(arg)
- for arg in data["kwargs"].values():
- _validate_item(arg)
- encoded_data: str = jsonpickle.encode(data, unpicklable=True)
+ """Serialize args to a base64-encoded string using pickle."""
+ pickled_data: bytes = pickle.dumps(data)
+ encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
  return encoded_data


  def serialize_exception(data: Exception) -> str:
- """Serialize an Exception object to a JSON string using jsonpickle."""
- encoded_data: str = jsonpickle.encode(data, unpicklable=True)
+ """Serialize an Exception object to a base64-encoded string using pickle."""
+ pickled_data: bytes = pickle.dumps(data)
+ encoded_data: str = base64.b64encode(pickled_data).decode("utf-8")
  return encoded_data


  def deserialize(serialized_data: str) -> Any:
- """Deserialize a JSON string back to a Python object using jsonpickle."""
- return jsonpickle.decode(serialized_data)
+ """Deserialize a base64-encoded string back to a Python object using pickle."""
+ pickled_data: bytes = base64.b64decode(serialized_data)
+ return pickle.loads(pickled_data)


  def deserialize_args(serialized_data: str) -> WorkflowInputs:
- """Deserialize a JSON string back to a Python object list using jsonpickle."""
- args: WorkflowInputs = jsonpickle.decode(serialized_data)
+ """Deserialize a base64-encoded string back to a Python object list using pickle."""
+ pickled_data: bytes = base64.b64decode(serialized_data)
+ args: WorkflowInputs = pickle.loads(pickled_data)
  return args


  def deserialize_exception(serialized_data: str) -> Exception:
- """Deserialize JSON string back to a Python Exception using jsonpickle."""
- exc: Exception = jsonpickle.decode(serialized_data)
+ """Deserialize a base64-encoded string back to a Python Exception using pickle."""
+ pickled_data: bytes = base64.b64decode(serialized_data)
+ exc: Exception = pickle.loads(pickled_data)
  return exc

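Serialization switches wholesale from jsonpickle to pickle plus base64, and the old validation that rejected bound methods and non-top-level functions is gone. A round-trip sketch using the module's own helpers (dbos._serialization is a private module, so this is illustrative rather than a documented API):

from dbos._serialization import deserialize, deserialize_args, serialize, serialize_args

payload = {"numbers": [1, 2, 3], "nested": {"ok": True}}
encoded = serialize(payload)  # base64 text, safe to store in a TEXT column
assert deserialize(encoded) == payload

inputs = {"args": (42,), "kwargs": {"name": "alice"}}
assert deserialize_args(serialize_args(inputs)) == inputs

Because pickle.loads executes whatever the payload encodes, these helpers should only ever be fed data written by a trusted system database.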
dbos/_sys_db_postgres.py CHANGED
@@ -5,11 +5,7 @@ import psycopg
  import sqlalchemy as sa
  from sqlalchemy.exc import DBAPIError

- from dbos._migration import (
- ensure_dbos_schema,
- run_alembic_migrations,
- run_dbos_migrations,
- )
+ from dbos._migration import ensure_dbos_schema, run_dbos_migrations
  from dbos._schemas.system_database import SystemSchema

  from ._logger import dbos_logger
@@ -66,10 +62,7 @@ class PostgresSystemDatabase(SystemDatabase):
  conn.execute(sa.text(f"CREATE DATABASE {sysdb_name}"))
  engine.dispose()

- using_dbos_migrations = ensure_dbos_schema(self.engine)
- if not using_dbos_migrations:
- # Complete the Alembic migrations, create the dbos_migrations table
- run_alembic_migrations(self.engine)
+ ensure_dbos_schema(self.engine)
  run_dbos_migrations(self.engine)

  def _cleanup_connections(self) -> None:
dbos/_templates/dbos-db-starter/migrations/create_table.py.dbos ADDED
@@ -0,0 +1,34 @@
+ """
+ Create the dbos_hello table using SQLAlchemy.
+ """
+
+ import os
+ from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
+
+
+ def create_dbos_hello_table() -> None:
+ """
+ Create the dbos_hello table in the database.
+
+ Args:
+ database_url: Database connection string. If not provided,
+ uses DATABASE_URL environment variable.
+ """
+ database_url = os.environ.get("DBOS_DATABASE_URL", "postgresql+psycopg://postgres:dbos@localhost:5432/${default_db_name}?connect_timeout=5")
+
+ engine = create_engine(database_url)
+ metadata = MetaData()
+
+ dbos_hello = Table(
+ 'dbos_hello',
+ metadata,
+ Column('greet_count', Integer, primary_key=True, autoincrement=True, nullable=False),
+ Column('name', String, nullable=False)
+ )
+
+ metadata.create_all(engine)
+ engine.dispose()
+
+
+ if __name__ == "__main__":
+ create_dbos_hello_table()
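The starter template replaces its Alembic migration with a plain SQLAlchemy script; ${default_db_name} is filled in when the template is rendered into a new project. After rendering, the table can be created by running the script with a database URL in the environment, for example (connection string and path are illustrative):

import os
import subprocess

# Point the rendered script at a reachable Postgres instance.
os.environ["DBOS_DATABASE_URL"] = "postgresql+psycopg://postgres:dbos@localhost:5432/my_app"

# The default starter layout places the rendered script under migrations/.
subprocess.run(["python", "migrations/create_table.py"], check=True)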
dbos/_tracer.py CHANGED
@@ -1,13 +1,9 @@
  import os
  from typing import TYPE_CHECKING, Optional

- from opentelemetry import trace
- from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
- from opentelemetry.sdk.resources import Resource
- from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
- from opentelemetry.semconv.resource import ResourceAttributes
- from opentelemetry.trace import Span
+ if TYPE_CHECKING:
+ from opentelemetry.trace import Span
+ from opentelemetry.sdk.trace import TracerProvider

  from dbos._utils import GlobalParams

@@ -29,34 +25,47 @@ class DBOSTracer:
  def config(self, config: ConfigFile) -> None:
  self.otlp_attributes = config.get("telemetry", {}).get("otlp_attributes", {}) # type: ignore
  self.disable_otlp = config.get("telemetry", {}).get("disable_otlp", False) # type: ignore
- if not self.disable_otlp and not isinstance(
- trace.get_tracer_provider(), TracerProvider
- ):
- resource = Resource(
- attributes={
- ResourceAttributes.SERVICE_NAME: config["name"],
- }
+ if not self.disable_otlp:
+ from opentelemetry import trace
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
+ OTLPSpanExporter,
  )
-
- provider = TracerProvider(resource=resource)
- if os.environ.get("DBOS__CONSOLE_TRACES", None) is not None:
- processor = BatchSpanProcessor(ConsoleSpanExporter())
- provider.add_span_processor(processor)
- otlp_traces_endpoints = (
- config.get("telemetry", {}).get("OTLPExporter", {}).get("tracesEndpoint") # type: ignore
+ from opentelemetry.sdk.resources import Resource
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import (
+ BatchSpanProcessor,
+ ConsoleSpanExporter,
  )
- if otlp_traces_endpoints:
- for e in otlp_traces_endpoints:
- processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=e))
+ from opentelemetry.semconv.attributes.service_attributes import SERVICE_NAME
+
+ if not isinstance(trace.get_tracer_provider(), TracerProvider):
+ resource = Resource(
+ attributes={
+ SERVICE_NAME: config["name"],
+ }
+ )
+
+ provider = TracerProvider(resource=resource)
+ if os.environ.get("DBOS__CONSOLE_TRACES", None) is not None:
+ processor = BatchSpanProcessor(ConsoleSpanExporter())
  provider.add_span_processor(processor)
- trace.set_tracer_provider(provider)
-
- def set_provider(self, provider: Optional[TracerProvider]) -> None:
+ otlp_traces_endpoints = (
+ config.get("telemetry", {}).get("OTLPExporter", {}).get("tracesEndpoint") # type: ignore
+ )
+ if otlp_traces_endpoints:
+ for e in otlp_traces_endpoints:
+ processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=e))
+ provider.add_span_processor(processor)
+ trace.set_tracer_provider(provider)
+
+ def set_provider(self, provider: "Optional[TracerProvider]") -> None:
  self.provider = provider

  def start_span(
- self, attributes: "TracedAttributes", parent: Optional[Span] = None
- ) -> Span:
+ self, attributes: "TracedAttributes", parent: "Optional[Span]" = None
+ ) -> "Span":
+ from opentelemetry import trace
+
  tracer = (
  self.provider.get_tracer("dbos-tracer")
  if self.provider is not None
@@ -74,11 +83,13 @@ class DBOSTracer:
  span.set_attribute(k, v)
  return span

- def end_span(self, span: Span) -> None:
+ def end_span(self, span: "Span") -> None:
  span.end()

- def get_current_span(self) -> Optional[Span]:
+ def get_current_span(self) -> "Optional[Span]":
  # Return the current active span if any. It might not be a DBOS span.
+ from opentelemetry import trace
+
  span = trace.get_current_span()
  if span.get_span_context().is_valid:
  return span
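DBOSTracer now quotes its Span and TracerProvider annotations, keeps the real imports under TYPE_CHECKING, and pulls in the OpenTelemetry SDK only inside config and start_span, so importing dbos._tracer no longer requires the exporter packages at runtime. A minimal sketch of the same optional-dependency pattern on a hypothetical class:

from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Seen only by type checkers; no runtime dependency on opentelemetry.
    from opentelemetry.trace import Span


class SpanReader:
    """Hypothetical class showing quoted annotations plus a lazy import."""

    def current_span(self) -> "Optional[Span]":
        from opentelemetry import trace  # imported only when tracing is actually used

        span = trace.get_current_span()
        return span if span.get_span_context().is_valid else None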
dbos/_workflow_commands.py CHANGED
@@ -98,10 +98,10 @@ def get_workflow(sys_db: SystemDatabase, workflow_id: str) -> Optional[WorkflowS


  def list_workflow_steps(
- sys_db: SystemDatabase, app_db: ApplicationDatabase, workflow_id: str
+ sys_db: SystemDatabase, app_db: Optional[ApplicationDatabase], workflow_id: str
  ) -> List[StepInfo]:
  steps = sys_db.get_workflow_steps(workflow_id)
- transactions = app_db.get_transactions(workflow_id)
+ transactions = app_db.get_transactions(workflow_id) if app_db else []
  merged_steps = steps + transactions
  merged_steps.sort(key=lambda step: step["function_id"])
  return merged_steps
@@ -109,7 +109,7 @@ def list_workflow_steps(

  def fork_workflow(
  sys_db: SystemDatabase,
- app_db: ApplicationDatabase,
+ app_db: Optional[ApplicationDatabase],
  workflow_id: str,
  start_step: int,
  *,
@@ -122,7 +122,8 @@ def fork_workflow(
  ctx.id_assigned_for_next_workflow = ""
  else:
  forked_workflow_id = str(uuid.uuid4())
- app_db.clone_workflow_transactions(workflow_id, forked_workflow_id, start_step)
+ if app_db:
+ app_db.clone_workflow_transactions(workflow_id, forked_workflow_id, start_step)
  sys_db.fork_workflow(
  workflow_id,
  forked_workflow_id,
@@ -145,7 +146,10 @@ def garbage_collect(
  )
  if result is not None:
  cutoff_epoch_timestamp_ms, pending_workflow_ids = result
- dbos._app_db.garbage_collect(cutoff_epoch_timestamp_ms, pending_workflow_ids)
+ if dbos._app_db:
+ dbos._app_db.garbage_collect(
+ cutoff_epoch_timestamp_ms, pending_workflow_ids
+ )


  def global_timeout(dbos: "DBOS", cutoff_epoch_timestamp_ms: int) -> None:
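The workflow commands now treat the application database as optional: transaction records are fetched, cloned, or garbage-collected only when an ApplicationDatabase is configured, and callers may pass None. A small self-contained sketch of that guard style (a hypothetical Protocol stands in for ApplicationDatabase):

from typing import Any, Dict, List, Optional, Protocol


class TransactionSource(Protocol):
    def get_transactions(self, workflow_id: str) -> List[Dict[str, Any]]: ...


def merged_workflow_steps(
    steps: List[Dict[str, Any]],
    app_db: Optional[TransactionSource],
    workflow_id: str,
) -> List[Dict[str, Any]]:
    # Only fetch transactions when an application database exists.
    transactions = app_db.get_transactions(workflow_id) if app_db else []
    merged = steps + transactions
    merged.sort(key=lambda step: step["function_id"])
    return merged


print(merged_workflow_steps([{"function_id": 1}], None, "wf-123"))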