dbos 2.4.0a7__py3-none-any.whl → 2.6.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/__init__.py +2 -0
- dbos/_app_db.py +29 -87
- dbos/_client.py +12 -8
- dbos/_conductor/conductor.py +40 -5
- dbos/_conductor/protocol.py +23 -0
- dbos/_core.py +98 -30
- dbos/_dbos.py +15 -12
- dbos/_dbos_config.py +2 -19
- dbos/_fastapi.py +2 -1
- dbos/_logger.py +12 -6
- dbos/_migration.py +30 -0
- dbos/_queue.py +94 -37
- dbos/_schemas/system_database.py +20 -0
- dbos/_sys_db.py +302 -92
- dbos/_sys_db_postgres.py +18 -12
- dbos/_tracer.py +9 -2
- dbos/_workflow_commands.py +0 -15
- dbos/cli/cli.py +8 -18
- dbos/cli/migration.py +28 -1
- {dbos-2.4.0a7.dist-info → dbos-2.6.0a8.dist-info}/METADATA +1 -1
- {dbos-2.4.0a7.dist-info → dbos-2.6.0a8.dist-info}/RECORD +24 -24
- {dbos-2.4.0a7.dist-info → dbos-2.6.0a8.dist-info}/WHEEL +0 -0
- {dbos-2.4.0a7.dist-info → dbos-2.6.0a8.dist-info}/entry_points.txt +0 -0
- {dbos-2.4.0a7.dist-info → dbos-2.6.0a8.dist-info}/licenses/LICENSE +0 -0
dbos/_dbos.py
CHANGED
@@ -38,6 +38,7 @@ from dbos._workflow_commands import fork_workflow, list_queued_workflows, list_w
 from ._classproperty import classproperty
 from ._core import (
     DEBOUNCER_WORKFLOW_NAME,
+    DEFAULT_POLLING_INTERVAL,
     TEMP_SEND_WF_NAME,
     WorkflowHandleAsyncPolling,
     WorkflowHandlePolling,
@@ -111,7 +112,7 @@ from ._logger import (
     dbos_logger,
     init_logger,
 )
-from ._workflow_commands import get_workflow
+from ._workflow_commands import get_workflow
 
 # Most DBOS functions are just any callable F, so decorators / wrappers work on F
 # There are cases where the parameters P and return value R should be separate
@@ -335,6 +336,8 @@ class DBOS:
         self._executor_field: Optional[ThreadPoolExecutor] = None
         self._background_threads: List[threading.Thread] = []
         self.conductor_url: Optional[str] = conductor_url
+        if config.get("conductor_url"):
+            self.conductor_url = config.get("conductor_url")
         self.conductor_key: Optional[str] = conductor_key
         if config.get("conductor_key"):
             self.conductor_key = config.get("conductor_key")
@@ -569,8 +572,8 @@ class DBOS:
             conductor_registration_url = (
                 f"https://console.dbos.dev/self-host?appname={app_name}"
             )
-            dbos_logger.info(
-                f"To view and manage workflows, connect to DBOS Conductor at:{conductor_registration_url}"
+            dbos_logger.info(
+                f"To view and manage workflows, connect to DBOS Conductor at: {conductor_registration_url}"
             )
 
         # Flush handlers and add OTLP to all loggers if enabled
@@ -1090,7 +1093,6 @@ class DBOS:
         dbos_logger.info(f"Forking workflow: {workflow_id} from step {start_step}")
         return fork_workflow(
             _get_dbos_instance()._sys_db,
-            _get_dbos_instance()._app_db,
             workflow_id,
             start_step,
             application_version=application_version,
@@ -1267,9 +1269,7 @@ class DBOS:
     @classmethod
     def list_workflow_steps(cls, workflow_id: str) -> List[StepInfo]:
         def fn() -> List[StepInfo]:
-            return list_workflow_steps(
-                _get_dbos_instance()._sys_db, _get_dbos_instance()._app_db, workflow_id
-            )
+            return _get_dbos_instance()._sys_db.list_workflow_steps(workflow_id)
 
         return _get_dbos_instance()._sys_db.call_function_as_step(
             fn, "DBOS.listWorkflowSteps"
@@ -1326,11 +1326,10 @@ class DBOS:
         return None
 
     @classproperty
-    def span(cls) -> "Span":
+    def span(cls) -> Optional["Span"]:
         """Return the tracing `Span` associated with the current context."""
         ctx = assert_current_dbos_context()
         span = ctx.get_current_active_span()
-        assert span
         return span
 
     @classproperty
@@ -1384,7 +1383,7 @@ class DBOS:
             )
         elif ctx.is_step():
             _get_dbos_instance()._sys_db.write_stream_from_step(
-                ctx.workflow_id, key, value
+                ctx.workflow_id, ctx.function_id, key, value
             )
         else:
             raise DBOSException(
@@ -1551,7 +1550,9 @@ class WorkflowHandle(Generic[R], Protocol):
         """Return the applicable workflow ID."""
         ...
 
-    def get_result(self) -> R:
+    def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         """Return the result of the workflow function invocation, waiting if necessary."""
         ...
 
@@ -1580,7 +1581,9 @@ class WorkflowHandleAsync(Generic[R], Protocol):
         """Return the applicable workflow ID."""
         ...
 
-    async def get_result(self) -> R:
+    async def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         """Return the result of the workflow function invocation, waiting if necessary."""
         ...
 
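Based on the signature changes above, both `WorkflowHandle.get_result` and `WorkflowHandleAsync.get_result` now accept a keyword-only `polling_interval_sec` argument. A minimal sketch of how a caller might use it; the workflow definition and the 0.5-second value below are illustrative, not from the diff:

    from dbos import DBOS

    @DBOS.workflow()
    def double(x: int) -> int:  # hypothetical workflow for illustration
        return x * 2

    handle = DBOS.start_workflow(double, 21)
    # New keyword-only argument in this release: poll for the result every 0.5s
    # instead of the default DEFAULT_POLLING_INTERVAL.
    result = handle.get_result(polling_interval_sec=0.5)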
dbos/_dbos_config.py
CHANGED
@@ -39,6 +39,7 @@ class DBOSConfig(TypedDict, total=False):
         enable_otlp (bool): If True, enable built-in DBOS OTLP tracing and logging.
         system_database_engine (sa.Engine): A custom system database engine. If provided, DBOS will not create an engine but use this instead.
         conductor_key (str): An API key for DBOS Conductor. Pass this in to connect your process to Conductor.
+        conductor_url (str): The websockets URL for your DBOS Conductor service. Only set if you're self-hosting Conductor.
         serializer (Serializer): A custom serializer and deserializer DBOS uses when storing program data in the system database
     """
 
@@ -60,6 +61,7 @@ class DBOSConfig(TypedDict, total=False):
     enable_otlp: Optional[bool]
     system_database_engine: Optional[sa.Engine]
     conductor_key: Optional[str]
+    conductor_url: Optional[str]
     serializer: Optional[Serializer]
 
 
@@ -406,25 +408,6 @@ def process_config(
 
     configure_db_engine_parameters(data["database"], connect_timeout=connect_timeout)
 
-    assert data["system_database_url"] is not None
-    # Pretty-print connection information, respecting log level
-    if not silent and logs["logLevel"] == "INFO" or logs["logLevel"] == "DEBUG":
-        printable_sys_db_url = make_url(data["system_database_url"]).render_as_string(
-            hide_password=True
-        )
-        print(f"DBOS system database URL: {printable_sys_db_url}")
-        if data["database_url"]:
-            printable_app_db_url = make_url(data["database_url"]).render_as_string(
-                hide_password=True
-            )
-            print(f"DBOS application database URL: {printable_app_db_url}")
-        if data["system_database_url"].startswith("sqlite"):
-            print(
-                f"Using SQLite as a system database. The SQLite system database is for development and testing. PostgreSQL is recommended for production use."
-            )
-        else:
-            print(f"Database engine parameters: {data['database']['db_engine_kwargs']}")
-
     # Return data as ConfigFile type
     return data
 
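The new `conductor_url` field sits alongside `conductor_key` in `DBOSConfig` and is picked up by the `DBOS` constructor (see the `_dbos.py` change above). A sketch of a configuration that uses it, assuming a self-hosted Conductor; the app name, database URL, and Conductor URL below are placeholders:

    from dbos import DBOS, DBOSConfig

    config: DBOSConfig = {
        "name": "my-app",  # placeholder application name
        "system_database_url": "postgresql://localhost:5432/dbos_systemdb",  # placeholder
        "conductor_key": "<conductor-api-key>",
        # New in this release: only needed when self-hosting Conductor.
        "conductor_url": "wss://conductor.internal.example.com/websocket",
    }
    DBOS(config=config)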
dbos/_fastapi.py
CHANGED
@@ -87,5 +87,6 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
             and not dbos._config["telemetry"]["disable_otlp"]
             and hasattr(response, "status_code")
         ):
-            DBOS.span.set_attribute("responseCode", response.status_code)
+            if DBOS.span is not None:
+                DBOS.span.set_attribute("responseCode", response.status_code)
         return response
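Because `DBOS.span` is now typed `Optional["Span"]` (per the `_dbos.py` change above), the middleware guards the attribute write with a `None` check. Code outside the middleware that touches the current span should do the same; a minimal sketch, with an illustrative attribute name and value:

    from dbos import DBOS

    span = DBOS.span
    if span is not None:
        span.set_attribute("responseCode", 200)  # attribute and value are illustrative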
dbos/_logger.py
CHANGED
@@ -71,6 +71,7 @@ def config_logger(config: "ConfigFile") -> None:
     if not disable_otlp:
 
         from opentelemetry._logs import get_logger_provider, set_logger_provider
+        from opentelemetry._logs._internal import ProxyLoggerProvider
         from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
         from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
         from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
@@ -80,7 +81,8 @@ def config_logger(config: "ConfigFile") -> None:
         # Only set up OTLP provider and exporter if endpoints are provided
         log_provider = get_logger_provider()
         if otlp_logs_endpoints is not None and len(otlp_logs_endpoints) > 0:
-            if
+            if isinstance(log_provider, ProxyLoggerProvider):
+                # Set a real LoggerProvider if it was previously a ProxyLoggerProvider
                 log_provider = LoggerProvider(
                     Resource.create(
                         attributes={
@@ -91,7 +93,7 @@ def config_logger(config: "ConfigFile") -> None:
                 set_logger_provider(log_provider)
 
             for e in otlp_logs_endpoints:
-                log_provider.add_log_record_processor(
+                log_provider.add_log_record_processor(  # type: ignore
                     BatchLogRecordProcessor(
                         OTLPLogExporter(endpoint=e),
                         export_timeout_millis=5000,
@@ -101,10 +103,14 @@ def config_logger(config: "ConfigFile") -> None:
         # Even if no endpoints are provided, we still need a LoggerProvider to create the LoggingHandler
         global _otlp_handler
         if _otlp_handler is None:
-
-
-
-
+            if isinstance(log_provider, ProxyLoggerProvider):
+                dbos_logger.warning(
+                    "OTLP is enabled but logger provider not set, skipping log exporter setup."
+                )
+            else:
+                _otlp_handler = LoggingHandler(logger_provider=log_provider)
+                # Direct DBOS logs to OTLP
+                dbos_logger.addHandler(_otlp_handler)
 
         # Attach DBOS-specific attributes to all log entries.
         global _dbos_log_transformer
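The `ProxyLoggerProvider` checks mean DBOS only installs its own `LoggerProvider` when no real provider has been set yet; a provider configured by the host application is reused. A sketch of that scenario, assuming the host app sets up OpenTelemetry logging before the DBOS launch; the service name below is a placeholder:

    from opentelemetry._logs import set_logger_provider
    from opentelemetry.sdk._logs import LoggerProvider
    from opentelemetry.sdk.resources import Resource

    # The host application installs its own provider first...
    set_logger_provider(LoggerProvider(Resource.create({"service.name": "my-app"})))

    # ...so config_logger() finds a non-proxy provider and attaches the DBOS OTLP
    # exporters and LoggingHandler to it instead of replacing it.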
dbos/_migration.py
CHANGED
@@ -228,6 +228,21 @@ ALTER TABLE \"{schema}\".operation_outputs ADD COLUMN started_at_epoch_ms BIGINT
 """
 
 
+def get_dbos_migration_six(schema: str) -> str:
+    return f"""
+CREATE TABLE \"{schema}\".workflow_events_history (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, function_id, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES \"{schema}\".workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+ALTER TABLE \"{schema}\".streams ADD COLUMN function_id INTEGER NOT NULL DEFAULT 0;
+"""
+
+
 def get_dbos_migrations(schema: str) -> list[str]:
     return [
         get_dbos_migration_one(schema),
@@ -235,6 +250,7 @@ def get_dbos_migrations(schema: str) -> list[str]:
         get_dbos_migration_three(schema),
         get_dbos_migration_four(schema),
         get_dbos_migration_five(schema),
+        get_dbos_migration_six(schema),
     ]
 
 
@@ -343,6 +359,19 @@ ALTER TABLE operation_outputs ADD COLUMN started_at_epoch_ms BIGINT;
 ALTER TABLE operation_outputs ADD COLUMN completed_at_epoch_ms BIGINT;
 """
 
+sqlite_migration_six = """
+CREATE TABLE workflow_events_history (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, function_id, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+ALTER TABLE streams ADD COLUMN function_id INTEGER NOT NULL DEFAULT 0;
+"""
+
 
 sqlite_migrations = [
     sqlite_migration_one,
@@ -350,4 +379,5 @@ sqlite_migrations = [
     sqlite_migration_three,
     sqlite_migration_four,
     sqlite_migration_five,
+    sqlite_migration_six,
 ]
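Migration six creates the `workflow_events_history` table and adds a `function_id` column to `streams`, for both Postgres and SQLite. One way to spot-check that the migration has been applied, sketched with SQLAlchemy's inspector; the connection URL and the default `dbos` schema name are assumptions, not taken from the diff:

    import sqlalchemy as sa

    engine = sa.create_engine("postgresql://localhost:5432/dbos_systemdb")  # placeholder URL
    inspector = sa.inspect(engine)

    assert "workflow_events_history" in inspector.get_table_names(schema="dbos")
    stream_columns = {c["name"] for c in inspector.get_columns("streams", schema="dbos")}
    assert "function_id" in stream_columns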
dbos/_queue.py
CHANGED
@@ -44,6 +44,7 @@ class Queue:
         worker_concurrency: Optional[int] = None,
         priority_enabled: bool = False,
         partition_queue: bool = False,
+        polling_interval_sec: float = 1.0,
     ) -> None:
         if (
             worker_concurrency is not None
@@ -53,12 +54,15 @@ class Queue:
             raise ValueError(
                 "worker_concurrency must be less than or equal to concurrency"
             )
+        if polling_interval_sec <= 0.0:
+            raise ValueError("polling_interval_sec must be positive")
         self.name = name
         self.concurrency = concurrency
         self.worker_concurrency = worker_concurrency
         self.limiter = limiter
         self.priority_enabled = priority_enabled
         self.partition_queue = partition_queue
+        self.polling_interval_sec = polling_interval_sec
         from ._dbos import _get_or_create_dbos_registry
 
         registry = _get_or_create_dbos_registry()
@@ -108,50 +112,103 @@ class Queue:
         return await start_workflow_async(dbos, func, self.name, False, *args, **kwargs)
 
 
-def
-
-
-
+def queue_worker_thread(
+    stop_event: threading.Event, dbos: "DBOS", queue: Queue
+) -> None:
+    """Worker thread for processing a single queue."""
+    polling_interval = queue.polling_interval_sec
+    min_polling_interval = queue.polling_interval_sec
+    max_polling_interval = max(queue.polling_interval_sec, 120.0)
+
     while not stop_event.is_set():
         # Wait for the polling interval with jitter
         if stop_event.wait(timeout=polling_interval * random.uniform(0.95, 1.05)):
             return
-
-
-
-
-
-                queue_partition_keys = dbos._sys_db.get_queue_partitions(queue.name)
-                for key in queue_partition_keys:
-                    dequeued_workflows += dbos._sys_db.start_queued_workflows(
-                        queue,
-                        GlobalParams.executor_id,
-                        GlobalParams.app_version,
-                        key,
-                    )
-            else:
+
+        try:
+            if queue.partition_queue:
+                queue_partition_keys = dbos._sys_db.get_queue_partitions(queue.name)
+                for key in queue_partition_keys:
                     dequeued_workflows = dbos._sys_db.start_queued_workflows(
-                queue,
+                        queue,
+                        GlobalParams.executor_id,
+                        GlobalParams.app_version,
+                        key,
                     )
+                    for id in dequeued_workflows:
+                        execute_workflow_by_id(dbos, id)
+            else:
+                dequeued_workflows = dbos._sys_db.start_queued_workflows(
+                    queue, GlobalParams.executor_id, GlobalParams.app_version, None
+                )
                 for id in dequeued_workflows:
                     execute_workflow_by_id(dbos, id)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        except OperationalError as e:
+            if isinstance(
+                e.orig, (errors.SerializationFailure, errors.LockNotAvailable)
+            ):
+                # If a serialization error is encountered, increase the polling interval
+                polling_interval = min(
+                    max_polling_interval,
+                    polling_interval * 2.0,
+                )
+                dbos.logger.warning(
+                    f"Contention detected in queue thread for {queue.name}. Increasing polling interval to {polling_interval:.2f}."
+                )
+            else:
+                dbos.logger.warning(
+                    f"Exception encountered in queue thread for {queue.name}: {e}"
+                )
+        except Exception as e:
+            if not stop_event.is_set():
+                # Only print the error if the thread is not stopping
+                dbos.logger.warning(
+                    f"Exception encountered in queue thread for {queue.name}: {e}"
+                )
+
         # Attempt to scale back the polling interval on each iteration
         polling_interval = max(min_polling_interval, polling_interval * 0.9)
+
+
+def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
+    """Main queue manager thread that spawns and monitors worker threads for each queue."""
+    queue_threads: dict[str, threading.Thread] = {}
+    check_interval = 1.0  # Check for new queues every second
+
+    while not stop_event.is_set():
+        # Check for new queues
+        current_queues = dict(dbos._registry.queue_info_map)
+
+        # Start threads for new queues
+        for queue_name, queue in current_queues.items():
+            if (
+                queue_name not in queue_threads
+                or not queue_threads[queue_name].is_alive()
+            ):
+                thread = threading.Thread(
+                    target=queue_worker_thread,
+                    args=(stop_event, dbos, queue),
+                    name=f"queue-worker-{queue_name}",
+                    daemon=True,
+                )
+                thread.start()
+                queue_threads[queue_name] = thread
+                dbos.logger.debug(f"Started worker thread for queue: {queue_name}")
+
+        # Wait for the check interval or stop event
+        if stop_event.wait(timeout=check_interval):
+            break
+
+    # Join all queue worker threads
+    dbos.logger.info("Stopping queue manager, joining all worker threads...")
+    for queue_name, thread in queue_threads.items():
+        if thread.is_alive():
+            thread.join(timeout=10.0)  # Give each thread 10 seconds to finish
+            if thread.is_alive():
+                dbos.logger.debug(
+                    f"Queue worker thread for {queue_name} did not stop in time"
+                )
+            else:
+                dbos.logger.debug(
+                    f"Queue worker thread for {queue_name} stopped successfully"
                )
dbos/_schemas/system_database.py
CHANGED
@@ -35,6 +35,7 @@ class SystemSchema:
         cls.notifications.schema = schema_name
         cls.workflow_events.schema = schema_name
         cls.streams.schema = schema_name
+        cls.workflow_events_history.schema = schema_name
 
     workflow_status = Table(
         "workflow_status",
@@ -154,6 +155,24 @@ class SystemSchema:
         PrimaryKeyConstraint("workflow_uuid", "key"),
     )
 
+    # This is an immutable version of workflow_events. Two tables are needed for backwards compatibility.
+    workflow_events_history = Table(
+        "workflow_events_history",
+        metadata_obj,
+        Column(
+            "workflow_uuid",
+            Text,
+            ForeignKey(
+                "workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
+            ),
+            nullable=False,
+        ),
+        Column("key", Text, nullable=False),
+        Column("value", Text, nullable=False),
+        Column("function_id", Integer, nullable=False, server_default=text("'0'::int")),
+        PrimaryKeyConstraint("workflow_uuid", "key", "function_id"),
+    )
+
     streams = Table(
         "streams",
         metadata_obj,
@@ -168,5 +187,6 @@ class SystemSchema:
         Column("key", Text, nullable=False),
         Column("value", Text, nullable=False),
         Column("offset", Integer, nullable=False),
+        Column("function_id", Integer, nullable=False, server_default=text("'0'::int")),
         PrimaryKeyConstraint("workflow_uuid", "key", "offset"),
     )