dbos 0.25.0a12__tar.gz → 0.25.0a14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dbos-0.25.0a12 → dbos-0.25.0a14}/PKG-INFO +1 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_admin_server.py +11 -3
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_core.py +20 -19
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_dbos.py +3 -9
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +1 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_sys_db.py +1 -125
- {dbos-0.25.0a12 → dbos-0.25.0a14}/pyproject.toml +1 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_admin_server.py +4 -6
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_async.py +1 -4
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_classdecorators.py +4 -5
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_dbos.py +44 -36
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_failures.py +13 -41
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_fastapi.py +0 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_fastapi_roles.py +0 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_flask.py +0 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_queue.py +2 -48
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_scheduler.py +0 -1
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_workflow_cmds.py +0 -3
- {dbos-0.25.0a12 → dbos-0.25.0a14}/LICENSE +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/README.md +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/__init__.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/__main__.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_app_db.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_classproperty.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_cloudutils/authentication.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_cloudutils/cloudutils.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_cloudutils/databases.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_conductor/conductor.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_conductor/protocol.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_context.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_croniter.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_db_wizard.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_dbos_config.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_debug.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_error.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_fastapi.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_flask.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_kafka.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_kafka_message.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_logger.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/env.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/script.py.mako +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_outcome.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_queue.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_recovery.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_registrations.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_request.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_roles.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_scheduler.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_schemas/__init__.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_schemas/application_database.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_schemas/system_database.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_serialization.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/README.md +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/__package/main.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_tracer.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_utils.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/_workflow_commands.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/cli/_github_init.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/cli/_template_init.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/cli/cli.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/dbos-config.schema.json +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/dbos/py.typed +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/__init__.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/atexit_no_ctor.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/atexit_no_launch.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/classdefs.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/conftest.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/more_classdefs.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/queuedworkflow.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_concurrency.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_config.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_croniter.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_dbwizard.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_debug.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_kafka.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_outcome.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_package.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_schema_migration.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_singleton.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_spans.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_sqlalchemy.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/tests/test_workflow_cancel.py +0 -0
- {dbos-0.25.0a12 → dbos-0.25.0a14}/version/__init__.py +0 -0
--- a/dbos/_admin_server.py
+++ b/dbos/_admin_server.py
@@ -7,8 +7,6 @@ from functools import partial
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
 from typing import TYPE_CHECKING, Any, List, TypedDict

-import jsonpickle  # type: ignore
-
 from ._logger import dbos_logger
 from ._recovery import recover_pending_workflows

@@ -162,7 +160,17 @@ class AdminRequestHandler(BaseHTTPRequestHandler):

     def _handle_steps(self, workflow_id: str) -> None:
         steps = self.dbos._sys_db.get_workflow_steps(workflow_id)
-        json_steps = jsonpickle.encode(steps, unpicklable=False).encode("utf-8")
+
+        updated_steps = [
+            {
+                **step,
+                "output": str(step["output"]) if step["output"] is not None else None,
+                "error": str(step["error"]) if step["error"] is not None else None,
+            }
+            for step in steps
+        ]
+
+        json_steps = json.dumps(updated_steps).encode("utf-8")
         self.send_response(200)
         self._end_headers()
         self.wfile.write(json_steps)
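
The handler now stringifies step outputs and errors before serializing, because the standard-library `json` module (unlike the removed `jsonpickle`) rejects arbitrary Python objects. A minimal standalone sketch of the pattern; the sample step dict is illustrative, not the package's exact schema:

```python
import json

# A step record whose output is not JSON-serializable (a set, for example)
steps = [{"function_name": "my_step", "output": {1, 2, 3}, "error": None}]

try:
    json.dumps(steps)
except TypeError:
    pass  # json.dumps cannot encode a set

# Stringify output/error first, preserving None, as the new handler does
updated_steps = [
    {
        **step,
        "output": str(step["output"]) if step["output"] is not None else None,
        "error": str(step["error"]) if step["error"] is not None else None,
    }
    for step in steps
]
print(json.dumps(updated_steps))  # now serializes cleanly
```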
--- a/dbos/_core.py
+++ b/dbos/_core.py
@@ -279,25 +279,14 @@ def _init_workflow(
             raise DBOSNonExistentWorkflowError(wfid)
         wf_status = get_status_result["status"]
     else:
-
-
-
-
-
-                status, max_recovery_attempts=max_recovery_attempts
-            )
-
-            # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
-            dbos._sys_db.update_workflow_inputs(
-                wfid, _serialization.serialize_args(inputs)
-            )
+        # Synchronously record the status and inputs for workflows
+        # TODO: Make this transactional (and with the queue step below)
+        wf_status = dbos._sys_db.insert_workflow_status(
+            status, max_recovery_attempts=max_recovery_attempts
+        )

-
-
-
-            dbos._sys_db.buffer_workflow_inputs(
-                wfid, _serialization.serialize_args(inputs)
-            )
+        # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
+        dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))

     if queue is not None and wf_status == WorkflowStatusString.ENQUEUED.value:
         dbos._sys_db.enqueue(wfid, queue)
@@ -311,6 +300,18 @@ def _get_wf_invoke_func(
     status: WorkflowStatusInternal,
 ) -> Callable[[Callable[[], R]], R]:
     def persist(func: Callable[[], R]) -> R:
+        if not dbos.debug_mode and (
+            status["status"] == WorkflowStatusString.ERROR.value
+            or status["status"] == WorkflowStatusString.SUCCESS.value
+        ):
+            dbos.logger.debug(
+                f"Workflow {status['workflow_uuid']} is already completed with status {status['status']}"
+            )
+            # Directly return the result if the workflow is already completed
+            recorded_result: R = dbos._sys_db.await_workflow_result(
+                status["workflow_uuid"]
+            )
+            return recorded_result
         try:
             output = func()
             status["status"] = "SUCCESS"
@@ -319,7 +320,7 @@ def _get_wf_invoke_func(
             if status["queue_name"] is not None:
                 queue = dbos._registry.queue_info_map[status["queue_name"]]
                 dbos._sys_db.remove_from_queue(status["workflow_uuid"], queue)
-            dbos._sys_db.buffer_workflow_status(status)
+            dbos._sys_db.update_workflow_status(status)
             return output
         except DBOSWorkflowConflictIDError:
             # Await the workflow result
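
Taken together, the `_core.py` changes make replay of a completed workflow short-circuit to its recorded result instead of re-executing the body (the test changes below now assert that counters stop incrementing on replay). A minimal sketch of the observable behavior, assuming a configured and launched DBOS application; the `greet` workflow and the workflow ID are illustrative:

```python
from dbos import DBOS, SetWorkflowID

@DBOS.workflow()
def greet(name: str) -> str:
    return f"hello {name}"

# After DBOS.launch(), re-running under the same workflow ID replays the
# recorded result rather than executing the body a second time.
with SetWorkflowID("wf-123"):
    first = greet("alice")   # executes and durably records the result
with SetWorkflowID("wf-123"):
    second = greet("alice")  # short-circuits to the recorded result
assert first == second
```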
--- a/dbos/_dbos.py
+++ b/dbos/_dbos.py
@@ -485,14 +485,6 @@ class DBOS:
         notification_listener_thread.start()
         self._background_threads.append(notification_listener_thread)

-        # Start flush workflow buffers thread
-        flush_workflow_buffers_thread = threading.Thread(
-            target=self._sys_db.flush_workflow_buffers,
-            daemon=True,
-        )
-        flush_workflow_buffers_thread.start()
-        self._background_threads.append(flush_workflow_buffers_thread)
-
         # Start the queue thread
         evt = threading.Event()
         self.stop_events.append(evt)
@@ -549,7 +541,9 @@ class DBOS:
         if _dbos_global_instance is not None:
             _dbos_global_instance._reset_system_database()
         else:
-            dbos_logger.warning("reset_system_database has no effect because global DBOS object does not exist")
+            dbos_logger.warning(
+                "reset_system_database has no effect because global DBOS object does not exist"
+            )

     def _reset_system_database(self) -> None:
         assert (
--- a/dbos/_sys_db.py
+++ b/dbos/_sys_db.py
@@ -166,8 +166,6 @@ class StepInfo(TypedDict):


 _dbos_null_topic = "__null__topic__"
-_buffer_flush_batch_size = 100
-_buffer_flush_interval_secs = 1.0


 class SystemDatabase:
@@ -264,32 +262,17 @@ class SystemDatabase:
         self.notifications_map: Dict[str, threading.Condition] = {}
         self.workflow_events_map: Dict[str, threading.Condition] = {}

-        # Initialize the workflow status and inputs buffers
-        self._workflow_status_buffer: Dict[str, WorkflowStatusInternal] = {}
-        self._workflow_inputs_buffer: Dict[str, str] = {}
-        # Two sets for tracking which single-transaction workflows have been exported to the status table
-        self._exported_temp_txn_wf_status: Set[str] = set()
-        self._temp_txn_wf_ids: Set[str] = set()
-        self._is_flushing_status_buffer = False
-
         # Now we can run background processes
         self._run_background_processes = True
         self._debug_mode = debug_mode

     # Destroy the pool when finished
     def destroy(self) -> None:
-        self.wait_for_buffer_flush()
         self._run_background_processes = False
         if self.notification_conn is not None:
             self.notification_conn.close()
         self.engine.dispose()

-    def wait_for_buffer_flush(self) -> None:
-        # Wait until the buffers are flushed.
-        while self._is_flushing_status_buffer or not self._is_buffers_empty:
-            dbos_logger.debug("Waiting for system buffers to be exported")
-            time.sleep(1)
-
     def insert_workflow_status(
         self,
         status: WorkflowStatusInternal,
@@ -440,10 +423,6 @@ class SystemDatabase:
         with self.engine.begin() as c:
             c.execute(cmd)

-        # If this is a single-transaction workflow, record that its status has been exported
-        if status["workflow_uuid"] in self._temp_txn_wf_ids:
-            self._exported_temp_txn_wf_status.add(status["workflow_uuid"])
-
     def cancel_workflow(
         self,
         workflow_id: str,
@@ -621,10 +600,7 @@ class SystemDatabase:
                     f"Workflow {workflow_uuid} has been called multiple times with different inputs"
                 )
                 # TODO: actually changing the input
-
-            # Clean up the single-transaction tracking sets
-            self._exported_temp_txn_wf_status.discard(workflow_uuid)
-            self._temp_txn_wf_ids.discard(workflow_uuid)
+
             return

     def get_workflow_inputs(
@@ -1275,106 +1251,6 @@ class SystemDatabase:
             )
             return value

-    def _flush_workflow_status_buffer(self) -> None:
-        if self._debug_mode:
-            raise Exception("called _flush_workflow_status_buffer in debug mode")
-
-        """Export the workflow status buffer to the database, up to the batch size."""
-        if len(self._workflow_status_buffer) == 0:
-            return
-
-        # Record the exported status so far, and add them back on errors.
-        exported_status: Dict[str, WorkflowStatusInternal] = {}
-        with self.engine.begin() as c:
-            exported = 0
-            status_iter = iter(list(self._workflow_status_buffer))
-            wf_id: Optional[str] = None
-            while (
-                exported < _buffer_flush_batch_size
-                and (wf_id := next(status_iter, None)) is not None
-            ):
-                # Pop the first key in the buffer (FIFO)
-                status = self._workflow_status_buffer.pop(wf_id, None)
-                if status is None:
-                    continue
-                exported_status[wf_id] = status
-                try:
-                    self.update_workflow_status(status, conn=c)
-                    exported += 1
-                except Exception as e:
-                    dbos_logger.error(f"Error while flushing status buffer: {e}")
-                    c.rollback()
-                    # Add the exported status back to the buffer, so they can be retried next time
-                    self._workflow_status_buffer.update(exported_status)
-                    break
-
-    def _flush_workflow_inputs_buffer(self) -> None:
-        if self._debug_mode:
-            raise Exception("called _flush_workflow_inputs_buffer in debug mode")
-
-        """Export the workflow inputs buffer to the database, up to the batch size."""
-        if len(self._workflow_inputs_buffer) == 0:
-            return
-
-        # Record exported inputs so far, and add them back on errors.
-        exported_inputs: Dict[str, str] = {}
-        with self.engine.begin() as c:
-            exported = 0
-            input_iter = iter(list(self._workflow_inputs_buffer))
-            wf_id: Optional[str] = None
-            while (
-                exported < _buffer_flush_batch_size
-                and (wf_id := next(input_iter, None)) is not None
-            ):
-                if wf_id not in self._exported_temp_txn_wf_status:
-                    # Skip exporting inputs if the status has not been exported yet
-                    continue
-                inputs = self._workflow_inputs_buffer.pop(wf_id, None)
-                if inputs is None:
-                    continue
-                exported_inputs[wf_id] = inputs
-                try:
-                    self.update_workflow_inputs(wf_id, inputs, conn=c)
-                    exported += 1
-                except Exception as e:
-                    dbos_logger.error(f"Error while flushing inputs buffer: {e}")
-                    c.rollback()
-                    # Add the exported inputs back to the buffer, so they can be retried next time
-                    self._workflow_inputs_buffer.update(exported_inputs)
-                    break
-
-    def flush_workflow_buffers(self) -> None:
-        """Flush the workflow status and inputs buffers periodically, via a background thread."""
-        while self._run_background_processes:
-            try:
-                self._is_flushing_status_buffer = True
-                # Must flush the status buffer first, as the inputs table has a foreign key constraint on the status table.
-                self._flush_workflow_status_buffer()
-                self._flush_workflow_inputs_buffer()
-                self._is_flushing_status_buffer = False
-                if self._is_buffers_empty:
-                    # Only sleep if both buffers are empty
-                    time.sleep(_buffer_flush_interval_secs)
-            except Exception as e:
-                dbos_logger.error(f"Error while flushing buffers: {e}")
-                time.sleep(_buffer_flush_interval_secs)
-                # Will retry next time
-
-    def buffer_workflow_status(self, status: WorkflowStatusInternal) -> None:
-        self._workflow_status_buffer[status["workflow_uuid"]] = status
-
-    def buffer_workflow_inputs(self, workflow_id: str, inputs: str) -> None:
-        # inputs is a serialized WorkflowInputs string
-        self._workflow_inputs_buffer[workflow_id] = inputs
-        self._temp_txn_wf_ids.add(workflow_id)
-
-    @property
-    def _is_buffers_empty(self) -> bool:
-        return (
-            len(self._workflow_status_buffer) == 0
-            and len(self._workflow_inputs_buffer) == 0
-        )
-
     def enqueue(self, workflow_id: str, queue_name: str) -> None:
         if self._debug_mode:
             raise Exception("called enqueue in debug mode")
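
All of the status/inputs buffering machinery is gone from `SystemDatabase`: workflow status and inputs are now written to Postgres synchronously, which is why the test changes below simply delete every `wait_for_buffer_flush` call. A sketch of the test-side effect, assuming a launched DBOS app and a hypothetical `my_workflow`:

```python
handle = DBOS.start_workflow(my_workflow)
handle.get_result()
# Previously tests needed dbos._sys_db.wait_for_buffer_flush() here;
# the status row is now durable as soon as the workflow completes.
assert handle.get_status().status == "SUCCESS"
```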
--- a/tests/test_admin_server.py
+++ b/tests/test_admin_server.py
@@ -97,8 +97,6 @@ def test_admin_recovery(config: ConfigFile) -> None:
     with SetWorkflowID(wfuuid):
         assert test_workflow("bob", "bob") == "bob1bob"

-    dbos._sys_db.wait_for_buffer_flush()
-
     # Manually update the database to pretend the workflow comes from another executor and is pending
     with dbos._sys_db.engine.begin() as c:
         query = (
@@ -254,10 +252,9 @@ def test_admin_workflow_resume(dbos: DBOS, sys_db: SystemDatabase) -> None:
         nonlocal counter
         counter += 1

-        # Run the workflow
+    # Run the workflow
     simple_workflow()
     assert counter == 1
-    dbos._sys_db.wait_for_buffer_flush()

     # Verify the workflow has succeeded
     output = DBOS.list_workflows()
@@ -295,7 +292,8 @@ def test_admin_workflow_resume(dbos: DBOS, sys_db: SystemDatabase) -> None:
     )
     assert response.status_code == 204
     assert event.wait(timeout=5)
-
+    # Wait for the workflow to finish
+    DBOS.retrieve_workflow(wfUuid).get_result()
     assert counter == 2
     info = _workflow_commands.get_workflow(sys_db, wfUuid, True)
     assert info is not None
@@ -307,7 +305,7 @@ def test_admin_workflow_resume(dbos: DBOS, sys_db: SystemDatabase) -> None:
         f"http://localhost:3001/workflows/{wfUuid}/resume", json=[], timeout=5
     )
     assert response.status_code == 204
-
+    DBOS.retrieve_workflow(wfUuid).get_result()
     info = _workflow_commands.get_workflow(sys_db, wfUuid, True)
     assert info is not None
     assert info.status == "SUCCESS", f"Expected status to be SUCCESS"
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -47,7 +47,6 @@ async def test_async_workflow(dbos: DBOS) -> None:
     with SetWorkflowID(wfuuid):
         result = await test_workflow("alice", "bob")
         assert result == "alicetxn11bobstep1"
-    dbos._sys_db.wait_for_buffer_flush()

     with SetWorkflowID(wfuuid):
         result = await test_workflow("alice", "bob")
@@ -93,7 +92,6 @@ async def test_async_step(dbos: DBOS) -> None:
     with SetWorkflowID(wfuuid):
         result = await test_workflow("alice", "bob")
         assert result == "alicetxn11bobstep1"
-    dbos._sys_db.wait_for_buffer_flush()

     with SetWorkflowID(wfuuid):
         result = await test_workflow("alice", "bob")
@@ -323,7 +321,6 @@ async def test_async_step_temp(dbos: DBOS) -> None:
     with SetWorkflowID(wfuuid):
         result = await test_step("alice")
         assert result == "alicestep1"
-    dbos._sys_db.wait_for_buffer_flush()

     with SetWorkflowID(wfuuid):
         result = await test_step("alice")
@@ -373,7 +370,7 @@ async def test_start_workflow_async(dbos: DBOS) -> None:
     result = await handle.get_result()
     assert result == "alicebobstep1"

-    assert wf_counter ==
+    assert wf_counter == 1
     assert step_counter == 1
     assert wf_el_id == id(asyncio.get_running_loop())
     assert step_el_id == id(asyncio.get_running_loop())
--- a/tests/test_classdecorators.py
+++ b/tests/test_classdecorators.py
@@ -413,7 +413,7 @@ def test_class_recovery(dbos: DBOS) -> None:
     # Test we can execute the workflow by uuid as recovery would do
     handle = DBOS.execute_workflow_id("run1")
     assert handle.get_result() == "ran"
-    assert exc_cnt ==
+    assert exc_cnt == 1


 def test_inst_recovery(dbos: DBOS) -> None:
@@ -446,8 +446,9 @@ def test_inst_recovery(dbos: DBOS) -> None:
     last_inst = None
     handle = DBOS.execute_workflow_id(wfid)
     assert handle.get_result() == "ran2"
-    assert exc_cnt ==
-
+    assert exc_cnt == 1
+    # Workflow has finished so last_inst should be None
+    assert last_inst is None

     status = DBOS.retrieve_workflow(wfid).get_status()
     assert status.class_name == "TestClass"
@@ -806,14 +807,12 @@ def test_inst_txn(dbos: DBOS) -> None:

     with SetWorkflowID(wfid):
         assert inst.transaction(input) == input * multiplier
-    dbos._sys_db.wait_for_buffer_flush()
     status = DBOS.retrieve_workflow(wfid).get_status()
     assert status.class_name == "TestClass"
     assert status.config_name == "test_class"

     handle = DBOS.start_workflow(inst.transaction, input)
     assert handle.get_result() == input * multiplier
-    dbos._sys_db.wait_for_buffer_flush()
     status = handle.get_status()
     assert status.class_name == "TestClass"
     assert status.config_name == "test_class"
--- a/tests/test_dbos.py
+++ b/tests/test_dbos.py
@@ -33,7 +33,7 @@ def test_simple_workflow(dbos: DBOS) -> None:
         wf_counter += 1
         res = test_transaction(var2)
         res2 = test_step(var)
-        DBOS.logger.info("I'm test_workflow")
+        DBOS.logger.info("I'm test_workflow " + var + var2)
         return res + res2

     @DBOS.transaction(isolation_level="REPEATABLE READ")
@@ -42,7 +42,7 @@ def test_simple_workflow(dbos: DBOS) -> None:
         rows = DBOS.sql_session.execute(sa.text("SELECT 1")).fetchall()
         nonlocal txn_counter
         txn_counter += 1
-        DBOS.logger.info("I'm test_transaction")
+        DBOS.logger.info("I'm test_transaction " + var2)
         return var2 + str(rows[0][0])

     @DBOS.step()
@@ -53,7 +53,7 @@ def test_simple_workflow(dbos: DBOS) -> None:
         assert DBOS.step_status.max_attempts is None
         nonlocal step_counter
         step_counter += 1
-        DBOS.logger.info("I'm test_step")
+        DBOS.logger.info("I'm test_step " + var)
         return var

     assert test_workflow("bob", "bob") == "bob1bob"
@@ -62,15 +62,17 @@ def test_simple_workflow(dbos: DBOS) -> None:
     wfuuid = str(uuid.uuid4())
     with SetWorkflowID(wfuuid):
         assert test_workflow("alice", "alice") == "alice1alice"
+    assert wf_counter == 2
     with SetWorkflowID(wfuuid):
         assert test_workflow("alice", "alice") == "alice1alice"
     assert txn_counter == 2  # Only increment once
     assert step_counter == 2  # Only increment once
+    assert wf_counter == 2  # Only increment once

     # Test we can execute the workflow by uuid
     handle = DBOS.execute_workflow_id(wfuuid)
     assert handle.get_result() == "alice1alice"
-    assert wf_counter ==
+    assert wf_counter == 2


 def test_simple_workflow_attempts_counter(dbos: DBOS) -> None:
@@ -92,10 +94,7 @@ def test_simple_workflow_attempts_counter(dbos: DBOS) -> None:
             assert result is not None
             recovery_attempts, created_at, updated_at = result
             assert recovery_attempts == i + 1
-            if i == 0:
-                assert created_at == updated_at
-            else:
-                assert updated_at > created_at
+            assert updated_at > created_at


 def test_child_workflow(dbos: DBOS) -> None:
@@ -258,12 +257,11 @@ def test_exception_workflow(dbos: DBOS) -> None:
     assert bad_txn_counter == 2  # Only increment once

     # Test we can execute the workflow by uuid, shouldn't throw errors
-    dbos._sys_db._flush_workflow_status_buffer()
     handle = DBOS.execute_workflow_id(wfuuid)
     with pytest.raises(Exception) as exc_info:
         handle.get_result()
     assert "test error" == str(exc_info.value)
-    assert wf_counter ==
+    assert wf_counter == 2  # The workflow error is directly returned without running


 def test_temp_workflow(dbos: DBOS) -> None:
@@ -298,12 +296,6 @@ def test_temp_workflow(dbos: DBOS) -> None:
     res = test_step("var")
     assert res == "var"

-    # Flush workflow inputs buffer shouldn't fail due to foreign key violation.
-    # It should properly skip the transaction inputs.
-    dbos._sys_db._flush_workflow_inputs_buffer()
-
-    # Wait for buffers to flush
-    dbos._sys_db.wait_for_buffer_flush()
     wfs = dbos._sys_db.get_workflows(gwi)
     assert len(wfs.workflow_uuids) == 2

@@ -398,7 +390,6 @@ def test_recovery_workflow(dbos: DBOS) -> None:
     with SetWorkflowID(wfuuid):
         assert test_workflow("bob", "bob") == "bob1bob"

-    dbos._sys_db.wait_for_buffer_flush()
     # Change the workflow status to pending
     with dbos._sys_db.engine.begin() as c:
         c.execute(
@@ -444,7 +435,6 @@ def test_recovery_workflow_step(dbos: DBOS) -> None:
     with SetWorkflowID(wfuuid):
         assert test_workflow("bob", "bob") == "bob"

-    dbos._sys_db.wait_for_buffer_flush()
     # Change the workflow status to pending
     with dbos._sys_db.engine.begin() as c:
         c.execute(
@@ -481,14 +471,13 @@ def test_workflow_returns_none(dbos: DBOS) -> None:
         assert test_workflow("bob", "bob") is None
     assert wf_counter == 1

-    dbos._sys_db.wait_for_buffer_flush()
     with SetWorkflowID(wfuuid):
         assert test_workflow("bob", "bob") is None
-    assert wf_counter ==
+    assert wf_counter == 1

     handle: WorkflowHandle[None] = DBOS.retrieve_workflow(wfuuid)
     assert handle.get_result() == None
-    assert wf_counter ==
+    assert wf_counter == 1

     # Change the workflow status to pending
     with dbos._sys_db.engine.begin() as c:
@@ -501,7 +490,7 @@ def test_workflow_returns_none(dbos: DBOS) -> None:
     workflow_handles = DBOS.recover_pending_workflows()
     assert len(workflow_handles) == 1
     assert workflow_handles[0].get_result() is None
-    assert wf_counter ==
+    assert wf_counter == 2

     # Test that there was a recovery attempt of this
     stat = workflow_handles[0].get_status()
@@ -528,7 +517,6 @@ def test_recovery_temp_workflow(dbos: DBOS) -> None:
     res = test_transaction("bob")
     assert res == "bob1"

-    dbos._sys_db.wait_for_buffer_flush()
     wfs = dbos._sys_db.get_workflows(gwi)
     assert len(wfs.workflow_uuids) == 1
     assert wfs.workflow_uuids[0] == wfuuid
@@ -554,7 +542,6 @@ def test_recovery_temp_workflow(dbos: DBOS) -> None:
     assert len(wfs.workflow_uuids) == 1
     assert wfs.workflow_uuids[0] == wfuuid

-    dbos._sys_db.wait_for_buffer_flush()
     wfi = dbos._sys_db.get_workflow_status(wfs.workflow_uuids[0])
     assert wfi
     assert wfi["name"].startswith("<temp>")
@@ -583,7 +570,6 @@ def test_recovery_thread(config: ConfigFile) -> None:
     with SetWorkflowID(wfuuid):
         assert test_workflow(test_var) == test_var

-    dbos._sys_db.wait_for_buffer_flush()
     # Change the workflow status to pending
     dbos._sys_db.update_workflow_status(
         {
@@ -666,7 +652,7 @@ def test_start_workflow(dbos: DBOS) -> None:
     context = assert_current_dbos_context()
     assert not context.is_within_workflow()
     assert txn_counter == 1
-    assert wf_counter ==
+    assert wf_counter == 1


 def test_retrieve_workflow(dbos: DBOS) -> None:
@@ -778,7 +764,6 @@ def test_retrieve_workflow_in_workflow(dbos: DBOS) -> None:
         fstat1 = wfh.get_status()
         assert fstat1
         fres = wfh.get_result()
-        dbos._sys_db.wait_for_buffer_flush()  # Wait for status to export.
         fstat2 = wfh.get_status()
         assert fstat2
         return fstat1.status + fres + fstat2.status
@@ -905,7 +890,7 @@ def test_send_recv(dbos: DBOS) -> None:
     with SetWorkflowID(send_uuid):
         res = test_send_workflow(handle.get_workflow_id(), "testtopic")
         assert res == dest_uuid
-    assert send_counter ==
+    assert send_counter == 1

     with SetWorkflowID(dest_uuid):
         begin_time = time.time()
@@ -913,7 +898,7 @@ def test_send_recv(dbos: DBOS) -> None:
     duration = time.time() - begin_time
     assert duration < 3.0
     assert res == "test2-test1-test3"
-    assert recv_counter ==
+    assert recv_counter == 1

     with SetWorkflowID(timeout_uuid):
         begin_time = time.time()
@@ -1142,7 +1127,9 @@ def test_multi_set_event(dbos: DBOS) -> None:
     assert DBOS.get_event(wfid, "key") == "value2"


-def test_debug_logging(dbos: DBOS, caplog: pytest.LogCaptureFixture) -> None:
+def test_debug_logging(
+    dbos: DBOS, caplog: pytest.LogCaptureFixture, config: ConfigFile
+) -> None:
     wfid = str(uuid.uuid4())
     dest_wfid = str(uuid.uuid4())

@@ -1196,9 +1183,27 @@ def test_debug_logging(dbos: DBOS, caplog: pytest.LogCaptureFixture) -> None:
     assert "Running recv" in caplog.text
     caplog.clear()

-    dbos._sys_db._flush_workflow_status_buffer()
-
     # Second run
+    with SetWorkflowID(dest_wfid):
+        dest_handle_2 = dbos.start_workflow(test_workflow_dest)
+
+    with SetWorkflowID(wfid):
+        result3 = test_workflow()
+
+    assert result3 == result1
+    assert "is already completed with status SUCCESS" in caplog.text
+
+    result4 = dest_handle_2.get_result()
+    assert result4 == result2
+    caplog.clear()
+
+    # Debug mode run
+    DBOS.destroy()
+    DBOS(config=config)
+    logging.getLogger("dbos").propagate = True
+    caplog.set_level(logging.DEBUG, "dbos")
+    DBOS.launch(debug_mode=True)
+
     with SetWorkflowID(dest_wfid):
         dest_handle_2 = dbos.start_workflow(test_workflow_dest)

@@ -1321,6 +1326,7 @@ def test_app_version(config: ConfigFile) -> None:

 def test_recovery_appversion(config: ConfigFile) -> None:
     input = 5
+    os.environ["DBOS__VMID"] = "testexecutor"

     DBOS.destroy(destroy_registry=True)
     dbos = DBOS(config=config)
@@ -1336,7 +1342,6 @@ def test_recovery_appversion(config: ConfigFile) -> None:
     assert test_workflow(input) == input

     # Change the workflow status to pending
-    dbos._sys_db.wait_for_buffer_flush()
     with dbos._sys_db.engine.begin() as c:
         c.execute(
             sa.update(SystemSchema.workflow_status)
@@ -1345,6 +1350,7 @@ def test_recovery_appversion(config: ConfigFile) -> None:
     )

     # Reconstruct an identical environment to simulate a restart
+    os.environ["DBOS__VMID"] = "testexecutor_another"
     DBOS.destroy(destroy_registry=True)
     dbos = DBOS(config=config)

@@ -1355,12 +1361,11 @@ def test_recovery_appversion(config: ConfigFile) -> None:
     DBOS.launch()

     # The workflow should successfully recover
-    workflow_handles = DBOS.recover_pending_workflows()
+    workflow_handles = DBOS.recover_pending_workflows(["testexecutor"])
     assert len(workflow_handles) == 1
     assert workflow_handles[0].get_result() == input

     # Change the workflow status to pending
-    dbos._sys_db.wait_for_buffer_flush()
     with dbos._sys_db.engine.begin() as c:
         c.execute(
             sa.update(SystemSchema.workflow_status)
@@ -1379,5 +1384,8 @@ def test_recovery_appversion(config: ConfigFile) -> None:
     DBOS.launch()

     # The workflow should not recover
-    workflow_handles = DBOS.recover_pending_workflows()
+    workflow_handles = DBOS.recover_pending_workflows(["testexecutor"])
     assert len(workflow_handles) == 0
+
+    # Clean up the environment variable
+    del os.environ["DBOS__VMID"]
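
The recovery test now pins an executor identity via the `DBOS__VMID` environment variable and passes an explicit executor list to `DBOS.recover_pending_workflows`, scoping recovery to workflows owned by that executor. A condensed sketch of the pattern; the executor name is the test's own choice:

```python
import os

os.environ["DBOS__VMID"] = "testexecutor"  # identify this process as an executor
# ... run workflows, then simulate a restart ...
handles = DBOS.recover_pending_workflows(["testexecutor"])  # recover only this executor's work
del os.environ["DBOS__VMID"]  # clean up so later tests see the default identity
```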
--- a/tests/test_failures.py
+++ b/tests/test_failures.py
@@ -146,43 +146,6 @@ def test_notification_errors(dbos: DBOS) -> None:
         assert duration < 3.0


-def test_buffer_flush_errors(dbos: DBOS) -> None:
-    @DBOS.transaction()
-    def test_transaction(var: str) -> str:
-        rows = DBOS.sql_session.execute(sa.text("SELECT 1")).fetchall()
-        return var + str(rows[0][0])
-
-    cur_time: str = datetime.datetime.now().isoformat()
-    gwi: GetWorkflowsInput = GetWorkflowsInput()
-    gwi.start_time = cur_time
-
-    res = test_transaction("bob")
-    assert res == "bob1"
-
-    dbos._sys_db.wait_for_buffer_flush()
-    wfs = dbos._sys_db.get_workflows(gwi)
-    assert len(wfs.workflow_uuids) == 1
-
-    # Crash the system database connection and make sure the buffer flush works on time.
-    backup_engine = dbos._sys_db.engine
-    dbos._sys_db.engine = sa.create_engine(
-        "postgresql+psycopg://fake:database@localhost/fake_db"
-    )
-
-    res = test_transaction("bob")
-    assert res == "bob1"
-
-    # Should see some errors in the logs
-    time.sleep(2)
-
-    # Switch back to the original good engine.
-    dbos._sys_db.engine = backup_engine
-
-    dbos._sys_db.wait_for_buffer_flush()
-    wfs = dbos._sys_db.get_workflows(gwi)
-    assert len(wfs.workflow_uuids) == 2
-
-
 def test_dead_letter_queue(dbos: DBOS) -> None:
     event = threading.Event()
     max_recovery_attempts = 20
@@ -229,7 +192,6 @@ def test_dead_letter_queue(dbos: DBOS) -> None:
     # Complete the blocked workflow
     event.set()
     assert handle.get_result() == resumed_handle.get_result() == None
-    dbos._sys_db.wait_for_buffer_flush()
     assert handle.get_status().status == WorkflowStatusString.SUCCESS.value

@@ -239,6 +201,8 @@ def test_wfstatus_invalid(dbos: DBOS) -> None:
         return "done"

     has_executed = False
+    event = threading.Event()
+    wfevent = threading.Event()

     @DBOS.workflow()
     def non_deterministic_workflow() -> None:
@@ -251,16 +215,24 @@ def test_wfstatus_invalid(dbos: DBOS) -> None:
         handle.get_status()
         res = handle.get_result()
         assert res == "done"
+        wfevent.set()
+        event.wait()
         return

     wfuuid = str(uuid.uuid4())
     with SetWorkflowID(wfuuid):
-        non_deterministic_workflow()
+        handle1 = dbos.start_workflow(non_deterministic_workflow)
+
+    # Make sure the first one has reached the point where it waits for the event
+    wfevent.wait()
+    with SetWorkflowID(wfuuid):
+        handle2 = dbos.start_workflow(non_deterministic_workflow)

     with pytest.raises(DBOSException) as exc_info:
-
-        non_deterministic_workflow()
+        handle2.get_result()
     assert "Hint: Check if your workflow is deterministic." in str(exc_info.value)
+    event.set()
+    assert handle1.get_result() == None


 def test_step_retries(dbos: DBOS) -> None:
--- a/tests/test_fastapi.py
+++ b/tests/test_fastapi.py
@@ -134,7 +134,6 @@ def test_endpoint_recovery(dbos_fastapi: Tuple[DBOS, FastAPI]) -> None:
     assert response.json().get("id1") == wfuuid
     assert response.json().get("id2") != wfuuid

-    dbos._sys_db.wait_for_buffer_flush()
     # Change the workflow status to pending
     dbos._sys_db.update_workflow_status(
         {
--- a/tests/test_fastapi_roles.py
+++ b/tests/test_fastapi_roles.py
@@ -132,7 +132,6 @@ def test_simple_endpoint(dbos_fastapi: Tuple[DBOS, FastAPI]) -> None:
     assert span.attributes["authenticatedUserRoles"] == '["user", "engineer"]'

     # Verify that there is one workflow for this user.
-    dbos._sys_db.wait_for_buffer_flush()
     gwi = GetWorkflowsInput()
     gwi.authenticated_user = "user1"
     wfl = dbos._sys_db.get_workflows(gwi)
--- a/tests/test_flask.py
+++ b/tests/test_flask.py
@@ -95,7 +95,6 @@ def test_endpoint_recovery(dbos_flask: Tuple[DBOS, Flask]) -> None:
     assert response.json.get("id1") == wfuuid
     assert response.json.get("id2") != wfuuid

-    dbos._sys_db.wait_for_buffer_flush()
     # Change the workflow status to pending
     dbos._sys_db.update_workflow_status(
         {
--- a/tests/test_queue.py
+++ b/tests/test_queue.py
@@ -19,7 +19,7 @@ from dbos import (
     WorkflowHandle,
 )
 from dbos._schemas.system_database import SystemSchema
-from dbos._sys_db import WorkflowStatusString, _buffer_flush_interval_secs
+from dbos._sys_db import WorkflowStatusString
 from tests.conftest import default_config, queue_entries_are_cleaned_up


@@ -50,7 +50,7 @@ def test_simple_queue(dbos: DBOS) -> None:
     assert handle.get_result() == "abcd123"
     with SetWorkflowID(wfid):
         assert test_workflow("abc", "123") == "abcd123"
-    assert wf_counter ==
+    assert wf_counter == 1
     assert step_counter == 1


@@ -232,7 +232,6 @@ def test_limiter(dbos: DBOS) -> None:
         assert times[limit * (wave + 1)] - times[limit * wave] < period + 0.2

     # Verify all workflows get the SUCCESS status eventually
-    dbos._sys_db.wait_for_buffer_flush()
     for h in handles:
         assert h.get_status().status == WorkflowStatusString.SUCCESS.value

@@ -300,7 +299,6 @@ def test_multiple_queues(dbos: DBOS) -> None:
         assert times[limit * (wave + 1)] - times[limit * wave] < period + 0.2

     # Verify all workflows get the SUCCESS status eventually
-    dbos._sys_db.wait_for_buffer_flush()
     for h in handles:
         assert h.get_status().status == WorkflowStatusString.SUCCESS.value

@@ -901,49 +899,6 @@ def test_resuming_queued_workflows(dbos: DBOS) -> None:
     assert queue_entries_are_cleaned_up(dbos)


-# Test a race condition between removing a task from the queue and flushing the status buffer
-def test_resuming_already_completed_queue_workflow(dbos: DBOS) -> None:
-    dbos._sys_db._run_background_processes = False  # Disable buffer flush
-
-    start_event = threading.Event()
-    counter = 0
-
-    @DBOS.workflow()
-    def test_step() -> None:
-        start_event.set()
-        nonlocal counter
-        counter += 1
-
-    queue = Queue("test_queue")
-    handle = queue.enqueue(test_step)
-    start_event.wait()
-    start_event.clear()
-    time.sleep(_buffer_flush_interval_secs)
-    assert (
-        handle.get_status().status == WorkflowStatusString.PENDING.value
-    )  # Not flushed
-    assert counter == 1  # But, really, it's completed
-    dbos._sys_db._workflow_status_buffer = (
-        {}
-    )  # Clear buffer (simulates a process restart)
-
-    # Recovery picks up on the workflow and recovers it
-    recovered_ids = DBOS.recover_pending_workflows()
-    assert len(recovered_ids) == 1
-    assert recovered_ids[0].get_workflow_id() == handle.get_workflow_id()
-    start_event.wait()
-    assert counter == 2  # The workflow ran again
-    time.sleep(
-        _buffer_flush_interval_secs
-    )  # This is actually to wait that _get_wf_invoke_func buffers the status
-    dbos._sys_db._flush_workflow_status_buffer()  # Manually flush
-    assert (
-        handle.get_status().status == WorkflowStatusString.SUCCESS.value
-    )  # Is recovered
-    assert handle.get_status().executor_id == "local"
-    assert handle.get_status().recovery_attempts == 2
-
-
 def test_dlq_enqueued_workflows(dbos: DBOS) -> None:
     start_event = threading.Event()
     blocking_event = threading.Event()
@@ -1002,7 +957,6 @@ def test_dlq_enqueued_workflows(dbos: DBOS) -> None:
     # Complete the blocked workflow
     blocking_event.set()
     assert blocked_handle.get_result() == None
-    dbos._sys_db.wait_for_buffer_flush()
     assert blocked_handle.get_status().status == WorkflowStatusString.SUCCESS.value
     with dbos._sys_db.engine.begin() as c:
         query = sa.select(SystemSchema.workflow_status.c.recovery_attempts).where(
--- a/tests/test_workflow_cmds.py
+++ b/tests/test_workflow_cmds.py
@@ -27,7 +27,6 @@ def test_list_workflow(dbos: DBOS) -> None:
     wfid = str(uuid.uuid4)
     with SetWorkflowID(wfid):
         assert simple_workflow(1) == 2
-    dbos._sys_db._flush_workflow_status_buffer()

     # List the workflow, then test every output
     outputs = DBOS.list_workflows()
@@ -766,8 +765,6 @@ async def test_callchild_first_asyncio(dbos: DBOS, sys_db: SystemDatabase) -> None:
     handle = await dbos.start_workflow_async(parentWorkflow)
     child_id = await handle.get_result()

-    dbos._sys_db._flush_workflow_status_buffer()
-
     wfsteps = _workflow_commands.list_workflow_steps(sys_db, wfid)
     assert len(wfsteps) == 4
     assert wfsteps[0]["function_name"] == child_workflow.__qualname__