dbos 0.7.0a5__tar.gz → 0.7.0a8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dbos-0.7.0a5 → dbos-0.7.0a8}/PKG-INFO +1 -1
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/__init__.py +2 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/context.py +3 -2
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/core.py +69 -49
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/dbos.py +56 -45
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/kafka.py +27 -12
- dbos-0.7.0a8/dbos/migrations/versions/eab0cc1d9a14_job_queue.py +55 -0
- dbos-0.7.0a8/dbos/queue.py +36 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/recovery.py +1 -1
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/scheduler/scheduler.py +7 -9
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/schemas/system_database.py +23 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/system_database.py +55 -1
- {dbos-0.7.0a5 → dbos-0.7.0a8}/pyproject.toml +1 -1
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/scheduler/test_scheduler.py +20 -6
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_admin_server.py +3 -2
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_dbos.py +22 -19
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_failures.py +11 -11
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_fastapi.py +3 -2
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_fastapi_roles.py +3 -3
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_flask.py +3 -2
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_kafka.py +49 -7
- dbos-0.7.0a8/tests/test_queue.py +110 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_schema_migration.py +6 -6
- {dbos-0.7.0a5 → dbos-0.7.0a8}/LICENSE +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/README.md +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/admin_sever.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/application_database.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/cli.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/dbos-config.schema.json +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/dbos_config.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/decorators.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/error.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/fastapi.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/flask.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/kafka_message.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/logger.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/migrations/env.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/migrations/script.py.mako +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/migrations/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/py.typed +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/registrations.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/request.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/roles.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/scheduler/croniter.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/schemas/__init__.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/schemas/application_database.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/README.md +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/__package/__init__.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/__package/main.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/__package/schema.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/alembic.ini +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/dbos-config.yaml.dbos +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/migrations/env.py.dbos +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/migrations/script.py.mako +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/templates/hello/start_postgres_docker.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/tracer.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/dbos/utils.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/__init__.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/atexit_no_ctor.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/atexit_no_launch.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/classdefs.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/conftest.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/more_classdefs.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/scheduler/test_croniter.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_classdecorators.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_concurrency.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_config.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_package.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/tests/test_singleton.py +0 -0
- {dbos-0.7.0a5 → dbos-0.7.0a8}/version/__init__.py +0 -0
dbos/__init__.py
@@ -3,6 +3,7 @@ from .context import DBOSContextEnsure, SetWorkflowID
 from .dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle, WorkflowStatus
 from .dbos_config import ConfigFile, get_dbos_database_url, load_config
 from .kafka_message import KafkaMessage
+from .queue import Queue
 from .system_database import GetWorkflowsInput, WorkflowStatusString

 __all__ = [
@@ -19,4 +20,5 @@ __all__ = [
     "load_config",
     "get_dbos_database_url",
     "error",
+    "Queue",
 ]
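The headline feature of this release is the workflow queue (see the new `dbos/queue.py` and the `eab0cc1d9a14_job_queue` migration in the file list above), now exported at top level. A minimal usage sketch, assuming `Queue.enqueue` returns the same polling handle a deferred start does (the core changes below return `_WorkflowHandlePolling` when `execute_workflow` is false); the `concurrency` keyword is borrowed from its use in `dbos/kafka.py`:

```python
from dbos import DBOS, Queue

queue = Queue("example_queue", concurrency=1)

@DBOS.workflow()
def process(task: str) -> str:
    return task.upper()

# Enqueued rather than executed immediately: the workflow is recorded as
# ENQUEUED and later picked up by the queue thread started in DBOS.launch().
handle = queue.enqueue(process, "hello")
assert handle.get_result() == "HELLO"
```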
dbos/context.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import json
 import os
 import uuid
+from contextlib import AbstractContextManager
 from contextvars import ContextVar
 from enum import Enum
 from types import TracebackType
@@ -344,7 +345,7 @@ class SetWorkflowRecovery:
         return False  # Did not handle


-class EnterDBOSWorkflow:
+class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.created_ctx = False
         self.attributes = attributes
@@ -377,7 +378,7 @@ class EnterDBOSWorkflow:
         return False  # Did not handle


-class EnterDBOSChildWorkflow:
+class EnterDBOSChildWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.parent_ctx: Optional[DBOSContext] = None
         self.child_ctx: Optional[DBOSContext] = None
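Both context managers now subclass `contextlib.AbstractContextManager`, parameterized so `__exit__` is typed `Literal[False]`: the manager never suppresses exceptions, which lets type checkers reason about reachability after a `with` block. The two-parameter subscript follows the updated typeshed stubs (the runtime subscript is not arity-checked, so this also runs on older interpreters). A standalone illustration of the pattern:

```python
from contextlib import AbstractContextManager
from types import TracebackType
from typing import Literal, Optional, Type


class Scoped(AbstractContextManager["Scoped", Literal[False]]):
    def __enter__(self) -> "Scoped":
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> Literal[False]:
        return False  # Did not handle: exceptions always propagate


# Because __exit__ returns Literal[False], a checker knows an exception
# raised inside the `with` body escapes the block.
with Scoped():
    pass
```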
dbos/core.py
@@ -63,6 +63,7 @@ from dbos.system_database import (
     OperationResultInternal,
     WorkflowInputs,
     WorkflowStatusInternal,
+    WorkflowStatusString,
 )

 if TYPE_CHECKING:
@@ -108,7 +109,7 @@ class _WorkflowHandlePolling(Generic[R]):
         return self.workflow_id

     def get_result(self) -> R:
-        res: R = self.dbos.
+        res: R = self.dbos._sys_db.await_workflow_result(self.workflow_id)
         return res

     def get_status(self) -> "WorkflowStatus":
@@ -126,6 +127,7 @@ def _init_workflow(
     class_name: Optional[str],
     config_name: Optional[str],
     temp_wf_type: Optional[str],
+    queue: Optional[str] = None,
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -134,7 +136,11 @@ def _init_workflow(
     )
     status: WorkflowStatusInternal = {
         "workflow_uuid": wfid,
-        "status":
+        "status": (
+            WorkflowStatusString.PENDING.value
+            if queue is None
+            else WorkflowStatusString.ENQUEUED.value
+        ),
         "name": wf_name,
         "class_name": class_name,
         "config_name": config_name,
@@ -150,20 +156,25 @@ def _init_workflow(
             json.dumps(ctx.authenticated_roles) if ctx.authenticated_roles else None
         ),
         "assumed_role": ctx.assumed_role,
+        "queue_name": queue,
     }

     # If we have a class name, the first arg is the instance and do not serialize
     if class_name is not None:
         inputs = {"args": inputs["args"][1:], "kwargs": inputs["kwargs"]}

-    if temp_wf_type != "transaction":
+    if temp_wf_type != "transaction" or queue is not None:
         # Synchronously record the status and inputs for workflows and single-step workflows
         # We also have to do this for single-step workflows because of the foreign key constraint on the operation outputs table
-
-        dbos.
+        # TODO: Make this transactional (and with the queue step below)
+        dbos._sys_db.update_workflow_status(status, False, ctx.in_recovery)
+        dbos._sys_db.update_workflow_inputs(wfid, utils.serialize(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
-        dbos.
+        dbos._sys_db.buffer_workflow_inputs(wfid, utils.serialize(inputs))
+
+    if queue is not None:
+        dbos._sys_db.enqueue(wfid, queue)

     return status
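`dbos/queue.py` itself is new in this release and its body is not expanded in this diff, but the call sites pin down its shape: `Queue(name, concurrency=...)` registers into `_DBOSRegistry.queue_info_map`, `queue.enqueue(func, *args)` starts a workflow with `execute_workflow=False`, and `queue_thread(stop_event, dbos)` is the poller started in `DBOS.launch()`. A hypothetical reconstruction under those assumptions (the dispatch internals and any helper names are guesses):

```python
# Hypothetical sketch of dbos/queue.py, inferred only from the call sites
# visible in this diff; not the actual module contents.
import threading
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from dbos.dbos import DBOS


class Queue:
    def __init__(self, name: str, concurrency: Optional[int] = None) -> None:
        self.name = name
        self.concurrency = concurrency
        # Register the queue so the queue thread can find it
        # (queue_info_map is added to _DBOSRegistry in this release).
        from dbos.dbos import _get_or_create_dbos_registry

        _get_or_create_dbos_registry().queue_info_map[name] = self

    def enqueue(self, func: Any, *args: Any, **kwargs: Any) -> Any:
        # execute_workflow=False makes _start_workflow record the workflow
        # as ENQUEUED and return a polling handle instead of running it here.
        from dbos.core import _start_workflow
        from dbos.dbos import _get_dbos_instance

        return _start_workflow(
            _get_dbos_instance(), func, self.name, False, *args, **kwargs
        )


def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
    # Poll the job-queue table (added by the eab0cc1d9a14_job_queue
    # migration), dispatching ENQUEUED workflows per queue up to its
    # concurrency limit, until DBOS.destroy() sets the stop event.
    while not stop_event.wait(timeout=1):
        for queue in dbos._registry.queue_info_map.values():
            ...  # dequeue and resume via _execute_workflow_id (assumed)
```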
@@ -179,7 +190,9 @@ def _execute_workflow(
         output = func(*args, **kwargs)
         status["status"] = "SUCCESS"
         status["output"] = utils.serialize(output)
-
+        if status["queue_name"] is not None:
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"])
+        dbos._sys_db.buffer_workflow_status(status)
     except DBOSWorkflowConflictIDError:
         # Retrieve the workflow handle and wait for the result.
         # Must use existing_workflow=False because workflow status might not be set yet for single transaction workflows.
@@ -191,7 +204,9 @@ def _execute_workflow(
     except Exception as error:
         status["status"] = "ERROR"
         status["error"] = utils.serialize(error)
-
+        if status["queue_name"] is not None:
+            dbos._sys_db.remove_from_queue(status["workflow_uuid"])
+        dbos._sys_db.update_workflow_status(status)
         raise

     return output
@@ -221,10 +236,10 @@ def _execute_workflow_wthread(


 def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
-    status = dbos.
+    status = dbos._sys_db.get_workflow_status(workflow_id)
     if not status:
         raise DBOSRecoveryError(workflow_id, "Workflow status not found")
-    inputs = dbos.
+    inputs = dbos._sys_db.get_workflow_inputs(workflow_id)
     if not inputs:
         raise DBOSRecoveryError(workflow_id, "Workflow inputs not found")
     wf_func = dbos._registry.workflow_info_map.get(status["name"], None)
@@ -249,6 +264,8 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
         return _start_workflow(
             dbos,
             wf_func,
+            status["queue_name"],
+            True,
             dbos._registry.instance_info_map[iname],
             *inputs["args"],
             **inputs["kwargs"],
@@ -264,6 +281,8 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
         return _start_workflow(
             dbos,
             wf_func,
+            status["queue_name"],
+            True,
             dbos._registry.class_info_map[class_name],
             *inputs["args"],
             **inputs["kwargs"],
@@ -271,7 +290,12 @@ def _execute_workflow_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
     else:
         with SetWorkflowID(workflow_id):
             return _start_workflow(
-                dbos,
+                dbos,
+                wf_func,
+                status["queue_name"],
+                True,
+                *inputs["args"],
+                **inputs["kwargs"],
             )


@@ -298,34 +322,22 @@ def _workflow_wrapper(dbosreg: "_DBOSRegistry", func: F) -> F:
             "kwargs": kwargs,
         }
         ctx = get_local_dbos_context()
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-        else:
-            with EnterDBOSWorkflow(attributes), DBOSAssumeRole(rr):
-                ctx = assert_current_dbos_context()
-                status = _init_workflow(
-                    dbos,
-                    ctx,
-                    inputs=inputs,
-                    wf_name=get_dbos_func_name(func),
-                    class_name=get_dbos_class_name(fi, func, args),
-                    config_name=get_config_name(fi, func, args),
-                    temp_wf_type=get_temp_workflow_type(func),
-                )
+        enterWorkflowCtxMgr = (
+            EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
+        )
+        with enterWorkflowCtxMgr(attributes), DBOSAssumeRole(rr):
+            ctx = assert_current_dbos_context()  # Now the child ctx
+            status = _init_workflow(
+                dbos,
+                ctx,
+                inputs=inputs,
+                wf_name=get_dbos_func_name(func),
+                class_name=get_dbos_class_name(fi, func, args),
+                config_name=get_config_name(fi, func, args),
+                temp_wf_type=get_temp_workflow_type(func),
+            )

-
+            return _execute_workflow(dbos, status, func, *args, **kwargs)

     wrapped_func = cast(F, wrapper)
     return wrapped_func
@@ -343,6 +355,8 @@ def _workflow(reg: "_DBOSRegistry") -> Callable[[F], F]:
 def _start_workflow(
     dbos: "DBOS",
     func: "Workflow[P, R]",
+    queue_name: Optional[str],
+    execute_workflow: bool,
     *args: P.args,
     **kwargs: P.kwargs,
 ) -> "WorkflowHandle[R]":
@@ -396,10 +410,14 @@ def _start_workflow(
         class_name=get_dbos_class_name(fi, func, gin_args),
         config_name=get_config_name(fi, func, gin_args),
         temp_wf_type=get_temp_workflow_type(func),
+        queue=queue_name,
     )

+    if not execute_workflow:
+        return _WorkflowHandlePolling(new_wf_id, dbos)
+
     if fself is not None:
-        future = dbos.
+        future = dbos._executor.submit(
             cast(Callable[..., R], _execute_workflow_wthread),
             dbos,
             status,
@@ -410,7 +428,7 @@ def _start_workflow(
             **kwargs,
         )
     else:
-        future = dbos.
+        future = dbos._executor.submit(
             cast(Callable[..., R], _execute_workflow_wthread),
             dbos,
             status,
@@ -432,7 +450,7 @@ def _transaction(
                     f"Function {func.__name__} invoked before DBOS initialized"
                 )
             dbos = dbosreg.dbos
-            with dbos.
+            with dbos._app_db.sessionmaker() as session:
                 attributes: TracedAttributes = {
                     "name": func.__name__,
                     "operationType": OperationType.TRANSACTION.value,
@@ -510,7 +528,7 @@ def _transaction(
                 # Don't record the error if it was already recorded
                 if not has_recorded_error:
                     txn_output["error"] = utils.serialize(error)
-                    dbos.
+                    dbos._app_db.record_transaction_error(txn_output)
                 raise
             return output

@@ -541,6 +559,7 @@ def _transaction(
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "transaction")
         dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
+        wrapper.__orig_func = temp_wf  # type: ignore

     return cast(F, wrapper)

@@ -575,7 +594,7 @@ def _step(
                 "output": None,
                 "error": None,
             }
-            recorded_output = dbos.
+            recorded_output = dbos._sys_db.check_operation_execution(
                 ctx.workflow_id, ctx.function_id
             )
             if recorded_output:
@@ -622,7 +641,7 @@ def _step(
             step_output["error"] = (
                 utils.serialize(error) if error is not None else None
             )
-            dbos.
+            dbos._sys_db.record_operation_result(step_output)

             if error is not None:
                 raise error
@@ -657,6 +676,7 @@ def _step(
         set_dbos_func_name(temp_wf, "<temp>." + func.__qualname__)
         set_temp_workflow_type(temp_wf, "step")
         dbosreg.register_wf_function(get_dbos_func_name(temp_wf), wrapped_wf)
+        wrapper.__orig_func = temp_wf  # type: ignore

     return cast(F, wrapper)

@@ -671,7 +691,7 @@ def _send(
         "name": "send",
     }
     with EnterDBOSStep(attributes) as ctx:
-        dbos.
+        dbos._sys_db.send(
             ctx.workflow_id,
             ctx.curr_step_function_id,
             destination_id,
@@ -702,7 +722,7 @@ def _recv(
         with EnterDBOSStep(attributes) as ctx:
             ctx.function_id += 1  # Reserve for the sleep
             timeout_function_id = ctx.function_id
-            return dbos.
+            return dbos._sys_db.recv(
                 ctx.workflow_id,
                 ctx.curr_step_function_id,
                 timeout_function_id,
@@ -725,7 +745,7 @@ def _set_event(dbos: "DBOS", key: str, value: Any) -> None:
             "name": "set_event",
         }
         with EnterDBOSStep(attributes) as ctx:
-            dbos.
+            dbos._sys_db.set_event(
                 ctx.workflow_id, ctx.curr_step_function_id, key, value
             )
     else:
@@ -753,7 +773,7 @@ def _get_event(
             "function_id": ctx.curr_step_function_id,
             "timeout_function_id": timeout_function_id,
         }
-        return dbos.
+        return dbos._sys_db.get_event(workflow_id, key, timeout_seconds, caller_ctx)
     else:
         # Directly call it outside of a workflow
-        return dbos.
+        return dbos._sys_db.get_event(workflow_id, key, timeout_seconds)
dbos/dbos.py
@@ -20,7 +20,6 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
-    cast,
 )

 from opentelemetry.trace import Span
@@ -40,6 +39,7 @@ from dbos.core import (
     _WorkflowHandlePolling,
 )
 from dbos.decorators import classproperty
+from dbos.queue import Queue, queue_thread
 from dbos.recovery import _recover_pending_workflows, _startup_recovery_thread
 from dbos.registrations import (
     DBOSClassInfo,
@@ -138,6 +138,7 @@ class _DBOSRegistry:
         self.workflow_info_map: dict[str, Workflow[..., Any]] = {}
         self.class_info_map: dict[str, type] = {}
         self.instance_info_map: dict[str, object] = {}
+        self.queue_info_map: dict[str, Queue] = {}
         self.pollers: list[_RegisteredJob] = []
         self.dbos: Optional[DBOS] = None
         self.config: Optional[ConfigFile] = None
@@ -163,7 +164,7 @@ class _DBOSRegistry:
     ) -> None:
         if self.dbos and self.dbos._launched:
             self.dbos.stop_events.append(evt)
-            self.dbos.
+            self.dbos._executor.submit(func, *args, **kwargs)
         else:
             self.pollers.append((evt, func, args, kwargs))

@@ -265,15 +266,15 @@ class DBOS:
         dbos_logger.info("Initializing DBOS")
         self.config: ConfigFile = config
         self._launched: bool = False
-        self.
-        self.
+        self._sys_db_field: Optional[SystemDatabase] = None
+        self._app_db_field: Optional[ApplicationDatabase] = None
         self._registry: _DBOSRegistry = _get_or_create_dbos_registry()
         self._registry.dbos = self
-        self.
+        self._admin_server_field: Optional[AdminServer] = None
         self.stop_events: List[threading.Event] = []
         self.fastapi: Optional["FastAPI"] = fastapi
         self.flask: Optional["Flask"] = flask
-        self.
+        self._executor_field: Optional[ThreadPoolExecutor] = None

         # If using FastAPI, set up middleware and lifecycle events
         if self.fastapi is not None:
@@ -302,33 +303,33 @@ class DBOS:
             handler.flush()

     @property
-    def
-        if self.
+    def _executor(self) -> ThreadPoolExecutor:
+        if self._executor_field is None:
             raise DBOSException("Executor accessed before DBOS was launched")
-        rv: ThreadPoolExecutor = self.
+        rv: ThreadPoolExecutor = self._executor_field
         return rv

     @property
-    def
-        if self.
+    def _sys_db(self) -> SystemDatabase:
+        if self._sys_db_field is None:
             raise DBOSException("System database accessed before DBOS was launched")
-        rv: SystemDatabase = self.
+        rv: SystemDatabase = self._sys_db_field
         return rv

     @property
-    def
-        if self.
+    def _app_db(self) -> ApplicationDatabase:
+        if self._app_db_field is None:
             raise DBOSException(
                 "Application database accessed before DBOS was launched"
             )
-        rv: ApplicationDatabase = self.
+        rv: ApplicationDatabase = self._app_db_field
         return rv

     @property
-    def
-        if self.
+    def _admin_server(self) -> AdminServer:
+        if self._admin_server_field is None:
             raise DBOSException("Admin server accessed before DBOS was launched")
-        rv: AdminServer = self.
+        rv: AdminServer = self._admin_server_field
         return rv

     @classmethod
@@ -341,25 +342,30 @@ class DBOS:
             dbos_logger.warning(f"DBOS was already launched")
             return
         self._launched = True
-        self.
-        self.
-        self.
-        self.
+        self._executor_field = ThreadPoolExecutor(max_workers=64)
+        self._sys_db_field = SystemDatabase(self.config)
+        self._app_db_field = ApplicationDatabase(self.config)
+        self._admin_server_field = AdminServer(dbos=self)

         if not os.environ.get("DBOS__VMID"):
-            workflow_ids = self.
-            self.
+            workflow_ids = self._sys_db.get_pending_workflows("local")
+            self._executor.submit(_startup_recovery_thread, self, workflow_ids)

         # Listen to notifications
-        self.
+        self._executor.submit(self._sys_db._notification_listener)

         # Start flush workflow buffers thread
-        self.
+        self._executor.submit(self._sys_db.flush_workflow_buffers)
+
+        # Start the queue thread
+        evt = threading.Event()
+        self.stop_events.append(evt)
+        self._executor.submit(queue_thread, evt, self)

         # Grab any pollers that were deferred and start them
         for evt, func, args, kwargs in self._registry.pollers:
             self.stop_events.append(evt)
-            self.
+            self._executor.submit(func, *args, **kwargs)
         self._registry.pollers = []

         dbos_logger.info("DBOS launched")
@@ -374,20 +380,20 @@ class DBOS:
         self._initialized = False
         for event in self.stop_events:
             event.set()
-        if self.
-            self.
-            self.
-        if self.
-            self.
-            self.
-        if self.
-            self.
-            self.
+        if self._sys_db_field is not None:
+            self._sys_db_field.destroy()
+            self._sys_db_field = None
+        if self._app_db_field is not None:
+            self._app_db_field.destroy()
+            self._app_db_field = None
+        if self._admin_server_field is not None:
+            self._admin_server_field.stop()
+            self._admin_server_field = None
         # CB - This needs work, some things ought to stop before DBs are tossed out,
         # on the other hand it hangs to move it
-        if self.
-            self.
-            self.
+        if self._executor_field is not None:
+            self._executor_field.shutdown(cancel_futures=True)
+            self._executor_field = None

     @classmethod
     def register_instance(cls, inst: object) -> None:
@@ -488,13 +494,18 @@ class DBOS:

     @classmethod
     def kafka_consumer(
-        cls,
+        cls,
+        config: dict[str, Any],
+        topics: list[str],
+        in_order: bool = False,
     ) -> Callable[[KafkaConsumerWorkflow], KafkaConsumerWorkflow]:
         """Decorate a function to be used as a Kafka consumer."""
         try:
             from dbos.kafka import kafka_consumer

-            return kafka_consumer(
+            return kafka_consumer(
+                _get_or_create_dbos_registry(), config, topics, in_order
+            )
         except ModuleNotFoundError as e:
             raise DBOSException(
                 f"{e.name} dependency not found. Please install {e.name} via your package manager."
@@ -508,7 +519,7 @@ class DBOS:
         **kwargs: P.kwargs,
     ) -> WorkflowHandle[R]:
         """Invoke a workflow function in the background, returning a handle to the ongoing execution."""
-        return _start_workflow(_get_dbos_instance(), func, *args, **kwargs)
+        return _start_workflow(_get_dbos_instance(), func, None, True, *args, **kwargs)

     @classmethod
     def get_workflow_status(cls, workflow_id: str) -> Optional[WorkflowStatus]:
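With the two new positional parameters, every `_start_workflow` caller must now say whether the run is queued; the public `DBOS.start_workflow` passes `None, True`, so its start-immediately behavior is unchanged:

```python
from dbos import DBOS

@DBOS.workflow()
def greet(name: str) -> str:
    return f"Hello, {name}"

# Unqueued path: queue_name=None and execute_workflow=True, so the workflow
# is submitted to the executor right away and the handle wraps a future.
handle = DBOS.start_workflow(greet, "DBOS")
assert handle.get_result() == "Hello, DBOS"
```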
@@ -516,11 +527,11 @@ class DBOS:
         ctx = get_local_dbos_context()
         if ctx and ctx.is_within_workflow():
             ctx.function_id += 1
-            stat = _get_dbos_instance().
+            stat = _get_dbos_instance()._sys_db.get_workflow_status_within_wf(
                 workflow_id, ctx.workflow_id, ctx.function_id
             )
         else:
-            stat = _get_dbos_instance().
+            stat = _get_dbos_instance()._sys_db.get_workflow_status(workflow_id)
         if stat is None:
             return None

@@ -584,7 +595,7 @@ class DBOS:
         if seconds <= 0:
             return
         with EnterDBOSStep(attributes) as ctx:
-            _get_dbos_instance().
+            _get_dbos_instance()._sys_db.sleep(
                 ctx.workflow_id, ctx.curr_step_function_id, seconds
            )
dbos/kafka.py
@@ -1,26 +1,30 @@
 import threading
-import
-from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Callable, Generator, NoReturn, Optional, Union
+from typing import TYPE_CHECKING, Any, Callable, NoReturn

 from confluent_kafka import Consumer, KafkaError, KafkaException
-
+
+from dbos.queue import Queue

 if TYPE_CHECKING:
     from dbos.dbos import _DBOSRegistry

 from .context import SetWorkflowID
+from .error import DBOSInitializationError
 from .kafka_message import KafkaMessage
 from .logger import dbos_logger

 KafkaConsumerWorkflow = Callable[[KafkaMessage], None]

+kafka_queue: Queue
+in_order_kafka_queues: dict[str, Queue] = {}
+

 def _kafka_consumer_loop(
     func: KafkaConsumerWorkflow,
     config: dict[str, Any],
     topics: list[str],
     stop_event: threading.Event,
+    in_order: bool,
 ) -> None:

     def on_error(err: KafkaError) -> NoReturn:
@@ -70,24 +74,35 @@ def _kafka_consumer_loop(
                 with SetWorkflowID(
                     f"kafka-unique-id-{msg.topic}-{msg.partition}-{msg.offset}"
                 ):
-
-
-
-
-
-                    )
+                    if in_order:
+                        assert msg.topic is not None
+                        queue = in_order_kafka_queues[msg.topic]
+                        queue.enqueue(func, msg)
+                    else:
+                        kafka_queue.enqueue(func, msg)

     finally:
         consumer.close()


 def kafka_consumer(
-    dbosreg: "_DBOSRegistry", config: dict[str, Any], topics: list[str]
+    dbosreg: "_DBOSRegistry", config: dict[str, Any], topics: list[str], in_order: bool
 ) -> Callable[[KafkaConsumerWorkflow], KafkaConsumerWorkflow]:
     def decorator(func: KafkaConsumerWorkflow) -> KafkaConsumerWorkflow:
+        if in_order:
+            for topic in topics:
+                if topic.startswith("^"):
+                    raise DBOSInitializationError(
+                        f"Error: in-order processing is not supported for regular expression topic selectors ({topic})"
+                    )
+                queue = Queue(f"_dbos_kafka_queue_topic_{topic}", concurrency=1)
+                in_order_kafka_queues[topic] = queue
+        else:
+            global kafka_queue
+            kafka_queue = Queue("_dbos_internal_queue")
         stop_event = threading.Event()
         dbosreg.register_poller(
-            stop_event, _kafka_consumer_loop, func, config, topics, stop_event
+            stop_event, _kafka_consumer_loop, func, config, topics, stop_event, in_order
        )
        return func
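Putting the new flag together, a consumer sketch: `in_order=True` creates one concurrency-1 queue per topic, so each topic's messages are processed sequentially in offset order instead of fanning out on the shared `_dbos_internal_queue`. Stacking the consumer decorator on a workflow follows the library's usual consumer pattern, and `DBOS.logger` is assumed here:

```python
from dbos import DBOS, KafkaMessage

config = {"bootstrap.servers": "localhost:9092", "group.id": "dbos-app"}

# Each message is enqueued onto the topic's dedicated concurrency-1 queue,
# under a deterministic workflow ID derived from topic/partition/offset.
@DBOS.kafka_consumer(config, ["orders"], in_order=True)
@DBOS.workflow()
def handle_order(msg: KafkaMessage) -> None:
    DBOS.logger.info(f"{msg.topic}:{msg.partition}:{msg.offset}")
```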