dbos 0.19.0a4__py3-none-any.whl → 0.20.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_admin_server.py +45 -2
- dbos/_context.py +11 -2
- dbos/_core.py +66 -6
- dbos/_dbos.py +36 -1
- dbos/_error.py +11 -0
- dbos/_fastapi.py +16 -11
- dbos/_flask.py +6 -2
- dbos/_kafka.py +17 -1
- dbos/_queue.py +1 -0
- dbos/_sys_db.py +113 -38
- dbos/_workflow_commands.py +171 -0
- dbos/cli/_github_init.py +107 -0
- dbos/cli/_template_init.py +98 -0
- dbos/cli/cli.py +367 -0
- {dbos-0.19.0a4.dist-info → dbos-0.20.0.dist-info}/METADATA +21 -16
- {dbos-0.19.0a4.dist-info → dbos-0.20.0.dist-info}/RECORD +29 -26
- {dbos-0.19.0a4.dist-info → dbos-0.20.0.dist-info}/entry_points.txt +1 -1
- dbos/cli.py +0 -337
- /dbos/_templates/{hello → dbos-db-starter}/README.md +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/__package/__init__.py +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/__package/main.py +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/__package/schema.py +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/alembic.ini +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/dbos-config.yaml.dbos +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/migrations/env.py.dbos +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/migrations/script.py.mako +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/migrations/versions/2024_07_31_180642_init.py +0 -0
- /dbos/_templates/{hello → dbos-db-starter}/start_postgres_docker.py +0 -0
- {dbos-0.19.0a4.dist-info → dbos-0.20.0.dist-info}/WHEEL +0 -0
- {dbos-0.19.0a4.dist-info → dbos-0.20.0.dist-info}/licenses/LICENSE +0 -0
dbos/_admin_server.py
CHANGED
````diff
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import json
+import re
 import threading
 from functools import partial
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
@@ -15,6 +16,9 @@ if TYPE_CHECKING:
 _health_check_path = "/dbos-healthz"
 _workflow_recovery_path = "/dbos-workflow-recovery"
 _deactivate_path = "/deactivate"
+# /workflows/:workflow_id/cancel
+# /workflows/:workflow_id/resume
+# /workflows/:workflow_id/restart


 class AdminServer:
@@ -79,12 +83,51 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
             self._end_headers()
             self.wfile.write(json.dumps(workflow_ids).encode("utf-8"))
         else:
-            self.send_response(404)
-            self._end_headers()
+
+            restart_match = re.match(
+                r"^/workflows/(?P<workflow_id>[^/]+)/restart$", self.path
+            )
+            resume_match = re.match(
+                r"^/workflows/(?P<workflow_id>[^/]+)/resume$", self.path
+            )
+            cancel_match = re.match(
+                r"^/workflows/(?P<workflow_id>[^/]+)/cancel$", self.path
+            )
+
+            if restart_match:
+                workflow_id = restart_match.group("workflow_id")
+                self._handle_restart(workflow_id)
+            elif resume_match:
+                workflow_id = resume_match.group("workflow_id")
+                self._handle_resume(workflow_id)
+            elif cancel_match:
+                workflow_id = cancel_match.group("workflow_id")
+                self._handle_cancel(workflow_id)
+            else:
+                self.send_response(404)
+                self._end_headers()

     def log_message(self, format: str, *args: Any) -> None:
         return  # Disable admin server request logging

+    def _handle_restart(self, workflow_id: str) -> None:
+        self.dbos.restart_workflow(workflow_id)
+        print("Restarting workflow", workflow_id)
+        self.send_response(204)
+        self._end_headers()
+
+    def _handle_resume(self, workflow_id: str) -> None:
+        print("Resuming workflow", workflow_id)
+        self.dbos.resume_workflow(workflow_id)
+        self.send_response(204)
+        self._end_headers()
+
+    def _handle_cancel(self, workflow_id: str) -> None:
+        print("Cancelling workflow", workflow_id)
+        self.dbos.cancel_workflow(workflow_id)
+        self.send_response(204)
+        self._end_headers()
+

 # Be consistent with DBOS-TS response.
 class PerfUtilization(TypedDict):
````
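The net effect is three new admin-server endpoints for managing a workflow by ID. A minimal sketch of calling them over HTTP; the admin server address and port (3001) and the workflow ID are assumptions for illustration, not part of the diff:

```python
import requests  # illustrative HTTP client; any client works

ADMIN_URL = "http://localhost:3001"  # assumed default admin server address
workflow_id = "example-workflow-id"  # placeholder

# Per the handlers above, each endpoint responds 204 No Content on
# success; unrecognized paths fall through to 404.
for action in ("cancel", "resume", "restart"):
    resp = requests.post(f"{ADMIN_URL}/workflows/{workflow_id}/{action}")
    print(action, resp.status_code)
```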
dbos/_context.py
CHANGED
````diff
@@ -57,6 +57,7 @@ class DBOSContext:
         self.request: Optional["Request"] = None

         self.id_assigned_for_next_workflow: str = ""
+        self.is_within_set_workflow_id_block: bool = False

         self.parent_workflow_id: str = ""
         self.parent_workflow_fid: int = -1
@@ -78,6 +79,7 @@ class DBOSContext:
         rv.logger = self.logger
         rv.id_assigned_for_next_workflow = self.id_assigned_for_next_workflow
         self.id_assigned_for_next_workflow = ""
+        rv.is_within_set_workflow_id_block = self.is_within_set_workflow_id_block
         rv.parent_workflow_id = self.workflow_id
         rv.parent_workflow_fid = self.function_id
         rv.in_recovery = self.in_recovery
@@ -95,6 +97,10 @@ class DBOSContext:
         if len(self.id_assigned_for_next_workflow) > 0:
             wfid = self.id_assigned_for_next_workflow
         else:
+            if self.is_within_set_workflow_id_block:
+                self.logger.warning(
+                    f"Multiple workflows started in the same SetWorkflowID block. Only the first workflow is assigned the specified workflow ID; subsequent workflows will use a generated workflow ID."
+                )
             wfid = str(uuid.uuid4())
         return wfid

@@ -286,7 +292,7 @@ class DBOSContextSwap:

 class SetWorkflowID:
     """
-    Set the workflow ID to be used for the enclosed workflow invocation.
+    Set the workflow ID to be used for the enclosed workflow invocation. Note: Only the first workflow will be started with the specified workflow ID within a `with SetWorkflowID` block.

     Typical Usage
     ```
@@ -311,7 +317,9 @@ class SetWorkflowID:
         if ctx is None:
             self.created_ctx = True
             _set_local_dbos_context(DBOSContext())
-        assert_current_dbos_context().id_assigned_for_next_workflow = self.wfid
+        ctx = assert_current_dbos_context()
+        ctx.id_assigned_for_next_workflow = self.wfid
+        ctx.is_within_set_workflow_id_block = True
         return self

     def __exit__(
@@ -321,6 +329,7 @@ class SetWorkflowID:
         traceback: Optional[TracebackType],
     ) -> Literal[False]:
         # Code to clean up the basic context if we created it
+        assert_current_dbos_context().is_within_set_workflow_id_block = False
         if self.created_ctx:
             _clear_local_dbos_context()
         return False  # Did not handle
````
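The upshot: `SetWorkflowID` now assigns its ID only to the first workflow started inside the block and logs the new warning for any later starts. A sketch of the documented behavior; the workflow function here is illustrative:

```python
from dbos import DBOS, SetWorkflowID

@DBOS.workflow()
def double(x: int) -> int:  # illustrative workflow, not from the diff
    return 2 * x

with SetWorkflowID("my-id"):
    h1 = DBOS.start_workflow(double, 1)  # assigned workflow ID "my-id"
    h2 = DBOS.start_workflow(double, 2)  # warns, gets a generated UUID instead

assert h1.get_workflow_id() == "my-id"
assert h2.get_workflow_id() != "my-id"
```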
dbos/_core.py
CHANGED
````diff
@@ -188,6 +188,7 @@ def _init_workflow(
         wf_status = dbos._sys_db.update_workflow_status(
             status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
         )
+        # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
        dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
     else:
         # Buffer the inputs for single-transaction workflows, but don't buffer the status
@@ -265,7 +266,9 @@
         raise


-def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
+def execute_workflow_by_id(
+    dbos: "DBOS", workflow_id: str, startNew: bool = False
+) -> "WorkflowHandle[Any]":
     status = dbos._sys_db.get_workflow_status(workflow_id)
     if not status:
         raise DBOSRecoveryError(workflow_id, "Workflow status not found")
@@ -292,7 +295,8 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 workflow_id,
                 f"Cannot execute workflow because instance '{iname}' is not registered",
             )
-
+
+        if startNew:
             return start_workflow(
                 dbos,
                 wf_func,
@@ -302,6 +306,17 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 *inputs["args"],
                 **inputs["kwargs"],
             )
+        else:
+            with SetWorkflowID(workflow_id):
+                return start_workflow(
+                    dbos,
+                    wf_func,
+                    status["queue_name"],
+                    True,
+                    dbos._registry.instance_info_map[iname],
+                    *inputs["args"],
+                    **inputs["kwargs"],
+                )
     elif status["class_name"] is not None:
         class_name = status["class_name"]
         if class_name not in dbos._registry.class_info_map:
@@ -309,7 +324,8 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 workflow_id,
                 f"Cannot execute workflow because class '{class_name}' is not registered",
             )
-
+
+        if startNew:
             return start_workflow(
                 dbos,
                 wf_func,
@@ -319,8 +335,19 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 *inputs["args"],
                 **inputs["kwargs"],
             )
+        else:
+            with SetWorkflowID(workflow_id):
+                return start_workflow(
+                    dbos,
+                    wf_func,
+                    status["queue_name"],
+                    True,
+                    dbos._registry.class_info_map[class_name],
+                    *inputs["args"],
+                    **inputs["kwargs"],
+                )
     else:
-
+        if startNew:
             return start_workflow(
                 dbos,
                 wf_func,
@@ -329,6 +356,16 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
                 *inputs["args"],
                 **inputs["kwargs"],
             )
+        else:
+            with SetWorkflowID(workflow_id):
+                return start_workflow(
+                    dbos,
+                    wf_func,
+                    status["queue_name"],
+                    True,
+                    *inputs["args"],
+                    **inputs["kwargs"],
+                )


 @overload
@@ -422,6 +459,9 @@ def start_workflow(
         or wf_status == WorkflowStatusString.ERROR.value
         or wf_status == WorkflowStatusString.SUCCESS.value
     ):
+        dbos.logger.debug(
+            f"Workflow {new_wf_id} already completed with status {wf_status}. Directly returning a workflow handle."
+        )
         return WorkflowHandlePolling(new_wf_id, dbos)

     if fself is not None:
@@ -448,6 +488,22 @@
     return WorkflowHandleFuture(new_wf_id, future, dbos)


+if sys.version_info < (3, 12):
+
+    def _mark_coroutine(func: Callable[P, R]) -> Callable[P, R]:
+        @wraps(func)
+        async def async_wrapper(*args: Any, **kwargs: Any) -> R:
+            return await func(*args, **kwargs)  # type: ignore
+
+        return async_wrapper  # type: ignore
+
+else:
+
+    def _mark_coroutine(func: Callable[P, R]) -> Callable[P, R]:
+        inspect.markcoroutinefunction(func)
+        return func
+
+
 def workflow_wrapper(
     dbosreg: "DBOSRegistry",
     func: Callable[P, R],
@@ -494,7 +550,7 @@ def workflow_wrapper(
             temp_wf_type=get_temp_workflow_type(func),
             max_recovery_attempts=max_recovery_attempts,
         )
-
+        # TODO: maybe modify the parameters if they've been changed by `_init_workflow`
         dbos.logger.debug(
             f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
         )
@@ -508,7 +564,7 @@ def workflow_wrapper(
         )
         return outcome()  # type: ignore

-    return wrapper
+    return _mark_coroutine(wrapper) if inspect.iscoroutinefunction(func) else wrapper


 def decorate_workflow(
@@ -798,6 +854,10 @@ def decorate_step(
             assert tempwf
             return tempwf(*args, **kwargs)

+        wrapper = (
+            _mark_coroutine(wrapper) if inspect.iscoroutinefunction(func) else wrapper  # type: ignore
+        )
+
         def temp_wf_sync(*args: Any, **kwargs: Any) -> Any:
             return wrapper(*args, **kwargs)

````
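The `_mark_coroutine` helper addresses a subtlety: `workflow_wrapper` returns a plain `def`, so even when the wrapped workflow is `async`, `inspect.iscoroutinefunction` reports `False` on the wrapper and framework callers may not await it. On Python 3.12+, `inspect.markcoroutinefunction` flags the wrapper in place; older versions need a real `async def` wrapper, as the diff shows. A standalone sketch of the problem and the 3.12+ fix, independent of DBOS:

```python
import inspect
import sys
from functools import wraps

def passthrough(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # For an async func this returns a coroutine object,
        # but the wrapper itself is an ordinary function.
        return func(*args, **kwargs)
    return wrapper

async def work() -> str:
    return "done"

wrapped = passthrough(work)
print(inspect.iscoroutinefunction(wrapped))  # False: the async-ness is hidden

if sys.version_info >= (3, 12):
    inspect.markcoroutinefunction(wrapped)       # flag it without re-wrapping
    print(inspect.iscoroutinefunction(wrapped))  # True
```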
dbos/_dbos.py
CHANGED
````diff
@@ -56,6 +56,7 @@ from ._registrations import (
 )
 from ._roles import default_required_roles, required_roles
 from ._scheduler import ScheduledWorkflow, scheduled
+from ._sys_db import WorkflowStatusString, reset_system_database
 from ._tracer import dbos_tracer

 if TYPE_CHECKING:
@@ -231,6 +232,7 @@ class DBOS:
                     f"DBOS configured multiple times with conflicting information"
                 )
                 config = _dbos_global_registry.config
+
             _dbos_global_instance = super().__new__(cls)
             _dbos_global_instance.__init__(fastapi=fastapi, config=config, flask=flask)  # type: ignore
         else:
@@ -243,7 +245,7 @@ class DBOS:
         return _dbos_global_instance

     @classmethod
-    def destroy(cls, *, destroy_registry: bool =
+    def destroy(cls, *, destroy_registry: bool = False) -> None:
         global _dbos_global_instance
         if _dbos_global_instance is not None:
             _dbos_global_instance._destroy()
@@ -407,6 +409,22 @@ class DBOS:
             dbos_logger.error(f"DBOS failed to launch: {traceback.format_exc()}")
             raise

+    @classmethod
+    def reset_system_database(cls) -> None:
+        """
+        Destroy the DBOS system database. Useful for resetting the state of DBOS between tests.
+        This is a destructive operation and should only be used in a test environment.
+        More information on testing DBOS apps: https://docs.dbos.dev/python/tutorials/testing
+        """
+        if _dbos_global_instance is not None:
+            _dbos_global_instance._reset_system_database()
+
+    def _reset_system_database(self) -> None:
+        assert (
+            not self._launched
+        ), "The system database cannot be reset after DBOS is launched. Resetting the system database is a destructive operation that should only be used in a test environment."
+        reset_system_database(self.config)
+
     def _destroy(self) -> None:
         self._initialized = False
         for event in self.stop_events:
@@ -767,6 +785,11 @@ class DBOS:
         """Execute a workflow by ID (for recovery)."""
         return execute_workflow_by_id(_get_dbos_instance(), workflow_id)

+    @classmethod
+    def restart_workflow(cls, workflow_id: str) -> None:
+        """Restart a workflow by ID, re-running it from the start."""
+        execute_workflow_by_id(_get_dbos_instance(), workflow_id, True)
+
     @classmethod
     def recover_pending_workflows(
         cls, executor_ids: List[str] = ["local"]
@@ -774,6 +797,18 @@ class DBOS:
         """Find all PENDING workflows and execute them."""
         return recover_pending_workflows(_get_dbos_instance(), executor_ids)

+    @classmethod
+    def cancel_workflow(cls, workflow_id: str) -> None:
+        """Cancel a workflow by ID."""
+        _get_dbos_instance()._sys_db.set_workflow_status(
+            workflow_id, WorkflowStatusString.CANCELLED, False
+        )
+
+    @classmethod
+    def resume_workflow(cls, workflow_id: str) -> None:
+        """Resume a workflow by ID."""
+        execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
+
     @classproperty
     def logger(cls) -> Logger:
         """Return the DBOS `Logger` for the current context."""
````
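Since `_reset_system_database` asserts that DBOS has not launched, the natural place for the new `reset_system_database` classmethod is test setup. A pytest-style sketch; the fixture name and config are purely illustrative:

```python
import pytest
from dbos import DBOS

my_config = {"name": "test-app"}  # minimal placeholder; a real ConfigFile also needs database settings

@pytest.fixture()
def dbos_fixture():
    DBOS.destroy()                 # drop any instance left over from a previous test
    dbos = DBOS(config=my_config)
    DBOS.reset_system_database()   # allowed: DBOS is not launched yet
    DBOS.launch()
    yield dbos
    DBOS.destroy()
```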
dbos/_error.py
CHANGED
````diff
@@ -35,6 +35,7 @@ class DBOSErrorCode(Enum):
     DeadLetterQueueError = 6
     MaxStepRetriesExceeded = 7
     NotAuthorized = 8
+    ConflictingWorkflowError = 9


 class DBOSWorkflowConflictIDError(DBOSException):
@@ -47,6 +48,16 @@ class DBOSWorkflowConflictIDError(DBOSException):
         )


+class DBOSConflictingWorkflowError(DBOSException):
+    """Exception raised when different workflows are started with the same workflow ID."""
+
+    def __init__(self, workflow_id: str, message: Optional[str] = None):
+        super().__init__(
+            f"Conflicting workflow invocation with the same ID ({workflow_id}): {message}",
+            dbos_error_code=DBOSErrorCode.ConflictingWorkflowError.value,
+        )
+
+
 class DBOSRecoveryError(DBOSException):
     """Exception raised when a workflow recovery fails."""

````
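`DBOSConflictingWorkflowError` is distinct from `DBOSWorkflowConflictIDError` above it: presumably it fires when an existing workflow ID is reused by a *different* workflow, a condition the larger `_sys_db` changes in this release can now detect. A hedged sketch of the scenario; the workflow functions are illustrative, and whether this exact call raises depends on `_sys_db`'s checks:

```python
from dbos import DBOS, SetWorkflowID
from dbos._error import DBOSConflictingWorkflowError

@DBOS.workflow()
def workflow_a() -> None: ...

@DBOS.workflow()
def workflow_b() -> None: ...

with SetWorkflowID("shared-id"):
    workflow_a()

try:
    with SetWorkflowID("shared-id"):
        workflow_b()  # a different workflow reusing an existing ID
except DBOSConflictingWorkflowError as e:
    print(e)
```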
dbos/_fastapi.py
CHANGED
````diff
@@ -1,10 +1,10 @@
 import uuid
-from typing import Any, Callable, cast
+from typing import Any, Callable, MutableMapping, cast

 from fastapi import FastAPI
 from fastapi import Request as FastAPIRequest
 from fastapi.responses import JSONResponse
-from starlette.types import ASGIApp,
+from starlette.types import ASGIApp, Receive, Scope, Send

 from . import DBOS
 from ._context import (
@@ -61,15 +61,16 @@ class LifespanMiddleware:

     async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
         if scope["type"] == "lifespan":
-
-
-            if message["type"] == "lifespan.startup":
+
+            async def wrapped_send(message: MutableMapping[str, Any]) -> None:
+                if message["type"] == "lifespan.startup.complete":
                     self.dbos._launch()
-
-            elif message["type"] == "lifespan.shutdown":
+                elif message["type"] == "lifespan.shutdown.complete":
                     self.dbos._destroy()
-
-
+                await send(message)
+
+            # Call the original app with our wrapped functions
+            await self.app(scope, receive, wrapped_send)
         else:
             await self.app(scope, receive, send)

@@ -94,7 +95,11 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
         with EnterDBOSHandler(attributes):
             ctx = assert_current_dbos_context()
             ctx.request = _make_request(request)
-            workflow_id = request.headers.get("dbos-idempotency-key"
-
+            workflow_id = request.headers.get("dbos-idempotency-key")
+            if workflow_id is not None:
+                # Set the workflow ID for the handler
+                with SetWorkflowID(workflow_id):
+                    response = await call_next(request)
+            else:
                 response = await call_next(request)
         return response
````
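The rewritten `LifespanMiddleware` no longer inspects inbound lifespan messages; it wraps `send` and reacts to the app's *completion* events, so the inner app's own startup handlers finish before DBOS launches. A generic, self-contained sketch of this send-wrapping pattern; the class and callback names are illustrative:

```python
from typing import Any, Callable, MutableMapping

Message = MutableMapping[str, Any]

class SendWrappingLifespan:
    """Illustrative ASGI middleware reacting to lifespan completion events."""

    def __init__(self, app: Any, on_startup: Callable[[], None], on_shutdown: Callable[[], None]):
        self.app = app
        self.on_startup = on_startup
        self.on_shutdown = on_shutdown

    async def __call__(self, scope: Any, receive: Any, send: Any) -> None:
        if scope["type"] != "lifespan":
            await self.app(scope, receive, send)
            return

        async def wrapped_send(message: Message) -> None:
            # Fire callbacks only once the inner app reports completion.
            if message["type"] == "lifespan.startup.complete":
                self.on_startup()
            elif message["type"] == "lifespan.shutdown.complete":
                self.on_shutdown()
            await send(message)

        await self.app(scope, receive, wrapped_send)
```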
dbos/_flask.py
CHANGED
````diff
@@ -34,8 +34,12 @@ class FlaskMiddleware:
         with EnterDBOSHandler(attributes):
             ctx = assert_current_dbos_context()
             ctx.request = _make_request(request)
-            workflow_id = request.headers.get("dbos-idempotency-key"
-
+            workflow_id = request.headers.get("dbos-idempotency-key")
+            if workflow_id is not None:
+                # Set the workflow ID for the handler
+                with SetWorkflowID(workflow_id):
+                    response = self.app(environ, start_response)
+            else:
                 response = self.app(environ, start_response)
         return response

````
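With this change, both the FastAPI and Flask middlewares apply `SetWorkflowID` only when the caller actually supplies a `dbos-idempotency-key` header. A client-side sketch; the URL and key are placeholders, and this assumes the handler starts a DBOS workflow:

```python
import requests  # illustrative client

url = "http://localhost:8000/checkout"          # hypothetical app endpoint
headers = {"dbos-idempotency-key": "order-12345"}  # caller-chosen key

# Both requests resolve to the same workflow ID, so the second call
# should return the recorded result rather than re-running the handler.
r1 = requests.post(url, headers=headers)
r2 = requests.post(url, headers=headers)
print(r1.status_code, r2.status_code)
```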
dbos/_kafka.py
CHANGED
````diff
@@ -1,3 +1,4 @@
+import re
 import threading
 from typing import TYPE_CHECKING, Any, Callable, NoReturn

@@ -19,6 +20,14 @@ _kafka_queue: Queue
 _in_order_kafka_queues: dict[str, Queue] = {}


+def safe_group_name(method_name: str, topics: list[str]) -> str:
+    safe_group_id = "-".join(
+        re.sub(r"[^a-zA-Z0-9\-]", "", str(r)) for r in [method_name, *topics]
+    )
+
+    return f"dbos-kafka-group-{safe_group_id}"[:255]
+
+
 def _kafka_consumer_loop(
     func: _KafkaConsumerWorkflow,
     config: dict[str, Any],
@@ -34,6 +43,12 @@ def _kafka_consumer_loop(
     if "auto.offset.reset" not in config:
         config["auto.offset.reset"] = "earliest"

+    if config.get("group.id") is None:
+        config["group.id"] = safe_group_name(func.__qualname__, topics)
+        dbos_logger.warning(
+            f"Consumer group ID not found. Using generated group.id {config['group.id']}"
+        )
+
     consumer = Consumer(config)
     try:
         consumer.subscribe(topics)
@@ -71,8 +86,9 @@ def _kafka_consumer_loop(
                 topic=cmsg.topic(),
                 value=cmsg.value(),
             )
+            groupID = config.get("group.id")
             with SetWorkflowID(
-                f"kafka-unique-id-{msg.topic}-{msg.partition}-{msg.offset}"
+                f"kafka-unique-id-{msg.topic}-{msg.partition}-{groupID}-{msg.offset}"
             ):
                 if in_order:
                     assert msg.topic is not None
````