dbos 0.26.1__tar.gz → 0.27.0__tar.gz
- {dbos-0.26.1 → dbos-0.27.0}/PKG-INFO +1 -1
- {dbos-0.26.1 → dbos-0.27.0}/dbos/__init__.py +4 -1
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_admin_server.py +5 -4
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_app_db.py +17 -5
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_client.py +46 -15
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_context.py +50 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_core.py +13 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_dbos.py +35 -7
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_dbos_config.py +6 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_error.py +28 -0
- dbos-0.27.0/dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py +45 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_queue.py +5 -3
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_schemas/system_database.py +9 -0
- dbos-0.27.0/dbos/_serialization.py +106 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_sys_db.py +98 -48
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_tracer.py +9 -1
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_workflow_commands.py +15 -2
- {dbos-0.26.1 → dbos-0.27.0}/dbos/cli/cli.py +166 -117
- {dbos-0.26.1 → dbos-0.27.0}/pyproject.toml +1 -1
- {dbos-0.26.1 → dbos-0.27.0}/tests/conftest.py +1 -1
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_async.py +2 -2
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_client.py +59 -8
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_config.py +20 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_failures.py +46 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_package.py +76 -6
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_queue.py +123 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_spans.py +24 -10
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_workflow_management.py +66 -25
- dbos-0.26.1/dbos/_serialization.py +0 -55
- {dbos-0.26.1 → dbos-0.27.0}/LICENSE +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/README.md +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/__main__.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_classproperty.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_conductor/conductor.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_conductor/protocol.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_croniter.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_debug.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_docker_pg_helper.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_event_loop.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_fastapi.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_flask.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_kafka.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_kafka_message.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_logger.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/env.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/script.py.mako +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/83f3732ae8e7_workflow_timeout.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_outcome.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_recovery.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_registrations.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_request.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_roles.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_scheduler.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_schemas/__init__.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_schemas/application_database.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/README.md +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/__package/main.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/_utils.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/cli/_github_init.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/cli/_template_init.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/dbos-config.schema.json +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/dbos/py.typed +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/__init__.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/atexit_no_ctor.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/atexit_no_launch.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/classdefs.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/client_collateral.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/client_worker.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/dupname_classdefs1.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/dupname_classdefsa.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/more_classdefs.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/queuedworkflow.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_admin_server.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_classdecorators.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_concurrency.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_croniter.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_dbos.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_debug.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_docker_secrets.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_fastapi.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_fastapi_roles.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_flask.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_kafka.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_outcome.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_scheduler.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_schema_migration.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_singleton.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_sqlalchemy.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/tests/test_workflow_introspection.py +0 -0
- {dbos-0.26.1 → dbos-0.27.0}/version/__init__.py +0 -0
{dbos-0.26.1 → dbos-0.27.0}/dbos/__init__.py

@@ -3,10 +3,11 @@ from ._client import DBOSClient, EnqueueOptions
 from ._context import (
     DBOSContextEnsure,
     DBOSContextSetAuth,
+    SetEnqueueOptions,
     SetWorkflowID,
     SetWorkflowTimeout,
 )
-from ._dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle
+from ._dbos import DBOS, DBOSConfiguredInstance, WorkflowHandle, WorkflowHandleAsync
 from ._dbos_config import ConfigFile, DBOSConfig, get_dbos_database_url, load_config
 from ._kafka_message import KafkaMessage
 from ._queue import Queue
@@ -25,7 +26,9 @@ __all__ = [
     "KafkaMessage",
     "SetWorkflowID",
     "SetWorkflowTimeout",
+    "SetEnqueueOptions",
     "WorkflowHandle",
+    "WorkflowHandleAsync",
     "WorkflowStatus",
     "WorkflowStatusString",
     "load_config",
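Version 0.27.0 adds two public exports: `SetEnqueueOptions`, a context manager for per-enqueue options such as deduplication IDs, and `WorkflowHandleAsync`, the async counterpart of `WorkflowHandle` returned by the `*_async` APIs. A minimal sketch of the new surface, assuming a configured and launched DBOS app (queue and workflow names are hypothetical):

```python
from dbos import DBOS, Queue, SetEnqueueOptions

queue = Queue("example_queue")

@DBOS.workflow()
def example_workflow(x: int) -> int:
    return x + 1

# Scope a deduplication ID to this one enqueue: while a workflow with this
# ID is pending on the queue, further enqueues with the same ID are rejected.
with SetEnqueueOptions(deduplication_id="order-1234"):
    handle = queue.enqueue(example_workflow, 41)
print(handle.get_result())
```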
{dbos-0.26.1 → dbos-0.27.0}/dbos/_admin_server.py

@@ -45,9 +45,10 @@ class AdminServer:


 class AdminRequestHandler(BaseHTTPRequestHandler):
+    is_deactivated = False
+
     def __init__(self, dbos: DBOS, *args: Any, **kwargs: Any) -> None:
         self.dbos = dbos
-        self.is_deactivated = False
         super().__init__(*args, **kwargs)

     def _end_headers(self) -> None:
@@ -63,11 +64,11 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
             self._end_headers()
             self.wfile.write("healthy".encode("utf-8"))
         elif self.path == _deactivate_path:
-            if not self.is_deactivated:
+            if not AdminRequestHandler.is_deactivated:
                 dbos_logger.info(
                     f"Deactivating DBOS executor {GlobalParams.executor_id} with version {GlobalParams.app_version}. This executor will complete existing workflows but will not start new workflows."
                 )
-
+                AdminRequestHandler.is_deactivated = True
                 # Stop all scheduled workflows, queues, and kafka loops
                 for event in self.dbos.stop_events:
                     event.set()
@@ -209,7 +210,7 @@
         self._end_headers()

     def _handle_steps(self, workflow_id: str) -> None:
-        steps = self.dbos.
+        steps = self.dbos.list_workflow_steps(workflow_id)

         updated_steps = [
             {
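A note on why the `is_deactivated` change matters: `http.server.BaseHTTPRequestHandler` constructs a fresh handler instance for every incoming request, so an instance attribute set while handling one `/deactivate` call is lost before the next request arrives. Moving the flag to the class makes it state shared across requests. A minimal illustration of the pitfall (hypothetical handler, not from the diff):

```python
from http.server import BaseHTTPRequestHandler

class Handler(BaseHTTPRequestHandler):
    # Class attribute: shared by every per-request handler instance.
    deactivated = False

    def do_GET(self) -> None:
        if not Handler.deactivated:
            # Writing through the class persists across requests;
            # self.deactivated = True would die with this instance.
            Handler.deactivated = True
        self.send_response(200)
        self.end_headers()
```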
{dbos-0.26.1 → dbos-0.27.0}/dbos/_app_db.py

@@ -7,7 +7,7 @@ from sqlalchemy.exc import DBAPIError
 from sqlalchemy.orm import Session, sessionmaker

 from . import _serialization
-from ._dbos_config import
+from ._dbos_config import DatabaseConfig
 from ._error import DBOSUnexpectedStepError, DBOSWorkflowConflictIDError
 from ._schemas.application_database import ApplicationSchema
 from ._sys_db import StepInfo
@@ -77,12 +77,24 @@ class ApplicationDatabase:
         pool_size = database.get("app_db_pool_size")
         if pool_size is None:
             pool_size = 20
+
+        engine_kwargs = database.get("db_engine_kwargs")
+        if engine_kwargs is None:
+            engine_kwargs = {}
+
+        # Respect user-provided values. Otherwise, set defaults.
+        if "pool_size" not in engine_kwargs:
+            engine_kwargs["pool_size"] = pool_size
+        if "max_overflow" not in engine_kwargs:
+            engine_kwargs["max_overflow"] = 0
+        if "pool_timeout" not in engine_kwargs:
+            engine_kwargs["pool_timeout"] = 30
+        if "connect_args" not in engine_kwargs:
+            engine_kwargs["connect_args"] = connect_args
+
         self.engine = sa.create_engine(
             app_db_url,
-            pool_size=pool_size,
-            max_overflow=0,
-            pool_timeout=30,
-            connect_args=connect_args,
+            **engine_kwargs,
         )
         self.sessionmaker = sessionmaker(bind=self.engine)
         self.debug_mode = debug_mode
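The net effect of this hunk: keys the user supplies via `db_engine_kwargs` win, and the previously hard-coded engine arguments become fallbacks that fill in only when absent. A sketch of the equivalent merge, assuming a user passed only `pool_size` (URL and values hypothetical):

```python
import sqlalchemy as sa

user_kwargs = {"pool_size": 5}  # from the db_engine_kwargs config key
defaults = {"pool_size": 20, "max_overflow": 0, "pool_timeout": 30}
engine_kwargs = {**defaults, **user_kwargs}  # user values override defaults
engine = sa.create_engine(
    "postgresql+psycopg://user:pass@localhost:5432/app_db", **engine_kwargs
)
```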
{dbos-0.26.1 → dbos-0.27.0}/dbos/_client.py

@@ -1,9 +1,10 @@
 import asyncio
 import sys
-import time
 import uuid
 from typing import Any, Generic, List, Optional, TypedDict, TypeVar

+from sqlalchemy import URL
+
 from dbos._app_db import ApplicationDatabase

 if sys.version_info < (3, 11):
@@ -18,6 +19,7 @@ from dbos._error import DBOSNonExistentWorkflowError
 from dbos._registrations import DEFAULT_MAX_RECOVERY_ATTEMPTS
 from dbos._serialization import WorkflowInputs
 from dbos._sys_db import (
+    EnqueueOptionsInternal,
     StepInfo,
     SystemDatabase,
     WorkflowStatus,
@@ -41,6 +43,7 @@ class EnqueueOptions(TypedDict):
     workflow_id: NotRequired[str]
     app_version: NotRequired[str]
     workflow_timeout: NotRequired[float]
+    deduplication_id: NotRequired[str]


 class WorkflowHandleClientPolling(Generic[R]):
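With the new `deduplication_id` field, an external process can enqueue through `DBOSClient` and let the queue reject duplicates. A hedged sketch (connection string, queue, and workflow names are hypothetical):

```python
from dbos import DBOSClient, EnqueueOptions

client = DBOSClient("postgresql://user:pass@localhost:5432/app_db")

options: EnqueueOptions = {
    "queue_name": "payments",
    "workflow_name": "process_payment",
    # At most one workflow with this ID may be pending on the queue at a time.
    "deduplication_id": "payment-789",
}
handle = client.enqueue(options, 789)  # trailing args are the workflow inputs
result = handle.get_result()
```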
{dbos-0.26.1 → dbos-0.27.0}/dbos/_client.py (continued)

@@ -57,7 +60,7 @@ class WorkflowHandleClientPolling(Generic[R]):
         return res

     def get_status(self) -> WorkflowStatus:
-        status = get_workflow(self._sys_db, self.workflow_id,
+        status = get_workflow(self._sys_db, self.workflow_id, False)
         if status is None:
             raise DBOSNonExistentWorkflowError(self.workflow_id)
         return status
@@ -80,7 +83,7 @@ class WorkflowHandleClientAsyncPolling(Generic[R]):

     async def get_status(self) -> WorkflowStatus:
         status = await asyncio.to_thread(
-            get_workflow, self._sys_db, self.workflow_id,
+            get_workflow, self._sys_db, self.workflow_id, False
         )
         if status is None:
             raise DBOSNonExistentWorkflowError(self.workflow_id)
@@ -94,6 +97,7 @@ class DBOSClient:
         db_config["sys_db_name"] = system_database
         self._sys_db = SystemDatabase(db_config)
         self._app_db = ApplicationDatabase(db_config)
+        self._db_url = database_url

     def destroy(self) -> None:
         self._sys_db.destroy()
@@ -110,6 +114,9 @@ class DBOSClient:
         if workflow_id is None:
             workflow_id = str(uuid.uuid4())
         workflow_timeout = options.get("workflow_timeout", None)
+        enqueue_options_internal: EnqueueOptionsInternal = {
+            "deduplication_id": options.get("deduplication_id"),
+        }

         status: WorkflowStatusInternal = {
             "workflow_uuid": workflow_id,
@@ -142,7 +149,10 @@ class DBOSClient:
         }

         self._sys_db.init_workflow(
-            status,
+            status,
+            _serialization.serialize_args(inputs),
+            max_recovery_attempts=None,
+            enqueue_options=enqueue_options_internal,
         )
         return workflow_id

@@ -159,13 +169,13 @@ class DBOSClient:
         return WorkflowHandleClientAsyncPolling[R](workflow_id, self._sys_db)

     def retrieve_workflow(self, workflow_id: str) -> WorkflowHandle[R]:
-        status = get_workflow(self._sys_db, workflow_id,
+        status = get_workflow(self._sys_db, workflow_id, False)
         if status is None:
             raise DBOSNonExistentWorkflowError(workflow_id)
         return WorkflowHandleClientPolling[R](workflow_id, self._sys_db)

     async def retrieve_workflow_async(self, workflow_id: str) -> WorkflowHandleAsync[R]:
-        status = asyncio.to_thread(get_workflow, self._sys_db, workflow_id,
+        status = asyncio.to_thread(get_workflow, self._sys_db, workflow_id, False)
         if status is None:
             raise DBOSNonExistentWorkflowError(workflow_id)
         return WorkflowHandleClientAsyncPolling[R](workflow_id, self._sys_db)
@@ -233,11 +243,13 @@ class DBOSClient:
     async def cancel_workflow_async(self, workflow_id: str) -> None:
         await asyncio.to_thread(self.cancel_workflow, workflow_id)

-    def resume_workflow(self, workflow_id: str) ->
+    def resume_workflow(self, workflow_id: str) -> WorkflowHandle[Any]:
         self._sys_db.resume_workflow(workflow_id)
+        return WorkflowHandleClientPolling[Any](workflow_id, self._sys_db)

-    async def resume_workflow_async(self, workflow_id: str) ->
+    async def resume_workflow_async(self, workflow_id: str) -> WorkflowHandleAsync[Any]:
         await asyncio.to_thread(self.resume_workflow, workflow_id)
+        return WorkflowHandleClientAsyncPolling[Any](workflow_id, self._sys_db)

     def list_workflows(
         self,
@@ -351,16 +363,35 @@ class DBOSClient:
     async def list_workflow_steps_async(self, workflow_id: str) -> List[StepInfo]:
         return await asyncio.to_thread(self.list_workflow_steps, workflow_id)

-    def fork_workflow(
+    def fork_workflow(
+        self,
+        workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str] = None,
+    ) -> WorkflowHandle[Any]:
         forked_workflow_id = fork_workflow(
-            self._sys_db,
+            self._sys_db,
+            self._app_db,
+            workflow_id,
+            start_step,
+            application_version=application_version,
         )
-        return WorkflowHandleClientPolling[
+        return WorkflowHandleClientPolling[Any](forked_workflow_id, self._sys_db)

     async def fork_workflow_async(
-        self,
-
+        self,
+        workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str] = None,
+    ) -> WorkflowHandleAsync[Any]:
         forked_workflow_id = await asyncio.to_thread(
-            fork_workflow,
+            fork_workflow,
+            self._sys_db,
+            self._app_db,
+            workflow_id,
+            start_step,
+            application_version=application_version,
         )
-        return WorkflowHandleClientAsyncPolling[
+        return WorkflowHandleClientAsyncPolling[Any](forked_workflow_id, self._sys_db)
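Forking from the client now takes a start step and an optional pinned application version. A sketch continuing the client example above (IDs hypothetical):

```python
# Re-run an existing workflow from step 2 onward under a new workflow ID,
# optionally pinning the application version the fork should execute on.
forked = client.fork_workflow("original-workflow-id", 2, application_version="v2")
print(forked.get_status().status)
```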
{dbos-0.26.1 → dbos-0.27.0}/dbos/_context.py

@@ -98,6 +98,9 @@ class DBOSContext:
         # A propagated workflow deadline.
         self.workflow_deadline_epoch_ms: Optional[int] = None

+        # A user-specified deduplication ID for the enqueuing workflow.
+        self.deduplication_id: Optional[str] = None
+
     def create_child(self) -> DBOSContext:
         rv = DBOSContext()
         rv.logger = self.logger
@@ -413,12 +416,53 @@ class SetWorkflowTimeout:
         return False  # Did not handle


+class SetEnqueueOptions:
+    """
+    Set the workflow enqueue options for the enclosed enqueue operation.
+
+    Usage:
+    ```
+    with SetEnqueueOptions(deduplication_id=<deduplication id>):
+        queue.enqueue(...)
+    ```
+    """
+
+    def __init__(self, *, deduplication_id: Optional[str] = None) -> None:
+        self.created_ctx = False
+        self.deduplication_id: Optional[str] = deduplication_id
+        self.saved_deduplication_id: Optional[str] = None
+
+    def __enter__(self) -> SetEnqueueOptions:
+        # Code to create a basic context
+        ctx = get_local_dbos_context()
+        if ctx is None:
+            self.created_ctx = True
+            _set_local_dbos_context(DBOSContext())
+        ctx = assert_current_dbos_context()
+        self.saved_deduplication_id = ctx.deduplication_id
+        ctx.deduplication_id = self.deduplication_id
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> Literal[False]:
+        assert_current_dbos_context().deduplication_id = self.saved_deduplication_id
+        # Code to clean up the basic context if we created it
+        if self.created_ctx:
+            _clear_local_dbos_context()
+        return False
+
+
 class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
     def __init__(self, attributes: TracedAttributes) -> None:
         self.created_ctx = False
         self.attributes = attributes
         self.is_temp_workflow = attributes["name"] == "temp_wf"
         self.saved_workflow_timeout: Optional[int] = None
+        self.saved_deduplication_id: Optional[str] = None

     def __enter__(self) -> DBOSContext:
         # Code to create a basic context
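A sketch of the save/restore semantics this context manager implements: the prior `deduplication_id` is stashed on entry and restored on exit, so scopes nest cleanly (queue and tasks are hypothetical):

```python
with SetEnqueueOptions(deduplication_id="outer"):
    with SetEnqueueOptions(deduplication_id="inner"):
        queue.enqueue(inner_task)  # enqueued with deduplication_id="inner"
    queue.enqueue(outer_task)      # restored to deduplication_id="outer"
```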
{dbos-0.26.1 → dbos-0.27.0}/dbos/_context.py (continued)

@@ -432,6 +476,10 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         # workflow's children (instead we propagate the deadline)
         self.saved_workflow_timeout = ctx.workflow_timeout_ms
         ctx.workflow_timeout_ms = None
+        # Unset the deduplication_id context var so it is not applied to this
+        # workflow's children
+        self.saved_deduplication_id = ctx.deduplication_id
+        ctx.deduplication_id = None
         ctx.start_workflow(
             None, self.attributes, self.is_temp_workflow
         )  # Will get from the context's next workflow ID
@@ -450,6 +498,8 @@ class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
         ctx.workflow_timeout_ms = self.saved_workflow_timeout
         # Clear any propagating timeout
         ctx.workflow_deadline_epoch_ms = None
+        # Restore the saved deduplication ID
+        ctx.deduplication_id = self.saved_deduplication_id
         # Code to clean up the basic context if we created it
         if self.created_ctx:
             _clear_local_dbos_context()
{dbos-0.26.1 → dbos-0.27.0}/dbos/_core.py

@@ -71,6 +71,7 @@ from ._registrations import (
 from ._roles import check_required_roles
 from ._serialization import WorkflowInputs
 from ._sys_db import (
+    EnqueueOptionsInternal,
     GetEventWorkflowContext,
     OperationResultInternal,
     WorkflowStatus,
@@ -234,6 +235,7 @@ def _init_workflow(
     workflow_timeout_ms: Optional[int],
     workflow_deadline_epoch_ms: Optional[int],
     max_recovery_attempts: Optional[int],
+    enqueue_options: Optional[EnqueueOptionsInternal],
 ) -> WorkflowStatusInternal:
     wfid = (
         ctx.workflow_id
@@ -289,6 +291,7 @@ def _init_workflow(
         status,
         _serialization.serialize_args(inputs),
         max_recovery_attempts=max_recovery_attempts,
+        enqueue_options=enqueue_options,
     )

     if workflow_deadline_epoch_ms is not None:
@@ -539,6 +542,9 @@ def start_workflow(
     workflow_timeout_ms = (
         local_ctx.workflow_timeout_ms if local_ctx is not None else None
     )
+    enqueue_options = EnqueueOptionsInternal(
+        deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
+    )
     new_wf_id, new_wf_ctx = _get_new_wf()

     ctx = new_wf_ctx
@@ -561,6 +567,7 @@ def start_workflow(
         workflow_timeout_ms=workflow_timeout_ms,
         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=fi.max_recovery_attempts,
+        enqueue_options=enqueue_options,
     )

     wf_status = status["status"]
@@ -626,6 +633,9 @@ async def start_workflow_async(
     workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
         local_ctx, queue_name
     )
+    enqueue_options = EnqueueOptionsInternal(
+        deduplication_id=local_ctx.deduplication_id if local_ctx is not None else None,
+    )
     new_wf_id, new_wf_ctx = _get_new_wf()

     ctx = new_wf_ctx
@@ -651,6 +661,7 @@ async def start_workflow_async(
         workflow_timeout_ms=workflow_timeout_ms,
         workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
         max_recovery_attempts=fi.max_recovery_attempts,
+        enqueue_options=enqueue_options,
     )

     if ctx.has_parent():
@@ -727,6 +738,7 @@ def workflow_wrapper(
         workflow_timeout_ms, workflow_deadline_epoch_ms = _get_timeout_deadline(
             ctx, queue=None
         )
+
         enterWorkflowCtxMgr = (
             EnterDBOSChildWorkflow if ctx and ctx.is_workflow() else EnterDBOSWorkflow
         )
@@ -768,6 +780,7 @@ def workflow_wrapper(
             workflow_timeout_ms=workflow_timeout_ms,
             workflow_deadline_epoch_ms=workflow_deadline_epoch_ms,
             max_recovery_attempts=max_recovery_attempts,
+            enqueue_options=None,
        )

         # TODO: maybe modify the parameters if they've been changed by `_init_workflow`
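Taken together, these hunks thread the deduplication ID from the ambient context into the queue: `start_workflow` and `start_workflow_async` read `local_ctx.deduplication_id` (set by `SetEnqueueOptions`) into an `EnqueueOptionsInternal` and hand it to `_init_workflow`, while the direct invocation path in `workflow_wrapper` passes `enqueue_options=None`, so deduplication applies only to enqueued workflows.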
{dbos-0.26.1 → dbos-0.27.0}/dbos/_dbos.py

@@ -64,8 +64,9 @@ from ._registrations import (
 )
 from ._roles import default_required_roles, required_roles
 from ._scheduler import ScheduledWorkflow, scheduled
-from .
-from .
+from ._schemas.system_database import SystemSchema
+from ._sys_db import StepInfo, SystemDatabase, WorkflowStatus, reset_system_database
+from ._tracer import DBOSTracer, dbos_tracer

 if TYPE_CHECKING:
     from fastapi import FastAPI
@@ -73,14 +74,15 @@ if TYPE_CHECKING:
     from ._request import Request
     from flask import Flask

+    from sqlalchemy import URL
     from sqlalchemy.orm import Session

     from ._request import Request

 if sys.version_info < (3, 10):
-    from typing_extensions import ParamSpec
+    from typing_extensions import ParamSpec
 else:
-    from typing import ParamSpec
+    from typing import ParamSpec

 from ._admin_server import AdminServer
 from ._app_db import ApplicationDatabase
@@ -109,7 +111,6 @@ from ._error import (
 )
 from ._event_loop import BackgroundEventLoop
 from ._logger import add_otlp_to_all_loggers, config_logger, dbos_logger, init_logger
-from ._sys_db import SystemDatabase
 from ._workflow_commands import get_workflow, list_workflow_steps

 # Most DBOS functions are just any callable F, so decorators / wrappers work on F
@@ -563,7 +564,22 @@ class DBOS:
         assert (
             not self._launched
         ), "The system database cannot be reset after DBOS is launched. Resetting the system database is a destructive operation that should only be used in a test environment."
-
+
+        sysdb_name = (
+            self._config["database"]["sys_db_name"]
+            if "sys_db_name" in self._config["database"]
+            and self._config["database"]["sys_db_name"]
+            else self._config["database"]["app_db_name"] + SystemSchema.sysdb_suffix
+        )
+        postgres_db_url = URL.create(
+            "postgresql+psycopg",
+            username=self._config["database"]["username"],
+            password=self._config["database"]["password"],
+            host=self._config["database"]["hostname"],
+            port=self._config["database"]["port"],
+            database="postgres",
+        )
+        reset_system_database(postgres_db_url, sysdb_name)

     def _destroy(self) -> None:
         self._initialized = False
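`reset_system_database` is destructive and meant for test setup: the new implementation derives the system database name from the config and connects to the `postgres` maintenance database to drop it, so the next launch starts clean. A hedged usage sketch, valid only before launch:

```python
dbos = DBOS(config=config)
# Destructive: drops the DBOS system database. Test environments only,
# and only before DBOS.launch().
dbos.reset_system_database()
DBOS.launch()
```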
{dbos-0.26.1 → dbos-0.27.0}/dbos/_dbos.py (continued)

@@ -962,7 +978,13 @@ class DBOS:
         return cls.fork_workflow(workflow_id, 1)

     @classmethod
-    def fork_workflow(
+    def fork_workflow(
+        cls,
+        workflow_id: str,
+        start_step: int,
+        *,
+        application_version: Optional[str] = None,
+    ) -> WorkflowHandle[Any]:
         """Restart a workflow with a new workflow ID from a specific step"""

         def fn() -> str:
@@ -972,6 +994,7 @@ class DBOS:
             _get_dbos_instance()._app_db,
             workflow_id,
             start_step,
+            application_version=application_version,
         )

         new_id = _get_dbos_instance()._sys_db.call_function_as_step(
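The same signature lands on the `DBOS` class for use inside an application. A sketch (IDs hypothetical):

```python
# Restart an existing workflow from step 3 under a new workflow ID,
# optionally pinning the application version it should execute on.
handle = DBOS.fork_workflow("my-workflow-id", 3, application_version="v2")
result = handle.get_result()
```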
@@ -1166,6 +1189,11 @@ class DBOS:
         ctx.authenticated_user = authenticated_user
         ctx.authenticated_roles = authenticated_roles

+    @classproperty
+    def tracer(self) -> DBOSTracer:
+        """Return the DBOS OpenTelemetry tracer."""
+        return dbos_tracer
+

 class WorkflowHandle(Generic[R], Protocol):
     """
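`DBOS.tracer` simply returns the module-level `dbos_tracer` singleton, so application code can reach the same tracer instance the library uses for its own spans without importing from the private `_tracer` module.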
{dbos-0.26.1 → dbos-0.27.0}/dbos/_dbos_config.py

@@ -31,6 +31,7 @@ class DBOSConfig(TypedDict, total=False):
         app_db_pool_size (int): Application database pool size
         sys_db_name (str): System database name
         sys_db_pool_size (int): System database pool size
+        db_engine_kwargs (Dict[str, Any]): SQLAlchemy engine kwargs (See https://docs.sqlalchemy.org/en/20/core/engines.html#sqlalchemy.create_engine)
         log_level (str): Log level
         otlp_traces_endpoints: List[str]: OTLP traces endpoints
         otlp_logs_endpoints: List[str]: OTLP logs endpoints
@@ -43,6 +44,7 @@ class DBOSConfig(TypedDict, total=False):
     app_db_pool_size: Optional[int]
     sys_db_name: Optional[str]
     sys_db_pool_size: Optional[int]
+    db_engine_kwargs: Optional[Dict[str, Any]]
     log_level: Optional[str]
     otlp_traces_endpoints: Optional[List[str]]
     otlp_logs_endpoints: Optional[List[str]]
@@ -64,6 +66,7 @@ class DatabaseConfig(TypedDict, total=False):
         app_db_pool_size (int): Application database pool size
         sys_db_name (str): System database name
         sys_db_pool_size (int): System database pool size
+        db_engine_kwargs (Dict[str, Any]): SQLAlchemy engine kwargs
         migrate (List[str]): Migration commands to run on startup
     """

@@ -76,6 +79,7 @@ class DatabaseConfig(TypedDict, total=False):
     app_db_pool_size: Optional[int]
     sys_db_name: Optional[str]
     sys_db_pool_size: Optional[int]
+    db_engine_kwargs: Optional[Dict[str, Any]]
     ssl: Optional[bool]  # Will be removed in a future version
     ssl_ca: Optional[str]  # Will be removed in a future version
     migrate: Optional[List[str]]
@@ -183,6 +187,8 @@ def translate_dbos_config_to_config_file(config: DBOSConfig) -> ConfigFile:
         db_config["app_db_pool_size"] = config.get("app_db_pool_size")
     if "sys_db_pool_size" in config:
         db_config["sys_db_pool_size"] = config.get("sys_db_pool_size")
+    if "db_engine_kwargs" in config:
+        db_config["db_engine_kwargs"] = config.get("db_engine_kwargs")
     if db_config:
         translated_config["database"] = db_config

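A hedged sketch of the new knob in application code (names and values hypothetical; any `sqlalchemy.create_engine` keyword argument can be passed through):

```python
from dbos import DBOS, DBOSConfig

config: DBOSConfig = {
    "name": "my-app",
    "db_engine_kwargs": {
        "pool_size": 50,     # overrides the default of 20
        "pool_timeout": 10,  # overrides the default of 30
        "echo": True,        # any other create_engine kwarg passes through
    },
}
DBOS(config=config)
```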
{dbos-0.26.1 → dbos-0.27.0}/dbos/_error.py

@@ -61,6 +61,7 @@ class DBOSErrorCode(Enum):
     ConflictingWorkflowError = 9
     WorkflowCancelled = 10
     UnexpectedStep = 11
+    QueueDeduplicated = 12
     ConflictingRegistrationError = 25


@@ -133,12 +134,17 @@ class DBOSNotAuthorizedError(DBOSException):
     """Exception raised by DBOS role-based security when the user is not authorized to access a function."""

     def __init__(self, msg: str):
+        self.msg = msg
         super().__init__(
             msg,
             dbos_error_code=DBOSErrorCode.NotAuthorized.value,
         )
         self.status_code = 403

+    def __reduce__(self) -> Any:
+        # Tell jsonpickle how to reconstruct this object
+        return (self.__class__, (self.msg,))
+

 class DBOSMaxStepRetriesExceeded(DBOSException):
     """Exception raised when a step was retried the maximimum number of times without success."""
@@ -178,6 +184,28 @@ class DBOSUnexpectedStepError(DBOSException):
         )


+class DBOSQueueDeduplicatedError(DBOSException):
+    """Exception raised when a workflow is deduplicated in the queue."""
+
+    def __init__(
+        self, workflow_id: str, queue_name: str, deduplication_id: str
+    ) -> None:
+        self.workflow_id = workflow_id
+        self.queue_name = queue_name
+        self.deduplication_id = deduplication_id
+        super().__init__(
+            f"Workflow {workflow_id} was deduplicated due to an existing workflow in queue {queue_name} with deduplication ID {deduplication_id}.",
+            dbos_error_code=DBOSErrorCode.QueueDeduplicated.value,
+        )
+
+    def __reduce__(self) -> Any:
+        # Tell jsonpickle how to reconstruct this object
+        return (
+            self.__class__,
+            (self.workflow_id, self.queue_name, self.deduplication_id),
+        )
+
+
 #######################################
 ## BaseException
 #######################################
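The `__reduce__` overrides exist because DBOS serializes exceptions with jsonpickle when persisting workflow results; exceptions whose `__init__` takes required arguments need `__reduce__` to round-trip. The raise site for the new error is not shown in this section of the diff, but the enqueue call is the natural place to catch it. A sketch (queue and task hypothetical; the error class is in the `_error` module shown here):

```python
from dbos import SetEnqueueOptions
from dbos._error import DBOSQueueDeduplicatedError

try:
    with SetEnqueueOptions(deduplication_id="order-1234"):
        queue.enqueue(process_order, 1234)
except DBOSQueueDeduplicatedError:
    # A workflow with this deduplication ID is already pending on the queue.
    pass
```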
dbos-0.27.0/dbos/_migrations/versions/27ac6900c6ad_add_queue_dedup.py (new file)

@@ -0,0 +1,45 @@
+"""add queue dedup
+
+Revision ID: 27ac6900c6ad
+Revises: 83f3732ae8e7
+Create Date: 2025-04-23 16:18:48.530047
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "27ac6900c6ad"
+down_revision: Union[str, None] = "83f3732ae8e7"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column(
+        "workflow_queue",
+        sa.Column(
+            "deduplication_id",
+            sa.Text(),
+            nullable=True,
+        ),
+        schema="dbos",
+    )
+
+    # Unique constraint for queue_name, deduplication_id
+    op.create_unique_constraint(
+        "uq_workflow_queue_name_dedup_id",
+        "workflow_queue",
+        ["queue_name", "deduplication_id"],
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_constraint(
+        "uq_workflow_queue_name_dedup_id", "workflow_queue", schema="dbos"
+    )
+    op.drop_column("workflow_queue", "deduplication_id", schema="dbos")
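A note on the constraint's semantics: `deduplication_id` is nullable, and Postgres treats NULLs as distinct in unique constraints, so workflows enqueued without a deduplication ID never conflict with one another. Only two rows sharing the same `queue_name` and the same non-NULL `deduplication_id` violate `uq_workflow_queue_name_dedup_id`.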
{dbos-0.26.1 → dbos-0.27.0}/dbos/_queue.py

@@ -99,6 +99,8 @@ def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
                f"Exception encountered in queue thread: {traceback.format_exc()}"
            )
    except Exception:
-        dbos.logger.warning(
-            f"Exception encountered in queue thread: {traceback.format_exc()}"
-        )
+        if not stop_event.is_set():
+            # Only print the error if the thread is not stopping
+            dbos.logger.warning(
+                f"Exception encountered in queue thread: {traceback.format_exc()}"
+            )
{dbos-0.26.1 → dbos-0.27.0}/dbos/_schemas/system_database.py

@@ -10,6 +10,7 @@ from sqlalchemy import (
     String,
     Table,
     Text,
+    UniqueConstraint,
     text,
 )

@@ -174,4 +175,12 @@ class SystemSchema:
             "completed_at_epoch_ms",
             BigInteger(),
         ),
+        Column(
+            "deduplication_id",
+            Text,
+            nullable=True,
+        ),
+        UniqueConstraint(
+            "queue_name", "deduplication_id", name="uq_workflow_queue_name_dedup_id"
+        ),
     )