dbos 2.4.0a3__py3-none-any.whl → 2.6.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- dbos/__init__.py +2 -0
- dbos/_app_db.py +29 -87
- dbos/_client.py +16 -11
- dbos/_conductor/conductor.py +65 -34
- dbos/_conductor/protocol.py +23 -0
- dbos/_context.py +2 -3
- dbos/_core.py +98 -30
- dbos/_dbos.py +32 -28
- dbos/_dbos_config.py +3 -21
- dbos/_debouncer.py +4 -5
- dbos/_fastapi.py +4 -4
- dbos/_flask.py +2 -3
- dbos/_logger.py +14 -7
- dbos/_migration.py +30 -0
- dbos/_queue.py +94 -37
- dbos/_schemas/system_database.py +20 -0
- dbos/_sys_db.py +329 -92
- dbos/_sys_db_postgres.py +18 -12
- dbos/_tracer.py +10 -3
- dbos/_utils.py +10 -0
- dbos/_workflow_commands.py +2 -17
- dbos/cli/cli.py +8 -18
- dbos/cli/migration.py +29 -1
- {dbos-2.4.0a3.dist-info → dbos-2.6.0a8.dist-info}/METADATA +1 -1
- {dbos-2.4.0a3.dist-info → dbos-2.6.0a8.dist-info}/RECORD +28 -28
- {dbos-2.4.0a3.dist-info → dbos-2.6.0a8.dist-info}/WHEEL +1 -1
- {dbos-2.4.0a3.dist-info → dbos-2.6.0a8.dist-info}/entry_points.txt +0 -0
- {dbos-2.4.0a3.dist-info → dbos-2.6.0a8.dist-info}/licenses/LICENSE +0 -0
dbos/__init__.py
CHANGED
@@ -14,6 +14,7 @@ from ._kafka_message import KafkaMessage
 from ._queue import Queue
 from ._serialization import Serializer
 from ._sys_db import GetWorkflowsInput, WorkflowStatus, WorkflowStatusString
+from .cli.migration import run_dbos_database_migrations
 
 __all__ = [
     "DBOSConfig",
@@ -37,4 +38,5 @@ __all__ = [
     "Debouncer",
     "DebouncerClient",
     "Serializer",
+    "run_dbos_database_migrations",
 ]
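
Note: `run_dbos_database_migrations` (defined in `dbos/cli/migration.py`, which also changes in this release) is now part of the package's public API. A minimal usage sketch follows; the function's signature is not shown in this diff, so the zero-argument call is an assumption:

# Sketch only: assumes run_dbos_database_migrations needs no arguments and
# picks up connection settings from the standard DBOS configuration.
from dbos import run_dbos_database_migrations

if __name__ == "__main__":
    run_dbos_database_migrations()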
dbos/_app_db.py
CHANGED
@@ -70,6 +70,17 @@ class ApplicationDatabase(ABC):
         schema: Optional[str],
         debug_mode: bool = False,
     ):
+        # Log application database connection information
+        printable_url = sa.make_url(database_url).render_as_string(hide_password=True)
+        dbos_logger.info(
+            f"Initializing DBOS application database with URL: {printable_url}"
+        )
+        if not database_url.startswith("sqlite"):
+            dbos_logger.info(
+                f"DBOS application database engine parameters: {engine_kwargs}"
+            )
+
+        # Configure and initialize the application database
         if database_url.startswith("sqlite"):
             self.schema = None
         else:
@@ -173,81 +184,6 @@ class ApplicationDatabase(ABC):
         }
         return result
 
-    def get_transactions(self, workflow_uuid: str) -> List[StepInfo]:
-        with self.engine.begin() as conn:
-            rows = conn.execute(
-                sa.select(
-                    ApplicationSchema.transaction_outputs.c.function_id,
-                    ApplicationSchema.transaction_outputs.c.function_name,
-                    ApplicationSchema.transaction_outputs.c.output,
-                    ApplicationSchema.transaction_outputs.c.error,
-                ).where(
-                    ApplicationSchema.transaction_outputs.c.workflow_uuid
-                    == workflow_uuid,
-                )
-            ).all()
-            return [
-                StepInfo(
-                    function_id=row[0],
-                    function_name=row[1],
-                    output=(
-                        self.serializer.deserialize(row[2])
-                        if row[2] is not None
-                        else row[2]
-                    ),
-                    error=(
-                        self.serializer.deserialize(row[3])
-                        if row[3] is not None
-                        else row[3]
-                    ),
-                    child_workflow_id=None,
-                    started_at_epoch_ms=None,
-                    completed_at_epoch_ms=None,
-                )
-                for row in rows
-            ]
-
-    def clone_workflow_transactions(
-        self, src_workflow_id: str, forked_workflow_id: str, start_step: int
-    ) -> None:
-        """
-        Copies all steps from dbos.transctions_outputs where function_id < input function_id
-        into a new workflow_uuid. Returns the new workflow_uuid.
-        """
-
-        with self.engine.begin() as conn:
-
-            insert_stmt = sa.insert(ApplicationSchema.transaction_outputs).from_select(
-                [
-                    "workflow_uuid",
-                    "function_id",
-                    "output",
-                    "error",
-                    "txn_id",
-                    "txn_snapshot",
-                    "executor_id",
-                    "function_name",
-                ],
-                sa.select(
-                    sa.literal(forked_workflow_id).label("workflow_uuid"),
-                    ApplicationSchema.transaction_outputs.c.function_id,
-                    ApplicationSchema.transaction_outputs.c.output,
-                    ApplicationSchema.transaction_outputs.c.error,
-                    ApplicationSchema.transaction_outputs.c.txn_id,
-                    ApplicationSchema.transaction_outputs.c.txn_snapshot,
-                    ApplicationSchema.transaction_outputs.c.executor_id,
-                    ApplicationSchema.transaction_outputs.c.function_name,
-                ).where(
-                    (
-                        ApplicationSchema.transaction_outputs.c.workflow_uuid
-                        == src_workflow_id
-                    )
-                    & (ApplicationSchema.transaction_outputs.c.function_id < start_step)
-                ),
-            )
-
-            conn.execute(insert_stmt)
-
     def garbage_collect(
         self, cutoff_epoch_timestamp_ms: int, pending_workflow_ids: list[str]
     ) -> None:
@@ -302,18 +238,24 @@ class PostgresApplicationDatabase(ApplicationDatabase):
             return
         # Check if the database exists
         app_db_url = self.engine.url
-        postgres_db_engine = sa.create_engine(
-            app_db_url.set(database="postgres"),
-            **self._engine_kwargs,
-        )
-        with postgres_db_engine.connect() as conn:
-            conn.execution_options(isolation_level="AUTOCOMMIT")
-            if not conn.execute(
-                sa.text("SELECT 1 FROM pg_database WHERE datname=:db_name"),
-                parameters={"db_name": app_db_url.database},
-            ).scalar():
-                conn.execute(sa.text(f"CREATE DATABASE {app_db_url.database}"))
-        postgres_db_engine.dispose()
+        try:
+            postgres_db_engine = sa.create_engine(
+                app_db_url.set(database="postgres"),
+                **self._engine_kwargs,
+            )
+            with postgres_db_engine.connect() as conn:
+                conn.execution_options(isolation_level="AUTOCOMMIT")
+                if not conn.execute(
+                    sa.text("SELECT 1 FROM pg_database WHERE datname=:db_name"),
+                    parameters={"db_name": app_db_url.database},
+                ).scalar():
+                    conn.execute(sa.text(f"CREATE DATABASE {app_db_url.database}"))
+        except Exception:
+            dbos_logger.warning(
+                f"Could not connect to postgres database to verify existence of {app_db_url.database}. Continuing..."
+            )
+        finally:
+            postgres_db_engine.dispose()
 
         # Create the dbos schema and transaction_outputs table in the application database
         with self.engine.begin() as conn:
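
The new initialization logging masks credentials before the connection string reaches the log, using SQLAlchemy's standard URL API. A self-contained illustration of the same pattern (the URL is made up):

import sqlalchemy as sa

url = "postgresql://alice:s3cret@db.example.com:5432/appdb"
# hide_password=True renders the password component as "***"
printable = sa.make_url(url).render_as_string(hide_password=True)
print(printable)  # postgresql://alice:***@db.example.com:5432/appdb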
dbos/_client.py
CHANGED
@@ -1,7 +1,6 @@
 import asyncio
 import json
 import time
-import uuid
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -19,7 +18,9 @@ import sqlalchemy as sa
 
 from dbos._app_db import ApplicationDatabase
 from dbos._context import MaxPriority, MinPriority
+from dbos._core import DEFAULT_POLLING_INTERVAL
 from dbos._sys_db import SystemDatabase
+from dbos._utils import generate_uuid
 
 if TYPE_CHECKING:
     from dbos._dbos import WorkflowHandle, WorkflowHandleAsync
@@ -42,7 +43,6 @@ from dbos._workflow_commands import (
     fork_workflow,
     get_workflow,
     list_queued_workflows,
-    list_workflow_steps,
     list_workflows,
 )
 
@@ -85,8 +85,12 @@ class WorkflowHandleClientPolling(Generic[R]):
     def get_workflow_id(self) -> str:
         return self.workflow_id
 
-    def get_result(self) -> R:
-        res: R = self._sys_db.await_workflow_result(self.workflow_id)
+    def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
+        res: R = self._sys_db.await_workflow_result(
+            self.workflow_id, polling_interval_sec
+        )
         return res
 
     def get_status(self) -> WorkflowStatus:
@@ -105,9 +109,11 @@ class WorkflowHandleClientAsyncPolling(Generic[R]):
     def get_workflow_id(self) -> str:
         return self.workflow_id
 
-    async def get_result(self) -> R:
+    async def get_result(
+        self, *, polling_interval_sec: float = DEFAULT_POLLING_INTERVAL
+    ) -> R:
         res: R = await asyncio.to_thread(
-            self._sys_db.await_workflow_result, self.workflow_id
+            self._sys_db.await_workflow_result, self.workflow_id, polling_interval_sec
         )
         return res
 
@@ -158,6 +164,7 @@ class DBOSClient:
             engine=system_database_engine,
             schema=dbos_system_schema,
             serializer=serializer,
+            executor_id=None,
         )
         self._sys_db.check_connection()
         if application_database_url:
@@ -187,7 +194,7 @@ class DBOSClient:
         max_recovery_attempts = DEFAULT_MAX_RECOVERY_ATTEMPTS
         workflow_id = options.get("workflow_id")
         if workflow_id is None:
-            workflow_id = str(uuid.uuid4())
+            workflow_id = generate_uuid()
         workflow_timeout = options.get("workflow_timeout", None)
         enqueue_options_internal: EnqueueOptionsInternal = {
             "deduplication_id": options.get("deduplication_id"),
@@ -280,7 +287,7 @@ class DBOSClient:
         topic: Optional[str] = None,
         idempotency_key: Optional[str] = None,
     ) -> None:
-        idempotency_key = idempotency_key if idempotency_key else str(uuid.uuid4())
+        idempotency_key = idempotency_key if idempotency_key else generate_uuid()
         status: WorkflowStatusInternal = {
             "workflow_uuid": f"{destination_id}-{idempotency_key}",
             "status": WorkflowStatusString.SUCCESS.value,
@@ -471,7 +478,7 @@ class DBOSClient:
         )
 
     def list_workflow_steps(self, workflow_id: str) -> List[StepInfo]:
-        return list_workflow_steps(self._sys_db, self._app_db, workflow_id)
+        return self._sys_db.list_workflow_steps(workflow_id)
 
     async def list_workflow_steps_async(self, workflow_id: str) -> List[StepInfo]:
         return await asyncio.to_thread(self.list_workflow_steps, workflow_id)
@@ -485,7 +492,6 @@ class DBOSClient:
     ) -> "WorkflowHandle[Any]":
         forked_workflow_id = fork_workflow(
             self._sys_db,
-            self._app_db,
             workflow_id,
             start_step,
             application_version=application_version,
@@ -502,7 +508,6 @@ class DBOSClient:
         forked_workflow_id = await asyncio.to_thread(
            fork_workflow,
            self._sys_db,
-           self._app_db,
            workflow_id,
            start_step,
            application_version=application_version,
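
Both client handle classes now take a keyword-only `polling_interval_sec`, defaulting to `DEFAULT_POLLING_INTERVAL` from `dbos._core`. A hedged sketch of a caller tuning it; the client constructor arguments and workflow ID below are hypothetical:

from dbos import DBOSClient

client = DBOSClient("postgresql://localhost:5432/dbos")  # hypothetical URL
handle = client.retrieve_workflow("my-workflow-id")  # hypothetical workflow ID
# Poll the system database for a result every 0.5 s instead of the default.
result = handle.get_result(polling_interval_sec=0.5)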
dbos/_conductor/conductor.py
CHANGED
@@ -2,7 +2,6 @@ import socket
 import threading
 import time
 import traceback
-import uuid
 from importlib.metadata import version
 from typing import TYPE_CHECKING, Optional
 
@@ -11,13 +10,12 @@ from websockets.sync.client import connect
 from websockets.sync.connection import Connection
 
 from dbos._context import SetWorkflowID
-from dbos._utils import GlobalParams
+from dbos._utils import GlobalParams, generate_uuid
 from dbos._workflow_commands import (
     garbage_collect,
     get_workflow,
     global_timeout,
     list_queued_workflows,
-    list_workflow_steps,
     list_workflows,
 )
 
@@ -118,6 +116,8 @@ class ConductorWebsocket(threading.Thread):
                     executor_id=GlobalParams.executor_id,
                     application_version=GlobalParams.app_version,
                     hostname=socket.gethostname(),
+                    language="python",
+                    dbos_version=GlobalParams.dbos_version,
                 )
                 websocket.send(info_response.to_json())
                 self.dbos.logger.info("Connected to DBOS conductor")
@@ -192,7 +192,7 @@ class ConductorWebsocket(threading.Thread):
                     fork_message = p.ForkWorkflowRequest.from_json(message)
                     new_workflow_id = fork_message.body["new_workflow_id"]
                     if new_workflow_id is None:
-                        new_workflow_id = str(uuid.uuid4())
+                        new_workflow_id = generate_uuid()
                     workflow_id = fork_message.body["workflow_id"]
                     start_step = fork_message.body["start_step"]
                     app_version = fork_message.body["application_version"]
@@ -223,23 +223,21 @@ class ConductorWebsocket(threading.Thread):
                     body = list_workflows_message.body
                     infos = []
                     try:
-                        load_input = body.get("load_input", False)
-                        load_output = body.get("load_output", False)
                         infos = list_workflows(
                             self.dbos._sys_db,
-                            workflow_ids=body["workflow_uuids"],
-                            user=body["authenticated_user"],
-                            start_time=body["start_time"],
-                            end_time=body["end_time"],
-                            status=body["status"],
-                            app_version=body["application_version"],
-                            forked_from=body["forked_from"],
-                            name=body["workflow_name"],
-                            limit=body["limit"],
-                            offset=body["offset"],
-                            sort_desc=body["sort_desc"],
-                            load_input=load_input,
-                            load_output=load_output,
+                            workflow_ids=body.get("workflow_uuids", None),
+                            user=body.get("authenticated_user", None),
+                            start_time=body.get("start_time", None),
+                            end_time=body.get("end_time", None),
+                            status=body.get("status", None),
+                            app_version=body.get("application_version", None),
+                            forked_from=body.get("forked_from", None),
+                            name=body.get("workflow_name", None),
+                            limit=body.get("limit", None),
+                            offset=body.get("offset", None),
+                            sort_desc=body.get("sort_desc", False),
+                            load_input=body.get("load_input", False),
+                            load_output=body.get("load_output", False),
                         )
                     except Exception as e:
                         error_message = f"Exception encountered when listing workflows: {traceback.format_exc()}"
@@ -262,19 +260,18 @@ class ConductorWebsocket(threading.Thread):
                     q_body = list_queued_workflows_message.body
                     infos = []
                     try:
-                        q_load_input = q_body.get("load_input", False)
                         infos = list_queued_workflows(
                             self.dbos._sys_db,
-                            start_time=q_body["start_time"],
-                            end_time=q_body["end_time"],
-                            status=q_body["status"],
-                            forked_from=q_body["forked_from"],
-                            name=q_body["workflow_name"],
-                            limit=q_body["limit"],
-                            offset=q_body["offset"],
-                            queue_name=q_body["queue_name"],
-                            sort_desc=q_body["sort_desc"],
-                            load_input=q_load_input,
+                            start_time=q_body.get("start_time", None),
+                            end_time=q_body.get("end_time", None),
+                            status=q_body.get("status", None),
+                            forked_from=q_body.get("forked_from", None),
+                            name=q_body.get("workflow_name", None),
+                            limit=q_body.get("limit", None),
+                            offset=q_body.get("offset", None),
+                            queue_name=q_body.get("queue_name", None),
+                            sort_desc=q_body.get("sort_desc", False),
+                            load_input=q_body.get("load_input", False),
                         )
                     except Exception as e:
                         error_message = f"Exception encountered when listing queued workflows: {traceback.format_exc()}"
@@ -343,10 +340,8 @@ class ConductorWebsocket(threading.Thread):
                     list_steps_message = p.ListStepsRequest.from_json(message)
                     step_info = None
                     try:
-                        step_info = list_workflow_steps(
-                            self.dbos._sys_db,
-                            self.dbos._app_db,
-                            list_steps_message.workflow_id,
+                        step_info = self.dbos._sys_db.list_workflow_steps(
+                            list_steps_message.workflow_id
                         )
                     except Exception as e:
                         error_message = f"Exception encountered when getting workflow {list_steps_message.workflow_id}: {traceback.format_exc()}"
@@ -401,6 +396,42 @@ class ConductorWebsocket(threading.Thread):
                         error_message=error_message,
                     )
                     websocket.send(retention_response.to_json())
+                elif msg_type == p.MessageType.GET_METRICS:
+                    get_metrics_message = p.GetMetricsRequest.from_json(message)
+                    self.dbos.logger.debug(
+                        f"Received metrics request for time range {get_metrics_message.start_time} to {get_metrics_message.end_time}"
+                    )
+                    metrics_data = []
+                    if (
+                        get_metrics_message.metric_class
+                        == "workflow_step_count"
+                    ):
+                        try:
+                            sys_metrics = self.dbos._sys_db.get_metrics(
+                                get_metrics_message.start_time,
+                                get_metrics_message.end_time,
+                            )
+                            metrics_data = [
+                                p.MetricData(
+                                    metric_type=m["metric_type"],
+                                    metric_name=m["metric_name"],
+                                    value=m["value"],
+                                )
+                                for m in sys_metrics
+                            ]
+                        except Exception as e:
+                            error_message = f"Exception encountered when getting metrics: {traceback.format_exc()}"
+                            self.dbos.logger.error(error_message)
+                    else:
+                        error_message = f"Unexpected metric class: {get_metrics_message.metric_class}"
+                        self.dbos.logger.warning(error_message)
+                    get_metrics_response = p.GetMetricsResponse(
+                        type=p.MessageType.GET_METRICS,
+                        request_id=base_message.request_id,
+                        metrics=metrics_data,
+                        error_message=error_message,
+                    )
+                    websocket.send(get_metrics_response.to_json())
                 else:
                     self.dbos.logger.warning(
                         f"Unexpected message type: {msg_type}"
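
The list handlers now read every optional filter with `dict.get` and an explicit default, so a conductor request that omits a field can no longer raise `KeyError`. The semantics in miniature:

body = {"workflow_uuids": ["wf-1"], "limit": 10}  # optional filters omitted

print(body.get("sort_desc", False))  # False: missing key falls back to the default
print(body.get("offset", None))      # None
# body["sort_desc"] would raise KeyError, the failure mode the rewrite removes.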
dbos/_conductor/protocol.py
CHANGED
@@ -19,6 +19,7 @@ class MessageType(str, Enum):
     LIST_STEPS = "list_steps"
     FORK_WORKFLOW = "fork_workflow"
     RETENTION = "retention"
+    GET_METRICS = "get_metrics"
 
 
 T = TypeVar("T", bound="BaseMessage")
@@ -63,6 +64,8 @@ class ExecutorInfoResponse(BaseMessage):
     executor_id: str
     application_version: str
     hostname: Optional[str]
+    language: Optional[str]
+    dbos_version: Optional[str]
     error_message: Optional[str] = None
 
 
@@ -339,3 +342,23 @@ class RetentionRequest(BaseMessage):
 class RetentionResponse(BaseMessage):
     success: bool
     error_message: Optional[str] = None
+
+
+@dataclass
+class GetMetricsRequest(BaseMessage):
+    start_time: str  # ISO 8601
+    end_time: str  # ISO 8601
+    metric_class: str
+
+
+@dataclass
+class MetricData:
+    metric_type: str
+    metric_name: str
+    value: int
+
+
+@dataclass
+class GetMetricsResponse(BaseMessage):
+    metrics: List[MetricData]
+    error_message: Optional[str] = None
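
Combining the new dataclasses with the `type`/`request_id` envelope that `BaseMessage` provides elsewhere in this file, a metrics request on the wire plausibly looks like the following; the envelope field names are inferred from the other messages, not shown in this hunk:

import json

payload = {
    "type": "get_metrics",  # MessageType.GET_METRICS
    "request_id": "req-123",  # hypothetical request ID
    "start_time": "2025-01-01T00:00:00Z",  # ISO 8601, per the field comments
    "end_time": "2025-01-02T00:00:00Z",
    "metric_class": "workflow_step_count",  # the only class conductor.py accepts
}
wire_message = json.dumps(payload)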
dbos/_context.py
CHANGED
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import json
 import os
-import uuid
 from contextlib import AbstractContextManager
 from contextvars import ContextVar
 from dataclasses import dataclass
@@ -15,7 +14,7 @@ if TYPE_CHECKING:
 
     from sqlalchemy.orm import Session
 
-from dbos._utils import GlobalParams
+from dbos._utils import GlobalParams, generate_uuid
 
 from ._logger import dbos_logger
 from ._tracer import dbos_tracer
@@ -151,7 +150,7 @@ class DBOSContext:
             self.logger.warning(
                 f"Multiple workflows started in the same SetWorkflowID block. Only the first workflow is assigned the specified workflow ID; subsequent workflows will use a generated workflow ID."
             )
-            wfid = str(uuid.uuid4())
+            wfid = generate_uuid()
         return wfid
 
     def start_workflow(