dbos-0.26.0a15-py3-none-any.whl → dbos-0.26.0a18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_client.py +5 -7
- dbos/_core.py +4 -10
- dbos/_dbos.py +15 -13
- dbos/_sys_db.py +68 -37
- dbos/_workflow_commands.py +2 -0
- {dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/METADATA +1 -1
- {dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/RECORD +10 -10
- {dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/WHEEL +0 -0
- {dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/entry_points.txt +0 -0
- {dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/licenses/LICENSE +0 -0
dbos/_client.py
CHANGED
```diff
@@ -124,12 +124,7 @@ class DBOSClient:
             "kwargs": kwargs,
         }
 
-
-        self._sys_db.update_workflow_inputs(
-            workflow_id, _serialization.serialize_args(inputs)
-        )
-        if wf_status == WorkflowStatusString.ENQUEUED.value:
-            self._sys_db.enqueue(workflow_id, queue_name)
+        self._sys_db.init_workflow(status, _serialization.serialize_args(inputs))
         return workflow_id
 
     def enqueue(
@@ -184,7 +179,8 @@ class DBOSClient:
             "app_id": None,
             "app_version": None,
         }
-        self._sys_db.
+        with self._sys_db.engine.begin() as conn:
+            self._sys_db.insert_workflow_status(status, conn)
         self._sys_db.send(status["workflow_uuid"], 0, destination_id, message, topic)
 
     async def send_async(
@@ -233,6 +229,7 @@ class DBOSClient:
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         sort_desc: bool = False,
+        workflow_id_prefix: Optional[str] = None,
     ) -> List[WorkflowStatus]:
         return list_workflows(
             self._sys_db,
@@ -246,6 +243,7 @@ class DBOSClient:
             limit=limit,
             offset=offset,
             sort_desc=sort_desc,
+            workflow_id_prefix=workflow_id_prefix,
         )
 
     async def list_workflows_async(
```
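The one user-facing addition to the client here is the `workflow_id_prefix` filter on `list_workflows`. A minimal usage sketch, assuming a reachable system database (the connection URL and the `sched-` prefix are illustrative, not part of this diff):

```python
from dbos import DBOSClient

# Hypothetical connection string for the application's Postgres database.
client = DBOSClient("postgresql://postgres:dbos@localhost:5432/app_db")

# New in this release: return only workflows whose IDs start with a prefix.
# Scheduled workflow runs, for example, share a "sched-" ID prefix.
for wf in client.list_workflows(workflow_id_prefix="sched-", limit=20):
    print(wf.workflow_id, wf.status)
```

The other changes swap the direct `update_workflow_inputs`/`enqueue` calls for the new transactional `init_workflow`, and thread an explicit connection into `insert_workflow_status`; see the `dbos/_sys_db.py` section below.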
dbos/_core.py
CHANGED
```diff
@@ -280,18 +280,12 @@ def _init_workflow(
             raise DBOSNonExistentWorkflowError(wfid)
         wf_status = get_status_result["status"]
     else:
-
-
-
-
+        wf_status = dbos._sys_db.init_workflow(
+            status,
+            _serialization.serialize_args(inputs),
+            max_recovery_attempts=max_recovery_attempts,
         )
 
-    # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
-    dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
-
-    if queue is not None and wf_status == WorkflowStatusString.ENQUEUED.value:
-        dbos._sys_db.enqueue(wfid, queue)
-
     status["status"] = wf_status
     return status
```
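`_init_workflow` previously recorded the status, the inputs, and the queue entry with three separate system-database calls, each in its own implicit transaction; all three now happen inside the new `SystemDatabase.init_workflow` (shown in the `dbos/_sys_db.py` section below). A self-contained sketch of why that matters, using an in-memory SQLite stand-in with hypothetical tables:

```python
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")  # stand-in for the Postgres system DB
with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE status (id TEXT)"))
    conn.execute(sa.text("CREATE TABLE inputs (id TEXT)"))

try:
    with engine.begin() as conn:
        # Both writes share one transaction ...
        conn.execute(sa.text("INSERT INTO status VALUES ('wf-1')"))
        raise RuntimeError("crash before inputs are recorded")
except RuntimeError:
    pass

with engine.connect() as conn:
    # ... so the partial insert was rolled back; no status row without inputs.
    print(conn.execute(sa.text("SELECT COUNT(*) FROM status")).scalar())  # 0
```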
dbos/_dbos.py
CHANGED
```diff
@@ -363,13 +363,13 @@ class DBOS:
         check_config_consistency(name=unvalidated_config["name"])
 
         if unvalidated_config is not None:
-            self.
+            self._config: ConfigFile = process_config(data=unvalidated_config)
         else:
             raise ValueError("No valid configuration was loaded.")
 
-        set_env_vars(self.
-        config_logger(self.
-        dbos_tracer.config(self.
+        set_env_vars(self._config)
+        config_logger(self._config)
+        dbos_tracer.config(self._config)
         dbos_logger.info("Initializing DBOS")
 
         # If using FastAPI, set up middleware and lifecycle events
@@ -453,19 +453,19 @@ class DBOS:
         self._executor_field = ThreadPoolExecutor(max_workers=64)
         self._background_event_loop.start()
         self._sys_db_field = SystemDatabase(
-            self.
+            self._config["database"], debug_mode=debug_mode
         )
         self._app_db_field = ApplicationDatabase(
-            self.
+            self._config["database"], debug_mode=debug_mode
         )
 
         if debug_mode:
             return
 
-        admin_port = self.
+        admin_port = self._config.get("runtimeConfig", {}).get("admin_port")
         if admin_port is None:
             admin_port = 3001
-        run_admin_server = self.
+        run_admin_server = self._config.get("runtimeConfig", {}).get(
             "run_admin_server"
         )
         if run_admin_server:
@@ -563,7 +563,7 @@ class DBOS:
         assert (
             not self._launched
         ), "The system database cannot be reset after DBOS is launched. Resetting the system database is a destructive operation that should only be used in a test environment."
-        reset_system_database(self.
+        reset_system_database(self._config)
 
     def _destroy(self) -> None:
         self._initialized = False
@@ -1014,6 +1014,7 @@ class DBOS:
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         sort_desc: bool = False,
+        workflow_id_prefix: Optional[str] = None,
     ) -> List[WorkflowStatus]:
         def fn() -> List[WorkflowStatus]:
             return list_workflows(
@@ -1028,6 +1029,7 @@ class DBOS:
                 limit=limit,
                 offset=offset,
                 sort_desc=sort_desc,
+                workflow_id_prefix=workflow_id_prefix,
             )
 
         return _get_dbos_instance()._sys_db.call_function_as_step(
@@ -1074,15 +1076,15 @@ class DBOS:
         """Return the DBOS `ConfigFile` for the current context."""
         global _dbos_global_instance
         if _dbos_global_instance is not None:
-            return _dbos_global_instance.
+            return _dbos_global_instance._config
         reg = _get_or_create_dbos_registry()
         if reg.config is not None:
             return reg.config
-
+        loaded_config = (
             load_config()
         )  # This will return the processed & validated config (with defaults)
-        reg.config =
-        return
+        reg.config = loaded_config
+        return loaded_config
 
     @classproperty
     def sql_session(cls) -> Session:
```
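Two changes run through this file: the config attribute is renamed from a public `self.config` to a private `self._config` (the public accessor remains the `DBOS.config` classproperty shown above), and `DBOS.list_workflows` gains the same `workflow_id_prefix` parameter as the client. A sketch of the new filter in application code, assuming a configured and launched DBOS app; the `order-` ID scheme is a hypothetical choice made with `SetWorkflowID`:

```python
from dbos import DBOS, SetWorkflowID

@DBOS.workflow()
def process_order(order_id: str) -> str:
    return f"processed {order_id}"

# Assumes DBOS(config=...) has been constructed and DBOS.launch() called.
with SetWorkflowID("order-1001"):  # start workflows under a predictable ID scheme
    process_order("1001")

# Then query them back by prefix (new in this release).
for wf in DBOS.list_workflows(workflow_id_prefix="order-", sort_desc=True):
    print(wf.workflow_id, wf.status)
```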
dbos/_sys_db.py
CHANGED
```diff
@@ -132,6 +132,9 @@ class GetWorkflowsInput:
         self.sort_desc: bool = (
             False  # If true, sort by created_at in DESC order. Default false (in ASC order).
         )
+        self.workflow_id_prefix: Optional[str] = (
+            None  # If set, search for workflow IDs starting with this string
+        )
 
 
 class GetQueuedWorkflowsInput(TypedDict):
@@ -282,6 +285,7 @@ class SystemDatabase:
     def insert_workflow_status(
         self,
         status: WorkflowStatusInternal,
+        conn: sa.Connection,
         *,
         max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
     ) -> WorkflowStatuses:
@@ -325,8 +329,7 @@ class SystemDatabase:
 
         cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name)  # type: ignore
 
-
-        results = c.execute(cmd)
+        results = conn.execute(cmd)
 
         row = results.fetchone()
         if row is not None:
@@ -352,28 +355,30 @@ class SystemDatabase:
         # Every time we start executing a workflow (and thus attempt to insert its status), we increment `recovery_attempts` by 1.
         # When this number becomes equal to `maxRetries + 1`, we mark the workflow as `RETRIES_EXCEEDED`.
         if recovery_attempts > max_recovery_attempts + 1:
-
-            c.
-
-
-
-
+            delete_cmd = sa.delete(SystemSchema.workflow_queue).where(
+                SystemSchema.workflow_queue.c.workflow_uuid
+                == status["workflow_uuid"]
+            )
+            conn.execute(delete_cmd)
+
+            dlq_cmd = (
+                sa.update(SystemSchema.workflow_status)
+                .where(
+                    SystemSchema.workflow_status.c.workflow_uuid
+                    == status["workflow_uuid"]
                 )
-
-
-                .
-
-
-
-
-                    SystemSchema.workflow_status.c.status
-                    == WorkflowStatusString.PENDING.value
-                )
-                .values(
-                    status=WorkflowStatusString.RETRIES_EXCEEDED.value,
-                    queue_name=None,
-                )
+                .where(
+                    SystemSchema.workflow_status.c.status
+                    == WorkflowStatusString.PENDING.value
+                )
+                .values(
+                    status=WorkflowStatusString.RETRIES_EXCEEDED.value,
+                    queue_name=None,
                 )
+            )
+            conn.execute(dlq_cmd)
+            # Need to commit here because we're throwing an exception
+            conn.commit()
             raise DBOSDeadLetterQueueError(
                 status["workflow_uuid"], max_recovery_attempts
             )
```
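Two things change in the dead-letter-queue branch above: it now executes on the caller's `conn` rather than opening its own connection, and it commits explicitly before raising `DBOSDeadLetterQueueError`, since the exception would otherwise roll back the surrounding `engine.begin()` transaction and lose the `RETRIES_EXCEEDED` update. For reference, a self-contained sketch (hypothetical table, not the dbos schema) of the update it builds; chained `.where()` calls AND together:

```python
import sqlalchemy as sa

# Hypothetical stand-in for SystemSchema.workflow_status.
metadata = sa.MetaData()
workflow_status = sa.Table(
    "workflow_status",
    metadata,
    sa.Column("workflow_uuid", sa.Text, primary_key=True),
    sa.Column("status", sa.Text),
    sa.Column("queue_name", sa.Text, nullable=True),
)

# Only a still-PENDING row for this workflow moves to RETRIES_EXCEEDED,
# and its queue assignment is cleared.
dlq_cmd = (
    sa.update(workflow_status)
    .where(workflow_status.c.workflow_uuid == "wf-123")
    .where(workflow_status.c.status == "PENDING")
    .values(status="RETRIES_EXCEEDED", queue_name=None)
)
print(dlq_cmd)  # UPDATE workflow_status SET ... WHERE ... AND ...
```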
```diff
@@ -652,7 +657,7 @@ class SystemDatabase:
             time.sleep(1)
 
     def update_workflow_inputs(
-        self, workflow_uuid: str, inputs: str, conn:
+        self, workflow_uuid: str, inputs: str, conn: sa.Connection
     ) -> None:
         if self._debug_mode:
             raise Exception("called update_workflow_inputs in debug mode")
@@ -669,11 +674,8 @@ class SystemDatabase:
             )
             .returning(SystemSchema.workflow_inputs.c.inputs)
         )
-
-
-        else:
-            with self.engine.begin() as c:
-                row = c.execute(cmd).fetchone()
+
+        row = conn.execute(cmd).fetchone()
         if row is not None and row[0] != inputs:
             # In a distributed environment, scheduled workflows are enqueued multiple times with slightly different timestamps
             if not workflow_uuid.startswith("sched-"):
@@ -734,6 +736,12 @@ class SystemDatabase:
             query = query.where(
                 SystemSchema.workflow_status.c.workflow_uuid.in_(input.workflow_ids)
            )
+        if input.workflow_id_prefix:
+            query = query.where(
+                SystemSchema.workflow_status.c.workflow_uuid.startswith(
+                    input.workflow_id_prefix
+                )
+            )
        if input.limit:
            query = query.limit(input.limit)
        if input.offset:
@@ -1380,18 +1388,17 @@ class SystemDatabase:
         )
         return value
 
-    def enqueue(self, workflow_id: str, queue_name: str) -> None:
+    def enqueue(self, workflow_id: str, queue_name: str, conn: sa.Connection) -> None:
         if self._debug_mode:
             raise Exception("called enqueue in debug mode")
-
-
-
-
-
-                queue_name=queue_name,
-            )
-            .on_conflict_do_nothing()
+        conn.execute(
+            pg.insert(SystemSchema.workflow_queue)
+            .values(
+                workflow_uuid=workflow_id,
+                queue_name=queue_name,
             )
+            .on_conflict_do_nothing()
+        )
 
     def start_queued_workflows(
         self, queue: "Queue", executor_id: str, app_version: str
@@ -1646,6 +1653,30 @@ class SystemDatabase:
         )
         return result
 
+    def init_workflow(
+        self,
+        status: WorkflowStatusInternal,
+        inputs: str,
+        *,
+        max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
+    ) -> WorkflowStatuses:
+        """
+        Synchronously record the status and inputs for workflows in a single transaction
+        """
+        with self.engine.begin() as conn:
+            wf_status = self.insert_workflow_status(
+                status, conn, max_recovery_attempts=max_recovery_attempts
+            )
+            # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
+            self.update_workflow_inputs(status["workflow_uuid"], inputs, conn)
+
+            if (
+                status["queue_name"] is not None
+                and wf_status == WorkflowStatusString.ENQUEUED.value
+            ):
+                self.enqueue(status["workflow_uuid"], status["queue_name"], conn)
+            return wf_status
+
 
 def reset_system_database(config: ConfigFile) -> None:
     sysdb_name = (
```
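The new `init_workflow` ties the file together: `insert_workflow_status`, `update_workflow_inputs`, and `enqueue` each now accept the caller's `sa.Connection` instead of opening their own, so one `engine.begin()` block makes the three writes atomic. A runnable sketch of the same connection-threading pattern, with an SQLite stand-in and hypothetical tables and helpers (not the dbos schema):

```python
import sqlalchemy as sa

metadata = sa.MetaData()
status_t = sa.Table(
    "workflow_status", metadata,
    sa.Column("workflow_uuid", sa.Text, primary_key=True),
    sa.Column("status", sa.Text),
)
inputs_t = sa.Table(
    "workflow_inputs", metadata,
    sa.Column("workflow_uuid", sa.Text, primary_key=True),
    sa.Column("inputs", sa.Text),
)
queue_t = sa.Table(
    "workflow_queue", metadata,
    sa.Column("workflow_uuid", sa.Text, primary_key=True),
    sa.Column("queue_name", sa.Text),
)

# Each helper runs on the caller's Connection instead of opening its own.
def insert_status(uuid: str, status: str, conn: sa.Connection) -> None:
    conn.execute(sa.insert(status_t).values(workflow_uuid=uuid, status=status))

def record_inputs(uuid: str, inputs: str, conn: sa.Connection) -> None:
    conn.execute(sa.insert(inputs_t).values(workflow_uuid=uuid, inputs=inputs))

def enqueue(uuid: str, queue: str, conn: sa.Connection) -> None:
    conn.execute(sa.insert(queue_t).values(workflow_uuid=uuid, queue_name=queue))

def init_workflow(engine: sa.Engine, uuid: str, inputs: str, queue: str | None) -> None:
    # One transaction: status, inputs, and queue entry land together or not at all.
    with engine.begin() as conn:
        insert_status(uuid, "ENQUEUED" if queue else "PENDING", conn)
        record_inputs(uuid, inputs, conn)
        if queue is not None:
            enqueue(uuid, queue, conn)

if __name__ == "__main__":
    engine = sa.create_engine("sqlite://")
    metadata.create_all(engine)
    init_workflow(engine, "wf-1", '{"args": []}', "default_queue")
```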
dbos/_workflow_commands.py
CHANGED
```diff
@@ -71,6 +71,7 @@ def list_workflows(
     offset: Optional[int] = None,
     sort_desc: bool = False,
     request: bool = False,
+    workflow_id_prefix: Optional[str] = None,
 ) -> List[WorkflowStatus]:
     input = GetWorkflowsInput()
     input.workflow_ids = workflow_ids
@@ -83,6 +84,7 @@ def list_workflows(
     input.name = name
     input.offset = offset
     input.sort_desc = sort_desc
+    input.workflow_id_prefix = workflow_id_prefix
 
     output: GetWorkflowsOutput = sys_db.get_workflows(input)
     infos: List[WorkflowStatus] = []
```
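`list_workflows` just threads the new parameter into `GetWorkflowsInput`; the actual filtering happens in `get_workflows` (in `_sys_db.py` above) via SQLAlchemy's `startswith`. A minimal sketch of what that operator compiles to, with a hypothetical one-column table:

```python
import sqlalchemy as sa

t = sa.table("workflow_status", sa.column("workflow_uuid"))
query = sa.select(t).where(t.c.workflow_uuid.startswith("sched-"))
print(query)
# Renders roughly as:
#   SELECT workflow_status.workflow_uuid FROM workflow_status
#   WHERE (workflow_status.workflow_uuid LIKE :workflow_uuid_1 || '%')
```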
{dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/RECORD
CHANGED
```diff
@@ -1,19 +1,19 @@
-dbos-0.26.
-dbos-0.26.
-dbos-0.26.
-dbos-0.26.
+dbos-0.26.0a18.dist-info/METADATA,sha256=QG3XkqovR0FvEIL1_sHK6K80-comGqhpyC1LCWUDEzA,5554
+dbos-0.26.0a18.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-0.26.0a18.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-0.26.0a18.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=3NQfGlBiiUSM_v88STdVP3rNZvGkUL_9WbSotKb8Voo,873
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
 dbos/_admin_server.py,sha256=RrbABfR1D3p9c_QLrCSrgFuYce6FKi0fjMRIYLjO_Y8,9038
 dbos/_app_db.py,sha256=Q9lEyCJFoZMTlnjMO8Pj8bczVmVWyDOP8qPQ6l5PpEU,11241
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
-dbos/_client.py,sha256=
+dbos/_client.py,sha256=QiIR-mwRYb1ffgwGR96ICQgFORki2QpR5najtVJ2WsA,10906
 dbos/_conductor/conductor.py,sha256=HYzVL29IMMrs2Mnms_7cHJynCnmmEN5SDQOMjzn3UoU,16840
 dbos/_conductor/protocol.py,sha256=xN7pmooyF1pqbH1b6WhllU5718P7zSb_b0KCwA6bzcs,6716
 dbos/_context.py,sha256=I8sLkdKTTkZEz7wG-MjynaQB6XEF2bLXuwNksiauP7w,19430
-dbos/_core.py,sha256=
+dbos/_core.py,sha256=uxDIJui4WS_2V1k2np0Ifue_IRzLTyq-c52bgZSQYn4,45118
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=
+dbos/_dbos.py,sha256=jtvBQOvwdXFfknx9pDHgKC4DuiH58ICAs_0NoJQMI4w,47526
 dbos/_dbos_config.py,sha256=m05IFjM0jSwZBsnFMF_4qP2JkjVFc0gqyM2tnotXq20,20636
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=NmcgqmR5rQA_4igfeqh8ugNT2z3YmoOvuep_MEtxTiY,5854
@@ -45,7 +45,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
 dbos/_schemas/system_database.py,sha256=W9eSpL7SZzQkxcEZ4W07BOcwkkDr35b9oCjUOgfHWek,5336
 dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
-dbos/_sys_db.py,sha256=
+dbos/_sys_db.py,sha256=kfNR9R7rQ6MTqBuPt4OI5nZElIJNXlGuUjG_ypGKHWI,71195
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py,sha256=nJMN3ZD2lmwg4Dcgmiwqc-tQGuCJuJal2Xl85iA277U,2453
@@ -58,11 +58,11 @@ dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sh
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=dFDSFlta-rfA3-ahIRLYwnnoAOmlavdxAGllqwFgnCA,2440
 dbos/_utils.py,sha256=nFRUHzVjXG5AusF85AlYHikj63Tzi-kQm992ihsrAxA,201
-dbos/_workflow_commands.py,sha256=
+dbos/_workflow_commands.py,sha256=hHNcW4zopgxVXWfg3flHwqZEFGYpYp8ZAfUXmqiULUk,6261
 dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
 dbos/cli/_template_init.py,sha256=-WW3kbq0W_Tq4WbMqb1UGJG3xvJb3woEY5VspG95Srk,2857
 dbos/cli/cli.py,sha256=1qCTs__A9LOEfU44XZ6TufwmRwe68ZEwbWEPli3vnVM,17873
 dbos/dbos-config.schema.json,sha256=i7jcxXqByKq0Jzv3nAUavONtj03vTwj6vWP4ylmBr8o,5694
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-0.26.
+dbos-0.26.0a18.dist-info/RECORD,,
```
{dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/WHEEL
File without changes
{dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/entry_points.txt
File without changes
{dbos-0.26.0a15.dist-info → dbos-0.26.0a18.dist-info}/licenses/LICENSE
File without changes