dbos 0.20.0a9__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbos has been flagged as potentially problematic. Consult the package's advisory listing on its registry page for more details before upgrading.

dbos/_context.py CHANGED
@@ -63,7 +63,6 @@ class DBOSContext:
63
63
  self.parent_workflow_fid: int = -1
64
64
  self.workflow_id: str = ""
65
65
  self.function_id: int = -1
66
- self.in_recovery: bool = False
67
66
 
68
67
  self.curr_step_function_id: int = -1
69
68
  self.curr_tx_function_id: int = -1
@@ -82,7 +81,6 @@ class DBOSContext:
82
81
  rv.is_within_set_workflow_id_block = self.is_within_set_workflow_id_block
83
82
  rv.parent_workflow_id = self.workflow_id
84
83
  rv.parent_workflow_fid = self.function_id
85
- rv.in_recovery = self.in_recovery
86
84
  rv.authenticated_user = self.authenticated_user
87
85
  rv.authenticated_roles = (
88
86
  self.authenticated_roles[:]
@@ -335,34 +333,6 @@ class SetWorkflowID:
335
333
  return False # Did not handle
336
334
 
337
335
 
338
- class SetWorkflowRecovery:
339
- def __init__(self) -> None:
340
- self.created_ctx = False
341
-
342
- def __enter__(self) -> SetWorkflowRecovery:
343
- # Code to create a basic context
344
- ctx = get_local_dbos_context()
345
- if ctx is None:
346
- self.created_ctx = True
347
- _set_local_dbos_context(DBOSContext())
348
- assert_current_dbos_context().in_recovery = True
349
-
350
- return self
351
-
352
- def __exit__(
353
- self,
354
- exc_type: Optional[Type[BaseException]],
355
- exc_value: Optional[BaseException],
356
- traceback: Optional[TracebackType],
357
- ) -> Literal[False]:
358
- assert assert_current_dbos_context().in_recovery == True
359
- assert_current_dbos_context().in_recovery = False
360
- # Code to clean up the basic context if we created it
361
- if self.created_ctx:
362
- _clear_local_dbos_context()
363
- return False # Did not handle
364
-
365
-
366
336
  class EnterDBOSWorkflow(AbstractContextManager[DBOSContext, Literal[False]]):
367
337
  def __init__(self, attributes: TracedAttributes) -> None:
368
338
  self.created_ctx = False
dbos/_core.py CHANGED
@@ -185,8 +185,8 @@ def _init_workflow(
185
185
  # Synchronously record the status and inputs for workflows and single-step workflows
186
186
  # We also have to do this for single-step workflows because of the foreign key constraint on the operation outputs table
187
187
  # TODO: Make this transactional (and with the queue step below)
188
- wf_status = dbos._sys_db.update_workflow_status(
189
- status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
188
+ wf_status = dbos._sys_db.insert_workflow_status(
189
+ status, max_recovery_attempts=max_recovery_attempts
190
190
  )
191
191
  # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
192
192
  dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
dbos/_dbos.py CHANGED
@@ -56,7 +56,7 @@ from ._registrations import (
56
56
  )
57
57
  from ._roles import default_required_roles, required_roles
58
58
  from ._scheduler import ScheduledWorkflow, scheduled
59
- from ._sys_db import WorkflowStatusString, reset_system_database
59
+ from ._sys_db import reset_system_database
60
60
  from ._tracer import dbos_tracer
61
61
 
62
62
  if TYPE_CHECKING:
@@ -613,6 +613,7 @@ class DBOS:
613
613
  workflow_id=workflow_id,
614
614
  status=stat["status"],
615
615
  name=stat["name"],
616
+ executor_id=stat["executor_id"],
616
617
  recovery_attempts=stat["recovery_attempts"],
617
618
  class_name=stat["class_name"],
618
619
  config_name=stat["config_name"],
@@ -800,14 +801,13 @@ class DBOS:
800
801
  @classmethod
801
802
  def cancel_workflow(cls, workflow_id: str) -> None:
802
803
  """Cancel a workflow by ID."""
803
- _get_dbos_instance()._sys_db.set_workflow_status(
804
- workflow_id, WorkflowStatusString.CANCELLED, False
805
- )
804
+ _get_dbos_instance()._sys_db.cancel_workflow(workflow_id)
806
805
 
807
806
  @classmethod
808
- def resume_workflow(cls, workflow_id: str) -> None:
807
+ def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
809
808
  """Resume a workflow by ID."""
810
- execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
809
+ _get_dbos_instance()._sys_db.resume_workflow(workflow_id)
810
+ return execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
811
811
 
812
812
  @classproperty
813
813
  def logger(cls) -> Logger:
@@ -910,6 +910,7 @@ class WorkflowStatus:
910
910
  workflow_id(str): The ID of the workflow execution
911
911
  status(str): The status of the execution, from `WorkflowStatusString`
912
912
  name(str): The workflow function name
913
+ executor_id(str): The ID of the executor running the workflow
913
914
  class_name(str): For member functions, the name of the class containing the workflow function
914
915
  config_name(str): For instance member functions, the name of the class instance for the execution
915
916
  queue_name(str): For workflows that are or were queued, the queue name
@@ -923,6 +924,7 @@ class WorkflowStatus:
923
924
  workflow_id: str
924
925
  status: str
925
926
  name: str
927
+ executor_id: Optional[str]
926
928
  class_name: Optional[str]
927
929
  config_name: Optional[str]
928
930
  queue_name: Optional[str]
dbos/_dbos_config.py CHANGED
@@ -123,7 +123,10 @@ def get_dbos_database_url(config_file_path: str = DBOS_CONFIG_PATH) -> str:
123
123
 
124
124
 
125
125
  def load_config(
126
- config_file_path: str = DBOS_CONFIG_PATH, *, use_db_wizard: bool = True
126
+ config_file_path: str = DBOS_CONFIG_PATH,
127
+ *,
128
+ use_db_wizard: bool = True,
129
+ silent: bool = False,
127
130
  ) -> ConfigFile:
128
131
  """
129
132
  Load the DBOS `ConfigFile` from the specified path (typically `dbos-config.yaml`).
@@ -188,18 +191,19 @@ def load_config(
188
191
  # Load the DB connection file. Use its values for missing fields from dbos-config.yaml. Use defaults otherwise.
189
192
  data = cast(ConfigFile, data)
190
193
  db_connection = load_db_connection()
191
- if data["database"].get("hostname"):
192
- print(
193
- "[bold blue]Loading database connection parameters from dbos-config.yaml[/bold blue]"
194
- )
195
- elif db_connection.get("hostname"):
196
- print(
197
- "[bold blue]Loading database connection parameters from .dbos/db_connection[/bold blue]"
198
- )
199
- else:
200
- print(
201
- "[bold blue]Using default database connection parameters (localhost)[/bold blue]"
202
- )
194
+ if not silent:
195
+ if data["database"].get("hostname"):
196
+ print(
197
+ "[bold blue]Loading database connection parameters from dbos-config.yaml[/bold blue]"
198
+ )
199
+ elif db_connection.get("hostname"):
200
+ print(
201
+ "[bold blue]Loading database connection parameters from .dbos/db_connection[/bold blue]"
202
+ )
203
+ else:
204
+ print(
205
+ "[bold blue]Using default database connection parameters (localhost)[/bold blue]"
206
+ )
203
207
 
204
208
  data["database"]["hostname"] = (
205
209
  data["database"].get("hostname") or db_connection.get("hostname") or "localhost"
dbos/_recovery.py CHANGED
@@ -4,24 +4,31 @@ import time
4
4
  import traceback
5
5
  from typing import TYPE_CHECKING, Any, List
6
6
 
7
- from ._context import SetWorkflowRecovery
8
7
  from ._core import execute_workflow_by_id
9
8
  from ._error import DBOSWorkflowFunctionNotFoundError
9
+ from ._sys_db import GetPendingWorkflowsOutput
10
10
 
11
11
  if TYPE_CHECKING:
12
12
  from ._dbos import DBOS, WorkflowHandle
13
13
 
14
14
 
15
- def startup_recovery_thread(dbos: "DBOS", workflow_ids: List[str]) -> None:
15
+ def startup_recovery_thread(
16
+ dbos: "DBOS", pending_workflows: List[GetPendingWorkflowsOutput]
17
+ ) -> None:
16
18
  """Attempt to recover local pending workflows on startup using a background thread."""
17
19
  stop_event = threading.Event()
18
20
  dbos.stop_events.append(stop_event)
19
- while not stop_event.is_set() and len(workflow_ids) > 0:
21
+ while not stop_event.is_set() and len(pending_workflows) > 0:
20
22
  try:
21
- for workflowID in list(workflow_ids):
22
- with SetWorkflowRecovery():
23
- execute_workflow_by_id(dbos, workflowID)
24
- workflow_ids.remove(workflowID)
23
+ for pending_workflow in list(pending_workflows):
24
+ if (
25
+ pending_workflow.queue_name
26
+ and pending_workflow.queue_name != "_dbos_internal_queue"
27
+ ):
28
+ dbos._sys_db.clear_queue_assignment(pending_workflow.workflow_uuid)
29
+ continue
30
+ execute_workflow_by_id(dbos, pending_workflow.workflow_uuid)
31
+ pending_workflows.remove(pending_workflow)
25
32
  except DBOSWorkflowFunctionNotFoundError:
26
33
  time.sleep(1)
27
34
  except Exception as e:
@@ -41,13 +48,23 @@ def recover_pending_workflows(
41
48
  f"Skip local recovery because it's running in a VM: {os.environ.get('DBOS__VMID')}"
42
49
  )
43
50
  dbos.logger.debug(f"Recovering pending workflows for executor: {executor_id}")
44
- workflow_ids = dbos._sys_db.get_pending_workflows(executor_id)
45
- dbos.logger.debug(f"Pending workflows: {workflow_ids}")
46
-
47
- for workflowID in workflow_ids:
48
- with SetWorkflowRecovery():
49
- handle = execute_workflow_by_id(dbos, workflowID)
50
- workflow_handles.append(handle)
51
+ pending_workflows = dbos._sys_db.get_pending_workflows(executor_id)
52
+ for pending_workflow in pending_workflows:
53
+ if (
54
+ pending_workflow.queue_name
55
+ and pending_workflow.queue_name != "_dbos_internal_queue"
56
+ ):
57
+ try:
58
+ dbos._sys_db.clear_queue_assignment(pending_workflow.workflow_uuid)
59
+ workflow_handles.append(
60
+ dbos.retrieve_workflow(pending_workflow.workflow_uuid)
61
+ )
62
+ except Exception as e:
63
+ dbos.logger.error(e)
64
+ else:
65
+ workflow_handles.append(
66
+ execute_workflow_by_id(dbos, pending_workflow.workflow_uuid)
67
+ )
51
68
 
52
69
  dbos.logger.info("Recovered pending workflows")
53
70
  return workflow_handles
dbos/_sys_db.py CHANGED
@@ -126,11 +126,26 @@ class GetWorkflowsInput:
126
126
  )
127
127
 
128
128
 
129
+ class GetQueuedWorkflowsInput(TypedDict):
130
+ queue_name: Optional[str]
131
+ status: Optional[str]
132
+ start_time: Optional[str] # Timestamp in ISO 8601 format
133
+ end_time: Optional[str] # Timestamp in ISO 8601 format
134
+ limit: Optional[int] # Return up to this many workflows IDs.
135
+ name: Optional[str] # The name of the workflow function
136
+
137
+
129
138
  class GetWorkflowsOutput:
130
139
  def __init__(self, workflow_uuids: List[str]):
131
140
  self.workflow_uuids = workflow_uuids
132
141
 
133
142
 
143
+ class GetPendingWorkflowsOutput:
144
+ def __init__(self, *, workflow_uuid: str, queue_name: Optional[str] = None):
145
+ self.workflow_uuid: str = workflow_uuid
146
+ self.queue_name: Optional[str] = queue_name
147
+
148
+
134
149
  class WorkflowInformation(TypedDict, total=False):
135
150
  workflow_uuid: str
136
151
  status: WorkflowStatuses # The status of the workflow.
@@ -243,66 +258,50 @@ class SystemDatabase:
243
258
  dbos_logger.debug("Waiting for system buffers to be exported")
244
259
  time.sleep(1)
245
260
 
246
- def update_workflow_status(
261
+ def insert_workflow_status(
247
262
  self,
248
263
  status: WorkflowStatusInternal,
249
- replace: bool = True,
250
- in_recovery: bool = False,
251
264
  *,
252
- conn: Optional[sa.Connection] = None,
253
265
  max_recovery_attempts: int = DEFAULT_MAX_RECOVERY_ATTEMPTS,
254
266
  ) -> WorkflowStatuses:
255
267
  wf_status: WorkflowStatuses = status["status"]
256
268
 
257
- cmd = pg.insert(SystemSchema.workflow_status).values(
258
- workflow_uuid=status["workflow_uuid"],
259
- status=status["status"],
260
- name=status["name"],
261
- class_name=status["class_name"],
262
- config_name=status["config_name"],
263
- output=status["output"],
264
- error=status["error"],
265
- executor_id=status["executor_id"],
266
- application_version=status["app_version"],
267
- application_id=status["app_id"],
268
- request=status["request"],
269
- authenticated_user=status["authenticated_user"],
270
- authenticated_roles=status["authenticated_roles"],
271
- assumed_role=status["assumed_role"],
272
- queue_name=status["queue_name"],
273
- )
274
- if replace:
275
- cmd = cmd.on_conflict_do_update(
276
- index_elements=["workflow_uuid"],
277
- set_=dict(
278
- status=status["status"],
279
- output=status["output"],
280
- error=status["error"],
281
- ),
282
- )
283
- elif in_recovery:
284
- cmd = cmd.on_conflict_do_update(
285
- index_elements=["workflow_uuid"],
286
- set_=dict(
287
- recovery_attempts=SystemSchema.workflow_status.c.recovery_attempts
288
- + 1,
269
+ cmd = (
270
+ pg.insert(SystemSchema.workflow_status)
271
+ .values(
272
+ workflow_uuid=status["workflow_uuid"],
273
+ status=status["status"],
274
+ name=status["name"],
275
+ class_name=status["class_name"],
276
+ config_name=status["config_name"],
277
+ output=status["output"],
278
+ error=status["error"],
279
+ executor_id=status["executor_id"],
280
+ application_version=status["app_version"],
281
+ application_id=status["app_id"],
282
+ request=status["request"],
283
+ authenticated_user=status["authenticated_user"],
284
+ authenticated_roles=status["authenticated_roles"],
285
+ assumed_role=status["assumed_role"],
286
+ queue_name=status["queue_name"],
287
+ recovery_attempts=(
288
+ 1 if wf_status != WorkflowStatusString.ENQUEUED.value else 0
289
289
  ),
290
290
  )
291
- else:
292
- # A blank update so that we can return the existing status
293
- cmd = cmd.on_conflict_do_update(
291
+ .on_conflict_do_update(
294
292
  index_elements=["workflow_uuid"],
295
293
  set_=dict(
296
- recovery_attempts=SystemSchema.workflow_status.c.recovery_attempts
294
+ recovery_attempts=(
295
+ SystemSchema.workflow_status.c.recovery_attempts + 1
296
+ ),
297
297
  ),
298
298
  )
299
+ )
300
+
299
301
  cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name) # type: ignore
300
302
 
301
- if conn is not None:
302
- results = conn.execute(cmd)
303
- else:
304
- with self.engine.begin() as c:
305
- results = c.execute(cmd)
303
+ with self.engine.begin() as c:
304
+ results = c.execute(cmd)
306
305
 
307
306
  row = results.fetchone()
308
307
  if row is not None:
@@ -325,7 +324,9 @@ class SystemDatabase:
325
324
  if err_msg is not None:
326
325
  raise DBOSConflictingWorkflowError(status["workflow_uuid"], err_msg)
327
326
 
328
- if in_recovery and recovery_attempts > max_recovery_attempts:
327
+ # Every time we start executing a workflow (and thus attempt to insert its status), we increment `recovery_attempts` by 1.
328
+ # When this number becomes equal to `maxRetries + 1`, we mark the workflow as `RETRIES_EXCEEDED`.
329
+ if recovery_attempts > max_recovery_attempts + 1:
329
330
  with self.engine.begin() as c:
330
331
  c.execute(
331
332
  sa.delete(SystemSchema.workflow_queue).where(
@@ -352,38 +353,107 @@ class SystemDatabase:
352
353
  status["workflow_uuid"], max_recovery_attempts
353
354
  )
354
355
 
355
- # Record we have exported status for this single-transaction workflow
356
+ return wf_status
357
+
358
+ def update_workflow_status(
359
+ self,
360
+ status: WorkflowStatusInternal,
361
+ *,
362
+ conn: Optional[sa.Connection] = None,
363
+ ) -> None:
364
+ wf_status: WorkflowStatuses = status["status"]
365
+
366
+ cmd = (
367
+ pg.insert(SystemSchema.workflow_status)
368
+ .values(
369
+ workflow_uuid=status["workflow_uuid"],
370
+ status=status["status"],
371
+ name=status["name"],
372
+ class_name=status["class_name"],
373
+ config_name=status["config_name"],
374
+ output=status["output"],
375
+ error=status["error"],
376
+ executor_id=status["executor_id"],
377
+ application_version=status["app_version"],
378
+ application_id=status["app_id"],
379
+ request=status["request"],
380
+ authenticated_user=status["authenticated_user"],
381
+ authenticated_roles=status["authenticated_roles"],
382
+ assumed_role=status["assumed_role"],
383
+ queue_name=status["queue_name"],
384
+ recovery_attempts=(
385
+ 1 if wf_status != WorkflowStatusString.ENQUEUED.value else 0
386
+ ),
387
+ )
388
+ .on_conflict_do_update(
389
+ index_elements=["workflow_uuid"],
390
+ set_=dict(
391
+ status=status["status"],
392
+ output=status["output"],
393
+ error=status["error"],
394
+ ),
395
+ )
396
+ )
397
+
398
+ if conn is not None:
399
+ conn.execute(cmd)
400
+ else:
401
+ with self.engine.begin() as c:
402
+ c.execute(cmd)
403
+
404
+ # If this is a single-transaction workflow, record that its status has been exported
356
405
  if status["workflow_uuid"] in self._temp_txn_wf_ids:
357
406
  self._exported_temp_txn_wf_status.add(status["workflow_uuid"])
358
407
 
359
- return wf_status
360
-
361
- def set_workflow_status(
408
+ def cancel_workflow(
362
409
  self,
363
- workflow_uuid: str,
364
- status: WorkflowStatusString,
365
- reset_recovery_attempts: bool,
410
+ workflow_id: str,
366
411
  ) -> None:
367
412
  with self.engine.begin() as c:
368
- stmt = (
413
+ # Remove the workflow from the queues table so it does not block the table
414
+ c.execute(
415
+ sa.delete(SystemSchema.workflow_queue).where(
416
+ SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
417
+ )
418
+ )
419
+ # Set the workflow's status to CANCELLED
420
+ c.execute(
369
421
  sa.update(SystemSchema.workflow_status)
370
- .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
422
+ .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
371
423
  .values(
372
- status=status,
424
+ status=WorkflowStatusString.CANCELLED.value,
373
425
  )
374
426
  )
375
- c.execute(stmt)
376
427
 
377
- if reset_recovery_attempts:
378
- with self.engine.begin() as c:
379
- stmt = (
380
- sa.update(SystemSchema.workflow_status)
381
- .where(
382
- SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid
383
- )
384
- .values(recovery_attempts=reset_recovery_attempts)
428
+ def resume_workflow(
429
+ self,
430
+ workflow_id: str,
431
+ ) -> None:
432
+ with self.engine.begin() as c:
433
+ # Check the status of the workflow. If it is complete, do nothing.
434
+ row = c.execute(
435
+ sa.select(
436
+ SystemSchema.workflow_status.c.status,
437
+ ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
438
+ ).fetchone()
439
+ if (
440
+ row is None
441
+ or row[0] == WorkflowStatusString.SUCCESS.value
442
+ or row[0] == WorkflowStatusString.ERROR.value
443
+ ):
444
+ return
445
+ # Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
446
+ c.execute(
447
+ sa.delete(SystemSchema.workflow_queue).where(
448
+ SystemSchema.workflow_queue.c.workflow_uuid == workflow_id
385
449
  )
386
- c.execute(stmt)
450
+ )
451
+ # Set the workflow's status to PENDING and clear its recovery attempts.
452
+ c.execute(
453
+ sa.update(SystemSchema.workflow_status)
454
+ .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
455
+ .values(status=WorkflowStatusString.PENDING.value, recovery_attempts=0)
456
+ )
387
457
 
388
458
  def get_workflow_status(
389
459
  self, workflow_uuid: str
@@ -401,6 +471,7 @@ class SystemDatabase:
401
471
  SystemSchema.workflow_status.c.authenticated_roles,
402
472
  SystemSchema.workflow_status.c.assumed_role,
403
473
  SystemSchema.workflow_status.c.queue_name,
474
+ SystemSchema.workflow_status.c.executor_id,
404
475
  ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
405
476
  ).fetchone()
406
477
  if row is None:
@@ -415,7 +486,7 @@ class SystemDatabase:
415
486
  "error": None,
416
487
  "app_id": None,
417
488
  "app_version": None,
418
- "executor_id": None,
489
+ "executor_id": row[10],
419
490
  "request": row[2],
420
491
  "recovery_attempts": row[3],
421
492
  "authenticated_user": row[6],
@@ -601,9 +672,8 @@ class SystemDatabase:
601
672
 
602
673
  def get_workflows(self, input: GetWorkflowsInput) -> GetWorkflowsOutput:
603
674
  query = sa.select(SystemSchema.workflow_status.c.workflow_uuid).order_by(
604
- SystemSchema.workflow_status.c.created_at.desc()
675
+ SystemSchema.workflow_status.c.created_at.asc()
605
676
  )
606
-
607
677
  if input.name:
608
678
  query = query.where(SystemSchema.workflow_status.c.name == input.name)
609
679
  if input.authenticated_user:
@@ -637,16 +707,73 @@ class SystemDatabase:
637
707
 
638
708
  return GetWorkflowsOutput(workflow_uuids)
639
709
 
640
- def get_pending_workflows(self, executor_id: str) -> list[str]:
710
+ def get_queued_workflows(
711
+ self, input: GetQueuedWorkflowsInput
712
+ ) -> GetWorkflowsOutput:
713
+
714
+ query = (
715
+ sa.select(SystemSchema.workflow_queue.c.workflow_uuid)
716
+ .join(
717
+ SystemSchema.workflow_status,
718
+ SystemSchema.workflow_queue.c.workflow_uuid
719
+ == SystemSchema.workflow_status.c.workflow_uuid,
720
+ )
721
+ .order_by(SystemSchema.workflow_status.c.created_at.asc())
722
+ )
723
+
724
+ if input.get("name"):
725
+ query = query.where(SystemSchema.workflow_status.c.name == input["name"])
726
+
727
+ if input.get("queue_name"):
728
+ query = query.where(
729
+ SystemSchema.workflow_queue.c.queue_name == input["queue_name"]
730
+ )
731
+
732
+ if input.get("status"):
733
+ query = query.where(
734
+ SystemSchema.workflow_status.c.status == input["status"]
735
+ )
736
+ if "start_time" in input and input["start_time"] is not None:
737
+ query = query.where(
738
+ SystemSchema.workflow_status.c.created_at
739
+ >= datetime.datetime.fromisoformat(input["start_time"]).timestamp()
740
+ * 1000
741
+ )
742
+ if "end_time" in input and input["end_time"] is not None:
743
+ query = query.where(
744
+ SystemSchema.workflow_status.c.created_at
745
+ <= datetime.datetime.fromisoformat(input["end_time"]).timestamp() * 1000
746
+ )
747
+ if input.get("limit"):
748
+ query = query.limit(input["limit"])
749
+
750
+ with self.engine.begin() as c:
751
+ rows = c.execute(query)
752
+ workflow_uuids = [row[0] for row in rows]
753
+
754
+ return GetWorkflowsOutput(workflow_uuids)
755
+
756
+ def get_pending_workflows(
757
+ self, executor_id: str
758
+ ) -> list[GetPendingWorkflowsOutput]:
641
759
  with self.engine.begin() as c:
642
760
  rows = c.execute(
643
- sa.select(SystemSchema.workflow_status.c.workflow_uuid).where(
761
+ sa.select(
762
+ SystemSchema.workflow_status.c.workflow_uuid,
763
+ SystemSchema.workflow_status.c.queue_name,
764
+ ).where(
644
765
  SystemSchema.workflow_status.c.status
645
766
  == WorkflowStatusString.PENDING.value,
646
767
  SystemSchema.workflow_status.c.executor_id == executor_id,
647
768
  )
648
769
  ).fetchall()
649
- return [row[0] for row in rows]
770
+ return [
771
+ GetPendingWorkflowsOutput(
772
+ workflow_uuid=row.workflow_uuid,
773
+ queue_name=row.queue_name,
774
+ )
775
+ for row in rows
776
+ ]
650
777
 
651
778
  def record_operation_result(
652
779
  self, result: OperationResultInternal, conn: Optional[sa.Connection] = None
@@ -1266,6 +1393,19 @@ class SystemDatabase:
1266
1393
  .values(completed_at_epoch_ms=int(time.time() * 1000))
1267
1394
  )
1268
1395
 
1396
+ def clear_queue_assignment(self, workflow_id: str) -> None:
1397
+ with self.engine.begin() as c:
1398
+ c.execute(
1399
+ sa.update(SystemSchema.workflow_queue)
1400
+ .where(SystemSchema.workflow_queue.c.workflow_uuid == workflow_id)
1401
+ .values(executor_id=None, started_at_epoch_ms=None)
1402
+ )
1403
+ c.execute(
1404
+ sa.update(SystemSchema.workflow_status)
1405
+ .where(SystemSchema.workflow_status.c.workflow_uuid == workflow_id)
1406
+ .values(executor_id=None, status=WorkflowStatusString.ENQUEUED.value)
1407
+ )
1408
+
1269
1409
 
1270
1410
  def reset_system_database(config: ConfigFile) -> None:
1271
1411
  sysdb_name = (
@@ -1,23 +1,15 @@
1
- import importlib
2
- import os
3
- import sys
4
- from typing import Any, List, Optional, cast
1
+ from typing import List, Optional, cast
5
2
 
6
3
  import typer
7
- from rich import print
8
4
 
9
- from dbos import DBOS
10
-
11
- from . import _serialization, load_config
12
- from ._core import execute_workflow_by_id
13
- from ._dbos_config import ConfigFile, _is_valid_app_name
5
+ from . import _serialization
6
+ from ._dbos_config import ConfigFile
14
7
  from ._sys_db import (
8
+ GetQueuedWorkflowsInput,
15
9
  GetWorkflowsInput,
16
10
  GetWorkflowsOutput,
17
11
  SystemDatabase,
18
12
  WorkflowStatuses,
19
- WorkflowStatusInternal,
20
- WorkflowStatusString,
21
13
  )
22
14
 
23
15
 
@@ -28,8 +20,8 @@ class WorkflowInformation:
28
20
  workflowClassName: Optional[str]
29
21
  workflowConfigName: Optional[str]
30
22
  input: Optional[_serialization.WorkflowInputs] # JSON (jsonpickle)
31
- output: Optional[str] # JSON (jsonpickle)
32
- error: Optional[str] # JSON (jsonpickle)
23
+ output: Optional[str] = None # JSON (jsonpickle)
24
+ error: Optional[str] = None # JSON (jsonpickle)
33
25
  executor_id: Optional[str]
34
26
  app_version: Optional[str]
35
27
  app_id: Optional[str]
@@ -41,19 +33,17 @@ class WorkflowInformation:
41
33
  queue_name: Optional[str]
42
34
 
43
35
 
44
- def _list_workflows(
36
+ def list_workflows(
45
37
  config: ConfigFile,
46
- li: int,
38
+ limit: int,
47
39
  user: Optional[str],
48
40
  starttime: Optional[str],
49
41
  endtime: Optional[str],
50
42
  status: Optional[str],
51
43
  request: bool,
52
44
  appversion: Optional[str],
45
+ name: Optional[str],
53
46
  ) -> List[WorkflowInformation]:
54
-
55
- sys_db = None
56
-
57
47
  try:
58
48
  sys_db = SystemDatabase(config)
59
49
 
@@ -64,24 +54,55 @@ def _list_workflows(
64
54
  if status is not None:
65
55
  input.status = cast(WorkflowStatuses, status)
66
56
  input.application_version = appversion
67
- input.limit = li
57
+ input.limit = limit
58
+ input.name = name
68
59
 
69
60
  output: GetWorkflowsOutput = sys_db.get_workflows(input)
70
-
71
61
  infos: List[WorkflowInformation] = []
62
+ for workflow_id in output.workflow_uuids:
63
+ info = _get_workflow_info(
64
+ sys_db, workflow_id, request
65
+ ) # Call the method for each ID
66
+ if info is not None:
67
+ infos.append(info)
68
+
69
+ return infos
70
+ except Exception as e:
71
+ typer.echo(f"Error listing workflows: {e}")
72
+ return []
73
+ finally:
74
+ if sys_db:
75
+ sys_db.destroy()
72
76
 
73
- if output.workflow_uuids is None:
74
- typer.echo("No workflows found")
75
- return {}
76
77
 
78
+ def list_queued_workflows(
79
+ config: ConfigFile,
80
+ limit: Optional[int] = None,
81
+ start_time: Optional[str] = None,
82
+ end_time: Optional[str] = None,
83
+ queue_name: Optional[str] = None,
84
+ status: Optional[str] = None,
85
+ name: Optional[str] = None,
86
+ request: bool = False,
87
+ ) -> List[WorkflowInformation]:
88
+ try:
89
+ sys_db = SystemDatabase(config)
90
+ input: GetQueuedWorkflowsInput = {
91
+ "queue_name": queue_name,
92
+ "start_time": start_time,
93
+ "end_time": end_time,
94
+ "status": status,
95
+ "limit": limit,
96
+ "name": name,
97
+ }
98
+ output: GetWorkflowsOutput = sys_db.get_queued_workflows(input)
99
+ infos: List[WorkflowInformation] = []
77
100
  for workflow_id in output.workflow_uuids:
78
101
  info = _get_workflow_info(
79
102
  sys_db, workflow_id, request
80
103
  ) # Call the method for each ID
81
-
82
104
  if info is not None:
83
105
  infos.append(info)
84
-
85
106
  return infos
86
107
  except Exception as e:
87
108
  typer.echo(f"Error listing workflows: {e}")
@@ -91,17 +112,13 @@ def _list_workflows(
91
112
  sys_db.destroy()
92
113
 
93
114
 
94
- def _get_workflow(
115
+ def get_workflow(
95
116
  config: ConfigFile, uuid: str, request: bool
96
117
  ) -> Optional[WorkflowInformation]:
97
- sys_db = None
98
-
99
118
  try:
100
119
  sys_db = SystemDatabase(config)
101
-
102
120
  info = _get_workflow_info(sys_db, uuid, request)
103
121
  return info
104
-
105
122
  except Exception as e:
106
123
  typer.echo(f"Error getting workflow: {e}")
107
124
  return None
@@ -110,18 +127,13 @@ def _get_workflow(
110
127
  sys_db.destroy()
111
128
 
112
129
 
113
- def _cancel_workflow(config: ConfigFile, uuid: str) -> None:
114
- # config = load_config()
115
- sys_db = None
116
-
130
+ def cancel_workflow(config: ConfigFile, uuid: str) -> None:
117
131
  try:
118
132
  sys_db = SystemDatabase(config)
119
- sys_db.set_workflow_status(uuid, WorkflowStatusString.CANCELLED, False)
120
- return
121
-
133
+ sys_db.cancel_workflow(uuid)
122
134
  except Exception as e:
123
135
  typer.echo(f"Failed to connect to DBOS system database: {e}")
124
- return None
136
+ raise e
125
137
  finally:
126
138
  if sys_db:
127
139
  sys_db.destroy()
dbos/cli/cli.py CHANGED
@@ -19,14 +19,21 @@ from .. import load_config
19
19
  from .._app_db import ApplicationDatabase
20
20
  from .._dbos_config import _is_valid_app_name
21
21
  from .._sys_db import SystemDatabase, reset_system_database
22
- from .._workflow_commands import _cancel_workflow, _get_workflow, _list_workflows
22
+ from .._workflow_commands import (
23
+ cancel_workflow,
24
+ get_workflow,
25
+ list_queued_workflows,
26
+ list_workflows,
27
+ )
23
28
  from ..cli._github_init import create_template_from_github
24
29
  from ._template_init import copy_template, get_project_name, get_templates_directory
25
30
 
26
31
  app = typer.Typer()
27
32
  workflow = typer.Typer()
33
+ queue = typer.Typer()
28
34
 
29
35
  app.add_typer(workflow, name="workflow", help="Manage DBOS workflows")
36
+ workflow.add_typer(queue, name="queue", help="Manage enqueued workflows")
30
37
 
31
38
 
32
39
  def _on_windows() -> bool:
@@ -272,18 +279,22 @@ def list(
272
279
  help="Retrieve workflows with this application version",
273
280
  ),
274
281
  ] = None,
282
+ name: Annotated[
283
+ typing.Optional[str],
284
+ typer.Option(
285
+ "--name",
286
+ "-n",
287
+ help="Retrieve workflows with this name",
288
+ ),
289
+ ] = None,
275
290
  request: Annotated[
276
291
  bool,
277
292
  typer.Option("--request", help="Retrieve workflow request information"),
278
293
  ] = True,
279
- appdir: Annotated[
280
- typing.Optional[str],
281
- typer.Option("--app-dir", "-d", help="Specify the application root directory"),
282
- ] = None,
283
294
  ) -> None:
284
- config = load_config()
285
- workflows = _list_workflows(
286
- config, limit, user, starttime, endtime, status, request, appversion
295
+ config = load_config(silent=True)
296
+ workflows = list_workflows(
297
+ config, limit, user, starttime, endtime, status, request, appversion, name
287
298
  )
288
299
  print(jsonpickle.encode(workflows, unpicklable=False))
289
300
 
@@ -291,17 +302,13 @@ def list(
291
302
  @workflow.command(help="Retrieve the status of a workflow")
292
303
  def get(
293
304
  uuid: Annotated[str, typer.Argument()],
294
- appdir: Annotated[
295
- typing.Optional[str],
296
- typer.Option("--app-dir", "-d", help="Specify the application root directory"),
297
- ] = None,
298
305
  request: Annotated[
299
306
  bool,
300
307
  typer.Option("--request", help="Retrieve workflow request information"),
301
308
  ] = True,
302
309
  ) -> None:
303
- config = load_config()
304
- print(jsonpickle.encode(_get_workflow(config, uuid, request), unpicklable=False))
310
+ config = load_config(silent=True)
311
+ print(jsonpickle.encode(get_workflow(config, uuid, request), unpicklable=False))
305
312
 
306
313
 
307
314
  @workflow.command(
@@ -309,13 +316,9 @@ def get(
309
316
  )
310
317
  def cancel(
311
318
  uuid: Annotated[str, typer.Argument()],
312
- appdir: Annotated[
313
- typing.Optional[str],
314
- typer.Option("--app-dir", "-d", help="Specify the application root directory"),
315
- ] = None,
316
319
  ) -> None:
317
320
  config = load_config()
318
- _cancel_workflow(config, uuid)
321
+ cancel_workflow(config, uuid)
319
322
  print(f"Workflow {uuid} has been cancelled")
320
323
 
321
324
 
@@ -363,5 +366,70 @@ def restart(
363
366
  print(f"Failed to resume workflow {uuid}. Status code: {response.status_code}")
364
367
 
365
368
 
369
+ @queue.command(name="list", help="List enqueued functions for your application")
370
+ def list_queue(
371
+ limit: Annotated[
372
+ typing.Optional[int],
373
+ typer.Option("--limit", "-l", help="Limit the results returned"),
374
+ ] = None,
375
+ start_time: Annotated[
376
+ typing.Optional[str],
377
+ typer.Option(
378
+ "--start-time",
379
+ "-s",
380
+ help="Retrieve functions starting after this timestamp (ISO 8601 format)",
381
+ ),
382
+ ] = None,
383
+ end_time: Annotated[
384
+ typing.Optional[str],
385
+ typer.Option(
386
+ "--end-time",
387
+ "-e",
388
+ help="Retrieve functions starting before this timestamp (ISO 8601 format)",
389
+ ),
390
+ ] = None,
391
+ status: Annotated[
392
+ typing.Optional[str],
393
+ typer.Option(
394
+ "--status",
395
+ "-S",
396
+ help="Retrieve functions with this status (PENDING, SUCCESS, ERROR, RETRIES_EXCEEDED, ENQUEUED, or CANCELLED)",
397
+ ),
398
+ ] = None,
399
+ queue_name: Annotated[
400
+ typing.Optional[str],
401
+ typer.Option(
402
+ "--queue-name",
403
+ "-q",
404
+ help="Retrieve functions on this queue",
405
+ ),
406
+ ] = None,
407
+ name: Annotated[
408
+ typing.Optional[str],
409
+ typer.Option(
410
+ "--name",
411
+ "-n",
412
+ help="Retrieve functions on this queue",
413
+ ),
414
+ ] = None,
415
+ request: Annotated[
416
+ bool,
417
+ typer.Option("--request", help="Retrieve workflow request information"),
418
+ ] = True,
419
+ ) -> None:
420
+ config = load_config(silent=True)
421
+ workflows = list_queued_workflows(
422
+ config=config,
423
+ limit=limit,
424
+ start_time=start_time,
425
+ end_time=end_time,
426
+ queue_name=queue_name,
427
+ status=status,
428
+ request=request,
429
+ name=name,
430
+ )
431
+ print(jsonpickle.encode(workflows, unpicklable=False))
432
+
433
+
366
434
  if __name__ == "__main__":
367
435
  app()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dbos
3
- Version: 0.20.0a9
3
+ Version: 0.21.0
4
4
  Summary: Ultra-lightweight durable execution in Python
5
5
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
6
6
  License: MIT
@@ -1,7 +1,7 @@
1
- dbos-0.20.0a9.dist-info/METADATA,sha256=NX9gLPLU9KG0-Rt5dpCc6Em3XznKk6yJ3kN1nO7pG9I,5309
2
- dbos-0.20.0a9.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
3
- dbos-0.20.0a9.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
4
- dbos-0.20.0a9.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
1
+ dbos-0.21.0.dist-info/METADATA,sha256=OPag79R2v9m9xnlTj2uV_DMrewS31-fiacqR38HzUH4,5307
2
+ dbos-0.21.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
3
+ dbos-0.21.0.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
4
+ dbos-0.21.0.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
5
5
  dbos/__init__.py,sha256=CxRHBHEthPL4PZoLbZhp3rdm44-KkRTT2-7DkK9d4QQ,724
6
6
  dbos/_admin_server.py,sha256=PJgneZG9-64TapZrPeJtt73puAswRImCE5uce2k2PKU,4750
7
7
  dbos/_app_db.py,sha256=_tv2vmPjjiaikwgxH3mqxgJ4nUUcG2-0uMXKWCqVu1c,5509
@@ -9,12 +9,12 @@ dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
9
9
  dbos/_cloudutils/authentication.py,sha256=V0fCWQN9stCkhbuuxgPTGpvuQcDqfU3KAxPAh01vKW4,5007
10
10
  dbos/_cloudutils/cloudutils.py,sha256=5e3CW1deSW-dI5G3QN0XbiVsBhyqT8wu7fuV2f8wtGU,7688
11
11
  dbos/_cloudutils/databases.py,sha256=x4187Djsyoa-QaG3Kog8JT2_GERsnqa93LIVanmVUmg,8393
12
- dbos/_context.py,sha256=RH08s_nee95vgxdz6AsYuVWF1LuJSVtOyIifblsa4pw,18760
13
- dbos/_core.py,sha256=-2oh2-NicMJBwTwrd2EQBQm4Vu0caozFeoS9Kj47DzM,36588
12
+ dbos/_context.py,sha256=FHB_fpE4fQt4fIJvAmMMsbY4xHwH77gsW01cFsRZjsE,17779
13
+ dbos/_core.py,sha256=nGiXyYgV8H5TRRZG0e8HCd5IZimufYQLmKNr7nBbwbo,36564
14
14
  dbos/_croniter.py,sha256=hbhgfsHBqclUS8VeLnJ9PSE9Z54z6mi4nnrr1aUXn0k,47561
15
15
  dbos/_db_wizard.py,sha256=xgKLna0_6Xi50F3o8msRosXba8NScHlpJR5ICVCkHDQ,7534
16
- dbos/_dbos.py,sha256=1PG142hzPBFguAbuBXaKS-YwzRdaIUW8087JCi78RmU,36193
17
- dbos/_dbos_config.py,sha256=h_q1gzudhsAMVkGMD0qQ6kLic6YhdJgzm50YFSIx9Bo,8196
16
+ dbos/_dbos.py,sha256=wAjdlUgDSIC_Q8D_GZYDoiKaxjtr6KNHeq6DDuUh9do,36340
17
+ dbos/_dbos_config.py,sha256=DfiqVVxNqnafkocSzLqBp1Ig5vCviDTDK_GO3zTtQqI,8298
18
18
  dbos/_error.py,sha256=vtaSsG0QW6cRlwfZ4zzZWy_IHCZlomwSlrDyGWuyn8c,4337
19
19
  dbos/_fastapi.py,sha256=ke03vqsSYDnO6XeOtOVFXj0-f-v1MGsOxa9McaROvNc,3616
20
20
  dbos/_flask.py,sha256=DZKUZR5-xOzPI7tYZ53r2PvvHVoAb8SYwLzMVFsVfjI,2608
@@ -32,7 +32,7 @@ dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4
32
32
  dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
33
33
  dbos/_outcome.py,sha256=FDMgWVjZ06vm9xO-38H17mTqBImUYQxgKs_bDCSIAhE,6648
34
34
  dbos/_queue.py,sha256=o_aczwualJTMoXb0XXL-Y5QH77OEukWzuerogbWi2ho,2779
35
- dbos/_recovery.py,sha256=jbzGYxICA2drzyzlBSy2UiXhKV_16tBVacKQdTkqf-w,2008
35
+ dbos/_recovery.py,sha256=rek9rm2CaENbbl_vu3To-BdXop7tMEyGvtoNiJLVxjQ,2772
36
36
  dbos/_registrations.py,sha256=mei6q6_3R5uei8i_Wo_TqGZs85s10shOekDX41sFYD0,6642
37
37
  dbos/_request.py,sha256=cX1B3Atlh160phgS35gF1VEEV4pD126c9F3BDgBmxZU,929
38
38
  dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
@@ -41,7 +41,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
41
41
  dbos/_schemas/application_database.py,sha256=KeyoPrF7hy_ODXV7QNike_VFSD74QBRfQ76D7QyE9HI,966
42
42
  dbos/_schemas/system_database.py,sha256=rwp4EvCSaXcUoMaRczZCvETCxGp72k3-hvLyGUDkih0,5163
43
43
  dbos/_serialization.py,sha256=YCYv0qKAwAZ1djZisBC7khvKqG-5OcIv9t9EC5PFIog,1743
44
- dbos/_sys_db.py,sha256=eXFXzmw_bq5Qp3s2_OzjkQKQj9HxMbP4AyJ2VQnJ08g,53786
44
+ dbos/_sys_db.py,sha256=U5rXoS2gA4vm8YT6Rja_YyP2EXWLlo1HqDka1tnpRjk,59460
45
45
  dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
46
46
  dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
47
47
  dbos/_templates/dbos-db-starter/__package/main.py,sha256=eI0SS9Nwj-fldtiuSzIlIG6dC91GXXwdRsoHxv6S_WI,2719
@@ -53,11 +53,11 @@ dbos/_templates/dbos-db-starter/migrations/script.py.mako,sha256=MEqL-2qATlST9TA
53
53
  dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sha256=U5thFWGqNN4QLrNXT7wUUqftIFDNE5eSdqD8JNW1mec,942
54
54
  dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
55
55
  dbos/_tracer.py,sha256=rvBY1RQU6DO7rL7EnaJJxGcmd4tP_PpGqUEE6imZnhY,2518
56
- dbos/_workflow_commands.py,sha256=tj-gJARjDJ5aYo0ii2udTAU4l36vbeXwmOYh8Q4y_ac,4625
56
+ dbos/_workflow_commands.py,sha256=gAynfrq5sAMhdNpMIphiAm_hC2-xk1ZyWEYA-whtfPs,5402
57
57
  dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
58
58
  dbos/cli/_template_init.py,sha256=AfuMaO8bmr9WsPNHr6j2cp7kjVVZDUpH7KpbTg0hhFs,2722
59
- dbos/cli/cli.py,sha256=07TXdfDhImEOjB2-yhWJc1CK07_CSF-xF7TYCtB1kRY,12410
59
+ dbos/cli/cli.py,sha256=_tXw2IQrWW7fV_h51f_R99vEBSi6aMLz-vCOxKaENiQ,14155
60
60
  dbos/dbos-config.schema.json,sha256=X5TpXNcARGceX0zQs0fVgtZW_Xj9uBbY5afPt9Rz9yk,5741
61
61
  dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
62
62
  version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
63
- dbos-0.20.0a9.dist-info/RECORD,,
63
+ dbos-0.21.0.dist-info/RECORD,,
File without changes