lsst-ctrl-mpexec 29.2025.4800__tar.gz → 30.0.0rc1__tar.gz

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in those registries.
Files changed (70)
  1. {lsst_ctrl_mpexec-29.2025.4800/python/lsst_ctrl_mpexec.egg-info → lsst_ctrl_mpexec-30.0.0rc1}/PKG-INFO +1 -1
  2. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/butler_factory.py +30 -87
  3. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/build.py +5 -1
  4. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/cleanup.py +22 -23
  5. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py +4 -4
  6. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/purge.py +20 -22
  7. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/qgraph.py +100 -92
  8. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/report.py +57 -57
  9. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/run.py +63 -64
  10. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/showInfo.py +17 -15
  11. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/singleQuantumExecutor.py +10 -2
  12. lsst_ctrl_mpexec-30.0.0rc1/python/lsst/ctrl/mpexec/version.py +2 -0
  13. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1/python/lsst_ctrl_mpexec.egg-info}/PKG-INFO +1 -1
  14. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_cliCmdCleanup.py +3 -2
  15. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_cliCmdPurge.py +1 -0
  16. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_cliCmdReport.py +2 -0
  17. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_cliCmdUpdateGraphRun.py +1 -0
  18. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_preExecInit.py +6 -0
  19. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_run.py +4 -1
  20. lsst_ctrl_mpexec-29.2025.4800/python/lsst/ctrl/mpexec/version.py +0 -2
  21. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/COPYRIGHT +0 -0
  22. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/LICENSE +0 -0
  23. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/MANIFEST.in +0 -0
  24. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/README.rst +0 -0
  25. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/bsd_license.txt +0 -0
  26. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/doc/lsst.ctrl.mpexec/CHANGES.rst +0 -0
  27. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/doc/lsst.ctrl.mpexec/configuring-pipetask-tasks.rst +0 -0
  28. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/doc/lsst.ctrl.mpexec/index.rst +0 -0
  29. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/doc/lsst.ctrl.mpexec/pipetask.rst +0 -0
  30. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/gpl-v3.0.txt +0 -0
  31. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/pyproject.toml +0 -0
  32. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/__init__.py +0 -0
  33. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/__init__.py +0 -0
  34. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/__init__.py +0 -0
  35. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/_pipeline_graph_factory.py +0 -0
  36. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/__init__.py +0 -0
  37. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/cmd/__init__.py +0 -0
  38. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/cmd/commands.py +0 -0
  39. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/opt/__init__.py +0 -0
  40. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/opt/arguments.py +0 -0
  41. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/opt/optionGroups.py +0 -0
  42. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/opt/options.py +0 -0
  43. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/pipetask.py +0 -0
  44. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/__init__.py +0 -0
  45. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/confirmable.py +0 -0
  46. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/run_qbb.py +0 -0
  47. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/script/update_graph_run.py +0 -0
  48. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/cli/utils.py +0 -0
  49. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/execFixupDataId.py +0 -0
  50. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/executionGraphFixup.py +0 -0
  51. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/log_capture.py +0 -0
  52. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/mpGraphExecutor.py +0 -0
  53. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/preExecInit.py +0 -0
  54. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/py.typed +0 -0
  55. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/quantumGraphExecutor.py +0 -0
  56. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/reports.py +0 -0
  57. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/separablePipelineExecutor.py +0 -0
  58. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/simple_pipeline_executor.py +0 -0
  59. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/taskFactory.py +0 -0
  60. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst/ctrl/mpexec/util.py +0 -0
  61. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst_ctrl_mpexec.egg-info/SOURCES.txt +0 -0
  62. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst_ctrl_mpexec.egg-info/dependency_links.txt +0 -0
  63. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst_ctrl_mpexec.egg-info/entry_points.txt +0 -0
  64. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst_ctrl_mpexec.egg-info/requires.txt +0 -0
  65. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst_ctrl_mpexec.egg-info/top_level.txt +0 -0
  66. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/python/lsst_ctrl_mpexec.egg-info/zip-safe +0 -0
  67. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/setup.cfg +0 -0
  68. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_build.py +0 -0
  69. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_cliUtils.py +0 -0
  70. {lsst_ctrl_mpexec-29.2025.4800 → lsst_ctrl_mpexec-30.0.0rc1}/tests/test_qgraph.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lsst-ctrl-mpexec
-Version: 29.2025.4800
+Version: 30.0.0rc1
 Summary: Pipeline execution infrastructure for the Rubin Observatory LSST Science Pipelines.
 Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
 License-Expression: BSD-3-Clause OR GPL-3.0-or-later
python/lsst/ctrl/mpexec/cli/butler_factory.py
@@ -311,96 +311,39 @@ class ButlerFactory:
         of ``args``.
         """
         butler = Butler.from_config(butler_config, writeable=False)
-        self = cls(
-            butler,
-            output=output,
-            output_run=output_run,
-            inputs=inputs,
-            extend_run=extend_run,
-            rebase=rebase,
-            writeable=False,
-        )
-        self.check(extend_run=extend_run, replace_run=replace_run, prune_replaced=prune_replaced)
-        if self.output and self.output.exists:
-            if replace_run:
-                replaced = self.output.chain[0]
-                inputs = list(self.output.chain[1:])
-                _LOG.debug(
-                    "Simulating collection search in '%s' after removing '%s'.", self.output.name, replaced
-                )
+        try:
+            self = cls(
+                butler,
+                output=output,
+                output_run=output_run,
+                inputs=inputs,
+                extend_run=extend_run,
+                rebase=rebase,
+                writeable=False,
+            )
+            self.check(extend_run=extend_run, replace_run=replace_run, prune_replaced=prune_replaced)
+            if self.output and self.output.exists:
+                if replace_run:
+                    replaced = self.output.chain[0]
+                    inputs = list(self.output.chain[1:])
+                    _LOG.debug(
+                        "Simulating collection search in '%s' after removing '%s'.",
+                        self.output.name,
+                        replaced,
+                    )
+                else:
+                    inputs = [self.output.name]
             else:
-                inputs = [self.output.name]
-        else:
-            inputs = list(self.inputs)
-        if extend_run:
-            assert self.output_run is not None, "Output collection has to be specified."
-            inputs.insert(0, self.output_run.name)
-        collSearch = CollectionWildcard.from_expression(inputs).require_ordered()
+                inputs = list(self.inputs)
+            if extend_run:
+                assert self.output_run is not None, "Output collection has to be specified."
+                inputs.insert(0, self.output_run.name)
+            collSearch = CollectionWildcard.from_expression(inputs).require_ordered()
+        except Exception:
+            butler.close()
+            raise
 
         return butler, collSearch, self
 
-    @classmethod
-    def make_read_butler(
-        cls,
-        butler_config: ResourcePathExpression,
-        *,
-        output: str | None,
-        output_run: str | None,
-        inputs: str | Iterable[str],
-        extend_run: bool = False,
-        rebase: bool = False,
-        replace_run: bool,
-        prune_replaced: str | None = None,
-    ) -> Butler:
-        """Construct a read-only butler according to the given command-line
-        arguments.
-
-        Parameters
-        ----------
-        butler_config : convertible to `lsst.resources.ResourcePath`
-            Path to configuration for the butler.
-        output : `str` or `None`
-            The name of a `~lsst.daf.butler.CollectionType.CHAINED`
-            input/output collection.
-        output_run : `str` or `None`
-            The name of a `~lsst.daf.butler.CollectionType.RUN` input/output
-            collection.
-        inputs : `str` or `~collections.abc.Iterable` [`str`]
-            Input collection name or iterable of collection names.
-        extend_run : `bool`
-            A boolean indicating whether ``output_run`` should already exist
-            and be extended.
-        rebase : `bool`
-            A boolean indicating whether to force the ``output`` collection to
-            be consistent with ``inputs`` and ``output`` run such that the
-            ``output`` collection has output run collections first (i.e. those
-            that start with the same prefix), then the new inputs, then any
-            original inputs not included in the new inputs.
-        replace_run : `bool`
-            Whether the ``output_run`` should be replaced in the ``output``
-            chain.
-        prune_replaced : `str` or `None`
-            If ``replace_run=True``, whether/how datasets in the old run should
-            be removed. Options are ``"purge"``, ``"unstore"``, and `None`.
-
-        Returns
-        -------
-        butler : `lsst.daf.butler.Butler`
-            A read-only butler initialized with the given collections.
-        """
-        cls.define_datastore_cache()  # Ensure that this butler can use a shared cache.
-        butler, inputs, _ = cls._make_read_parts(
-            butler_config,
-            output=output,
-            output_run=output_run,
-            inputs=inputs,
-            extend_run=extend_run,
-            rebase=rebase,
-            replace_run=replace_run,
-            prune_replaced=prune_replaced,
-        )
-        _LOG.debug("Preparing butler to read from %s.", inputs)
-        return Butler.from_config(butler=butler, collections=inputs)
-
     @classmethod
     def make_butler_and_collections(
         cls,
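The rewritten helper above is an instance of the standard close-on-failure idiom: once the butler is acquired, any exception raised during the remaining setup closes it before propagating, while success hands ownership to the caller. A minimal sketch of the same idiom with a plain file standing in for the butler (make_parts and expected_header are hypothetical names, not the LSST API):

def make_parts(path: str, expected_header: str):
    handle = open(path)  # acquired first; must not leak
    try:
        # Any of this setup may raise.
        first = handle.readline()
        if not first.startswith(expected_header):
            raise ValueError(f"unexpected header: {first!r}")
    except Exception:
        # Release before re-raising so callers never see a leaked handle.
        handle.close()
        raise
    # On success, ownership of the open handle passes to the caller.
    return handle, first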
python/lsst/ctrl/mpexec/cli/script/build.py
@@ -138,7 +138,11 @@ def build(
     if butler_config:
         butler = Butler.from_config(butler_config, writeable=False)
 
-    pipeline_graph_factory = PipelineGraphFactory(pipeline, butler, select_tasks)
+    try:
+        pipeline_graph_factory = PipelineGraphFactory(pipeline, butler, select_tasks)
+    finally:
+        if butler is not None:
+            butler.close()
 
     if pipeline_dot:
        with open(pipeline_dot, "w") as stream:
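build.py only creates a butler when butler_config is given, hence the None guard in the finally block. The same conditional-cleanup shape can also be written with contextlib; a sketch under that assumption, again with a generic closeable file rather than a Butler:

from contextlib import closing, nullcontext

def read_optional(config_path: str | None) -> str:
    # Either a real closeable resource or a no-op context manager.
    cm = closing(open(config_path)) if config_path is not None else nullcontext(None)
    with cm as handle:
        # handle is the open file, or None when no path was given.
        return handle.read() if handle is not None else ""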
python/lsst/ctrl/mpexec/cli/script/cleanup.py
@@ -95,8 +95,7 @@ class CleanupResult(ConfirmableResult):
         return msg
 
     def on_confirmation(self) -> None:
-        butler = Butler.from_config(self.butler_config, writeable=True)
-        with butler.transaction():
+        with Butler.from_config(self.butler_config, writeable=True) as butler, butler.transaction():
             for collection in self.others_to_remove:
                 butler.registry.removeCollection(collection)
             butler.removeRuns(self.runs_to_remove)
@@ -128,25 +127,25 @@ def cleanup(
     collection : str
         The name of the chained collection.
     """
-    butler = Butler.from_config(butler_config)
-    result = CleanupResult(butler_config)
-    try:
-        to_keep = set(butler.registry.getCollectionChain(collection))
-    except MissingCollectionError:
-        result.failure = NoSuchCollectionFailure(collection)
-        return result
-    except CollectionTypeError:
-        result.failure = NotChainedCollectionFailure(
-            collection, butler.registry.getCollectionType(collection).name
-        )
-        return result
-    to_keep.add(collection)
-    glob = collection + "*"
-    to_consider = set(butler.registry.queryCollections(glob))
-    to_remove = to_consider - to_keep
-    for r in to_remove:
-        if butler.registry.getCollectionType(r) == CollectionType.RUN:
-            result.runs_to_remove.append(r)
-        else:
-            result.others_to_remove.append(r)
+    with Butler.from_config(butler_config) as butler:
+        result = CleanupResult(butler_config)
+        try:
+            to_keep = set(butler.registry.getCollectionChain(collection))
+        except MissingCollectionError:
+            result.failure = NoSuchCollectionFailure(collection)
+            return result
+        except CollectionTypeError:
+            result.failure = NotChainedCollectionFailure(
+                collection, butler.registry.getCollectionType(collection).name
+            )
+            return result
+        to_keep.add(collection)
+        glob = collection + "*"
+        to_consider = set(butler.registry.queryCollections(glob))
+        to_remove = to_consider - to_keep
+        for r in to_remove:
+            if butler.registry.getCollectionType(r) == CollectionType.RUN:
+                result.runs_to_remove.append(r)
+            else:
+                result.others_to_remove.append(r)
     return result
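The on_confirmation rewrite leans on a detail of Python's with statement: managers are entered left to right, so butler.transaction() can reference the butler bound a moment earlier, and on exit the transaction unwinds before the butler closes. A self-contained sketch with toy classes (not the daf_butler API):

class Conn:
    def __enter__(self):
        print("open connection")
        return self

    def __exit__(self, *exc):
        print("close connection")
        return False

    def transaction(self):
        return Txn()


class Txn:
    def __enter__(self):
        print("begin transaction")
        return self

    def __exit__(self, exc_type, exc, tb):
        # Commit on success, roll back if the block raised.
        print("rollback" if exc_type else "commit")
        return False


# `conn` is bound before conn.transaction() is evaluated; on exit the
# transaction finalizes first, then the connection closes.
with Conn() as conn, conn.transaction():
    print("do work")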
python/lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py
@@ -94,7 +94,7 @@ def pre_exec_init_qbb(
 
     # Make QBB.
     _LOG.verbose("Initializing quantum-backed butler.")
-    butler = qg.make_init_qbb(butler_config, config_search_paths=config_search_path)
-    # Save all InitOutputs, configs, etc.
-    _LOG.verbose("Instantiating tasks and saving init-outputs.")
-    qg.init_output_run(butler)
+    with qg.make_init_qbb(butler_config, config_search_paths=config_search_path) as butler:
+        # Save all InitOutputs, configs, etc.
+        _LOG.verbose("Instantiating tasks and saving init-outputs.")
+        qg.init_output_run(butler)
python/lsst/ctrl/mpexec/cli/script/purge.py
@@ -156,8 +156,7 @@ class PurgeResult(ConfirmableResult):
         if self.failure:
             # This should not happen, it is a logic error.
             raise RuntimeError("Can not purge, there were errors preparing collections.")
-        butler = Butler.from_config(self.butler_config, writeable=True)
-        with butler.transaction():
+        with Butler.from_config(self.butler_config, writeable=True) as butler, butler.transaction():
             for c in itertools.chain(self.others_to_remove, self.chains_to_remove):
                 butler.registry.removeCollection(c)
             butler.removeRuns(self.runs_to_remove)
@@ -290,24 +289,23 @@ def purge(
         to remove the datasets after confirmation, if needed.
     """
     result = PurgeResult(butler_config)
-    butler = Butler.from_config(butler_config)
-
-    try:
-        collection_type = butler.registry.getCollectionType(collection)
-    except MissingCollectionError:
-        result.fail(TopCollectionNotFoundFailure(collection))
-        return result
-
-    if collection_type != CollectionType.CHAINED:
-        result.fail(TopCollectionIsNotChainedFailure(collection, collection_type))
-    elif parents := check_parents(butler, collection, []):
-        result.fail(TopCollectionHasParentsFailure(collection, parents))
-    else:
-        prepare_to_remove(
-            top_collection=collection,
-            parent_collection=collection,
-            purge_result=result,
-            butler=butler,
-            recursive=recursive,
-        )
+    with Butler.from_config(butler_config) as butler:
+        try:
+            collection_type = butler.registry.getCollectionType(collection)
+        except MissingCollectionError:
+            result.fail(TopCollectionNotFoundFailure(collection))
+            return result
+
+        if collection_type != CollectionType.CHAINED:
+            result.fail(TopCollectionIsNotChainedFailure(collection, collection_type))
+        elif parents := check_parents(butler, collection, []):
+            result.fail(TopCollectionHasParentsFailure(collection, parents))
+        else:
+            prepare_to_remove(
+                top_collection=collection,
+                parent_collection=collection,
+                purge_result=result,
+                butler=butler,
+                recursive=recursive,
+            )
    return result
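Note that the early "return result" for a missing collection now sits inside the with block; like an exception, a return still triggers the context manager's __exit__, so the butler is closed on every path. A quick demonstration with a toy manager:

class Tracked:
    def __init__(self):
        self.closed = False

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.closed = True
        return False


def lookup(missing: bool):
    with Tracked() as t:
        if missing:
            return "not found", t  # early return still closes t
        return "ok", t


for flag in (True, False):
    status, t = lookup(flag)
    assert t.closed, "__exit__ must run on every return path"
    print(status, t.closed)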
python/lsst/ctrl/mpexec/cli/script/qgraph.py
@@ -220,107 +220,115 @@ def qgraph(
         replace_run=replace_run,
         prune_replaced=prune_replaced,
     )
+    with butler:
+        if skip_existing and run:
+            skip_existing_in += (run,)
 
-    if skip_existing and run:
-        skip_existing_in += (run,)
-
-    qgc: PredictedQuantumGraphComponents
-    if qgraph is not None:
-        # click passes empty tuple as default value for qgraph_node_id
-        quantum_ids = (
-            {uuid.UUID(q) if not isinstance(q, uuid.UUID) else q for q in qgraph_node_id}
-            if qgraph_node_id
-            else None
-        )
-        qgraph = ResourcePath(qgraph)
-        match qgraph.getExtension():
-            case ".qgraph":
-                qgc = PredictedQuantumGraphComponents.from_old_quantum_graph(
-                    QuantumGraph.loadUri(
-                        qgraph,
-                        butler.dimensions,
-                        nodes=quantum_ids,
-                        graphID=BuildId(qgraph_id) if qgraph_id is not None else None,
+        qgc: PredictedQuantumGraphComponents
+        if qgraph is not None:
+            # click passes empty tuple as default value for qgraph_node_id
+            quantum_ids = (
+                {uuid.UUID(q) if not isinstance(q, uuid.UUID) else q for q in qgraph_node_id}
+                if qgraph_node_id
+                else None
+            )
+            qgraph = ResourcePath(qgraph)
+            match qgraph.getExtension():
+                case ".qgraph":
+                    qgc = PredictedQuantumGraphComponents.from_old_quantum_graph(
+                        QuantumGraph.loadUri(
+                            qgraph,
+                            butler.dimensions,
+                            nodes=quantum_ids,
+                            graphID=BuildId(qgraph_id) if qgraph_id is not None else None,
+                        )
                     )
-                )
-            case ".qg":
-                if qgraph_id is not None:
-                    _LOG.warning("--qgraph-id is ignored when loading new '.qg' files.")
-                if for_execution or for_init_output_run or save_qgraph or show.needs_full_qg:
-                    import_mode = TaskImportMode.ASSUME_CONSISTENT_EDGES
-                else:
-                    import_mode = TaskImportMode.DO_NOT_IMPORT
-                with PredictedQuantumGraph.open(qgraph, import_mode=import_mode) as reader:
-                    if for_execution or qgraph_dot or qgraph_mermaid or show.needs_full_qg or qgraph_node_id:
-                        # This reads everything for the given quanta.
-                        reader.read_execution_quanta(quantum_ids)
-                    elif for_init_output_run:
-                        reader.read_init_quanta()
+                case ".qg":
+                    if qgraph_id is not None:
+                        _LOG.warning("--qgraph-id is ignored when loading new '.qg' files.")
+                    if for_execution or for_init_output_run or save_qgraph or show.needs_full_qg:
+                        import_mode = TaskImportMode.ASSUME_CONSISTENT_EDGES
                    else:
-                        reader.read_thin_graph()
-                    qgc = reader.components
-            case ext:
-                raise ValueError(f"Unrecognized extension for quantum graph: {ext!r}")
+                        import_mode = TaskImportMode.DO_NOT_IMPORT
+                    with PredictedQuantumGraph.open(qgraph, import_mode=import_mode) as reader:
+                        if (
+                            for_execution
+                            or qgraph_dot
+                            or qgraph_mermaid
+                            or show.needs_full_qg
+                            or qgraph_node_id
+                        ):
+                            # This reads everything for the given quanta.
+                            reader.read_execution_quanta(quantum_ids)
+                        elif for_init_output_run:
+                            reader.read_init_quanta()
+                        else:
+                            reader.read_thin_graph()
+                        qgc = reader.components
+                case ext:
+                    raise ValueError(f"Unrecognized extension for quantum graph: {ext!r}")
 
-        # pipeline can not be provided in this case
-        if pipeline_graph_factory:
-            raise ValueError(
-                "Pipeline must not be given when quantum graph is read from "
-                f"file: {bool(pipeline_graph_factory)}"
-            )
-    else:
-        if pipeline_graph_factory is None:
-            raise ValueError("Pipeline must be given when quantum graph is not read from file.")
-        # We can't resolve the pipeline graph if we're mocking until after
-        # we've done the mocking (and the QG build will resolve on its own
-        # anyway).
-        pipeline_graph = pipeline_graph_factory(resolve=False)
-        if mock:
-            from lsst.pipe.base.tests.mocks import mock_pipeline_graph
+            # pipeline can not be provided in this case
+            if pipeline_graph_factory:
+                raise ValueError(
+                    "Pipeline must not be given when quantum graph is read from "
+                    f"file: {bool(pipeline_graph_factory)}"
+                )
+        else:
+            if pipeline_graph_factory is None:
+                raise ValueError("Pipeline must be given when quantum graph is not read from file.")
+            # We can't resolve the pipeline graph if we're mocking until after
+            # we've done the mocking (and the QG build will resolve on its own
+            # anyway).
+            pipeline_graph = pipeline_graph_factory(resolve=False)
+            if mock:
+                from lsst.pipe.base.tests.mocks import mock_pipeline_graph
 
-            pipeline_graph = mock_pipeline_graph(
+                pipeline_graph = mock_pipeline_graph(
+                    pipeline_graph,
+                    unmocked_dataset_types=unmocked_dataset_types,
+                    force_failures=mock_failure,
+                )
+            data_id_tables = []
+            for table_file in data_id_table:
+                with ResourcePath(table_file).as_local() as local_path:
+                    table = Table.read(local_path.ospath)
+                    # Add the filename to the metadata for more logging
+                    # information down in the QG builder.
+                    table.meta["filename"] = table_file
+                    data_id_tables.append(table)
+            # make execution plan (a.k.a. DAG) for pipeline
+            graph_builder = AllDimensionsQuantumGraphBuilder(
                 pipeline_graph,
-                unmocked_dataset_types=unmocked_dataset_types,
-                force_failures=mock_failure,
+                butler,
+                where=data_query,
+                skip_existing_in=skip_existing_in,
+                clobber=clobber_outputs,
+                dataset_query_constraint=DatasetQueryConstraintVariant.fromExpression(
+                    dataset_query_constraint
+                ),
+                input_collections=collections,
+                output_run=run,
+                data_id_tables=data_id_tables,
+            )
+            # Accumulate metadata (QB builder adds some of its own).
+            metadata = {
+                "butler_argument": str(butler_config),
+                "extend_run": extend_run,
+                "skip_existing_in": skip_existing_in,
+                "skip_existing": skip_existing,
+                "data_query": data_query,
+            }
+            assert run is not None, "Butler output run collection must be defined"
+            qgc = graph_builder.finish(
+                output, metadata=metadata, attach_datastore_records=qgraph_datastore_records
             )
-        data_id_tables = []
-        for table_file in data_id_table:
-            with ResourcePath(table_file).as_local() as local_path:
-                table = Table.read(local_path.ospath)
-                # Add the filename to the metadata for more logging
-                # information down in the QG builder.
-                table.meta["filename"] = table_file
-                data_id_tables.append(table)
-        # make execution plan (a.k.a. DAG) for pipeline
-        graph_builder = AllDimensionsQuantumGraphBuilder(
-            pipeline_graph,
-            butler,
-            where=data_query,
-            skip_existing_in=skip_existing_in,
-            clobber=clobber_outputs,
-            dataset_query_constraint=DatasetQueryConstraintVariant.fromExpression(dataset_query_constraint),
-            input_collections=collections,
-            output_run=run,
-            data_id_tables=data_id_tables,
-        )
-        # Accumulate metadata (QB builder adds some of its own).
-        metadata = {
-            "butler_argument": str(butler_config),
-            "extend_run": extend_run,
-            "skip_existing_in": skip_existing_in,
-            "skip_existing": skip_existing,
-            "data_query": data_query,
-        }
-        assert run is not None, "Butler output run collection must be defined"
-        qgc = graph_builder.finish(
-            output, metadata=metadata, attach_datastore_records=qgraph_datastore_records
-        )
 
-    if save_qgraph:
-        _LOG.verbose("Writing quantum graph to %r.", save_qgraph)
-        qgc.write(save_qgraph)
+        if save_qgraph:
+            _LOG.verbose("Writing quantum graph to %r.", save_qgraph)
+            qgc.write(save_qgraph)
 
-    qg = qgc.assemble()
+        qg = qgc.assemble()
 
     if not summarize_quantum_graph(qg):
         return None
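Beyond wrapping the body in "with butler:", this loader dispatches on the file extension with a structural match statement whose final capture pattern (case ext) doubles as the error case. A condensed sketch of that dispatch shape (the loaders are replaced by placeholder strings):

from pathlib import Path

def pick_loader(uri: str) -> str:
    match Path(uri).suffix:
        case ".qgraph":
            return "legacy-format loader"
        case ".qg":
            return "new-format loader"
        case ext:
            # Capture pattern: binds the unmatched suffix for the message.
            raise ValueError(f"Unrecognized extension for quantum graph: {ext!r}")

print(pick_loader("graph.qg"))      # new-format loader
print(pick_loader("graph.qgraph"))  # legacy-format loader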
python/lsst/ctrl/mpexec/cli/script/report.py
@@ -66,54 +66,54 @@ def report(
         List only the counts (or data_ids if number of failures < 5). This
         option is good for those who just want to see totals.
     """
-    butler = Butler.from_config(butler_config, writeable=False)
     qgraph = QuantumGraph.loadUri(qgraph_uri)
-    report = QuantumGraphExecutionReport.make_reports(butler, qgraph)
-    if not full_output_filename:
-        # this is the option to print to the command-line
-        summary_dict = report.to_summary_dict(butler, logs, human_readable=True)
-        dataset_table_rows = []
-        data_products = []
-        quanta_summary = []
-        error_summary = []
-        for task in summary_dict.keys():
-            for data_product in summary_dict[task]["outputs"]:
-                dataset_table_rows.append(summary_dict[task]["outputs"][data_product])
-                data_products.append(data_product)
+    with Butler.from_config(butler_config, writeable=False) as butler:
+        report = QuantumGraphExecutionReport.make_reports(butler, qgraph)
+        if not full_output_filename:
+            # this is the option to print to the command-line
+            summary_dict = report.to_summary_dict(butler, logs, human_readable=True)
+            dataset_table_rows = []
+            data_products = []
+            quanta_summary = []
+            error_summary = []
+            for task in summary_dict.keys():
+                for data_product in summary_dict[task]["outputs"]:
+                    dataset_table_rows.append(summary_dict[task]["outputs"][data_product])
+                    data_products.append(data_product)
 
-            if len(summary_dict[task]["failed_quanta"]) > 5:
-                quanta_summary.append(
-                    {
-                        "Task": task,
-                        "Failed": len(summary_dict[task]["failed_quanta"]),
-                        "Blocked": summary_dict[task]["n_quanta_blocked"],
-                        "Succeeded": summary_dict[task]["n_succeeded"],
-                        "Expected": summary_dict[task]["n_expected"],
-                    }
-                )
-            else:
-                quanta_summary.append(
-                    {
-                        "Task": task,
-                        "Failed": summary_dict[task]["failed_quanta"],
-                        "Blocked": summary_dict[task]["n_quanta_blocked"],
-                        "Succeeded": summary_dict[task]["n_succeeded"],
-                        "Expected": summary_dict[task]["n_expected"],
-                    }
-                )
-            if "errors" in summary_dict[task].keys():
-                error_summary.append({task: summary_dict[task]["errors"]})
-        quanta = Table(quanta_summary)
-        datasets = Table(dataset_table_rows)
-        datasets.add_column(data_products, index=0, name="DatasetType")
-        quanta.pprint_all()
-        print("\n")
-        if not brief:
-            pprint.pprint(error_summary)
+                if len(summary_dict[task]["failed_quanta"]) > 5:
+                    quanta_summary.append(
+                        {
+                            "Task": task,
+                            "Failed": len(summary_dict[task]["failed_quanta"]),
+                            "Blocked": summary_dict[task]["n_quanta_blocked"],
+                            "Succeeded": summary_dict[task]["n_succeeded"],
+                            "Expected": summary_dict[task]["n_expected"],
+                        }
+                    )
+                else:
+                    quanta_summary.append(
+                        {
+                            "Task": task,
+                            "Failed": summary_dict[task]["failed_quanta"],
+                            "Blocked": summary_dict[task]["n_quanta_blocked"],
+                            "Succeeded": summary_dict[task]["n_succeeded"],
+                            "Expected": summary_dict[task]["n_expected"],
+                        }
+                    )
+                if "errors" in summary_dict[task].keys():
+                    error_summary.append({task: summary_dict[task]["errors"]})
+            quanta = Table(quanta_summary)
+            datasets = Table(dataset_table_rows)
+            datasets.add_column(data_products, index=0, name="DatasetType")
+            quanta.pprint_all()
             print("\n")
-        datasets.pprint_all()
-    else:
-        report.write_summary_yaml(butler, full_output_filename, do_store_logs=logs)
+            if not brief:
+                pprint.pprint(error_summary)
+                print("\n")
+            datasets.pprint_all()
+        else:
+            report.write_summary_yaml(butler, full_output_filename, do_store_logs=logs)
 
 
 def report_v2(
@@ -190,18 +190,18 @@ def report_v2(
     the flow of quanta and datasets through the graph and to identify where
     problems may be occurring.
     """
-    butler = Butler.from_config(butler_config, writeable=False)
-    qpg = QuantumProvenanceGraph(
-        butler,
-        qgraph_uris,
-        collections=collections,
-        where=where,
-        curse_failed_logs=curse_failed_logs,
-        read_caveats=read_caveats,
-        use_qbb=use_qbb,
-        n_cores=n_cores,
-    )
-    summary = qpg.to_summary(butler, do_store_logs=logs)
+    with Butler.from_config(butler_config, writeable=False) as butler:
+        qpg = QuantumProvenanceGraph(
+            butler,
+            qgraph_uris,
+            collections=collections,
+            where=where,
+            curse_failed_logs=curse_failed_logs,
+            read_caveats=read_caveats,
+            use_qbb=use_qbb,
+            n_cores=n_cores,
+        )
+        summary = qpg.to_summary(butler, do_store_logs=logs)
 
     if view_graph:
         from lsst.pipe.base.pipeline_graph.visualization import (
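Aside from the butler lifetime change, the command-line path of report is unchanged: it accumulates one dict per task and hands the list straight to astropy's Table, whose pprint_all() prints without row or column truncation. A tiny sketch of that table-building step (astropy is already a dependency here; the row values are invented):

from astropy.table import Table

quanta_summary = [
    {"Task": "taskA", "Failed": 0, "Blocked": 0, "Succeeded": 10, "Expected": 10},
    {"Task": "taskB", "Failed": 2, "Blocked": 1, "Succeeded": 7, "Expected": 10},
]
quanta = Table(quanta_summary)  # list-of-dict rows become columns keyed by name
quanta.pprint_all()             # print every row and column, no truncation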
python/lsst/ctrl/mpexec/cli/script/run.py
@@ -235,7 +235,7 @@ def run(
 
     # Make butler instance. QuantumGraph should have an output run defined,
     # but we ignore it here and let command line decide actual output run.
-    butler = ButlerFactory.make_write_butler(
+    with ButlerFactory.make_write_butler(
         butler_config,
         qg.pipeline_graph,
         output=output,
@@ -245,74 +245,73 @@ def run(
         rebase=rebase,
         replace_run=replace_run,
         prune_replaced=prune_replaced,
-    )
+    ) as butler:
+        assert butler.run is not None, "Guaranteed by make_write_butler."
+        if skip_existing:
+            skip_existing_in += (butler.run,)
 
-    assert butler.run is not None, "Guaranteed by make_write_butler."
-    if skip_existing:
-        skip_existing_in += (butler.run,)
+        # Enable lsstDebug debugging. Note that this is done once in the
+        # main process before PreExecInit and it is also repeated before
+        # running each task in SingleQuantumExecutor (which may not be
+        # needed if `multiprocessing` always uses fork start method).
+        if enable_lsst_debug:
+            try:
+                _LOG.debug("Will try to import debug.py")
+                import debug  # type: ignore # noqa: F401
+            except ImportError:
+                _LOG.warning("No 'debug' module found.")
 
-    # Enable lsstDebug debugging. Note that this is done once in the
-    # main process before PreExecInit and it is also repeated before
-    # running each task in SingleQuantumExecutor (which may not be
-    # needed if `multiprocessing` always uses fork start method).
-    if enable_lsst_debug:
-        try:
-            _LOG.debug("Will try to import debug.py")
-            import debug  # type: ignore # noqa: F401
-        except ImportError:
-            _LOG.warning("No 'debug' module found.")
-
-    # Save all InitOutputs, configs, etc.
-    if register_dataset_types:
-        qg.pipeline_graph.register_dataset_types(butler, include_packages=not no_versions)
-    if not skip_init_writes:
-        qg.write_init_outputs(butler, skip_existing=skip_existing)
-        qg.write_configs(butler, compare_existing=extend_run)
-        if not no_versions:
-            qg.write_packages(butler, compare_existing=extend_run)
+        # Save all InitOutputs, configs, etc.
+        if register_dataset_types:
+            qg.pipeline_graph.register_dataset_types(butler, include_packages=not no_versions)
+        if not skip_init_writes:
+            qg.write_init_outputs(butler, skip_existing=skip_existing)
+            qg.write_configs(butler, compare_existing=extend_run)
+            if not no_versions:
+                qg.write_packages(butler, compare_existing=extend_run)
 
-    if init_only:
-        return
+        if init_only:
+            return
 
-    if task_factory is None:
-        task_factory = TaskFactory()
-    resources = ExecutionResources(
-        num_cores=cores_per_quantum, max_mem=memory_per_quantum, default_mem_units=u.MB
-    )
-    quantum_executor = SingleQuantumExecutor(
-        butler=butler,
-        task_factory=task_factory,
-        skip_existing_in=skip_existing_in,
-        clobber_outputs=clobber_outputs,
-        enable_lsst_debug=enable_lsst_debug,
-        resources=resources,
-        raise_on_partial_outputs=raise_on_partial_outputs,
-    )
+        if task_factory is None:
+            task_factory = TaskFactory()
+        resources = ExecutionResources(
+            num_cores=cores_per_quantum, max_mem=memory_per_quantum, default_mem_units=u.MB
+        )
+        quantum_executor = SingleQuantumExecutor(
+            butler=butler,
+            task_factory=task_factory,
+            skip_existing_in=skip_existing_in,
+            clobber_outputs=clobber_outputs,
+            enable_lsst_debug=enable_lsst_debug,
+            resources=resources,
+            raise_on_partial_outputs=raise_on_partial_outputs,
+        )
 
-    if timeout is None:
-        timeout = MP_TIMEOUT
-    executor = MPGraphExecutor(
-        num_proc=processes,
-        timeout=timeout,
-        start_method=start_method,
-        quantum_executor=quantum_executor,
-        fail_fast=fail_fast,
-        pdb=pdb,
-        execution_graph_fixup=_import_graph_fixup(graph_fixup),
-    )
-    # Have to reset connection pool to avoid sharing connections with
-    # forked processes.
-    butler.registry.resetConnectionPool()
-    try:
-        with lsst.utils.timer.profile(profile, _LOG):
-            executor.execute(qg)
-    finally:
-        if summary:
-            report = executor.getReport()
-            if report:
-                with ResourcePath(summary).open("w") as out:
-                    # Do not save fields that are not set.
-                    out.write(report.model_dump_json(exclude_none=True, indent=2))
+        if timeout is None:
+            timeout = MP_TIMEOUT
+        executor = MPGraphExecutor(
+            num_proc=processes,
+            timeout=timeout,
+            start_method=start_method,
+            quantum_executor=quantum_executor,
+            fail_fast=fail_fast,
+            pdb=pdb,
+            execution_graph_fixup=_import_graph_fixup(graph_fixup),
+        )
+        # Have to reset connection pool to avoid sharing connections with
+        # forked processes.
+        butler.registry.resetConnectionPool()
+        try:
+            with lsst.utils.timer.profile(profile, _LOG):
+                executor.execute(qg)
+        finally:
+            if summary:
+                report = executor.getReport()
+                if report:
+                    with ResourcePath(summary).open("w") as out:
+                        # Do not save fields that are not set.
+                        out.write(report.model_dump_json(exclude_none=True, indent=2))
 
 
 def _import_graph_fixup(graph_fixup: str) -> ExecutionGraphFixup | None:
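run.py now consumes make_write_butler directly in a with statement, which requires only that the factory's return value implement the context-manager protocol; the caller owns the lifetime from that point on. A minimal sketch of a factory whose product cleans itself up (Workspace is a toy stand-in, not the ctrl_mpexec API):

import os
import tempfile


class Workspace:
    # Toy stand-in for a resource that releases itself in __exit__.
    def __init__(self) -> None:
        self.dir = tempfile.mkdtemp()

    def __enter__(self) -> "Workspace":
        return self

    def __exit__(self, *exc) -> bool:
        os.rmdir(self.dir)
        return False

    @classmethod
    def make(cls) -> "Workspace":
        # The factory returns the live object; the caller scopes it with `with`.
        return cls()


with Workspace.make() as ws:
    print("working in", ws.dir)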
python/lsst/ctrl/mpexec/showInfo.py
@@ -389,7 +389,7 @@ class ShowInfo:
         Path to configuration for the butler.
         """
 
-        def dumpURIs(thisRef: DatasetRef) -> None:
+        def dumpURIs(butler: Butler, thisRef: DatasetRef) -> None:
             primary, components = butler.getURIs(thisRef, predict=True, run="TBD")
             if primary:
                 print(f" {primary}", file=self.stream)
@@ -398,17 +398,19 @@ class ShowInfo:
             for compName, compUri in components.items():
                 print(f" {compName}: {compUri}", file=self.stream)
 
-        butler = Butler.from_config(butler_config)
-        xgraph = qg.quantum_only_xgraph
-        execution_quanta = qg.build_execution_quanta()
-        for quantum_id, quantum_data in xgraph.nodes.items():
-            print(f"Quantum {quantum_id}: {quantum_data['pipeline_node'].task_class_name}", file=self.stream)
-            print(" inputs:", file=self.stream)
-            execution_quantum = execution_quanta[quantum_id]
-            for refs in execution_quantum.inputs.values():
-                for ref in refs:
-                    dumpURIs(ref)
-            print(" outputs:", file=self.stream)
-            for refs in execution_quantum.outputs.values():
-                for ref in refs:
-                    dumpURIs(ref)
+        with Butler.from_config(butler_config) as butler:
+            xgraph = qg.quantum_only_xgraph
+            execution_quanta = qg.build_execution_quanta()
+            for quantum_id, quantum_data in xgraph.nodes.items():
+                print(
+                    f"Quantum {quantum_id}: {quantum_data['pipeline_node'].task_class_name}", file=self.stream
+                )
+                print(" inputs:", file=self.stream)
+                execution_quantum = execution_quanta[quantum_id]
+                for refs in execution_quantum.inputs.values():
+                    for ref in refs:
+                        dumpURIs(butler, ref)
+                print(" outputs:", file=self.stream)
+                for refs in execution_quantum.outputs.values():
+                    for ref in refs:
+                        dumpURIs(butler, ref)
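The dumpURIs helper used to close over butler from the enclosing scope; now that the butler lives inside a with block, it is passed explicitly, keeping the helper's dependency visible and its valid lifetime unambiguous. The shape of that refactor in miniature (Res is a hypothetical class, not the Butler API):

class Res:
    def __enter__(self):
        return self

    def __exit__(self, *exc):
        return False

    def describe(self, item: str) -> str:
        return f"uri://{item}"


def show(items):
    # The dependency is an explicit parameter instead of a captured
    # variable, so `dump` is only meaningful while `res` is open.
    def dump(res: Res, item: str) -> None:
        print(res.describe(item))

    with Res() as res:
        for item in items:
            dump(res, item)


show(["a", "b"])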
python/lsst/ctrl/mpexec/singleQuantumExecutor.py
@@ -40,6 +40,7 @@ import lsst.pipe.base.single_quantum_executor
 if TYPE_CHECKING:
     from lsst.daf.butler import Butler, ButlerMetrics, LimitedButler, Quantum
     from lsst.pipe.base import ExecutionResources, PipelineTask, QuantumSuccessCaveats, TaskFactory
+    from lsst.pipe.base.log_capture import _ExecutionLogRecordsExtra
     from lsst.pipe.base.pipeline_graph import TaskNode
 
 
@@ -136,9 +137,16 @@ class SingleQuantumExecutor(lsst.pipe.base.single_quantum_executor.SingleQuantum
     )
 
     def checkExistingOutputs(
-        self, quantum: Quantum, task_node: TaskNode, /, limited_butler: LimitedButler
+        self,
+        quantum: Quantum,
+        task_node: TaskNode,
+        /,
+        limited_butler: LimitedButler,
+        log_extra: _ExecutionLogRecordsExtra,
     ) -> bool:
-        return super()._check_existing_outputs(quantum, task_node, limited_butler=limited_butler)
+        return super()._check_existing_outputs(
+            quantum, task_node, limited_butler=limited_butler, log_extra=log_extra
+        )
 
     def updatedQuantumInputs(
         self, quantum: Quantum, task_node: TaskNode, /, limited_butler: LimitedButler
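This subclass adapts camelCase entry points to the snake_case implementations in lsst.pipe.base; when the base method gains a parameter (here log_extra), each adapter must widen its signature and forward the new argument through super(). A generic sketch of that delegation pattern (toy classes, not the pipe_base API):

class Base:
    def _check(self, item: str, *, flag: bool, extra: dict | None = None) -> bool:
        return flag and item in (extra or {})


class Adapter(Base):
    # Public camelCase wrapper that forwards everything, including the
    # newly added keyword parameter, to the base implementation.
    def checkItem(self, item: str, *, flag: bool, extra: dict | None = None) -> bool:
        return super()._check(item, flag=flag, extra=extra)


print(Adapter().checkItem("x", flag=True, extra={"x": 1}))  # True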
python/lsst/ctrl/mpexec/version.py (added)
@@ -0,0 +1,2 @@
+__all__ = ["__version__"]
+__version__ = "30.0.0rc1"
python/lsst_ctrl_mpexec.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lsst-ctrl-mpexec
-Version: 29.2025.4800
+Version: 30.0.0rc1
 Summary: Pipeline execution infrastructure for the Rubin Observatory LSST Science Pipelines.
 Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
 License-Expression: BSD-3-Clause OR GPL-3.0-or-later
tests/test_cliCmdCleanup.py
@@ -55,6 +55,7 @@ class CleanupCollectionTest(unittest.TestCase):
             self.root,
             configFile=os.path.join(TESTDIR, "config/metricTestRepoButler.yaml"),
         )
+        self.enterContext(self.testRepo.butler)
 
     def tearDown(self):
         removeTestTempDir(self.root)
@@ -82,8 +83,8 @@ class CleanupCollectionTest(unittest.TestCase):
         self.assertIn("Will remove:\n runs: \n others: ingest\n", result.output)
         self.assertIn("Done.", result.output)
 
-        butler = Butler.from_config(self.root)
-        self.assertEqual(set(butler.registry.queryCollections()), {"in", "ingest/run"})
+        with Butler.from_config(self.root) as butler:
+            self.assertEqual(set(butler.registry.queryCollections()), {"in", "ingest/run"})
 
     def test_nonExistantCollection(self):
         """Test running cleanup on a collection that has never existed."""
tests/test_cliCmdPurge.py
@@ -54,6 +54,7 @@ class PurgeTest(unittest.TestCase):
             self.root,
             configFile=os.path.join(TESTDIR, "config/metricTestRepoButler.yaml"),
         )
+        self.enterContext(self.testRepo.butler)
 
     def tearDown(self):
         removeTestTempDir(self.root)
tests/test_cliCmdReport.py
@@ -87,6 +87,7 @@ class ReportTest(unittest.TestCase):
             root=self.root,
             metadata=metadata,
         )
+        butler.close()
         # Check that we can get the proper run collection from the qgraph
         self.assertEqual(check_output_run(qgraph, "run"), [])
 
@@ -272,6 +273,7 @@ class ReportTest(unittest.TestCase):
             root=self.root,
             metadata=metadata,
         )
+        butler.close()
 
         # Check that we can get the proper run collection from the qgraph
         self.assertEqual(check_output_run(qgraph1, "run"), [])
tests/test_cliCmdUpdateGraphRun.py
@@ -54,6 +54,7 @@ class UpdateGraphRunTest(unittest.TestCase):
     def test_update(self):
         """Test for updating output run in a graph."""
         helper = InMemoryRepo()
+        self.enterContext(helper)
         helper.add_task()
         helper.add_task()
         qgc = helper.make_quantum_graph_builder().finish(attach_datastore_records=False)
tests/test_preExecInit.py
@@ -62,6 +62,7 @@ class PreExecInitTestCase(unittest.TestCase):
             with self.subTest(extendRun=extendRun):
                 with temporaryDirectory() as tmpdir:
                     butler, qgraph = makeSimpleQGraph(root=tmpdir)
+                    self.enterContext(butler)
                     preExecInit = PreExecInit(butler=butler, taskFactory=taskFactory, extendRun=extendRun)
                     preExecInit.saveInitOutputs(qgraph)
 
@@ -71,6 +72,7 @@ class PreExecInitTestCase(unittest.TestCase):
             with self.subTest(extendRun=extendRun):
                 with temporaryDirectory() as tmpdir:
                     butler, qgraph = makeSimpleQGraph(root=tmpdir)
+                    self.enterContext(butler)
                     preExecInit = PreExecInit(butler=butler, taskFactory=taskFactory, extendRun=extendRun)
                     preExecInit.saveInitOutputs(qgraph)
                     if extendRun:
@@ -86,6 +88,7 @@ class PreExecInitTestCase(unittest.TestCase):
             with self.subTest(extendRun=extendRun):
                 with temporaryDirectory() as tmpdir:
                     butler, qgraph = makeSimpleQGraph(root=tmpdir)
+                    self.enterContext(butler)
                     preExecInit = PreExecInit(butler=butler, taskFactory=None, extendRun=extendRun)
                     preExecInit.saveConfigs(qgraph)
 
@@ -94,6 +97,7 @@ class PreExecInitTestCase(unittest.TestCase):
             with self.subTest(extendRun=extendRun):
                 with temporaryDirectory() as tmpdir:
                     butler, qgraph = makeSimpleQGraph(root=tmpdir)
+                    self.enterContext(butler)
                     preExecInit = PreExecInit(butler=butler, taskFactory=None, extendRun=extendRun)
                     preExecInit.saveConfigs(qgraph)
                     if extendRun:
@@ -109,6 +113,7 @@ class PreExecInitTestCase(unittest.TestCase):
             with self.subTest(extendRun=extendRun):
                 with temporaryDirectory() as tmpdir:
                     butler, qgraph = makeSimpleQGraph(root=tmpdir)
+                    self.enterContext(butler)
                     preExecInit = PreExecInit(butler=butler, taskFactory=None, extendRun=extendRun)
                     preExecInit.savePackageVersions(qgraph)
 
@@ -117,6 +122,7 @@ class PreExecInitTestCase(unittest.TestCase):
             with self.subTest(extendRun=extendRun):
                 with temporaryDirectory() as tmpdir:
                     butler, qgraph = makeSimpleQGraph(root=tmpdir)
+                    self.enterContext(butler)
                     preExecInit = PreExecInit(butler=butler, taskFactory=None, extendRun=extendRun)
                     preExecInit.savePackageVersions(qgraph)
                     if extendRun:
tests/test_run.py
@@ -40,6 +40,7 @@ from lsst.ctrl.mpexec.cli.cmd.commands import PipetaskCommand, coverage_context
 from lsst.ctrl.mpexec.cli.utils import collect_pipeline_actions
 from lsst.ctrl.mpexec.showInfo import ShowInfo
 from lsst.daf.butler import CollectionType, MissingCollectionError
+from lsst.daf.butler.cli.utils import LogCliRunner
 from lsst.pipe.base.mp_graph_executor import MPGraphExecutorError
 from lsst.pipe.base.script import transfer_from_graph
 from lsst.pipe.base.tests.mocks import DirectButlerRepo, DynamicTestPipelineTaskConfig
@@ -60,7 +61,9 @@ class RunTestCase(unittest.TestCase):
             kwargs = collect_pipeline_actions(ctx, **kwargs)
             mock(**kwargs)
 
-        runner = click.testing.CliRunner()
+        # At least one tests requires that we enable INFO logging so use
+        # the specialist runner.
+        runner = LogCliRunner()
         result = runner.invoke(fake_run, args, catch_exceptions=False)
         if result.exit_code != 0:
             raise RuntimeError(f"Failure getting default args for 'run': {result}")
python/lsst/ctrl/mpexec/version.py (removed)
@@ -1,2 +0,0 @@
-__all__ = ["__version__"]
-__version__ = "29.2025.4800"