lsst-ctrl-mpexec 29.2025.2400-py3-none-any.whl → 29.2025.3100-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. lsst/ctrl/mpexec/__init__.py +1 -2
  2. lsst/ctrl/mpexec/cli/cmd/commands.py +1 -1
  3. lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py +3 -1
  4. lsst/ctrl/mpexec/cli/script/run.py +2 -1
  5. lsst/ctrl/mpexec/cli/script/run_qbb.py +2 -1
  6. lsst/ctrl/mpexec/cmdLineFwk.py +23 -23
  7. lsst/ctrl/mpexec/execFixupDataId.py +9 -101
  8. lsst/ctrl/mpexec/executionGraphFixup.py +12 -37
  9. lsst/ctrl/mpexec/log_capture.py +9 -195
  10. lsst/ctrl/mpexec/mpGraphExecutor.py +60 -696
  11. lsst/ctrl/mpexec/quantumGraphExecutor.py +20 -90
  12. lsst/ctrl/mpexec/reports.py +30 -206
  13. lsst/ctrl/mpexec/separablePipelineExecutor.py +12 -263
  14. lsst/ctrl/mpexec/simple_pipeline_executor.py +11 -590
  15. lsst/ctrl/mpexec/singleQuantumExecutor.py +75 -532
  16. lsst/ctrl/mpexec/taskFactory.py +12 -38
  17. lsst/ctrl/mpexec/version.py +1 -1
  18. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/METADATA +1 -1
  19. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/RECORD +27 -28
  20. lsst/ctrl/mpexec/dotTools.py +0 -100
  21. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/WHEEL +0 -0
  22. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/entry_points.txt +0 -0
  23. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/licenses/COPYRIGHT +0 -0
  24. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/licenses/LICENSE +0 -0
  25. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/licenses/bsd_license.txt +0 -0
  26. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/licenses/gpl-v3.0.txt +0 -0
  27. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/top_level.txt +0 -0
  28. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3100.dist-info}/zip-safe +0 -0
lsst/ctrl/mpexec/__init__.py
@@ -27,14 +27,13 @@

 from ._pipeline_graph_factory import PipelineGraphFactory
 from .cmdLineFwk import *
-from .dotTools import *
 from .executionGraphFixup import *
 from .mpGraphExecutor import *
-from .preExecInit import *
 from .quantumGraphExecutor import *
 from .reports import *
 from .separablePipelineExecutor import *
 from .simple_pipeline_executor import *
 from .singleQuantumExecutor import *
+from .preExecInit import *
 from .taskFactory import *
 from .version import *  # Generated by sconsUtils
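With lsst/ctrl/mpexec/dotTools.py deleted in this release (file 20 above) and its star import dropped from the package namespace, graph2dot is only available from lsst.pipe.base.dot_tools, the path already used as an unchanged import in the cmdLineFwk.py hunk further down. A version-tolerant import for downstream code might look like the following sketch; the fallback branch assumes graph2dot was previously reachable through the removed lsst.ctrl.mpexec.dotTools module::

    try:
        from lsst.pipe.base.dot_tools import graph2dot
    except ImportError:
        # Older stacks that still ship the lsst.ctrl.mpexec.dotTools module.
        from lsst.ctrl.mpexec.dotTools import graph2dot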
lsst/ctrl/mpexec/cli/cmd/commands.py
@@ -36,7 +36,6 @@ from typing import Any
 import click

 import lsst.pipe.base.cli.opt as pipeBaseOpts
-from lsst.ctrl.mpexec import Report
 from lsst.ctrl.mpexec.showInfo import ShowInfo
 from lsst.daf.butler.cli.opt import (
     collections_option,
@@ -49,6 +48,7 @@ from lsst.daf.butler.cli.opt import (
     where_option,
 )
 from lsst.daf.butler.cli.utils import MWCtxObj, catch_and_exit, option_section, unwrap
+from lsst.pipe.base.quantum_reports import Report

 from .. import opt as ctrlMpExecOpts
 from .. import script
lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py
@@ -27,7 +27,9 @@

 from types import SimpleNamespace

-from ... import CmdLineFwk, TaskFactory
+from lsst.pipe.base import TaskFactory
+
+from ... import CmdLineFwk


 def pre_exec_init_qbb(
lsst/ctrl/mpexec/cli/script/run.py
@@ -28,9 +28,10 @@
 import logging
 from types import SimpleNamespace

+from lsst.pipe.base import TaskFactory
 from lsst.utils.threads import disable_implicit_threading

-from ... import CmdLineFwk, TaskFactory
+from ... import CmdLineFwk

 _log = logging.getLogger(__name__)

lsst/ctrl/mpexec/cli/script/run_qbb.py
@@ -28,9 +28,10 @@
 import logging
 from types import SimpleNamespace

+from lsst.pipe.base import TaskFactory
 from lsst.utils.threads import disable_implicit_threading

-from ... import CmdLineFwk, TaskFactory
+from ... import CmdLineFwk

 _log = logging.getLogger(__name__)

lsst/ctrl/mpexec/cmdLineFwk.py
@@ -71,19 +71,19 @@ from lsst.pipe.base import (
 )
 from lsst.pipe.base.all_dimensions_quantum_graph_builder import AllDimensionsQuantumGraphBuilder
 from lsst.pipe.base.dot_tools import graph2dot
+from lsst.pipe.base.execution_graph_fixup import ExecutionGraphFixup
 from lsst.pipe.base.mermaid_tools import graph2mermaid
+from lsst.pipe.base.mp_graph_executor import MPGraphExecutor
 from lsst.pipe.base.pipeline_graph import NodeType
+from lsst.pipe.base.quantum_reports import Report
+from lsst.pipe.base.single_quantum_executor import SingleQuantumExecutor
 from lsst.resources import ResourcePath
 from lsst.utils import doImportType
 from lsst.utils.logging import VERBOSE, getLogger
 from lsst.utils.threads import disable_implicit_threading

 from ._pipeline_graph_factory import PipelineGraphFactory
-from .executionGraphFixup import ExecutionGraphFixup
-from .mpGraphExecutor import MPGraphExecutor
 from .preExecInit import PreExecInit, PreExecInitLimited
-from .reports import Report
-from .singleQuantumExecutor import SingleQuantumExecutor

 # ----------------------------------
 # Local non-exported definitions --
@@ -859,24 +859,24 @@ class CmdLineFwk:
         graphFixup = self._importGraphFixup(args)
         resources = self._make_execution_resources(args)
         quantumExecutor = SingleQuantumExecutor(
-            butler,
-            taskFactory,
-            skipExistingIn=args.skip_existing_in,
-            clobberOutputs=args.clobber_outputs,
-            enableLsstDebug=args.enableLsstDebug,
+            butler=butler,
+            task_factory=taskFactory,
+            skip_existing_in=args.skip_existing_in,
+            clobber_outputs=args.clobber_outputs,
+            enable_lsst_debug=args.enableLsstDebug,
             resources=resources,
             raise_on_partial_outputs=args.raise_on_partial_outputs,
         )

         timeout = self.MP_TIMEOUT if args.timeout is None else args.timeout
         executor = MPGraphExecutor(
-            numProc=args.processes,
+            num_proc=args.processes,
             timeout=timeout,
-            startMethod=args.start_method,
-            quantumExecutor=quantumExecutor,
-            failFast=args.fail_fast,
+            start_method=args.start_method,
+            quantum_executor=quantumExecutor,
+            fail_fast=args.fail_fast,
             pdb=args.pdb,
-            executionGraphFixup=graphFixup,
+            execution_graph_fixup=graphFixup,
         )
         # Have to reset connection pool to avoid sharing connections with
         # forked processes.
@@ -1028,24 +1028,24 @@ class CmdLineFwk:
         resources = self._make_execution_resources(args)
         quantumExecutor = SingleQuantumExecutor(
             butler=None,
-            taskFactory=task_factory,
-            enableLsstDebug=args.enableLsstDebug,
+            task_factory=task_factory,
+            enable_lsst_debug=args.enableLsstDebug,
             limited_butler_factory=_butler_factory,
             resources=resources,
-            assumeNoExistingOutputs=args.no_existing_outputs,
-            skipExisting=True,
-            clobberOutputs=True,
+            assume_no_existing_outputs=args.no_existing_outputs,
+            skip_existing=True,
+            clobber_outputs=True,
             raise_on_partial_outputs=args.raise_on_partial_outputs,
             job_metadata=job_metadata,
         )

         timeout = self.MP_TIMEOUT if args.timeout is None else args.timeout
         executor = MPGraphExecutor(
-            numProc=args.processes,
+            num_proc=args.processes,
             timeout=timeout,
-            startMethod=args.start_method,
-            quantumExecutor=quantumExecutor,
-            failFast=args.fail_fast,
+            start_method=args.start_method,
+            quantum_executor=quantumExecutor,
+            fail_fast=args.fail_fast,
             pdb=args.pdb,
         )
         try:
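Both call sites above make the same two changes: SingleQuantumExecutor and MPGraphExecutor now come from lsst.pipe.base (see the import hunk at the top of this file) and their constructors take snake_case keywords. A hedged sketch of how an external caller that builds these executors directly would update; only the keyword names visible in the hunks are taken from the diff, while the default values and the omitted optional parameters are assumptions::

    from lsst.pipe.base.mp_graph_executor import MPGraphExecutor
    from lsst.pipe.base.single_quantum_executor import SingleQuantumExecutor


    def make_executor(butler, task_factory, processes=1, timeout=3600):
        """Build the executor pair with the renamed keyword arguments."""
        quantum_executor = SingleQuantumExecutor(
            butler=butler,              # previously the first positional argument
            task_factory=task_factory,  # was taskFactory=
            skip_existing_in=None,      # was skipExistingIn=
            clobber_outputs=False,      # was clobberOutputs=
            enable_lsst_debug=False,    # was enableLsstDebug=
        )
        return MPGraphExecutor(
            num_proc=processes,                 # was numProc=
            timeout=timeout,
            quantum_executor=quantum_executor,  # was quantumExecutor=
            fail_fast=False,                    # was failFast=
        )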
lsst/ctrl/mpexec/execFixupDataId.py
@@ -25,108 +25,16 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

-__all__ = ["ExecutionGraphFixup"]
+__all__ = ("ExecFixupDataId",)

-import contextlib
-import itertools
-from collections import defaultdict
-from collections.abc import Sequence
-from typing import Any
+import warnings

-import networkx as nx
+from lsst.pipe.base.exec_fixup_data_id import ExecFixupDataId

-from lsst.pipe.base import QuantumGraph, QuantumNode
+# TODO[DM-51962]: Remove this module.

-from .executionGraphFixup import ExecutionGraphFixup
-
-
-class ExecFixupDataId(ExecutionGraphFixup):
-    """Implementation of ExecutionGraphFixup for ordering of tasks based
-    on DataId values.
-
-    This class is a trivial implementation mostly useful as an example,
-    though it can be used to make actual fixup instances by defining
-    a method that instantiates it, e.g.::
-
-        # lsst/ap/verify/ci_fixup.py
-
-        from lsst.ctrl.mpexec.execFixupDataId import ExecFixupDataId
-
-
-        def assoc_fixup():
-            return ExecFixupDataId(
-                taskLabel="ap_assoc", dimensions=("visit", "detector")
-            )
-
-    and then executing pipetask::
-
-        pipetask run --graph-fixup=lsst.ap.verify.ci_fixup.assoc_fixup ...
-
-    This will add new dependencies between quanta executed by the task with
-    label "ap_assoc". Quanta with higher visit number will depend on quanta
-    with lower visit number and their execution will wait until lower visit
-    number finishes.
-
-    Parameters
-    ----------
-    taskLabel : `str`
-        The label of the task for which to add dependencies.
-    dimensions : `str` or sequence [`str`]
-        One or more dimension names, quanta execution will be ordered
-        according to values of these dimensions.
-    reverse : `bool`, optional
-        If `False` (default) then quanta with higher values of dimensions
-        will be executed after quanta with lower values, otherwise the order
-        is reversed.
-    """
-
-    def __init__(self, taskLabel: str, dimensions: str | Sequence[str], reverse: bool = False):
-        self.taskLabel = taskLabel
-        self.dimensions = dimensions
-        self.reverse = reverse
-        if isinstance(self.dimensions, str):
-            self.dimensions = (self.dimensions,)
-        else:
-            self.dimensions = tuple(self.dimensions)
-
-    def _key(self, qnode: QuantumNode) -> tuple[Any, ...]:
-        """Produce comparison key for quantum data.
-
-        Parameters
-        ----------
-        qnode : `QuantumNode`
-            An individual node in a `~lsst.pipe.base.QuantumGraph`
-
-        Returns
-        -------
-        key : `tuple`
-        """
-        dataId = qnode.quantum.dataId
-        assert dataId is not None, "Quantum DataId cannot be None"
-        key = tuple(dataId[dim] for dim in self.dimensions)
-        return key
-
-    def fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
-        taskDef = graph.findTaskDefByLabel(self.taskLabel)
-        if taskDef is None:
-            raise ValueError(f"Cannot find task with label {self.taskLabel}")
-        quanta = list(graph.getNodesForTask(taskDef))
-        keyQuanta = defaultdict(list)
-        for q in quanta:
-            key = self._key(q)
-            keyQuanta[key].append(q)
-        keys = sorted(keyQuanta.keys(), reverse=self.reverse)
-        networkGraph = graph.graph
-
-        for prev_key, key in itertools.pairwise(keys):
-            for prev_node in keyQuanta[prev_key]:
-                for node in keyQuanta[key]:
-                    # remove any existing edges between the two nodes, but
-                    # don't fail if there are not any. Both directions need
-                    # tried because in a directed graph, order maters
-                    for edge in ((node, prev_node), (prev_node, node)):
-                        with contextlib.suppress(nx.NetworkXException):
-                            networkGraph.remove_edge(*edge)
-
-                    networkGraph.add_edge(prev_node, node)
-        return graph
+warnings.warn(
+    "The execFixupDataId module has moved to lsst.pipe.base.exec_fixup_data_id. "
+    "This forwarding shim will be removed after v30.",
+    category=FutureWarning,
+)
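The module body is reduced to a forwarding shim: ExecFixupDataId is re-exported from lsst.pipe.base.exec_fixup_data_id and importing the old module emits a FutureWarning until the shim is removed after v30. The assoc_fixup() factory from the removed docstring keeps working through the shim, but can point at the new location now; a sketch, assuming the constructor keywords are unchanged from the removed class::

    # lsst/ap/verify/ci_fixup.py, with the import path updated
    from lsst.pipe.base.exec_fixup_data_id import ExecFixupDataId


    def assoc_fixup():
        return ExecFixupDataId(taskLabel="ap_assoc", dimensions=("visit", "detector"))

The pipetask invocation is unchanged: pipetask run --graph-fixup=lsst.ap.verify.ci_fixup.assoc_fixup ...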
lsst/ctrl/mpexec/executionGraphFixup.py
@@ -25,45 +25,20 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

-__all__ = ["ExecutionGraphFixup"]
+__all__ = ("ExecutionGraphFixup",)

-from abc import ABC, abstractmethod
+from deprecated.sphinx import deprecated

-from lsst.pipe.base import QuantumGraph
+import lsst.pipe.base.execution_graph_fixup

+# TODO[DM-51962]: Remove this module.

-class ExecutionGraphFixup(ABC):
-    """Interface for classes which update quantum graphs before execution.

-    Primary goal of this class is to modify quanta dependencies which may
-    not be possible to reflect in a quantum graph using standard tools.
-    One known use case for that is to guarantee particular execution order
-    of visits in CI jobs for cases when outcome depends on the processing
-    order of visits (e.g. AP association pipeline).
-
-    Instances of this class receive pre-ordered sequence of quanta
-    (`~lsst.pipe.base.QuantumGraph` instances) and they are allowed to
-    modify quanta data in place, for example update ``dependencies`` field to
-    add additional dependencies. Returned list of quanta will be re-ordered
-    once again by the graph executor to reflect new dependencies.
-    """
-
-    @abstractmethod
-    def fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
-        """Update quanta in a graph.
-
-        Potentially anything in the graph could be changed if it does not
-        break executor assumptions. If modifications result in a dependency
-        cycle the executor will raise an exception.
-
-        Parameters
-        ----------
-        graph : QuantumGraph
-            Quantum Graph that will be executed by the executor.
-
-        Returns
-        -------
-        graph : QuantumGraph
-            Modified graph.
-        """
-        raise NotImplementedError
+@deprecated(
+    "The ExecutionGraphFixup class has moved to lsst.pipe.base.execution_graph_fixup. "
+    "This forwarding shim will be removed after v30.",
+    version="v30",
+    category=FutureWarning,
+)
+class ExecutionGraphFixup(lsst.pipe.base.execution_graph_fixup.ExecutionGraphFixup):  # noqa: D101
+    pass
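Here the old name stays importable as a @deprecated subclass of the relocated base class. New fixup implementations should subclass lsst.pipe.base.execution_graph_fixup.ExecutionGraphFixup directly; a minimal sketch, assuming the fixupQuanta signature carries over from the removed ABC above::

    from lsst.pipe.base import QuantumGraph
    from lsst.pipe.base.execution_graph_fixup import ExecutionGraphFixup


    class NoOpFixup(ExecutionGraphFixup):
        """A fixup that returns the quantum graph unchanged."""

        def fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
            return graph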
lsst/ctrl/mpexec/log_capture.py
@@ -25,202 +25,16 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

-from __future__ import annotations
+__all__ = ("LogCapture",)

-__all__ = ["LogCapture"]
+import warnings

-import logging
-import os
-import shutil
-import tempfile
-from collections.abc import Iterator
-from contextlib import contextmanager, suppress
-from logging import FileHandler
+from lsst.pipe.base.log_capture import LogCapture

-from lsst.daf.butler import Butler, FileDataset, LimitedButler, Quantum
-from lsst.daf.butler.logging import ButlerLogRecordHandler, ButlerLogRecords, ButlerMDC, JsonLogFormatter
-from lsst.pipe.base import InvalidQuantumError
-from lsst.pipe.base.pipeline_graph import TaskNode
+# TODO[DM-51962]: Remove this module.

-_LOG = logging.getLogger(__name__)
-
-
-class _LogCaptureFlag:
-    """Simple flag to enable/disable log-to-butler saving."""
-
-    store: bool = True
-
-
-class LogCapture:
-    """Class handling capture of logging messages and their export to butler.
-
-    Parameters
-    ----------
-    butler : `~lsst.daf.butler.LimitedButler`
-        Data butler with limited API.
-    full_butler : `~lsst.daf.butler.Butler` or `None`
-        Data butler with full API, or `None` if full Butler is not available.
-        If not none, then this must be the same instance as ``butler``.
-    """
-
-    stream_json_logs = True
-    """If True each log record is written to a temporary file and ingested
-    when quantum completes. If False the records are accumulated in memory
-    and stored in butler on quantum completion. If full butler is not available
-    then temporary file is not used."""
-
-    def __init__(
-        self,
-        butler: LimitedButler,
-        full_butler: Butler | None,
-    ):
-        self.butler = butler
-        self.full_butler = full_butler
-
-    @classmethod
-    def from_limited(cls, butler: LimitedButler) -> LogCapture:
-        return cls(butler, None)
-
-    @classmethod
-    def from_full(cls, butler: Butler) -> LogCapture:
-        return cls(butler, butler)
-
-    @contextmanager
-    def capture_logging(self, task_node: TaskNode, /, quantum: Quantum) -> Iterator[_LogCaptureFlag]:
-        """Configure logging system to capture logs for execution of this task.
-
-        Parameters
-        ----------
-        task_node : `~lsst.pipe.base.pipeline_graph.TaskNode`
-            The task definition.
-        quantum : `~lsst.daf.butler.Quantum`
-            Single Quantum instance.
-
-        Notes
-        -----
-        Expected to be used as a context manager to ensure that logging
-        records are inserted into the butler once the quantum has been
-        executed:
-
-        .. code-block:: py
-
-            with self.capture_logging(task_node, quantum):
-                # Run quantum and capture logs.
-
-        Ths method can also setup logging to attach task- or
-        quantum-specific information to log messages. Potentially this can
-        take into account some info from task configuration as well.
-        """
-        # include quantum dataId and task label into MDC
-        mdc = {"LABEL": task_node.label, "RUN": ""}
-        if quantum.dataId:
-            mdc["LABEL"] += f":{quantum.dataId}"
-        if self.full_butler is not None:
-            mdc["RUN"] = self.full_butler.run or ""
-        ctx = _LogCaptureFlag()
-        log_dataset_name = (
-            task_node.log_output.dataset_type_name if task_node.log_output is not None else None
-        )
-
-        # Add a handler to the root logger to capture execution log output.
-        if log_dataset_name is not None:
-            # Either accumulate into ButlerLogRecords or stream JSON records to
-            # file and ingest that (ingest is possible only with full butler).
-            if self.stream_json_logs and self.full_butler is not None:
-                # Create the log file in a temporary directory rather than
-                # creating a temporary file. This is necessary because
-                # temporary files are created with restrictive permissions
-                # and during file ingest these permissions persist in the
-                # datastore. Using a temp directory allows us to create
-                # a file with umask default permissions.
-                tmpdir = tempfile.mkdtemp(prefix="butler-temp-logs-")
-
-                # Construct a file to receive the log records and "touch" it.
-                log_file = os.path.join(tmpdir, f"butler-log-{task_node.label}.json")
-                with open(log_file, "w"):
-                    pass
-                log_handler_file = FileHandler(log_file)
-                log_handler_file.setFormatter(JsonLogFormatter())
-                logging.getLogger().addHandler(log_handler_file)
-
-                try:
-                    with ButlerMDC.set_mdc(mdc):
-                        yield ctx
-                finally:
-                    # Ensure that the logs are stored in butler.
-                    logging.getLogger().removeHandler(log_handler_file)
-                    log_handler_file.close()
-                    if ctx.store:
-                        self._ingest_log_records(quantum, log_dataset_name, log_file)
-                    shutil.rmtree(tmpdir, ignore_errors=True)
-
-            else:
-                log_handler_memory = ButlerLogRecordHandler()
-                logging.getLogger().addHandler(log_handler_memory)
-
-                try:
-                    with ButlerMDC.set_mdc(mdc):
-                        yield ctx
-                finally:
-                    # Ensure that the logs are stored in butler.
-                    logging.getLogger().removeHandler(log_handler_memory)
-                    if ctx.store:
-                        self._store_log_records(quantum, log_dataset_name, log_handler_memory)
-                    log_handler_memory.records.clear()
-
-        else:
-            with ButlerMDC.set_mdc(mdc):
-                yield ctx
-
-    def _store_log_records(
-        self, quantum: Quantum, dataset_type: str, log_handler: ButlerLogRecordHandler
-    ) -> None:
-        # DatasetRef has to be in the Quantum outputs, can lookup by name.
-        try:
-            [ref] = quantum.outputs[dataset_type]
-        except LookupError as exc:
-            raise InvalidQuantumError(
-                f"Quantum outputs is missing log output dataset type {dataset_type};"
-                " this could happen due to inconsistent options between QuantumGraph generation"
-                " and execution"
-            ) from exc
-
-        self.butler.put(log_handler.records, ref)
-
-    def _ingest_log_records(self, quantum: Quantum, dataset_type: str, filename: str) -> None:
-        # If we are logging to an external file we must always try to
-        # close it.
-        assert self.full_butler is not None, "Expected to have full butler for ingest"
-        ingested = False
-        try:
-            # DatasetRef has to be in the Quantum outputs, can lookup by name.
-            try:
-                [ref] = quantum.outputs[dataset_type]
-            except LookupError as exc:
-                raise InvalidQuantumError(
-                    f"Quantum outputs is missing log output dataset type {dataset_type};"
-                    " this could happen due to inconsistent options between QuantumGraph generation"
-                    " and execution"
-                ) from exc
-
-            # Need to ingest this file directly into butler.
-            dataset = FileDataset(path=filename, refs=ref)
-            try:
-                self.full_butler.ingest(dataset, transfer="move")
-                ingested = True
-            except NotImplementedError:
-                # Some datastores can't receive files (e.g. in-memory datastore
-                # when testing), we store empty list for those just to have a
-                # dataset. Alternative is to read the file as a
-                # ButlerLogRecords object and put it.
-                _LOG.info(
-                    "Log records could not be stored in this butler because the"
-                    " datastore can not ingest files, empty record list is stored instead."
-                )
-                records = ButlerLogRecords.from_records([])
-                self.full_butler.put(records, ref)
-        finally:
-            # remove file if it is not ingested
-            if not ingested:
-                with suppress(OSError):
-                    os.remove(filename)
+warnings.warn(
+    "The log_capture module has moved to lsst.pipe.base.log_capture. "
+    "This forwarding shim will be removed after v30.",
+    category=FutureWarning,
+)
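As with the other shims, importing lsst.ctrl.mpexec.log_capture now only re-exports LogCapture and emits a FutureWarning. Downstream code that must run on stacks from both sides of the move can guard the import, mirroring the dot_tools sketch earlier::

    try:
        from lsst.pipe.base.log_capture import LogCapture
    except ImportError:
        # Stacks older than this release, where the class still lives here.
        from lsst.ctrl.mpexec.log_capture import LogCapture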