lsst-pipe-base 29.2025.4500-py3-none-any.whl → 29.2025.4700-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. lsst/pipe/base/_status.py +156 -11
  2. lsst/pipe/base/log_capture.py +98 -7
  3. lsst/pipe/base/pipeline_graph/expressions.py +3 -3
  4. lsst/pipe/base/quantum_graph/_common.py +21 -1
  5. lsst/pipe/base/quantum_graph/_multiblock.py +14 -39
  6. lsst/pipe/base/quantum_graph/_predicted.py +90 -90
  7. lsst/pipe/base/quantum_graph/_provenance.py +345 -200
  8. lsst/pipe/base/quantum_graph/aggregator/_communicators.py +19 -19
  9. lsst/pipe/base/quantum_graph/aggregator/_progress.py +77 -84
  10. lsst/pipe/base/quantum_graph/aggregator/_scanner.py +201 -72
  11. lsst/pipe/base/quantum_graph/aggregator/_structs.py +45 -35
  12. lsst/pipe/base/quantum_graph/aggregator/_supervisor.py +15 -17
  13. lsst/pipe/base/quantum_graph/aggregator/_writer.py +57 -149
  14. lsst/pipe/base/quantum_graph_builder.py +0 -1
  15. lsst/pipe/base/quantum_provenance_graph.py +2 -44
  16. lsst/pipe/base/single_quantum_executor.py +43 -9
  17. lsst/pipe/base/tests/mocks/_data_id_match.py +1 -1
  18. lsst/pipe/base/tests/mocks/_pipeline_task.py +1 -1
  19. lsst/pipe/base/version.py +1 -1
  20. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/METADATA +1 -1
  21. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/RECORD +29 -29
  22. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/WHEEL +0 -0
  23. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/entry_points.txt +0 -0
  24. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/licenses/COPYRIGHT +0 -0
  25. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/licenses/LICENSE +0 -0
  26. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/licenses/bsd_license.txt +0 -0
  27. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/licenses/gpl-v3.0.txt +0 -0
  28. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/top_level.txt +0 -0
  29. {lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/zip-safe +0 -0
lsst/pipe/base/quantum_graph/aggregator/_writer.py CHANGED
@@ -30,7 +30,6 @@ from __future__ import annotations
 __all__ = ("Writer",)
 
 import dataclasses
-import enum
 import itertools
 import logging
 import operator
@@ -62,69 +61,7 @@ from .._provenance import (
     ProvenanceQuantumModel,
 )
 from ._communicators import WriterCommunicator
-from ._structs import ScanResult
-
-
-class _CompressionState(enum.Enum):
-    """Enumeration of the possible states of compression in `_ScanData`."""
-
-    NOT_COMPRESSED = enum.auto()
-    """Nothing is compressed."""
-
-    LOG_AND_METADATA_COMPRESSED = enum.auto()
-    """Only the logs and metadata are compressed."""
-
-    ALL_COMPRESSED = enum.auto()
-    """All `bytes` are compressed."""
-
-
-@dataclasses.dataclass
-class _ScanData:
-    """Information from a quantum scan that has been partially processed for
-    writing.
-    """
-
-    quantum_id: uuid.UUID
-    """Unique ID of the quantum."""
-
-    log_id: uuid.UUID
-    """Unique ID of the log dataset."""
-
-    metadata_id: uuid.UUID
-    """Unique ID of the metadata dataset."""
-
-    quantum: bytes = b""
-    """Possibly-compressed JSON representation of the quantum provenance."""
-
-    datasets: dict[uuid.UUID, bytes] = dataclasses.field(default_factory=dict)
-    """Possibly-compressed JSON representation of output dataset provenance."""
-
-    log: bytes = b""
-    """Possibly-compressed log dataset content."""
-
-    metadata: bytes = b""
-    """Possibly-compressed metadata dataset content."""
-
-    compression: _CompressionState = _CompressionState.NOT_COMPRESSED
-    """Which data is compressed, if any."""
-
-    def compress(self, compressor: Compressor) -> None:
-        """Compress all data in place, if it isn't already.
-
-        Parameters
-        ----------
-        compressor : `Compressor`
-            Object that can compress `bytes`.
-        """
-        if self.compression is _CompressionState.NOT_COMPRESSED:
-            self.metadata = compressor.compress(self.metadata)
-            self.log = compressor.compress(self.log)
-            self.compression = _CompressionState.LOG_AND_METADATA_COMPRESSED
-        if self.compression is _CompressionState.LOG_AND_METADATA_COMPRESSED:
-            self.quantum = compressor.compress(self.quantum)
-            for key in self.datasets.keys():
-                self.datasets[key] = compressor.compress(self.datasets[key])
-            self.compression = _CompressionState.ALL_COMPRESSED
+from ._structs import WriteRequest
 
 
 @dataclasses.dataclass
@@ -267,8 +204,8 @@ class Writer:
     with datasets as well as with quanta.
     """
 
-    pending_compression_training: list[_ScanData] = dataclasses.field(default_factory=list)
-    """Partially processed quantum scans that are being accumulated in order to
+    pending_compression_training: list[WriteRequest] = dataclasses.field(default_factory=list)
+    """Unprocessed quantum scans that are being accumulated in order to
     build a compression dictionary.
     """
 
@@ -299,7 +236,7 @@ class Writer:
         )
 
     def _populate_indices_and_outputs(self) -> None:
-        all_uuids = set(self.predicted.quantum_indices.keys())
+        all_uuids = set(self.predicted.quantum_datasets.keys())
         for quantum in self.comms.periodically_check_for_cancel(
             itertools.chain(
                 self.predicted.init_quanta.root,
@@ -329,13 +266,12 @@ class Writer:
             if not predicted_quantum.task_label:
                 # Skip the 'packages' producer quantum.
                 continue
-            quantum_index = self.indices[predicted_quantum.quantum_id]
             for predicted_input in itertools.chain.from_iterable(predicted_quantum.inputs.values()):
-                self.xgraph.add_edge(self.indices[predicted_input.dataset_id], quantum_index)
+                self.xgraph.add_edge(predicted_input.dataset_id, predicted_quantum.quantum_id)
                 if predicted_input.dataset_id not in self.output_dataset_ids:
                     self.overall_inputs.setdefault(predicted_input.dataset_id, predicted_input)
             for predicted_output in itertools.chain.from_iterable(predicted_quantum.outputs.values()):
-                self.xgraph.add_edge(quantum_index, self.indices[predicted_output.dataset_id])
+                self.xgraph.add_edge(predicted_quantum.quantum_id, predicted_output.dataset_id)
 
     @staticmethod
     def run(predicted_path: str, comms: WriterCommunicator) -> None:
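
Both edge-insertion calls now key the provenance graph directly on dataset and quantum UUIDs rather than on integer indices. A minimal sketch of the pattern, using networkx's real DiGraph API but made-up UUIDs, may help readers skimming the diff:

    import uuid

    import networkx

    # Hypothetical stand-ins for a predicted quantum and its input/output datasets.
    quantum_id = uuid.uuid4()
    input_id = uuid.uuid4()
    output_id = uuid.uuid4()

    xgraph = networkx.DiGraph()
    # Edges as in the rewritten _populate_indices_and_outputs():
    # input dataset -> consuming quantum, producing quantum -> output dataset.
    xgraph.add_edge(input_id, quantum_id)
    xgraph.add_edge(quantum_id, output_id)

    # successors() of a dataset node then yields its consumers directly,
    # with no index table in between.
    assert list(xgraph.successors(input_id)) == [quantum_id]
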
@@ -365,12 +301,11 @@ class Writer:
         self.comms.log.info("Polling for write requests from scanners.")
         for request in self.comms.poll():
             if data_writers is None:
-                self.pending_compression_training.extend(self.make_scan_data(request))
+                self.pending_compression_training.append(request)
                 if len(self.pending_compression_training) >= self.comms.config.zstd_dict_n_inputs:
                     data_writers = self.make_data_writers()
             else:
-                for scan_data in self.make_scan_data(request):
-                    self.write_scan_data(scan_data, data_writers)
+                self.process_request(request, data_writers)
         if data_writers is None:
             data_writers = self.make_data_writers()
         self.write_init_outputs(data_writers)
@@ -398,8 +333,8 @@ class Writer:
         )
         self.comms.check_for_cancel()
         self.comms.log.info("Compressing and writing queued scan requests.")
-        for scan_data in self.pending_compression_training:
-            self.write_scan_data(scan_data, data_writers)
+        for request in self.pending_compression_training:
+            self.process_request(request, data_writers)
         del self.pending_compression_training
         self.comms.check_for_cancel()
         self.write_overall_inputs(data_writers)
@@ -435,11 +370,11 @@ class Writer:
             predicted_quantum.datastore_records.clear()
             training_inputs.append(predicted_quantum.model_dump_json().encode())
         # Add the provenance quanta, metadata, and logs we've accumulated.
-        for scan_data in self.pending_compression_training:
-            assert scan_data.compression is _CompressionState.NOT_COMPRESSED
-            training_inputs.append(scan_data.quantum)
-            training_inputs.append(scan_data.metadata)
-            training_inputs.append(scan_data.log)
+        for write_request in self.pending_compression_training:
+            assert not write_request.is_compressed, "We can't compress without the compression dictionary."
+            training_inputs.append(write_request.quantum)
+            training_inputs.append(write_request.metadata)
+            training_inputs.append(write_request.logs)
         return zstandard.train_dictionary(self.comms.config.zstd_dict_size, training_inputs)
 
     def write_init_outputs(self, data_writers: _DataWriters) -> None:
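
For context on `zstandard.train_dictionary`: zstd can train a shared dictionary on many small, similarly-structured samples so that each record compresses far better than it would alone, which is why the writer queues requests until it has `zstd_dict_n_inputs` of them. A minimal sketch with made-up samples (only `train_dictionary`, `ZstdCompressor`, and `ZstdDecompressor` are the real API; the sample data is invented):

    import zstandard

    # Made-up samples standing in for serialized quanta, metadata, and logs;
    # the real inputs are JSON blobs that share keys and structure.
    samples = [b'{"quantum_id": %d, "status": "successful"}' % i for i in range(1000)]

    # Train a dictionary of the requested size (in bytes), then compress and
    # decompress with codecs that share it.
    dict_data = zstandard.train_dictionary(16384, samples)
    compressor = zstandard.ZstdCompressor(dict_data=dict_data)
    decompressor = zstandard.ZstdDecompressor(dict_data=dict_data)

    blob = compressor.compress(samples[0])
    assert decompressor.decompress(blob) == samples[0]
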
@@ -458,19 +393,16 @@ class Writer:
                 continue
             existing_outputs = self.existing_init_outputs[predicted_init_quantum.quantum_id]
             for predicted_output in itertools.chain.from_iterable(predicted_init_quantum.outputs.values()):
-                dataset_index = self.indices[predicted_output.dataset_id]
                 provenance_output = ProvenanceDatasetModel.from_predicted(
                     predicted_output,
-                    producer=self.indices[predicted_init_quantum.quantum_id],
-                    consumers=self.xgraph.successors(dataset_index),
+                    producer=predicted_init_quantum.quantum_id,
+                    consumers=self.xgraph.successors(predicted_output.dataset_id),
                 )
-                provenance_output.exists = predicted_output.dataset_id in existing_outputs
+                provenance_output.produced = predicted_output.dataset_id in existing_outputs
                 data_writers.datasets.write_model(
                     provenance_output.dataset_id, provenance_output, data_writers.compressor
                 )
-            init_quanta.root.append(
-                ProvenanceInitQuantumModel.from_predicted(predicted_init_quantum, self.indices)
-            )
+            init_quanta.root.append(ProvenanceInitQuantumModel.from_predicted(predicted_init_quantum))
         data_writers.graph.write_single_model("init_quanta", init_quanta)
 
     def write_overall_inputs(self, data_writers: _DataWriters) -> None:
  def write_overall_inputs(self, data_writers: _DataWriters) -> None:
@@ -484,13 +416,12 @@ class Writer:
484
416
  self.comms.log.info("Writing overall inputs.")
485
417
  for predicted_input in self.comms.periodically_check_for_cancel(self.overall_inputs.values()):
486
418
  if predicted_input.dataset_id not in data_writers.datasets.addresses:
487
- dataset_index = self.indices[predicted_input.dataset_id]
488
419
  data_writers.datasets.write_model(
489
420
  predicted_input.dataset_id,
490
421
  ProvenanceDatasetModel.from_predicted(
491
422
  predicted_input,
492
423
  producer=None,
493
- consumers=self.xgraph.successors(dataset_index),
424
+ consumers=self.xgraph.successors(predicted_input.dataset_id),
494
425
  ),
495
426
  data_writers.compressor,
496
427
  )
@@ -509,84 +440,61 @@
         data = packages.toBytes("json")
         data_writers.graph.write_single_block("packages", data)
 
-    def make_scan_data(self, request: ScanResult) -> list[_ScanData]:
-        """Process a `ScanResult` into `_ScanData`.
+    def process_request(self, request: WriteRequest, data_writers: _DataWriters) -> None:
+        """Process a `WriteRequest` into `_ScanData`.
 
         Parameters
         ----------
-        request : `ScanResult`
+        request : `WriteRequest`
             Result of a quantum scan.
-
-        Returns
-        -------
-        data : `list` [ `_ScanData` ]
-            A zero- or single-element list of `_ScanData` to write or save for
-            compression-dict training. A zero-element list is returned if the
-            scan actually represents an init quantum.
+        data_writers : `_DataWriters`
+            Low-level writers struct.
         """
         if (existing_init_outputs := self.existing_init_outputs.get(request.quantum_id)) is not None:
             self.comms.log.debug("Handling init-output scan for %s.", request.quantum_id)
             existing_init_outputs.update(request.existing_outputs)
             self.comms.report_write()
-            return []
+            return
         self.comms.log.debug("Handling quantum scan for %s.", request.quantum_id)
         predicted_quantum = self.predicted.quantum_datasets[request.quantum_id]
-        quantum_index = self.indices[predicted_quantum.quantum_id]
-        (metadata_output,) = predicted_quantum.outputs[acc.METADATA_OUTPUT_CONNECTION_NAME]
-        (log_output,) = predicted_quantum.outputs[acc.LOG_OUTPUT_CONNECTION_NAME]
-        data = _ScanData(
-            request.quantum_id,
-            metadata_id=metadata_output.dataset_id,
-            log_id=log_output.dataset_id,
-            compression=(
-                _CompressionState.LOG_AND_METADATA_COMPRESSED
-                if request.is_compressed
-                else _CompressionState.NOT_COMPRESSED
-            ),
-        )
+        outputs: dict[uuid.UUID, bytes] = {}
         for predicted_output in itertools.chain.from_iterable(predicted_quantum.outputs.values()):
-            dataset_index = self.indices[predicted_output.dataset_id]
             provenance_output = ProvenanceDatasetModel.from_predicted(
                 predicted_output,
-                producer=quantum_index,
-                consumers=self.xgraph.successors(dataset_index),
+                producer=predicted_quantum.quantum_id,
+                consumers=self.xgraph.successors(predicted_output.dataset_id),
             )
-            provenance_output.exists = provenance_output.dataset_id in request.existing_outputs
-            data.datasets[provenance_output.dataset_id] = provenance_output.model_dump_json().encode()
-        provenance_quantum = ProvenanceQuantumModel.from_predicted(predicted_quantum, self.indices)
-        provenance_quantum.status = request.get_run_status()
-        provenance_quantum.caveats = request.caveats
-        provenance_quantum.exception = request.exception
-        provenance_quantum.resource_usage = request.resource_usage
-        data.quantum = provenance_quantum.model_dump_json().encode()
-        data.metadata = request.metadata
-        data.log = request.log
-        return [data]
-
-    def write_scan_data(self, scan_data: _ScanData, data_writers: _DataWriters) -> None:
-        """Write scan data to the provenance graph.
-
-        Parameters
-        ----------
-        scan_data : `_ScanData`
-            Preprocessed information to write.
-        data_writers : `_DataWriters`
-            Low-level writers struct.
-        """
-        self.comms.log.debug("Writing quantum %s.", scan_data.quantum_id)
-        scan_data.compress(data_writers.compressor)
-        data_writers.quanta.write_bytes(scan_data.quantum_id, scan_data.quantum)
-        for dataset_id, dataset_data in scan_data.datasets.items():
+            provenance_output.produced = provenance_output.dataset_id in request.existing_outputs
+            outputs[provenance_output.dataset_id] = data_writers.compressor.compress(
+                provenance_output.model_dump_json().encode()
+            )
+        if not request.quantum:
+            request.quantum = (
+                ProvenanceQuantumModel.from_predicted(predicted_quantum).model_dump_json().encode()
+            )
+            if request.is_compressed:
+                request.quantum = data_writers.compressor.compress(request.quantum)
+        if not request.is_compressed:
+            request.quantum = data_writers.compressor.compress(request.quantum)
+            if request.metadata:
+                request.metadata = data_writers.compressor.compress(request.metadata)
+            if request.logs:
+                request.logs = data_writers.compressor.compress(request.logs)
+        self.comms.log.debug("Writing quantum %s.", request.quantum_id)
+        data_writers.quanta.write_bytes(request.quantum_id, request.quantum)
+        for dataset_id, dataset_data in outputs.items():
             data_writers.datasets.write_bytes(dataset_id, dataset_data)
-        if scan_data.metadata:
-            address = data_writers.metadata.write_bytes(scan_data.quantum_id, scan_data.metadata)
-            data_writers.metadata.addresses[scan_data.metadata_id] = address
-        if scan_data.log:
-            address = data_writers.logs.write_bytes(scan_data.quantum_id, scan_data.log)
-            data_writers.logs.addresses[scan_data.log_id] = address
+        if request.metadata:
+            (metadata_output,) = predicted_quantum.outputs[acc.METADATA_OUTPUT_CONNECTION_NAME]
+            address = data_writers.metadata.write_bytes(request.quantum_id, request.metadata)
+            data_writers.metadata.addresses[metadata_output.dataset_id] = address
+        if request.logs:
+            (log_output,) = predicted_quantum.outputs[acc.LOG_OUTPUT_CONNECTION_NAME]
+            address = data_writers.logs.write_bytes(request.quantum_id, request.logs)
+            data_writers.logs.addresses[log_output.dataset_id] = address
         # We shouldn't need this predicted quantum anymore; delete it in the
        # hopes that'll free up some memory.
-        del self.predicted.quantum_datasets[scan_data.quantum_id]
+        del self.predicted.quantum_datasets[request.quantum_id]
         self.comms.report_write()
 
 
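The `write_bytes`/`addresses` calls above follow an address-indexed block-store pattern: each compressed blob is appended to a shared stream and its location is recorded under a UUID for later random access. A self-contained toy version of that idea (an illustration only, not the package's actual `_multiblock` implementation):

    import io
    import uuid


    class BlockWriterSketch:
        """Toy address-indexed writer: append blobs, remember (offset, size)."""

        def __init__(self) -> None:
            self._buffer = io.BytesIO()
            self.addresses: dict[uuid.UUID, tuple[int, int]] = {}

        def write_bytes(self, key: uuid.UUID, data: bytes) -> tuple[int, int]:
            # Append the blob and record where it landed.
            offset = self._buffer.tell()
            self._buffer.write(data)
            self.addresses[key] = (offset, len(data))
            return self.addresses[key]

        def read_bytes(self, key: uuid.UUID) -> bytes:
            offset, size = self.addresses[key]
            return self._buffer.getvalue()[offset : offset + size]


    writer = BlockWriterSketch()
    quantum_id = uuid.uuid4()
    writer.write_bytes(quantum_id, b"compressed provenance blob")
    assert writer.read_bytes(quantum_id) == b"compressed provenance blob"
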
lsst/pipe/base/quantum_graph_builder.py CHANGED
@@ -1315,7 +1315,6 @@ class QuantumGraphBuilder(ABC):
             },
         )
         components.quantum_datasets[quantum_datasets.quantum_id] = quantum_datasets
-        components.set_quantum_indices()
         components.set_thin_graph()
         components.set_header_counts()
         return components
lsst/pipe/base/quantum_provenance_graph.py CHANGED
@@ -49,7 +49,7 @@ import threading
 import uuid
 from collections.abc import Callable, Iterator, Mapping, Sequence, Set
 from enum import Enum
-from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypedDict, cast
+from typing import Any, ClassVar, Literal, TypedDict, cast
 
 import astropy.table
 import networkx
@@ -72,7 +72,7 @@ from lsst.daf.butler import (
 from lsst.resources import ResourcePathExpression
 from lsst.utils.logging import PeriodicLogger, getLogger
 
-from ._status import QuantumSuccessCaveats
+from ._status import ExceptionInfo, QuantumSuccessCaveats
 from .automatic_connection_constants import (
     LOG_OUTPUT_CONNECTION_NAME,
     LOG_OUTPUT_TEMPLATE,
@@ -82,9 +82,6 @@ from .automatic_connection_constants import (
 )
 from .graph import QuantumGraph, QuantumNode
 
-if TYPE_CHECKING:
-    from ._task_metadata import TaskMetadata
-
 _LOG = getLogger(__name__)
 
 
@@ -188,45 +185,6 @@ class QuantumRunStatus(Enum):
     SUCCESSFUL = 1
 
 
-class ExceptionInfo(pydantic.BaseModel):
-    """Information about an exception that was raised."""
-
-    type_name: str
-    """Fully-qualified Python type name for the exception raised."""
-
-    message: str
-    """String message included in the exception."""
-
-    metadata: dict[str, float | int | str | bool | None]
-    """Additional metadata included in the exception."""
-
-    @classmethod
-    def _from_metadata(cls, md: TaskMetadata) -> ExceptionInfo:
-        """Construct from task metadata.
-
-        Parameters
-        ----------
-        md : `TaskMetadata`
-            Metadata about the error, as written by
-            `AnnotatedPartialOutputsError`.
-
-        Returns
-        -------
-        info : `ExceptionInfo`
-            Information about the exception.
-        """
-        result = cls(type_name=md["type"], message=md["message"], metadata={})
-        if "metadata" in md:
-            raw_err_metadata = md["metadata"].to_dict()
-            for k, v in raw_err_metadata.items():
-                # Guard against error metadata we couldn't serialize later
-                # via Pydantic; don't want one weird value bringing down our
-                # ability to report on an entire run.
-                if isinstance(v, float | int | str | bool):
-                    result.metadata[k] = v
-        return result
-
-
 class QuantumRun(pydantic.BaseModel):
     """Information about a quantum in a given run collection."""
 
lsst/pipe/base/single_quantum_executor.py CHANGED
@@ -44,12 +44,19 @@ from lsst.daf.butler import (
     NamedKeyDict,
     Quantum,
 )
+from lsst.utils.introspection import get_full_type_name
 from lsst.utils.timer import logInfo
 
 from ._quantumContext import ExecutionResources, QuantumContext
-from ._status import AnnotatedPartialOutputsError, InvalidQuantumError, NoWorkFound, QuantumSuccessCaveats
+from ._status import (
+    AnnotatedPartialOutputsError,
+    ExceptionInfo,
+    InvalidQuantumError,
+    NoWorkFound,
+    QuantumSuccessCaveats,
+)
 from .connections import AdjustQuantumHelper
-from .log_capture import LogCapture
+from .log_capture import LogCapture, _ExecutionLogRecordsExtra
 from .pipeline_graph import TaskNode
 from .pipelineTask import PipelineTask
 from .quantum_graph_executor import QuantumExecutor
@@ -147,6 +154,7 @@ class SingleQuantumExecutor(QuantumExecutor):
             self._skip_existing = self._butler.run in self._butler.collections.query(
                 skip_existing_in, flatten_chains=True
             )
+        self._previous_process_quanta: list[uuid.UUID] = []
 
     def execute(
         self, task_node: TaskNode, /, quantum: Quantum, quantum_id: uuid.UUID | None = None
@@ -196,7 +204,7 @@
         # or raises an exception do not try to store logs, as they may be
         # already in butler.
         captureLog.store = False
-        if self._check_existing_outputs(quantum, task_node, limited_butler):
+        if self._check_existing_outputs(quantum, task_node, limited_butler, captureLog.extra):
             _LOG.info(
                 "Skipping already-successful quantum for label=%s dataId=%s.",
                 task_node.label,
@@ -205,6 +213,9 @@
             return quantum
         captureLog.store = True
 
+        captureLog.extra.previous_process_quanta.extend(self._previous_process_quanta)
+        if quantum_id is not None:
+            self._previous_process_quanta.append(quantum_id)
         try:
             quantum = self._updated_quantum_inputs(quantum, task_node, limited_butler)
         except NoWorkFound as exc:
@@ -261,6 +272,11 @@
                 e.__class__.__name__,
                 str(e),
             )
+            captureLog.extra.exception = ExceptionInfo(
+                type_name=get_full_type_name(e),
+                message=str(e),
+                metadata={},
+            )
             raise
         else:
             quantumMetadata["butler_metrics"] = butler_metrics.model_dump()
@@ -268,11 +284,13 @@
             # Stringify the UUID for easier compatibility with
             # PropertyList.
             quantumMetadata["outputs"] = [str(output) for output in outputsPut]
-        logInfo(None, "end", metadata=quantumMetadata)  # type: ignore[arg-type]
-        fullMetadata = task.getFullMetadata()
-        fullMetadata["quantum"] = quantumMetadata
-        if self._job_metadata is not None:
-            fullMetadata["job"] = self._job_metadata
+        finally:
+            logInfo(None, "end", metadata=quantumMetadata)  # type: ignore[arg-type]
+            fullMetadata = task.getFullMetadata()
+            fullMetadata["quantum"] = quantumMetadata
+            if self._job_metadata is not None:
+                fullMetadata["job"] = self._job_metadata
+            captureLog.extra.metadata = fullMetadata
         self._write_metadata(quantum, fullMetadata, task_node, limited_butler)
         stopTime = time.time()
         _LOG.info(
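
Moving the metadata capture into a `finally:` clause means it now runs on the failure path as well as after success, so `captureLog.extra.metadata` is populated even when the exception is re-raised. A standalone sketch of that control flow (names are illustrative, not from the package):

    def run_quantum(fail: bool) -> None:
        metadata = {"status": "unknown"}
        try:
            if fail:
                raise RuntimeError("boom")
        except RuntimeError:
            metadata["status"] = "failed"
            raise
        else:
            metadata["status"] = "successful"
        finally:
            # Runs on success, and also when the exception propagates:
            # the capture can no longer be skipped by a failure.
            print("captured metadata:", metadata)


    run_quantum(fail=False)
    try:
        run_quantum(fail=True)
    except RuntimeError:
        pass
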
@@ -284,7 +302,12 @@
         return quantum
 
     def _check_existing_outputs(
-        self, quantum: Quantum, task_node: TaskNode, /, limited_butler: LimitedButler
+        self,
+        quantum: Quantum,
+        task_node: TaskNode,
+        /,
+        limited_butler: LimitedButler,
+        log_extra: _ExecutionLogRecordsExtra,
     ) -> bool:
         """Decide whether this quantum needs to be executed.
 
@@ -302,6 +325,8 @@
             Task definition structure.
         limited_butler : `~lsst.daf.butler.LimitedButler`
             Butler to use for querying and clobbering.
+        log_extra : `.log_capture.TaskLogRecordsExtra`
+            Extra information to attach to log records.
 
         Returns
         -------
@@ -337,6 +362,15 @@
             "Looking for existing outputs in the way for label=%s dataId=%s.", task_node.label, quantum.dataId
         )
         ref_dict = limited_butler.stored_many(chain.from_iterable(quantum.outputs.values()))
+        if task_node.log_output is not None:
+            (log_ref,) = quantum.outputs[task_node.log_output.dataset_type_name]
+            if ref_dict[log_ref]:
+                _LOG.debug(
+                    "Attaching logs from previous attempt on label=%s dataId=%s.",
+                    task_node.label,
+                    quantum.dataId,
+                )
+                log_extra.attach_previous_attempt(limited_butler.get(log_ref))
         existingRefs = [ref for ref, exists in ref_dict.items() if exists]
         missingRefs = [ref for ref, exists in ref_dict.items() if not exists]
         if existingRefs:
lsst/pipe/base/tests/mocks/_data_id_match.py CHANGED
@@ -37,7 +37,7 @@ from uuid import UUID
 import astropy.time
 
 from lsst.daf.butler import DataId
-from lsst.daf.butler.registry.queries.expressions.parser import Node, TreeVisitor, parse_expression
+from lsst.daf.butler.queries.expressions.parser import Node, TreeVisitor, parse_expression
 
 
 class _DataIdMatchTreeVisitor(TreeVisitor):
lsst/pipe/base/tests/mocks/_pipeline_task.py CHANGED
@@ -95,7 +95,7 @@ class ForcedFailure:
 
     memory_required: Quantity | None = None
     """If not `None`, this failure simulates an out-of-memory failure by
-    raising only if this value exceeds `ExecutionResources.max_mem`.f
+    raising only if this value exceeds `ExecutionResources.max_mem`.
     """
 
     def set_config(self, config: MockPipelineTaskConfig) -> None:
lsst/pipe/base/version.py CHANGED
@@ -1,2 +1,2 @@
 __all__ = ["__version__"]
-__version__ = "29.2025.4500"
+__version__ = "29.2025.4700"
{lsst_pipe_base-29.2025.4500.dist-info → lsst_pipe_base-29.2025.4700.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lsst-pipe-base
-Version: 29.2025.4500
+Version: 29.2025.4700
 Summary: Pipeline infrastructure for the Rubin Science Pipelines.
 Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
 License-Expression: BSD-3-Clause OR GPL-3.0-or-later