lsst-pipe-base 29.2025.3900__py3-none-any.whl → 29.2025.4100__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. lsst/pipe/base/_task_metadata.py +15 -0
  2. lsst/pipe/base/dot_tools.py +14 -152
  3. lsst/pipe/base/exec_fixup_data_id.py +17 -44
  4. lsst/pipe/base/execution_graph_fixup.py +49 -18
  5. lsst/pipe/base/graph/_versionDeserializers.py +6 -5
  6. lsst/pipe/base/graph/graph.py +30 -10
  7. lsst/pipe/base/graph/graphSummary.py +30 -0
  8. lsst/pipe/base/graph_walker.py +119 -0
  9. lsst/pipe/base/log_capture.py +5 -2
  10. lsst/pipe/base/mermaid_tools.py +11 -64
  11. lsst/pipe/base/mp_graph_executor.py +298 -236
  12. lsst/pipe/base/pipeline_graph/io.py +1 -1
  13. lsst/pipe/base/quantum_graph/__init__.py +32 -0
  14. lsst/pipe/base/quantum_graph/_common.py +632 -0
  15. lsst/pipe/base/quantum_graph/_multiblock.py +808 -0
  16. lsst/pipe/base/quantum_graph/_predicted.py +1950 -0
  17. lsst/pipe/base/quantum_graph/visualization.py +302 -0
  18. lsst/pipe/base/quantum_graph_builder.py +292 -34
  19. lsst/pipe/base/quantum_graph_executor.py +2 -1
  20. lsst/pipe/base/quantum_provenance_graph.py +16 -7
  21. lsst/pipe/base/quantum_reports.py +45 -0
  22. lsst/pipe/base/separable_pipeline_executor.py +126 -15
  23. lsst/pipe/base/simple_pipeline_executor.py +44 -43
  24. lsst/pipe/base/single_quantum_executor.py +1 -40
  25. lsst/pipe/base/tests/mocks/__init__.py +1 -1
  26. lsst/pipe/base/tests/mocks/_pipeline_task.py +16 -1
  27. lsst/pipe/base/tests/mocks/{_in_memory_repo.py → _repo.py} +324 -45
  28. lsst/pipe/base/tests/mocks/_storage_class.py +51 -0
  29. lsst/pipe/base/tests/simpleQGraph.py +11 -5
  30. lsst/pipe/base/version.py +1 -1
  31. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/METADATA +2 -1
  32. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/RECORD +40 -34
  33. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/WHEEL +0 -0
  34. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/entry_points.txt +0 -0
  35. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/licenses/COPYRIGHT +0 -0
  36. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/licenses/LICENSE +0 -0
  37. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/licenses/bsd_license.txt +0 -0
  38. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/licenses/gpl-v3.0.txt +0 -0
  39. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/top_level.txt +0 -0
  40. {lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/zip-safe +0 -0
lsst/pipe/base/tests/mocks/{_in_memory_repo.py → _repo.py} RENAMED
@@ -27,18 +27,31 @@
 
  from __future__ import annotations
 
- __all__ = ("InMemoryRepo",)
+ __all__ = ("DirectButlerRepo", "InMemoryRepo", "MockRepo")
 
- from collections.abc import Iterable, Mapping
+ import tempfile
+ from abc import ABC, abstractmethod
+ from collections.abc import Iterable, Iterator, Mapping
+ from contextlib import contextmanager
  from typing import Any
 
- from lsst.daf.butler import CollectionType, DataCoordinate, DatasetRef, DatasetType, RegistryConfig
+ from lsst.daf.butler import (
+     Butler,
+     CollectionType,
+     DataCoordinate,
+     DatasetRef,
+     DatasetType,
+     DimensionConfig,
+     LimitedButler,
+     RegistryConfig,
+ )
  from lsst.daf.butler.tests.utils import create_populated_sqlite_registry
  from lsst.resources import ResourcePath, ResourcePathExpression
  from lsst.sphgeom import RangeSet
 
  from ...all_dimensions_quantum_graph_builder import AllDimensionsQuantumGraphBuilder
  from ...pipeline_graph import PipelineGraph
+ from ...quantum_graph import PredictedQuantumGraph
  from ...single_quantum_executor import SingleQuantumExecutor
  from ..in_memory_limited_butler import InMemoryLimitedButler
  from ._pipeline_task import (
@@ -49,16 +62,13 @@ from ._pipeline_task import (
  from ._storage_class import MockDataset, is_mock_name
 
 
- class InMemoryRepo:
-     """A test helper that simulates a butler repository for task execution
-     without any disk I/O.
+ class MockRepo(ABC):
+     """A test helper that populates a butler repository for task execution.
 
      Parameters
      ----------
-     *args : `str` or `lsst.resources.ResourcePath`
-         Butler YAML import files to load into the test repository.
-     registry_config : `lsst.daf.butler.RegistryConfig`, optional
-         Registry configuration for the repository.
+     butler : `lsst.daf.butler.Butler`
+         Butler to use for at least quantum graph building. Must be writeable.
      input_run : `str`, optional
          Name of a `~lsst.daf.butler.CollectionType.RUN` collection that will be
          used as an input to quantum graph generation. Input datasets created
@@ -67,49 +77,22 @@ class InMemoryRepo:
          Name of a `~lsst.daf.butler.CollectionType.CHAINED` collection that
          will be the direct input to quantum graph generation. This always
          includes ``input_run``.
-     output_run : `str`, optional
-         Name of a `~lsst.daf.butler.CollectionType.RUN` collection for
-         execution outputs.
-     use_import_collections_as_input : `bool` `str`, or \
-         `~collections.abc.Iterable` [ `str`], optional
-         Additional collections from YAML import files to include in
-         ``input_chain``, or `True` to include all such collections (in
-         chain-flattened lexicographical order).
-     data_root : convertible to `lsst.resources.ResourcePath`, optional
-         Root directory to join to each element in ``*args``. Defaults to
-         the `lsst.daf.butler.tests.registry_data` package.
-
-     Notes
-     -----
-     This helper maintains an `..pipeline_graph.PipelineGraph` and a
-     no-datastore butler backed by an in-memory SQLite database for use in
-     quantum graph generation.
+     input_children : `str` or `~collections.abc.Iterable` [ `str`], optional
+         Additional collections to include in ``input_chain``.
      """
 
      def __init__(
          self,
-         *args: str | ResourcePath,
-         registry_config: RegistryConfig | None = None,
+         butler: Butler,
          input_run: str = "input_run",
          input_chain: str = "input_chain",
-         output_run: str = "output_run",
-         use_import_collections_as_input: bool | str | Iterable[str] = True,
-         data_root: ResourcePathExpression | None = "resource://lsst.daf.butler/tests/registry_data",
+         input_children: Iterable[str] = (),
      ):
-         if data_root is not None:
-             data_root = ResourcePath(data_root, forceDirectory=True)
-             args = tuple(data_root.join(arg) for arg in args)
-         self.butler = create_populated_sqlite_registry(*args, registry_config=registry_config)
+         self.butler = butler
          input_chain_definition = [input_run]
-         if use_import_collections_as_input:
-             if use_import_collections_as_input is True:
-                 use_import_collections_as_input = sorted(
-                     self.butler.collections.query("*", flatten_chains=True)
-                 )
-             input_chain_definition += list(use_import_collections_as_input)
+         input_chain_definition.extend(input_children)
          self.input_run = input_run
          self.input_chain = input_chain
-         self.output_run = output_run
          self.butler.collections.register(self.input_run)
          self.butler.collections.register(self.input_chain, CollectionType.CHAINED)
          self.butler.collections.redefine_chain(self.input_chain, input_chain_definition)
@@ -206,9 +189,55 @@
          label = f"task_auto{self.last_auto_task_index}"
          self.pipeline_graph.add_task(label, task_class=task_class, config=config)
 
+     def make_quantum_graph(
+         self,
+         *,
+         output: str | None = None,
+         output_run: str = "output_run",
+         insert_mocked_inputs: bool = True,
+         register_output_dataset_types: bool = True,
+     ) -> PredictedQuantumGraph:
+         """Make a quantum graph from the pipeline task and internal data
+         repository.
+
+         Parameters
+         ----------
+         output : `str` or `None`, optional
+             Name of the output chained collection to embed within the quantum
+             graph. Note that this does not actually create this collection.
+         output_run : `str`, optional
+             Name of the `~lsst.daf.butler.CollectionType.RUN` collection for
+             execution outputs. Note that this does not actually create this
+             collection.
+         insert_mocked_inputs : `bool`, optional
+             Whether to automatically insert datasets for all overall inputs to
+             the pipeline graph whose dataset types have not already been
+             registered. If set to `False`, inputs must be provided by imported
+             YAML files or explicit calls to `insert_datasets`, which provides
+             more fine-grained control over the data IDs of the datasets.
+         register_output_dataset_types : `bool`, optional
+             If `True`, register all output dataset types.
+
+         Returns
+         -------
+         qg : `..quantum_graph.PredictedQuantumGraph`
+             Quantum graph. Datastore records will not be attached, since the
+             test helper does not actually have a datastore.
+         """
+         return (
+             self.make_quantum_graph_builder(
+                 insert_mocked_inputs=insert_mocked_inputs,
+                 register_output_dataset_types=register_output_dataset_types,
+                 output_run=output_run,
+             )
+             .finish(output=output, attach_datastore_records=False)
+             .assemble()
+         )
+
      def make_quantum_graph_builder(
          self,
          *,
+         output_run: str = "output_run",
          insert_mocked_inputs: bool = True,
          register_output_dataset_types: bool = True,
      ) -> AllDimensionsQuantumGraphBuilder:
@@ -217,6 +246,10 @@
 
          Parameters
          ----------
+         output_run : `str`, optional
+             Name of the `~lsst.daf.butler.CollectionType.RUN` collection for
+             execution outputs. Note that this does not actually create this
+             collection.
          insert_mocked_inputs : `bool`, optional
              Whether to automatically insert datasets for all overall inputs to
              the pipeline graph whose dataset types have not already been
@@ -244,7 +277,7 @@
              self.pipeline_graph,
              self.butler,
              input_collections=[self.input_chain],
-             output_run=self.output_run,
+             output_run=output_run,
          )
          if register_output_dataset_types:
              self.pipeline_graph.register_dataset_types(self.butler)
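
Taken together, these hunks move ``output_run`` from constructor state to a per-call argument, so a single helper can build graphs for several output runs. A minimal usage sketch (the task-setup step is elided, and ``base.yaml`` is a hypothetical import file resolved against the default ``data_root``; this assumes the ``mocks`` package re-exports the names in the new ``__all__``):

    from lsst.pipe.base.tests.mocks import InMemoryRepo

    repo = InMemoryRepo("base.yaml")
    # ... add mocked tasks to repo.pipeline_graph here ...

    # output_run is now chosen per call rather than fixed at construction:
    qg_a = repo.make_quantum_graph(output_run="run_a")
    qg_b = repo.make_quantum_graph(output_run="run_b")  # same repo, new run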
@@ -311,6 +344,117 @@
              )
          else:
              data_ids = self.butler.query_data_ids(dimensions, *args, **kwargs, explain=False)
+         return self._insert_datasets_impl(dataset_type, data_ids)
+
+     @abstractmethod
+     def _insert_datasets_impl(
+         self, dataset_type: DatasetType, data_ids: list[DataCoordinate]
+     ) -> list[DatasetRef]:
+         """Insert datasets after their data IDs have been generated.
+
+         Parameters
+         ----------
+         dataset_type : `lsst.daf.butler.DatasetType`
+             Type of the datasets.
+         data_ids : `list` [ `lsst.daf.butler.DataCoordinate` ]
+             Data IDs of all datasets.
+
+         Returns
+         -------
+         refs : `list` [ `lsst.daf.butler.DatasetRef` ]
+             References to the new datasets.
+         """
+         raise NotImplementedError()
+
+     @abstractmethod
+     def make_single_quantum_executor(
+         self, qg: PredictedQuantumGraph
+     ) -> tuple[SingleQuantumExecutor, LimitedButler]:
+         """Make a single-quantum executor.
+
+         Parameters
+         ----------
+         qg : `..quantum_graph.PredictedQuantumGraph`
+             Graph whose quanta the executor must be capable of executing.
+
+         Returns
+         -------
+         executor : `..single_quantum_executor.SingleQuantumExecutor`
+             An executor for a single quantum.
+         butler : `lsst.daf.butler.LimitedButler`
+             The butler that the executor will write to.
+         """
+         raise NotImplementedError()
+
+
+ class InMemoryRepo(MockRepo):
+     """A test helper that simulates a butler repository for task execution
+     without any disk I/O.
+
+     Parameters
+     ----------
+     *args : `str` or `lsst.resources.ResourcePath`
+         Butler YAML import files to load into the test repository.
+     registry_config : `lsst.daf.butler.RegistryConfig`, optional
+         Registry configuration for the repository.
+     dimension_config : `lsst.daf.butler.DimensionConfig`, optional
+         Dimension universe configuration for the repository.
+     input_run : `str`, optional
+         Name of a `~lsst.daf.butler.CollectionType.RUN` collection that will be
+         used as an input to quantum graph generation. Input datasets created
+         by the helper are added to this collection.
+     input_chain : `str`, optional
+         Name of a `~lsst.daf.butler.CollectionType.CHAINED` collection that
+         will be the direct input to quantum graph generation. This always
+         includes ``input_run``.
+     use_import_collections_as_input : `bool`, `str`, or \
+         `~collections.abc.Iterable` [ `str`], optional
+         Additional collections from YAML import files to include in
+         ``input_chain``, or `True` to include all such collections (in
+         chain-flattened lexicographical order).
+     data_root : convertible to `lsst.resources.ResourcePath`, optional
+         Root directory to join to each element in ``*args``. Defaults to
+         the `lsst.daf.butler.tests.registry_data` package.
+
+     Notes
+     -----
+     This helper maintains an `..pipeline_graph.PipelineGraph` and a
+     no-datastore butler backed by an in-memory SQLite database for use in
+     quantum graph generation. It creates a separate in-memory limited butler
+     for execution as needed.
+     """
+
+     def __init__(
+         self,
+         *args: str | ResourcePath,
+         registry_config: RegistryConfig | None = None,
+         dimension_config: DimensionConfig | None = None,
+         input_run: str = "input_run",
+         input_chain: str = "input_chain",
+         use_import_collections_as_input: bool | str | Iterable[str] = True,
+         data_root: ResourcePathExpression | None = "resource://lsst.daf.butler/tests/registry_data",
+     ):
+         if data_root is not None:
+             data_root = ResourcePath(data_root, forceDirectory=True)
+             args = tuple(data_root.join(arg) for arg in args)
+         butler = create_populated_sqlite_registry(
+             *args, registry_config=registry_config, dimension_config=dimension_config
+         )
+         if use_import_collections_as_input:
+             if use_import_collections_as_input is True:
+                 use_import_collections_as_input = sorted(butler.collections.query("*", flatten_chains=True))
+         else:
+             use_import_collections_as_input = ()
+         super().__init__(
+             butler,
+             input_run=input_run,
+             input_chain=input_chain,
+             input_children=list(use_import_collections_as_input),
+         )
+
+     def _insert_datasets_impl(
+         self, dataset_type: DatasetType, data_ids: list[DataCoordinate]
+     ) -> list[DatasetRef]:
          return self.butler.registry.insertDatasets(dataset_type, data_ids, run=self.input_run)
 
      def make_limited_butler(self) -> InMemoryLimitedButler:
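
This hunk is the heart of the refactor: ``MockRepo`` keeps the data-ID query logic and defers storage to the abstract ``_insert_datasets_impl`` hook (a template-method split), while ``make_single_quantum_executor`` lets each backend decide how quanta run. A sketch of what any further concrete subclass must provide; the class below is hypothetical and simply mirrors the two shipped implementations:

    from lsst.daf.butler import DataCoordinate, DatasetRef, DatasetType, LimitedButler
    from lsst.pipe.base.quantum_graph import PredictedQuantumGraph
    from lsst.pipe.base.single_quantum_executor import SingleQuantumExecutor
    from lsst.pipe.base.tests.mocks import MockRepo


    class RegistryOnlyRepo(MockRepo):
        """Hypothetical subclass: registry-only inserts, full-butler execution."""

        def _insert_datasets_impl(
            self, dataset_type: DatasetType, data_ids: list[DataCoordinate]
        ) -> list[DatasetRef]:
            # Store inputs in the registry only, exactly as InMemoryRepo does above.
            return self.butler.registry.insertDatasets(dataset_type, data_ids, run=self.input_run)

        def make_single_quantum_executor(
            self, qg: PredictedQuantumGraph
        ) -> tuple[SingleQuantumExecutor, LimitedButler]:
            # Execute against the same full butler, as DirectButlerRepo does below.
            return SingleQuantumExecutor(limited_butler_factory=None, butler=self.butler), self.butler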
@@ -343,9 +487,16 @@
          )
          return butler
 
-     def make_single_quantum_executor(self) -> tuple[SingleQuantumExecutor, InMemoryLimitedButler]:
+     def make_single_quantum_executor(
+         self, qg: PredictedQuantumGraph | None = None
+     ) -> tuple[SingleQuantumExecutor, InMemoryLimitedButler]:
          """Make a single-quantum executor backed by a new limited butler.
 
+         Parameters
+         ----------
+         qg : `..quantum_graph.PredictedQuantumGraph`
+             Ignored by this implementation.
+
          Returns
          -------
          executor : `..single_quantum_executor.SingleQuantumExecutor`
@@ -355,3 +506,131 @@
          """
          butler = self.make_limited_butler()
          return SingleQuantumExecutor(limited_butler_factory=butler.factory), butler
+
+
+ class DirectButlerRepo(MockRepo):
+     """A test helper for task execution backed by a local direct butler.
+
+     Parameters
+     ----------
+     butler : `lsst.daf.butler.direct_butler.DirectButler`
+         Butler to write to.
+     *args : `str` or `lsst.resources.ResourcePath`
+         Butler YAML import files to load into the test repository.
+     input_run : `str`, optional
+         Name of a `~lsst.daf.butler.CollectionType.RUN` collection that will be
+         used as an input to quantum graph generation. Input datasets created
+         by the helper are added to this collection.
+     input_chain : `str`, optional
+         Name of a `~lsst.daf.butler.CollectionType.CHAINED` collection that
+         will be the direct input to quantum graph generation. This always
+         includes ``input_run``.
+     use_import_collections_as_input : `bool`, `str`, or \
+         `~collections.abc.Iterable` [ `str`], optional
+         Additional collections from YAML import files to include in
+         ``input_chain``, or `True` to include all such collections (in
+         chain-flattened lexicographical order).
+     data_root : convertible to `lsst.resources.ResourcePath`, optional
+         Root directory to join to each element in ``*args``. Defaults to
+         the `lsst.daf.butler.tests.registry_data` package.
+
+     Notes
+     -----
+     This helper maintains an `..pipeline_graph.PipelineGraph` and a
+     no-datastore butler backed by an in-memory SQLite database for use in
+     quantum graph generation. It creates a separate in-memory limited butler
+     for execution as needed.
+     """
+
+     def __init__(
+         self,
+         butler: Butler,
+         *args: str | ResourcePath,
+         input_run: str = "input_run",
+         input_chain: str = "input_chain",
+         use_import_collections_as_input: bool | str | Iterable[str] = True,
+         data_root: ResourcePathExpression | None = "resource://lsst.daf.butler/tests/registry_data",
+     ):
+         if data_root is not None:
+             data_root = ResourcePath(data_root, forceDirectory=True)
+             args = tuple(data_root.join(arg) for arg in args)
+         for arg in args:
+             butler.import_(filename=arg)
+         if use_import_collections_as_input:
+             if use_import_collections_as_input is True:
+                 use_import_collections_as_input = sorted(butler.collections.query("*", flatten_chains=True))
+         else:
+             use_import_collections_as_input = ()
+         super().__init__(
+             butler,
+             input_run=input_run,
+             input_chain=input_chain,
+             input_children=list(use_import_collections_as_input),
+         )
+
+     @classmethod
+     @contextmanager
+     def make_temporary(
+         cls,
+         *args: str | ResourcePath,
+         input_run: str = "input_run",
+         input_chain: str = "input_chain",
+         use_import_collections_as_input: bool | str | Iterable[str] = True,
+         data_root: ResourcePathExpression | None = "resource://lsst.daf.butler/tests/registry_data",
+         **kwargs: Any,
+     ) -> Iterator[tuple[DirectButlerRepo, str]]:
+         with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as root:
+             config = Butler.makeRepo(root, **kwargs)
+             butler = Butler.from_config(config, writeable=True)
+             yield (
+                 cls(
+                     butler,
+                     *args,
+                     input_run=input_run,
+                     input_chain=input_chain,
+                     use_import_collections_as_input=use_import_collections_as_input,
+                     data_root=data_root,
+                 ),
+                 root,
+             )
+
+     def _insert_datasets_impl(
+         self, dataset_type: DatasetType, data_ids: list[DataCoordinate]
+     ) -> list[DatasetRef]:
+         if is_mock_name(dataset_type.storageClass_name):
+             refs: list[DatasetRef] = []
+             for data_id in data_ids:
+                 data_id = self.butler.registry.expandDataId(data_id)
+                 ref = DatasetRef(dataset_type, data_id, run=self.input_run)
+                 self.butler.put(
+                     MockDataset(
+                         dataset_id=ref.id,
+                         dataset_type=ref.datasetType.to_simple(),
+                         data_id=dict(ref.dataId.mapping),
+                         run=ref.run,
+                     ),
+                     ref,
+                 )
+                 refs.append(ref)
+             return refs
+         else:
+             return self.butler.registry.insertDatasets(dataset_type, data_ids, run=self.input_run)
+
+     def make_single_quantum_executor(
+         self, qg: PredictedQuantumGraph | None = None
+     ) -> tuple[SingleQuantumExecutor, Butler]:
+         """Make a single-quantum executor backed by a new limited butler.
+
+         Parameters
+         ----------
+         qg : `..quantum_graph.PredictedQuantumGraph`
+             Ignored by this implementation.
+
+         Returns
+         -------
+         executor : `..single_quantum_executor.SingleQuantumExecutor`
+             An executor for a single quantum.
+         butler : `lsst.daf.butler.Butler`
+             The butler that the executor will write to.
+         """
+         return SingleQuantumExecutor(limited_butler_factory=None, butler=self.butler), self.butler
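
``make_temporary`` wraps the whole lifecycle (``Butler.makeRepo``, YAML import, cleanup) in a context manager. A usage sketch, with ``base.yaml`` again a hypothetical import file and the task setup elided:

    from lsst.pipe.base.tests.mocks import DirectButlerRepo

    with DirectButlerRepo.make_temporary("base.yaml") as (repo, root):
        # ... add mocked tasks to repo.pipeline_graph here ...
        qg = repo.make_quantum_graph(output_run="output_run")
        executor, butler = repo.make_single_quantum_executor(qg)
        # run quanta with `executor`; outputs land in `butler` under `root`,
        # which is removed when the block exits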
lsst/pipe/base/tests/mocks/_storage_class.py CHANGED
@@ -173,6 +173,12 @@ class MockDataset(pydantic.BaseModel):
      parameters: dict[str, str] | None = None
      """`repr` of all parameters applied when reading this dataset."""
 
+     int_value: int | None = None
+     """An arbitrary integer value stored in the mock dataset."""
+
+     str_value: str | None = None
+     """An arbitrary string value stored in the mock dataset."""
+
      @property
      def storage_class(self) -> str:
          return cast(str, self.dataset_type.storageClass)
@@ -236,6 +242,21 @@
          """See `pydantic.BaseModel.model_json_schema`."""
          return super().model_json_schema(*args, **kwargs)
 
+     @classmethod
+     def model_validate(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate`."""
+         return super().model_validate(*args, **kwargs)
+
+     @classmethod
+     def model_validate_json(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate_json`."""
+         return super().model_validate_json(*args, **kwargs)
+
+     @classmethod
+     def model_validate_strings(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate_strings`."""
+         return super().model_validate_strings(*args, **kwargs)
+
 
  class ConvertedUnmockedDataset(pydantic.BaseModel):
      """A marker class that represents a conversion from a regular in-memory
@@ -275,6 +296,21 @@ class ConvertedUnmockedDataset(pydantic.BaseModel):
          """See `pydantic.BaseModel.model_json_schema`."""
          return super().model_json_schema(*args, **kwargs)
 
+     @classmethod
+     def model_validate(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate`."""
+         return super().model_validate(*args, **kwargs)
+
+     @classmethod
+     def model_validate_json(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate_json`."""
+         return super().model_validate_json(*args, **kwargs)
+
+     @classmethod
+     def model_validate_strings(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate_strings`."""
+         return super().model_validate_strings(*args, **kwargs)
+
 
  class MockDatasetQuantum(pydantic.BaseModel):
      """Description of the quantum that produced a mock dataset.
@@ -325,6 +361,21 @@
          """See `pydantic.BaseModel.model_json_schema`."""
          return super().model_json_schema(*args, **kwargs)
 
+     @classmethod
+     def model_validate(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate`."""
+         return super().model_validate(*args, **kwargs)
+
+     @classmethod
+     def model_validate_json(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate_json`."""
+         return super().model_validate_json(*args, **kwargs)
+
+     @classmethod
+     def model_validate_strings(cls, *args: Any, **kwargs: Any) -> Any:
+         """See `pydantic.BaseModel.model_validate_strings`."""
+         return super().model_validate_strings(*args, **kwargs)
+
 
  MockDataset.model_rebuild()
 
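The new ``int_value``/``str_value`` fields give tests a payload to assert on, and the re-declared ``model_validate*`` classmethods are behavior-neutral pass-throughs to pydantic (presumably declared so documentation tooling resolves them on these classes). A round-trip sketch, assuming ``mock`` is an existing ``MockDataset`` instance:

    serialized = mock.model_dump_json()              # plain pydantic v2 API
    restored = MockDataset.model_validate_json(serialized)
    assert restored.int_value == mock.int_value      # new optional payload fields
    assert restored.str_value == mock.str_value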
lsst/pipe/base/tests/simpleQGraph.py CHANGED
@@ -49,7 +49,13 @@ from .. import connectionTypes as cT
  from .._instrument import Instrument
  from ..all_dimensions_quantum_graph_builder import AllDimensionsQuantumGraphBuilder
  from ..all_dimensions_quantum_graph_builder import DatasetQueryConstraintVariant as DSQVariant
- from ..automatic_connection_constants import PACKAGES_INIT_OUTPUT_NAME, PACKAGES_INIT_OUTPUT_STORAGE_CLASS
+ from ..automatic_connection_constants import (
+     CONFIG_INIT_OUTPUT_CONNECTION_NAME,
+     LOG_OUTPUT_CONNECTION_NAME,
+     METADATA_OUTPUT_CONNECTION_NAME,
+     PACKAGES_INIT_OUTPUT_NAME,
+     PACKAGES_INIT_OUTPUT_STORAGE_CLASS,
+ )
  from ..config import PipelineTaskConfig
  from ..connections import PipelineTaskConnections
  from ..graph import QuantumGraph
@@ -361,13 +367,13 @@ def populateButler(
      if run is not None:
          butler.registry.registerRun(run)
      for dsType in dsTypes:
-         if dsType == "packages":
+         if dsType == PACKAGES_INIT_OUTPUT_NAME:
              # Version is intentionally inconsistent.
              # Dict is convertible to Packages if Packages is installed.
              data: Any = {"python": "9.9.99"}
              butler.put(data, dsType, run=run)
          else:
-             if dsType.endswith("_config"):
+             if dsType.endswith(CONFIG_INIT_OUTPUT_CONNECTION_NAME):
                  # find a config from matching task name or make a new one
                  taskLabel, _, _ = dsType.rpartition("_")
                  task_node = pipeline_graph.tasks.get(taskLabel)
@@ -375,9 +381,9 @@ def populateButler(
                      data = task_node.config
                  else:
                      data = AddTaskConfig()
-             elif dsType.endswith("_metadata"):
+             elif dsType.endswith(METADATA_OUTPUT_CONNECTION_NAME):
                  data = _TASK_FULL_METADATA_TYPE()
-             elif dsType.endswith("_log"):
+             elif dsType.endswith(LOG_OUTPUT_CONNECTION_NAME):
                  data = ButlerLogRecords.from_records([])
              else:
                  data = numpy.array([0.0, 1.0, 2.0, 5.0])
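
These replacements assume, as the dropped literals imply, that the connection-name constants carry the ``_config``/``_metadata``/``_log`` suffixes, so the classification logic is unchanged. A condensed sketch of the dispatch (the ``classify`` helper is hypothetical):

    from lsst.pipe.base.automatic_connection_constants import (
        CONFIG_INIT_OUTPUT_CONNECTION_NAME,
        LOG_OUTPUT_CONNECTION_NAME,
        METADATA_OUTPUT_CONNECTION_NAME,
        PACKAGES_INIT_OUTPUT_NAME,
    )


    def classify(ds_type_name: str) -> str:
        # Mirrors the suffix checks in populateButler, minus the data payloads.
        if ds_type_name == PACKAGES_INIT_OUTPUT_NAME:
            return "packages"
        if ds_type_name.endswith(CONFIG_INIT_OUTPUT_CONNECTION_NAME):
            return "config"
        if ds_type_name.endswith(METADATA_OUTPUT_CONNECTION_NAME):
            return "metadata"
        if ds_type_name.endswith(LOG_OUTPUT_CONNECTION_NAME):
            return "log"
        return "ordinary data"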
lsst/pipe/base/version.py CHANGED
@@ -1,2 +1,2 @@
  __all__ = ["__version__"]
- __version__ = "29.2025.3900"
+ __version__ = "29.2025.4100"
{lsst_pipe_base-29.2025.3900.dist-info → lsst_pipe_base-29.2025.4100.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lsst-pipe-base
- Version: 29.2025.3900
+ Version: 29.2025.4100
  Summary: Pipeline infrastructure for the Rubin Science Pipelines.
  Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
  License: BSD 3-Clause License
@@ -31,6 +31,7 @@ Requires-Dist: wcwidth
  Requires-Dist: pyyaml>=5.1
  Requires-Dist: numpy>=1.17
  Requires-Dist: frozendict
+ Requires-Dist: zstandard<0.24,>=0.23.0
  Provides-Extra: test
  Requires-Dist: pytest>=3.2; extra == "test"
  Provides-Extra: mermaid
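
The only metadata change besides the version bump is a new, tightly pinned ``zstandard`` requirement, landing alongside the new ``lsst/pipe/base/quantum_graph`` modules. A minimal round-trip showing the dependency itself; how the package actually uses it is not visible in this diff:

    import zstandard

    payload = b"quantum-graph block payload"
    compressed = zstandard.ZstdCompressor(level=10).compress(payload)
    assert zstandard.ZstdDecompressor().decompress(compressed) == payload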