lsst-pipe-base: 29.2025.3400-py3-none-any.whl → 29.2025.3500-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lsst/pipe/base/dot_tools.py +14 -99
- lsst/pipe/base/graph/graph.py +4 -4
- lsst/pipe/base/mermaid_tools.py +23 -304
- lsst/pipe/base/mp_graph_executor.py +10 -1
- lsst/pipe/base/pipeline_graph/_edges.py +17 -3
- lsst/pipe/base/pipeline_graph/_nodes.py +30 -3
- lsst/pipe/base/pipeline_graph/_tasks.py +3 -1
- lsst/pipe/base/pipeline_graph/visualization/_dot.py +16 -6
- lsst/pipe/base/quantum_graph_builder.py +4 -1
- lsst/pipe/base/quantum_graph_skeleton.py +23 -4
- lsst/pipe/base/quantum_reports.py +16 -0
- lsst/pipe/base/simple_pipeline_executor.py +2 -1
- lsst/pipe/base/single_quantum_executor.py +1 -1
- lsst/pipe/base/tests/mocks/_in_memory_repo.py +1 -1
- lsst/pipe/base/version.py +1 -1
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/METADATA +1 -1
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/RECORD +25 -25
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/WHEEL +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/entry_points.txt +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/COPYRIGHT +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/LICENSE +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/bsd_license.txt +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/gpl-v3.0.txt +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/top_level.txt +0 -0
- {lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/zip-safe +0 -0
lsst/pipe/base/dot_tools.py
CHANGED
@@ -38,17 +38,12 @@ __all__ = ["graph2dot", "pipeline2dot"]
 # -------------------------------
 import html
 import io
-import re
 from collections.abc import Iterable
 from typing import TYPE_CHECKING, Any

 # -----------------------------
 # Imports for other modules --
 # -----------------------------
-from lsst.daf.butler import DatasetType, DimensionUniverse
-
-from . import connectionTypes
-from .connections import iterConnections
 from .pipeline import Pipeline

 if TYPE_CHECKING:
@@ -234,7 +229,7 @@ def pipeline2dot(pipeline: Pipeline | Iterable[TaskDef], file: Any) -> None:

     Parameters
     ----------
-    pipeline : `…
+    pipeline : `.Pipeline` or `~collections.abc.Iterable` [ `.TaskDef` ]
         Pipeline description.
     file : `str` or file object
         File where GraphViz graph (DOT language) is written, can be a file name
@@ -247,30 +242,7 @@ def pipeline2dot(pipeline: Pipeline | Iterable[TaskDef], file: Any) -> None:
     ImportError
         Raised if the task class cannot be imported.
     """
-
-    def expand_dimensions(connection: connectionTypes.BaseConnection) -> list[str]:
-        """Return expanded list of dimensions, with special skypix treatment.
-
-        Parameters
-        ----------
-        connection : `list` [`str`]
-            Connection to examine.
-
-        Returns
-        -------
-        dimensions : `list` [`str`]
-            Expanded list of dimensions.
-        """
-        dimension_set = set()
-        if isinstance(connection, connectionTypes.DimensionedConnection):
-            dimension_set = set(connection.dimensions)
-        skypix_dim = []
-        if "skypix" in dimension_set:
-            dimension_set.remove("skypix")
-            skypix_dim = ["skypix"]
-        dimensions = universe.conform(dimension_set)
-        return list(dimensions.names) + skypix_dim
+    from .pipeline_graph import PipelineGraph, visualization

     # open a file if needed
     close = False
@@ -278,76 +250,19 @@ def pipeline2dot(pipeline: Pipeline | Iterable[TaskDef], file: Any) -> None:
         file = open(file, "w")
         close = True

-    print("digraph Pipeline {", file=file)
-    _renderDefault("graph", _ATTRIBS["defaultGraph"], file)
-    _renderDefault("node", _ATTRIBS["defaultNode"], file)
-    _renderDefault("edge", _ATTRIBS["defaultEdge"], file)
-
-    allDatasets: set[str | tuple[str, str]] = set()
     if isinstance(pipeline, Pipeline):
-        …
-        # next line is workaround until DM-29658
-        labelToTaskName[taskDef.label] = taskNodeName
-
-        _renderTaskNode(taskNodeName, taskDef, file, None)
-
-        metadataRePattern = re.compile("^(.*)_metadata$")
-        for attr in sorted(iterConnections(taskDef.connections, "inputs"), key=lambda x: x.name):
-            if attr.name not in allDatasets:
-                dimensions = expand_dimensions(attr)
-                _renderDSTypeNode(attr.name, dimensions, file)
-                allDatasets.add(attr.name)
-            nodeName, component = DatasetType.splitDatasetTypeName(attr.name)
-            _renderEdge(attr.name, taskNodeName, file)
-            # connect component dataset types to the composite type that
-            # produced it
-            if component is not None and (nodeName, attr.name) not in allDatasets:
-                _renderEdge(nodeName, attr.name, file)
-                allDatasets.add((nodeName, attr.name))
-                if nodeName not in allDatasets:
-                    dimensions = expand_dimensions(attr)
-                    _renderDSTypeNode(nodeName, dimensions, file)
-            # The next if block is a workaround until DM-29658 at which time
-            # metadata connections should start working with the above code
-            if (match := metadataRePattern.match(attr.name)) is not None:
-                matchTaskLabel = match.group(1)
-                metadataNodesToLink.add((matchTaskLabel, attr.name))
-
-        for attr in sorted(iterConnections(taskDef.connections, "prerequisiteInputs"), key=lambda x: x.name):
-            if attr.name not in allDatasets:
-                dimensions = expand_dimensions(attr)
-                _renderDSTypeNode(attr.name, dimensions, file)
-                allDatasets.add(attr.name)
-            # use dashed line for prerequisite edges to distinguish them
-            _renderEdge(attr.name, taskNodeName, file, style="dashed")
-
-        for attr in sorted(iterConnections(taskDef.connections, "outputs"), key=lambda x: x.name):
-            if attr.name not in allDatasets:
-                dimensions = expand_dimensions(attr)
-                _renderDSTypeNode(attr.name, dimensions, file)
-                allDatasets.add(attr.name)
-            _renderEdge(taskNodeName, attr.name, file)
-
-    # This for loop is a workaround until DM-29658 at which time metadata
-    # connections should start working with the above code
-    for matchLabel, dsTypeName in metadataNodesToLink:
-        # only render an edge to metadata if the label is part of the current
-        # graph
-        if (result := labelToTaskName.get(matchLabel)) is not None:
-            _renderEdge(result, dsTypeName, file)
+        pg = pipeline.to_graph(visualization_only=True)
+    else:
+        pg = PipelineGraph()
+        for task_def in pipeline:
+            pg.add_task(
+                task_def.label,
+                task_class=task_def.taskClass,
+                config=task_def.config,
+                connections=task_def.connections,
+            )
+        pg.resolve(visualization_only=True)
+    visualization.show_dot(pg, stream=file, dataset_types=True)

-    print("}", file=file)
     if close:
         file.close()
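For callers, nothing changes at the surface: `pipeline2dot` keeps its signature and now delegates rendering to `pipeline_graph.visualization.show_dot`. A minimal sketch of driving it, assuming the long-standing `Pipeline.fromFile` constructor; the pipeline file name is hypothetical:

```python
from lsst.pipe.base import Pipeline
from lsst.pipe.base.dot_tools import pipeline2dot

# Hypothetical pipeline definition file; per the updated docstring, any
# Pipeline or iterable of TaskDef objects is accepted.
pipeline = Pipeline.fromFile("my_pipeline.yaml")
with open("pipeline.dot", "w") as stream:
    # Internally builds a PipelineGraph resolved with
    # visualization_only=True and calls visualization.show_dot on it.
    pipeline2dot(pipeline, stream)
```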
lsst/pipe/base/graph/graph.py
CHANGED
@@ -191,10 +191,10 @@ class QuantumGraph:
         """
         # Save packages to metadata
         self._metadata = dict(metadata) if metadata is not None else {}
-        self._metadata…
-        self._metadata…
-        self._metadata…
-        self._metadata…
+        self._metadata.setdefault("packages", Packages.fromSystem())
+        self._metadata.setdefault("user", getpass.getuser())
+        self._metadata.setdefault("time", f"{datetime.datetime.now()}")
+        self._metadata.setdefault("full_command", " ".join(sys.argv))

         self._buildId = _buildId if _buildId is not None else BuildId(f"{time.time()}-{os.getpid()}")
         # Data structure used to identify relations between
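The switch to `dict.setdefault` means metadata supplied by the caller now wins over the generated defaults; defaults only fill in keys that are absent. A plain-Python sketch of that semantics:

```python
# Caller-supplied keys are kept; setdefault only adds missing ones.
metadata = {"user": "alice"}
metadata.setdefault("user", "generated-user")    # no-op: key exists
metadata.setdefault("time", "2025-01-01T00:00")  # added: key was absent
assert metadata["user"] == "alice"
```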
lsst/pipe/base/mermaid_tools.py
CHANGED
@@ -33,15 +33,9 @@ from __future__ import annotations

 __all__ = ["graph2mermaid", "pipeline2mermaid"]

-import html
-import re
 from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Literal

-from lsst.daf.butler import DatasetType, DimensionUniverse
-
-from . import connectionTypes
-from .connections import iterConnections
 from .pipeline import Pipeline

 if TYPE_CHECKING:
@@ -141,156 +135,6 @@ def graph2mermaid(qgraph: QuantumGraph, file: Any) -> None:
         file.close()


-def _expand_dimensions(dimension_list: set[str] | Iterable[str], universe: DimensionUniverse) -> list[str]:
-    """Return expanded list of dimensions, with special skypix treatment.
-
-    Parameters
-    ----------
-    dimension_set : `set` [`str`] or iterable of `str`
-        The original set of dimension names.
-    universe : DimensionUniverse
-        Used to conform the dimension set according to a known schema.
-
-    Returns
-    -------
-    dimensions : `list` [`str`]
-        Expanded list of dimensions.
-    """
-    dimension_set = set(dimension_list)
-    skypix_dim = []
-    if "skypix" in dimension_set:
-        dimension_set.remove("skypix")
-        skypix_dim = ["skypix"]
-    dimensions = universe.conform(dimension_set)
-    return list(dimensions.names) + skypix_dim
-
-
-def _format_dimensions(dims: list[str]) -> str:
-    """Format and sort dimension names as a comma-separated list inside curly
-    braces.
-
-    For example, if dims=["detector", "visit"], returns "{detector, visit}".
-
-    Parameters
-    ----------
-    dims : list of str
-        The dimension names to format and sort.
-
-    Returns
-    -------
-    str
-        The formatted dimension string, or an empty string if no dimensions.
-    """
-    if not dims:
-        return ""
-    sorted_dims = sorted(dims)
-    return "{" + ", ".join(sorted_dims) + "}"
-
-
-def _render_task_node(
-    task_id: str,
-    taskDef: TaskDef,
-    universe: DimensionUniverse,
-    file: Any,
-    show_dimensions: bool,
-    expand_dimensions: bool,
-) -> None:
-    """Render a single task node in the Mermaid diagram.
-
-    Parameters
-    ----------
-    task_id : str
-        Unique Mermaid node identifier for this task.
-    taskDef : TaskDef
-        The pipeline task definition, which includes the task label, task name,
-        and connections.
-    universe : DimensionUniverse
-        Used to conform and sort the task's dimensions.
-    file : file-like
-        The output file-like object to write the Mermaid node definition.
-    show_dimensions : bool
-        If True, display the task's dimensions after conforming them.
-    expand_dimensions : bool
-        If True, expand dimension names to include all components.
-    """
-    # Basic info: bold label, then task name.
-    lines = [
-        f"<b>{html.escape(taskDef.label)}</b>",
-        html.escape(taskDef.taskName),
-    ]
-
-    # If requested, display the task's conformed dimensions.
-    if show_dimensions and taskDef.connections and taskDef.connections.dimensions:
-        if expand_dimensions:
-            task_dims = _expand_dimensions(taskDef.connections.dimensions, universe)
-        else:
-            task_dims = list(taskDef.connections.dimensions)
-        if task_dims:
-            dim_str = _format_dimensions(task_dims)
-            lines.append(f"<i>dimensions:</i> {dim_str}")
-
-    # Join with <br> for line breaks and define the node with the label.
-    label = "<br>".join(lines)
-    print(f'{task_id}["{label}"]', file=file)
-    print(f"class {task_id} task;", file=file)
-
-
-def _render_dataset_node(
-    ds_id: str,
-    ds_name: str,
-    connection: connectionTypes.BaseConnection,
-    universe: DimensionUniverse,
-    file: Any,
-    show_dimensions: bool,
-    expand_dimensions: bool,
-    show_storage: bool,
-) -> None:
-    """Render a dataset-type node in the Mermaid diagram.
-
-    Parameters
-    ----------
-    ds_id : str
-        Unique Mermaid node identifier for this dataset.
-    ds_name : str
-        The dataset type name.
-    connection : BaseConnection
-        The dataset connection object, potentially dimensioned and having a
-        storage class.
-    universe : DimensionUniverse
-        Used to conform and sort the dataset's dimensions if it is dimensioned.
-    file : file-like
-        The output file-like object to write the Mermaid node definition.
-    show_dimensions : bool
-        If True, display the dataset's conformed dimensions.
-    expand_dimensions : bool
-        If True, expand dimension names to include all components.
-    show_storage : bool
-        If True, display the dataset's storage class if available.
-    """
-    # Start with the dataset name in bold.
-    lines = [f"<b>{html.escape(ds_name)}</b>"]
-
-    # If dimensioned and requested, show conformed dimensions.
-    ds_dims = []
-    if show_dimensions and isinstance(connection, connectionTypes.DimensionedConnection):
-        if expand_dimensions:
-            ds_dims = _expand_dimensions(connection.dimensions, universe)
-        else:
-            ds_dims = list(connection.dimensions)
-
-    if ds_dims:
-        dim_str = _format_dimensions(ds_dims)
-        lines.append(f"<i>dimensions:</i> {dim_str}")
-
-    # If storage class is available and requested, display it.
-    if show_storage and getattr(connection, "storageClass", None) is not None:
-        lines.append(f"<i>storage class:</i> {html.escape(str(connection.storageClass))}")
-
-    label = "<br>".join(lines)
-    print(f'{ds_id}["{label}"]', file=file)
-    print(f"class {ds_id} ds;", file=file)
-
-
 def pipeline2mermaid(
     pipeline: Pipeline | Iterable[TaskDef],
     file: Any,
@@ -329,7 +173,7 @@ def pipeline2mermaid(
     ImportError
         Raised if the task class cannot be imported.
     """
-
+    from .pipeline_graph import PipelineGraph, visualization

     # Ensure that pipeline is iterable of task definitions.
     if isinstance(pipeline, Pipeline):
@@ -341,154 +185,29 @@ def pipeline2mermaid(
         file = open(file, "w")
         close = True

-    …
+    if isinstance(pipeline, Pipeline):
+        pg = pipeline.to_graph(visualization_only=True)
+    else:
+        pg = PipelineGraph()
+        for task_def in pipeline:
+            pg.add_task(
+                task_def.label,
+                task_class=task_def.taskClass,
+                config=task_def.config,
+                connections=task_def.connections,
+            )
+        pg.resolve(visualization_only=True)
+
+    dimensions: Literal["full", "concise"] | None = None
+    if show_dimensions:
+        if expand_dimensions:
+            dimensions = "full"
+        else:
+            dimensions = "concise"

-    …
-        "classDef task fill:#B1F2EF,color:#000,stroke:#000,stroke-width:3px,"
-        "font-family:Monospace,font-size:14px,text-align:left;",
-        file=file,
+    visualization.show_mermaid(
+        pg, stream=file, dataset_types=True, dimensions=dimensions, storage_classes=show_storage
     )
-    print(
-        "classDef ds fill:#F5F5F5,color:#000,stroke:#00BABC,stroke-width:3px,"
-        "font-family:Monospace,font-size:14px,text-align:left,rx:10,ry:10;",
-        file=file,
-    )
-
-    # Track which datasets have been rendered to avoid duplicates.
-    allDatasets: set[str | tuple[str, str]] = set()
-
-    # Used for linking metadata datasets after tasks are processed.
-    labelToTaskName = {}
-    metadataNodesToLink = set()
-
-    # We'll store edges as (from_node, to_node, is_prerequisite) tuples.
-    edges: list[tuple[str, str, bool]] = []
-
-    def get_task_id(idx: int) -> str:
-        """Generate a safe Mermaid node ID for a task.
-
-        Parameters
-        ----------
-        idx : `int`
-            Task index.
-
-        Returns
-        -------
-        id : `str`
-            Node ID for a task.
-        """
-        return f"TASK_{idx}"
-
-    def get_dataset_id(name: str) -> str:
-        """Generate a safe Mermaid node ID for a dataset.
-
-        Parameters
-        ----------
-        name : `str`
-            Dataset name.
-
-        Returns
-        -------
-        id : `str`
-            Node ID for the dataset.
-        """
-        # Replace non-alphanumerics with underscores.
-        return "DATASET_" + re.sub(r"[^0-9A-Za-z_]", "_", name)
-
-    metadata_pattern = re.compile(r"^(.*)_metadata$")
-
-    # Sort tasks by label for consistent diagram ordering.
-    pipeline_tasks = sorted(pipeline, key=lambda x: x.label)
-
-    # Process each task and its connections.
-    for idx, taskDef in enumerate(pipeline_tasks):
-        task_id = get_task_id(idx)
-        labelToTaskName[taskDef.label] = task_id
-
-        # Render the task node.
-        _render_task_node(task_id, taskDef, universe, file, show_dimensions, expand_dimensions)
-
-        # Handle standard inputs (non-prerequisite).
-        for attr in sorted(iterConnections(taskDef.connections, "inputs"), key=lambda x: x.name):
-            ds_id = get_dataset_id(attr.name)
-            if attr.name not in allDatasets:
-                _render_dataset_node(
-                    ds_id, attr.name, attr, universe, file, show_dimensions, expand_dimensions, show_storage
-                )
-                allDatasets.add(attr.name)
-            edges.append((ds_id, task_id, False))
-
-            # Handle component datasets (composite -> component).
-            nodeName, component = DatasetType.splitDatasetTypeName(attr.name)
-            if component is not None and (nodeName, attr.name) not in allDatasets:
-                ds_id_parent = get_dataset_id(nodeName)
-                if nodeName not in allDatasets:
-                    _render_dataset_node(
-                        ds_id_parent,
-                        nodeName,
-                        attr,
-                        universe,
-                        file,
-                        show_dimensions,
-                        expand_dimensions,
-                        show_storage,
-                    )
-                    allDatasets.add(nodeName)
-                edges.append((ds_id_parent, ds_id, False))
-                allDatasets.add((nodeName, attr.name))
-
-            # If this is a metadata dataset, record it for linking later.
-            if (match := metadata_pattern.match(attr.name)) is not None:
-                matchTaskLabel = match.group(1)
-                metadataNodesToLink.add((matchTaskLabel, attr.name))
-
-        # Handle prerequisite inputs (to be drawn with a dashed line).
-        for attr in sorted(iterConnections(taskDef.connections, "prerequisiteInputs"), key=lambda x: x.name):
-            ds_id = get_dataset_id(attr.name)
-            if attr.name not in allDatasets:
-                _render_dataset_node(
-                    ds_id, attr.name, attr, universe, file, show_dimensions, expand_dimensions, show_storage
-                )
-                allDatasets.add(attr.name)
-            edges.append((ds_id, task_id, True))
-
-            # If this is a metadata dataset, record it for linking later.
-            if (match := metadata_pattern.match(attr.name)) is not None:
-                matchTaskLabel = match.group(1)
-                metadataNodesToLink.add((matchTaskLabel, attr.name))
-
-        # Handle outputs (task -> dataset).
-        for attr in sorted(iterConnections(taskDef.connections, "outputs"), key=lambda x: x.name):
-            ds_id = get_dataset_id(attr.name)
-            if attr.name not in allDatasets:
-                _render_dataset_node(
-                    ds_id, attr.name, attr, universe, file, show_dimensions, expand_dimensions, show_storage
-                )
-                allDatasets.add(attr.name)
-            edges.append((task_id, ds_id, False))
-
-    # Link metadata datasets after all tasks processed.
-    for matchLabel, dsTypeName in metadataNodesToLink:
-        if (result := labelToTaskName.get(matchLabel)) is not None:
-            ds_id = get_dataset_id(dsTypeName)
-            edges.append((result, ds_id, False))
-
-    # Print all edges and track which are prerequisite.
-    prereq_indices = []
-    for i, (f, t, p) in enumerate(edges):
-        print(f"{f} --> {t}", file=file)
-        if p:
-            prereq_indices.append(i)
-
-    # Apply default edge style
-    print("linkStyle default stroke:#000,stroke-width:1.5px,font-family:Monospace,font-size:14px;", file=file)
-
-    # Apply dashed style for all prerequisite edges in one line.
-    if prereq_indices:
-        prereq_str = ",".join(str(i) for i in prereq_indices)
-        print(f"linkStyle {prereq_str} stroke-dasharray:5;", file=file)

     if close:
         file.close()
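The two boolean flags of `pipeline2mermaid` now collapse into the single `dimensions` argument of `visualization.show_mermaid`; a sketch of that mapping, mirroring the translation in the new function body:

```python
from typing import Literal


def to_dimensions_arg(
    show_dimensions: bool, expand_dimensions: bool
) -> Literal["full", "concise"] | None:
    # None suppresses dimension display entirely; "full" expands
    # dimension names, "concise" keeps the minimal set.
    if not show_dimensions:
        return None
    return "full" if expand_dimensions else "concise"


assert to_dimensions_arg(True, True) == "full"
assert to_dimensions_arg(True, False) == "concise"
assert to_dimensions_arg(False, True) is None
```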
lsst/pipe/base/mp_graph_executor.py
CHANGED
@@ -159,7 +159,7 @@ class _Job:
         quantumExecutor_pickle: bytes,
         task_node_pickle: bytes,
         quantum_pickle: bytes,
-        quantum_id: uuid.UUID
+        quantum_id: uuid.UUID,
         logConfigState: list,
         snd_conn: multiprocessing.connection.Connection,
         fail_fast: bool,
@@ -174,6 +174,8 @@ class _Job:
         Task definition structure, pickled.
     quantum_pickle : `bytes`
         Quantum for this task execution in pickled form.
+    quantum_id : `uuid.UUID`
+        Unique ID for the quantum.
     logConfigState : `list`
         Logging state from parent process.
     snd_conn : `multiprocessing.Connection`
@@ -205,6 +207,7 @@ class _Job:
         _, report = quantumExecutor.execute(task_node, quantum, quantum_id=quantum_id)
     except RepeatableQuantumError as exc:
         report = QuantumReport.from_exception(
+            quantumId=quantum_id,
             exception=exc,
             dataId=quantum.dataId,
             taskLabel=task_node.label,
@@ -220,6 +223,7 @@ class _Job:
         _LOG.fatal("Invalid quantum error for %s (%s): %s", task_node.label, quantum.dataId)
         _LOG.fatal(exc, exc_info=True)
         report = QuantumReport.from_exception(
+            quantumId=quantum_id,
             exception=exc,
             dataId=quantum.dataId,
             taskLabel=task_node.label,
@@ -229,6 +233,7 @@ class _Job:
     except Exception as exc:
         _LOG.debug("exception from task %s dataId %s: %s", task_node.label, quantum.dataId, exc)
         report = QuantumReport.from_exception(
+            quantumId=quantum_id,
             exception=exc,
             dataId=quantum.dataId,
             taskLabel=task_node.label,
@@ -282,6 +287,7 @@ class _Job:
         exitcode = self.process.exitcode if self.process.exitcode is not None else -1
         assert self.qnode.quantum.dataId is not None, "Quantum DataId cannot be None"
         report = QuantumReport.from_exit_code(
+            quantumId=self.qnode.nodeId,
             exitCode=exitcode,
             dataId=self.qnode.quantum.dataId,
             taskLabel=self.qnode.task_node.label,
@@ -539,6 +545,7 @@ class MPGraphExecutor(QuantumGraphExecutor):
         )
         failedNodes.add(qnode)
         failed_quantum_report = QuantumReport(
+            quantumId=qnode.nodeId,
             status=ExecutionStatus.SKIPPED,
             dataId=qnode.quantum.dataId,
             taskLabel=task_node.label,
@@ -576,6 +583,7 @@ class MPGraphExecutor(QuantumGraphExecutor):
             raise
         except Exception as exc:
             quantum_report = QuantumReport.from_exception(
+                quantumId=qnode.nodeId,
                 exception=exc,
                 dataId=qnode.quantum.dataId,
                 taskLabel=task_node.label,
@@ -722,6 +730,7 @@ class MPGraphExecutor(QuantumGraphExecutor):
         assert job.qnode.quantum.dataId is not None, "Quantum DataId cannot be None"
         if jobInputNodes & jobs.failedNodes:
             quantum_report = QuantumReport(
+                quantumId=job.qnode.nodeId,
                 status=ExecutionStatus.SKIPPED,
                 dataId=job.qnode.quantum.dataId,
                 taskLabel=job.qnode.task_node.label,
lsst/pipe/base/pipeline_graph/_edges.py
CHANGED
@@ -258,6 +258,7 @@ class Edge(ABC):
         in exported networkx graphs.
         """
         return {
+            "connection_name": self.connection_name,
             "parent_dataset_type_name": self.parent_dataset_type_name,
             "storage_class_name": self.storage_class_name,
             "is_init": bool,
@@ -606,7 +607,18 @@ class ReadEdge(Edge):
                     "type is registered."
                 )
             else:
-                …
+                try:
+                    all_current_components = current.storageClass.allComponents()
+                except (KeyError, ImportError):
+                    if visualization_only:
+                        current = DatasetType(
+                            self.parent_dataset_type_name,
+                            dimensions,
+                            storageClass="<UNKNOWN>",
+                            isCalibration=self.is_calibration,
+                        )
+                        return current, is_initial_query_constraint, is_prerequisite
+                    raise
                 if self.component not in all_current_components:
                     raise IncompatibleDatasetTypeError(
                         f"Dataset type {self.parent_dataset_type_name!r} has storage class "
@@ -618,8 +630,10 @@ class ReadEdge(Edge):
                 # for the component the task wants, because we don't have the
                 # parent storage class.
                 current_component = all_current_components[self.component]
+
                 if (
-                    …
+                    not visualization_only
+                    and current_component.name != self.storage_class_name
                     and not StorageClassFactory()
                     .getStorageClass(self.storage_class_name)
                     .can_convert(current_component)
@@ -652,7 +666,7 @@ class ReadEdge(Edge):
                 "compatible but different, registering the dataset type in the data repository "
                 "in advance will avoid this error."
             )
-        elif not dataset_type.is_compatible_with(current):
+        elif not visualization_only and not dataset_type.is_compatible_with(current):
             raise IncompatibleDatasetTypeError(
                 f"Incompatible definition for input dataset type {self.parent_dataset_type_name!r}; "
                 f"task {self.task_label!r} has {dataset_type}, but the definition "
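The `ReadEdge` change makes visualization-only resolution tolerant of storage classes that cannot be looked up or imported: instead of raising, it substitutes an `<UNKNOWN>` placeholder dataset type. A standalone sketch of the fallback pattern, with names mirroring the diff rather than the full method:

```python
def safe_components(current, visualization_only: bool):
    """Sketch: return the component mapping, or None to signal that the
    caller should substitute an "<UNKNOWN>" placeholder dataset type."""
    try:
        return current.storageClass.allComponents()
    except (KeyError, ImportError):
        if visualization_only:
            # Graph drawing can proceed without a real storage class.
            return None
        raise
```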
lsst/pipe/base/pipeline_graph/_nodes.py
CHANGED
@@ -27,12 +27,39 @@
 from __future__ import annotations

 __all__ = (
+    "NodeBipartite",
     "NodeKey",
     "NodeType",
 )

 import enum
-
+import sys
+from typing import Any, NamedTuple
+
+
+class NodeBipartite(enum.IntEnum):
+    """Constants for the 'bipartite' key in NetworkX graph views."""
+
+    DATASET_OR_TYPE = 0
+    """Value for nodes that represent dataset types (in pipeline graphs)
+    or datasets (in quantum graphs).
+    """
+
+    TASK_OR_QUANTUM = 1
+    """Value for nodes that represent tasks (in pipeline graphs) or quanta
+    (in quantum graphs).
+    """
+
+    if "sphinx" in sys.modules:
+
+        @classmethod
+        def from_bytes(cls, *args: Any, **kwargs: Any) -> Any:  # pragma: no cover
+            """See `IntEnum.from_bytes`."""
+            return super().from_bytes(*args, **kwargs)
+
+        def to_bytes(self, *args: Any, **kwargs: Any) -> Any:  # pragma: no cover
+            """See `IntEnum.to_bytes`."""
+            return super().to_bytes(self, *args, **kwargs)


 class NodeType(enum.Enum):
@@ -43,13 +70,13 @@ class NodeType(enum.Enum):
     TASK = 2

     @property
-    def bipartite(self) -> …
+    def bipartite(self) -> NodeBipartite:
         """The integer used as the "bipartite" key in networkx exports of a
         `PipelineGraph`.

         This key is used by the `networkx.algorithms.bipartite` module.
         """
-        return …
+        return NodeBipartite(self is not NodeType.DATASET_TYPE)

     def __lt__(self, other: NodeType) -> bool:
         # We define __lt__ only to be able to provide deterministic tiebreaking
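A sketch of consuming the `bipartite` node attribute that `NodeType.bipartite` now types as `NodeBipartite`. The graph here is hand-built for illustration, not produced by the pipe_base export API:

```python
import networkx as nx

# Split a graph into its two bipartite node sets using the attribute
# values NodeBipartite.TASK_OR_QUANTUM (1) and DATASET_OR_TYPE (0).
xgraph = nx.DiGraph()
xgraph.add_node("task_a", bipartite=1)     # TASK_OR_QUANTUM
xgraph.add_node("dataset_b", bipartite=0)  # DATASET_OR_TYPE
xgraph.add_edge("dataset_b", "task_a")

tasks = {n for n, b in xgraph.nodes(data="bipartite") if b == 1}
assert tasks == {"task_a"}
```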
lsst/pipe/base/pipeline_graph/_tasks.py
CHANGED
@@ -806,7 +806,9 @@ class TaskNode:
         edge : `ReadEdge`
             Input edge.
         """
-        …
+        if (edge := self.inputs.get(connection_name)) is not None:
+            return edge
+        return self.prerequisite_inputs[connection_name]

     def get_output_edge(self, connection_name: str) -> WriteEdge:
         """Look up an output edge by connection name.
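A sketch of the new lookup order, assuming a resolved `TaskNode` named `task_node` with hypothetical connection names: regular inputs are checked first, then prerequisite inputs, and an unknown name raises `KeyError` from the prerequisite mapping:

```python
# "calexp" and "ref_catalog" are hypothetical connection names.
edge = task_node.get_input_edge("calexp")         # found in .inputs
prereq = task_node.get_input_edge("ref_catalog")  # falls back to .prerequisite_inputs
```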
lsst/pipe/base/pipeline_graph/visualization/_dot.py
CHANGED
@@ -57,6 +57,7 @@ _OVERFLOW_MAX_LINES = 20
 def show_dot(
     pipeline_graph: PipelineGraph,
     stream: TextIO = sys.stdout,
+    label_edge_connections: bool = False,
     **kwargs: Any,
 ) -> None:
     """Write a DOT representation of the pipeline graph to a stream.
@@ -67,6 +68,8 @@ def show_dot(
         Pipeline graph to show.
     stream : `TextIO`, optional
         Stream to write the DOT representation to.
+    label_edge_connections : `bool`, optional
+        If `True`, label edges with their connection names.
     **kwargs
         Additional keyword arguments to pass to `parse_display_args`.
     """
@@ -96,12 +99,19 @@ def show_dot(
     formatted_overflow_ids = [f'"{overflow_id}"' for overflow_id in overflow_ids]
     print(f"{{rank=sink; {'; '.join(formatted_overflow_ids)};}}", file=stream)

-    for from_node, to_node, …
-        …
+    for from_node, to_node, edge_data in xgraph.edges(data=True):
+        edge_kwargs = {}
+        if edge_data.get("is_prerequisite", False):
+            edge_kwargs["style"] = "dashed"
+        if (connection_name := edge_data.get("connection_name", None)) is not None:
+            if (component := edge_data.get("component", None)) is not None:
+                if label_edge_connections:
+                    edge_kwargs["xlabel"] = f"{connection_name} (.{component})"
+                else:
+                    edge_kwargs["xlabel"] = f".{component}"
+            elif label_edge_connections:
+                edge_kwargs["xlabel"] = connection_name
+        _render_edge(from_node.node_id, to_node.node_id, stream, **edge_kwargs)

     print("}", file=stream)
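A sketch of the new keyword, assuming `pg` is a resolved `PipelineGraph`: dataset-type nodes are enabled so there are edges to label, and the new flag writes each connection name as an external DOT edge label (`xlabel`):

```python
import sys

from lsst.pipe.base.pipeline_graph import visualization

visualization.show_dot(
    pg,
    stream=sys.stdout,
    dataset_types=True,
    label_edge_connections=True,
)
```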
lsst/pipe/base/quantum_graph_builder.py
CHANGED
@@ -364,6 +364,9 @@ class QuantumGraphBuilder(ABC):
         # with the quanta because no quantum knows if its the only
         # consumer).
         full_skeleton.remove_orphan_datasets()
+        # Add any dimension records not handled by the subclass, and
+        # aggregate any that were added directly to data IDs.
+        full_skeleton.attach_dimension_records(self.butler, self._pipeline_graph.get_all_dimensions())
         if attach_datastore_records:
             self._attach_datastore_records(full_skeleton)
         # TODO initialize most metadata here instead of in ctrl_mpexec.
@@ -939,7 +942,7 @@ class QuantumGraphBuilder(ABC):
         inputs: dict[DatasetKey | PrerequisiteDatasetKey, DatasetRef] = {}
         outputs_for_skip: dict[DatasetKey, DatasetRef] = {}
         outputs_in_the_way: dict[DatasetKey, DatasetRef] = {}
-        _, dataset_type_nodes = self._pipeline_graph.group_by_dimensions()
+        _, dataset_type_nodes = self._pipeline_graph.group_by_dimensions().get(self.universe.empty, ({}, {}))
         dataset_types = [node.dataset_type for node in dataset_type_nodes.values()]
         dataset_types.extend(self._global_init_output_types.values())
         for dataset_type in dataset_types:
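A sketch of the shape the one-line change accounts for: `group_by_dimensions()` returns a mapping keyed by dimension group, so the empty-dimensions entry has to be looked up with a default rather than unpacking the mapping itself. The stand-in names below are illustrative only:

```python
# Stand-ins for self._pipeline_graph.group_by_dimensions() and
# self.universe.empty; real keys are DimensionGroup objects.
groups: dict = {}
empty_group = "<universe.empty>"

# Safe even when no dataset types have empty dimensions.
task_nodes, dataset_type_nodes = groups.get(empty_group, ({}, {}))
assert dataset_type_nodes == {}
```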
lsst/pipe/base/quantum_graph_skeleton.py
CHANGED
@@ -52,6 +52,7 @@ from lsst.daf.butler import (
     DataIdValue,
     DatasetRef,
     DimensionDataAttacher,
+    DimensionDataExtractor,
     DimensionGroup,
     DimensionRecordSet,
 )
@@ -726,10 +727,18 @@ class QuantumGraphSkeleton:
         return self._xgraph.nodes[key]["data_id"]

     def attach_dimension_records(
-        self, …
+        self,
+        butler: Butler,
+        dimensions: DimensionGroup,
+        dimension_records: Iterable[DimensionRecordSet] = (),
     ) -> None:
         """Attach dimension records to the data IDs in the skeleton.

+        This both attaches records to data IDs in the skeleton and aggregates
+        any existing records on data IDs, so `get_dimension_data` returns all
+        dimension records used in the skeleton.  It can be called multiple
+        times.
+
         Parameters
         ----------
         butler : `lsst.daf.butler.Butler`
@@ -737,7 +746,7 @@ class QuantumGraphSkeleton:
         dimensions : `lsst.daf.butler.DimensionGroup`
             Superset of all of the dimensions of all data IDs.
         dimension_records : `~collections.abc.Iterable` [ \
-            `lsst.daf.butler.DimensionRecordSet` ]
+            `lsst.daf.butler.DimensionRecordSet` ], optional
             Iterable of sets of dimension records to attach.
         """
         for record_set in dimension_records:
@@ -748,10 +757,20 @@ class QuantumGraphSkeleton:
         data_ids_to_expand: defaultdict[DimensionGroup, defaultdict[DataCoordinate, list[Key]]] = defaultdict(
             lambda: defaultdict(list)
         )
+        extractor = DimensionDataExtractor.from_dimension_group(dimensions)
         data_id: DataCoordinate | None
         for node_key in self:
             if data_id := self[node_key].get("data_id"):
-                …
+                if data_id.hasRecords():
+                    extractor.update([data_id])
+                else:
+                    data_ids_to_expand[data_id.dimensions][data_id].append(node_key)
+        # Add records we extracted from data IDs that were already expanded, in
+        # case other nodes want them.
+        for record_set in extractor.records.values():
+            self._dimension_data.setdefault(
+                record_set.element.name, DimensionRecordSet(record_set.element)
+            ).update(record_set)
         attacher = DimensionDataAttacher(records=self._dimension_data.values(), dimensions=dimensions)
         for dimensions, data_ids in data_ids_to_expand.items():
             with butler.query() as query:
@@ -763,7 +782,7 @@ class QuantumGraphSkeleton:
         for expanded_data_id, node_keys in zip(expanded_data_ids, data_ids.values()):
             for node_key in node_keys:
                 self.set_data_id(node_key, expanded_data_id)
-        # Hold on to any records that we had to query for.
+        # Hold on to any records that we had to query for or extracted.
         self._dimension_data = attacher.records

     def get_dimension_data(self) -> list[DimensionRecordSet]:
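A sketch of the extended API, with `skeleton`, `butler`, `dims`, and `extra_sets` as hypothetical stand-ins: the records argument now defaults to empty, and per the new docstring the method is safe to call more than once, aggregating as it goes:

```python
# First pass: expand data IDs, querying the butler for missing records.
skeleton.attach_dimension_records(butler, dims)

# Later pass: merge explicitly supplied DimensionRecordSet objects too.
skeleton.attach_dimension_records(butler, dims, extra_sets)

# Now returns every record used anywhere in the skeleton.
records = skeleton.get_dimension_data()
```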
lsst/pipe/base/quantum_reports.py
CHANGED
@@ -31,6 +31,7 @@ __all__ = ["ExceptionInfo", "ExecutionStatus", "QuantumReport", "Report"]

 import enum
 import sys
+import uuid
 from typing import Any

 import pydantic
@@ -137,6 +138,8 @@ class QuantumReport(pydantic.BaseModel):
         in-process execution. Negative if process was killed by a signal.
     exceptionInfo : `ExceptionInfo` or `None`, optional
         Exception information if an exception was raised.
+    quantumId : `uuid.UUID`, optional
+        Unique identifier for the quantum.
     """

     status: ExecutionStatus = ExecutionStatus.SUCCESS
@@ -156,6 +159,9 @@ class QuantumReport(pydantic.BaseModel):
     exceptionInfo: ExceptionInfo | None = None
     """Exception information if exception was raised."""

+    quantumId: uuid.UUID | None = None
+    """Unique identifier for the quantum."""
+
     def __init__(
         self,
         dataId: DataId,
@@ -163,8 +169,10 @@ class QuantumReport(pydantic.BaseModel):
         status: ExecutionStatus = ExecutionStatus.SUCCESS,
         exitCode: int | None = None,
         exceptionInfo: ExceptionInfo | None = None,
+        quantumId: uuid.UUID | None = None,
     ):
         super().__init__(
+            quantumId=quantumId,
             status=status,
             dataId=_serializeDataId(dataId),
             taskLabel=taskLabel,
@@ -180,6 +188,7 @@ class QuantumReport(pydantic.BaseModel):
         taskLabel: str,
         *,
         exitCode: int | None = None,
+        quantumId: uuid.UUID | None = None,
     ) -> QuantumReport:
         """Construct report instance from an exception and other pieces of
         data.
@@ -195,6 +204,8 @@ class QuantumReport(pydantic.BaseModel):
         exitCode : `int`, optional
             Exit code for the process, used when it is known that the process
             will exit with that exit code.
+        quantumId : `uuid.UUID`, optional
+            Unique identifier for the quantum.
         """
         return cls(
             status=ExecutionStatus.FAILURE,
@@ -202,6 +213,7 @@ class QuantumReport(pydantic.BaseModel):
             taskLabel=taskLabel,
             exitCode=exitCode,
             exceptionInfo=ExceptionInfo.from_exception(exception),
+            quantumId=quantumId,
         )

     @classmethod
@@ -210,6 +222,7 @@ class QuantumReport(pydantic.BaseModel):
         exitCode: int,
         dataId: DataId,
         taskLabel: str,
+        quantumId: uuid.UUID | None = None,
     ) -> QuantumReport:
         """Construct report instance from an exit code and other pieces of
         data.
@@ -222,12 +235,15 @@ class QuantumReport(pydantic.BaseModel):
             The quantum Data ID.
         taskLabel : `str`
             The task label.
+        quantumId : `uuid.UUID`, optional
+            Unique identifier for the quantum.
         """
         return cls(
             status=ExecutionStatus.SUCCESS if exitCode == 0 else ExecutionStatus.FAILURE,
             dataId=dataId,
             taskLabel=taskLabel,
            exitCode=exitCode,
+            quantumId=quantumId,
         )

     # Work around the fact that Sphinx chokes on Pydantic docstring formatting,
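The new `quantumId` field threads through every `QuantumReport` constructor, so executors can tie reports back to quantum-graph nodes. A sketch, with a hypothetical data ID and task label:

```python
import uuid

from lsst.pipe.base.quantum_reports import QuantumReport

report = QuantumReport.from_exit_code(
    exitCode=0,  # zero maps to ExecutionStatus.SUCCESS per the diff
    dataId={"visit": 1234, "detector": 5},  # hypothetical data ID
    taskLabel="isr",                        # hypothetical task label
    quantumId=uuid.uuid4(),
)
assert report.quantumId is not None
```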
lsst/pipe/base/simple_pipeline_executor.py
CHANGED
@@ -641,7 +641,8 @@ class SimplePipelineExecutor:
         # be useful for callers who want to check the state of the repo in
         # between.
         return (
-            single_quantum_executor.execute(qnode.task_node, qnode.quantum)[0] for qnode in self.quantum_graph
+            single_quantum_executor.execute(qnode.task_node, qnode.quantum, qnode.nodeId)[0]
+            for qnode in self.quantum_graph
         )

     def _transfer_qg_dimension_records(self, out_butler: Butler) -> None:
lsst/pipe/base/single_quantum_executor.py
CHANGED
@@ -159,7 +159,7 @@ class SingleQuantumExecutor(QuantumExecutor):
         self._butler.registry.refresh()

         result = self._execute(task_node, quantum, quantum_id=quantum_id)
-        report = QuantumReport(dataId=quantum.dataId, taskLabel=task_node.label)
+        report = QuantumReport(quantumId=quantum_id, dataId=quantum.dataId, taskLabel=task_node.label)
         return result, report

     def _execute(
lsst/pipe/base/tests/mocks/_in_memory_repo.py
CHANGED
@@ -243,7 +243,7 @@ class InMemoryRepo:
         builder = AllDimensionsQuantumGraphBuilder(
             self.pipeline_graph,
             self.butler,
-            input_collections=self.input_chain,
+            input_collections=[self.input_chain],
             output_run=self.output_run,
         )
         if register_output_dataset_types:
lsst/pipe/base/version.py
CHANGED
@@ -1,2 +1,2 @@
 __all__ = ["__version__"]
-__version__ = "29.2025.3400"
+__version__ = "29.2025.3500"
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lsst-pipe-base
-Version: 29.2025.3400
+Version: 29.2025.3500
 Summary: Pipeline infrastructure for the Rubin Science Pipelines.
 Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
 License: BSD 3-Clause License
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/RECORD
RENAMED
@@ -15,32 +15,32 @@ lsst/pipe/base/config.py,sha256=yNipVEc6awwhU_O9I01g20OnvQrs28dAwkXuI1hrlYE,1198
 lsst/pipe/base/configOverrides.py,sha256=B0An8EaX76VzWnC5dJxvyZ2AhVzawMtq7qlE9ma5lkc,14661
 lsst/pipe/base/connectionTypes.py,sha256=inUDyzbM1sKMCtHaRkhx3dWSPHPBIDVMHOPhzB13Kdw,16720
 lsst/pipe/base/connections.py,sha256=S_PgywIYoPlaCtGtDtD6S24yewVaPfdS_QgrhUAty7g,66725
-lsst/pipe/base/dot_tools.py,sha256=…
+lsst/pipe/base/dot_tools.py,sha256=bxwJTIfG58LAxPvBzORgp99kYKJUQFK4mNVjEuj-Pgc,9862
 lsst/pipe/base/exec_fixup_data_id.py,sha256=UG-yZboZijOjrPh0bKnAjEYJMpRqGAIgNZxIDYVa0l0,5048
 lsst/pipe/base/execution_graph_fixup.py,sha256=_orQ_GT5f-VyRarcpaPD_cNEfo9AIWgum9HkMkcvNG8,2811
 lsst/pipe/base/execution_reports.py,sha256=jYtWCD4PkEAeVUpKIxuiJJVgsCm7qiwCorWVgNHkVgU,17270
 lsst/pipe/base/log_capture.py,sha256=tgJcq_eOIwywktagYXL0sCnafqNR0CJ7rfW09iXQ63k,9390
-lsst/pipe/base/mermaid_tools.py,sha256=…
-lsst/pipe/base/mp_graph_executor.py,sha256=…
+lsst/pipe/base/mermaid_tools.py,sha256=s6TQCBjvrkw2t9yxjm3YEYsBKnUW2yHmt6SawQJ9Bto,7494
+lsst/pipe/base/mp_graph_executor.py,sha256=45v915qU1dYQsvtAXtfms6kF3k49bptDe9IZNRpzfOQ,32130
 lsst/pipe/base/pipeline.py,sha256=FVaiLhgw9Pzo-nzXKS0dLNafegP0AMZKLtPlSvOSkRU,37563
 lsst/pipe/base/pipelineIR.py,sha256=DDOAYHnMP-iw021RDMYsZnvb21tWumLjYqO5d38q_Zk,44300
 lsst/pipe/base/pipelineTask.py,sha256=K3GdjJLvy8A7I-jzQiERQZaYF7mC1LM3iB5TmUtbOCI,8394
 lsst/pipe/base/prerequisite_helpers.py,sha256=bmiebQ4veSrypZgAXjmCBFfj8fUtPW9eRQaVShhxdBQ,28446
 lsst/pipe/base/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lsst/pipe/base/quantum_graph_builder.py,sha256=…
+lsst/pipe/base/quantum_graph_builder.py,sha256=Na8uLsOzPqvQqi9pI9bgHjWREhvWPWIA3_MvLLKl-R0,56621
 lsst/pipe/base/quantum_graph_executor.py,sha256=KOOjko2weju02oXnvWTvwXDxbPJp3U1F0KTgDSOJgKg,4342
-lsst/pipe/base/quantum_graph_skeleton.py,sha256=…
+lsst/pipe/base/quantum_graph_skeleton.py,sha256=GhSQjRHaErneGY4A4E0tERqg9QPEeYrlpmdLzqFXy6E,28586
 lsst/pipe/base/quantum_provenance_graph.py,sha256=llXcqu-50dtjkt_sVqAhBU10htfkxMiiArNN0_GqL1g,93034
-lsst/pipe/base/quantum_reports.py,sha256=…
+lsst/pipe/base/quantum_reports.py,sha256=GRAjhUa7L595AjBywJz95XT7K4OUFa-rudWQtQTrUlU,12230
 lsst/pipe/base/separable_pipeline_executor.py,sha256=w7-cZ9koJNDhEVu2lMXqjNMhZvFVn5QbJfsqvxHkQFE,11928
-lsst/pipe/base/simple_pipeline_executor.py,sha256=…
-lsst/pipe/base/single_quantum_executor.py,sha256=…
+lsst/pipe/base/simple_pipeline_executor.py,sha256=IYda1g258FWWESN5nQv6xwZJL5AZXhnDGC5iN_IohUg,29597
+lsst/pipe/base/single_quantum_executor.py,sha256=4Uhg83vZU9P74K6wER4qisDB-dZukdrkJ7hxNjEF-rQ,28277
 lsst/pipe/base/struct.py,sha256=Fa-UkpuXOxdzKWbHrMUkJYOszZuBXCm2NesXNR0IOPQ,5048
 lsst/pipe/base/task.py,sha256=XHBd-7m1a4-6LgobBYA1DgY4H7EV-_RWKfxbhZbMmD4,15145
 lsst/pipe/base/taskFactory.py,sha256=MsDGECJqZLSZk8SGhpuVhNaP32UWuNvxZiDcZExPFG8,3412
 lsst/pipe/base/testUtils.py,sha256=lSBKMhoKflbi8JkMNYfEqqHNl-rtFI8UYT3QneDYpLo,18477
 lsst/pipe/base/utils.py,sha256=JmEt3l0xrh9uayKrSXuQEq12aXOhDr2YXmbYduaxCko,1940
-lsst/pipe/base/version.py,sha256=…
+lsst/pipe/base/version.py,sha256=UwJkYVJhfyZPaD0yyjLkfsS3KQL2uaxZwFIeRntHr30,55
 lsst/pipe/base/cli/__init__.py,sha256=861tXIAW7SqtqNUYkjbeEdfg8lDswXsjJQca0gVCFz4,54
 lsst/pipe/base/cli/_get_cli_subcommands.py,sha256=g_af64klRybBGKAg7fmBSZBdw2LYBAsFON_yQIMZON0,1289
 lsst/pipe/base/cli/cmd/__init__.py,sha256=BGicstnryQ48rYcNRh4fa6Vy63ZIlZ_pPAEa17jhkwY,1519
@@ -54,23 +54,23 @@ lsst/pipe/base/graph/__init__.py,sha256=Zs2vwSFNiu1bYDsgrWQZ0qegG5F6PIjiQ5ZGT3Eq
 lsst/pipe/base/graph/_implDetails.py,sha256=QQHVnCW78UnIbALXX_v7EW7g6MTUTuuR1Q_Ss_squUw,6784
 lsst/pipe/base/graph/_loadHelpers.py,sha256=qUfjIgFezaXZRCFV7PFzmz1SSKFjRWOMWJePuyKiD24,12064
 lsst/pipe/base/graph/_versionDeserializers.py,sha256=pXk63v6jkQSghSOoU1hpPkxVa82WVGitm2jrop85SeM,27992
-lsst/pipe/base/graph/graph.py,sha256=…
+lsst/pipe/base/graph/graph.py,sha256=GEauk42yFXVkFfHxo0EKofQmAKLhfnfe0UzlwuvUsVQ,74240
 lsst/pipe/base/graph/graphSummary.py,sha256=S3O84MddiXrmOD7U9Ek7rR22kdveGLhsor69FRvDjh4,5032
 lsst/pipe/base/graph/quantumNode.py,sha256=l4mslxBgyUzBAqwjpx6XRP-UPxe-oRMxHJWt-_y3Dm0,7196
 lsst/pipe/base/pipeline_graph/__init__.py,sha256=yTEuvlzbeKIHIm7GeRmGSsma1wpZFNv8j12WfSH-deY,1516
 lsst/pipe/base/pipeline_graph/__main__.py,sha256=E6ugEwJbds22wjgcfcgzeyO04JofQwVhn_Y8kZYY1lQ,20769
 lsst/pipe/base/pipeline_graph/_dataset_types.py,sha256=MzpiI4bOUgwUpnse4Bj_KFAUFm_uERCHWd0BwAhKksc,11333
-lsst/pipe/base/pipeline_graph/_edges.py,sha256=…
+lsst/pipe/base/pipeline_graph/_edges.py,sha256=n6iCYql-TvAyM1xrINt7m02efjebJlIwhk254CCzfn8,35300
 lsst/pipe/base/pipeline_graph/_exceptions.py,sha256=3jvCXms0_5ThLGtsOlKxsI1vWiq3gY4hba8fRBW0tgI,3943
 lsst/pipe/base/pipeline_graph/_mapping_views.py,sha256=9nLKPA8j7sS09haShbJnEtGXbb4vy_cWpbLeMLBmVvs,9194
-lsst/pipe/base/pipeline_graph/_nodes.py,sha256=…
+lsst/pipe/base/pipeline_graph/_nodes.py,sha256=GGXfzXvrjNbwPt-0w8cC0l_I6CCNskoDNjA8Ds4ILS0,4236
 lsst/pipe/base/pipeline_graph/_pipeline_graph.py,sha256=G-P3r-AHBM1cMP7ex75M-Xtu4HlQiaIqly3er6BZC0A,121536
 lsst/pipe/base/pipeline_graph/_task_subsets.py,sha256=lLvcndSGcZigteWd4eeAM8LxQ1lHPBoysY8PjJTxx1c,13244
-lsst/pipe/base/pipeline_graph/_tasks.py,sha256=…
+lsst/pipe/base/pipeline_graph/_tasks.py,sha256=jTLpm5dZMXRNrGi3L45-3DtF95PGwhmejWLZ-zcSTzo,42802
 lsst/pipe/base/pipeline_graph/expressions.py,sha256=MZ0qxGA4ctu_WqVjdjjezZF8Jd5174PWbio7EF2wdl0,7717
 lsst/pipe/base/pipeline_graph/io.py,sha256=8s20Z0hYhO8AZpDJJrd6LR2iBx7ICm9uL7O7PsQMT0M,30925
 lsst/pipe/base/pipeline_graph/visualization/__init__.py,sha256=qQctfWuFpcmgRdgu8Y6OsJ_pXpLKrCK-alqfVtIecls,1551
-lsst/pipe/base/pipeline_graph/visualization/_dot.py,sha256=…
+lsst/pipe/base/pipeline_graph/visualization/_dot.py,sha256=hgy5Wk4GXptb9GbjPn8-0D9EjWsXKBEEVs1ocHLh_MA,13535
 lsst/pipe/base/pipeline_graph/visualization/_formatting.py,sha256=NsBxXwdmISitr8_4wPc-T8CqVB-Mq4pv7DmUefFm3JU,17845
 lsst/pipe/base/pipeline_graph/visualization/_layout.py,sha256=aMFl2Sgw_2-AfCBr_JBIWSs7VbSfSP7Nuol0mP9lkUo,17157
 lsst/pipe/base/pipeline_graph/visualization/_merge.py,sha256=cBKhNjgymDkzYtVutrXd9IGa-eE4Q9jnHO9F18e64dY,15435
@@ -93,16 +93,16 @@ lsst/pipe/base/tests/simpleQGraph.py,sha256=G9C69caX8479JR9h48ERhOFvLTPJCoj5gKf_
 lsst/pipe/base/tests/util.py,sha256=eWuIRz55HYgNmMkexinN9HjUFmPC3uapO8jMjcQY-ao,4010
 lsst/pipe/base/tests/mocks/__init__.py,sha256=YheEqtwDsjkqLNLCYbDrbZmLj9y942fOWC_xKF3xmCk,1582
 lsst/pipe/base/tests/mocks/_data_id_match.py,sha256=v33QZhZm-srXZAXZ8NbNKGN-_ql4AzaArBUk1lxhyss,7474
-lsst/pipe/base/tests/mocks/_in_memory_repo.py,sha256=…
+lsst/pipe/base/tests/mocks/_in_memory_repo.py,sha256=l5j5b4ff_ATRTskIlzhlHr-w_GiIJ5-rfUd37vT2esA,16664
 lsst/pipe/base/tests/mocks/_pipeline_task.py,sha256=xa2vy3HuMQifV0KR5sKfKRySqxSFhy-f1cP4bJ9EXZg,30010
 lsst/pipe/base/tests/mocks/_storage_class.py,sha256=gC0czHURMk7PWj8N6dLxnY5V4HWX5i8ukb5SZbgWKy8,25257
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
-lsst_pipe_base-29.2025.3400.dist-info/…
+lsst_pipe_base-29.2025.3500.dist-info/licenses/COPYRIGHT,sha256=kB3Z9_f6a6uFLGpEmNJT_n186CE65H6wHu4F6BNt_zA,368
+lsst_pipe_base-29.2025.3500.dist-info/licenses/LICENSE,sha256=pRExkS03v0MQW-neNfIcaSL6aiAnoLxYgtZoFzQ6zkM,232
+lsst_pipe_base-29.2025.3500.dist-info/licenses/bsd_license.txt,sha256=7MIcv8QRX9guUtqPSBDMPz2SnZ5swI-xZMqm_VDSfxY,1606
+lsst_pipe_base-29.2025.3500.dist-info/licenses/gpl-v3.0.txt,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+lsst_pipe_base-29.2025.3500.dist-info/METADATA,sha256=GKunBUX56NevthsaNTuHJxK3v49G9e9eQSmjcer3qPU,2195
+lsst_pipe_base-29.2025.3500.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lsst_pipe_base-29.2025.3500.dist-info/entry_points.txt,sha256=bnmUhJBsChxMdqST9VmFBYYKxLQoToOfqW1wjW7khjk,64
+lsst_pipe_base-29.2025.3500.dist-info/top_level.txt,sha256=eUWiOuVVm9wwTrnAgiJT6tp6HQHXxIhj2QSZ7NYZH80,5
+lsst_pipe_base-29.2025.3500.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+lsst_pipe_base-29.2025.3500.dist-info/RECORD,,
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/WHEEL
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/entry_points.txt
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/COPYRIGHT
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/LICENSE
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/bsd_license.txt
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/licenses/gpl-v3.0.txt
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/top_level.txt
RENAMED
File without changes
{lsst_pipe_base-29.2025.3400.dist-info → lsst_pipe_base-29.2025.3500.dist-info}/zip-safe
RENAMED
File without changes