dkist-processing-common 11.7.0rc6__py3-none-any.whl → 11.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dkist_processing_common/config.py +28 -6
- dkist_processing_common/models/graphql.py +0 -33
- dkist_processing_common/tasks/l1_output_data.py +38 -32
- dkist_processing_common/tasks/mixin/globus.py +23 -26
- dkist_processing_common/tasks/mixin/metadata_store.py +0 -49
- dkist_processing_common/tasks/mixin/object_store.py +21 -0
- dkist_processing_common/tasks/mixin/quality/_metrics.py +4 -6
- dkist_processing_common/tasks/trial_catalog.py +49 -1
- dkist_processing_common/tasks/trial_output_data.py +1 -1
- dkist_processing_common/tests/mock_metadata_store.py +39 -4
- dkist_processing_common/tests/test_input_dataset.py +1 -37
- dkist_processing_common/tests/test_publish_catalog_messages.py +0 -21
- dkist_processing_common/tests/test_quality_mixin.py +11 -3
- dkist_processing_common/tests/test_submit_dataset_metadata.py +1 -5
- dkist_processing_common/tests/test_trial_catalog.py +72 -2
- dkist_processing_common/tests/test_trial_output_data.py +1 -2
- {dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/METADATA +17 -13
- {dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/RECORD +20 -26
- changelog/267.feature.1.rst +0 -1
- changelog/267.feature.2.rst +0 -1
- changelog/267.feature.rst +0 -1
- changelog/267.misc.rst +0 -1
- changelog/267.removal.1.rst +0 -2
- changelog/267.removal.rst +0 -1
- {dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/WHEEL +0 -0
- {dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/top_level.txt +0 -0
dkist_processing_common/config.py
CHANGED
@@ -3,12 +3,20 @@
 from dkist_processing_core.config import DKISTProcessingCoreConfiguration
 from dkist_service_configuration.settings import DEFAULT_MESH_SERVICE
 from dkist_service_configuration.settings import MeshService
+from pydantic import BaseModel
 from pydantic import Field
 from talus import ConnectionRetryerFactory
 from talus import ConsumerConnectionParameterFactory
 from talus import ProducerConnectionParameterFactory
 
 
+class GlobusClientCredential(BaseModel):
+    """Globus client credential."""
+
+    client_id: str = Field(..., description="Globus client ID for transfers.")
+    client_secret: str = Field(..., description="Globus client secret for transfers.")
+
+
 class DKISTProcessingCommonConfiguration(DKISTProcessingCoreConfiguration):
     """Common configurations."""
 
@@ -40,14 +48,28 @@ class DKISTProcessingCommonConfiguration(DKISTProcessingCoreConfiguration):
         default=None, description="S3 download configuration for the object store."
     )
     # globus
-
-
+    globus_max_retries: int = Field(
+        default=5, description="Max retries for transient errors on calls to the globus api."
     )
-
-
+    globus_inbound_client_credentials: list[GlobusClientCredential] = Field(
+        default_factory=list,
+        description="Globus client credentials for inbound transfers.",
+        examples=[
+            [
+                {"client_id": "id1", "client_secret": "secret1"},
+                {"client_id": "id2", "client_secret": "secret2"},
+            ],
+        ],
     )
-
-
+    globus_outbound_client_credentials: list[GlobusClientCredential] = Field(
+        default_factory=list,
+        description="Globus client credentials for outbound transfers.",
+        examples=[
+            [
+                {"client_id": "id3", "client_secret": "secret3"},
+                {"client_id": "id4", "client_secret": "secret4"},
+            ],
+        ],
     )
     object_store_endpoint: str | None = Field(
        default=None, description="Object store Globus Endpoint ID."
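Since these are pydantic settings fields, the new credential pools can be supplied through the environment. A minimal sketch, assuming the usual pydantic-settings behavior of parsing list-valued fields from JSON; the variable names are the ones listed in the METADATA environment-variable table further down, and the credential values are placeholders:

import json
import os

# Placeholder credentials; real deployments would inject secrets securely.
os.environ["GLOBUS_MAX_RETRIES"] = "5"
os.environ["GLOBUS_INBOUND_CLIENT_CREDENTIALS"] = json.dumps(
    [
        {"client_id": "id1", "client_secret": "secret1"},
        {"client_id": "id2", "client_secret": "secret2"},
    ]
)
os.environ["GLOBUS_OUTBOUND_CLIENT_CREDENTIALS"] = json.dumps(
    [{"client_id": "id3", "client_secret": "secret3"}]
)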
dkist_processing_common/models/graphql.py
CHANGED
@@ -176,36 +176,3 @@ class RecipeRunProvenanceMutation(GraphqlBaseModel):
     libraryVersions: str
     workflowVersion: str
     codeVersion: str | None = None
-
-
-class QualityCreation(GraphqlBaseModel):
-    """Quality data creation record."""
-
-    datasetId: str
-    metricCode: str
-    facet: str | None = None
-    name: str | None = None
-    description: str | None = None
-    statement: str | None = None
-    # JSON array
-    warnings: str | None = None
-    # JSON objects
-    plotData: str | None = None
-    multiPlotData: str | None = None
-    tableData: str | None = None
-    histogramData: str | None = None
-    modmatData: str | None = None
-    raincloudData: str | None = None
-    efficiencyData: str | None = None
-
-
-class QualitiesRequest(GraphqlBaseModel):
-    """Query parameters for quality data."""
-
-    datasetId: str
-
-
-class QualityResponse(GraphqlBaseModel):
-    """Query Response for quality data."""
-
-    qualityId: int
dkist_processing_common/tasks/l1_output_data.py
CHANGED
@@ -38,8 +38,9 @@ class L1OutputDataBase(OutputDataBase, ABC):
 
     @property
     def dataset_has_quality_data(self) -> bool:
-        """Return True if
-
+        """Return True if the dataset has quality data."""
+        path_count = self.count(tags=[Tag.output(), Tag.quality_data()])
+        return path_count > 0
 
     def rollback(self):
         """Warn that the metadata-store and the interservice bus retain the effect of this tasks execution. Rolling back this task may not be achievable without other action."""
@@ -58,6 +59,9 @@ class TransferL1Data(TransferDataBase, GlobusMixin):
         # Movie needs to be transferred separately as the movie headers need to go with it
         self.transfer_movie()
 
+        with self.telemetry_span("Upload quality data"):
+            self.transfer_quality_data()
+
         with self.telemetry_span("Upload science frames"):
             self.transfer_output_frames()
 
@@ -101,6 +105,33 @@ class TransferL1Data(TransferDataBase, GlobusMixin):
             content_type="video/mp4",
         )
 
+    def transfer_quality_data(self):
+        """Transfer quality data to the object store."""
+        paths = list(self.read(tags=[Tag.output(), Tag.quality_data()]))
+        if len(paths) == 0:
+            logger.info(
+                f"No quality data found to upload for dataset. recipe_run_id={self.recipe_run_id}"
+            )
+            return
+
+        if count := len(paths) > 1:
+            # dataset inventory does not support multiple quality data object keys
+            raise RuntimeError(
+                f"Found multiple quality data files to upload. Not supported."
+                f"{count=}, recipe_run_id={self.recipe_run_id}"
+            )
+
+        with self.telemetry_span(f"Uploading the trial quality data"):
+            path = paths[0]
+            logger.info(f"Uploading quality data: recipe_run_id={self.recipe_run_id}, {path=}")
+            quality_data_object_key = self.format_object_key(path)
+            self.object_store_upload_quality_data(
+                quality_data=path,
+                bucket=self.destination_bucket,
+                object_key=quality_data_object_key,
+                content_type="application/json",
+            )
+
 
 class AssembleQualityData(L1OutputDataBase, QualityMixin):
     """
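A standalone sketch of the single-file invariant enforced above. Note that in the released guard, `count := len(paths) > 1` binds the result of the comparison, so the error message would report `count=True` rather than the file count; the sketch below (a hypothetical helper, not part of the package) binds the count first:

from pathlib import Path


def single_quality_file(paths: list[Path], recipe_run_id: int) -> Path:
    """Return the lone quality-data path, enforcing the one-object-key limit."""
    if not paths:
        raise ValueError(f"No quality data found. recipe_run_id={recipe_run_id}")
    count = len(paths)
    if count > 1:
        # dataset inventory supports only a single quality data object key
        raise RuntimeError(
            f"Found multiple quality data files to upload. Not supported. "
            f"{count=}, recipe_run_id={recipe_run_id}"
        )
    return paths[0]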
@@ -128,7 +159,7 @@ class AssembleQualityData(L1OutputDataBase, QualityMixin):
         ):
             self.write(
                 quality_data,
-                tags=Tag.quality_data(),
+                tags=[Tag.output(), Tag.quality_data()],
                 encoder=quality_data_encoder,
                 relative_path=f"{self.constants.dataset_id}_quality_data.json",
             )
@@ -136,31 +167,18 @@ class AssembleQualityData(L1OutputDataBase, QualityMixin):
 
 class SubmitDatasetMetadata(L1OutputDataBase):
     """
-    Add
+    Add receipt account to the metadata store.
 
-    Add the quality data to the Quality database.
     Add a Dataset Receipt Account record to Processing Support for use by the Dataset Catalog Locker.
-    Adds the number of files created during the calibration processing to the Processing Support table
+    Adds the number of files to be created during the calibration processing to the Processing Support table
     for use by the Dataset Catalog Locker.
     """
 
     def run(self) -> None:
         """Run method for this task."""
-        with self.telemetry_span(f"Storing quality data to metadata store"):
-            # each quality_data file is a list - this will combine the elements of multiple lists into a single list
-            quality_data = list(
-                chain.from_iterable(
-                    self.read(tags=Tag.quality_data(), decoder=quality_data_decoder)
-                )
-            )
-            self.metadata_store_add_quality_data(
-                dataset_id=self.constants.dataset_id, quality_data=quality_data
-            )
         with self.telemetry_span("Count Expected Outputs"):
             dataset_id = self.constants.dataset_id
             expected_object_count = self.count(tags=Tag.output())
-            if quality_data:
-                expected_object_count += 1
             logger.info(
                 f"Adding Dataset Receipt Account: "
                 f"{dataset_id=}, {expected_object_count=}, recipe_run_id={self.recipe_run_id}"
@@ -230,24 +248,12 @@ class PublishCatalogAndQualityMessages(L1OutputDataBase, InterserviceBusMixin):
         messages = [CatalogObjectMessage(body=body) for body in message_bodies]
         return messages
 
-    @property
-    def quality_report_message(self) -> CreateQualityReportMessage:
-        """Create the Quality Report Message."""
-        file_name = Path(f"{self.constants.dataset_id}_quality_report.pdf")
-        body = CreateQualityReportMessageBody(
-            bucket=self.destination_bucket,
-            objectName=self.format_object_key(file_name),
-            conversationId=str(self.recipe_run_id),
-            datasetId=self.constants.dataset_id,
-            incrementDatasetCatalogReceiptCount=True,
-        )
-        return CreateQualityReportMessage(body=body)
-
     def run(self) -> None:
         """Run method for this task."""
         with self.telemetry_span("Gather output data"):
             frames = self.read(tags=self.output_frame_tags)
             movies = self.read(tags=[Tag.output(), Tag.movie()])
+            quality_data = self.read(tags=[Tag.output(), Tag.quality_data()])
         with self.telemetry_span("Create message objects"):
             messages = []
             messages += self.frame_messages(paths=frames)
@@ -256,7 +262,7 @@ class PublishCatalogAndQualityMessages(L1OutputDataBase, InterserviceBusMixin):
         object_message_count = len(messages) - frame_message_count
         dataset_has_quality_data = self.dataset_has_quality_data
         if dataset_has_quality_data:
-            messages.
+            messages += self.object_messages(paths=quality_data, object_type="QDATA")
         with self.telemetry_span(
             f"Publish messages: {frame_message_count = }, {object_message_count = }, {dataset_has_quality_data = }"
         ):
dkist_processing_common/tasks/mixin/globus.py
CHANGED
@@ -9,6 +9,8 @@ from globus_sdk import ConfidentialAppAuthClient
 from globus_sdk import GlobusError
 from globus_sdk import TransferClient
 from globus_sdk import TransferData
+from globus_sdk.scopes import TransferScopes
+from globus_sdk.transport import RetryConfig
 
 from dkist_processing_common.config import common_configurations
 
@@ -31,27 +33,32 @@ class GlobusTransferItem:
 class GlobusMixin:
     """Mixin to add methods to a Task to support globus transfers."""
 
-
-
-
-
-
+    def globus_transfer_client_factory(self, transfer_data: TransferData) -> TransferClient:
+        """Create a globus transfer client based on the direction of transfer and round-robin the available application credentials."""
+        if (
+            transfer_data["source_endpoint"] == common_configurations.object_store_endpoint
+        ):  # inbound
+            client_credentials = common_configurations.globus_inbound_client_credentials
+        else:  # outbound
+            client_credentials = common_configurations.globus_outbound_client_credentials
+
+        # Round-robin the client credentials based on the recipe run id
+        index = self.recipe_run_id % len(client_credentials)
+        selected_credential = client_credentials[index]
+
         confidential_client = ConfidentialAppAuthClient(
-            client_id=
-            client_secret=
-            transport_params=common_configurations.globus_transport_params,
+            client_id=selected_credential.client_id,
+            client_secret=selected_credential.client_secret,
         )
-        authorizer = ClientCredentialsAuthorizer(
-
-
-        return self._globus_transfer_client
+        authorizer = ClientCredentialsAuthorizer(confidential_client, scopes=TransferScopes)
+        retry_config = RetryConfig(max_retries=common_configurations.globus_max_retries)
+
+        return TransferClient(authorizer=authorizer, retry_config=retry_config)
 
     def globus_transfer_scratch_to_object_store(
         self,
         transfer_items: list[GlobusTransferItem],
         label: str = None,
-        sync_level: str = None,
         verify_checksum: bool = True,
     ) -> None:
         """Transfer data from scratch to the object store."""
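A toy illustration of the modulo round-robin used by the factory above (the credential labels are hypothetical). Deriving the index from the recipe run id means a given run always maps to the same credential while concurrent runs spread across the pool; note that an empty pool would raise ZeroDivisionError, so presumably at least one credential is always configured:

credentials = ["cred-a", "cred-b", "cred-c"]  # hypothetical pool

for recipe_run_id in range(6):
    selected = credentials[recipe_run_id % len(credentials)]
    print(f"{recipe_run_id} -> {selected}")
# 0 -> cred-a, 1 -> cred-b, 2 -> cred-c, 3 -> cred-a, 4 -> cred-b, 5 -> cred-c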
@@ -60,7 +67,6 @@ class GlobusMixin:
             destination_endpoint=common_configurations.object_store_endpoint,
             transfer_items=transfer_items,
             label=label,
-            sync_level=sync_level,
             verify_checksum=verify_checksum,
         )
 
@@ -68,7 +74,6 @@ class GlobusMixin:
         self,
         transfer_items: list[GlobusTransferItem],
         label: str = None,
-        sync_level: str = None,
         verify_checksum: bool = True,
     ) -> None:
         """Transfer data from the object store to scratch."""
@@ -77,7 +82,6 @@ class GlobusMixin:
             destination_endpoint=common_configurations.scratch_endpoint,
             transfer_items=transfer_items,
             label=label,
-            sync_level=sync_level,
             verify_checksum=verify_checksum,
         )
 
@@ -87,7 +91,6 @@ class GlobusMixin:
         destination_endpoint: str,
         transfer_items: list[GlobusTransferItem],
         label: str = None,
-        sync_level: str = None,
         verify_checksum: bool = True,
     ) -> TransferData:
         """Format a globus TransferData instance."""
@@ -95,7 +98,6 @@ class GlobusMixin:
             source_endpoint=source_endpoint,
             destination_endpoint=destination_endpoint,
             label=label,
-            sync_level=sync_level,
             verify_checksum=verify_checksum,
         )
         for item in transfer_items:
@@ -112,7 +114,6 @@ class GlobusMixin:
         destination_endpoint: str,
         transfer_items: list[GlobusTransferItem],
         label: str = None,
-        sync_level: str = None,
         verify_checksum: bool = True,
     ) -> None:
         """Perform a transfer of data using globus."""
@@ -121,7 +122,6 @@ class GlobusMixin:
             destination_endpoint=destination_endpoint,
             transfer_items=transfer_items,
             label=label,
-            sync_level=sync_level,
             verify_checksum=verify_checksum,
         )
         self._blocking_globus_transfer(transfer_data=transfer_data)
@@ -131,24 +131,21 @@ class GlobusMixin:
         source_endpoint: str,
         destination_endpoint: str,
         label: str = None,
-        sync_level: str = None,
         verify_checksum: bool = True,
     ) -> TransferData:
         label = label or "Data Processing Transfer"
         return TransferData(
-            transfer_client=self.globus_transfer_client,
             source_endpoint=source_endpoint,
             destination_endpoint=destination_endpoint,
             label=label,
-            sync_level=sync_level,
             verify_checksum=verify_checksum,
         )
 
     def _blocking_globus_transfer(self, transfer_data: TransferData) -> None:
-        tc = self.
-        logger.info(f"Starting globus transfer: label={transfer_data.get('label')}")
+        tc = self.globus_transfer_client_factory(transfer_data=transfer_data)
         transfer_result = tc.submit_transfer(transfer_data)
         task_id = transfer_result["task_id"]
+        logger.info(f"Starting globus transfer: label={transfer_data.get('label')}, {task_id=}, ")
         polling_interval = 60
         while not tc.task_wait(
             task_id=task_id, timeout=polling_interval, polling_interval=polling_interval
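For reference, the blocking-wait pattern in `_blocking_globus_transfer` isolated as a sketch; `TransferClient.task_wait` returns False on each timeout and True once the task completes, so the loop simply spins at the polling interval (client construction and credentials elided):

from globus_sdk import TransferClient


def wait_for_transfer(tc: TransferClient, task_id: str, polling_interval: int = 60) -> None:
    # task_wait blocks for up to `timeout` seconds and reports completion as a bool.
    while not tc.task_wait(
        task_id=task_id, timeout=polling_interval, polling_interval=polling_interval
    ):
        print(f"Transfer {task_id} still running...")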
dkist_processing_common/tasks/mixin/metadata_store.py
CHANGED
@@ -1,6 +1,5 @@
 """Mixin for a WorkflowDataTaskBase subclass which implements Metadata Store data access functionality."""
 
-import json
 import logging
 from functools import cached_property
 from typing import Literal
@@ -8,15 +7,11 @@ from typing import Literal
 from pydantic import validate_call
 
 from dkist_processing_common._util.graphql import GraphQLClient
-from dkist_processing_common.codecs.quality import QualityDataEncoder
 from dkist_processing_common.config import common_configurations
 from dkist_processing_common.models.graphql import DatasetCatalogReceiptAccountMutation
 from dkist_processing_common.models.graphql import DatasetCatalogReceiptAccountResponse
 from dkist_processing_common.models.graphql import InputDatasetPartResponse
 from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
-from dkist_processing_common.models.graphql import QualitiesRequest
-from dkist_processing_common.models.graphql import QualityCreation
-from dkist_processing_common.models.graphql import QualityResponse
 from dkist_processing_common.models.graphql import RecipeRunMutation
 from dkist_processing_common.models.graphql import RecipeRunMutationResponse
 from dkist_processing_common.models.graphql import RecipeRunProvenanceMutation
@@ -150,50 +145,6 @@ class MetadataStoreMixin:
             mutation_response_cls=RecipeRunProvenanceResponse,
         )
 
-    # QUALITY
-
-    def metadata_store_add_quality_data(self, dataset_id: str, quality_data: list[dict]):
-        """Add the quality data to the metadata-store."""
-        if self.metadata_store_quality_data_exists(dataset_id):
-            raise RuntimeError(f"Quality data already persisted for dataset {dataset_id!r}")
-        for metric in quality_data:
-            if (metric_code := metric.get("metric_code")) is None:
-                name = metric.get("name")
-                raise ValueError(f"No metric_code for {name!r} in dataset {dataset_id!r}")
-            params = QualityCreation(
-                datasetId=dataset_id,
-                metricCode=metric_code,
-                facet=metric.get("facet"),
-                name=metric.get("name"),
-                description=metric.get("description"),
-                statement=metric.get("statement"),
-                # JSON array
-                warnings=json.dumps(metric.get("warnings")),
-                # JSON objects
-                plotData=json.dumps(metric.get("plot_data"), cls=QualityDataEncoder),
-                multiPlotData=json.dumps(metric.get("multi_plot_data"), cls=QualityDataEncoder),
-                tableData=json.dumps(metric.get("table_data"), cls=QualityDataEncoder),
-                histogramData=json.dumps(metric.get("histogram_data"), cls=QualityDataEncoder),
-                modmatData=json.dumps(metric.get("modmat_data"), cls=QualityDataEncoder),
-                raincloudData=json.dumps(metric.get("raincloud_data"), cls=QualityDataEncoder),
-                efficiencyData=json.dumps(metric.get("efficiency_data"), cls=QualityDataEncoder),
-            )
-            self.metadata_store_client.execute_gql_mutation(
-                mutation_base="createQuality",
-                mutation_parameters=params,
-                mutation_response_cls=QualityResponse,
-            )
-
-    def metadata_store_quality_data_exists(self, dataset_id: str) -> bool:
-        """Return True if quality data exists in the metadata-store for the given dataset id."""
-        params = QualitiesRequest(datasetId=dataset_id)
-        response = self.metadata_store_client.execute_gql_query(
-            query_base="qualities",
-            query_response_cls=QualityResponse,
-            query_parameters=params,
-        )
-        return bool(response)
-
     # INPUT DATASET RECIPE RUN
 
     @cached_property
dkist_processing_common/tasks/mixin/object_store.py
CHANGED
@@ -55,6 +55,27 @@ class ObjectStoreMixin:
             },
         )
 
+    def object_store_upload_quality_data(
+        self,
+        quality_data: Path | bytes,
+        bucket: str,
+        object_key: str,
+        content_type: str = "application/json",
+    ):
+        """Upload quality data to the object store."""
+        self.object_store_client.upload_object(
+            object_data=quality_data,
+            bucket=bucket,
+            object_key=object_key,
+            verify_checksum=True,
+            content_type=content_type,
+            metadata={
+                "groupname": "DATASET",
+                "groupid": self.constants.dataset_id,
+                "objecttype": "QDATA",
+            },
+        )
+
     def object_store_remove_folder_objects(self, bucket: str, path: Path | str) -> list[str]:
         """
         Remove folder objects (end with /) in the specified bucket and path.
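The helper above fixes the object-store metadata contract for quality data. A runnable sketch of the keyword set it forwards to `object_store_client.upload_object` (the values are hypothetical; in the mixin, `groupid` comes from `self.constants.dataset_id`):

from pathlib import Path


def quality_upload_kwargs(path: Path, bucket: str, object_key: str, dataset_id: str) -> dict:
    """Mirror the arguments object_store_upload_quality_data passes along."""
    return {
        "object_data": path,
        "bucket": bucket,
        "object_key": object_key,
        "verify_checksum": True,
        "content_type": "application/json",
        "metadata": {
            "groupname": "DATASET",
            "groupid": dataset_id,
            "objecttype": "QDATA",  # matches the object_type used in catalog messages
        },
    }


print(quality_upload_kwargs(Path("DSET12_quality_data.json"), "data", "l1/DSET12.json", "DSET12"))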
dkist_processing_common/tasks/mixin/quality/_metrics.py
CHANGED
@@ -1356,15 +1356,13 @@ class _WavecalQualityMixin:
         Note that the residuals are the *unweighed* residuals.
         """
         weight_data = np.ones(input_wavelength.size) if weights is None else weights
-        prepared_weights =
+        prepared_weights = fit_result.prepared_weights
         residuals = fit_result.minimizer_result.residual / prepared_weights
         residuals[~np.isfinite(residuals)] = 0.0
-        best_fit_atlas = input_spectrum - residuals
         normalized_residuals = residuals / input_spectrum
 
-
-
-        best_fit_wavelength = wcs.spectral.pixel_to_world(np.arange(input_spectrum.size))
+        best_fit_atlas = fit_result.best_fit_atlas
+        best_fit_wavelength = fit_result.best_fit_wavelength_vector
 
         finite_idx = (
             np.isfinite(input_wavelength)
@@ -1378,7 +1376,7 @@ class _WavecalQualityMixin:
         data = {
             "input_wavelength_nm": input_wavelength.to_value(u.nm)[finite_idx].tolist(),
             "input_spectrum": input_spectrum[finite_idx].tolist(),
-            "best_fit_wavelength_nm": best_fit_wavelength
+            "best_fit_wavelength_nm": best_fit_wavelength[finite_idx].tolist(),
             "best_fit_atlas": best_fit_atlas[finite_idx].tolist(),
             "normalized_residuals": normalized_residuals[finite_idx].tolist(),
             "weights": None if weights is None else weight_data[finite_idx].tolist(),
dkist_processing_common/tasks/trial_catalog.py
CHANGED
@@ -5,15 +5,18 @@ import logging
 from datetime import datetime
 from itertools import chain
 from pathlib import Path
+from typing import Any
 from typing import Generator
 from uuid import uuid4
 
 from dkist_processing_common.codecs.asdf import asdf_fileobj_encoder
+from dkist_processing_common.codecs.basemodel import basemodel_decoder
 from dkist_processing_common.codecs.fits import fits_access_decoder
 from dkist_processing_common.codecs.json import json_encoder
 from dkist_processing_common.codecs.path import path_decoder
 from dkist_processing_common.codecs.quality import quality_data_decoder
 from dkist_processing_common.models.fits_access import FitsAccessBase
+from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks.output_data_base import OutputDataBase
 
@@ -27,6 +30,7 @@ INVENTORY_EXTRA_INSTALLED = False
 try:
     from dkist_inventory.inventory import generate_asdf_filename
     from dkist_inventory.inventory import generate_inventory_from_frame_inventory
+    from dkist_inventory.inventory import generate_quality_report_filename
 
     INVENTORY_EXTRA_INSTALLED = True
 except ModuleNotFoundError:
@@ -138,11 +142,15 @@ class CreateTrialAsdf(OutputDataBase):
 
     def run(self) -> None:
         """Generate an ASDF file simulating the ASDF file that would be produced when cataloging the dataset."""
+        with self.telemetry_span("Collate input dataset parameters"):
+            parameters = self.parse_input_dataset_parameters()
+
         with self.telemetry_span("Generate ASDF tree"):
             tree = asdf_tree_from_filenames(
                 filenames=self.absolute_output_frame_paths,
                 hdu=1,  # compressed
                 relative_to=self.scratch.workflow_base_path,
+                parameters=parameters,
             )
 
         trial_history = [
@@ -169,6 +177,37 @@ class CreateTrialAsdf(OutputDataBase):
             ),
         )
 
+    def parse_input_dataset_parameters(self) -> list[dict[str, Any]]:
+        """
+        Return the parameters associated with the dataset.
+
+        Returns
+        -------
+        list[dict[str, Any]]
+            A list of dictionaries, each containing a parameter name and its values.
+
+        Raises
+        ------
+        ValueError
+            If there is not exactly one ``InputDatasetPartDocumentList`` found.
+        """
+        part_docs_iter = self.read(
+            tags=Tag.input_dataset_parameters(),
+            decoder=basemodel_decoder,
+            model=InputDatasetPartDocumentList,
+        )
+        docs = list(part_docs_iter)
+
+        if not docs:
+            logger.warning("No parameter list decoded from files")
+            return []
+
+        if len(docs) > 1:
+            raise ValueError(f"Expected 1 parameter list, found {len(docs)}")
+
+        parameters = docs[0].model_dump(by_alias=True).get("doc_list", [])
+        return parameters
+
 
 class CreateTrialQualityReport(OutputDataBase):
     """
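A minimal stand-in for the decode/dump round trip in `parse_input_dataset_parameters` above. The real `InputDatasetPartDocumentList` lives in `dkist_processing_common.models.input_dataset`; the stand-in model here is an assumption for illustration, with item keys taken from the factory in mock_metadata_store.py further down:

from typing import Any

from pydantic import BaseModel, Field


class PartDocumentList(BaseModel):  # stand-in for InputDatasetPartDocumentList
    doc_list: list[dict[str, Any]] = Field(default_factory=list)


raw = [
    {
        "parameterName": "abc123",
        "parameterValues": [{"parameterValueId": 0, "parameterValue": '"x"'}],
    }
]
doc = PartDocumentList.model_validate({"doc_list": raw})
assert doc.model_dump(by_alias=True).get("doc_list", []) == raw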
@@ -186,6 +225,13 @@ class CreateTrialQualityReport(OutputDataBase):
                 f" but the required dependencies were not found."
             )
 
+        if not INVENTORY_EXTRA_INSTALLED:
+            raise ModuleNotFoundError(
+                f"{self.__class__.__name__} Task requires the dkist-inventory package "
+                f"(e.g. via an 'inventory' pip_extra on dkist_processing_core.Workflow().add_node())"
+                f" but the required dependencies were not found."
+            )
+
     def run(self) -> None:
         """Generate the quality report for the dataset."""
         self.create_trial_quality_report()
@@ -207,5 +253,7 @@ class CreateTrialQualityReport(OutputDataBase):
         self.write(
             quality_report,
             tags=[Tag.output(), Tag.quality_report()],
-            relative_path=
+            relative_path=generate_quality_report_filename(
+                dataset_id=self.constants.dataset_id
+            ),
         )
dkist_processing_common/tasks/trial_output_data.py
CHANGED
@@ -100,7 +100,7 @@ class TransferTrialData(TransferDataBase, GlobusMixin):
         tag_list = []
         tag_list += [[Tag.output(), Tag.dataset_inventory()]]
         tag_list += [[Tag.output(), Tag.asdf()]]
-        tag_list += [[Tag.quality_data()]]
+        tag_list += [[Tag.output(), Tag.quality_data()]]
         tag_list += [[Tag.output(), Tag.quality_report()]]
         tag_list += [[Tag.output(), Tag.movie()]]
         return tag_list
dkist_processing_common/tests/mock_metadata_store.py
CHANGED
@@ -6,6 +6,7 @@ import json
 from abc import ABC
 from abc import abstractmethod
 from datetime import datetime
+from datetime import timedelta
 from pathlib import Path
 from uuid import uuid4
 
@@ -134,10 +135,6 @@ class InputDatasetRecipeRunResponseMapping(ResponseMapping):
         return Unset
 
 
-class QualityResponseMapping(ResponseMapping):
-    pass  # TODO
-
-
 def make_default_recipe_run_status_response() -> RecipeRunStatusResponse:
     return RecipeRunStatusResponse(recipeRunStatusId=1)
 
@@ -234,3 +231,41 @@ def fake_gql_client():
     Convenience fixture for default mock GQL client. To customize, use fake_gql_client_factory.
     """
     return fake_gql_client_factory()
+
+
+def input_dataset_parameters_part_factory(
+    parameter_count: int = 1,
+    parameter_value_count: int = 1,
+    has_date: bool = False,
+    has_file: bool = False,
+) -> list[dict]:
+    """Create a mock InputDatasetPartDocumentList with parameters."""
+    result = [
+        {
+            "parameterName": uuid4().hex[:6],
+            "parameterValues": [
+                {"parameterValueId": i, "parameterValue": json.dumps(uuid4().hex)}
+                for i in range(parameter_value_count)
+            ],
+        }
+        for _ in range(parameter_count)
+    ]
+    if has_date:
+        base = datetime(2018, 9, 14, 0, 0, 0)  # This date is before any possible start dates
+        for parameter_index, data in enumerate(result):
+            for item in data["parameterValues"]:
+                dt = base + timedelta(days=parameter_index)
+                item["parameterValueStartDate"] = dt.isoformat()
+    if has_file:
+        for data in result:
+            param_list = data["parameterValues"]
+            for item in param_list:
+                item["parameterValue"] = json.dumps(
+                    {
+                        "__file__": {
+                            "bucket": "data",
+                            "objectKey": f"parameters/{data['parameterName']}/{uuid4().hex}.dat",
+                        }
+                    }
+                )
+    return result
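The relocated factory's output validates directly into `InputDatasetPartDocumentList`, which is how the new test_trial_catalog.py fixture below consumes it; for example:

from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
from dkist_processing_common.tests.mock_metadata_store import input_dataset_parameters_part_factory

parts = input_dataset_parameters_part_factory(
    parameter_count=2, parameter_value_count=1, has_date=True
)
doc = InputDatasetPartDocumentList.model_validate({"doc_list": parts})
assert len(doc.doc_list) == 2  # one entry per generated parameter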
dkist_processing_common/tests/test_input_dataset.py
CHANGED
@@ -1,5 +1,4 @@
 import json
-from datetime import datetime
 from typing import Any
 from uuid import uuid4
 
@@ -8,6 +7,7 @@ import pytest
 from dkist_processing_common.codecs.basemodel import basemodel_decoder
 from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
 from dkist_processing_common.models.tags import Tag
+from dkist_processing_common.tests.mock_metadata_store import input_dataset_parameters_part_factory
 
 
 def input_dataset_frames_part_factory(bucket_count: int = 1) -> list[dict]:
@@ -25,42 +25,6 @@ def flatten_frame_parts(frame_parts: list[dict]) -> list[tuple[str, str]]:
     return result
 
 
-def input_dataset_parameters_part_factory(
-    parameter_count: int = 1,
-    parameter_value_count: int = 1,
-    has_date: bool = False,
-    has_file: bool = False,
-) -> list[dict]:
-    result = [
-        {
-            "parameterName": uuid4().hex[:6],
-            "parameterValues": [
-                {"parameterValueId": i, "parameterValue": json.dumps(uuid4().hex)}
-                for i in range(parameter_value_count)
-            ],
-        }
-        for _ in range(parameter_count)
-    ]
-    if has_date:
-        for data in result:
-            param_list = data["parameterValues"]
-            for item in param_list:
-                item["parameterValueStartDate"] = datetime(2022, 9, 14).isoformat()
-    if has_file:
-        for data in result:
-            param_list = data["parameterValues"]
-            for item in param_list:
-                item["parameterValue"] = json.dumps(
-                    {
-                        "__file__": {
-                            "bucket": "data",
-                            "objectKey": f"parameters/{data['parameterName']}/{uuid4().hex}.dat",
-                        }
-                    }
-                )
-    return result
-
-
 @pytest.mark.parametrize(
     "input_dataset_parts",
     [
dkist_processing_common/tests/test_publish_catalog_messages.py
CHANGED
@@ -57,24 +57,3 @@ def test_object_messages(publish_catalog_and_quality_messages_task):
     assert message.body.conversationId == str(task.recipe_run_id)
     assert message.body.objectType == object_type
     assert message.body.groupId == task.constants.dataset_id
-
-
-def test_quality_report_message(publish_catalog_and_quality_messages_task):
-    """
-    :Given: a PublishCatalogAndQualityMessages task
-    :When: creating quality report message
-    :Then: the attributes are correctly populated
-    """
-    # Given
-    task, proposal_id = publish_catalog_and_quality_messages_task
-    # When
-    message = task.quality_report_message
-    # Then
-    assert isinstance(message, CreateQualityReportMessage)
-    assert message.body.bucket == task.destination_bucket
-    # objectName exists and can be evaluated as a valid path
-    assert message.body.objectName
-    _ = Path(message.body.objectName)
-    assert message.body.datasetId == task.constants.dataset_id
-    assert message.body.conversationId == str(task.recipe_run_id)
-    assert message.body.incrementDatasetCatalogReceiptCount is True
dkist_processing_common/tests/test_quality_mixin.py
CHANGED
@@ -1214,12 +1214,20 @@ def wavecal_weights(wavecal_input_wavelength) -> np.ndarray:
 
 
 @pytest.fixture(scope="session")
-def wavecal_fit_result(wavecal_input_wavelength) -> FitResult:
+def wavecal_fit_result(wavecal_input_wavelength, wavecal_input_spectrum) -> FitResult:
     wavelength_params = WavelengthParameters(
         crpix=1, crval=10.0, dispersion=1, grating_constant=1, order=1, incident_light_angle=0
     )
-
-
+
+    residuals = np.random.random(wavecal_input_wavelength.size)
+    residuals[-1] = np.nan
+    minimizer_result = MinimizerResult(residual=residuals)
+    return FitResult(
+        wavelength_parameters=wavelength_params,
+        minimizer_result=minimizer_result,
+        input_wavelength_vector=wavecal_input_wavelength,
+        input_spectrum=wavecal_input_spectrum,
+    )
 
 
 @pytest.mark.parametrize(
dkist_processing_common/tests/test_submit_dataset_metadata.py
CHANGED
@@ -95,13 +95,10 @@ def test_submit_dataset_metadata(
     mocker.patch(
         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
-    # intercept
+    # intercept this GraphQLClient call so it can be confirmed
     mocked_metadata_store_add_dataset_receipt_account = mocker.patch.object(
         metadata_store.MetadataStoreMixin, "metadata_store_add_dataset_receipt_account"
     )
-    mocked_metadata_store_add_quality_data = mocker.patch.object(
-        metadata_store.MetadataStoreMixin, "metadata_store_add_quality_data"
-    )
     task = submit_dataset_metadata_task
 
     # When
@@ -109,4 +106,3 @@ def test_submit_dataset_metadata(
 
     # Then
     mocked_metadata_store_add_dataset_receipt_account.assert_called_once()
-    mocked_metadata_store_add_quality_data.assert_called_once()
dkist_processing_common/tests/test_trial_catalog.py
CHANGED
@@ -12,14 +12,30 @@ from sqids import Sqids
 
 from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.codecs.asdf import asdf_decoder
+from dkist_processing_common.codecs.basemodel import basemodel_encoder
 from dkist_processing_common.codecs.bytes import bytes_decoder
 from dkist_processing_common.codecs.fits import fits_hdulist_encoder
 from dkist_processing_common.codecs.json import json_decoder
 from dkist_processing_common.codecs.quality import quality_data_encoder
+from dkist_processing_common.models.input_dataset import InputDatasetParameter
+from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks import CreateTrialAsdf
 from dkist_processing_common.tasks import CreateTrialDatasetInventory
 from dkist_processing_common.tasks import CreateTrialQualityReport
+from dkist_processing_common.tests.mock_metadata_store import input_dataset_parameters_part_factory
+
+
+@pytest.fixture()
+def mock_input_dataset_parts() -> InputDatasetPartDocumentList:
+    """An InputDatasetPartDocumentList with two parameters, each with one value and a date."""
+    raw = input_dataset_parameters_part_factory(
+        parameter_count=2,
+        parameter_value_count=1,
+        has_date=True,
+        has_file=False,
+    )
+    return InputDatasetPartDocumentList.model_validate({"doc_list": raw})
 
 
 @pytest.fixture()
@@ -41,6 +57,24 @@ def scratch_with_l1_frames(recipe_run_id, tmp_path) -> WorkflowFileSystem:
         scratch.write(
             file_obj, tags=[Tag.output(), Tag.frame()], relative_path=f"{uuid4().hex}.dat"
         )
+
+    return scratch
+
+
+@pytest.fixture()
+def scratch_with_l1_frames_and_parameters(
+    scratch_with_l1_frames, mock_input_dataset_parts
+) -> WorkflowFileSystem:
+    """Scratch instance for a recipe run id with tagged L1 frames and input parameters."""
+    scratch = scratch_with_l1_frames
+
+    # Write validated Pydantic model bytes expected by InputDatasetPartDocumentList
+    file_obj = basemodel_encoder(mock_input_dataset_parts)
+    scratch.write(
+        file_obj,
+        tags=Tag.input_dataset_parameters(),
+        relative_path=f"{uuid4().hex}.json",
+    )
     return scratch
 
 
@@ -85,6 +119,22 @@ def create_trial_asdf_task(
     task._purge()
 
 
+@pytest.fixture(scope="function")
+def create_trial_asdf_task_with_params(
+    recipe_run_id, tmp_path, scratch_with_l1_frames_and_parameters, fake_constants_db
+) -> CreateTrialAsdf:
+    """An instance of CreateTrialAsdf with L1 frames and input parameters tagged in scratch."""
+    task = CreateTrialAsdf(
+        recipe_run_id=recipe_run_id,
+        workflow_name="trial_asdf",
+        workflow_version="trial_asdf_version",
+    )
+    task.scratch = scratch_with_l1_frames_and_parameters
+    task.constants._update(fake_constants_db)
+    yield task
+    task._purge()
+
+
 @pytest.fixture()
 def create_trial_quality_report_task(
     recipe_run_id, tmp_path, fake_constants_db
@@ -143,25 +193,32 @@ def test_create_trial_dataset_inventory(create_trial_dataset_inventory_task):
     assert len(inventory) > 20  # a bunch
 
 
-
+@pytest.mark.parametrize("with_params", [False, True], ids=["no_params", "with_params"])
+def test_create_trial_asdf(with_params, request, recipe_run_id, mock_input_dataset_parts):
     """
     :Given: An instance of CreateTrialAsdf with L1 frames tagged in scratch
     :When: CreateTrialAsdf is run
     :Then: An asdf file for the dataset is tagged in scratch
     """
-    task =
+    task = request.getfixturevalue(
+        "create_trial_asdf_task_with_params" if with_params else "create_trial_asdf_task"
+    )
     # When
     task()
+
     # Then
     asdf_tags = [Tag.output(), Tag.asdf()]
    filepaths = list(task.scratch.find_all(tags=asdf_tags))
     assert len(filepaths) == 1
     dataset_id = Sqids(min_length=6, alphabet=ascii_uppercase).encode([recipe_run_id])
     assert filepaths[0].name == f"INSTRUMENT_L1_20240416T160000_{dataset_id}_metadata.asdf"
+
     results = list(task.read(tags=asdf_tags, decoder=asdf_decoder))
     assert len(results) == 1
+
     tree = results[0]
     assert isinstance(tree, dict)
+
     for file_name in tree["dataset"].files.filenames:
         # This is a slightly better than check that `not Path(file_name).is_absolute()` because it confirms
         # we've correctly stripped the path of *all* parents (not just those that start at root).
@@ -169,6 +226,19 @@ def test_create_trial_asdf(create_trial_asdf_task, recipe_run_id):
         # `scratch.workflow_base_path`
         assert Path(file_name).name == file_name
 
+    # Only check parameters when present
+    ds = tree["dataset"]
+    assert "parameters" in ds.meta
+    parameters = ds.meta["parameters"]
+    assert isinstance(parameters, list)
+    if with_params:
+        assert parameters, f"ASDF tree must include input parameters: {parameters}"
+        assert len(parameters) == len(mock_input_dataset_parts.doc_list)
+        for param in parameters:
+            assert InputDatasetParameter.model_validate(param) in mock_input_dataset_parts.doc_list
+    else:
+        assert ds.meta["parameters"] == []
+
 
 def test_create_trial_quality_report(create_trial_quality_report_task):
     """
dkist_processing_common/tests/test_trial_output_data.py
CHANGED
@@ -158,13 +158,12 @@ def complete_trial_output_task(
     task.write(asdf_file_obj, relative_path=asdf_file_name, tags=[Tag.output(), Tag.asdf()])
 
     # Write quality data
-    # quality data is not tagged as OUTPUT
     quality_data_obj = uuid4().hex.encode("utf8")
     quality_data_name = "quality_data.json"
     task.write(
         quality_data_obj,
         relative_path=quality_data_name,
-        tags=Tag.quality_data(),
+        tags=[Tag.output(), Tag.quality_data()],
     )
 
     # Write a quality report file
{dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dkist-processing-common
-Version: 11.7.0rc6
+Version: 11.9.1
 Summary: Common task classes used by the DKIST science data processing pipelines
 Author-email: NSO / AURA <dkistdc@nso.edu>
 License: BSD-3-Clause
@@ -17,12 +17,12 @@ Requires-Dist: asdf<4.0.0,>=3.5.0
 Requires-Dist: astropy>=7.0.0
 Requires-Dist: dkist-fits-specifications<5.0,>=4.0.0
 Requires-Dist: dkist-header-validator<6.0,>=5.0.0
-Requires-Dist: dkist-processing-core==6.0.
+Requires-Dist: dkist-processing-core==6.0.1
 Requires-Dist: dkist-processing-pac<4.0,>=3.1
 Requires-Dist: dkist-service-configuration<5.0,>=4.1.7
 Requires-Dist: dkist-spectral-lines<4.0,>=3.0.0
-Requires-Dist: solar-wavelength-calibration<
-Requires-Dist: globus-sdk<
+Requires-Dist: solar-wavelength-calibration<3.0,>=2.0.0
+Requires-Dist: globus-sdk<5.0.0,>=4.0.0
 Requires-Dist: gqlclient[pydantic]==1.2.3
 Requires-Dist: sqids==0.5.1
 Requires-Dist: matplotlib>=3.4
@@ -31,7 +31,7 @@ Requires-Dist: numpy>=1.26.4
 Requires-Dist: object-clerk==1.0.0
 Requires-Dist: pandas>=1.4.2
 Requires-Dist: pillow>=10.2.0
-Requires-Dist: pydantic>=2.
+Requires-Dist: pydantic>=2.7.2
 Requires-Dist: redis==6.4.0
 Requires-Dist: requests>=2.23
 Requires-Dist: scipy>=1.15.1
@@ -57,9 +57,9 @@ Requires-Dist: pytest; extra == "docs"
 Requires-Dist: towncrier<22.12.0; extra == "docs"
 Requires-Dist: dkist-sphinx-theme; extra == "docs"
 Provides-Extra: inventory
-Requires-Dist: dkist-inventory<2.0,>=1.
+Requires-Dist: dkist-inventory<2.0,>=1.11.2; extra == "inventory"
 Provides-Extra: asdf
-Requires-Dist: dkist-inventory[asdf]<2.0,>=1.
+Requires-Dist: dkist-inventory[asdf]<2.0,>=1.11.2; extra == "asdf"
 Provides-Extra: quality
 Requires-Dist: dkist-quality<3.0,>=2.0.0; extra == "quality"
 
@@ -118,6 +118,10 @@ Environment Variables
   - annotation=str required=False default='unknown-service-version' alias_priority=2 validation_alias='DKIST_SERVICE_VERSION' description='Service version for OpenTelemetry'
   * - NOMAD_ALLOC_ID
   - annotation=str required=False default='unknown-allocation-id' alias_priority=2 validation_alias='NOMAD_ALLOC_ID' description='Nomad allocation ID for OpenTelemetry'
+  * - NOMAD_ALLOC_NAME
+  - annotation=str required=False default='unknown-allocation-name' alias='NOMAD_ALLOC_NAME' alias_priority=2 description='Allocation name for the deployed container the task is running on.'
+  * - NOMAD_GROUP_NAME
+  - annotation=str required=False default='unknown-allocation-group' alias='NOMAD_GROUP_NAME' alias_priority=2 description='Allocation group for the deployed container the task is running on'
   * - OTEL_EXPORTER_OTLP_TRACES_INSECURE
   - annotation=bool required=False default=True description='Use insecure connection for OTLP traces'
   * - OTEL_EXPORTER_OTLP_METRICS_INSECURE
@@ -158,12 +162,12 @@ Environment Variables
   - annotation=Union[dict, NoneType] required=False default=None description='S3 upload configuration for the object store.'
   * - S3_DOWNLOAD_CONFIG
   - annotation=Union[dict, NoneType] required=False default=None description='S3 download configuration for the object store.'
-  * -
-  - annotation=
-  * -
-  - annotation=
-  * -
-  - annotation=
+  * - GLOBUS_MAX_RETRIES
+  - annotation=int required=False default=5 description='Max retries for transient errors on calls to the globus api.'
+  * - GLOBUS_INBOUND_CLIENT_CREDENTIALS
+  - annotation=list[GlobusClientCredential] required=False default_factory=list description='Globus client credentials for inbound transfers.' examples=[[{'client_id': 'id1', 'client_secret': 'secret1'}, {'client_id': 'id2', 'client_secret': 'secret2'}]]
+  * - GLOBUS_OUTBOUND_CLIENT_CREDENTIALS
+  - annotation=list[GlobusClientCredential] required=False default_factory=list description='Globus client credentials for outbound transfers.' examples=[[{'client_id': 'id3', 'client_secret': 'secret3'}, {'client_id': 'id4', 'client_secret': 'secret4'}]]
   * - OBJECT_STORE_ENDPOINT
   - annotation=Union[str, NoneType] required=False default=None description='Object store Globus Endpoint ID.'
   * - SCRATCH_ENDPOINT
{dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/RECORD
RENAMED
@@ -1,12 +1,6 @@
 changelog/.gitempty,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-changelog/267.feature.1.rst,sha256=G5Xl7EwiRKAtftSUvalfF7iUL-WgqIvnQ3k6wyTYzaw,98
-changelog/267.feature.2.rst,sha256=1z0TqX3159m-wFvEgElBjUp0XPWQvmqMlVlf4BzO-J8,99
-changelog/267.feature.rst,sha256=g4bBK3EFQiQljW9lQrFh6rVOL8C6yxBYbXI4WOH8NE0,65
-changelog/267.misc.rst,sha256=ciUqUesBbdxh41euyLhyD3Bl6IVT2TSIGeOg3LFjIvg,105
-changelog/267.removal.1.rst,sha256=qiGl1FAy9ioLwfCh8EvM788wjFbOY3jM3eUJHPOzpu8,207
-changelog/267.removal.rst,sha256=ecpctwJ-AE59sfbsTy-j3GYaPaN8h6oWphXUMNQ42Yw,103
 dkist_processing_common/__init__.py,sha256=GQ9EBnYhkOnt-qODclAoLS_g5YVhurxfg1tjVtI9rDI,320
-dkist_processing_common/config.py,sha256=
+dkist_processing_common/config.py,sha256=f511KVpK24sQO4dDr4L6PMj5dz0jmWgnx2Y-3DpV0cw,5991
 dkist_processing_common/manual.py,sha256=bIVVyLsbXMh-g_2L3kGROL-1TtJe0_XviHsp7Br31x8,7023
 dkist_processing_common/_util/__init__.py,sha256=xf6JNpMKQgbhE2Jivymt-WO0WF6PpGt9rl604YpuTWk,92
 dkist_processing_common/_util/constants.py,sha256=0_bWLsvusHD8GrTx4B6V7AieKAaFbN9crcAALaS8x5Q,3245
@@ -32,7 +26,7 @@ dkist_processing_common/models/dkist_location.py,sha256=6Nk0wvv4R8ptlrV7BXon7abq
 dkist_processing_common/models/fits_access.py,sha256=imKqL4-_g6gTR-IeIjZ6qkMhQX3JujdrKFrTd9gOXnw,5605
 dkist_processing_common/models/flower_pot.py,sha256=_J7DwHM8u5kQfdPCpk5pUmALtLrM1L_h-x8JW5BSjXA,5129
 dkist_processing_common/models/fried_parameter.py,sha256=ro_H2Eo3I88lRf1wJjZfTc_XOjhgLt4whIQR_sjAFbM,1609
-dkist_processing_common/models/graphql.py,sha256=
+dkist_processing_common/models/graphql.py,sha256=QsKLbytpw_Qg9pJASscA7dZRfDbHLkpLZaWeqaHUDvo,5133
 dkist_processing_common/models/input_dataset.py,sha256=19w_ydrxdzjJgpnhFELqUomr7GixURjzLOaX41ipOKk,4173
 dkist_processing_common/models/message.py,sha256=ZEsPQalo5aKTOHfc5I15mNCe1KQcfJ3ivU7XBf8wnkM,1684
 dkist_processing_common/models/message_queue_binding.py,sha256=Y4otwkkePrLRSjlrya8nlEaBvCCUgfGZAWZF9XqCQ9Y,1012
@@ -64,26 +58,26 @@ dkist_processing_common/parsers/wavelength.py,sha256=P5C9mG8DAKK3GB3vWNRBI5l7pAW
 dkist_processing_common/tasks/__init__.py,sha256=l23ctjNsKJbHbbqaZBMeOPaOtw0hmITEljI_JJ-CVsU,627
 dkist_processing_common/tasks/assemble_movie.py,sha256=1ixDG-f4ODt0vywqVccG3aodLljVO5OGlvuMO9EEvcU,12767
 dkist_processing_common/tasks/base.py,sha256=itAHCvzcodo-q8_AjpWoRaM86BlcjWDpCIiUP7uwmP0,13236
-dkist_processing_common/tasks/l1_output_data.py,sha256=
+dkist_processing_common/tasks/l1_output_data.py,sha256=eF3BvTTH0Bb163_gpJ8epxkjve8YIdrYgPZO_LCMKKo,10717
 dkist_processing_common/tasks/output_data_base.py,sha256=r1Bu3FX5zTVj66GTMWtaV_NdhxjyjSm661Bt2Mxmfi4,3685
 dkist_processing_common/tasks/parse_l0_input_data.py,sha256=KguXT0Xavynu7C8NFMjsV4628LRoTvfeSuApb6v4Neg,18835
 dkist_processing_common/tasks/quality_metrics.py,sha256=cvGF6tJ8yAvxOvkeG3tWxYwL885BrFW5X3V7_MSzL-A,12481
 dkist_processing_common/tasks/teardown.py,sha256=rwT9lWINVDF11-az_nx-Z5ykMTX_SJCchobpU6sErgk,2360
 dkist_processing_common/tasks/transfer_input_data.py,sha256=DAYfS-B1o-iBT9MXU-TiJG4Hv05Z0c_JzPrnFgvnK9g,5786
-dkist_processing_common/tasks/trial_catalog.py,sha256=
-dkist_processing_common/tasks/trial_output_data.py,sha256=
+dkist_processing_common/tasks/trial_catalog.py,sha256=Yf-BKNCT_OHwJsxxZP8p2eRW04CcY0tw5_YIe1e9RQY,10535
+dkist_processing_common/tasks/trial_output_data.py,sha256=pUdrNlAzuir4AUdfax5_MOplB-A9NrXErMJmAwtJmLA,6811
 dkist_processing_common/tasks/write_l1.py,sha256=Xy834RTp3F95kLcW4ba5gfHMUocfZd82ZQQKnvQcP2M,23204
 dkist_processing_common/tasks/mixin/__init__.py,sha256=-g-DQbU7m1bclJYuFe3Yh757V-35GIDTbstardKQ7nU,68
-dkist_processing_common/tasks/mixin/globus.py,sha256=
+dkist_processing_common/tasks/mixin/globus.py,sha256=ugejtZ_MR5LesQYuXM1uICd_yWDE7cZZr0qnWCh75R8,6732
 dkist_processing_common/tasks/mixin/interservice_bus.py,sha256=M6R922l7gJSmmU_vswUXxy-c5DWNrIRjQu9H9CSgGfU,1081
-dkist_processing_common/tasks/mixin/metadata_store.py,sha256=
-dkist_processing_common/tasks/mixin/object_store.py,sha256=
+dkist_processing_common/tasks/mixin/metadata_store.py,sha256=GLfh0a0ehuRC8Uml59PfLExuoxGRj6tzNMytlO5jZf4,9106
+dkist_processing_common/tasks/mixin/object_store.py,sha256=l2sPzolmKsuYvUocHayT7PScliiFRIV12qIFYtCLII8,3888
 dkist_processing_common/tasks/mixin/quality/__init__.py,sha256=GOI_PBUxTmYp5IIuYFbwpA5Vx0jUwpdBBYYrnZMTh0E,384
 dkist_processing_common/tasks/mixin/quality/_base.py,sha256=qt9TZZ140skFWFmabrjlGdm60OLWEfx_xZAaohr6dLM,8492
-dkist_processing_common/tasks/mixin/quality/_metrics.py,sha256=
+dkist_processing_common/tasks/mixin/quality/_metrics.py,sha256=TtM6V7qy0U6ofFmjXNoJKkaECoPSfJsU4Ziu1UQgPZs,60457
 dkist_processing_common/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dkist_processing_common/tests/conftest.py,sha256=Tm-Yq956EAafpDtu1d7JjdVY0Unp9e4z9ak-yf4wnH4,22320
-dkist_processing_common/tests/mock_metadata_store.py,sha256=
+dkist_processing_common/tests/mock_metadata_store.py,sha256=i41xu5AY4A566YT0qSzhP7qpR0wZp_EFH-OFcAsV4vQ,9591
 dkist_processing_common/tests/test_assemble_movie.py,sha256=dyVhowxB-Kc6GuxlDs74UrPtK9fwdUL7y5haA3Bidz0,4065
 dkist_processing_common/tests/test_assemble_quality.py,sha256=-F22jMY6mPy65VZ1TZY2r1vsxMXOPmZHArGx70OD3BA,17832
 dkist_processing_common/tests/test_base.py,sha256=gsyBG2R6Ufx7CzbHeGMagUwM9yCfpN4gCSZ6-aH2q48,6643
@@ -94,27 +88,27 @@ dkist_processing_common/tests/test_dkist_location.py,sha256=-_OoSw4SZDLFyIuOltHv
 dkist_processing_common/tests/test_fits_access.py,sha256=a50B4IAAH5NH5zeudTqyy0b5uWKJwJuzQLUdK1LoOHM,12832
 dkist_processing_common/tests/test_flower_pot.py,sha256=X9_UI3maa3ZQncV3jYHgovWnawDsdEkEB5vw6EAB96o,3151
 dkist_processing_common/tests/test_fried_parameter.py,sha256=iXtlQIifZ6cDOkEi-YDgP3oAlss2loq08Uohgvy1byQ,1295
-dkist_processing_common/tests/test_input_dataset.py,sha256=
+dkist_processing_common/tests/test_input_dataset.py,sha256=wnQbZxBYywG5CEXces2WWk6I0QA7HjStaYSTVVbe5r0,8499
 dkist_processing_common/tests/test_interservice_bus.py,sha256=QrBeZ8dh497h6nxA8-aVUIGDcSj8y9DIXIk9I_HkXr0,3001
 dkist_processing_common/tests/test_interservice_bus_mixin.py,sha256=IptJkW7Qeu2Y742NKXEgkok2VdS600keLgCD3Y9iw3A,4131
 dkist_processing_common/tests/test_manual_processing.py,sha256=iHF7yQPlar9niYAGXtFv28Gw3Undlds38yMfszk4ccY,1037
 dkist_processing_common/tests/test_output_data_base.py,sha256=D8b1XKvbE3C5cGOiHq58yJ2pzQL3iL0wLZy_AkDdB9Y,3085
 dkist_processing_common/tests/test_parameters.py,sha256=CUEUIGBPMCUXPll0G0UxFDbMXi8lmnjRwXBarGX1PAQ,14033
 dkist_processing_common/tests/test_parse_l0_input_data.py,sha256=9OOqeMX8ReQO67ldoMHOBKLQg7Nd5qWHOEoHygcN5Ic,11889
-dkist_processing_common/tests/test_publish_catalog_messages.py,sha256=
+dkist_processing_common/tests/test_publish_catalog_messages.py,sha256=7WRsEwoLHGeaCmLTAW4tU_BlZw0e3hwx65uWSGzfuYE,2393
 dkist_processing_common/tests/test_quality.py,sha256=IPz7liXcmoqWIsY78oX07Ui0nWHxoUH2FbKGEmMle7E,10258
-dkist_processing_common/tests/test_quality_mixin.py,sha256=
+dkist_processing_common/tests/test_quality_mixin.py,sha256=L-_kSIKs8A48LGt9QaItZWZqIcRF0MhBCAZQZYdSflk,55575
 dkist_processing_common/tests/test_scratch.py,sha256=WO8C1VJlkcC5IzST9Hj08CyyrINwYcN8pyteD9x38xs,16482
 dkist_processing_common/tests/test_stems.py,sha256=p__51u-b8vfWLI71aLxF3w2tcWtv6M6DyHJ7_6FZMHI,38949
-dkist_processing_common/tests/test_submit_dataset_metadata.py,sha256
+dkist_processing_common/tests/test_submit_dataset_metadata.py,sha256=-UicRcyRQAC9H3sbTYlJaH4-Yn6jKNyQEZhzZxojzqw,3543
 dkist_processing_common/tests/test_tags.py,sha256=w5gmVfp3Ck92KNV80lJQRMz0OYgTYzWtwVUFWv1b5i8,5024
 dkist_processing_common/tests/test_task_name.py,sha256=kqFr59XX2K87xzfTlClzDV4-Je1dx72LvdaJ22UE8UU,1233
 dkist_processing_common/tests/test_task_parsing.py,sha256=2_OOmeZQWD17XAd_ECYmodJzD_iRIBKjCYdGh38BOx4,4421
 dkist_processing_common/tests/test_teardown.py,sha256=DaliHSGsiQBZaFkf5wb3XBo6rHNPmx2bmQtVymYeBN4,5601
 dkist_processing_common/tests/test_transfer_input_data.py,sha256=eyAAWXpTHQ8aew87-MncWpYBn4DAZrTSOL3LvlQfR5Q,12611
 dkist_processing_common/tests/test_transfer_l1_output_data.py,sha256=PVGDJBEUk4kAeu8ivrhlCE7yd29R18t9kZLFx-mpBwY,2063
-dkist_processing_common/tests/test_trial_catalog.py,sha256=
-dkist_processing_common/tests/test_trial_output_data.py,sha256=
+dkist_processing_common/tests/test_trial_catalog.py,sha256=CxjtVABE5Fw2EvyXR56IJ3PPi9QvEOjccH0OzzRWk30,9424
+dkist_processing_common/tests/test_trial_output_data.py,sha256=fu3iGNV_FI8LOacezyt4HvXnxY3g1_UiBuRI63yz5Oo,11977
 dkist_processing_common/tests/test_workflow_task_base.py,sha256=LTVusltNrsGUOvw9G323am4CXebgE4tJhP6gZCcS0CQ,10457
 dkist_processing_common/tests/test_write_l1.py,sha256=alN-lozKEm6vKNdhtvzjnuPqv-NjHyUg16Op7SkMH-c,27964
 docs/Makefile,sha256=qnlVz6PuBqE39NfHWuUnHhNEA-EFgT2-WJNNNy9ttfk,4598
@@ -125,7 +119,7 @@ docs/landing_page.rst,sha256=aPAuXFhBx73lEZ59B6E6JXxkK0LlxzD0n-HXqHrfumQ,746
 docs/make.bat,sha256=mBAhtURwhQ7yc95pqwJzlhqBSvRknr1aqZ5s8NKvdKs,4513
 docs/requirements.txt,sha256=Kbl_X4c7RQZw035YTeNB63We6I7pvXFU4T0Uflp2yDY,29
 licenses/LICENSE.rst,sha256=piZaQplkzOMmH1NXg6QIdo9wwo9pPCoHkvm2-DmH76E,1462
-dkist_processing_common-11.
-dkist_processing_common-11.
-dkist_processing_common-11.
-dkist_processing_common-11.
+dkist_processing_common-11.9.1.dist-info/METADATA,sha256=Ydji9eb7FYmBGirF5MWTn7tsBC58ULxT-9SOhF7Am5E,14062
+dkist_processing_common-11.9.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dkist_processing_common-11.9.1.dist-info/top_level.txt,sha256=LJhd1W-Vn90K8HnQDIE4r52YDpUjjMWDnllAWHBByW0,48
+dkist_processing_common-11.9.1.dist-info/RECORD,,
changelog/267.feature.1.rst
DELETED
@@ -1 +0,0 @@
-Add new bud types TaskAverageBud and TaskBeginDateBud, which is based on new TaskDatetimeBudBase.
changelog/267.feature.2.rst
DELETED
@@ -1 +0,0 @@
-Add new bud type TaskContributingIdsBud, based on ContributingIdsBud, for for specific task types.
changelog/267.feature.rst
DELETED
@@ -1 +0,0 @@
-Add new buds to parsing for what will become the dataset extras.
changelog/267.misc.rst
DELETED
@@ -1 +0,0 @@
-Rename TimeFlowerBase and TaskTimeBudBase to RoundTimeFlowerBase and TaskRoundTimeBudBase, respectively.
changelog/267.removal.1.rst
DELETED
changelog/267.removal.rst
DELETED
@@ -1 +0,0 @@
-Remove IdBud, which is just a TaskUniqueBud with the task set to observe, and therefore is not needed.
{dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/WHEEL
RENAMED
File without changes

{dkist_processing_common-11.7.0rc6.dist-info → dkist_processing_common-11.9.1.dist-info}/top_level.txt
RENAMED

File without changes