dkist-processing-common 10.5.4__py3-none-any.whl → 12.1.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- changelog/280.misc.rst +1 -0
- changelog/282.feature.2.rst +2 -0
- changelog/282.feature.rst +2 -0
- changelog/284.feature.rst +1 -0
- changelog/285.feature.rst +2 -0
- changelog/285.misc.rst +2 -0
- changelog/286.feature.rst +2 -0
- changelog/287.misc.rst +1 -0
- dkist_processing_common/__init__.py +1 -0
- dkist_processing_common/_util/constants.py +1 -0
- dkist_processing_common/_util/graphql.py +1 -0
- dkist_processing_common/_util/scratch.py +9 -9
- dkist_processing_common/_util/tags.py +1 -0
- dkist_processing_common/codecs/array.py +20 -0
- dkist_processing_common/codecs/asdf.py +9 -3
- dkist_processing_common/codecs/basemodel.py +22 -0
- dkist_processing_common/codecs/bytes.py +1 -0
- dkist_processing_common/codecs/fits.py +37 -9
- dkist_processing_common/codecs/iobase.py +1 -0
- dkist_processing_common/codecs/json.py +1 -0
- dkist_processing_common/codecs/path.py +1 -0
- dkist_processing_common/codecs/quality.py +1 -1
- dkist_processing_common/codecs/str.py +1 -0
- dkist_processing_common/config.py +64 -25
- dkist_processing_common/manual.py +6 -8
- dkist_processing_common/models/constants.py +373 -37
- dkist_processing_common/models/dkist_location.py +27 -0
- dkist_processing_common/models/fits_access.py +48 -0
- dkist_processing_common/models/flower_pot.py +231 -9
- dkist_processing_common/models/fried_parameter.py +41 -0
- dkist_processing_common/models/graphql.py +66 -75
- dkist_processing_common/models/input_dataset.py +117 -0
- dkist_processing_common/models/message.py +1 -1
- dkist_processing_common/models/message_queue_binding.py +1 -1
- dkist_processing_common/models/metric_code.py +2 -0
- dkist_processing_common/models/parameters.py +65 -28
- dkist_processing_common/models/quality.py +50 -5
- dkist_processing_common/models/tags.py +23 -21
- dkist_processing_common/models/task_name.py +3 -2
- dkist_processing_common/models/telemetry.py +28 -0
- dkist_processing_common/models/wavelength.py +3 -1
- dkist_processing_common/parsers/average_bud.py +46 -0
- dkist_processing_common/parsers/cs_step.py +13 -12
- dkist_processing_common/parsers/dsps_repeat.py +6 -4
- dkist_processing_common/parsers/experiment_id_bud.py +12 -4
- dkist_processing_common/parsers/id_bud.py +42 -27
- dkist_processing_common/parsers/l0_fits_access.py +5 -3
- dkist_processing_common/parsers/l1_fits_access.py +51 -23
- dkist_processing_common/parsers/lookup_bud.py +125 -0
- dkist_processing_common/parsers/near_bud.py +21 -20
- dkist_processing_common/parsers/observing_program_id_bud.py +24 -0
- dkist_processing_common/parsers/proposal_id_bud.py +13 -5
- dkist_processing_common/parsers/quality.py +2 -0
- dkist_processing_common/parsers/retarder.py +32 -0
- dkist_processing_common/parsers/single_value_single_key_flower.py +6 -1
- dkist_processing_common/parsers/task.py +8 -6
- dkist_processing_common/parsers/time.py +178 -72
- dkist_processing_common/parsers/unique_bud.py +21 -22
- dkist_processing_common/parsers/wavelength.py +5 -3
- dkist_processing_common/tasks/__init__.py +3 -2
- dkist_processing_common/tasks/assemble_movie.py +4 -3
- dkist_processing_common/tasks/base.py +59 -60
- dkist_processing_common/tasks/l1_output_data.py +54 -53
- dkist_processing_common/tasks/mixin/globus.py +24 -27
- dkist_processing_common/tasks/mixin/interservice_bus.py +1 -0
- dkist_processing_common/tasks/mixin/metadata_store.py +108 -243
- dkist_processing_common/tasks/mixin/object_store.py +22 -0
- dkist_processing_common/tasks/mixin/quality/__init__.py +1 -0
- dkist_processing_common/tasks/mixin/quality/_base.py +8 -1
- dkist_processing_common/tasks/mixin/quality/_metrics.py +166 -14
- dkist_processing_common/tasks/output_data_base.py +4 -3
- dkist_processing_common/tasks/parse_l0_input_data.py +277 -15
- dkist_processing_common/tasks/quality_metrics.py +9 -9
- dkist_processing_common/tasks/teardown.py +7 -7
- dkist_processing_common/tasks/transfer_input_data.py +67 -69
- dkist_processing_common/tasks/trial_catalog.py +77 -17
- dkist_processing_common/tasks/trial_output_data.py +16 -17
- dkist_processing_common/tasks/write_l1.py +102 -72
- dkist_processing_common/tests/conftest.py +32 -173
- dkist_processing_common/tests/mock_metadata_store.py +271 -0
- dkist_processing_common/tests/test_assemble_movie.py +4 -4
- dkist_processing_common/tests/test_assemble_quality.py +32 -4
- dkist_processing_common/tests/test_base.py +5 -19
- dkist_processing_common/tests/test_codecs.py +103 -12
- dkist_processing_common/tests/test_constants.py +15 -0
- dkist_processing_common/tests/test_dkist_location.py +15 -0
- dkist_processing_common/tests/test_fits_access.py +56 -19
- dkist_processing_common/tests/test_flower_pot.py +147 -5
- dkist_processing_common/tests/test_fried_parameter.py +27 -0
- dkist_processing_common/tests/test_input_dataset.py +78 -361
- dkist_processing_common/tests/test_interservice_bus.py +1 -0
- dkist_processing_common/tests/test_interservice_bus_mixin.py +1 -1
- dkist_processing_common/tests/test_manual_processing.py +33 -0
- dkist_processing_common/tests/test_output_data_base.py +5 -7
- dkist_processing_common/tests/test_parameters.py +71 -22
- dkist_processing_common/tests/test_parse_l0_input_data.py +115 -32
- dkist_processing_common/tests/test_publish_catalog_messages.py +2 -24
- dkist_processing_common/tests/test_quality.py +1 -0
- dkist_processing_common/tests/test_quality_mixin.py +255 -23
- dkist_processing_common/tests/test_scratch.py +2 -1
- dkist_processing_common/tests/test_stems.py +511 -168
- dkist_processing_common/tests/test_submit_dataset_metadata.py +3 -7
- dkist_processing_common/tests/test_tags.py +1 -0
- dkist_processing_common/tests/test_task_name.py +1 -1
- dkist_processing_common/tests/test_task_parsing.py +17 -7
- dkist_processing_common/tests/test_teardown.py +28 -24
- dkist_processing_common/tests/test_transfer_input_data.py +270 -125
- dkist_processing_common/tests/test_transfer_l1_output_data.py +2 -3
- dkist_processing_common/tests/test_trial_catalog.py +83 -8
- dkist_processing_common/tests/test_trial_output_data.py +46 -73
- dkist_processing_common/tests/test_workflow_task_base.py +8 -10
- dkist_processing_common/tests/test_write_l1.py +298 -76
- dkist_processing_common-12.1.0rc1.dist-info/METADATA +265 -0
- dkist_processing_common-12.1.0rc1.dist-info/RECORD +134 -0
- {dkist_processing_common-10.5.4.dist-info → dkist_processing_common-12.1.0rc1.dist-info}/WHEEL +1 -1
- docs/conf.py +1 -0
- docs/index.rst +1 -1
- docs/landing_page.rst +13 -0
- dkist_processing_common/tasks/mixin/input_dataset.py +0 -166
- dkist_processing_common-10.5.4.dist-info/METADATA +0 -175
- dkist_processing_common-10.5.4.dist-info/RECORD +0 -112
- {dkist_processing_common-10.5.4.dist-info → dkist_processing_common-12.1.0rc1.dist-info}/top_level.txt +0 -0
dkist_processing_common/tests/mock_metadata_store.py

@@ -0,0 +1,271 @@
+"""
+Support functions and constants for customized FakeGQLClient
+"""
+
+import json
+from abc import ABC
+from abc import abstractmethod
+from datetime import datetime
+from datetime import timedelta
+from pathlib import Path
+from uuid import uuid4
+
+import pytest
+from pydantic import BaseModel
+
+from dkist_processing_common.models.graphql import InputDatasetInputDatasetPartResponse
+from dkist_processing_common.models.graphql import InputDatasetPartResponse
+from dkist_processing_common.models.graphql import InputDatasetPartTypeResponse
+from dkist_processing_common.models.graphql import InputDatasetRecipeInstanceResponse
+from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
+from dkist_processing_common.models.graphql import InputDatasetResponse
+from dkist_processing_common.models.graphql import RecipeInstanceResponse
+from dkist_processing_common.models.graphql import RecipeRunProvenanceResponse
+from dkist_processing_common.models.graphql import RecipeRunResponse
+from dkist_processing_common.models.graphql import RecipeRunStatusResponse
+
+TILE_SIZE = 64
+
+default_observe_frames_doc = [
+    {
+        "bucket": uuid4().hex[:6],
+        "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
+    }
+]
+
+default_calibration_frames_doc = [
+    {
+        "bucket": uuid4().hex[:6],
+        "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
+    },
+    {
+        "bucket": uuid4().hex[:6],
+        "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
+    },
+]
+
+default_parameters_doc = [
+    {
+        "parameterName": "param_name_1",
+        "parameterValues": [
+            {
+                "parameterValueId": 1,
+                "parameterValue": json.dumps([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
+                "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
+            }
+        ],
+    },
+    {
+        "parameterName": "param_name_2",
+        "parameterValues": [
+            {
+                "parameterValueId": 2,
+                "parameterValue": json.dumps(
+                    {
+                        "__file__": {
+                            "bucket": "data",
+                            "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
+                        }
+                    }
+                ),
+                "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
+            },
+            {
+                "parameterValueId": 3,
+                "parameterValue": json.dumps(
+                    {
+                        "__file__": {
+                            "bucket": "data",
+                            "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
+                        }
+                    }
+                ),
+                "parameterValueStartDate": datetime(2000, 1, 2).isoformat(),
+            },
+        ],
+    },
+    {
+        "parameterName": "param_name_4",
+        "parameterValues": [
+            {
+                "parameterValueId": 4,
+                "parameterValue": json.dumps({"a": 1, "b": 3.14159, "c": "foo", "d": [1, 2, 3]}),
+                "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
+            }
+        ],
+    },
+]
+
+default_recipe_run_configuration = {"tile_size": TILE_SIZE}
+
+
+class Unset:
+    pass
+
+
+class ResponseMapping(BaseModel, ABC):
+    response: BaseModel
+
+    @abstractmethod
+    def match_query(self, query_base: str, query_response_cls: type):
+        pass
+
+
+class RecipeRunStatusResponseMapping(ResponseMapping):
+    def match_query(self, query_base: str, query_response_cls: type):
+        if query_base == "recipeRunStatuses":
+            if query_response_cls == RecipeRunStatusResponse:
+                return self.response
+        return Unset
+
+
+class RecipeRunResponseMapping(ResponseMapping):
+    def match_query(self, query_base: str, query_response_cls: type):
+        if query_base == "recipeRuns":
+            if query_response_cls == RecipeRunResponse:
+                return self.response
+        return Unset
+
+
+class InputDatasetRecipeRunResponseMapping(ResponseMapping):
+    def match_query(self, query_base: str, query_response_cls: type):
+        if query_base == "recipeRuns":
+            if query_response_cls == InputDatasetRecipeRunResponse:
+                return self.response
+        return Unset
+
+
+def make_default_recipe_run_status_response() -> RecipeRunStatusResponse:
+    return RecipeRunStatusResponse(recipeRunStatusId=1)
+
+
+def make_default_recipe_run_response() -> RecipeRunResponse:
+    return RecipeRunResponse(
+        recipeInstanceId=1,
+        recipeInstance=RecipeInstanceResponse(
+            recipeId=1,
+            inputDatasetId=1,
+        ),
+        configuration=json.dumps(default_recipe_run_configuration),
+        recipeRunProvenances=[
+            RecipeRunProvenanceResponse(recipeRunProvenanceId=1, isTaskManual=False),
+        ],
+    )
+
+
+def make_default_input_dataset_recipe_run_response() -> InputDatasetRecipeRunResponse:
+    return InputDatasetRecipeRunResponse(
+        recipeInstance=InputDatasetRecipeInstanceResponse(
+            inputDataset=InputDatasetResponse(
+                inputDatasetId=1,
+                isActive=True,
+                inputDatasetInputDatasetParts=[
+                    InputDatasetInputDatasetPartResponse(
+                        inputDatasetPart=InputDatasetPartResponse(
+                            inputDatasetPartId=1,
+                            inputDatasetPartDocument=json.dumps(default_parameters_doc),
+                            inputDatasetPartType=InputDatasetPartTypeResponse(
+                                inputDatasetPartTypeName="parameters"
+                            ),
+                        )
+                    ),
+                    InputDatasetInputDatasetPartResponse(
+                        inputDatasetPart=InputDatasetPartResponse(
+                            inputDatasetPartId=2,
+                            inputDatasetPartDocument=json.dumps(default_observe_frames_doc),
+                            inputDatasetPartType=InputDatasetPartTypeResponse(
+                                inputDatasetPartTypeName="observe_frames"
+                            ),
+                        )
+                    ),
+                    InputDatasetInputDatasetPartResponse(
+                        inputDatasetPart=InputDatasetPartResponse(
+                            inputDatasetPartId=3,
+                            inputDatasetPartDocument=json.dumps(default_calibration_frames_doc),
+                            inputDatasetPartType=InputDatasetPartTypeResponse(
+                                inputDatasetPartTypeName="calibration_frames"
+                            ),
+                        )
+                    ),
+                ],
+            ),
+        ),
+    )
+
+
+default_response_mappings = (
+    RecipeRunStatusResponseMapping(response=make_default_recipe_run_status_response()),
+    RecipeRunResponseMapping(response=make_default_recipe_run_response()),
+    InputDatasetRecipeRunResponseMapping(response=make_default_input_dataset_recipe_run_response()),
+)
+
+
+def fake_gql_client_factory(response_mapping_override: ResponseMapping | None = None):
+
+    if response_mapping_override:
+        response_mappings = (response_mapping_override,) + default_response_mappings
+    else:
+        response_mappings = default_response_mappings
+
+    class FakeGQLClientClass:
+        def __init__(self, *args, **kwargs):
+            pass
+
+        def execute_gql_query(self, query_base: str, query_response_cls: type, *args, **kwargs):
+            # Overrides are prepended; first match is returned.
+            for rm in response_mappings:
+                response = rm.match_query(query_base, query_response_cls)
+                if response is not Unset:
+                    return [response]
+            raise ValueError(f"Mocked response not found for {query_base=}, {query_response_cls=}")
+
+        @staticmethod
+        def execute_gql_mutation(**kwargs): ...
+
+    return FakeGQLClientClass
+
+
+@pytest.fixture()
+def fake_gql_client():
+    """
+    Convenience fixture for default mock GQL client. To customize, use fake_gql_client_factory.
+    """
+    return fake_gql_client_factory()
+
+
+def input_dataset_parameters_part_factory(
+    parameter_count: int = 1,
+    parameter_value_count: int = 1,
+    has_date: bool = False,
+    has_file: bool = False,
+) -> list[dict]:
+    """Create a mock InputDatasetPartDocumentList with parameters."""
+    result = [
+        {
+            "parameterName": uuid4().hex[:6],
+            "parameterValues": [
+                {"parameterValueId": i, "parameterValue": json.dumps(uuid4().hex)}
+                for i in range(parameter_value_count)
+            ],
+        }
+        for _ in range(parameter_count)
+    ]
+    if has_date:
+        base = datetime(2018, 9, 14, 0, 0, 0)  # This date is before any possible start dates
+        for parameter_index, data in enumerate(result):
+            for item in data["parameterValues"]:
+                dt = base + timedelta(days=parameter_index)
+                item["parameterValueStartDate"] = dt.isoformat()
+    if has_file:
+        for data in result:
+            param_list = data["parameterValues"]
+            for item in param_list:
+                item["parameterValue"] = json.dumps(
+                    {
+                        "__file__": {
+                            "bucket": "data",
+                            "objectKey": f"parameters/{data['parameterName']}/{uuid4().hex}.dat",
+                        }
+                    }
+                )
+    return result
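This factory replaces the single shared FakeGQLClient that previously lived in conftest.py; a test that needs a non-default response prepends its own ResponseMapping, which is matched before the defaults. A hedged sketch of the intended usage (the test name and body are illustrative, not from this diff; mocker is the pytest-mock fixture used throughout these tests):

import json

from dkist_processing_common.models.graphql import RecipeInstanceResponse
from dkist_processing_common.models.graphql import RecipeRunProvenanceResponse
from dkist_processing_common.models.graphql import RecipeRunResponse
from dkist_processing_common.tests.mock_metadata_store import RecipeRunResponseMapping
from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory


def test_with_custom_tile_size(mocker):  # hypothetical test
    # Same shape as make_default_recipe_run_response(), but with a custom configuration.
    custom_response = RecipeRunResponse(
        recipeInstanceId=1,
        recipeInstance=RecipeInstanceResponse(recipeId=1, inputDatasetId=1),
        configuration=json.dumps({"tile_size": 128}),
        recipeRunProvenances=[
            RecipeRunProvenanceResponse(recipeRunProvenanceId=1, isTaskManual=False)
        ],
    )
    # The override is checked first, so it shadows the default recipeRuns mapping.
    FakeClient = fake_gql_client_factory(
        response_mapping_override=RecipeRunResponseMapping(response=custom_response)
    )
    mocker.patch(
        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeClient
    )
    # ... construct and run the task under test ...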
dkist_processing_common/tests/test_assemble_movie.py

@@ -10,7 +10,6 @@ from dkist_processing_common.models.constants import BudName
 from dkist_processing_common.models.fits_access import FitsAccessBase
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks.assemble_movie import AssembleMovie
-from dkist_processing_common.tests.conftest import FakeGQLClient


 @pytest.fixture
@@ -61,7 +60,6 @@ def assemble_task_with_tagged_movie_frames(
     task.constants._update(
         {
             BudName.num_dsps_repeats.value: num_dsps_repeats,
-            BudName.recipe_run_id.value: recipe_run_id,
         }
     )
     for d in range(num_dsps_repeats):
@@ -85,14 +83,16 @@ def assemble_task_with_tagged_movie_frames(
     "movie_dimensions",
     [pytest.param((2048, 1536), id="Even_dims"), pytest.param((2047, 1535), id="Odd_dims")],
 )
-def test_assemble_movie(assemble_task_with_tagged_movie_frames, mocker, movie_dimensions):
+def test_assemble_movie(
+    assemble_task_with_tagged_movie_frames, mocker, movie_dimensions, fake_gql_client
+):
     """
     Given: An AssembleMovie subclass with movie frames in scratch
     When: Calling the task
     Then: The movie is written and has an even number of pixels in both dimensions
     """
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     assemble_task_with_tagged_movie_frames()
     expected_dimensions = tuple([size + 1 if size % 2 else size for size in movie_dimensions])
dkist_processing_common/tests/test_assemble_quality.py

@@ -220,6 +220,17 @@ def quality_metrics(dataframe_json) -> list[Metric]:
             {"name": "hist 3", "value": 9.35, "warnings": "warning for historical metric 3"},
             ["QUALITY_HISTORICAL"],
         ),
+        Metric(
+            {
+                "input_wavelength_nm": [1001.0, 1002.0, 1003.0, 1004.0],
+                "input_spectrum": [1.0, 1.0, 0.5, 1.0],
+                "best_fit_wavelength_nm": [1001.5, 1002.6, 1003.7, 1004.8],
+                "best_fit_atlas": [1.0, 1.0, 0.4, 1.0],
+                "normalized_residuals": [0.0, 0.0, 0.1, 0.0],
+                "weights": None,
+            },
+            ["QUALITY_WAVECAL_FIT"],
+        ),
     ]
     return metrics
@@ -247,6 +258,20 @@ def plot_data_expected() -> Callable[[str], bool]:
     return expected


+@pytest.fixture()
+def multi_plot_data_expected() -> Callable[[str], bool]:
+    """
+    Tightly coupled with quality_metrics fixture and resultant report metric name
+    """
+    # names where multi_plot_data is expected to be populated
+    names = {"Wavelength Calibration Results"}
+
+    def expected(name: str) -> bool:
+        return name in names
+
+    return expected
+
+
 @pytest.fixture()
 def table_data_expected() -> Callable[[str], bool]:
     """
@@ -426,6 +451,7 @@ def test_assemble_quality_data(
     assemble_quality_data_task,
     recipe_run_id,
     plot_data_expected,
+    multi_plot_data_expected,
     table_data_expected,
     modmat_data_expected,
     histogram_data_expected,
@@ -447,14 +473,16 @@ def test_assemble_quality_data(
     quality_data = list(
         chain.from_iterable(task.read(tags=Tag.quality_data(), decoder=json_decoder))
     )
-    # With polcal, this would be
-    assert len(quality_data) ==
+    # With polcal, this would be 20, but the polcal metrics are not included with this task
+    assert len(quality_data) == 16
     for metric_data in quality_data:
         rm: ReportMetric = ReportMetric.from_dict(metric_data)
         assert isinstance(rm.name, str)
         assert isinstance(rm.description, str)
         if plot_data_expected(rm.name):
             assert rm.plot_data
+        if multi_plot_data_expected(rm.name):
+            assert rm.multi_plot_data
         if table_data_expected(rm.name):
             assert rm.table_data
         if modmat_data_expected(rm.name):
@@ -496,8 +524,8 @@ def test_assemble_quality_data_for_polcal(
     quality_data = list(
         chain.from_iterable(task.read(tags=Tag.quality_data(), decoder=json_decoder))
     )
-    # this is
-    assert len(quality_data) ==
+    # this is 20 with polcal
+    assert len(quality_data) == 20
     for metric_data in quality_data:
         rm: ReportMetric = ReportMetric.from_dict(metric_data)
         assert isinstance(rm.name, str)
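The two count assertions differ by the four polcal-only metrics (20 with polcal versus 16 without). Of the assembled metrics, only the report entry named "Wavelength Calibration Results" (built from the new QUALITY_WAVECAL_FIT fixture data above) is expected to carry multi_plot_data. A condensed, illustrative check using the same names as the surrounding tests:

wavecal_metrics = [
    rm
    for rm in (ReportMetric.from_dict(m) for m in quality_data)
    if rm.name == "Wavelength Calibration Results"
]
# One QUALITY_WAVECAL_FIT metric comes from the fixture, and it is rendered
# with multi_plot_data rather than the single-plot plot_data field.
assert len(wavecal_metrics) == 1
assert wavecal_metrics[0].multi_plot_data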
dkist_processing_common/tests/test_base.py

@@ -7,7 +7,6 @@ from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.models.tags import StemName
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks import WorkflowTaskBase
-from dkist_processing_common.tests.conftest import FakeGQLClient


 class Task(WorkflowTaskBase):
@@ -33,7 +32,7 @@ def base_task(tmp_path, recipe_run_id):

 @pytest.fixture
 def tags_and_expected_generic_name() -> (list[str], str):
-    random_seed = f"
+    random_seed = f"ZZ:Z{uuid4().hex[:6]}"
     tags = [
         Tag.input(),
         Tag.output(),
@@ -62,25 +61,11 @@ def tags_and_expected_generic_name() -> (list[str], str):
         f"{StemName.cs_step.value.replace('_', '-')}-4_"
         f"{StemName.modstate.value}-5_"
         f"{StemName.movie.value}_"
-        f"{random_seed}"
+        f"{random_seed.replace(':', '-')}"
     )
     return tags, expected_base_name


-def test_apm_spans(base_task):
-    """
-    Given: A WorkflowTaskBase task
-    When: Calling the task-specific apm_steps with weird inputs
-    Then: Errors happen when they're supposed to and not when they're not supposed to
-    """
-    with pytest.raises(RuntimeError):
-        with base_task.apm_processing_step("foo", span_type="bar"):
-            pass
-
-    with base_task.apm_task_step("foo", labels={"foo": "bar"}):
-        pass
-
-
 def test_tags(base_task):
     """
     Given: A WorkflowTaskBase task
@@ -123,6 +108,7 @@ def test_build_generic_tag_filename(base_task, tags_and_expected_generic_name):
     [
         pytest.param("A", id="single"),
         pytest.param(["A", "B"], id="list"),
+        pytest.param(["A", ["B", "C"]], id="nested list"),
     ],
 )
 def test_write_workflow_task_tag(base_task, other_tags: str | list[str]):
@@ -143,10 +129,10 @@ def test_write_workflow_task_tag(base_task, other_tags: str | list[str]):


 @pytest.fixture
-def rollback_task_setup(tmp_path, recipe_run_id, base_task, mocker) -> dict:
+def rollback_task_setup(tmp_path, recipe_run_id, base_task, mocker, fake_gql_client) -> dict:
     """Return setup data for a task that has data in scratch/constants written by 2 task names (The one from base_task and the RollbackTask)."""
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     # add data that should remain
     keep_tag = "test_keep_tag"
dkist_processing_common/tests/test_codecs.py

@@ -19,9 +19,15 @@ from astropy.io.fits import CompImageHDU
 from astropy.io.fits import HDUList
 from astropy.io.fits import Header
 from astropy.io.fits import PrimaryHDU
+from pydantic import BaseModel
+from pydantic import Field
+from pydantic import create_model

 from dkist_processing_common.codecs.asdf import asdf_decoder
 from dkist_processing_common.codecs.asdf import asdf_encoder
+from dkist_processing_common.codecs.asdf import asdf_fileobj_encoder
+from dkist_processing_common.codecs.basemodel import basemodel_decoder
+from dkist_processing_common.codecs.basemodel import basemodel_encoder
 from dkist_processing_common.codecs.bytes import bytes_decoder
 from dkist_processing_common.codecs.bytes import bytes_encoder
 from dkist_processing_common.codecs.fits import fits_access_decoder
@@ -33,10 +39,10 @@ from dkist_processing_common.codecs.iobase import iobase_decoder
 from dkist_processing_common.codecs.iobase import iobase_encoder
 from dkist_processing_common.codecs.json import json_decoder
 from dkist_processing_common.codecs.json import json_encoder
+from dkist_processing_common.codecs.quality import QualityDataEncoder
 from dkist_processing_common.codecs.quality import quality_data_decoder
 from dkist_processing_common.codecs.quality import quality_data_encoder
 from dkist_processing_common.codecs.quality import quality_data_hook
-from dkist_processing_common.codecs.quality import QualityDataEncoder
 from dkist_processing_common.codecs.str import str_decoder
 from dkist_processing_common.codecs.str import str_encoder
 from dkist_processing_common.models.fits_access import FitsAccessBase
@@ -99,6 +105,14 @@ def path_to_json(dictionary, tmp_file) -> Path:
     return tmp_file


+@pytest.fixture
+def pydantic_basemodel() -> BaseModel:
+    class Foo(BaseModel):
+        bar: int
+
+    return Foo(bar=123)
+
+
 @pytest.fixture
 def string() -> str:
     return "string"
@@ -129,6 +143,11 @@ def asdf_tree() -> dict:
     return {"metadata_value": "something", "data": np.empty((100, 100))}


+@pytest.fixture
+def asdf_obj(asdf_tree) -> dict:
+    return asdf.AsdfFile(asdf_tree)
+
+
 @pytest.fixture
 def path_to_asdf_file(asdf_tree, tmp_file) -> Path:
     asdf_obj = asdf.AsdfFile(asdf_tree)
@@ -164,7 +183,7 @@ def primary_hdu_list(ndarray_object, fits_header) -> HDUList:

 @pytest.fixture
 def path_to_primary_fits_file(primary_hdu_list, tmp_file) -> Path:
-    primary_hdu_list.writeto(tmp_file)
+    primary_hdu_list.writeto(tmp_file, checksum=True)
     return tmp_file


@@ -191,7 +210,7 @@ def compressed_hdu_list(ndarray_object, fits_header) -> HDUList:

 @pytest.fixture
 def path_to_compressed_fits_file(compressed_hdu_list, tmp_file) -> Path:
-    compressed_hdu_list.writeto(tmp_file)
+    compressed_hdu_list.writeto(tmp_file, checksum=True)
     return tmp_file


@@ -350,8 +369,10 @@ class DummyFitsAccess(FitsAccessBase):
         pytest.param("primary_hdu_list", fits_hdulist_encoder, id="fits uncompressed HDUList"),
         pytest.param("compressed_hdu_list", fits_hdulist_encoder, id="fits compressed HDUList"),
         pytest.param("dictionary", json_encoder, id="json"),
+        pytest.param("pydantic_basemodel", basemodel_encoder, id="pydantic basemodel"),
         pytest.param("string", str_encoder, id="str"),
         pytest.param("asdf_tree", asdf_encoder, id="asdf"),
+        pytest.param("asdf_obj", asdf_fileobj_encoder, id="asdf_obj"),
     ],
 )
 def test_encoder(data_fixture_name, encoder_function: Callable, request):
@@ -451,16 +472,27 @@ def test_bytesio_decoder(bytesIO_object, path_to_bytesIO):
         pytest.param("path_to_compressed_fits_file", id="compressed"),
     ],
 )
-def test_fits_hdu_decoder(path_fixture_name, ndarray_object, fits_header, request):
+@pytest.mark.parametrize(
+    "checksum", [pytest.param(True, id="checksum"), pytest.param(False, id="no_checksum")]
+)
+@pytest.mark.parametrize(
+    "decompress", [pytest.param(True, id="decompress"), pytest.param(False, id="no_decompress")]
+)
+def test_fits_hdu_decoder(
+    path_fixture_name, ndarray_object, fits_header, request, checksum, decompress
+):
     """
     Given: Path to a FITS file
     When: Decoding the path with the fits_hdu_decoder
     Then: The correct data are returned
     """
     file_path = request.getfixturevalue(path_fixture_name)
-    hdu = fits_hdu_decoder(file_path)
+    hdu = fits_hdu_decoder(file_path, checksum=checksum, disable_image_compression=not decompress)

-    assert np.array_equal(hdu.data, ndarray_object)
+    if "compressed" in path_fixture_name and not decompress:
+        assert not np.array_equal(hdu.data, ndarray_object)
+    else:
+        assert np.array_equal(hdu.data, ndarray_object)
     assert hdu.header["foo"] == fits_header["foo"]


@@ -471,7 +503,15 @@ def test_fits_hdu_decoder(path_fixture_name, ndarray_object, fits_header, request)
         pytest.param("path_to_compressed_fits_file", id="compressed"),
     ],
 )
-def test_fits_access_decoder(path_fixture_name, ndarray_object, fits_header, request):
+@pytest.mark.parametrize(
+    "checksum", [pytest.param(True, id="checksum"), pytest.param(False, id="no_checksum")]
+)
+@pytest.mark.parametrize(
+    "decompress", [pytest.param(True, id="decompress"), pytest.param(False, id="no_decompress")]
+)
+def test_fits_access_decoder(
+    path_fixture_name, ndarray_object, fits_header, request, checksum, decompress
+):
     """
     Given: Path to a FITS file
     When: Decoding the path with the fits_access_decoder
@@ -479,20 +519,47 @@ def test_fits_access_decoder(path_fixture_name, ndarray_object, fits_header, request)
     """
     file_path = request.getfixturevalue(path_fixture_name)

-    fits_obj = fits_access_decoder(file_path, fits_access_class=DummyFitsAccess)
+    fits_obj = fits_access_decoder(
+        file_path,
+        fits_access_class=DummyFitsAccess,
+        checksum=checksum,
+        disable_image_compression=not decompress,
+    )
     assert fits_obj.name == str(file_path)
-    assert np.array_equal(fits_obj.data, ndarray_object)
     assert fits_obj.foo == fits_header["foo"]
+    if "compressed" in path_fixture_name and not decompress:
+        assert not np.array_equal(fits_obj.data, ndarray_object)
+    else:
+        assert np.array_equal(fits_obj.data, ndarray_object)


-def test_fits_array_decoder(path_fixture_name, ndarray_object, request):
+@pytest.mark.parametrize(
+    "path_fixture_name",
+    [
+        pytest.param("path_to_primary_fits_file", id="uncompressed"),
+        pytest.param("path_to_compressed_fits_file", id="compressed"),
+    ],
+)
+@pytest.mark.parametrize(
+    "checksum", [pytest.param(True, id="checksum"), pytest.param(False, id="no_checksum")]
+)
+@pytest.mark.parametrize(
+    "decompress", [pytest.param(True, id="decompress"), pytest.param(False, id="no_decompress")]
+)
+def test_fits_array_decoder(path_fixture_name, ndarray_object, request, checksum, decompress):
     """
     Given: Path to a FITS file
     When: Decoding the path the fits_array_decoder
     Then: The correct data are returned
     """
-    file_path = request.getfixturevalue(path_fixture_name)
-    assert np.array_equal(ndarray_object, fits_array_decoder(file_path))
+    file_path = request.getfixturevalue(path_fixture_name)
+    array = fits_array_decoder(
+        file_path, checksum=checksum, disable_image_compression=not decompress
+    )
+    if "compressed" in path_fixture_name and not decompress:
+        assert not np.array_equal(array, ndarray_object)
+    else:
+        assert np.array_equal(ndarray_object, array)


 def test_fits_array_decoder_autosqueeze(
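All three FITS decoders now accept checksum and disable_image_compression and pass them through to the underlying FITS reader. A hedged sketch of calling them directly (the path is illustrative; the keyword semantics are inferred from the tests above):

from pathlib import Path

from dkist_processing_common.codecs.fits import fits_array_decoder
from dkist_processing_common.codecs.fits import fits_hdu_decoder

path = Path("scratch/frame.fits")  # illustrative path

# Verify the FITS CHECKSUM/DATASUM keywords while reading.
hdu = fits_hdu_decoder(path, checksum=True)

# Leave tile-compressed data compressed; the tests above assert that the
# resulting array then differs from the original uncompressed pixels.
compressed_hdu = fits_hdu_decoder(path, disable_image_compression=True)

# The array decoder takes the same keywords.
array = fits_array_decoder(path, checksum=True, disable_image_compression=False)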
@@ -593,6 +660,30 @@ def test_json_encoder_invalid(python_object: Any, expected_exception_type: type[Exception]):
     json_encoder(python_object)


+def test_basemodel_decoder(valid_json_codec, path_to_text_file):
+    """
+    Given: a python object that can be validated to a Pydantic BaseModel object is written to file as json
+    When: basemodel decoding is applied to the json file
+    Then: the string gets decoded to the correct Pydantic BaseModel object
+    """
+    # write python object to file as json string
+    python_object = valid_json_codec["python_object"]
+    path = path_to_text_file(json.dumps({"foo": python_object}))
+
+    # create basemodel on the fly
+    DynamicBaseModel = create_model(
+        "DynamicBaseModel", foo=(Any, Field(default_factory=type(python_object)))
+    )
+
+    # get the same object via the basemodel decoder
+    decoded_obj = basemodel_decoder(path, model=DynamicBaseModel)
+    if python_object is nan:
+        # By definition, nan != nan
+        assert isnan(decoded_obj.foo)
+    else:
+        assert decoded_obj.foo == python_object
+
+
 def test_quality_data_encoder_valid(valid_quality_codec):
     """
     Given: a python object that can be encoded as a json string
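The new basemodel codec gives tasks a typed alternative to raw json: a model instance is serialized on write and re-validated against a model class on read. A minimal round-trip sketch (assuming basemodel_encoder returns encoded bytes, as the package's other encoders do; the path is illustrative):

from pathlib import Path

from pydantic import BaseModel

from dkist_processing_common.codecs.basemodel import basemodel_decoder
from dkist_processing_common.codecs.basemodel import basemodel_encoder


class Foo(BaseModel):
    bar: int


path = Path("foo.json")  # illustrative path
path.write_bytes(basemodel_encoder(Foo(bar=123)))  # model -> json bytes on disk
decoded = basemodel_decoder(path, model=Foo)  # json file -> validated Foo
assert decoded == Foo(bar=123)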
dkist_processing_common/tests/test_constants.py

@@ -40,6 +40,21 @@ class ConstantsFinal(ConstantsBase):
         return self._db_dict["KEY 1"] ** 2  # Just to show that you can do whatever you want


+def test_bud_names_in_constant_base():
+    """
+    Given: a set of constants in the BudName string enumeration
+    When: the ConstantsBase class defines a set of properties
+    Then: the sets are the same (except for constants that are not in the redis database)
+    """
+    all_bud_names = {b.name for b in BudName}
+    all_properties_in_constants_base = {
+        k for k, v in ConstantsBase.__dict__.items() if isinstance(v, property)
+    }
+    constants_not_in_redis = {"dataset_id", "stokes_params"}
+    all_buds_defined_in_constant_base = all_properties_in_constants_base - constants_not_in_redis
+    assert all_bud_names == all_buds_defined_in_constant_base
+
+
 def test_constants_db_as_dict(test_constants_db, test_dict):
     """
     Given: a ConstantsDb object and a python dictionary
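The new test above encodes a project convention: every BudName member maps one-to-one to a ConstantsBase property backed by the constants database, with only dataset_id and stokes_params living outside redis. A hedged sketch of the property pattern being enforced (hypothetical subclass; import path assumed from the surrounding tests):

from dkist_processing_common.models.constants import ConstantsBase


class InstrumentConstants(ConstantsBase):
    # Instrument repos extend the base pattern: one property per constant,
    # reading the parsed value out of the backing constants database.
    @property
    def my_instrument_constant(self) -> int:
        return self._db_dict["MY_INSTRUMENT_CONSTANT"]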
|