dkist-processing-common 10.8.1rc1__py3-none-any.whl → 10.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. dkist_processing_common/codecs/fits.py +6 -12
  2. dkist_processing_common/manual.py +5 -3
  3. dkist_processing_common/models/fried_parameter.py +41 -0
  4. dkist_processing_common/models/graphql.py +3 -13
  5. dkist_processing_common/models/parameters.py +28 -65
  6. dkist_processing_common/parsers/quality.py +1 -0
  7. dkist_processing_common/tasks/mixin/input_dataset.py +166 -0
  8. dkist_processing_common/tasks/mixin/metadata_store.py +4 -7
  9. dkist_processing_common/tasks/mixin/quality/_metrics.py +19 -14
  10. dkist_processing_common/tasks/quality_metrics.py +1 -1
  11. dkist_processing_common/tasks/transfer_input_data.py +70 -61
  12. dkist_processing_common/tasks/write_l1.py +29 -3
  13. dkist_processing_common/tests/conftest.py +7 -24
  14. dkist_processing_common/tests/test_codecs.py +0 -38
  15. dkist_processing_common/tests/test_fried_parameter.py +27 -0
  16. dkist_processing_common/tests/test_input_dataset.py +308 -79
  17. dkist_processing_common/tests/test_parameters.py +22 -71
  18. dkist_processing_common/tests/test_quality_mixin.py +32 -22
  19. dkist_processing_common/tests/test_transfer_input_data.py +45 -131
  20. dkist_processing_common/tests/test_write_l1.py +143 -10
  21. {dkist_processing_common-10.8.1rc1.dist-info → dkist_processing_common-10.8.3.dist-info}/METADATA +2 -2
  22. {dkist_processing_common-10.8.1rc1.dist-info → dkist_processing_common-10.8.3.dist-info}/RECORD +24 -27
  23. {dkist_processing_common-10.8.1rc1.dist-info → dkist_processing_common-10.8.3.dist-info}/WHEEL +1 -1
  24. changelog/235.feature.rst +0 -3
  25. changelog/235.misc.1.rst +0 -2
  26. changelog/235.misc.rst +0 -1
  27. dkist_processing_common/codecs/array.py +0 -19
  28. dkist_processing_common/codecs/basemodel.py +0 -21
  29. dkist_processing_common/models/input_dataset.py +0 -113
  30. {dkist_processing_common-10.8.1rc1.dist-info → dkist_processing_common-10.8.3.dist-info}/top_level.txt +0 -0
@@ -39,9 +39,9 @@ def quality_task(tmp_path, recipe_run_id):
39
39
  @pytest.fixture
40
40
  def plot_data():
41
41
  datetimes_a = ["2021-01-01T01:01:01", "2021-01-01T02:01:01"]
42
- values_a = [3, 4]
42
+ values_a = [0.1, 0.2]
43
43
  datetimes_b = ["2020-01-01T01:01:01", "2020-01-01T02:01:01"]
44
- values_b = [1, 2]
44
+ values_b = [0.15, 0.25]
45
45
  return datetimes_a, values_a, datetimes_b, values_b
46
46
 
47
47
 
@@ -198,8 +198,9 @@ def test_store_ao_status_and_fried_parameter(quality_task, ao_values):
198
198
  """
199
199
  task = quality_task
200
200
  datetimes = ["2020-01-01T01:01:01", "2020-01-01T02:01:01"]
201
- fried_values = [3.0, 4.0]
202
- combined_values = [[ao, r0] for ao, r0 in zip(ao_values, fried_values)]
201
+ fried_values = [0.1, 0.2]
202
+ oob_values = [25, 50]
203
+ combined_values = [[ao, r0, oob] for ao, r0, oob in zip(ao_values, fried_values, oob_values)]
203
204
  task.quality_store_ao_status_and_fried_parameter(datetimes=datetimes, values=combined_values)
204
205
  path = list(task.read(tags=Tag.quality("AO_STATUS")))
205
206
  assert len(path) == 1
@@ -220,10 +221,11 @@ def test_store_ao_status_and_fried_parameter(quality_task, ao_values):
220
221
  @pytest.mark.parametrize(
221
222
  "combined_values",
222
223
  [
223
- pytest.param([[True, 1], [None, 2]], id="AO_some_none"),
224
- pytest.param([[True, 1], [True, None]], id="Fried_some_none"),
225
- pytest.param([[None, 1], [None, 2]], id="AO_all_none"),
226
- pytest.param([[True, None], [True, None]], id="Fried_all_none"),
224
+ pytest.param([[True, 0.1, 25], [None, 0.2, 25]], id="AO_some_none"),
225
+ pytest.param([[True, 0.1, 25], [True, None, 25]], id="Fried_some_none"),
226
+ pytest.param([[None, 0.1, 25], [None, 0.2, 25]], id="AO_all_none"),
227
+ pytest.param([[True, None, 25], [True, None, 25]], id="Fried_all_none"),
228
+ pytest.param([[True, 0.1, None], [True, 0.2, None]], id="Out_of_bounds_all_none"),
227
229
  ],
228
230
  )
229
231
  def test_store_ao_status_and_fried_parameter_with_nones(quality_task, combined_values):
@@ -231,13 +233,14 @@ def test_store_ao_status_and_fried_parameter_with_nones(quality_task, combined_v
231
233
  datetimes = ["2020-01-01T01:01:01", "2020-01-01T02:01:01"]
232
234
  task.quality_store_ao_status_and_fried_parameter(datetimes=datetimes, values=combined_values)
233
235
  path = list(task.read(tags=Tag.quality("AO_STATUS")))
234
- ao_values = [ao for ao, r0 in combined_values]
235
- fried_values = [r0 for ao, r0 in combined_values]
236
+ ao_values = [ao for ao, r0, oob in combined_values]
237
+ fried_values = [r0 for ao, r0, oob in combined_values]
238
+ ao_out_of_bounds = [oob for ao, r0, oob in combined_values]
236
239
  if not all(ao is None for ao in ao_values):
237
240
  assert len(path) == 1
238
241
  with path[0].open() as f:
239
242
  data = json.load(f)
240
- assert len(data) == sum(1 for ao, r0 in combined_values if ao is not None)
243
+ assert len(data) == sum(1 for ao, r0, oob in combined_values if ao is not None)
241
244
  else:
242
245
  assert len(path) == 0
243
246
  path = list(task.read(tags=Tag.quality("FRIED_PARAMETER")))
@@ -246,7 +249,7 @@ def test_store_ao_status_and_fried_parameter_with_nones(quality_task, combined_v
246
249
  with path[0].open() as f:
247
250
  data = json.load(f)
248
251
  assert len(data["y_values"]) == sum(
249
- 1 for ao, r0 in combined_values if ao is True and r0 is not None
252
+ 1 for ao, r0, oob in combined_values if ao is True and r0 is not None
250
253
  )
251
254
  else:
252
255
  assert len(path) == 0
@@ -263,7 +266,8 @@ def test_build_ao_status(quality_task, plot_data):
263
266
  datetimes = datetimes_a + datetimes_b
264
267
  fried_values = values_a + values_b
265
268
  ao_values = [False, True, True, True]
266
- combined_values = [[ao, r0] for ao, r0 in zip(ao_values, fried_values)]
269
+ oob_values = [25, 50, None, 50]
270
+ combined_values = [[ao, r0, oob] for ao, r0, oob in zip(ao_values, fried_values, oob_values)]
267
271
  task.quality_store_ao_status_and_fried_parameter(datetimes=datetimes, values=combined_values)
268
272
  metric = task.quality_build_ao_status()
269
273
  assert metric["name"] == "Adaptive Optics Status"
@@ -284,9 +288,15 @@ def test_build_fried_parameter(quality_task, plot_data):
284
288
  task = quality_task
285
289
  datetimes_a, fried_values_a, datetimes_b, fried_values_b = plot_data
286
290
  ao_values_a = [True, True]
287
- combined_values_a = [[ao, r0] for ao, r0 in zip(ao_values_a, fried_values_a)]
291
+ oob_values_a = [25, 50]
292
+ combined_values_a = [
293
+ [ao, r0, oob] for ao, r0, oob in zip(ao_values_a, fried_values_a, oob_values_a)
294
+ ]
288
295
  ao_values_b = [True, True]
289
- combined_values_b = [[ao, r0] for ao, r0 in zip(ao_values_b, fried_values_b)]
296
+ oob_values_b = [25, 50]
297
+ combined_values_b = [
298
+ [ao, r0, oob] for ao, r0, oob in zip(ao_values_b, fried_values_b, oob_values_b)
299
+ ]
290
300
  task.quality_store_ao_status_and_fried_parameter(
291
301
  datetimes=datetimes_a, values=combined_values_a
292
302
  )
@@ -307,14 +317,14 @@ def test_build_fried_parameter(quality_task, plot_data):
307
317
  "2021-01-01T02:01:01",
308
318
  ]
309
319
  ]
310
- assert metric["plot_data"]["series_data"][""][1] == [1, 2, 3, 4]
320
+ assert metric["plot_data"]["series_data"][""][1] == [0.15, 0.25, 0.1, 0.2]
311
321
  assert metric["name"] == "Fried Parameter"
312
322
  assert metric["metric_code"] == "FRIED_PARAMETER"
313
323
  assert metric["facet"] is None
314
324
  assert metric["warnings"] is None
315
325
  assert (
316
326
  metric["statement"]
317
- == "Average valid Fried Parameter measurements for L1 dataset: 2.5 ± 1.12 m"
327
+ == "Average valid Fried Parameter measurements for L1 dataset: 0.18 ± 0.06 m"
318
328
  )
319
329
 
320
330
 
@@ -340,12 +350,12 @@ def test_build_light_level(quality_task, plot_data):
340
350
  "2021-01-01T02:01:01",
341
351
  ]
342
352
  ]
343
- assert metric["plot_data"]["series_data"][""][1] == [1, 2, 3, 4]
353
+ assert metric["plot_data"]["series_data"][""][1] == [0.15, 0.25, 0.1, 0.2]
344
354
  assert metric["name"] == "Light Level"
345
355
  assert metric["metric_code"] == "LIGHT_LEVEL"
346
356
  assert metric["facet"] is None
347
357
  assert metric["warnings"] is None
348
- assert metric["statement"] == f"Average Light Level for L1 dataset: 2.5 ± 1.12 adu"
358
+ assert metric["statement"] == f"Average Light Level for L1 dataset: 0.18 ± 0.06 adu"
349
359
 
350
360
 
351
361
  def test_build_frame_average(quality_task, plot_data):
@@ -461,7 +471,7 @@ def test_build_noise(quality_task, plot_data):
461
471
  "2021-01-01T02:01:01",
462
472
  ]
463
473
  ]
464
- assert metric["plot_data"]["series_data"]["I"][1] == [1, 2, 3, 4]
474
+ assert metric["plot_data"]["series_data"]["I"][1] == [0.15, 0.25, 0.1, 0.2]
465
475
  assert metric["name"] == "Noise Estimation"
466
476
  assert metric["metric_code"] == "NOISE"
467
477
  assert metric["facet"] is None
@@ -493,7 +503,7 @@ def test_build_sensitivity(quality_task, plot_data):
493
503
  "2021-01-01T02:01:01",
494
504
  ]
495
505
  ]
496
- assert metric["plot_data"]["series_data"]["I"][1] == [1, 2, 3, 4]
506
+ assert metric["plot_data"]["series_data"]["I"][1] == [0.15, 0.25, 0.1, 0.2]
497
507
  assert metric["name"] == f"Sensitivity"
498
508
  assert metric["metric_code"] == "SENSITIVITY"
499
509
  assert metric["facet"] is None
@@ -692,7 +702,7 @@ def test_build_report(quality_task, plot_data):
692
702
  task.quality_store_task_type_counts(task_type="dark", total_frames=100, frames_not_used=7)
693
703
  task.quality_store_task_type_counts(task_type="gain", total_frames=100, frames_not_used=0)
694
704
  task.quality_store_ao_status_and_fried_parameter(
695
- datetimes=datetimes, values=[[True, values[0]], [True, values[1]]]
705
+ datetimes=datetimes, values=[[True, values[0], values[0]], [True, values[1], values[1]]]
696
706
  )
697
707
  task.quality_store_light_level(datetimes=datetimes, values=values)
698
708
  task.quality_store_frame_average(
@@ -5,12 +5,10 @@ from pathlib import Path
5
5
  import pytest
6
6
 
7
7
  from dkist_processing_common._util.scratch import WorkflowFileSystem
8
- from dkist_processing_common.codecs.basemodel import basemodel_decoder
8
+ from dkist_processing_common.codecs.json import json_decoder
9
9
  from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
10
- from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
11
10
  from dkist_processing_common.models.tags import Tag
12
11
  from dkist_processing_common.tasks.transfer_input_data import TransferL0Data
13
- from dkist_processing_common.tests.conftest import create_input_frames
14
12
  from dkist_processing_common.tests.conftest import create_parameter_files
15
13
  from dkist_processing_common.tests.conftest import FakeGQLClient
16
14
 
@@ -20,7 +18,7 @@ class TransferL0DataTask(TransferL0Data):
20
18
  ...
21
19
 
22
20
 
23
- class FakeGQLClientMissingInputDatasetCalibrationPart(FakeGQLClient):
21
+ class FakeGQLClientMissingInputDatasetPart(FakeGQLClient):
24
22
  """Same metadata mocker with calibration input dataset part missing."""
25
23
 
26
24
  def execute_gql_query(self, **kwargs):
@@ -55,152 +53,103 @@ def transfer_l0_data_task(recipe_run_id, tmp_path, mocker):
55
53
 
56
54
 
57
55
  @pytest.fixture
58
- def transfer_l0_data_task_missing_calibration_part(recipe_run_id, tmp_path, mocker):
56
+ def transfer_l0_data_task_missing_part(recipe_run_id, tmp_path, mocker):
59
57
  yield from _transfer_l0_data_task_with_client(
60
- recipe_run_id, tmp_path, mocker, FakeGQLClientMissingInputDatasetCalibrationPart
58
+ recipe_run_id, tmp_path, mocker, FakeGQLClientMissingInputDatasetPart
61
59
  )
62
60
 
63
61
 
64
- @pytest.mark.parametrize(
65
- "expected_doc, tag",
66
- [
67
- pytest.param(
68
- FakeGQLClient.observe_frames_doc_object,
69
- Tag.input_dataset_observe_frames(),
70
- id="observe_frames",
71
- ),
72
- pytest.param(
73
- FakeGQLClient.calibration_frames_doc_object,
74
- Tag.input_dataset_calibration_frames(),
75
- id="calibration_frames",
76
- ),
77
- pytest.param(
78
- FakeGQLClient.parameters_doc_object,
79
- Tag.input_dataset_parameters(),
80
- id="parameters",
81
- ),
82
- ],
83
- )
84
- def test_download_dataset(transfer_l0_data_task, expected_doc, tag):
62
+ def test_download_dataset(transfer_l0_data_task):
85
63
  """
86
64
  :Given: a TransferL0Data task with a valid input dataset
87
65
  :When: downloading the dataset documents from the metadata store
88
- :Then: the correct documents are written to disk, along with tags for file parameters
66
+ :Then: the correct documents are written to disk
89
67
  """
90
68
  # Given
91
69
  task = transfer_l0_data_task
92
70
  # When
93
71
  task.download_input_dataset()
94
72
  # Then
95
- doc_from_file = next(
96
- task.read(tags=tag, decoder=basemodel_decoder, model=InputDatasetPartDocumentList)
73
+ expected_observe_doc = FakeGQLClient.observe_frames_doc_object
74
+ observe_doc_from_file = next(
75
+ task.read(tags=Tag.input_dataset_observe_frames(), decoder=json_decoder)
76
+ )
77
+ assert observe_doc_from_file == expected_observe_doc
78
+ expected_calibration_doc = FakeGQLClient.calibration_frames_doc_object
79
+ calibration_doc_from_file = next(
80
+ task.read(tags=Tag.input_dataset_calibration_frames(), decoder=json_decoder)
81
+ )
82
+ assert calibration_doc_from_file == expected_calibration_doc
83
+ expected_parameters_doc = FakeGQLClient.parameters_doc_object
84
+ parameters_doc_from_file = next(
85
+ task.read(tags=Tag.input_dataset_parameters(), decoder=json_decoder)
97
86
  )
98
- doc_list_from_file = doc_from_file.model_dump()["doc_list"]
99
- if (
100
- tag == Tag.input_dataset_parameters()
101
- ): # parameter doc gets written with tags for file objects
102
- for item in expected_doc:
103
- for val in item["parameterValues"]:
104
- if "__file__" in val["parameterValue"]:
105
- file_dict = json.loads(val["parameterValue"])["__file__"]
106
- file_dict["tag"] = Tag.parameter(Path(file_dict["objectKey"]).name)
107
- val["parameterValue"] = json.dumps({"__file__": file_dict})
108
- assert doc_list_from_file == expected_doc
87
+ assert parameters_doc_from_file == expected_parameters_doc
109
88
 
110
89
 
111
- def test_download_dataset_missing_part(transfer_l0_data_task_missing_calibration_part):
90
+ def test_download_dataset_missing_part(transfer_l0_data_task_missing_part):
112
91
  """
113
92
  :Given: a TransferL0Data task with a valid input dataset without calibration frames
114
93
  :When: downloading the dataset documents from the metadata store
115
94
  :Then: the correct number of documents are written to disk
116
95
  """
117
96
  # Given
118
- task = transfer_l0_data_task_missing_calibration_part
97
+ task = transfer_l0_data_task_missing_part
119
98
  # When
120
99
  task.download_input_dataset()
121
100
  # Then
122
101
  observe_doc_from_file = next(
123
- task.read(
124
- tags=Tag.input_dataset_observe_frames(),
125
- decoder=basemodel_decoder,
126
- model=InputDatasetPartDocumentList,
127
- )
102
+ task.read(tags=Tag.input_dataset_observe_frames(), decoder=json_decoder)
128
103
  )
129
104
  parameters_doc_from_file = next(
130
- task.read(
131
- tags=Tag.input_dataset_parameters(),
132
- decoder=basemodel_decoder,
133
- model=InputDatasetPartDocumentList,
134
- )
105
+ task.read(tags=Tag.input_dataset_parameters(), decoder=json_decoder)
135
106
  )
136
107
  with pytest.raises(StopIteration):
137
108
  calibration_doc_from_file = next(
138
- task.read(
139
- tags=Tag.input_dataset_calibration_frames(),
140
- decoder=basemodel_decoder,
141
- model=InputDatasetPartDocumentList,
142
- )
109
+ task.read(tags=Tag.input_dataset_calibration_frames(), decoder=json_decoder)
143
110
  )
144
111
 
145
112
 
146
- @pytest.mark.parametrize(
147
- "task_name",
148
- [
149
- pytest.param(
150
- "transfer_l0_data_task",
151
- id="observe_and_calibration_frames",
152
- ),
153
- pytest.param(
154
- "transfer_l0_data_task_missing_calibration_part",
155
- id="calibration_frames_missing",
156
- ),
157
- ],
158
- )
159
- def test_build_frame_transfer_list_formatted(request, task_name):
113
+ def test_format_frame_transfer_items(transfer_l0_data_task):
160
114
  """
161
- :Given: a TransferL0Data task with downloaded input dataset docs
162
- :When: building a list of frames in the input dataset formatted for transfer
163
- :Then: the correct items are correctly loaded into GlobusTransferItem objects
115
+ :Given: a TransferL0Data task with a downloaded input dataset
116
+ :When: formatting frames in the input dataset for transfer
117
+ :Then: the items are correctly loaded into GlobusTransferItem objects
164
118
  """
165
119
  # Given
166
- task = request.getfixturevalue(task_name)
120
+ task = transfer_l0_data_task
167
121
  task.download_input_dataset()
168
122
  # When
169
- observe_transfer_objects = task.build_transfer_list(doc_tag=Tag.input_dataset_observe_frames())
170
- calibration_transfer_objects = task.build_transfer_list(
171
- doc_tag=Tag.input_dataset_calibration_frames()
172
- )
173
- transfer_objects = observe_transfer_objects + calibration_transfer_objects
174
- formatted_transfer_items = task.format_transfer_items(input_dataset_objects=transfer_objects)
123
+ transfer_items = task.format_frame_transfer_items()
175
124
  # Then
176
125
  source_filenames = []
177
126
  destination_filenames = []
178
- expected_frames = list(FakeGQLClient.observe_frames_doc_object)
179
- if "missing_calibration_part" not in task_name:
180
- expected_frames += FakeGQLClient.calibration_frames_doc_object
181
- for frame_set in expected_frames:
127
+ all_frames = (
128
+ FakeGQLClient.observe_frames_doc_object + FakeGQLClient.calibration_frames_doc_object
129
+ )
130
+ for frame_set in all_frames:
182
131
  for key in frame_set["object_keys"]:
183
132
  source_filenames.append(os.path.join("/", frame_set["bucket"], key))
184
133
  destination_filenames.append(Path(key).name)
185
- assert len(formatted_transfer_items) == len(source_filenames)
186
- for item in formatted_transfer_items:
134
+ assert len(transfer_items) == len(source_filenames)
135
+ for item in transfer_items:
187
136
  assert item.source_path.as_posix() in source_filenames
188
137
  assert item.destination_path.name in destination_filenames
189
138
  assert not item.recursive
190
139
 
191
140
 
192
- def test_build_parameter_file_transfer_items(transfer_l0_data_task):
141
+ def test_format_parameter_file_transfer_items(transfer_l0_data_task):
193
142
  """
194
- :Given: a TransferL0Data task with downloaded input dataset docs
195
- :When: building a list of parameter files formatted for transfer
196
- :Then: the correct items are correctly loaded into GlobusTransferItem objects
143
+ :Given: a TransferL0Data task with a downloaded input dataset
144
+ :When: formatting parameter files in the input dataset for transfer
145
+ :Then: the items are correctly loaded into GlobusTransferItem objects
197
146
  """
198
147
  # Given
199
148
  task = transfer_l0_data_task
200
149
  task.download_input_dataset()
150
+ create_parameter_files(task)
201
151
  # When
202
- transfer_objects = task.build_transfer_list(doc_tag=Tag.input_dataset_parameters())
203
- formatted_transfer_items = task.format_transfer_items(input_dataset_objects=transfer_objects)
152
+ transfer_items = task.format_parameter_transfer_items()
204
153
  # Then
205
154
  source_filenames = []
206
155
  destination_filenames = []
@@ -213,44 +162,9 @@ def test_build_parameter_file_transfer_items(transfer_l0_data_task):
213
162
  object_key = value_dict["__file__"]["objectKey"]
214
163
  source_filenames.append(os.path.join("/", bucket, object_key))
215
164
  destination_filenames.append(Path(object_key).name)
216
- assert len(formatted_transfer_items) == len(source_filenames)
217
- for transfer_item in formatted_transfer_items:
165
+ assert len(transfer_items) == len(source_filenames)
166
+ for transfer_item in transfer_items:
218
167
  assert transfer_item.source_path.as_posix() in source_filenames
219
168
  assert transfer_item.destination_path.name in destination_filenames
220
169
  assert str(transfer_item.destination_path).startswith(str(task.scratch.workflow_base_path))
221
170
  assert not transfer_item.recursive
222
-
223
-
224
- def test_tag_transfer_items(transfer_l0_data_task):
225
- """
226
- :Given: a TransferL0Data task with downloaded input dataset frames and parameter files
227
- :When: tagging the downloaded files
228
- :Then: the downloaded items are correctly tagged
229
- """
230
- # Given
231
- task = transfer_l0_data_task
232
- task.download_input_dataset()
233
- observe_transfer_objects = task.build_transfer_list(doc_tag=Tag.input_dataset_observe_frames())
234
- calibration_transfer_objects = task.build_transfer_list(
235
- doc_tag=Tag.input_dataset_calibration_frames()
236
- )
237
- frame_transfer_objects = observe_transfer_objects + calibration_transfer_objects
238
- create_input_frames(task)
239
- parameter_transfer_objects = task.build_transfer_list(doc_tag=Tag.input_dataset_parameters())
240
- create_parameter_files(task)
241
- # When
242
- transfer_objects = frame_transfer_objects + parameter_transfer_objects
243
- task.tag_transfer_objects(input_dataset_objects=transfer_objects)
244
- # Then
245
- input_tags = [Tag.input(), Tag.frame()]
246
- input_frames_on_disk = list(task.scratch.find_all(tags=input_tags))
247
- for obj in frame_transfer_objects:
248
- destination_path = task.scratch.absolute_path(obj.object_key)
249
- assert destination_path in input_frames_on_disk
250
- assert len(input_frames_on_disk) == len(frame_transfer_objects)
251
- for obj in parameter_transfer_objects:
252
- destination_path = task.scratch.absolute_path(obj.object_key)
253
- param_tag = Tag.parameter(Path(obj.object_key))
254
- param_file_on_disk = list(task.scratch.find_all(tags=param_tag))
255
- assert destination_path in param_file_on_disk
256
- assert len(param_file_on_disk) == 1
@@ -25,7 +25,7 @@ from dkist_processing_common.tasks.write_l1 import WriteL1Frame
25
25
  from dkist_processing_common.tests.conftest import FakeGQLClient
26
26
  from dkist_processing_common.tests.conftest import TILE_SIZE
27
27
  from dkist_processing_common.tests.test_transfer_input_data import (
28
- FakeGQLClientMissingInputDatasetCalibrationPart,
28
+ FakeGQLClientMissingInputDatasetPart,
29
29
  )
30
30
 
31
31
 
@@ -101,6 +101,12 @@ class CompleteWriteL1Frame(WriteL1Frame):
101
101
  return WavelengthRange(min=1075.0 * u.nm, max=1085.0 * u.nm)
102
102
 
103
103
 
104
+ class CompleteWriteL1FrameWithEmptyWaveband(CompleteWriteL1Frame):
105
+ def get_wavelength_range(self, header: fits.Header) -> WavelengthRange:
106
+ # Return an empty range to test the empty waveband case
107
+ return WavelengthRange(min=10000.0 * u.nm, max=10050.0 * u.nm)
108
+
109
+
104
110
  @dataclass
105
111
  class FakeConstantDb:
106
112
  INSTRUMENT: str = "TEST"
@@ -157,9 +163,70 @@ def write_l1_task(request, recipe_run_id, tmp_path):
157
163
  @pytest.fixture(
158
164
  scope="function",
159
165
  params=[
160
- pytest.param({"AO_LOCK": True}, id="AO_LOCK_True"),
161
- pytest.param({"AO_LOCK": False}, id="AO_LOCK_False"),
162
- pytest.param({}, id="AO_LOCK_missing"),
166
+ pytest.param((1, "complete_common_header"), id="Intensity"),
167
+ pytest.param((4, "complete_polarimetric_header"), id="Polarimetric"),
168
+ ],
169
+ )
170
+ def write_l1_task_with_empty_waveband(recipe_run_id, tmp_path, request):
171
+ with CompleteWriteL1FrameWithEmptyWaveband(
172
+ recipe_run_id=recipe_run_id,
173
+ workflow_name="workflow_name",
174
+ workflow_version="workflow_version",
175
+ ) as task:
176
+ task.scratch = WorkflowFileSystem(recipe_run_id=recipe_run_id, scratch_base_path=tmp_path)
177
+ num_of_stokes_params, header_fixture_name = request.param
178
+ header = request.getfixturevalue(header_fixture_name)
179
+ stokes_params = ["I", "Q", "U", "V"]
180
+ used_stokes_params = []
181
+ hdu = fits.PrimaryHDU(data=np.random.random(size=(1, 128, 128)) * 10, header=header)
182
+ hdu.header["IPTASK"] = "level0_only key to be removed"
183
+ hdul = fits.HDUList([hdu])
184
+ for i in range(num_of_stokes_params):
185
+ task.write(
186
+ data=hdul,
187
+ tags=[
188
+ Tag.calibrated(),
189
+ Tag.frame(),
190
+ Tag.stokes(stokes_params[i]),
191
+ Tag.dsps_repeat(i),
192
+ ],
193
+ encoder=fits_hdulist_encoder,
194
+ )
195
+ used_stokes_params.append(stokes_params[i])
196
+ task.constants._update(asdict(FakeConstantDb()))
197
+ yield task, used_stokes_params, header
198
+ task._purge()
199
+
200
+
201
+ @pytest.fixture(
202
+ scope="function",
203
+ params=[
204
+ pytest.param(
205
+ {"AO_LOCK": True, "ATMOS_R0": 0.2, "OOBSHIFT": 17}, id="AO_LOCK_True_good_R0_good_oob"
206
+ ),
207
+ pytest.param(
208
+ {"AO_LOCK": True, "ATMOS_R0": 1, "OOBSHIFT": 17}, id="AO_LOCK_True_bad_R0_good_oob"
209
+ ),
210
+ pytest.param(
211
+ {"AO_LOCK": False, "ATMOS_R0": 0.2, "OOBSHIFT": 17}, id="AO_LOCK_False_good_R0_good_oob"
212
+ ),
213
+ pytest.param(
214
+ {"AO_LOCK": False, "ATMOS_R0": 1, "OOBSHIFT": 17}, id="AO_LOCK_False_bad_R0_good_oob"
215
+ ),
216
+ pytest.param(
217
+ {"AO_LOCK": True, "ATMOS_R0": 0.2, "OOBSHIFT": 150}, id="AO_LOCK_True_good_R0_bad_oob"
218
+ ),
219
+ pytest.param(
220
+ {"AO_LOCK": True, "ATMOS_R0": 1, "OOBSHIFT": 150}, id="AO_LOCK_True_bad_R0_bad_oob"
221
+ ),
222
+ pytest.param(
223
+ {"AO_LOCK": False, "ATMOS_R0": 0.2, "OOBSHIFT": 150}, id="AO_LOCK_False_good_R0_bad_oob"
224
+ ),
225
+ pytest.param(
226
+ {"AO_LOCK": False, "ATMOS_R0": 1, "OOBSHIFT": 150}, id="AO_LOCK_False_bad_R0_bad_oob"
227
+ ),
228
+ pytest.param({"ATMOS_R0": 0.2, "OOBSHIFT": 17}, id="AO_LOCK_missing"),
229
+ pytest.param({"ATMOS_R0": 0.2, "AO_LOCK": True}, id="OOBSHIFT_missing"),
163
230
  ],
164
231
  )
165
232
  def write_l1_task_no_data(request, recipe_run_id, tmp_path, complete_common_header):
@@ -172,10 +239,10 @@ def write_l1_task_no_data(request, recipe_run_id, tmp_path, complete_common_head
172
239
  ):
173
240
  task.scratch = WorkflowFileSystem(recipe_run_id=recipe_run_id, scratch_base_path=tmp_path)
174
241
  header = complete_common_header
175
- header.pop("AO_LOCK", None) # If it's not required, shouldn't be here in the first place??
242
+ header.pop("AO_LOCK", None)
243
+ header.pop("ATMOS_R0", None)
244
+ header.pop("OOBSHIFT", None)
176
245
  header.update(request.param)
177
- fried_parameter = 0.2
178
- header["ATMOS_R0"] = fried_parameter
179
246
  hdu = fits.PrimaryHDU(data=np.random.random(size=(1, 1, 1)) * 1, header=header)
180
247
  hdul = fits.HDUList([hdu])
181
248
  task.write(
@@ -187,7 +254,9 @@ def write_l1_task_no_data(request, recipe_run_id, tmp_path, complete_common_head
187
254
  encoder=fits_hdulist_encoder,
188
255
  )
189
256
  task.constants._update(asdict(FakeConstantDb()))
190
- yield task, header, fried_parameter
257
+ fried_parameter = request.param["ATMOS_R0"]
258
+ oob_shift = request.param.get("OOBSHIFT")
259
+ yield task, header, fried_parameter, oob_shift
191
260
  task._purge()
192
261
 
193
262
 
@@ -503,7 +572,7 @@ def test_missing_input_dataset_part(write_l1_task, mocker):
503
572
  """
504
573
  mocker.patch(
505
574
  "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
506
- new=FakeGQLClientMissingInputDatasetCalibrationPart,
575
+ new=FakeGQLClientMissingInputDatasetPart,
507
576
  )
508
577
  task, _, _ = write_l1_task
509
578
  task()
@@ -606,7 +675,7 @@ def test_check_r0_ao_lock(write_l1_task_no_data):
606
675
  :When: writing, check if the AO lock is on
607
676
  :Then: write the r0 value if AO lock on, don't write if AO lock off
608
677
  """
609
- task, header, r0 = write_l1_task_no_data
678
+ task, header, r0, _ = write_l1_task_no_data
610
679
  header_after_check = task.remove_invalid_r0_values(header=header)
611
680
  if header.get("AO_LOCK"):
612
681
  assert header_after_check["ATMOS_R0"] == header["ATMOS_R0"]
@@ -616,3 +685,67 @@ def test_check_r0_ao_lock(write_l1_task_no_data):
616
685
  with pytest.raises(KeyError, match="Keyword 'ATMOS_R0' not found"):
617
686
  invalid_r0 = header_after_check["ATMOS_R0"]
618
687
  assert header.get("AO_LOCK") != True
688
+
689
+
690
+ @pytest.mark.parametrize(
691
+ "wavelength, wavemin, wavemax, expected",
692
+ [
693
+ pytest.param(
694
+ 617,
695
+ 615,
696
+ 619,
697
+ "Fe I (617.33 nm)",
698
+ id="line_is_between_wavemin_and_wavemax_and_exists",
699
+ ),
700
+ pytest.param(
701
+ 700,
702
+ 698,
703
+ 702,
704
+ None,
705
+ id="line_is_between_wavemin_and_wavemax_and_does_not_exist",
706
+ ),
707
+ pytest.param(
708
+ 617,
709
+ 698,
710
+ 702,
711
+ None,
712
+ id="line_is_not_between_wavemin_and_wavemax_and_exists",
713
+ ),
714
+ ],
715
+ )
716
+ def test_get_waveband(write_l1_task, wavelength, wavemin, wavemax, expected):
717
+ """
718
+ :Given: an input wavelength contribution
719
+ :When: determining the waveband
720
+ :Then: the correct waveband is returned
721
+ """
722
+ wavelength_range = WavelengthRange(min=wavemin * u.nm, max=wavemax * u.nm)
723
+ task, _, _ = write_l1_task
724
+ waveband = task.get_waveband(wavelength=wavelength * u.nm, wavelength_range=wavelength_range)
725
+ assert waveband == expected
726
+
727
+
728
+ def test_empty_waveband(write_l1_task_with_empty_waveband, mocker):
729
+ """
730
+ :Given: a header converted to SPEC 214 L1 and a wavelength range that has no listed spectral lines
731
+ :When: checking the waveband key
732
+ :Then: it does not exist
733
+ """
734
+ mocker.patch(
735
+ "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
736
+ )
737
+ mocker.patch(
738
+ "dkist_processing_common.tasks.write_l1.WriteL1Frame.version_from_module_name",
739
+ new_callable=Mock,
740
+ return_value="fake_version_number",
741
+ )
742
+
743
+ task, _, _ = write_l1_task_with_empty_waveband
744
+ task()
745
+ files = list(task.read(tags=[Tag.frame(), Tag.output()]))
746
+ for file in files:
747
+ header = fits.open(file)[1].header
748
+ assert header["WAVEMIN"] == 10000
749
+ assert header["WAVEMAX"] == 10050
750
+ with pytest.raises(KeyError):
751
+ header["WAVEBAND"]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dkist-processing-common
3
- Version: 10.8.1rc1
3
+ Version: 10.8.3
4
4
  Summary: Common task classes used by the DKIST science data processing pipelines
5
5
  Author-email: NSO / AURA <dkistdc@nso.edu>
6
6
  License: BSD-3-Clause
@@ -17,7 +17,7 @@ Requires-Dist: asdf<4.0.0,>=3.5.0
17
17
  Requires-Dist: astropy>=7.0.0
18
18
  Requires-Dist: dkist-fits-specifications<5.0,>=4.0.0
19
19
  Requires-Dist: dkist-header-validator<6.0,>=5.0.0
20
- Requires-Dist: dkist-processing-core==5.1.0
20
+ Requires-Dist: dkist-processing-core==5.1.1
21
21
  Requires-Dist: dkist-processing-pac<4.0,>=3.1
22
22
  Requires-Dist: dkist-service-configuration<3.0,>=2.0.2
23
23
  Requires-Dist: dkist-spectral-lines<4.0,>=3.0.0