dkist-processing-common 12.0.0rc5__py3-none-any.whl → 12.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published.
Files changed (35)
  1. dkist_processing_common/codecs/fits.py +27 -6
  2. dkist_processing_common/models/constants.py +16 -10
  3. dkist_processing_common/models/extras.py +35 -0
  4. dkist_processing_common/models/flower_pot.py +230 -9
  5. dkist_processing_common/models/tags.py +13 -0
  6. dkist_processing_common/parsers/average_bud.py +0 -2
  7. dkist_processing_common/parsers/cs_step.py +10 -10
  8. dkist_processing_common/parsers/id_bud.py +8 -10
  9. dkist_processing_common/parsers/lookup_bud.py +7 -11
  10. dkist_processing_common/parsers/near_bud.py +7 -12
  11. dkist_processing_common/parsers/retarder.py +9 -13
  12. dkist_processing_common/parsers/time.py +19 -55
  13. dkist_processing_common/parsers/unique_bud.py +7 -14
  14. dkist_processing_common/tasks/l1_output_data.py +23 -14
  15. dkist_processing_common/tasks/output_data_base.py +25 -4
  16. dkist_processing_common/tasks/parse_l0_input_data.py +4 -2
  17. dkist_processing_common/tasks/transfer_input_data.py +1 -0
  18. dkist_processing_common/tasks/write_extra.py +333 -0
  19. dkist_processing_common/tasks/write_l1.py +2 -55
  20. dkist_processing_common/tasks/write_l1_base.py +67 -0
  21. dkist_processing_common/tests/test_codecs.py +57 -11
  22. dkist_processing_common/tests/test_construct_dataset_extras.py +224 -0
  23. dkist_processing_common/tests/test_flower_pot.py +147 -5
  24. dkist_processing_common/tests/test_output_data_base.py +24 -2
  25. dkist_processing_common/tests/test_parse_l0_input_data.py +28 -4
  26. dkist_processing_common/tests/test_stems.py +140 -193
  27. dkist_processing_common/tests/test_transfer_l1_output_data.py +1 -0
  28. dkist_processing_common/tests/test_trial_catalog.py +2 -0
  29. dkist_processing_common/tests/test_workflow_task_base.py +0 -11
  30. dkist_processing_common/tests/test_write_l1.py +0 -1
  31. {dkist_processing_common-12.0.0rc5.dist-info → dkist_processing_common-12.2.0.dist-info}/METADATA +4 -4
  32. {dkist_processing_common-12.0.0rc5.dist-info → dkist_processing_common-12.2.0.dist-info}/RECORD +34 -31
  33. {dkist_processing_common-12.0.0rc5.dist-info → dkist_processing_common-12.2.0.dist-info}/WHEEL +1 -1
  34. changelog/288.misc.rst +0 -1
  35. {dkist_processing_common-12.0.0rc5.dist-info → dkist_processing_common-12.2.0.dist-info}/top_level.txt +0 -0
dkist_processing_common/tasks/write_extra.py
@@ -0,0 +1,333 @@
+ """Classes to support the generation of dataset extras."""
+
+ import uuid
+ from abc import ABC
+ from abc import abstractmethod
+ from datetime import datetime
+
+ import numpy as np
+ from astropy.io import fits
+ from astropy.time import Time
+ from dkist_fits_specifications.utils.formatter import reformat_dataset_extra_header
+ from dkist_header_validator.spec_validators import spec_extras_validator
+
+ from dkist_processing_common.codecs.fits import fits_hdulist_encoder
+ from dkist_processing_common.models.extras import DatasetExtraHeaderSection
+ from dkist_processing_common.models.extras import DatasetExtraType
+ from dkist_processing_common.models.tags import Tag
+ from dkist_processing_common.models.task_name import TaskName
+ from dkist_processing_common.tasks.write_l1_base import WriteL1Base
+
+
+ class WriteL1DatasetExtras(WriteL1Base, ABC):
+     """Class supporting the construction of dataset extras."""
+
+     def dataset_extra_headers(
+         self,
+         filename: str,
+         task_type: TaskName,
+         extra_name: DatasetExtraType,
+         end_time: str,
+         total_exposure: float | None = None,
+         readout_exposure: float | None = None,
+     ) -> dict:
+         """Provide common FITS header keys for dataset extras."""
+         # Build task specific header values
+         match task_type:
+             case TaskName.dark:
+                 task_specific_observing_program_execution_id = (
+                     self.constants.dark_observing_program_execution_ids
+                 )
+                 task_specific_date_begin = self.constants.dark_date_begin
+                 task_specific_raw_frames_per_fpa = (
+                     0  # can be updated in construction of dataset extra if required
+                 )
+                 task_specific_telescope_tracking_mode = (
+                     "None"  # can be updated in construction of dataset extra if required
+                 )
+                 task_specific_coude_table_tracking_mode = (
+                     "None"  # can be updated in construction of dataset extra if required
+                 )
+                 task_specific_telescope_scanning_mode = (
+                     "None"  # can be updated in construction of dataset extra if required
+                 )
+                 task_specific_average_light_level = self.constants.dark_average_light_level
+                 task_specific_average_telescope_elevation = (
+                     self.constants.dark_average_telescope_elevation
+                 )
+                 task_specific_average_coude_table_angle = (
+                     self.constants.dark_average_coude_table_angle
+                 )
+                 task_specific_average_telescope_azimuth = (
+                     self.constants.dark_average_telescope_azimuth
+                 )
+                 task_specific_gos_level3_status = self.constants.dark_gos_level3_status
+                 task_specific_gos_level3_lamp_status = self.constants.dark_gos_level3_lamp_status
+                 task_specific_gos_polarizer_status = self.constants.dark_gos_polarizer_status
+                 task_specific_gos_polarizer_angle = self.constants.dark_gos_polarizer_angle
+                 task_specific_gos_retarder_status = self.constants.dark_gos_retarder_status
+                 task_specific_gos_retarder_angle = self.constants.dark_gos_retarder_angle
+                 task_specific_gos_level0_status = self.constants.dark_gos_level0_status
+             case TaskName.solar_gain:
+                 task_specific_observing_program_execution_id = (
+                     self.constants.solar_gain_observing_program_execution_ids
+                 )
+
+                 task_specific_date_begin = self.constants.solar_gain_date_begin
+                 task_specific_raw_frames_per_fpa = self.constants.solar_gain_num_raw_frames_per_fpa
+                 task_specific_telescope_tracking_mode = (
+                     self.constants.solar_gain_telescope_tracking_mode
+                 )
+                 task_specific_coude_table_tracking_mode = (
+                     self.constants.solar_gain_coude_table_tracking_mode
+                 )
+                 task_specific_telescope_scanning_mode = (
+                     self.constants.solar_gain_telescope_scanning_mode
+                 )
+                 task_specific_average_light_level = self.constants.solar_gain_average_light_level
+                 task_specific_average_telescope_elevation = (
+                     self.constants.solar_gain_average_telescope_elevation
+                 )
+                 task_specific_average_coude_table_angle = (
+                     self.constants.solar_gain_average_coude_table_angle
+                 )
+                 task_specific_average_telescope_azimuth = (
+                     self.constants.solar_gain_average_telescope_azimuth
+                 )
+                 task_specific_gos_level3_status = self.constants.solar_gain_gos_level3_status
+                 task_specific_gos_level3_lamp_status = (
+                     self.constants.solar_gain_gos_level3_lamp_status
+                 )
+                 task_specific_gos_polarizer_status = self.constants.solar_gain_gos_polarizer_status
+                 task_specific_gos_polarizer_angle = self.constants.solar_gain_gos_polarizer_angle
+                 task_specific_gos_retarder_status = self.constants.solar_gain_gos_retarder_status
+                 task_specific_gos_retarder_angle = self.constants.solar_gain_gos_retarder_angle
+                 task_specific_gos_level0_status = self.constants.solar_gain_gos_level0_status
+             case TaskName.polcal:
+                 task_specific_observing_program_execution_id = (
+                     self.constants.polcal_observing_program_execution_ids
+                 )
+
+                 task_specific_date_begin = self.constants.polcal_date_begin
+                 task_specific_raw_frames_per_fpa = self.constants.polcal_num_raw_frames_per_fpa
+                 task_specific_telescope_tracking_mode = (
+                     self.constants.polcal_telescope_tracking_mode
+                 )
+                 task_specific_coude_table_tracking_mode = (
+                     self.constants.polcal_coude_table_tracking_mode
+                 )
+                 task_specific_telescope_scanning_mode = (
+                     self.constants.polcal_telescope_scanning_mode
+                 )
+                 task_specific_average_light_level = self.constants.polcal_average_light_level
+                 task_specific_average_telescope_elevation = (
+                     self.constants.polcal_average_telescope_elevation
+                 )
+                 task_specific_average_coude_table_angle = (
+                     self.constants.polcal_average_coude_table_angle
+                 )
+                 task_specific_average_telescope_azimuth = (
+                     self.constants.polcal_average_telescope_azimuth
+                 )
+                 task_specific_gos_level3_status = None
+                 task_specific_gos_level3_lamp_status = None
+                 task_specific_gos_polarizer_status = None
+                 task_specific_gos_polarizer_angle = None
+                 task_specific_gos_retarder_status = None
+                 task_specific_gos_retarder_angle = None
+                 task_specific_gos_level0_status = None
+             case _:
+                 raise ValueError(f"Unsupported task type {task_type}")
+
+         start_datetime = datetime.fromisoformat(task_specific_date_begin)
+         end_datetime = datetime.fromisoformat(end_time)
+
+         dataset_extra_header = {
+             DatasetExtraHeaderSection.common: {
+                 "BUNIT": "count",
+                 "DATE": Time.now().fits,
+                 "DATE-BEG": task_specific_date_begin,
+                 "DATE-END": end_time,
+                 "TELAPSE": (end_datetime - start_datetime).total_seconds(),
+                 "DATE-AVG": (start_datetime + (end_datetime - start_datetime) / 2).isoformat(),
+                 "TIMESYS": "UTC",
+                 "ORIGIN": "National Solar Observatory",
+                 "TELESCOP": "Daniel K. Inouye Solar Telescope",
+                 "OBSRVTRY": "Haleakala High Altitude Observatory Site",
+                 "NETWORK": "NSF-DKIST",
+                 "INSTRUME": self.constants.instrument,
+                 "OBJECT": "unknown",
+                 "CAM_ID": self.constants.camera_id,
+                 "CAMERA": self.constants.camera_name,
+                 "BITDEPTH": self.constants.camera_bit_depth,
+                 "XPOSURE": total_exposure,
+                 "TEXPOSUR": readout_exposure,
+                 "HWBIN1": self.constants.hardware_binning_x,
+                 "HWBIN2": self.constants.hardware_binning_y,
+                 "SWBIN1": self.constants.software_binning_x,
+                 "SWBIN2": self.constants.software_binning_y,
+                 "NSUMEXP": task_specific_raw_frames_per_fpa,
+                 "DSETID": self.constants.dataset_id,
+                 "PROCTYPE": "L1_EXTRA",
+                 "RRUNID": self.recipe_run_id,
+                 "RECIPEID": self.metadata_store_recipe_run.recipeInstance.recipeId,
+                 "RINSTID": self.metadata_store_recipe_run.recipeInstanceId,
+                 "FILENAME": filename,
+                 "HEAD_URL": "",
+                 "INFO_URL": self.docs_base_url,
+                 "CAL_URL": "",
+                 "CALVERS": self.version_from_module_name(),
+                 "IDSPARID": (
+                     parameters.inputDatasetPartId
+                     if (parameters := self.metadata_store_input_dataset_parameters)
+                     else None
+                 ),
+                 "IDSOBSID": (
+                     observe_frames.inputDatasetPartId
+                     if (observe_frames := self.metadata_store_input_dataset_observe_frames)
+                     else None
+                 ),
+                 "IDSCALID": (
+                     calibration_frames.inputDatasetPartId
+                     if (calibration_frames := self.metadata_store_input_dataset_calibration_frames)
+                     else None
+                 ),
+                 "WKFLVERS": self.workflow_version,
+                 "WKFLNAME": self.workflow_name,
+                 "MANPROCD": self.workflow_had_manual_intervention,
+                 "FILE_ID": uuid.uuid4().hex,
+                 "OBSPR_ID": task_specific_observing_program_execution_id[
+                     0
+                 ],  # The OP IDs are stored sorted by number of appearances of each OP ID in the source task type frames
+                 "EXTOBSID": ",".join(task_specific_observing_program_execution_id[1:]),
+                 "EXPER_ID": self.constants.experiment_id,
+                 "PROP_ID": self.constants.proposal_id,
+                 "HLSVERS": self.constants.hls_version,
+                 "LINEWAV": self.constants.wavelength,
+                 "TELTRACK": (
+                     task_specific_telescope_tracking_mode if task_type != TaskName.dark else None
+                 ),
+                 "TTBLTRCK": (
+                     task_specific_coude_table_tracking_mode if task_type != TaskName.dark else None
+                 ),
+                 "TELSCAN": (
+                     task_specific_telescope_scanning_mode if task_type != TaskName.dark else None
+                 ),
+                 "EXTNAME": extra_name,
+             },
+             DatasetExtraHeaderSection.aggregate: {
+                 "AVGLLVL": task_specific_average_light_level,
+                 "ATELEVAT": task_specific_average_telescope_elevation,
+                 "ATTBLANG": task_specific_average_coude_table_angle,
+                 "ATAZIMUT": task_specific_average_telescope_azimuth,
+             },
+             DatasetExtraHeaderSection.iptask: {
+                 "IPTASK": "GAIN" if "GAIN" in task_type else task_type,
+             },
+             DatasetExtraHeaderSection.gos: {
+                 "LVL3STAT": task_specific_gos_level3_status,
+                 "LAMPSTAT": task_specific_gos_level3_lamp_status,
+                 "LVL2STAT": task_specific_gos_polarizer_status,
+                 "POLANGLE": task_specific_gos_polarizer_angle,
+                 "LVL1STAT": task_specific_gos_retarder_status,
+                 "RETANGLE": task_specific_gos_retarder_angle,
+                 "LVL0STAT": task_specific_gos_level0_status,
+             },
+         }
+
+         # Remove specific headers from dark frames as they don't have constants to fill them
+         if task_type == TaskName.dark:
+             for key in ["TELTRACK", "TTBLTRCK", "TELSCAN"]:
+                 del dataset_extra_header[DatasetExtraHeaderSection.common][key]
+
+         # Remove specific headers from polcal frames as they don't have constants to fill them
+         if task_type == TaskName.polcal:
+             for key in [
+                 "LVL3STAT",
+                 "LAMPSTAT",
+                 "LVL2STAT",
+                 "POLANGLE",
+                 "LVL1STAT",
+                 "RETANGLE",
+                 "LVL0STAT",
+             ]:
+                 del dataset_extra_header[DatasetExtraHeaderSection.gos][key]
+
+         return dataset_extra_header
+
+     def build_dataset_extra_header(
+         self,
+         sections: list[DatasetExtraHeaderSection],
+         filename: str,
+         task_type: TaskName,
+         extra_name: DatasetExtraType,
+         total_exposure: float | None = None,
+         readout_exposure: float | None = None,
+         end_time: str | None = None,
+     ) -> fits.Header:
+         """Build FITS header for dataset extra file."""
+         header = fits.Header()
+         all_section_headers = self.dataset_extra_headers(
+             filename=filename,
+             task_type=task_type,
+             total_exposure=total_exposure,
+             readout_exposure=readout_exposure,
+             extra_name=extra_name,
+             end_time=end_time,
+         )
+         for section in sections:
+             header.update(all_section_headers[section].items())
+         return header
+
+     def format_extra_filename(self, extra_name: DatasetExtraType | str, detail: str | None = None):
+         """Format the filename of dataset extras for consistency."""
+         base_filename = f"{self.constants.instrument}_{self.constants.dataset_id}_{extra_name.replace(' ', '-')}"
+         if detail:
+             base_filename += "_" + detail
+         filename_counter = str(self.filename_counter.increment(base_filename))
+         return f"{base_filename}_{filename_counter}.fits"
+
+     def assemble_and_write_dataset_extra(
+         self,
+         data: np.ndarray | list[np.ndarray],
+         header: fits.Header | list[fits.Header],
+         filename: str,
+     ):
+         """Given the data and header information, write the dataset extra."""
+         if isinstance(data, list) and isinstance(header, list):
+             if len(data) != len(header):
+                 raise ValueError(
+                     f"{len(data)} data arrays were provided with {len(header)} headers. These must be equal."
+                 )
+         if isinstance(data, np.ndarray):
+             data = [data]
+         if isinstance(header, fits.Header):
+             header = [header]
+         hdus = [fits.PrimaryHDU()]  # The first HDU in the list is an empty PrimaryHDU
+         for i, data_array in enumerate(data):
+             tile_size = self.compute_tile_size_for_array(data_array)
+             hdu = fits.CompImageHDU(header=header[i], data=data_array, tile_shape=tile_size)
+             formatted_header = reformat_dataset_extra_header(hdu.header)
+             hdu = fits.CompImageHDU(header=formatted_header, data=hdu.data, tile_shape=tile_size)
+             hdus.append(hdu)
+         self.write(
+             data=fits.HDUList(hdus),
+             tags=[Tag.extra(), Tag.output()],
+             encoder=fits_hdulist_encoder,
+             relative_path=filename,
+         )
+         self.update_framevol(relative_path=filename)
+
+         # Check that the written file passes spec 214 validation if requested
+         if self.validate_l1_on_write:
+             spec_extras_validator.validate(self.scratch.absolute_path(filename), extra=False)
+
+     @abstractmethod
+     def run(self) -> None:
+         """
+         For each dataset extra.
+
+         * Gather the source data in whatever manner is necessary
+         * Build a header using the `build_dataset_extra_header` method to help with header construction
+         * Write the dataset extra using `assemble_and_write_dataset_extra()`
+         """
dkist_processing_common/tasks/write_l1.py
@@ -1,11 +1,9 @@
  """Task(s) for writing level 1 data as 214 compliant fits files."""

- import importlib
  import logging
  import uuid
  from abc import ABC
  from abc import abstractmethod
- from functools import cached_property
  from pathlib import Path
  from string import ascii_uppercase
  from typing import Literal
@@ -34,7 +32,7 @@ from dkist_processing_common.models.fried_parameter import r0_valid
  from dkist_processing_common.models.tags import Tag
  from dkist_processing_common.models.wavelength import WavelengthRange
  from dkist_processing_common.parsers.l0_fits_access import L0FitsAccess
- from dkist_processing_common.tasks import WorkflowTaskBase
+ from dkist_processing_common.tasks.write_l1_base import WriteL1Base

  logger = logging.getLogger(__name__)

@@ -43,7 +41,7 @@ __all__ = ["WriteL1Frame"]
  from dkist_processing_common.tasks.mixin.metadata_store import MetadataStoreMixin


- class WriteL1Frame(WorkflowTaskBase, MetadataStoreMixin, ABC):
+ class WriteL1Frame(WriteL1Base, ABC):
      """
      Task to convert final calibrated science frames into spec 214 compliant level 1 frames.

@@ -108,45 +106,6 @@ class WriteL1Frame(WorkflowTaskBase, MetadataStoreMixin, ABC):
                      self.scratch.absolute_path(relative_path), extra=False
                  )

-     @cached_property
-     def tile_size_param(self) -> int | None:
-         """Get the tile size parameter for compression."""
-         return self.metadata_store_recipe_run.configuration.tile_size
-
-     @cached_property
-     def validate_l1_on_write(self) -> bool:
-         """Check for validate on write."""
-         return self.metadata_store_recipe_run.configuration.validate_l1_on_write
-
-     @cached_property
-     def workflow_had_manual_intervention(self):
-         """Indicate determining if any provenance capturing steps had manual intervention."""
-         for provenance_record in self.metadata_store_recipe_run.recipeRunProvenances:
-             if provenance_record.isTaskManual:
-                 return True
-         return False
-
-     def compute_tile_size_for_array(self, data: np.ndarray) -> list | None:
-         """Determine the tile size to use for compression accounting for array shape minimums."""
-         if self.tile_size_param is None:
-             return None
-         tile_size = []
-         for dim_size in data.shape:
-             if dim_size < self.tile_size_param:
-                 tile_size.append(dim_size)
-             else:
-                 tile_size.append(self.tile_size_param)
-         return tile_size
-
-     def update_framevol(self, relative_path: str) -> None:
-         """Update FRAMEVOL key to be exactly the size of the file on-disk."""
-         full_path = self.scratch.workflow_base_path / relative_path
-         compressed_size = full_path.stat().st_size / 1024 / 1024
-         hdul = fits.open(full_path, mode="update")
-         hdul[1].header["FRAMEVOL"] = compressed_size
-         hdul.flush()
-         del hdul
-

      def replace_header_values(self, header: fits.Header, data: np.ndarray) -> fits.Header:
          """Replace header values that should already exist with new values."""
@@ -430,18 +389,6 @@ class WriteL1Frame(WorkflowTaskBase, MetadataStoreMixin, ABC):
          )
          return header

-     def version_from_module_name(self) -> str:
-         """
-         Get the value of __version__ from a module given its name.
-
-         Returns
-         -------
-         The value of __version__
-         """
-         package = self.__module__.split(".")[0]
-         module = importlib.import_module(package)
-         return module.__version__
-
      @abstractmethod
      def add_dataset_headers(
          self, header: fits.Header, stokes: Literal["I", "Q", "U", "V"]
dkist_processing_common/tasks/write_l1_base.py
@@ -0,0 +1,67 @@
+ """Base class for writing L1 FITS products with headers."""
+
+ import importlib
+ from abc import ABC
+ from functools import cached_property
+
+ import numpy as np
+ from astropy.io import fits
+
+ from dkist_processing_common.tasks import WorkflowTaskBase
+ from dkist_processing_common.tasks.mixin.metadata_store import MetadataStoreMixin
+
+
+ class WriteL1Base(WorkflowTaskBase, MetadataStoreMixin, ABC):
+     """Base class for writing L1 FITS products with headers."""
+
+     def version_from_module_name(self) -> str:
+         """
+         Get the value of __version__ from a module given its name.
+
+         Returns
+         -------
+         The value of __version__
+         """
+         package = self.__module__.split(".")[0]
+         module = importlib.import_module(package)
+         return module.__version__
+
+     @cached_property
+     def workflow_had_manual_intervention(self) -> bool:
+         """Indicate determining if any provenance capturing steps had manual intervention."""
+         for provenance_record in self.metadata_store_recipe_run.recipeRunProvenances:
+             if provenance_record.isTaskManual:
+                 return True
+         return False
+
+     def update_framevol(self, relative_path: str) -> None:
+         """Update FRAMEVOL key to be exactly the size of the file on-disk."""
+         full_path = self.scratch.workflow_base_path / relative_path
+         compressed_size = full_path.stat().st_size / 1024 / 1024
+         hdul = fits.open(full_path, mode="update")
+         for i in range(1, len(hdul)):
+             hdul[i].header["FRAMEVOL"] = compressed_size
+         hdul.flush()
+         del hdul
+
+     @cached_property
+     def tile_size_param(self) -> int | None:
+         """Get the tile size parameter for compression."""
+         return self.metadata_store_recipe_run.configuration.tile_size
+
+     def compute_tile_size_for_array(self, data: np.ndarray) -> list | None:
+         """Determine the tile size to use for compression accounting for array shape minimums."""
+         if self.tile_size_param is None:
+             return None
+         tile_size = []
+         for dim_size in data.shape:
+             if dim_size < self.tile_size_param:
+                 tile_size.append(dim_size)
+             else:
+                 tile_size.append(self.tile_size_param)
+         return tile_size
+
+     @cached_property
+     def validate_l1_on_write(self) -> bool:
+         """Check for validate on write."""
+         return self.metadata_store_recipe_run.configuration.validate_l1_on_write
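For reference, compute_tile_size_for_array simply clamps the configured tile size to each array dimension so that arrays smaller than the tile remain compressible. A standalone restatement of that clamping rule (the function name and example shapes are illustrative only, not part of the package):

    def clamp_tile_size(shape: tuple[int, ...], tile_size_param: int | None) -> list | None:
        """Mirror of the rule above: each tile axis is min(dimension size, configured tile size)."""
        if tile_size_param is None:
            return None  # no tiling parameter configured; the caller falls back to default tiling
        return [min(dim_size, tile_size_param) for dim_size in shape]

    assert clamp_tile_size((2048, 2048), 64) == [64, 64]
    assert clamp_tile_size((40, 2048), 64) == [40, 64]  # first axis smaller than the tile size
    assert clamp_tile_size((40, 2048), None) is None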
dkist_processing_common/tests/test_codecs.py
@@ -183,7 +183,7 @@ def primary_hdu_list(ndarray_object, fits_header) -> HDUList:
 
  @pytest.fixture
  def path_to_primary_fits_file(primary_hdu_list, tmp_file) -> Path:
-     primary_hdu_list.writeto(tmp_file)
+     primary_hdu_list.writeto(tmp_file, checksum=True)
      return tmp_file


@@ -210,7 +210,7 @@ def compressed_hdu_list(ndarray_object, fits_header) -> HDUList:

  @pytest.fixture
  def path_to_compressed_fits_file(compressed_hdu_list, tmp_file) -> Path:
-     compressed_hdu_list.writeto(tmp_file)
+     compressed_hdu_list.writeto(tmp_file, checksum=True)
      return tmp_file


@@ -472,16 +472,27 @@ def test_bytesio_decoder(bytesIO_object, path_to_bytesIO):
          pytest.param("path_to_compressed_fits_file", id="compressed"),
      ],
  )
- def test_fits_hdu_decoder(path_fixture_name, ndarray_object, fits_header, request):
+ @pytest.mark.parametrize(
+     "checksum", [pytest.param(True, id="checksum"), pytest.param(False, id="no_checksum")]
+ )
+ @pytest.mark.parametrize(
+     "decompress", [pytest.param(True, id="decompress"), pytest.param(False, id="no_decompress")]
+ )
+ def test_fits_hdu_decoder(
+     path_fixture_name, ndarray_object, fits_header, request, checksum, decompress
+ ):
      """
      Given: Path to a FITS file
      When: Decoding the path with the fits_hdu_decoder
      Then: The correct data are returned
      """
      file_path = request.getfixturevalue(path_fixture_name)
-     hdu = fits_hdu_decoder(file_path)
+     hdu = fits_hdu_decoder(file_path, checksum=checksum, disable_image_compression=not decompress)

-     assert np.array_equal(hdu.data, ndarray_object)
+     if "compressed" in path_fixture_name and not decompress:
+         assert not np.array_equal(hdu.data, ndarray_object)
+     else:
+         assert np.array_equal(hdu.data, ndarray_object)
      assert hdu.header["foo"] == fits_header["foo"]


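The parametrized cases above exercise the new checksum and disable_image_compression keyword arguments on the FITS decoders (see the codecs/fits.py entry in the file list). A usage sketch that mirrors the test calls, with a placeholder path:

    from dkist_processing_common.codecs.fits import fits_hdu_decoder

    hdu = fits_hdu_decoder(
        "scratch/example_extra.fits",      # placeholder path
        checksum=True,                     # verify FITS checksums while reading
        disable_image_compression=True,    # keep compressed HDU data as stored, skipping decompression
    )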
@@ -492,7 +503,15 @@ def test_fits_hdu_decoder(path_fixture_name, ndarray_object, fits_header, reques
          pytest.param("path_to_compressed_fits_file", id="compressed"),
      ],
  )
- def test_fits_access_decoder(path_fixture_name, ndarray_object, fits_header, request):
+ @pytest.mark.parametrize(
+     "checksum", [pytest.param(True, id="checksum"), pytest.param(False, id="no_checksum")]
+ )
+ @pytest.mark.parametrize(
+     "decompress", [pytest.param(True, id="decompress"), pytest.param(False, id="no_decompress")]
+ )
+ def test_fits_access_decoder(
+     path_fixture_name, ndarray_object, fits_header, request, checksum, decompress
+ ):
      """
      Given: Path to a FITS file
      When: Decoding the path with the fits_access_decoder
@@ -500,20 +519,47 @@ def test_fits_access_decoder(path_fixture_name, ndarray_object, fits_header, req
      """
      file_path = request.getfixturevalue(path_fixture_name)

-     fits_obj = fits_access_decoder(file_path, fits_access_class=DummyFitsAccess)
+     fits_obj = fits_access_decoder(
+         file_path,
+         fits_access_class=DummyFitsAccess,
+         checksum=checksum,
+         disable_image_compression=not decompress,
+     )
      assert fits_obj.name == str(file_path)
-     assert np.array_equal(fits_obj.data, ndarray_object)
      assert fits_obj.foo == fits_header["foo"]
+     if "compressed" in path_fixture_name and not decompress:
+         assert not np.array_equal(fits_obj.data, ndarray_object)
+     else:
+         assert np.array_equal(fits_obj.data, ndarray_object)


- def test_fits_array_decoder(path_to_primary_fits_file, ndarray_object):
+ @pytest.mark.parametrize(
+     "path_fixture_name",
+     [
+         pytest.param("path_to_primary_fits_file", id="uncompressed"),
+         pytest.param("path_to_compressed_fits_file", id="compressed"),
+     ],
+ )
+ @pytest.mark.parametrize(
+     "checksum", [pytest.param(True, id="checksum"), pytest.param(False, id="no_checksum")]
+ )
+ @pytest.mark.parametrize(
+     "decompress", [pytest.param(True, id="decompress"), pytest.param(False, id="no_decompress")]
+ )
+ def test_fits_array_decoder(path_fixture_name, ndarray_object, request, checksum, decompress):
      """
      Given: Path to a FITS file
      When: Decoding the path the fits_array_decoder
      Then: The correct data are returned
      """
-     array = fits_array_decoder(path_to_primary_fits_file)
-     assert np.array_equal(ndarray_object, array)
+     file_path = request.getfixturevalue(path_fixture_name)
+     array = fits_array_decoder(
+         file_path, checksum=checksum, disable_image_compression=not decompress
+     )
+     if "compressed" in path_fixture_name and not decompress:
+         assert not np.array_equal(array, ndarray_object)
+     else:
+         assert np.array_equal(ndarray_object, array)


  def test_fits_array_decoder_autosqueeze(