dkist-processing-common 12.1.0rc1__py3-none-any.whl → 12.2.0rc1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
- changelog/272.feature.rst +1 -0
- dkist_processing_common/models/constants.py +6 -0
- dkist_processing_common/models/extras.py +35 -0
- dkist_processing_common/models/tags.py +13 -0
- dkist_processing_common/parsers/id_bud.py +7 -4
- dkist_processing_common/tasks/l1_output_data.py +26 -7
- dkist_processing_common/tasks/output_data_base.py +25 -4
- dkist_processing_common/tasks/write_extra.py +333 -0
- dkist_processing_common/tasks/write_l1.py +2 -55
- dkist_processing_common/tasks/write_l1_base.py +67 -0
- dkist_processing_common/tests/test_construct_dataset_extras.py +219 -0
- dkist_processing_common/tests/test_output_data_base.py +24 -2
- dkist_processing_common/tests/test_transfer_l1_output_data.py +1 -0
- dkist_processing_common/tests/test_trial_catalog.py +3 -0
- dkist_processing_common/tests/test_write_l1.py +0 -1
- {dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/METADATA +2 -2
- {dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/RECORD +19 -22
- changelog/280.misc.rst +0 -1
- changelog/282.feature.2.rst +0 -2
- changelog/282.feature.rst +0 -2
- changelog/284.feature.rst +0 -1
- changelog/285.feature.rst +0 -2
- changelog/285.misc.rst +0 -2
- changelog/286.feature.rst +0 -2
- changelog/287.misc.rst +0 -1
- {dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/WHEEL +0 -0
- {dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/top_level.txt +0 -0
changelog/272.feature.rst
ADDED

@@ -0,0 +1 @@
+Add the framework for using Dataset Extras, that is, other data that can be included with the L1 FITS files generated by the regular pipeline. A new abstract class, WriteL1DatasetExtras, provides helper functionality for use in the instrument pipelines.
dkist_processing_common/models/constants.py

@@ -285,18 +285,24 @@ class ConstantsBase:
     def dark_observing_program_execution_ids(self) -> list[str]:
         """Return the observing program execution ids constant for the dark task."""
         observing_programs = self._db_dict[BudName.dark_observing_program_execution_ids]
+        if isinstance(observing_programs, str):
+            observing_programs = [observing_programs]
         return list(observing_programs)

     @property
     def solar_gain_observing_program_execution_ids(self) -> list[str]:
         """Return the observing program execution ids constant for the solar_gain task."""
         observing_programs = self._db_dict[BudName.solar_gain_observing_program_execution_ids]
+        if isinstance(observing_programs, str):
+            observing_programs = [observing_programs]
         return list(observing_programs)

     @property
     def polcal_observing_program_execution_ids(self) -> list[str]:
         """Return the observing program execution ids constant."""
         observing_programs = self._db_dict[BudName.polcal_observing_program_execution_ids]
+        if isinstance(observing_programs, str):
+            observing_programs = [observing_programs]
         return list(observing_programs)

     @property
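Note on the change above: the new isinstance guard protects against a constants value stored as a bare string rather than a list of strings. Without it, list() would iterate the string character by character. A minimal stand-alone sketch of the failure mode (the "OP1" value is illustrative):

    observing_programs = "OP1"  # a single ID stored as a bare string

    # Without the guard, list() splits the string into characters:
    assert list(observing_programs) == ["O", "P", "1"]

    # With the guard, the single ID survives intact:
    if isinstance(observing_programs, str):
        observing_programs = [observing_programs]
    assert list(observing_programs) == ["OP1"]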
dkist_processing_common/models/extras.py
ADDED

@@ -0,0 +1,35 @@
+"""Autocomplete access to dataset extra header sections."""
+
+from enum import StrEnum
+
+
+class DatasetExtraHeaderSection(StrEnum):
+    """Enum defining the possible header sections for dataset extras."""
+
+    common = "common"
+    aggregate = "aggregate"
+    iptask = "iptask"
+    gos = "gos"
+    wavecal = "wavecal"
+    atlas = "atlas"
+    test = "test"
+
+
+class DatasetExtraType(StrEnum):
+    """Enum defining options for dataset extra names."""
+
+    dark = "DARK"
+    background_light = "BACKGROUND LIGHT"
+    solar_gain = "SOLAR GAIN"
+    characteristic_spectra = "CHARACTERISTIC SPECTRA"
+    modulation_state_offsets = "MODULATION STATE OFFSETS"
+    beam_angles = "BEAM ANGLES"
+    spectral_curvature_shifts = "SPECTRAL CURVATURE SHIFTS"
+    wavelength_calibration_input_spectrum = "WAVELENGTH CALIBRATION INPUT SPECTRUM"
+    wavelength_calibration_reference_spectrum = "WAVELENGTH CALIBRATION REFERENCE SPECTRUM"
+    reference_wavelength_vector = "REFERENCE WAVELENGTH VECTOR"
+    demodulation_matrices = "DEMODULATION MATRICES"
+    polcal_as_science = "POLCAL AS SCIENCE"
+    bad_pixel_map = "BAD PIXEL MAP"
+    beam_offsets = "BEAM OFFSETS"
+    spectral_curvature_scales = "SPECTRAL CURVATURE SCALES"
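Note on the new module: because both enums subclass StrEnum, their members are genuine str instances, which is what lets code later in this diff compare them to plain strings, embed them in FITS headers, and call string methods on them directly. A small sketch, re-declaring one member for illustration:

    from enum import StrEnum  # Python 3.11+

    class DatasetExtraType(StrEnum):
        solar_gain = "SOLAR GAIN"

    assert DatasetExtraType.solar_gain == "SOLAR GAIN"
    assert isinstance(DatasetExtraType.solar_gain, str)
    # Used later by format_extra_filename to build filesystem-safe names:
    assert DatasetExtraType.solar_gain.replace(" ", "-") == "SOLAR-GAIN"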
dkist_processing_common/models/tags.py

@@ -38,6 +38,8 @@ class StemName(StrEnum):
     dataset_inventory = "DATASET_INVENTORY"
     asdf = "ASDF"
     quality_report = "QUALITY_REPORT"
+    # Dataset extras
+    extra = "EXTRA"


 class Tag:

@@ -450,3 +452,14 @@ class Tag:
         An asdf tag
         """
         return cls.format_tag(StemName.asdf)
+
+    @classmethod
+    def extra(cls) -> str:
+        """
+        Return a dataset extra tag.
+
+        Returns
+        -------
+        A dataset extra tag
+        """
+        return cls.format_tag(StemName.extra)
dkist_processing_common/parsers/id_bud.py

@@ -1,16 +1,17 @@
 """Base classes for ID bud parsing."""

+from collections import Counter
 from enum import StrEnum
 from typing import Callable
 from typing import Type

-from dkist_processing_common.models.flower_pot import SetStem
+from dkist_processing_common.models.flower_pot import ListStem
 from dkist_processing_common.models.flower_pot import SpilledDirt
 from dkist_processing_common.parsers.l0_fits_access import L0FitsAccess
 from dkist_processing_common.parsers.task import passthrough_header_ip_task


-class ContributingIdsBud(SetStem):
+class ContributingIdsBud(ListStem):
     """Base class for contributing ID buds."""

     def __init__(self, constant_name: str, metadata_key: str | StrEnum):

@@ -35,13 +36,15 @@ class ContributingIdsBud(SetStem):

     def getter(self) -> tuple[str, ...]:
         """
-        Get all ids seen for any type of frame.
+        Get all ids seen for any type of frame, sorted by the number of appearances of that ID.

         Returns
         -------
         IDs from all types of frames
         """
-
+        counts = Counter(self.value_list)  # Count the number of appearances of each ID
+        sorted_ids = tuple(str(item) for item, count in counts.most_common())
+        return sorted_ids


 class TaskContributingIdsBud(ContributingIdsBud):
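Note on the new getter: Counter.most_common() returns (value, count) pairs sorted by descending count, so the first element of the resulting tuple is the most frequently seen ID. A stand-alone sketch with illustrative IDs:

    from collections import Counter

    value_list = ["OP2", "OP1", "OP1", "OP3", "OP1", "OP2"]
    counts = Counter(value_list)
    sorted_ids = tuple(str(item) for item, count in counts.most_common())
    assert sorted_ids == ("OP1", "OP2", "OP3")  # most frequent first

This ordering is what write_extra.py (later in this diff) relies on when it takes element [0] of the constant as the primary OBSPR_ID.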
dkist_processing_common/tasks/l1_output_data.py

@@ -62,15 +62,19 @@ class TransferL1Data(TransferDataBase, GlobusMixin):
         with self.telemetry_span("Upload quality data"):
             self.transfer_quality_data()

-        with self.telemetry_span("Upload
+        with self.telemetry_span("Upload output frames"):
             self.transfer_output_frames()

     def transfer_output_frames(self):
-        """Create a Globus transfer for all output data."""
-
+        """Create a Globus transfer for all output data, as well as any available dataset extras."""
+        output_transfer_items = self.build_output_frame_transfer_list()
+        dataset_extra_transfer_items = self.build_dataset_extra_transfer_list()
+        transfer_items = output_transfer_items + dataset_extra_transfer_items

         logger.info(
             f"Preparing globus transfer {len(transfer_items)} items: "
+            f"{len(output_transfer_items)} output frames. "
+            f"{len(dataset_extra_transfer_items)} dataset extras. "
             f"recipe_run_id={self.recipe_run_id}. "
             f"transfer_items={transfer_items[:3]}..."
         )

@@ -246,19 +250,34 @@ class PublishCatalogAndQualityMessages(L1OutputDataBase, InterserviceBusMixin):
     def run(self) -> None:
         """Run method for this task."""
         with self.telemetry_span("Gather output data"):
-            frames = self.read(
-
+            frames = self.read(
+                tags=self.output_frame_tags
+            )  # frames is kept as a generator as it is much longer than the other file categories
+            extras = list(self.read(tags=self.extra_frame_tags))
+            movies = list(self.read(tags=[Tag.output(), Tag.movie()]))
             quality_data = self.read(tags=[Tag.output(), Tag.quality_data()])
         with self.telemetry_span("Create message objects"):
             messages = []
             messages += self.frame_messages(paths=frames)
             frame_message_count = len(messages)
+            messages += self.frame_messages(paths=extras)
+            extra_message_count = len(extras)
             messages += self.object_messages(paths=movies, object_type="MOVIE")
-            object_message_count = len(
+            object_message_count = len(movies)
             dataset_has_quality_data = self.dataset_has_quality_data
             if dataset_has_quality_data:
                 messages += self.object_messages(paths=quality_data, object_type="QDATA")
         with self.telemetry_span(
-            f"Publish messages: {frame_message_count = }, {object_message_count = }, {dataset_has_quality_data = }"
+            f"Publish messages: {frame_message_count = }, {extra_message_count = }, {object_message_count = }, {dataset_has_quality_data = }"
         ):
+            messages.append(self.quality_report_message)
+        with self.telemetry_span("Publish Catalog Messages") as publish_span:
+            publish_span.set_attributes(
+                {
+                    "frame_message_count": frame_message_count,
+                    "extra_message_count": extra_message_count,
+                    "object_message_count": object_message_count,
+                    "dataset_has_quality_data": dataset_has_quality_data,
+                }
+            )
             self.interservice_bus_publish(messages=messages)
dkist_processing_common/tasks/output_data_base.py

@@ -22,19 +22,23 @@ class OutputDataBase(WorkflowTaskBase, ABC):
         """Get the destination bucket."""
         return self.metadata_store_recipe_run.configuration.destination_bucket

-    def format_object_key(self, path: Path) -> str:
+    def format_object_key(self, path: Path, folder_modifier: str | None = None) -> str:
         """
         Convert output paths into object store keys.

         Parameters
         ----------
         path: the Path to convert
+        folder_modifier: optional folder name to insert into the path

         Returns
         -------
         formatted path in the object store
         """
-
+        if folder_modifier:
+            object_key = self.destination_folder / Path(folder_modifier) / Path(path.name)
+        else:
+            object_key = self.destination_folder / Path(path.name)
         return str(object_key)

     @property

@@ -52,6 +56,11 @@ class OutputDataBase(WorkflowTaskBase, ABC):
         """Tags that uniquely identify L1 fits frames i.e. the dataset-inventory-able frames."""
         return [Tag.output(), Tag.frame()]

+    @property
+    def extra_frame_tags(self) -> list[str]:
+        """Tags that uniquely identify dataset extra fits frames."""
+        return [Tag.output(), Tag.extra()]
+

 class TransferDataBase(OutputDataBase, ObjectStoreMixin, ABC):
     """Base class for transferring data from scratch to somewhere else."""

@@ -73,9 +82,21 @@ class TransferDataBase(OutputDataBase, ObjectStoreMixin, ABC):
         """Build a list of GlobusTransfer items corresponding to all OUTPUT (i.e., L1) frames."""
         science_frame_paths: list[Path] = list(self.read(tags=self.output_frame_tags))

+        return self.build_transfer_list(science_frame_paths)
+
+    def build_dataset_extra_transfer_list(self) -> list[GlobusTransferItem]:
+        """Build a list of GlobusTransfer items corresponding to all extra dataset files."""
+        extra_paths: list[Path] = list(self.read(tags=self.extra_frame_tags))
+
+        return self.build_transfer_list(paths=extra_paths, destination_folder_modifier="extra")
+
+    def build_transfer_list(
+        self, paths: list[Path], destination_folder_modifier: str | None = None
+    ) -> list[GlobusTransferItem]:
+        """Given a list of paths, build a list of GlobusTransfer items."""
         transfer_items = []
-        for p in
-            object_key = self.format_object_key(p)
+        for p in paths:
+            object_key = self.format_object_key(path=p, folder_modifier=destination_folder_modifier)
             destination_path = Path(self.destination_bucket, object_key)
             item = GlobusTransferItem(
                 source_path=p,
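Note on the new folder_modifier parameter: it inserts one extra folder level between the destination folder and the file name, which is how dataset extras end up under an "extra/" prefix in the object store. A stand-alone sketch of the key layout, assuming POSIX paths (the destination folder value is illustrative, standing in for the task's real property):

    from pathlib import Path

    destination_folder = Path("pid_1_118/DATASETID")  # illustrative stand-in

    def format_object_key(path: Path, folder_modifier: str | None = None) -> str:
        # Mirrors the keying logic above: optional subfolder, then the bare filename.
        if folder_modifier:
            return str(destination_folder / Path(folder_modifier) / Path(path.name))
        return str(destination_folder / Path(path.name))

    assert format_object_key(Path("/scratch/frame.fits")) == "pid_1_118/DATASETID/frame.fits"
    assert format_object_key(Path("/scratch/dark.fits"), "extra") == "pid_1_118/DATASETID/extra/dark.fits"

This is also what the new test_build_dataset_extra_transfer_list below checks: "/extra/" appears in the destination path but not in the source path.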
dkist_processing_common/tasks/write_extra.py
ADDED

@@ -0,0 +1,333 @@
+"""Classes to support the generation of dataset extras."""
+
+import uuid
+from abc import ABC
+from abc import abstractmethod
+from datetime import datetime
+
+import numpy as np
+from astropy.io import fits
+from astropy.time import Time
+from dkist_fits_specifications.utils.formatter import reformat_dataset_extra_header
+from dkist_header_validator.spec_validators import spec_extras_validator
+
+from dkist_processing_common.codecs.fits import fits_hdulist_encoder
+from dkist_processing_common.models.extras import DatasetExtraHeaderSection
+from dkist_processing_common.models.extras import DatasetExtraType
+from dkist_processing_common.models.tags import Tag
+from dkist_processing_common.models.task_name import TaskName
+from dkist_processing_common.tasks.write_l1_base import WriteL1Base
+
+
+class WriteL1DatasetExtras(WriteL1Base, ABC):
+    """Class supporting the construction of dataset extras."""
+
+    def dataset_extra_headers(
+        self,
+        filename: str,
+        task_type: TaskName,
+        extra_name: DatasetExtraType,
+        total_exposure: float | None = None,
+        readout_exposure: float | None = None,
+    ) -> dict:
+        """Provide common FITS header keys for dataset extras."""
+        # Build task specific header values
+        match task_type:
+            case TaskName.dark:
+                task_specific_observing_program_execution_id = (
+                    self.constants.dark_observing_program_execution_ids
+                )
+                task_specific_date_begin = self.constants.dark_date_begin
+                task_specific_date_end = self.constants.dark_date_end
+                task_specific_raw_frames_per_fpa = (
+                    0  # can be updated in construction of dataset extra if required
+                )
+                task_specific_telescope_tracking_mode = (
+                    "None"  # can be updated in construction of dataset extra if required
+                )
+                task_specific_coude_table_tracking_mode = (
+                    "None"  # can be updated in construction of dataset extra if required
+                )
+                task_specific_telescope_scanning_mode = (
+                    "None"  # can be updated in construction of dataset extra if required
+                )
+                task_specific_average_light_level = self.constants.dark_average_light_level
+                task_specific_average_telescope_elevation = (
+                    self.constants.dark_average_telescope_elevation
+                )
+                task_specific_average_coude_table_angle = (
+                    self.constants.dark_average_coude_table_angle
+                )
+                task_specific_average_telescope_azimuth = (
+                    self.constants.dark_average_telescope_azimuth
+                )
+                task_specific_gos_level3_status = self.constants.dark_gos_level3_status
+                task_specific_gos_level3_lamp_status = self.constants.dark_gos_level3_lamp_status
+                task_specific_gos_polarizer_status = self.constants.dark_gos_polarizer_status
+                task_specific_gos_polarizer_angle = self.constants.dark_gos_polarizer_angle
+                task_specific_gos_retarder_status = self.constants.dark_gos_retarder_status
+                task_specific_gos_retarder_angle = self.constants.dark_gos_retarder_angle
+                task_specific_gos_level0_status = self.constants.dark_gos_level0_status
+            case TaskName.solar_gain:
+                task_specific_observing_program_execution_id = (
+                    self.constants.solar_gain_observing_program_execution_ids
+                )
+
+                task_specific_date_begin = self.constants.solar_gain_date_begin
+                task_specific_date_end = self.constants.solar_gain_date_end
+                task_specific_raw_frames_per_fpa = self.constants.solar_gain_num_raw_frames_per_fpa
+                task_specific_telescope_tracking_mode = (
+                    self.constants.solar_gain_telescope_tracking_mode
+                )
+                task_specific_coude_table_tracking_mode = (
+                    self.constants.solar_gain_coude_table_tracking_mode
+                )
+                task_specific_telescope_scanning_mode = (
+                    self.constants.solar_gain_telescope_scanning_mode
+                )
+                task_specific_average_light_level = self.constants.solar_gain_average_light_level
+                task_specific_average_telescope_elevation = (
+                    self.constants.solar_gain_average_telescope_elevation
+                )
+                task_specific_average_coude_table_angle = (
+                    self.constants.solar_gain_average_coude_table_angle
+                )
+                task_specific_average_telescope_azimuth = (
+                    self.constants.solar_gain_average_telescope_azimuth
+                )
+                task_specific_gos_level3_status = self.constants.solar_gain_gos_level3_status
+                task_specific_gos_level3_lamp_status = (
+                    self.constants.solar_gain_gos_level3_lamp_status
+                )
+                task_specific_gos_polarizer_status = self.constants.solar_gain_gos_polarizer_status
+                task_specific_gos_polarizer_angle = self.constants.solar_gain_gos_polarizer_angle
+                task_specific_gos_retarder_status = self.constants.solar_gain_gos_retarder_status
+                task_specific_gos_retarder_angle = self.constants.solar_gain_gos_retarder_angle
+                task_specific_gos_level0_status = self.constants.solar_gain_gos_level0_status
+            case TaskName.polcal:
+                task_specific_observing_program_execution_id = (
+                    self.constants.polcal_observing_program_execution_ids
+                )
+
+                task_specific_date_begin = self.constants.polcal_date_begin
+                task_specific_date_end = self.constants.polcal_date_end
+                task_specific_raw_frames_per_fpa = self.constants.polcal_num_raw_frames_per_fpa
+                task_specific_telescope_tracking_mode = (
+                    self.constants.polcal_telescope_tracking_mode
+                )
+                task_specific_coude_table_tracking_mode = (
+                    self.constants.polcal_coude_table_tracking_mode
+                )
+                task_specific_telescope_scanning_mode = (
+                    self.constants.polcal_telescope_scanning_mode
+                )
+                task_specific_average_light_level = self.constants.polcal_average_light_level
+                task_specific_average_telescope_elevation = (
+                    self.constants.polcal_average_telescope_elevation
+                )
+                task_specific_average_coude_table_angle = (
+                    self.constants.polcal_average_coude_table_angle
+                )
+                task_specific_average_telescope_azimuth = (
+                    self.constants.polcal_average_telescope_azimuth
+                )
+                task_specific_gos_level3_status = None
+                task_specific_gos_level3_lamp_status = None
+                task_specific_gos_polarizer_status = None
+                task_specific_gos_polarizer_angle = None
+                task_specific_gos_retarder_status = None
+                task_specific_gos_retarder_angle = None
+                task_specific_gos_level0_status = None
+            case _:
+                raise ValueError(f"Unsupported task type {task_type}")
+
+        start_datetime = datetime.fromisoformat(task_specific_date_begin)
+        end_datetime = datetime.fromisoformat(task_specific_date_end)
+
+        dataset_extra_header = {
+            DatasetExtraHeaderSection.common: {
+                "BUNIT": "count",
+                "DATE": Time.now().fits,
+                "DATE-BEG": task_specific_date_begin,
+                "DATE-END": task_specific_date_end,
+                "TELAPSE": (end_datetime - start_datetime).total_seconds(),
+                "DATE-AVG": (start_datetime + (end_datetime - start_datetime) / 2).isoformat(),
+                "TIMESYS": "UTC",
+                "ORIGIN": "National Solar Observatory",
+                "TELESCOP": "Daniel K. Inouye Solar Telescope",
+                "OBSRVTRY": "Haleakala High Altitude Observatory Site",
+                "NETWORK": "NSF-DKIST",
+                "INSTRUME": self.constants.instrument,
+                "OBJECT": "unknown",
+                "CAM_ID": self.constants.camera_id,
+                "CAMERA": self.constants.camera_name,
+                "BITDEPTH": self.constants.camera_bit_depth,
+                "XPOSURE": total_exposure,
+                "TEXPOSUR": readout_exposure,
+                "HWBIN1": self.constants.hardware_binning_x,
+                "HWBIN2": self.constants.hardware_binning_y,
+                "SWBIN1": self.constants.software_binning_x,
+                "SWBIN2": self.constants.software_binning_y,
+                "NSUMEXP": task_specific_raw_frames_per_fpa,
+                "DSETID": self.constants.dataset_id,
+                "PROCTYPE": "L1_EXTRA",
+                "RRUNID": self.recipe_run_id,
+                "RECIPEID": self.metadata_store_recipe_run.recipeInstance.recipeId,
+                "RINSTID": self.metadata_store_recipe_run.recipeInstanceId,
+                "FILENAME": filename,
+                "HEAD_URL": "",
+                "INFO_URL": self.docs_base_url,
+                "CAL_URL": "",
+                "CALVERS": self.version_from_module_name(),
+                "IDSPARID": (
+                    parameters.inputDatasetPartId
+                    if (parameters := self.metadata_store_input_dataset_parameters)
+                    else None
+                ),
+                "IDSOBSID": (
+                    observe_frames.inputDatasetPartId
+                    if (observe_frames := self.metadata_store_input_dataset_observe_frames)
+                    else None
+                ),
+                "IDSCALID": (
+                    calibration_frames.inputDatasetPartId
+                    if (calibration_frames := self.metadata_store_input_dataset_calibration_frames)
+                    else None
+                ),
+                "WKFLVERS": self.workflow_version,
+                "WKFLNAME": self.workflow_name,
+                "MANPROCD": self.workflow_had_manual_intervention,
+                "FILE_ID": uuid.uuid4().hex,
+                "OBSPR_ID": task_specific_observing_program_execution_id[
+                    0
+                ],  # The OP IDs are stored sorted by number of appearances of each OP ID in the source task type frames
+                "EXTOBSID": ",".join(task_specific_observing_program_execution_id[1:]),
+                "EXPER_ID": self.constants.experiment_id,
+                "PROP_ID": self.constants.proposal_id,
+                "HLSVERS": self.constants.hls_version,
+                "LINEWAV": self.constants.wavelength,
+                "TELTRACK": (
+                    task_specific_telescope_tracking_mode if task_type != TaskName.dark else None
+                ),
+                "TTBLTRCK": (
+                    task_specific_coude_table_tracking_mode if task_type != TaskName.dark else None
+                ),
+                "TELSCAN": (
+                    task_specific_telescope_scanning_mode if task_type != TaskName.dark else None
+                ),
+                "EXTNAME": extra_name,
+            },
+            DatasetExtraHeaderSection.aggregate: {
+                "AVGLLVL": task_specific_average_light_level,
+                "ATELEVAT": task_specific_average_telescope_elevation,
+                "ATTBLANG": task_specific_average_coude_table_angle,
+                "ATAZIMUT": task_specific_average_telescope_azimuth,
+            },
+            DatasetExtraHeaderSection.iptask: {
+                "IPTASK": "GAIN" if "GAIN" in task_type else task_type,
+            },
+            DatasetExtraHeaderSection.gos: {
+                "LVL3STAT": task_specific_gos_level3_status,
+                "LAMPSTAT": task_specific_gos_level3_lamp_status,
+                "LVL2STAT": task_specific_gos_polarizer_status,
+                "POLANGLE": task_specific_gos_polarizer_angle,
+                "LVL1STAT": task_specific_gos_retarder_status,
+                "RETANGLE": task_specific_gos_retarder_angle,
+                "LVL0STAT": task_specific_gos_level0_status,
+            },
+        }
+
+        # Remove specific headers from dark frames as they don't have constants to fill them
+        if task_type == TaskName.dark:
+            for key in ["TELTRACK", "TTBLTRCK", "TELSCAN"]:
+                del dataset_extra_header[DatasetExtraHeaderSection.common][key]
+
+        # Remove specific headers from polcal frames as they don't have constants to fill them
+        if task_type == TaskName.polcal:
+            for key in [
+                "LVL3STAT",
+                "LAMPSTAT",
+                "LVL2STAT",
+                "POLANGLE",
+                "LVL1STAT",
+                "RETANGLE",
+                "LVL0STAT",
+            ]:
+                del dataset_extra_header[DatasetExtraHeaderSection.gos][key]
+
+        return dataset_extra_header
+
+    def build_dataset_extra_header(
+        self,
+        sections: list[DatasetExtraHeaderSection],
+        filename: str,
+        task_type: TaskName,
+        extra_name: DatasetExtraType,
+        total_exposure: float | None = None,
+        readout_exposure: float | None = None,
+    ) -> fits.Header:
+        """Build FITS header for dataset extra file."""
+        header = fits.Header()
+        all_section_headers = self.dataset_extra_headers(
+            filename=filename,
+            task_type=task_type,
+            total_exposure=total_exposure,
+            readout_exposure=readout_exposure,
+            extra_name=extra_name,
+        )
+        for section in sections:
+            header.update(all_section_headers[section].items())
+        return header
+
+    def format_extra_filename(self, extra_name: DatasetExtraType | str, detail: str | None = None):
+        """Format the filename of dataset extras for consistency."""
+        base_filename = f"{self.constants.instrument}_{self.constants.dataset_id}_{extra_name.replace(' ', '-')}"
+        if detail:
+            base_filename += "_" + detail
+        filename_counter = str(self.filename_counter.increment(base_filename))
+        return f"{base_filename}_{filename_counter}.fits"
+
+    def assemble_and_write_dataset_extra(
+        self,
+        data: np.ndarray | list[np.ndarray],
+        header: fits.Header | list[fits.Header],
+        filename: str,
+    ):
+        """Given the data and header information, write the dataset extra."""
+        if isinstance(data, list) and isinstance(header, list):
+            if len(data) != len(header):
+                raise ValueError(
+                    f"{len(data)} data arrays were provided with {len(header)} headers. These must be equal."
+                )
+        if isinstance(data, np.ndarray):
+            data = [data]
+        if isinstance(header, fits.Header):
+            header = [header]
+        hdus = [fits.PrimaryHDU()]  # The first HDU in the list is an empty PrimaryHDU
+        for i, data_array in enumerate(data):
+            tile_size = self.compute_tile_size_for_array(data_array)
+            hdu = fits.CompImageHDU(header=header[i], data=data_array, tile_shape=tile_size)
+            formatted_header = reformat_dataset_extra_header(hdu.header)
+            hdu = fits.CompImageHDU(header=formatted_header, data=hdu.data, tile_shape=tile_size)
+            hdus.append(hdu)
+        self.write(
+            data=fits.HDUList(hdus),
+            tags=[Tag.extra(), Tag.output()],
+            encoder=fits_hdulist_encoder,
+            relative_path=filename,
+        )
+        self.update_framevol(relative_path=filename)
+
+        # Check that the written file passes spec 214 validation if requested
+        if self.validate_l1_on_write:
+            spec_extras_validator.validate(self.scratch.absolute_path(filename), extra=False)
+
+    @abstractmethod
+    def run(self) -> None:
+        """
+        For each dataset extra:
+
+        * Gather the source data in whatever manner is necessary
+        * Build a header using the `build_dataset_extra_header` method to help with header construction
+        * Write the dataset extra using `assemble_and_write_dataset_extra()`
+        """
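Note on the new class: WriteL1DatasetExtras leaves only run() abstract, so an instrument pipeline subclass just gathers source data, builds a header, and writes. A minimal hypothetical subclass, modeled on the test task later in this diff (the class name, intermediate-frame tags, and exposure values are illustrative):

    import numpy as np

    from dkist_processing_common.codecs.fits import fits_array_decoder
    from dkist_processing_common.models.extras import DatasetExtraHeaderSection
    from dkist_processing_common.models.extras import DatasetExtraType
    from dkist_processing_common.models.tags import Tag
    from dkist_processing_common.models.task_name import TaskName
    from dkist_processing_common.tasks.write_extra import WriteL1DatasetExtras


    class WriteDarkExtra(WriteL1DatasetExtras):
        """Hypothetical task that publishes an averaged dark frame as a dataset extra."""

        def run(self) -> None:
            # 1. Gather the source data (here: an intermediate DARK array).
            data: np.ndarray = next(
                self.read(
                    tags=[Tag.task(TaskName.dark), Tag.intermediate()],
                    decoder=fits_array_decoder,
                )
            )
            # 2. Build a consistent filename and header from the shared helpers.
            filename = self.format_extra_filename(DatasetExtraType.dark)
            header = self.build_dataset_extra_header(
                sections=[
                    DatasetExtraHeaderSection.common,
                    DatasetExtraHeaderSection.aggregate,
                    DatasetExtraHeaderSection.iptask,
                    DatasetExtraHeaderSection.gos,
                ],
                filename=filename,
                task_type=TaskName.dark,
                extra_name=DatasetExtraType.dark,
                total_exposure=0.058,  # illustrative exposure values
                readout_exposure=0.029,
            )
            # 3. Write the tagged, tile-compressed, optionally validated FITS file.
            self.assemble_and_write_dataset_extra(data=data, header=header, filename=filename)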
dkist_processing_common/tasks/write_l1.py

@@ -1,11 +1,9 @@
 """Task(s) for writing level 1 data as 214 compliant fits files."""

-import importlib
 import logging
 import uuid
 from abc import ABC
 from abc import abstractmethod
-from functools import cached_property
 from pathlib import Path
 from string import ascii_uppercase
 from typing import Literal

@@ -34,7 +32,7 @@ from dkist_processing_common.models.fried_parameter import r0_valid
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.models.wavelength import WavelengthRange
 from dkist_processing_common.parsers.l0_fits_access import L0FitsAccess
-from dkist_processing_common.tasks import
+from dkist_processing_common.tasks.write_l1_base import WriteL1Base

 logger = logging.getLogger(__name__)

@@ -43,7 +41,7 @@ __all__ = ["WriteL1Frame"]
 from dkist_processing_common.tasks.mixin.metadata_store import MetadataStoreMixin


-class WriteL1Frame(WorkflowTaskBase, MetadataStoreMixin, ABC):
+class WriteL1Frame(WriteL1Base, ABC):
     """
     Task to convert final calibrated science frames into spec 214 compliant level 1 frames.

@@ -108,45 +106,6 @@ class WriteL1Frame(WorkflowTaskBase, MetadataStoreMixin, ABC):
             self.scratch.absolute_path(relative_path), extra=False
         )

-    @cached_property
-    def tile_size_param(self) -> int | None:
-        """Get the tile size parameter for compression."""
-        return self.metadata_store_recipe_run.configuration.tile_size
-
-    @cached_property
-    def validate_l1_on_write(self) -> bool:
-        """Check for validate on write."""
-        return self.metadata_store_recipe_run.configuration.validate_l1_on_write
-
-    @cached_property
-    def workflow_had_manual_intervention(self):
-        """Indicate determining if any provenance capturing steps had manual intervention."""
-        for provenance_record in self.metadata_store_recipe_run.recipeRunProvenances:
-            if provenance_record.isTaskManual:
-                return True
-        return False
-
-    def compute_tile_size_for_array(self, data: np.ndarray) -> list | None:
-        """Determine the tile size to use for compression accounting for array shape minimums."""
-        if self.tile_size_param is None:
-            return None
-        tile_size = []
-        for dim_size in data.shape:
-            if dim_size < self.tile_size_param:
-                tile_size.append(dim_size)
-            else:
-                tile_size.append(self.tile_size_param)
-        return tile_size
-
-    def update_framevol(self, relative_path: str) -> None:
-        """Update FRAMEVOL key to be exactly the size of the file on-disk."""
-        full_path = self.scratch.workflow_base_path / relative_path
-        compressed_size = full_path.stat().st_size / 1024 / 1024
-        hdul = fits.open(full_path, mode="update")
-        hdul[1].header["FRAMEVOL"] = compressed_size
-        hdul.flush()
-        del hdul
-
     def replace_header_values(self, header: fits.Header, data: np.ndarray) -> fits.Header:
         """Replace header values that should already exist with new values."""
         header["FILE_ID"] = uuid.uuid4().hex

@@ -430,18 +389,6 @@ class WriteL1Frame(WorkflowTaskBase, MetadataStoreMixin, ABC):
         )
         return header

-    def version_from_module_name(self) -> str:
-        """
-        Get the value of __version__ from a module given its name.
-
-        Returns
-        -------
-        The value of __version__
-        """
-        package = self.__module__.split(".")[0]
-        module = importlib.import_module(package)
-        return module.__version__
-
     @abstractmethod
     def add_dataset_headers(
         self, header: fits.Header, stokes: Literal["I", "Q", "U", "V"]
dkist_processing_common/tasks/write_l1_base.py
ADDED

@@ -0,0 +1,67 @@
+"""Base class for writing L1 FITS products with headers."""
+
+import importlib
+from abc import ABC
+from functools import cached_property
+
+import numpy as np
+from astropy.io import fits
+
+from dkist_processing_common.tasks import WorkflowTaskBase
+from dkist_processing_common.tasks.mixin.metadata_store import MetadataStoreMixin
+
+
+class WriteL1Base(WorkflowTaskBase, MetadataStoreMixin, ABC):
+    """Base class for writing L1 FITS products with headers."""
+
+    def version_from_module_name(self) -> str:
+        """
+        Get the value of __version__ from a module given its name.
+
+        Returns
+        -------
+        The value of __version__
+        """
+        package = self.__module__.split(".")[0]
+        module = importlib.import_module(package)
+        return module.__version__
+
+    @cached_property
+    def workflow_had_manual_intervention(self) -> bool:
+        """Indicate whether any provenance-capturing steps had manual intervention."""
+        for provenance_record in self.metadata_store_recipe_run.recipeRunProvenances:
+            if provenance_record.isTaskManual:
+                return True
+        return False
+
+    def update_framevol(self, relative_path: str) -> None:
+        """Update FRAMEVOL key to be exactly the size of the file on-disk."""
+        full_path = self.scratch.workflow_base_path / relative_path
+        compressed_size = full_path.stat().st_size / 1024 / 1024
+        hdul = fits.open(full_path, mode="update")
+        for i in range(1, len(hdul)):
+            hdul[i].header["FRAMEVOL"] = compressed_size
+        hdul.flush()
+        del hdul
+
+    @cached_property
+    def tile_size_param(self) -> int | None:
+        """Get the tile size parameter for compression."""
+        return self.metadata_store_recipe_run.configuration.tile_size
+
+    def compute_tile_size_for_array(self, data: np.ndarray) -> list | None:
+        """Determine the tile size to use for compression accounting for array shape minimums."""
+        if self.tile_size_param is None:
+            return None
+        tile_size = []
+        for dim_size in data.shape:
+            if dim_size < self.tile_size_param:
+                tile_size.append(dim_size)
+            else:
+                tile_size.append(self.tile_size_param)
+        return tile_size
+
+    @cached_property
+    def validate_l1_on_write(self) -> bool:
+        """Check for validate on write."""
+        return self.metadata_store_recipe_run.configuration.validate_l1_on_write
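Note on compute_tile_size_for_array: each array dimension is capped at the configured tile size, so small axes keep their full extent. The per-dimension loop is equivalent to a min(); a worked example, assuming a tile_size_param of 64 from the recipe run configuration:

    tile_size_param = 64
    shape = (1, 128, 40)

    # Equivalent to the if/else loop in the method above.
    tile_size = [min(dim_size, tile_size_param) for dim_size in shape]
    assert tile_size == [1, 64, 40]  # each dimension is capped at the parameter

Also note that update_framevol here loops over every HDU after the primary, so multi-extension dataset extras get FRAMEVOL (the on-disk size in MiB) stamped into each compressed image header, where the write_l1.py version it replaces only updated hdul[1].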
dkist_processing_common/tests/test_construct_dataset_extras.py
ADDED

@@ -0,0 +1,219 @@
+from dataclasses import asdict
+from dataclasses import dataclass
+from pathlib import Path
+
+import numpy as np
+import pytest
+from astropy.io import fits
+
+from dkist_processing_common._util.scratch import WorkflowFileSystem
+from dkist_processing_common.codecs.fits import fits_array_decoder
+from dkist_processing_common.codecs.fits import fits_array_encoder
+from dkist_processing_common.models.extras import DatasetExtraHeaderSection
+from dkist_processing_common.models.extras import DatasetExtraType
+from dkist_processing_common.models.tags import Tag
+from dkist_processing_common.models.task_name import TaskName
+from dkist_processing_common.tasks.write_extra import WriteL1DatasetExtras
+from dkist_processing_common.tests.mock_metadata_store import RecipeRunResponseMapping
+from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory
+from dkist_processing_common.tests.mock_metadata_store import make_default_recipe_run_response
+
+
+@dataclass
+class FakeConstantDb:
+    INSTRUMENT: str = "VBI"
+    DATASET_ID: str = "DATASETID"
+    AVERAGE_CADENCE: float = 10.0
+    MINIMUM_CADENCE: float = 10.0
+    MAXIMUM_CADENCE: float = 10.0
+    VARIANCE_CADENCE: float = 0.0
+    STOKES_PARAMS: tuple = ("I", "Q", "U", "V")
+    PROPOSAL_ID: str = "PROPID1"
+    EXPERIMENT_ID: str = "EXPERID1"
+    CAMERA_ID: str = "CAMERA1"
+    CAMERA_NAME: str = "Camera One"
+    CAMERA_BIT_DEPTH: int = 16
+    HARDWARE_BINNING_X: int = 1
+    HARDWARE_BINNING_Y: int = 1
+    SOFTWARE_BINNING_X: int = 1
+    SOFTWARE_BINNING_Y: int = 1
+    HLS_VERSION: str = "1.8"
+    WAVELENGTH: float = 854.2
+    # Dark
+    DARK_OBSERVING_PROGRAM_EXECUTION_IDS: tuple = ("OP1", "OP2", "OP3")
+    DARK_DATE_BEGIN: str = "2023-01-01T00:00:00"
+    DARK_DATE_END: str = "2023-01-01T01:00:00"
+    DARK_TELESCOPE_TRACKING_MODE: str = "None"
+    DARK_COUDE_TABLE_TRACKING_MODE: str = "fixed coude table angle"
+    DARK_TELESCOPE_SCANNING_MODE: str = "None"
+    DARK_AVERAGE_LIGHT_LEVEL: float = 5.0
+    DARK_AVERAGE_TELESCOPE_ELEVATION: float = 45.0
+    DARK_AVERAGE_COUDE_TABLE_ANGLE: float = 2.0
+    DARK_AVERAGE_TELESCOPE_AZIMUTH: float = 180.0
+    DARK_GOS_LEVEL3_STATUS: str = "clear"
+    DARK_GOS_LEVEL3_LAMP_STATUS: str = "off"
+    DARK_GOS_POLARIZER_STATUS: str = "clear"
+    DARK_GOS_POLARIZER_ANGLE: str = "0.0"
+    DARK_GOS_RETARDER_STATUS: str = "clear"
+    DARK_GOS_RETARDER_ANGLE: str = "0.0"
+    DARK_GOS_LEVEL0_STATUS: str = "DarkShutter"
+    # Solar Gain
+    SOLAR_GAIN_OBSERVING_PROGRAM_EXECUTION_IDS: tuple = ("OP1", "OP2", "OP3")
+    SOLAR_GAIN_DATE_BEGIN: str = "2023-01-01T00:00:00"
+    SOLAR_GAIN_DATE_END: str = "2023-01-01T01:00:00"
+    SOLAR_GAIN_NUM_RAW_FRAMES_PER_FPA: int = 1
+    SOLAR_GAIN_TELESCOPE_TRACKING_MODE: str = "None"
+    SOLAR_GAIN_COUDE_TABLE_TRACKING_MODE: str = "fixed coude table angle"
+    SOLAR_GAIN_TELESCOPE_SCANNING_MODE: str = "None"
+    SOLAR_GAIN_AVERAGE_LIGHT_LEVEL: float = 5.0
+    SOLAR_GAIN_AVERAGE_TELESCOPE_ELEVATION: float = 45.0
+    SOLAR_GAIN_AVERAGE_COUDE_TABLE_ANGLE: float = 2.0
+    SOLAR_GAIN_AVERAGE_TELESCOPE_AZIMUTH: float = 180.0
+    SOLAR_GAIN_GOS_LEVEL3_STATUS: str = "clear"
+    SOLAR_GAIN_GOS_LEVEL3_LAMP_STATUS: str = "off"
+    SOLAR_GAIN_GOS_POLARIZER_STATUS: str = "clear"
+    SOLAR_GAIN_GOS_POLARIZER_ANGLE: str = "0.0"
+    SOLAR_GAIN_GOS_RETARDER_STATUS: str = "clear"
+    SOLAR_GAIN_GOS_RETARDER_ANGLE: str = "0.0"
+    SOLAR_GAIN_GOS_LEVEL0_STATUS: str = "DarkShutter"
+    # Polcal
+    POLCAL_OBSERVING_PROGRAM_EXECUTION_IDS: tuple = ("OP1", "OP2", "OP3")
+    POLCAL_DATE_BEGIN: str = "2023-01-01T00:00:00"
+    POLCAL_DATE_END: str = "2023-01-01T01:00:00"
+    POLCAL_NUM_RAW_FRAMES_PER_FPA: int = 1
+    POLCAL_TELESCOPE_TRACKING_MODE: str = "None"
+    POLCAL_COUDE_TABLE_TRACKING_MODE: str = "fixed coude table angle"
+    POLCAL_TELESCOPE_SCANNING_MODE: str = "None"
+    POLCAL_AVERAGE_LIGHT_LEVEL: float = 5.0
+    POLCAL_AVERAGE_TELESCOPE_ELEVATION: float = 45.0
+    POLCAL_AVERAGE_COUDE_TABLE_ANGLE: float = 2.0
+    POLCAL_AVERAGE_TELESCOPE_AZIMUTH: float = 180.0
+
+
+class ConstructDatasetExtrasTest(WriteL1DatasetExtras):
+    def run(self):
+        # Make a dataset extra for each task type
+
+        for task_type in [
+            TaskName.dark,
+            TaskName.solar_gain,
+        ]:
+            filename = self.format_extra_filename(task_type, detail="BEAM1")
+            data = next(
+                self.read(
+                    tags=[Tag.task(task_type), Tag.intermediate()], decoder=fits_array_decoder
+                )
+            )
+            header = self.build_dataset_extra_header(
+                sections=[
+                    DatasetExtraHeaderSection.common,
+                    DatasetExtraHeaderSection.aggregate,
+                    DatasetExtraHeaderSection.iptask,
+                    DatasetExtraHeaderSection.gos,
+                ],
+                filename=filename,
+                task_type=task_type,
+                total_exposure=0.058,
+                readout_exposure=0.029,
+                extra_name=(
+                    DatasetExtraType.dark if task_type == "DARK" else DatasetExtraType.solar_gain
+                ),
+            )
+
+            self.assemble_and_write_dataset_extra(data=data, header=header, filename=filename)
+
+        task_type = TaskName.polcal
+        filename = self.format_extra_filename(task_type, detail="BEAM1")
+        data = next(
+            self.read(tags=[Tag.task(task_type), Tag.intermediate()], decoder=fits_array_decoder)
+        )
+        header = self.build_dataset_extra_header(
+            sections=[
+                DatasetExtraHeaderSection.common,
+                DatasetExtraHeaderSection.aggregate,
+                DatasetExtraHeaderSection.iptask,
+                DatasetExtraHeaderSection.gos,
+            ],
+            filename=filename,
+            task_type=task_type,
+            total_exposure=0.058,
+            readout_exposure=0.029,
+            extra_name=DatasetExtraType.demodulation_matrices,
+        )
+        self.assemble_and_write_dataset_extra(data=data, header=header, filename=filename)
+
+
+@pytest.fixture()
+def construct_dataset_extras_task(request, recipe_run_id, tmp_path):
+    with ConstructDatasetExtrasTest(
+        recipe_run_id=recipe_run_id,
+        workflow_name="workflow_name",
+        workflow_version="workflow_version",
+    ) as task:
+        task.scratch = WorkflowFileSystem(recipe_run_id=recipe_run_id, scratch_base_path=tmp_path)
+        # Write an intermediate product to be used as the source for each dataset extra
+        for task_type in [
+            TaskName.dark,
+            TaskName.solar_gain,
+            TaskName.polcal,
+        ]:
+            task.write(
+                data=np.random.random(size=(1, 128, 128)),
+                tags=[Tag.task(task_type), Tag.intermediate()],
+                encoder=fits_array_encoder,
+            )
+        task.constants._update(asdict(FakeConstantDb()))
+        yield task
+        task._purge()
+
+
+@pytest.fixture
+def fake_gql_client_default_configuration():
+    """Create GraphQL client Mock that returns result without recipe run configuration."""
+    recipe_run_response = make_default_recipe_run_response()
+    recipe_run_response.configuration = None
+    new_response_mapping = RecipeRunResponseMapping(response=recipe_run_response)
+    FakeGQLClientDefaultConfiguration = fake_gql_client_factory(
+        response_mapping_override=new_response_mapping
+    )
+
+    return FakeGQLClientDefaultConfiguration
+
+
+def test_construct_dataset_extras(
+    construct_dataset_extras_task, mocker, fake_gql_client_default_configuration
+):
+    """
+    Given: A ConstructDatasetExtras task with source data
+    When: Running the ConstructDatasetExtras task
+    Then: Dataset extra files are produced with expected header values
+    """
+    mocker.patch(
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
+        new=fake_gql_client_default_configuration,
+    )
+    task = construct_dataset_extras_task
+    task()
+    dataset_extra_files = list(task.read(tags=[Tag.output(), Tag.extra()]))
+    assert len(dataset_extra_files) == 3
+    for filename in dataset_extra_files:
+        split_filename = Path(filename).name.split("_")
+        assert split_filename[0] == "VBI"
+        assert split_filename[1] == task.constants.dataset_id
+        assert split_filename[-2] == "BEAM1"
+        assert split_filename[-1] == "1.fits"
+        hdul = fits.open(filename)
+        for i in range(1, len(hdul)):
+            assert isinstance(hdul[i], fits.CompImageHDU)
+            header = hdul[i].header
+            assert header["LINEWAV"] == 854.2
+            assert header["INSTRUME"] == "VBI"
+            assert header["ATAZIMUT"] == 180.0
+            assert header["FRAMEVOL"] is not None
+            assert header["IDSOBSID"] == 2
+            assert header["XPOSURE"] == 0.058
+            assert header["OBSPR_ID"] == "OP1"
+            assert header["EXTOBSID"] == "OP2,OP3"
+            assert header["EXTNAME"] in ["DARK", "SOLAR GAIN", "DEMODULATION MATRICES"]
+            assert header["TELAPSE"] == 3600
+            assert header["DATE-AVG"] == "2023-01-01T00:30:00"
dkist_processing_common/tests/test_output_data_base.py

@@ -57,7 +57,11 @@ def transfer_data_task(recipe_run_id, tmp_path, mocker, fake_gql_client):
     unwanted_file_obj = uuid4().hex.encode("utf8")
     task.write(unwanted_file_obj, tags=[Tag.frame()])

-
+    # Write a dataset extra
+    extra_file_obj = uuid4().hex.encode("utf8")
+    task.write(extra_file_obj, tags=[Tag.output(), Tag.extra()])
+
+    yield task, output_file_obj, extra_file_obj
     task._purge()

@@ -81,7 +85,7 @@ def test_build_output_frame_transfer_list(transfer_data_task):
     When: Building a transfer list of all OUTPUT frames
     Then: All OUTPUT frames are listed and no non-OUTPUT frames are listed
     """
-    task, output_file_obj = transfer_data_task
+    task, output_file_obj, _ = transfer_data_task

     transfer_list = task.build_output_frame_transfer_list()

@@ -89,3 +93,21 @@ def test_build_output_frame_transfer_list(transfer_data_task):
     transfer_item = transfer_list[0]
     with transfer_item.source_path.open(mode="rb") as f:
         assert output_file_obj == f.read()
+
+
+def test_build_dataset_extra_transfer_list(transfer_data_task):
+    """
+    Given: A task based on TransferDataBase with some files, some of which are EXTRA_OUTPUT
+    When: Building a transfer list of all EXTRA_OUTPUT frames
+    Then: All EXTRA_OUTPUT frames are listed and no non-EXTRA_OUTPUT frames are listed
+    """
+    task, _, extra_file_obj = transfer_data_task
+
+    transfer_list = task.build_dataset_extra_transfer_list()
+
+    assert len(transfer_list) == 1
+    transfer_item = transfer_list[0]
+    assert "/extra/" not in str(transfer_item.source_path)
+    assert "/extra/" in str(transfer_item.destination_path)
+    with transfer_item.source_path.open(mode="rb") as f:
+        assert extra_file_obj == f.read()
dkist_processing_common/tests/test_transfer_l1_output_data.py

@@ -25,6 +25,7 @@ def transfer_l1_data_task(recipe_run_id, tmp_path, fake_constants_db):
     task.constants._update(fake_constants_db)
     frame_path = task.scratch.workflow_base_path / Path("frame.fits")
     movie_path = task.scratch.workflow_base_path / Path("movie.mp4")
+    extra_path = task.scratch.workflow_base_path / Path("extra.fits")
     with open(frame_path, "w") as f:
         f.write("Frame")
     task.tag(path=frame_path, tags=[Tag.frame(), Tag.output()])
dkist_processing_common/tests/test_trial_catalog.py

@@ -57,6 +57,9 @@ def scratch_with_l1_frames(recipe_run_id, tmp_path) -> WorkflowFileSystem:
         scratch.write(
             file_obj, tags=[Tag.output(), Tag.frame()], relative_path=f"{uuid4().hex}.dat"
         )
+        scratch.write(
+            file_obj, tags=[Tag.output(), Tag.extra()], relative_path=f"{uuid4().hex}.dat"
+        )

     return scratch
{dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dkist-processing-common
-Version: 12.1.0rc1
+Version: 12.2.0rc1
 Summary: Common task classes used by the DKIST science data processing pipelines
 Author-email: NSO / AURA <dkistdc@nso.edu>
 License: BSD-3-Clause

@@ -16,7 +16,7 @@ Description-Content-Type: text/x-rst
 Requires-Dist: asdf<4.0.0,>=3.5.0
 Requires-Dist: astropy>=7.0.0
 Requires-Dist: dkist-fits-specifications<5.0,>=4.0.0
-Requires-Dist: dkist-header-validator<6.0,>=5.
+Requires-Dist: dkist-header-validator<6.0,>=5.3.0
 Requires-Dist: dkist-processing-core==7.0.1
 Requires-Dist: dkist-processing-pac<4.0,>=3.1
 Requires-Dist: dkist-service-configuration<5.0,>=4.2.0
{dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/RECORD
RENAMED
@@ -1,12 +1,5 @@
 changelog/.gitempty,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-changelog/
-changelog/282.feature.2.rst,sha256=6OfMMb7pzbStUmHA4YldfprALhPR5Uc_qZrKh9VdenE,176
-changelog/282.feature.rst,sha256=WHCYqxTGJlTrrjttCqRnZizIx2CEz34mLYisW36zJRo,201
-changelog/284.feature.rst,sha256=Um3KupQuiTtX9tpLE4zLk3TW_wc3ZoCGxmFA-dGQJdU,120
-changelog/285.feature.rst,sha256=FdvhAXvf1je9xLNSgZMl32TYomFibMKd1k6cf8ihkXI,142
-changelog/285.misc.rst,sha256=7ie7VKD5yTMjbXs1V-jO2I2_vsCJ49WSK2g-B37NWFs,180
-changelog/286.feature.rst,sha256=Dzqo-mB_plyAhUZGuOJ_e0fXNoRLmwgfXrjfi1hqGHg,189
-changelog/287.misc.rst,sha256=mqc5FFqdtMFrZETadk6m_wkgtoH8kvJjnR-IGMYXDBY,52
+changelog/272.feature.rst,sha256=hqbgG7F741E8KO0k_l9WNFNNi4A5ZYpdABnr34yhbmk,254
 dkist_processing_common/__init__.py,sha256=GQ9EBnYhkOnt-qODclAoLS_g5YVhurxfg1tjVtI9rDI,320
 dkist_processing_common/config.py,sha256=f511KVpK24sQO4dDr4L6PMj5dz0jmWgnx2Y-3DpV0cw,5991
 dkist_processing_common/manual.py,sha256=bIVVyLsbXMh-g_2L3kGROL-1TtJe0_XviHsp7Br31x8,7023

@@ -29,8 +22,9 @@ dkist_processing_common/codecs/str.py,sha256=Xqt5k8IhLc95KiiNiFwB1JWcVVc6T8AfcLr
 dkist_processing_common/fonts/Lato-Regular.ttf,sha256=1jbkaDIx-THtoiLViOlE0IK_0726AvkovuRhwPGFslE,656568
 dkist_processing_common/fonts/__init__.py,sha256=hBvZRtkoGRPlNDWCK-ZePXdSIlThCcjwBDfYaamVgAw,101
 dkist_processing_common/models/__init__.py,sha256=6LMqemdzVZ87fRrpAsbEnTtWZ02_Gu_oajsUlwGRH_Q,74
-dkist_processing_common/models/constants.py,sha256
+dkist_processing_common/models/constants.py,sha256=QHLDdMlYab6g5dEivBjkf7O3mW7aJZPSd1OC_EfuRDM,20490
 dkist_processing_common/models/dkist_location.py,sha256=6Nk0wvv4R8ptlrV7BXon7abq4YLvmTdUmPsDN5G8nWc,971
+dkist_processing_common/models/extras.py,sha256=RI4JWOinYl1rRyA4anNDj5nCSIrvwrix_dOod9bcyHA,1207
 dkist_processing_common/models/fits_access.py,sha256=imKqL4-_g6gTR-IeIjZ6qkMhQX3JujdrKFrTd9gOXnw,5605
 dkist_processing_common/models/flower_pot.py,sha256=bSmnfN1r5ASx_E9GtvFzdT7lciLCpMy80TeSLztakZk,12289
 dkist_processing_common/models/fried_parameter.py,sha256=ro_H2Eo3I88lRf1wJjZfTc_XOjhgLt4whIQR_sjAFbM,1609

@@ -41,7 +35,7 @@ dkist_processing_common/models/message_queue_binding.py,sha256=Y4otwkkePrLRSjlry
 dkist_processing_common/models/metric_code.py,sha256=WSLF9yqcVzk9L9u8WBhgtpUYUWYsG4ZFWFRFtezdUCM,848
 dkist_processing_common/models/parameters.py,sha256=9An3SxUEBI-oYHjICQ_q-IIScTfpvVeAFH7jLzBzzWI,9649
 dkist_processing_common/models/quality.py,sha256=TmDVbvPbfl5CIIs1ioD5guLUoEOFTfiJESvDjLTLl5s,3981
-dkist_processing_common/models/tags.py,sha256=
+dkist_processing_common/models/tags.py,sha256=emvQDsLwzfzqSvH2CwvOd85DRlb5pBCKgOgzjbEKxlY,12335
 dkist_processing_common/models/task_name.py,sha256=uAl7qTK4Xx1nqPAhNAe5nAXqxwPwQzAq58YmoccX6xQ,567
 dkist_processing_common/models/telemetry.py,sha256=XVcLNgHCZsP9L7oYiklyLUoqQtWt_xjEkuf70Kbudz4,839
 dkist_processing_common/models/wavelength.py,sha256=4UhRVoNvCHZitXo5S1oRdewadbmGfmDK6wetMV06POA,967

@@ -50,7 +44,7 @@ dkist_processing_common/parsers/average_bud.py,sha256=mTlrBlo7Pe5WnNmui4wQC24-8Q
 dkist_processing_common/parsers/cs_step.py,sha256=6SCSbCgVhEGxJIIrCBByWCmIad_vuje28jECFZ6qlZI,6454
 dkist_processing_common/parsers/dsps_repeat.py,sha256=Jg6oI9-PtFQbQHbGul6_eiRzBKr0Z2HIGOitG0G5CD4,1642
 dkist_processing_common/parsers/experiment_id_bud.py,sha256=LUehIqB56hmDwARph1itSUsPenFHScfrrRuZmcCi4xA,960
-dkist_processing_common/parsers/id_bud.py,sha256=
+dkist_processing_common/parsers/id_bud.py,sha256=QvXTOF9kjekNd_M2ZTQwPhhQwoj2m24dqp2OuCNyHB0,2479
 dkist_processing_common/parsers/l0_fits_access.py,sha256=Ol3eo8yyNbGqbN2whhD2jBNoqhOrlwmH8DbMmCLtNk0,1033
 dkist_processing_common/parsers/l1_fits_access.py,sha256=BWojvcFl_RmkrRWHe1WxDCsPeexervlRFPothuXAyoI,4410
 dkist_processing_common/parsers/lookup_bud.py,sha256=IpDtwCsJRiOanDTpoaav3dZXe55WZpYWTnyxIiEWxvk,4047

@@ -67,15 +61,17 @@ dkist_processing_common/parsers/wavelength.py,sha256=P5C9mG8DAKK3GB3vWNRBI5l7pAW
 dkist_processing_common/tasks/__init__.py,sha256=l23ctjNsKJbHbbqaZBMeOPaOtw0hmITEljI_JJ-CVsU,627
 dkist_processing_common/tasks/assemble_movie.py,sha256=1ixDG-f4ODt0vywqVccG3aodLljVO5OGlvuMO9EEvcU,12767
 dkist_processing_common/tasks/base.py,sha256=itAHCvzcodo-q8_AjpWoRaM86BlcjWDpCIiUP7uwmP0,13236
-dkist_processing_common/tasks/l1_output_data.py,sha256=
-dkist_processing_common/tasks/output_data_base.py,sha256=
+dkist_processing_common/tasks/l1_output_data.py,sha256=GKmsjJ9FItQqY4ChreiGZVD_gZvCTJF6rzkVyDyaFP8,11499
+dkist_processing_common/tasks/output_data_base.py,sha256=lJZ3olayA_nUWbxzozfbIbJFD3j2VpxPMt1CB9LB5IY,4763
 dkist_processing_common/tasks/parse_l0_input_data.py,sha256=xcWmwSKwxEeManltCFrqVG224Vk-BYRE6g32VOK--rI,19241
 dkist_processing_common/tasks/quality_metrics.py,sha256=cvGF6tJ8yAvxOvkeG3tWxYwL885BrFW5X3V7_MSzL-A,12481
 dkist_processing_common/tasks/teardown.py,sha256=rwT9lWINVDF11-az_nx-Z5ykMTX_SJCchobpU6sErgk,2360
 dkist_processing_common/tasks/transfer_input_data.py,sha256=4TJqlDjTc503QFvzSmMert99r9KHDwyd72r1kHAVhQA,5879
 dkist_processing_common/tasks/trial_catalog.py,sha256=Yf-BKNCT_OHwJsxxZP8p2eRW04CcY0tw5_YIe1e9RQY,10535
 dkist_processing_common/tasks/trial_output_data.py,sha256=pUdrNlAzuir4AUdfax5_MOplB-A9NrXErMJmAwtJmLA,6811
-dkist_processing_common/tasks/
+dkist_processing_common/tasks/write_extra.py,sha256=YObiq0xMy5Z1QtpahAKjfELlZ_hcTLtiuD1WKZREeg0,16366
+dkist_processing_common/tasks/write_l1.py,sha256=upsaFN3S0r4MasrzjZ6i0gNF_bvQLf_oyYoRogB7odc,21163
+dkist_processing_common/tasks/write_l1_base.py,sha256=iqejlYb3CSagUyi6U56nmgItzrwcQxLIDwgruxZho3A,2474
 dkist_processing_common/tasks/mixin/__init__.py,sha256=-g-DQbU7m1bclJYuFe3Yh757V-35GIDTbstardKQ7nU,68
 dkist_processing_common/tasks/mixin/globus.py,sha256=ugejtZ_MR5LesQYuXM1uICd_yWDE7cZZr0qnWCh75R8,6732
 dkist_processing_common/tasks/mixin/interservice_bus.py,sha256=M6R922l7gJSmmU_vswUXxy-c5DWNrIRjQu9H9CSgGfU,1081

@@ -92,6 +88,7 @@ dkist_processing_common/tests/test_assemble_quality.py,sha256=-F22jMY6mPy65VZ1TZ
 dkist_processing_common/tests/test_base.py,sha256=gsyBG2R6Ufx7CzbHeGMagUwM9yCfpN4gCSZ6-aH2q48,6643
 dkist_processing_common/tests/test_codecs.py,sha256=WpF15UYklpNRgETI4EwXsgbNzxMcHlelfpprBbupC0I,23907
 dkist_processing_common/tests/test_constants.py,sha256=I_KcJs7ScCn53GYhEO6qjWrrnfZuyC1IVYOy87Pjlg4,6565
+dkist_processing_common/tests/test_construct_dataset_extras.py,sha256=LjFoLt1oChS298T8vmMK3xT0-5LSxRO3gUexxYSVY7w,9113
 dkist_processing_common/tests/test_cs_step.py,sha256=RA0QD3D8eaL3YSOL_gIJ9wkngy14RQ2jbD-05KAziW4,2408
 dkist_processing_common/tests/test_dkist_location.py,sha256=-_OoSw4SZDLFyIuOltHvM6PQjxm5hTiJQsiTGZ8Sadc,456
 dkist_processing_common/tests/test_fits_access.py,sha256=a50B4IAAH5NH5zeudTqyy0b5uWKJwJuzQLUdK1LoOHM,12832

@@ -101,7 +98,7 @@ dkist_processing_common/tests/test_input_dataset.py,sha256=wnQbZxBYywG5CEXces2WW
 dkist_processing_common/tests/test_interservice_bus.py,sha256=QrBeZ8dh497h6nxA8-aVUIGDcSj8y9DIXIk9I_HkXr0,3001
 dkist_processing_common/tests/test_interservice_bus_mixin.py,sha256=IptJkW7Qeu2Y742NKXEgkok2VdS600keLgCD3Y9iw3A,4131
 dkist_processing_common/tests/test_manual_processing.py,sha256=iHF7yQPlar9niYAGXtFv28Gw3Undlds38yMfszk4ccY,1037
-dkist_processing_common/tests/test_output_data_base.py,sha256=
+dkist_processing_common/tests/test_output_data_base.py,sha256=VoXW7g5yE2Lzb-HpyhVQYbPpiCZ7YYQAuShoAFVO5nE,3983
 dkist_processing_common/tests/test_parameters.py,sha256=CUEUIGBPMCUXPll0G0UxFDbMXi8lmnjRwXBarGX1PAQ,14033
 dkist_processing_common/tests/test_parse_l0_input_data.py,sha256=7yn1VGwC3S-0JOYIMflNHhaXEnPhwjJtewD2WxBgugM,13239
 dkist_processing_common/tests/test_publish_catalog_messages.py,sha256=7WRsEwoLHGeaCmLTAW4tU_BlZw0e3hwx65uWSGzfuYE,2393

@@ -115,11 +112,11 @@ dkist_processing_common/tests/test_task_name.py,sha256=kqFr59XX2K87xzfTlClzDV4-J
 dkist_processing_common/tests/test_task_parsing.py,sha256=2_OOmeZQWD17XAd_ECYmodJzD_iRIBKjCYdGh38BOx4,4421
 dkist_processing_common/tests/test_teardown.py,sha256=DaliHSGsiQBZaFkf5wb3XBo6rHNPmx2bmQtVymYeBN4,5601
 dkist_processing_common/tests/test_transfer_input_data.py,sha256=eyAAWXpTHQ8aew87-MncWpYBn4DAZrTSOL3LvlQfR5Q,12611
-dkist_processing_common/tests/test_transfer_l1_output_data.py,sha256=
-dkist_processing_common/tests/test_trial_catalog.py,sha256=
+dkist_processing_common/tests/test_transfer_l1_output_data.py,sha256=OV2XMEkCbEuL9_i2S3P9Jfyf15tcBkAP3JNE8Jn_A9k,2137
+dkist_processing_common/tests/test_trial_catalog.py,sha256=vnGLV859shiYEP6qOVoYga9vCBMRs6UkNnGtnaps26Q,9548
 dkist_processing_common/tests/test_trial_output_data.py,sha256=fu3iGNV_FI8LOacezyt4HvXnxY3g1_UiBuRI63yz5Oo,11977
 dkist_processing_common/tests/test_workflow_task_base.py,sha256=LTVusltNrsGUOvw9G323am4CXebgE4tJhP6gZCcS0CQ,10457
-dkist_processing_common/tests/test_write_l1.py,sha256=
+dkist_processing_common/tests/test_write_l1.py,sha256=qraecD9Vv6jsqg00XOSlM1IdK9l8i36VoHLoLj8ypiU,27918
 docs/Makefile,sha256=qnlVz6PuBqE39NfHWuUnHhNEA-EFgT2-WJNNNy9ttfk,4598
 docs/changelog.rst,sha256=S2jPASsWlQxSlAPqdvNrYvhk9k3FcFWNXFNDYXBSjl4,120
 docs/conf.py,sha256=7W2iHKs3J5RhAz0JZafC_UnfMvcpZN7j4LLUmQtk2D0,1891

@@ -128,7 +125,7 @@ docs/landing_page.rst,sha256=aPAuXFhBx73lEZ59B6E6JXxkK0LlxzD0n-HXqHrfumQ,746
 docs/make.bat,sha256=mBAhtURwhQ7yc95pqwJzlhqBSvRknr1aqZ5s8NKvdKs,4513
 docs/requirements.txt,sha256=Kbl_X4c7RQZw035YTeNB63We6I7pvXFU4T0Uflp2yDY,29
 licenses/LICENSE.rst,sha256=piZaQplkzOMmH1NXg6QIdo9wwo9pPCoHkvm2-DmH76E,1462
-dkist_processing_common-12.
-dkist_processing_common-12.
-dkist_processing_common-12.
-dkist_processing_common-12.
+dkist_processing_common-12.2.0rc1.dist-info/METADATA,sha256=jYcIT0L5XRExn309Uky-DKXEKarMZJaHgfu0xF3EXl0,14214
+dkist_processing_common-12.2.0rc1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+dkist_processing_common-12.2.0rc1.dist-info/top_level.txt,sha256=LJhd1W-Vn90K8HnQDIE4r52YDpUjjMWDnllAWHBByW0,48
+dkist_processing_common-12.2.0rc1.dist-info/RECORD,,
changelog/280.misc.rst
DELETED

@@ -1 +0,0 @@
-Speed up the reading of INPUT files in Parse tasks by turning off image decompression and checksum checks.

changelog/282.feature.2.rst
DELETED

changelog/282.feature.rst
DELETED

changelog/284.feature.rst
DELETED

@@ -1 +0,0 @@
-Speed up parsing of the `*CadenceBud`, `TaskDateBeginBud`, and `[Task]NearFloatBud` by basing these buds on `ListStem`.

changelog/285.feature.rst
DELETED

changelog/285.misc.rst
DELETED

changelog/286.feature.rst
DELETED

changelog/287.misc.rst
DELETED

@@ -1 +0,0 @@
-Convert the TimeLookupBud to be a SetStem constant.
{dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/WHEEL
RENAMED
File without changes

{dkist_processing_common-12.1.0rc1.dist-info → dkist_processing_common-12.2.0rc1.dist-info}/top_level.txt
RENAMED
File without changes