dkist-processing-cryonirsp 1.3.4__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
Potentially problematic release: this version of dkist-processing-cryonirsp might be problematic.
- changelog/.gitempty +0 -0
- dkist_processing_cryonirsp/__init__.py +11 -0
- dkist_processing_cryonirsp/config.py +12 -0
- dkist_processing_cryonirsp/models/__init__.py +1 -0
- dkist_processing_cryonirsp/models/constants.py +248 -0
- dkist_processing_cryonirsp/models/exposure_conditions.py +26 -0
- dkist_processing_cryonirsp/models/parameters.py +296 -0
- dkist_processing_cryonirsp/models/tags.py +168 -0
- dkist_processing_cryonirsp/models/task_name.py +14 -0
- dkist_processing_cryonirsp/parsers/__init__.py +1 -0
- dkist_processing_cryonirsp/parsers/cryonirsp_l0_fits_access.py +111 -0
- dkist_processing_cryonirsp/parsers/cryonirsp_l1_fits_access.py +30 -0
- dkist_processing_cryonirsp/parsers/exposure_conditions.py +163 -0
- dkist_processing_cryonirsp/parsers/map_repeats.py +40 -0
- dkist_processing_cryonirsp/parsers/measurements.py +55 -0
- dkist_processing_cryonirsp/parsers/modstates.py +31 -0
- dkist_processing_cryonirsp/parsers/optical_density_filters.py +40 -0
- dkist_processing_cryonirsp/parsers/polarimetric_check.py +120 -0
- dkist_processing_cryonirsp/parsers/scan_step.py +412 -0
- dkist_processing_cryonirsp/parsers/time.py +80 -0
- dkist_processing_cryonirsp/parsers/wavelength.py +26 -0
- dkist_processing_cryonirsp/tasks/__init__.py +19 -0
- dkist_processing_cryonirsp/tasks/assemble_movie.py +202 -0
- dkist_processing_cryonirsp/tasks/bad_pixel_map.py +96 -0
- dkist_processing_cryonirsp/tasks/beam_boundaries_base.py +279 -0
- dkist_processing_cryonirsp/tasks/ci_beam_boundaries.py +55 -0
- dkist_processing_cryonirsp/tasks/ci_science.py +169 -0
- dkist_processing_cryonirsp/tasks/cryonirsp_base.py +67 -0
- dkist_processing_cryonirsp/tasks/dark.py +98 -0
- dkist_processing_cryonirsp/tasks/gain.py +251 -0
- dkist_processing_cryonirsp/tasks/instrument_polarization.py +447 -0
- dkist_processing_cryonirsp/tasks/l1_output_data.py +44 -0
- dkist_processing_cryonirsp/tasks/linearity_correction.py +582 -0
- dkist_processing_cryonirsp/tasks/make_movie_frames.py +302 -0
- dkist_processing_cryonirsp/tasks/mixin/__init__.py +1 -0
- dkist_processing_cryonirsp/tasks/mixin/beam_access.py +52 -0
- dkist_processing_cryonirsp/tasks/mixin/corrections.py +177 -0
- dkist_processing_cryonirsp/tasks/mixin/intermediate_frame.py +193 -0
- dkist_processing_cryonirsp/tasks/mixin/linearized_frame.py +309 -0
- dkist_processing_cryonirsp/tasks/mixin/shift_measurements.py +297 -0
- dkist_processing_cryonirsp/tasks/parse.py +281 -0
- dkist_processing_cryonirsp/tasks/quality_metrics.py +271 -0
- dkist_processing_cryonirsp/tasks/science_base.py +511 -0
- dkist_processing_cryonirsp/tasks/sp_beam_boundaries.py +270 -0
- dkist_processing_cryonirsp/tasks/sp_dispersion_axis_correction.py +484 -0
- dkist_processing_cryonirsp/tasks/sp_geometric.py +585 -0
- dkist_processing_cryonirsp/tasks/sp_science.py +299 -0
- dkist_processing_cryonirsp/tasks/sp_solar_gain.py +475 -0
- dkist_processing_cryonirsp/tasks/trial_output_data.py +61 -0
- dkist_processing_cryonirsp/tasks/write_l1.py +1033 -0
- dkist_processing_cryonirsp/tests/__init__.py +1 -0
- dkist_processing_cryonirsp/tests/conftest.py +456 -0
- dkist_processing_cryonirsp/tests/header_models.py +592 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/__init__.py +0 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/l0_cals_only.py +541 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/l0_to_l1.py +615 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/linearize_only.py +96 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/local_trial_helpers.py +592 -0
- dkist_processing_cryonirsp/tests/test_assemble_movie.py +144 -0
- dkist_processing_cryonirsp/tests/test_assemble_qualilty.py +517 -0
- dkist_processing_cryonirsp/tests/test_bad_pixel_maps.py +115 -0
- dkist_processing_cryonirsp/tests/test_ci_beam_boundaries.py +106 -0
- dkist_processing_cryonirsp/tests/test_ci_science.py +355 -0
- dkist_processing_cryonirsp/tests/test_corrections.py +126 -0
- dkist_processing_cryonirsp/tests/test_cryo_base.py +202 -0
- dkist_processing_cryonirsp/tests/test_cryo_constants.py +76 -0
- dkist_processing_cryonirsp/tests/test_dark.py +287 -0
- dkist_processing_cryonirsp/tests/test_gain.py +278 -0
- dkist_processing_cryonirsp/tests/test_instrument_polarization.py +531 -0
- dkist_processing_cryonirsp/tests/test_linearity_correction.py +245 -0
- dkist_processing_cryonirsp/tests/test_make_movie_frames.py +111 -0
- dkist_processing_cryonirsp/tests/test_parameters.py +266 -0
- dkist_processing_cryonirsp/tests/test_parse.py +1439 -0
- dkist_processing_cryonirsp/tests/test_quality.py +203 -0
- dkist_processing_cryonirsp/tests/test_sp_beam_boundaries.py +112 -0
- dkist_processing_cryonirsp/tests/test_sp_dispersion_axis_correction.py +155 -0
- dkist_processing_cryonirsp/tests/test_sp_geometric.py +319 -0
- dkist_processing_cryonirsp/tests/test_sp_make_movie_frames.py +121 -0
- dkist_processing_cryonirsp/tests/test_sp_science.py +483 -0
- dkist_processing_cryonirsp/tests/test_sp_solar.py +198 -0
- dkist_processing_cryonirsp/tests/test_trial_create_quality_report.py +79 -0
- dkist_processing_cryonirsp/tests/test_trial_output_data.py +251 -0
- dkist_processing_cryonirsp/tests/test_workflows.py +9 -0
- dkist_processing_cryonirsp/tests/test_write_l1.py +436 -0
- dkist_processing_cryonirsp/workflows/__init__.py +2 -0
- dkist_processing_cryonirsp/workflows/ci_l0_processing.py +77 -0
- dkist_processing_cryonirsp/workflows/sp_l0_processing.py +84 -0
- dkist_processing_cryonirsp/workflows/trial_workflows.py +190 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/METADATA +194 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/RECORD +111 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/WHEEL +5 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/top_level.txt +4 -0
- docs/Makefile +134 -0
- docs/bad_pixel_calibration.rst +47 -0
- docs/beam_angle_calculation.rst +53 -0
- docs/beam_boundary_computation.rst +88 -0
- docs/changelog.rst +7 -0
- docs/ci_science_calibration.rst +33 -0
- docs/conf.py +52 -0
- docs/index.rst +21 -0
- docs/l0_to_l1_cryonirsp_ci-full-trial.rst +10 -0
- docs/l0_to_l1_cryonirsp_ci.rst +10 -0
- docs/l0_to_l1_cryonirsp_sp-full-trial.rst +10 -0
- docs/l0_to_l1_cryonirsp_sp.rst +10 -0
- docs/linearization.rst +43 -0
- docs/make.bat +170 -0
- docs/requirements.txt +1 -0
- docs/requirements_table.rst +8 -0
- docs/scientific_changelog.rst +10 -0
- docs/sp_science_calibration.rst +59 -0
- licenses/LICENSE.rst +11 -0
dkist_processing_cryonirsp/tasks/linearity_correction.py

@@ -0,0 +1,582 @@
"""CryoNIRSP Linearity Correction Task."""
from dataclasses import dataclass
from typing import Generator

import numpy as np
from astropy.io import fits
from dkist_processing_common.codecs.fits import fits_access_decoder
from dkist_processing_common.codecs.fits import fits_array_encoder
from dkist_processing_common.codecs.fits import fits_hdulist_encoder
from dkist_service_configuration.logging import logger
from numba import njit
from numba import prange

from dkist_processing_cryonirsp.models.tags import CryonirspTag
from dkist_processing_cryonirsp.parsers.cryonirsp_l0_fits_access import CryonirspRampFitsAccess
from dkist_processing_cryonirsp.tasks.cryonirsp_base import CryonirspTaskBase

GB_TO_BYTES: int = 1_000_000_000

__all__ = ["LinearityCorrection"]


@dataclass
class _RampSet:
    current_ramp_set_num: int
    time_obs: str
    num_frames_in_ramp: int
    exposure_times_ms: np.ndarray
    frame_shape: tuple[int]
    last_frame_name: str
    last_frame_fits_access: CryonirspRampFitsAccess
    frames_to_process: np.ndarray
    index_offset_to_first_frame: int


class LinearityCorrection(CryonirspTaskBase):
    """Task class for performing linearity correction on all input frames, regardless of task type."""

    record_provenance = True

    def run(self):
        """
        Run method for this task.

        Steps to be performed:
        - Iterate through frames by ramp set (identified by date-obs)
        - Identify the frames in the ramp set and populate the ramp set data structure
        - Perform linearity correction on the ramp set, minimizing the memory footprint based on a maximum memory limit for the ramp set
        - Collate tags for linearity corrected frame(s)
        - Write linearity corrected frame with updated tags

        Returns
        -------
        None
        """
        num_ramp_sets = len(self.constants.time_obs_list)
        for ramp_set in self.identify_ramp_sets():
            time_obs = ramp_set.time_obs
            ramp_set_num = ramp_set.current_ramp_set_num
            logger.info(
                f"Processing frames from {time_obs}: ramp set {ramp_set_num} of {num_ramp_sets}"
            )
            output_array = self.reduce_ramp_set(
                ramp_set=ramp_set,
                mode="LookUpTable",
                camera_readout_mode=self.constants.camera_readout_mode,
                lin_curve=self.parameters.linearization_polyfit_coeffs,
                thresholds=self.parameters.linearization_thresholds,
            )
            # Normalize by the exposure time and correct for the Optical Density filter
            exposure_corrected_output_array = self.apply_exposure_corrections(
                output_array, ramp_set
            )
            # Set the tags for the linearized output frame
            tags = [
                CryonirspTag.linearized(),
                CryonirspTag.frame(),
                CryonirspTag.time_obs(time_obs),
            ]
            # The last frame in the ramp is used for the header
            self.write(
                data=exposure_corrected_output_array,
                header=ramp_set.last_frame_fits_access.header,
                tags=tags,
                encoder=fits_array_encoder,
            )

    def identify_ramp_sets(self) -> Generator[_RampSet, None, None]:
        """
        Identify all the ramp sets present in the input data.

        A ramp set consists of all the non-destructive readouts (NDRs) that form a single
        exposure for the Cryonirsp cameras. All the frames from a single ramp must be processed
        together. A ramp is identified as all the files having the same DATE-OBS value. Although
        a ramp number header key exists, this value is not a unique identifier for a ramp set when
        frames from multiple subtasks are combined into an input dataset in a single scratch dir.

        If a ramp set contains only a single frame, it is discarded with a log note.

        Returns
        -------
        Generator which yields _RampSet instances
        """
        for ramp_set_num, time_obs in enumerate(self.constants.time_obs_list):
            input_objects = list(
                self.read(
                    tags=[
                        CryonirspTag.input(),
                        CryonirspTag.frame(),
                        CryonirspTag.time_obs(time_obs),
                    ],
                    decoder=fits_access_decoder,
                    fits_access_class=CryonirspRampFitsAccess,
                )
            )

            if not self.is_ramp_valid(input_objects):
                continue

            ramp_set = self.populate_ramp_set(time_obs, ramp_set_num)
            yield ramp_set

    def is_ramp_valid(self, ramp_object_list: list[CryonirspRampFitsAccess]) -> bool:
        """
        Check if a given ramp is valid.

        Current validity checks are:

        1. All frames in the ramp have the same value for NUM_FRAMES_IN_RAMP
        2. The value of NUM_FRAMES_IN_RAMP equals the length of actual frames found

        If a ramp is not valid then warnings are logged and `False` is returned.
        """
        frames_in_ramp_set = {o.num_frames_in_ramp for o in ramp_object_list}
        task_type = ramp_object_list[0].ip_task_type

        if len(frames_in_ramp_set) > 1:
            logger.info(
                f"Not all frames have the same FRAMES_IN_RAMP value. Set is {frames_in_ramp_set}. Ramp is task {task_type}. Skipping ramp."
            )
            return False

        num_frames_in_ramp = frames_in_ramp_set.pop()
        num_ramp_objects = len(ramp_object_list)
        if num_ramp_objects != num_frames_in_ramp:
            logger.info(
                f"Missing some ramp frames. Expected {num_frames_in_ramp} from header value, but only have {num_ramp_objects}. Ramp is task {task_type}. Skipping ramp."
            )
            return False

        return True

    @staticmethod
    def tag_list_for_single_ramp_frame(time_obs: str, frame_num: int) -> list[CryonirspTag]:
        """Return the tag list required to identify a single ramp frame."""
        tags = [
            CryonirspTag.input(),
            CryonirspTag.frame(),
            CryonirspTag.time_obs(time_obs),
            CryonirspTag.curr_frame_in_ramp(frame_num),
        ]
        return tags

    def read_single_ramp_frame(self, time_obs: str, frame_num: int) -> CryonirspRampFitsAccess:
        """
        Read a single file from a single ramp set based on the observe time and frame number.

        Parameters
        ----------
        time_obs
            The DATE-OBS header value identifying the desired ramp set
        frame_num
            The frame number in the ramp to be accessed. This number is 1-based and is used to
            generate the curr_frame_in_ramp tag that identifies the desired frame

        Returns
        -------
        A CryonirspRampFitsAccess object containing the desired frame
        """
        tags = self.tag_list_for_single_ramp_frame(time_obs, frame_num)
        fits_obj_list = list(
            self.read(
                tags=tags,
                decoder=fits_access_decoder,
                fits_access_class=CryonirspRampFitsAccess,
            )
        )
        if len(fits_obj_list) != 1:
            raise RuntimeError(f"Multiple files or no files for {tags =}")
        fits_obj = fits_obj_list[0]
        return fits_obj

    def get_ordered_exposure_time_list(self, time_obs: str, num_frames_in_ramp: int) -> np.ndarray:
        """
        Return a list of exposure times for this ramp, ordered by frame in ramp.

        Parameters
        ----------
        time_obs
            The DATE-OBS value identifying the ramp
        num_frames_in_ramp
            The number of frames in the ramp

        Returns
        -------
        np.ndarray of the exposure times for the NDRs in the ramp set.

        This method iterates through all the frames in the ramp to construct the list. While this could
        be incorporated into other methods that iterate through a ramp set, it is kept separate for clarity.
        We read one frame at a time to avoid holding all the frames in memory simultaneously.
        """
        exp_time_list = []
        for frame_num in range(1, num_frames_in_ramp + 1):
            fits_obj = self.read_single_ramp_frame(time_obs, frame_num)
            exp_time_list.append(fits_obj.fpa_exposure_time_ms)
        return np.array(exp_time_list, dtype=np.float32)

    def populate_ramp_set(self, time_obs: str, idx: int) -> _RampSet | None:
        """
        Populate a _RampSet dataclass for the ramp identified by time_obs.

        Parameters
        ----------
        time_obs
            The DATE-OBS value identifying the ramp
        idx
            The index number representing this ramp set out of the total number of ramp sets (zero-based)

        Returns
        -------
        A populated _RampSet object representing the specified ramp in the input data set

        The last frame in the set is read to access the shape of the data frame.
        """
        actual_num_frames_in_ramp = self.count(CryonirspTag.time_obs(time_obs))
        exp_times = self.get_ordered_exposure_time_list(time_obs, actual_num_frames_in_ramp)
        last_frame = self.read_single_ramp_frame(time_obs, actual_num_frames_in_ramp)
        ramp_set_num = idx + 1
        # The default list of curr_frame_in_ramp tag numbers to use, which may be altered later on
        frames_to_process = np.array(range(1, actual_num_frames_in_ramp + 1), dtype=int)
        ramp_set = _RampSet(
            current_ramp_set_num=ramp_set_num,
            time_obs=time_obs,
            num_frames_in_ramp=actual_num_frames_in_ramp,
            exposure_times_ms=exp_times,
            frame_shape=last_frame.data.shape,
            last_frame_name=last_frame.name,
            last_frame_fits_access=CryonirspRampFitsAccess.from_header(last_frame.header),
            frames_to_process=frames_to_process,
            # initial offset from zero-based array index to 1-based frame-in-ramp number
            index_offset_to_first_frame=1,
        )
        return ramp_set

    def apply_exposure_corrections(self, input_array: np.ndarray, ramp_set: _RampSet) -> np.ndarray:
        """
        Normalize the array by converting to counts per second and correcting for Optical Density filter attenuation.

        Parameters
        ----------
        input_array
            The linearized array to be normalized
        ramp_set
            The _RampSet object associated with the linearized array

        Returns
        -------
        The normalized output array
        """
        # Normalize the array by the final ramp exposure time converted to seconds
        # This makes the output units counts per sec
        exposure_normalized_array = input_array / (ramp_set.exposure_times_ms[-1] / 1000.0)
        # Correct the counts for the Optical Density filter used
        log_od_filter_attenuation = self.parameters.linearization_filter_attenuation_dict[
            ramp_set.last_frame_fits_access.filter_name
        ]
        od_filter_attenuation = 10**log_od_filter_attenuation
        return exposure_normalized_array / od_filter_attenuation
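
    # Illustrative numbers (assumed, not from the pipeline): if the final NDR exposure time
    # is 500 ms and the attenuation dictionary entry for the filter is -2, the linearized
    # counts are divided by 0.5 s and then by 10**-2, i.e. scaled by a net factor of 200 to
    # give counts per second corrected for the filter attenuation.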

    def reduce_ramp_set(
        self,
        ramp_set: _RampSet,
        mode: str = None,
        camera_readout_mode: str = None,
        lin_curve: np.ndarray = None,
        thresholds: np.ndarray = None,
    ) -> np.ndarray:
        """
        Process a single ramp from a set of input frames.

        Parameters
        ----------
        ramp_set
            The _RampSet data structure for the current ramp

        mode
            'LookUpTable', 'FastCDS', 'FitUpTheRamp' (ignored if data is line by line)

        camera_readout_mode
            'FastUpTheRamp', 'SlowUpTheRamp', or 'LineByLine'

        lin_curve
            The lincurve array is the set of coefficients for a 3rd order polynomial which represents the overall
            non-linear response of the detector pixels to exposure time. The cubic is evaluated for each measured
            pixel value and then used to correct the measured pixel value by dividing out the non-linear
            response.

        thresholds
            The threshold array represents the flux value for each pixel above which the measured flux is
            inaccurate and starts to decrease with increasing exposure time. This is used in the linearization
            algorithm to mask off values in a ramp that exceed the threshold and use only the values below
            the threshold to estimate the linear flux per non-destructive readout.

        Returns
        -------
        processed array
        """
        # NB: The threshold table is originally constructed for the full sensor size (2k x 2k)
        # Extract the portion of the thresholds that corresponds to the ROI used in the camera.
        roi_1_origin_x = self.constants.roi_1_origin_x
        roi_1_origin_y = self.constants.roi_1_origin_y
        roi_1_size_x = self.constants.roi_1_size_x
        roi_1_size_y = self.constants.roi_1_size_y
        thresh_roi = thresholds[
            roi_1_origin_y : (roi_1_origin_y + roi_1_size_y),
            roi_1_origin_x : (roi_1_origin_x + roi_1_size_x),
        ]

        if mode == "LookUpTable" and camera_readout_mode == "FastUpTheRamp":
            return self.reduce_ramp_set_for_lookup_table_and_fast_up_the_ramp(
                ramp_set=ramp_set,
                lin_curve=lin_curve,
                thresholds=thresh_roi,
            )
        raise ValueError(
            f"Linearization mode {mode} and camera readout mode {camera_readout_mode} is currently not supported."
        )

    def reduce_ramp_set_for_lookup_table_and_fast_up_the_ramp(
        self,
        ramp_set: _RampSet,
        lin_curve: np.ndarray,
        thresholds: np.ndarray,
    ) -> np.ndarray:
        """Process a single ramp from a set of input frames whose mode is 'LookUpTable' and camera readout mode is 'FastUpTheRamp'."""
        # In this mode we toss the first frame in the ramp
        ramp_set.num_frames_in_ramp -= 1
        ramp_set.frames_to_process = ramp_set.frames_to_process[1:]
        ramp_set.exposure_times_ms = ramp_set.exposure_times_ms[1:]
        ramp_set.index_offset_to_first_frame += 1
        processed_frame = self.linearize_fast_up_the_ramp_with_lookup_table(
            ramp_set=ramp_set,
            lin_curve=lin_curve,
            thresholds=thresholds,
        )
        return processed_frame

    def linearize_fast_up_the_ramp_with_lookup_table(
        self,
        ramp_set: _RampSet,
        lin_curve: np.ndarray,
        thresholds: np.ndarray,
    ) -> np.ndarray:
        """
        Perform linearization on a set of ramp frames.

        Parameters
        ----------
        ramp_set
            The _RampSet object for the ramp set to be linearized
        lin_curve
            The linearity coefficient array used in the algorithm
        thresholds
            The threshold array used in the algorithm

        Returns
        -------
        The linearized array for this ramp set

        The algorithm proceeds as follows:

        1. The number of chunks required to process the full ramp stack is computed
        2. Iterate over the number of chunks and do the following:
           a. Compute the size of the current chunk (the last chunk may be smaller than the others)
           b. Compute the slice object representing the portion of the frames to be extracted into the chunk
           c. Load the frame slices into the chunk stack
           d. Linearize the chunk stack
           e. Store the linearized chunk in the proper location in the final linearized array
        3. Return the linearized ramp, reshaping it to the original frame shape
        """
        thresholds_flattened = thresholds.flatten()
        frame_shape = ramp_set.frame_shape
        linearized_frame = np.zeros(np.prod(frame_shape), dtype=np.float32)
        num_frame_size_elements = int(np.prod(frame_shape))
        chunk_size_nelem = self.compute_linear_chunk_size(
            num_frame_size_elements, ramp_set.num_frames_in_ramp
        )
        # num_chunks = num full chunks + a single partial chunk, if needed
        num_chunks = num_frame_size_elements // chunk_size_nelem + int(
            (num_frame_size_elements % chunk_size_nelem) > 0
        )
        logger.info(
            f"{num_chunks = }, {chunk_size_nelem = }, in bytes = {chunk_size_nelem * ramp_set.num_frames_in_ramp * np.dtype(np.float32).itemsize}"
        )

        # Iterate over all the chunks
        elements_remaining = int(num_frame_size_elements)
        offset = 0
        for chunk in range(1, num_chunks + 1):
            logger.info(f"Processing chunk {chunk} of {num_chunks}")
            current_chunk_size_nelem = min(chunk_size_nelem, elements_remaining)
            current_slice = slice(offset, offset + current_chunk_size_nelem)
            chunk_stack = self.load_chunk_stack(
                ramp_set, current_chunk_size_nelem, ramp_set.num_frames_in_ramp, current_slice
            )
            linearized_frame[current_slice] = self.linearize_chunk(
                chunk_stack,
                lin_curve,
                thresholds_flattened[current_slice],
                ramp_set.exposure_times_ms,
            )
            offset += chunk_size_nelem
            elements_remaining -= chunk_size_nelem

        return linearized_frame.reshape(frame_shape)

    def load_chunk_stack(
        self,
        ramp_set: _RampSet,
        current_chunk_size: int,
        trimmed_frames_in_ramp: int,
        current_slice: slice,
    ) -> np.ndarray:
        """
        Load a chunk's worth of the ramp stack into an array and return it.

        Parameters
        ----------
        ramp_set
            The ramp_set from which to load the chunk stack
        current_chunk_size
            The size in linear elements of the chunk stack (the number of pixel stacks in the chunk)
        trimmed_frames_in_ramp
            The final number of frames in the ramp set
        current_slice
            The slice of the frames to load into the chunk_stack

        Returns
        -------
        The chunk stack for the specified ramp set and slice

        Notes
        -----
        The files are read one at a time to minimize memory use. The frame_num loop variable is used
        to identify the desired frame to read. It is one-based and is used to generate the curr_frame_in_ramp
        tag. We tossed the first frame of the ramp, so we must start with 2. Conversely, the offset into the
        array is zero-based and is 2 less than the frame number.
        """
        chunk_stack = np.zeros((current_chunk_size, trimmed_frames_in_ramp), np.float32)
        for frame_num in ramp_set.frames_to_process:
            frame = self.read_single_ramp_frame(ramp_set.time_obs, frame_num).data
            frame_pos_in_stack = frame_num - ramp_set.index_offset_to_first_frame
            chunk_stack[:current_chunk_size, frame_pos_in_stack] = frame.flatten()[current_slice]
        return chunk_stack
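
    # Concretely, in the FastUpTheRamp case: after the first NDR is discarded,
    # frames_to_process is [2, 3, ..., N] and index_offset_to_first_frame is 2, so frame 2
    # lands in column 0 of chunk_stack, frame 3 in column 1, and so on.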

    def compute_linear_chunk_size(self, frame_size_nelem: int, num_frames_in_ramp: int) -> int:
        """
        Compute the number of pixel stacks that constitute a 'chunk'.

        Parameters
        ----------
        frame_size_nelem
            The size of a data frame expressed as the total number of elements
        num_frames_in_ramp
            The number of frames in a ramp set. If any frames are to be tossed initially,
            this number represents the final frame count after any discards.

        Returns
        -------
        The number of pixel stacks in a chunk

        A chunk is the largest linear stack of frame pixels that can be handled by the linearization
        algorithm in one calculation without exceeding the task worker memory limitations. The algorithm
        must hold essentially twice the size of the linear stack in memory. We assume we can safely
        use 80% of the available memory for this processing.

        The variables listed below are either in number of bytes or number of elements,
        as indicated by their suffixes
        """
        ramp_size_in_bytes = frame_size_nelem * np.dtype(np.float32).itemsize * num_frames_in_ramp
        available_memory_in_gb = self.parameters.linearization_max_memory_gb
        max_chunk_size_in_bytes = round(0.8 * available_memory_in_gb * GB_TO_BYTES // 2)
        chunk_size_nelem = round(
            min(max_chunk_size_in_bytes, ramp_size_in_bytes)
            // np.dtype(np.float32).itemsize
            // num_frames_in_ramp
        )
        return chunk_size_nelem
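
    # Illustrative sizing (assumed values, not pipeline defaults): a 2048 x 2048 float32
    # frame is 2048 * 2048 * 4 bytes ~= 16.8 MB, so a 10-NDR ramp is ~168 MB. With a 4 GB
    # memory limit, max_chunk_size_in_bytes = round(0.8 * 4e9 // 2) = 1.6e9, which exceeds
    # the ramp size, so chunk_size_nelem equals the full frame size and the ramp is
    # linearized in a single chunk; larger ramps or tighter limits produce multiple chunks.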

    def linearize_chunk(
        self, chunk_stack: np.ndarray, linc: np.ndarray, thresh: np.ndarray, exptimes: np.ndarray
    ) -> np.ndarray:
        """
        Linearize a portion (chunk) of the entire ramp stack.

        Parameters
        ----------
        chunk_stack
            The portion (chunk) of the overall ramp stack to be linearized
        linc
            The linearity coefficient array used in the algorithm
        thresh
            The threshold array used in the algorithm
        exptimes
            The list of exposure times for the frames in the stack

        Returns
        -------
        An array containing a linearized slice of the full ramp stack
        """
        raw_data = self.lin_correct(chunk_stack, linc)
        slopes = self.get_slopes(exptimes, raw_data, thresh)
        # Scale the slopes by the exposure time to convert to counts
        processed_frame = slopes * np.nanmax(exptimes)
        return processed_frame

    # The methods below are derived versions of the same codes in Tom Schad's h2rg.py
    @staticmethod
    @njit(parallel=False)
    def lin_correct(raw_data: np.ndarray, linc: np.ndarray) -> np.ndarray:
        """
        Correct the measured raw fluence to normalized flux per non-destructive readout (NDR).

        Uses a 3rd order polynomial based on measured lamp calibration data to remove the non-linear
        response of each pixel in the array. The resulting ramp is essentially linear in ADUs vs exposure time.
        """
        return raw_data / (
            linc[0] + raw_data * (linc[1] + raw_data * (linc[2] + raw_data * linc[3]))
        )
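
    # Note: the divisor above is the Horner-form evaluation of the cubic
    # linc[0] + linc[1]*x + linc[2]*x**2 + linc[3]*x**3 at x = raw_data, i.e. the fitted
    # non-linear pixel response that is divided out of each measured value.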

    @staticmethod
    @njit(parallel=False)
    def get_slopes(exptimes: np.ndarray, data: np.ndarray, thresholds: np.ndarray):
        """
        Compute the weighted least squares estimate of the normalized flux per exposure time increment for a single ramp.

        The threshold array represents the flux value for each pixel above which the measured flux is
        inaccurate and starts to decrease with increasing exposure time. The threshold is used to set the weight for
        a particular non-destructive readout (NDR) to zero if the pixel value exceeds the threshold. The thresholded
        weights are then used to compute the weighted least squares estimate of the flux per NDR, which is the slope
        of the ramp.
        """
        num_pix, num_ramps = data.shape
        slopes = np.zeros(num_pix)

        for i in prange(num_pix):
            px_data = data[i, :]
            weights = np.sqrt(px_data)
            weights[px_data > thresholds[i]] = 0.0

            # If there are at least 2 NDRs that are below the threshold
            if np.sum(weights > 0) >= 2:
                weight_sum = np.sum(weights)

                exp_time_weighted_mean = np.dot(weights, exptimes) / weight_sum
                px_data_weighted_mean = np.dot(weights, px_data) / weight_sum

                corrected_exp_times = exptimes - exp_time_weighted_mean
                corrected_px_data = px_data - px_data_weighted_mean

                weighted_exp_times = weights * corrected_exp_times
                slopes[i] = np.dot(weighted_exp_times, corrected_px_data) / np.dot(
                    weighted_exp_times, corrected_exp_times
                )

        return slopes
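
For readers who want to exercise the math outside the pipeline, the weighted least-squares slope estimate at the core of LinearityCorrection.get_slopes, and the cubic correction in lin_correct, can be reproduced with a few lines of NumPy. The sketch below is illustrative only: the array values, threshold, and polynomial coefficients are invented, and it handles a single pixel stack rather than the njit-compiled per-pixel loop.

import numpy as np

# One pixel's NDR values and exposure times; the last NDR exceeds the threshold.
exptimes = np.array([10.0, 20.0, 30.0, 40.0], dtype=np.float32)  # ms per NDR
px_data = np.array([100.0, 200.0, 300.0, 310.0], dtype=np.float32)
threshold = 305.0

weights = np.sqrt(px_data)
weights[px_data > threshold] = 0.0  # drop NDRs above the per-pixel threshold

if np.sum(weights > 0) >= 2:
    weight_sum = np.sum(weights)
    t_mean = np.dot(weights, exptimes) / weight_sum
    d_mean = np.dot(weights, px_data) / weight_sum
    weighted_t = weights * (exptimes - t_mean)
    slope = np.dot(weighted_t, px_data - d_mean) / np.dot(weighted_t, exptimes - t_mean)
    print(slope)  # ~10 counts per ms for this synthetic, perfectly linear ramp

# The cubic correction from lin_correct, with made-up coefficients:
linc = np.array([1.0, -1e-6, 0.0, 0.0], dtype=np.float32)
raw = np.float32(10000.0)
corrected = raw / (linc[0] + raw * (linc[1] + raw * (linc[2] + raw * linc[3])))
# divisor = 1 - 0.01 = 0.99, so corrected ~= 10101 counts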