imap-processing 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +18 -0
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +101 -258
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +12 -2
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +1 -8
- imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +16 -5
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +27 -25
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +16 -16
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +2 -2
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +12 -0
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +4 -16
- imap_processing/codice/codice_l1a_lo_angular.py +362 -0
- imap_processing/codice/codice_l1a_lo_species.py +282 -0
- imap_processing/codice/codice_l1b.py +80 -97
- imap_processing/codice/codice_l2.py +270 -103
- imap_processing/codice/codice_new_l1a.py +64 -0
- imap_processing/codice/constants.py +37 -2
- imap_processing/codice/utils.py +270 -0
- imap_processing/ena_maps/ena_maps.py +51 -39
- imap_processing/ena_maps/utils/corrections.py +196 -14
- imap_processing/ena_maps/utils/naming.py +3 -1
- imap_processing/hi/hi_l1c.py +57 -19
- imap_processing/hi/hi_l2.py +89 -36
- imap_processing/ialirt/calculate_ingest.py +19 -1
- imap_processing/ialirt/constants.py +12 -6
- imap_processing/ialirt/generate_coverage.py +6 -1
- imap_processing/ialirt/l0/parse_mag.py +1 -0
- imap_processing/ialirt/l0/process_hit.py +1 -0
- imap_processing/ialirt/l0/process_swapi.py +1 -0
- imap_processing/ialirt/l0/process_swe.py +2 -0
- imap_processing/ialirt/process_ephemeris.py +6 -2
- imap_processing/ialirt/utils/create_xarray.py +3 -2
- imap_processing/lo/l1b/lo_l1b.py +12 -2
- imap_processing/lo/l1c/lo_l1c.py +4 -4
- imap_processing/lo/l2/lo_l2.py +101 -8
- imap_processing/quality_flags.py +1 -0
- imap_processing/swapi/constants.py +4 -0
- imap_processing/swapi/l1/swapi_l1.py +47 -20
- imap_processing/swapi/l2/swapi_l2.py +17 -3
- imap_processing/ultra/l1a/ultra_l1a.py +121 -72
- imap_processing/ultra/l1b/de.py +57 -1
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
- imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
- imap_processing/ultra/l1c/helio_pset.py +34 -8
- imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
- imap_processing/ultra/l1c/spacecraft_pset.py +13 -7
- imap_processing/ultra/l1c/ultra_l1c.py +6 -6
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +79 -20
- imap_processing/ultra/l2/ultra_l2.py +2 -2
- imap_processing/ultra/utils/ultra_l1_utils.py +6 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/METADATA +1 -1
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/RECORD +58 -54
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/entry_points.txt +0 -0
imap_processing/codice/utils.py
CHANGED
@@ -5,7 +5,40 @@ This module contains utility classes and functions that are used by various
 other CoDICE processing modules.
 """
 
+import json
+from dataclasses import dataclass
 from enum import IntEnum
+from pathlib import Path
+
+import numpy as np
+
+from imap_processing.spice.time import met_to_ttj2000ns
+
+
+@dataclass
+class ViewTabInfo:
+    """
+    Class to hold view table information.
+
+    Attributes
+    ----------
+    apid : int
+        The APID for the packet.
+    collapse_table : int
+        Collapse table id used to determine the collapse pattern.
+    sensor : int
+        Sensor id (0 for LO, 1 for HI).
+    three_d_collapsed : int
+        The 3D collapsed value from the LUT.
+    view_id : int
+        The view identifier from the packet.
+    """
+
+    apid: int
+    collapse_table: int
+    sensor: int
+    three_d_collapsed: int
+    view_id: int
 
 
 class CODICEAPID(IntEnum):
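Note: the hunk above introduces the ViewTabInfo dataclass that the new CoDICE helpers pass around. A minimal sketch of constructing one by hand; every field value below is hypothetical, not taken from a real LUT or packet:

from imap_processing.codice.utils import ViewTabInfo

view_info = ViewTabInfo(
    apid=0x460,           # illustrative packet APID
    collapse_table=3,     # illustrative collapse table id
    sensor=0,             # 0 = LO, 1 = HI
    three_d_collapsed=8,  # illustrative 3D collapsed value from the LUT
    view_id=5,            # illustrative view identifier
)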
@@ -57,3 +90,240 @@ class CoDICECompression(IntEnum):
     LOSSY_A_LOSSLESS = 4
     LOSSY_B_LOSSLESS = 5
     PACK_24_BIT = 6
+
+
+def read_sci_lut(file_path: Path, table_id: str) -> dict:
+    """
+    Read the SCI-LUT JSON file for a specific table ID.
+
+    Parameters
+    ----------
+    file_path : pathlib.Path
+        Path to the SCI-LUT JSON file.
+    table_id : str
+        Table identifier to extract from the JSON.
+
+    Returns
+    -------
+    dict
+        The SCI-LUT data for the specified table id.
+    """
+    sci_lut_data = json.loads(file_path.read_text()).get(f"{table_id}")
+    if sci_lut_data is None:
+        raise ValueError(f"SCI-LUT file does not have data for table ID {table_id}.")
+    return sci_lut_data
+
+
+def get_view_tab_info(json_data: dict, view_id: int, apid: int) -> dict:
+    """
+    Get the view table information for a specific view and APID.
+
+    Parameters
+    ----------
+    json_data : dict
+        The JSON data loaded from the SCI-LUT file.
+    view_id : int
+        The view ID from the packet.
+    apid : int
+        The APID from the packet.
+
+    Returns
+    -------
+    dict
+        The view table information containing details like sensor,
+        collapse_table, data_product, etc.
+    """
+    apid_hex = f"0x{apid:X}"
+    # This is how we get view information that will be used to get
+    # collapse pattern:
+    # table_id -> view_tab -> (view_id, apid) -> sensor -> collapse_table
+    view_tab = json_data.get("view_tab").get(f"({view_id}, {apid_hex})")
+    return view_tab
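Note: a sketch of how these two new helpers might be chained; the file path, table id, view id, and APID are hypothetical, and the lookup key format "(view_id, 0xAPID)" follows the code shown in the hunk:

from pathlib import Path

from imap_processing.codice.utils import get_view_tab_info, read_sci_lut

# Hypothetical SCI-LUT file and table id
sci_lut = read_sci_lut(Path("codice_sci_lut_example.json"), table_id="0")
# Fetch the view-table entry for a hypothetical (view_id, APID) pair;
# get_view_tab_info formats the lookup key as "(5, 0x460)" internally.
view_tab = get_view_tab_info(sci_lut, view_id=5, apid=0x460)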
+
+
+def get_collapse_pattern_shape(
+    json_data: dict, sensor_id: int, collapse_table_id: int
+) -> tuple[int, ...]:
+    """
+    Get the collapse pattern for a specific sensor id and collapse table id.
+
+    Parameters
+    ----------
+    json_data : dict
+        The JSON data loaded from the SCI-LUT file.
+    sensor_id : int
+        Sensor identifier (0 for LO, 1 for HI).
+    collapse_table_id : int
+        Collapse table id to look up in the SCI-LUT.
+
+    Returns
+    -------
+    tuple[int, ...]
+        The reduced shape describing the collapsed pattern. Examples:
+        ``(1,)`` for a fully collapsed 1-D pattern or ``(N, M)`` for a
+        reduced 2-D pattern.
+    """
+    sensor = "lo" if sensor_id == 0 else "hi"
+    collapse_matrix = np.array(
+        json_data[f"collapse_{sensor}"][f"{collapse_table_id}"]["matrix"]
+    )
+
+    # Analyze the collapse pattern matrix to determine its reduced shape.
+    # Steps:
+    # - Extract non-zero elements from the matrix.
+    # - Reshape to group unique non-zero rows and columns.
+    # - If all non-zero values are identical, return (1,) for a fully collapsed pattern.
+    # - Otherwise, compute the number of unique rows and columns to describe the
+    #   reduced shape.
+    non_zero_data = np.where(collapse_matrix != 0)
+    non_zero_reformatted = collapse_matrix[non_zero_data].reshape(
+        np.unique(non_zero_data[0]).size, np.unique(non_zero_data[1]).size
+    )
+
+    if np.unique(non_zero_reformatted).size == 1:
+        # all non-zero values are identical means -> fully collapsed
+        return (1,)
+
+    # If not fully collapsed, find repeated patterns in rows and columns
+    # to reduce shape further.
+    unique_rows = np.unique(non_zero_reformatted, axis=0)
+    unique_columns = np.unique(non_zero_reformatted, axis=1)
+    # Unique spin sectors and instrument azimuths to unpack data
+    unique_spin_sectors = unique_columns.shape[1]
+    unique_inst_azs = unique_rows.shape[0]
+    return (unique_spin_sectors, unique_inst_azs)
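Note: the shape-reduction logic above can be illustrated in isolation with a made-up 4x4 collapse matrix (not a real LUT entry):

import numpy as np

collapse_matrix = np.array(
    [
        [1, 1, 2, 2],
        [1, 1, 2, 2],
        [3, 3, 4, 4],
        [3, 3, 4, 4],
    ]
)
nz = np.where(collapse_matrix != 0)
reformatted = collapse_matrix[nz].reshape(np.unique(nz[0]).size, np.unique(nz[1]).size)
# Four distinct non-zero codes, so the pattern is not fully collapsed.
unique_rows = np.unique(reformatted, axis=0)     # 2 unique rows
unique_columns = np.unique(reformatted, axis=1)  # 2 unique columns
print(unique_columns.shape[1], unique_rows.shape[0])  # 2 2 -> reduced shape (2, 2)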
+
+
+def index_to_position(
+    json_data: dict, sensor_id: int, collapse_table_id: int
+) -> np.ndarray:
+    """
+    Get the indices of non-zero unique rows in the collapse pattern matrix.
+
+    Parameters
+    ----------
+    json_data : dict
+        The JSON data loaded from the SCI-LUT file.
+    sensor_id : int
+        Sensor identifier (0 for LO, 1 for HI).
+    collapse_table_id : int
+        Collapse table id to look up in the SCI-LUT.
+
+    Returns
+    -------
+    np.ndarray
+        Array of indices corresponding to non-zero unique rows.
+    """
+    sensor = "lo" if sensor_id == 0 else "hi"
+    collapse_matrix = np.array(
+        json_data[f"collapse_{sensor}"][f"{collapse_table_id}"]["matrix"]
+    )
+
+    # Find unique non-zero rows and their original indices
+    non_zero_row_mask = np.any(collapse_matrix != 0, axis=1)
+    non_zero_rows = collapse_matrix[non_zero_row_mask]
+    _, unique_indices = np.unique(non_zero_rows, axis=0, return_index=True)
+    non_zero_row_indices = np.flatnonzero(non_zero_row_mask)[unique_indices]
+    return non_zero_row_indices
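Note: with the same made-up matrix as in the previous sketch, index_to_position would return the row index of the first occurrence of each unique non-zero row:

import numpy as np

collapse_matrix = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]])
non_zero_row_mask = np.any(collapse_matrix != 0, axis=1)  # every row has non-zeros here
non_zero_rows = collapse_matrix[non_zero_row_mask]
_, unique_indices = np.unique(non_zero_rows, axis=0, return_index=True)
print(np.flatnonzero(non_zero_row_mask)[unique_indices])  # [0 2]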
+
+
+def get_codice_epoch_time(
+    acq_start_seconds: np.ndarray,
+    acq_start_subseconds: np.ndarray,
+    spin_period: np.ndarray,
+    view_tab_obj: ViewTabInfo,
+) -> tuple[np.ndarray, np.ndarray]:
+    """
+    Calculate center time and delta.
+
+    Parameters
+    ----------
+    acq_start_seconds : np.ndarray
+        Array of acquisition start seconds.
+    acq_start_subseconds : np.ndarray
+        Array of acquisition start subseconds.
+    spin_period : np.ndarray
+        Array of spin periods.
+    view_tab_obj : ViewTabInfo
+        The view table information object, containing fields such as the
+        sensor ID and the three_d_collapsed value.
+
+    Returns
+    -------
+    tuple[np.ndarray, np.ndarray]
+        (center_times, delta_times).
+    """
+    # If Lo sensor
+    if view_tab_obj.sensor == 0:
+        # For the Lo sensor the number of spins is constant: 32 half spins
+        # make 16 full spins for all non-direct-event products, and Lo direct
+        # events also span 16 spins, so the same calculation is used for all
+        # Lo products.
+        num_spins = 16.0
+    # If Hi sensor and Direct Event product
+    elif view_tab_obj.sensor == 1 and view_tab_obj.apid == CODICEAPID.COD_HI_PHA:
+        # Use a constant 16 spins for Hi PHA
+        num_spins = 16.0
+    # If non-Direct Event Hi product
+    else:
+        # Use the 3d_collapsed value from the LUT for other Hi products
+        num_spins = view_tab_obj.three_d_collapsed
+
+    # Units of 'spin ticks', where one 'spin tick' equals 320 microseconds.
+    # It takes multiple spins to collect data for a view.
+    spin_period_ns = spin_period.astype(np.float64) * 320 * 1e3  # Convert to ns
+    delta_times = (num_spins * spin_period_ns) / 2
+    # Subseconds need to be converted to seconds using this formula per the
+    # CoDICE team: subseconds / 65536 gives seconds.
+    center_times_seconds = (
+        acq_start_seconds + acq_start_subseconds / 65536 + (delta_times / 1e9)
+    )
+
+    return met_to_ttj2000ns(center_times_seconds), delta_times
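Note: a worked example of the timing arithmetic above, using a hypothetical spin period of 46875 spin ticks (46875 x 320 microseconds = 15 s) and the constant 16 spins used for Lo products; the MET start values are also made up. The real function then converts the MET center time to TT J2000 nanoseconds via met_to_ttj2000ns:

import numpy as np

spin_period_ticks = np.array([46875.0])         # hypothetical; 1 tick = 320 microseconds
spin_period_ns = spin_period_ticks * 320 * 1e3  # 1.5e10 ns = 15 s per spin
delta_times = (16.0 * spin_period_ns) / 2       # 1.2e11 ns = 120 s half-width
acq_start_seconds = np.array([1000.0])          # hypothetical MET seconds
acq_start_subseconds = np.array([32768.0])      # 32768 / 65536 = 0.5 s
center_met = acq_start_seconds + acq_start_subseconds / 65536 + delta_times / 1e9
print(center_met)  # [1120.5] MET seconds at the center of the acquisition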
+
+
+def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray:
+    """
+    Calculate acquisition time per step from the low stepping table.
+
+    Parameters
+    ----------
+    low_stepping_tab : dict
+        The low stepping table from the SCI-LUT JSON.
+
+    Returns
+    -------
+    np.ndarray
+        Array of acquisition times per step of shape (num_esa_steps,).
+    """
+    # These tunable values are used to calculate acquisition time per step
+    tunable_values = low_stepping_tab["tunable_values"]
+
+    # Pre-calculate values
+    sector_time = tunable_values["spin_time_ms"] / tunable_values["num_sectors_ms"]
+    sector_margin_ms = tunable_values["sector_margin_ms"]
+    dwell_fraction = tunable_values["dwell_fraction_percentage"]
+    min_hv_settle_ms = tunable_values["min_hv_settle_ms"]
+    max_hv_settle_ms = tunable_values["max_hv_settle_ms"]
+    num_steps_data = np.array(
+        low_stepping_tab["num_steps"].get("data"), dtype=np.float64
+    )
+    # Total non-acquisition time is in column (BD) of the science LUT
+    dwell_fraction_percentage = float(sector_time) * (100.0 - dwell_fraction) / 100.0
+
+    # Calculate the HV settle time per step, not yet adjusted for min/max.
+    # It's in column (BF) of the science LUT.
+    non_adjusted_hv_settle_per_step = (
+        dwell_fraction_percentage - sector_margin_ms
+    ) / num_steps_data
+    hv_settle_per_step = np.minimum(
+        np.maximum(non_adjusted_hv_settle_per_step, min_hv_settle_ms), max_hv_settle_ms
+    )
+
+    # Acquisition time per step in milliseconds:
+    # (sector_time - sector_margin_ms) / num_steps - hv_settle_per_step
+    acq_time_per_step = (
+        (sector_time - sector_margin_ms) / num_steps_data
+    ) - hv_settle_per_step
+    # Convert to seconds
+    return acq_time_per_step / 1e3
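Note: the same arithmetic with purely illustrative tunable values (these are not flight values), showing how the HV settle time is clamped before being subtracted:

import numpy as np

sector_time = 15000.0 / 24        # spin_time_ms / num_sectors_ms = 625 ms (illustrative)
sector_margin_ms = 1.0            # illustrative
dwell_fraction = 90.0             # illustrative dwell_fraction_percentage
num_steps = np.array([128.0])     # illustrative number of ESA steps
non_acq_time = sector_time * (100.0 - dwell_fraction) / 100.0  # 62.5 ms
# (62.5 - 1) / 128 = 0.48 ms, clamped up to the 1 ms minimum settle time
hv_settle = np.clip((non_acq_time - sector_margin_ms) / num_steps, 1.0, 10.0)
acq_time_per_step_ms = (sector_time - sector_margin_ms) / num_steps - hv_settle
print(acq_time_per_step_ms / 1e3)  # [0.003875] seconds per step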

imap_processing/ena_maps/ena_maps.py
CHANGED

@@ -647,28 +647,21 @@ class HiPointingSet(LoHiBasePointingSet):
     ----------
     dataset : xarray.Dataset | str | Path
         Hi L1C pointing set data loaded in a xarray.DataArray.
-    spin_phase : str
-        Include ENAs from "full", "ram" or "anti-ram" phases of the spin.
     """
 
-    def __init__(self, dataset: xr.Dataset | str | Path
-        super().__init__(dataset, spice_reference_frame=geometry.SpiceFrame.
+    def __init__(self, dataset: xr.Dataset | str | Path):
+        super().__init__(dataset, spice_reference_frame=geometry.SpiceFrame.IMAP_HAE)
+
+        self.spatial_coords = ("spin_angle_bin",)
 
-        #
-
-
+        # Naively generate the ram_mask variable assuming spacecraft frame
+        # binning. The ram_mask variable gets updated in the CG correction
+        # code if the CG correction is applied.
+        ram_mask = xr.zeros_like(self.data["spin_angle_bin"], dtype=bool)
         # ram only includes spin-phase interval [0, 0.5)
         # which is the first half of the spin_angle_bins
-
-
-            spin_angle_bin=slice(0, self.data["spin_angle_bin"].data.size // 2)
-        )
-        # anti-ram includes spin-phase interval [0.5, 1)
-        # which is the second half of the spin_angle_bins
-        elif spin_phase == "anti":
-            self.data = self.data.isel(
-                spin_angle_bin=slice(self.data["spin_angle_bin"].data.size // 2, None)
-            )
+        ram_mask[slice(0, self.data["spin_angle_bin"].data.size // 2)] = True
+        self.data["ram_mask"] = ram_mask
 
         # Rename some PSET vars to match L2 variables
         self.data = self.data.rename(
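Note: a self-contained sketch of the new ram-mask construction above; the 90-bin pointing set is made up, the real size comes from the Hi L1C product:

import numpy as np
import xarray as xr

pset = xr.Dataset(coords={"spin_angle_bin": np.arange(90)})
ram_mask = xr.zeros_like(pset["spin_angle_bin"], dtype=bool)
# The first half of the spin-angle bins corresponds to spin phase [0, 0.5), i.e. "ram".
ram_mask[slice(0, pset["spin_angle_bin"].data.size // 2)] = True
pset["ram_mask"] = ram_mask
print(int(pset["ram_mask"].sum()))  # 45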
@@ -684,8 +677,6 @@ class HiPointingSet(LoHiBasePointingSet):
             self.data["exposure_factor"], self.data["epoch"].values[0]
         )
 
-        self.spatial_coords = ("spin_angle_bin",)
-
         # Update az_el_points using the base class method
         self.update_az_el_points()
 
@@ -810,12 +801,12 @@ class AbstractSkyMap(ABC):
         """
        return self.az_el_points.shape[0]
 
-    def project_pset_values_to_map(
+    def project_pset_values_to_map(  # noqa: PLR0912
        self,
        pointing_set: PointingSet,
        value_keys: list[str] | None = None,
        index_match_method: IndexMatchMethod = IndexMatchMethod.PUSH,
-        pset_valid_mask: NDArray | None = None,
+        pset_valid_mask: NDArray | xr.DataArray | None = None,
     ) -> None:
        """
        Project a pointing set's values to the map grid.
@@ -837,7 +828,7 @@ class AbstractSkyMap(ABC):
        index_match_method : IndexMatchMethod, optional
            The method of index matching to use for all values.
            Default is IndexMatchMethod.PUSH.
-        pset_valid_mask : NDArray, optional
+        pset_valid_mask : xarray.DataArray or NDArray, optional
            A boolean mask of shape (number of pointing set pixels,) indicating
            which pixels in the pointing set should be considered valid for projection.
            If None, all pixels are considered valid. Default is None.
@@ -849,11 +840,12 @@ class AbstractSkyMap(ABC):
        """
        if value_keys is None:
            value_keys = list(pointing_set.data.data_vars.keys())
-
-
-
+
+        if missing_keys := set(value_keys) - set(pointing_set.data.data_vars):
+            raise KeyError(f"Value keys not found in pointing set: {missing_keys}")
 
        if pset_valid_mask is None:
+            logger.debug("No pset_valid_mask provided, using all pixels as valid.")
            pset_valid_mask = np.ones(pointing_set.num_points, dtype=bool)
 
        if index_match_method is IndexMatchMethod.PUSH:
@@ -876,9 +868,12 @@ class AbstractSkyMap(ABC):
            )
 
            for value_key in value_keys:
+                if value_key not in pointing_set.data.data_vars:
+                    raise ValueError(f"Value key {value_key} not found in pointing set.")
+
                # If multiple spatial axes present
                # (i.e (az, el) for rectangular coordinate PSET),
-                #
+                # stack them into a single coordinate to match the raveled indices
                raveled_pset_data = pointing_set.data[value_key].stack(
                    {CoordNames.GENERIC_PIXEL.value: pointing_set.spatial_coords}
                )
@@ -907,13 +902,22 @@ class AbstractSkyMap(ABC):
                data_bc, indices_bc = xr.broadcast(
                    raveled_pset_data, matched_indices_push
                )
+                # If the valid mask is a xr.DataArray, broadcast it to the same shape
+                if isinstance(pset_valid_mask, xr.DataArray):
+                    stacked_valid_mask = pset_valid_mask.stack(
+                        {CoordNames.GENERIC_PIXEL.value: pointing_set.spatial_coords}
+                    )
+                    _, pset_valid_mask_bc = xr.broadcast(data_bc, stacked_valid_mask)
+                    pset_valid_mask_values = pset_valid_mask_bc.values
+                else:
+                    pset_valid_mask_values = pset_valid_mask
 
                # Extract numpy arrays for bincount operation
                pointing_projected_values = map_utils.bin_single_array_at_indices(
                    value_array=data_bc.values,
                    projection_grid_shape=self.binning_grid_shape,
                    projection_indices=indices_bc.values,
-                    input_valid_mask=
+                    input_valid_mask=pset_valid_mask_values,
                )
                # TODO: we may need to allow for unweighted/weighted means here by
                # dividing pointing_projected_values by some binned weights.
@@ -934,10 +938,6 @@ class AbstractSkyMap(ABC):
                self.data_1d[value_key].values[..., valid_map_mask] += (
                    pointing_projected_values
                )
-        else:
-            raise NotImplementedError(
-                "Only PUSH and PULL index matching methods are supported."
-            )
 
        # TODO: The max epoch needs to include the pset duration. Right now it
        # is just capturing the start epoch. See issue #1747
@@ -1266,12 +1266,13 @@ class RectangularSkyMap(AbstractSkyMap):
            coords={**self.non_spatial_coords, **self.spatial_coords},
        )
 
-    def build_cdf_dataset(
+    def build_cdf_dataset(  # noqa: PLR0912
        self,
        instrument: str,
        level: str,
        descriptor: str,
        sensor: str | None = None,
+        drop_vars_with_no_attributes: bool = True,
     ) -> xr.Dataset:
        """
        Format the data into a xarray.Dataset and add required CDF variables.
@@ -1286,6 +1287,12 @@ class RectangularSkyMap(AbstractSkyMap):
            Descriptor for filename.
        sensor : str, optional
            Sensor number "45" or "90".
+        drop_vars_with_no_attributes : bool, optional
+            Default behavior is to drop any dataset variables that don't have
+            attributes defined in the CDF attribute manager. This ensures that
+            the output CDF doesn't have any of the intermediate variables left
+            over from computations. Sometimes, it is useful to output the
+            intermediate variables. To do so, set this to False.
 
        Returns
        -------
@@ -1389,13 +1396,18 @@ class RectangularSkyMap(AbstractSkyMap):
                    variable_name=name,
                    check_schema=check_schema,
                )
-
-
-
-
-
-
-
+                cdf_ds[name].attrs.update(var_attrs)
+            except KeyError:
+                if drop_vars_with_no_attributes:
+                    logger.debug(
+                        f"Dropping variable '{name}' that has no attributes defined."
+                    )
+                    cdf_ds = cdf_ds.drop_vars(name)
+                else:
+                    logger.debug(
+                        f"Variable '{name}' has no attributes defined. It will "
+                        f"be included in the output dataset with no attributes."
+                    )
 
        # Manually adjust epoch attributes
        cdf_ds["epoch"].attrs.update(