imap-processing 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
  3. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
  4. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  5. imap_processing/cdf/utils.py +2 -2
  6. imap_processing/cli.py +4 -16
  7. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  8. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  9. imap_processing/codice/codice_l1b.py +62 -97
  10. imap_processing/codice/codice_l2.py +210 -96
  11. imap_processing/codice/codice_new_l1a.py +64 -0
  12. imap_processing/codice/constants.py +37 -2
  13. imap_processing/codice/utils.py +270 -0
  14. imap_processing/ena_maps/ena_maps.py +50 -39
  15. imap_processing/ena_maps/utils/corrections.py +196 -14
  16. imap_processing/ena_maps/utils/naming.py +3 -1
  17. imap_processing/hi/hi_l1c.py +34 -12
  18. imap_processing/hi/hi_l2.py +79 -36
  19. imap_processing/ialirt/generate_coverage.py +3 -1
  20. imap_processing/ialirt/l0/parse_mag.py +1 -0
  21. imap_processing/ialirt/l0/process_hit.py +1 -0
  22. imap_processing/ialirt/l0/process_swapi.py +1 -0
  23. imap_processing/ialirt/l0/process_swe.py +2 -0
  24. imap_processing/ialirt/process_ephemeris.py +6 -2
  25. imap_processing/ialirt/utils/create_xarray.py +3 -2
  26. imap_processing/lo/l1c/lo_l1c.py +1 -1
  27. imap_processing/lo/l2/lo_l2.py +6 -4
  28. imap_processing/quality_flags.py +1 -0
  29. imap_processing/swapi/constants.py +4 -0
  30. imap_processing/swapi/l1/swapi_l1.py +47 -20
  31. imap_processing/swapi/l2/swapi_l2.py +17 -3
  32. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  33. imap_processing/ultra/l1b/de.py +57 -1
  34. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  35. imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
  36. imap_processing/ultra/l1c/helio_pset.py +28 -5
  37. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  38. imap_processing/ultra/l1c/spacecraft_pset.py +9 -5
  39. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  40. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
  41. imap_processing/ultra/l2/ultra_l2.py +2 -2
  42. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/METADATA +1 -1
  43. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/RECORD +46 -42
  44. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
  45. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
  46. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_new_l1a.py
@@ -0,0 +1,64 @@
+ """CoDICE L1A processing functions."""
+
+ import logging
+
+ import xarray as xr
+ from imap_data_access import ProcessingInputCollection
+
+ from imap_processing import imap_module_directory
+ from imap_processing.codice.codice_l1a_lo_angular import l1a_lo_angular
+ from imap_processing.codice.codice_l1a_lo_species import l1a_lo_species
+ from imap_processing.codice.utils import (
+     CODICEAPID,
+ )
+ from imap_processing.utils import packet_file_to_datasets
+
+ logger = logging.getLogger(__name__)
+
+
+ def process_l1a(dependency: ProcessingInputCollection) -> list[xr.Dataset]:
+     """
+     Process L1A data based on descriptor and dependencies.
+
+     Parameters
+     ----------
+     dependency : ProcessingInputCollection
+         Collection of processing inputs required for L1A processing.
+
+     Returns
+     -------
+     list[xarray.Dataset]
+         List of processed L1A datasets generated from available APIDs.
+     """
+     # Get the science data, which is the L0 packet file
+     science_file = dependency.get_file_paths(data_type="l0")[0]
+     # Get the LUT file
+     lut_file = dependency.get_file_paths(descriptor="l1a-sci-lut")[0]
+
+     logger.info(f"Processing L1A for {science_file.name} with {lut_file.name}")
+
+     xtce_file = (
+         imap_module_directory / "codice/packet_definitions/codice_packet_definition.xml"
+     )
+     # Decom packets
+     datasets_by_apid = packet_file_to_datasets(
+         science_file,
+         xtce_file,
+     )
+
+     datasets = []
+     for apid in datasets_by_apid:
+         if apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS:
+             logger.info("Processing Lo SW Species Counts")
+             datasets.append(l1a_lo_species(datasets_by_apid[apid], lut_file))
+         elif apid == CODICEAPID.COD_LO_NSW_SPECIES_COUNTS:
+             logger.info("Processing Lo NSW Species Counts")
+             datasets.append(l1a_lo_species(datasets_by_apid[apid], lut_file))
+         elif apid == CODICEAPID.COD_LO_SW_ANGULAR_COUNTS:
+             logger.info("Processing Lo SW Angular Counts")
+             datasets.append(l1a_lo_angular(datasets_by_apid[apid], lut_file))
+         elif apid == CODICEAPID.COD_LO_NSW_ANGULAR_COUNTS:
+             logger.info("Processing Lo NSW Angular Counts")
+             datasets.append(l1a_lo_angular(datasets_by_apid[apid], lut_file))
+
+     return datasets
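
Aside: the new process_l1a entry point routes each APID to one of the two new handlers. As an illustration only (this mapping is not code from the package; the CODICEAPID members and handler functions are taken from this diff), the if/elif chain is equivalent to a dispatch table:

    handlers = {
        CODICEAPID.COD_LO_SW_SPECIES_COUNTS: l1a_lo_species,
        CODICEAPID.COD_LO_NSW_SPECIES_COUNTS: l1a_lo_species,
        CODICEAPID.COD_LO_SW_ANGULAR_COUNTS: l1a_lo_angular,
        CODICEAPID.COD_LO_NSW_ANGULAR_COUNTS: l1a_lo_angular,
    }
    datasets = [
        handlers[apid](ds, lut_file)
        for apid, ds in datasets_by_apid.items()
        if apid in handlers
    ]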
imap_processing/codice/constants.py
@@ -61,6 +61,8 @@ CODICEAPID_MAPPING = {
  SPIN_PERIOD_CONVERSION = 0.00032
  K_FACTOR = 5.76  # This is used to convert voltages to energies in L2
  HI_ACQUISITION_TIME = 0.59916
+ NUM_ESA_STEPS = 128
+ LO_DESPIN_SPIN_SECTORS = 24

  # CDF variable names used for lo data products
  LO_COUNTERS_SINGLES_VARIABLE_NAMES = ["apd_singles"]
@@ -2281,8 +2283,9 @@ HALF_SPIN_LUT = {
  }

  NSW_POSITIONS = [x for x in range(3, 22)]
- SW_POSITIONS = [0]
- PUI_POSITIONS = [0, 1, 2, 22, 23]
+ SW_POSITIONS = [0, 1, 2, 22, 23]
+ SOLAR_WIND_POSITIONS = [0]
+ PUI_POSITIONS = SW_POSITIONS
  L2_GEOMETRIC_FACTOR = 0.013
  L2_HI_NUMBER_OF_SSD = 12.0

@@ -2320,3 +2323,35 @@ HI_L2_ELEVATION_ANGLE = np.array(
      ],
      dtype=float,
  )
+
+
+ LO_POSITION_TO_ELEVATION_ANGLE = {
+     "sw": {
+         1: 0,
+         2: 15,
+         24: 15,
+         3: 30,
+         23: 30,
+     },
+     "nsw": {
+         4: 45,
+         22: 45,
+         5: 60,
+         21: 60,
+         6: 75,
+         20: 75,
+         7: 90,
+         19: 90,
+         8: 105,
+         18: 105,
+         9: 120,
+         17: 120,
+         10: 135,
+         16: 135,
+         11: 150,
+         15: 150,
+         12: 165,
+         14: 165,
+         13: 180,
+     },
+ }
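
For reference, the new LO_POSITION_TO_ELEVATION_ANGLE table is keyed first by region ("sw" or "nsw") and then by detector position; the values appear to be elevation angles in degrees, though the diff does not state units:

    LO_POSITION_TO_ELEVATION_ANGLE["sw"][2]    # -> 15
    LO_POSITION_TO_ELEVATION_ANGLE["nsw"][13]  # -> 180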
imap_processing/codice/utils.py
@@ -5,7 +5,40 @@ This module contains utility classes and functions that are used by various
  other CoDICE processing modules.
  """

+ import json
+ from dataclasses import dataclass
  from enum import IntEnum
+ from pathlib import Path
+
+ import numpy as np
+
+ from imap_processing.spice.time import met_to_ttj2000ns
+
+
+ @dataclass
+ class ViewTabInfo:
+     """
+     Class to hold view table information.
+
+     Attributes
+     ----------
+     apid : int
+         The APID for the packet.
+     collapse_table : int
+         Collapse table id used to determine the collapse pattern.
+     sensor : int
+         Sensor id (0 for LO, 1 for HI).
+     three_d_collapsed : int
+         The 3D collapsed value from the LUT.
+     view_id : int
+         The view identifier from the packet.
+     """
+
+     apid: int
+     collapse_table: int
+     sensor: int
+     three_d_collapsed: int
+     view_id: int


  class CODICEAPID(IntEnum):
@@ -57,3 +90,240 @@ class CoDICECompression(IntEnum):
      LOSSY_A_LOSSLESS = 4
      LOSSY_B_LOSSLESS = 5
      PACK_24_BIT = 6
+
+
+ def read_sci_lut(file_path: Path, table_id: str) -> dict:
+     """
+     Read the SCI-LUT JSON file for a specific table ID.
+
+     Parameters
+     ----------
+     file_path : pathlib.Path
+         Path to the SCI-LUT JSON file.
+     table_id : str
+         Table identifier to extract from the JSON.
+
+     Returns
+     -------
+     dict
+         The SCI-LUT data for the specified table id.
+     """
+     sci_lut_data = json.loads(file_path.read_text()).get(f"{table_id}")
+     if sci_lut_data is None:
+         raise ValueError(f"SCI-LUT file does not have data for table ID {table_id}.")
+     return sci_lut_data
+
+
+ def get_view_tab_info(json_data: dict, view_id: int, apid: int) -> dict:
+     """
+     Get the view table information for a specific view and APID.
+
+     Parameters
+     ----------
+     json_data : dict
+         The JSON data loaded from the SCI-LUT file.
+     view_id : int
+         The view ID from the packet.
+     apid : int
+         The APID from the packet.
+
+     Returns
+     -------
+     dict
+         The view table information containing details like sensor,
+         collapse_table, data_product, etc.
+     """
+     apid_hex = f"0x{apid:X}"
+     # This is how we get the view information that will be used to get the
+     # collapse pattern:
+     # table_id -> view_tab -> (view_id, apid) -> sensor -> collapse_table
+     view_tab = json_data.get("view_tab").get(f"({view_id}, {apid_hex})")
+     return view_tab
+
+
+ def get_collapse_pattern_shape(
+     json_data: dict, sensor_id: int, collapse_table_id: int
+ ) -> tuple[int, ...]:
+     """
+     Get the collapse pattern shape for a specific sensor id and collapse table id.
+
+     Parameters
+     ----------
+     json_data : dict
+         The JSON data loaded from the SCI-LUT file.
+     sensor_id : int
+         Sensor identifier (0 for LO, 1 for HI).
+     collapse_table_id : int
+         Collapse table id to look up in the SCI-LUT.
+
+     Returns
+     -------
+     tuple[int, ...]
+         The reduced shape describing the collapsed pattern. Examples:
+         ``(1,)`` for a fully collapsed 1-D pattern or ``(N, M)`` for a
+         reduced 2-D pattern.
+     """
+     sensor = "lo" if sensor_id == 0 else "hi"
+     collapse_matrix = np.array(
+         json_data[f"collapse_{sensor}"][f"{collapse_table_id}"]["matrix"]
+     )
+
+     # Analyze the collapse pattern matrix to determine its reduced shape.
+     # Steps:
+     # - Extract non-zero elements from the matrix.
+     # - Reshape to group unique non-zero rows and columns.
+     # - If all non-zero values are identical, return (1,) for a fully collapsed pattern.
+     # - Otherwise, compute the number of unique rows and columns to describe the
+     #   reduced shape.
+     non_zero_data = np.where(collapse_matrix != 0)
+     non_zero_reformatted = collapse_matrix[non_zero_data].reshape(
+         np.unique(non_zero_data[0]).size, np.unique(non_zero_data[1]).size
+     )
+
+     if np.unique(non_zero_reformatted).size == 1:
+         # All non-zero values being identical means the pattern is fully collapsed
+         return (1,)
+
+     # If not fully collapsed, find repeated patterns in rows and columns
+     # to reduce the shape further.
+     unique_rows = np.unique(non_zero_reformatted, axis=0)
+     unique_columns = np.unique(non_zero_reformatted, axis=1)
+     # Unique spin sectors and instrument azimuths to unpack data
+     unique_spin_sectors = unique_columns.shape[1]
+     unique_inst_azs = unique_rows.shape[0]
+     return (unique_spin_sectors, unique_inst_azs)
+
+
+ def index_to_position(
+     json_data: dict, sensor_id: int, collapse_table_id: int
+ ) -> np.ndarray:
+     """
+     Get the indices of non-zero unique rows in the collapse pattern matrix.
+
+     Parameters
+     ----------
+     json_data : dict
+         The JSON data loaded from the SCI-LUT file.
+     sensor_id : int
+         Sensor identifier (0 for LO, 1 for HI).
+     collapse_table_id : int
+         Collapse table id to look up in the SCI-LUT.
+
+     Returns
+     -------
+     np.ndarray
+         Array of indices corresponding to non-zero unique rows.
+     """
+     sensor = "lo" if sensor_id == 0 else "hi"
+     collapse_matrix = np.array(
+         json_data[f"collapse_{sensor}"][f"{collapse_table_id}"]["matrix"]
+     )
+
+     # Find unique non-zero rows and their original indices
+     non_zero_row_mask = np.any(collapse_matrix != 0, axis=1)
+     non_zero_rows = collapse_matrix[non_zero_row_mask]
+     _, unique_indices = np.unique(non_zero_rows, axis=0, return_index=True)
+     non_zero_row_indices = np.flatnonzero(non_zero_row_mask)[unique_indices]
+     return non_zero_row_indices
+
+
+ def get_codice_epoch_time(
+     acq_start_seconds: np.ndarray,
+     acq_start_subseconds: np.ndarray,
+     spin_period: np.ndarray,
+     view_tab_obj: ViewTabInfo,
+ ) -> tuple[np.ndarray, np.ndarray]:
+     """
+     Calculate center time and delta.
+
+     Parameters
+     ----------
+     acq_start_seconds : np.ndarray
+         Array of acquisition start seconds.
+     acq_start_subseconds : np.ndarray
+         Array of acquisition start subseconds.
+     spin_period : np.ndarray
+         Array of spin periods.
+     view_tab_obj : ViewTabInfo
+         The view table information object. It contains information such as the
+         sensor ID, the three_d_collapsed value, and others.
+
+     Returns
+     -------
+     tuple[np.ndarray, np.ndarray]
+         (center_times, delta_times).
+     """
+     # If Lo sensor
+     if view_tab_obj.sensor == 0:
+         # For the Lo sensor, we need to set the number of spins to a constant.
+         # 32 half spins make 16 full spins for all non-direct-event products.
+         # Lo direct events also span 16 spins, so we can use the same
+         # calculation for all Lo products.
+         num_spins = 16.0
+     # If Hi sensor and Direct Event product
+     elif view_tab_obj.sensor == 1 and view_tab_obj.apid == CODICEAPID.COD_HI_PHA:
+         # Use a constant 16 spins for Hi PHA
+         num_spins = 16.0
+     # If non-Direct Event Hi product
+     else:
+         # Use the 3d_collapsed value from the LUT for other Hi products
+         num_spins = view_tab_obj.three_d_collapsed
+
+     # The spin period is in units of 'spin ticks', where one 'spin tick' equals
+     # 320 microseconds. It takes multiple spins to collect data for a view.
+     spin_period_ns = spin_period.astype(np.float64) * 320 * 1e3  # Convert to ns
+     delta_times = (num_spins * spin_period_ns) / 2
+     # Subseconds need to be converted to seconds using this formula per the
+     # CoDICE team: subseconds / 65536 gives seconds
+     center_times_seconds = (
+         acq_start_seconds + acq_start_subseconds / 65536 + (delta_times / 1e9)
+     )
+
+     return met_to_ttj2000ns(center_times_seconds), delta_times
+
+
+ def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray:
+     """
+     Calculate acquisition time per step from the low stepping table.
+
+     Parameters
+     ----------
+     low_stepping_tab : dict
+         The low stepping table from the SCI-LUT JSON.
+
+     Returns
+     -------
+     np.ndarray
+         Array of acquisition times per step of shape (num_esa_steps,).
+     """
+     # These tunable values are used to calculate acquisition time per step
+     tunable_values = low_stepping_tab["tunable_values"]
+
+     # Pre-calculate values
+     sector_time = tunable_values["spin_time_ms"] / tunable_values["num_sectors_ms"]
+     sector_margin_ms = tunable_values["sector_margin_ms"]
+     dwell_fraction = tunable_values["dwell_fraction_percentage"]
+     min_hv_settle_ms = tunable_values["min_hv_settle_ms"]
+     max_hv_settle_ms = tunable_values["max_hv_settle_ms"]
+     num_steps_data = np.array(
+         low_stepping_tab["num_steps"].get("data"), dtype=np.float64
+     )
+     # Total non-acquisition time is in column (BD) of the science LUT
+     dwell_fraction_percentage = float(sector_time) * (100.0 - dwell_fraction) / 100.0
+
+     # Calculate HV settle time per step, not yet adjusted for min/max.
+     # It's in column (BF) of the science LUT.
+     non_adjusted_hv_settle_per_step = (
+         dwell_fraction_percentage - sector_margin_ms
+     ) / num_steps_data
+     hv_settle_per_step = np.minimum(
+         np.maximum(non_adjusted_hv_settle_per_step, min_hv_settle_ms), max_hv_settle_ms
+     )
+
+     # Acquisition time per step in milliseconds:
+     # (sector_time - sector_margin_ms) / num_steps - hv_settle_per_step
+     acq_time_per_step = (
+         (sector_time - sector_margin_ms) / num_steps_data
+     ) - hv_settle_per_step
+     # Convert to seconds
+     return acq_time_per_step / 1e3
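
To make the reduced-shape logic in get_collapse_pattern_shape concrete, here is a self-contained toy example; the SCI-LUT fragment below is invented for illustration and is not a real CoDICE collapse table:

    from imap_processing.codice.utils import get_collapse_pattern_shape

    json_data = {
        "collapse_lo": {
            # All non-zero values identical -> fully collapsed
            "0": {"matrix": [[1, 1], [1, 1]]},
            # Two unique rows and two unique columns -> reduced 2-D shape
            "1": {
                "matrix": [
                    [1, 1, 2, 2],
                    [1, 1, 2, 2],
                    [3, 3, 4, 4],
                    [3, 3, 4, 4],
                ]
            },
        }
    }

    get_collapse_pattern_shape(json_data, sensor_id=0, collapse_table_id=0)  # (1,)
    get_collapse_pattern_shape(json_data, sensor_id=0, collapse_table_id=1)  # (2, 2)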
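
The timing arithmetic in get_codice_epoch_time is also easy to check by hand. Assuming a 15-second spin for illustration (15 s at 320 microseconds per spin tick is 46875 ticks; the actual spin period comes from the packet):

    spin_period_ticks = 46875.0                     # 15 s in 320-microsecond spin ticks
    spin_period_ns = spin_period_ticks * 320 * 1e3  # = 15e9 ns
    num_spins = 16.0                                # constant for all Lo products
    delta_times = num_spins * spin_period_ns / 2    # = 120e9 ns, a 120 s half-width
    # center time (MET seconds) = acq_start_seconds + acq_start_subseconds / 65536
    #                             + delta_times / 1e9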
imap_processing/ena_maps/ena_maps.py
@@ -647,28 +647,21 @@ class HiPointingSet(LoHiBasePointingSet):
      ----------
      dataset : xarray.Dataset | str | Path
          Hi L1C pointing set data loaded in an xarray.Dataset.
-     spin_phase : str
-         Include ENAs from "full", "ram" or "anti-ram" phases of the spin.
      """

-     def __init__(self, dataset: xr.Dataset | str | Path, spin_phase: str):
-         super().__init__(dataset, spice_reference_frame=geometry.SpiceFrame.ECLIPJ2000)
+     def __init__(self, dataset: xr.Dataset | str | Path):
+         super().__init__(dataset, spice_reference_frame=geometry.SpiceFrame.IMAP_HAE)
+
+         self.spatial_coords = ("spin_angle_bin",)

-         # Filter out ENAs from non-selected portions of the spin.
-         if spin_phase not in ["full", "ram", "anti"]:
-             raise ValueError(f"Unrecognized spin_phase value: {spin_phase}.")
+         # Naively generate the ram_mask variable assuming spacecraft frame
+         # binning. The ram_mask variable gets updated in the CG correction
+         # code if the CG correction is applied.
+         ram_mask = xr.zeros_like(self.data["spin_angle_bin"], dtype=bool)
          # ram only includes spin-phase interval [0, 0.5),
          # which is the first half of the spin_angle_bins
-         elif spin_phase == "ram":
-             self.data = self.data.isel(
-                 spin_angle_bin=slice(0, self.data["spin_angle_bin"].data.size // 2)
-             )
-         # anti-ram includes spin-phase interval [0.5, 1),
-         # which is the second half of the spin_angle_bins
-         elif spin_phase == "anti":
-             self.data = self.data.isel(
-                 spin_angle_bin=slice(self.data["spin_angle_bin"].data.size // 2, None)
-             )
+         ram_mask[slice(0, self.data["spin_angle_bin"].data.size // 2)] = True
+         self.data["ram_mask"] = ram_mask

          # Rename some PSET vars to match L2 variables
          self.data = self.data.rename(
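
A minimal sketch of how the new ram_mask could be consumed downstream, assuming pset is a HiPointingSet and relying on xarray's support for 1-D boolean indexers in isel (the names here are illustrative, not code from this diff):

    ram_half = pset.data.isel(spin_angle_bin=pset.data["ram_mask"].values)
    anti_ram_half = pset.data.isel(spin_angle_bin=~pset.data["ram_mask"].values)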
@@ -684,8 +677,6 @@ class HiPointingSet(LoHiBasePointingSet):
              self.data["exposure_factor"], self.data["epoch"].values[0]
          )

-         self.spatial_coords = ("spin_angle_bin",)
-
          # Update az_el_points using the base class method
          self.update_az_el_points()

@@ -810,12 +801,12 @@ class AbstractSkyMap(ABC):
          """
          return self.az_el_points.shape[0]

-     def project_pset_values_to_map(
+     def project_pset_values_to_map(  # noqa: PLR0912
          self,
          pointing_set: PointingSet,
          value_keys: list[str] | None = None,
          index_match_method: IndexMatchMethod = IndexMatchMethod.PUSH,
-         pset_valid_mask: NDArray | None = None,
+         pset_valid_mask: NDArray | xr.DataArray | None = None,
      ) -> None:
          """
          Project a pointing set's values to the map grid.
@@ -837,7 +828,7 @@
          index_match_method : IndexMatchMethod, optional
              The method of index matching to use for all values.
              Default is IndexMatchMethod.PUSH.
-         pset_valid_mask : NDArray, optional
+         pset_valid_mask : xarray.DataArray or NDArray, optional
              A boolean mask of shape (number of pointing set pixels,) indicating
              which pixels in the pointing set should be considered valid for projection.
              If None, all pixels are considered valid. Default is None.
@@ -849,9 +840,9 @@
          """
          if value_keys is None:
              value_keys = list(pointing_set.data.data_vars.keys())
-         for value_key in value_keys:
-             if value_key not in pointing_set.data.data_vars:
-                 raise ValueError(f"Value key {value_key} not found in pointing set.")
+
+         if missing_keys := set(value_keys) - set(pointing_set.data.data_vars):
+             raise KeyError(f"Value keys not found in pointing set: {missing_keys}")

          if pset_valid_mask is None:
              pset_valid_mask = np.ones(pointing_set.num_points, dtype=bool)
@@ -876,9 +867,12 @@
          )

          for value_key in value_keys:
+             if value_key not in pointing_set.data.data_vars:
+                 raise ValueError(f"Value key {value_key} not found in pointing set.")
+
              # If multiple spatial axes are present
              # (i.e. (az, el) for a rectangular-coordinate PSET),
-             # flatten them in the values array to match the raveled indices
+             # stack them into a single coordinate to match the raveled indices
              raveled_pset_data = pointing_set.data[value_key].stack(
                  {CoordNames.GENERIC_PIXEL.value: pointing_set.spatial_coords}
              )
@@ -907,13 +901,22 @@
              data_bc, indices_bc = xr.broadcast(
                  raveled_pset_data, matched_indices_push
              )
+             # If the valid mask is a xr.DataArray, broadcast it to the same shape
+             if isinstance(pset_valid_mask, xr.DataArray):
+                 stacked_valid_mask = pset_valid_mask.stack(
+                     {CoordNames.GENERIC_PIXEL.value: pointing_set.spatial_coords}
+                 )
+                 _, pset_valid_mask_bc = xr.broadcast(data_bc, stacked_valid_mask)
+                 pset_valid_mask_values = pset_valid_mask_bc.values
+             else:
+                 pset_valid_mask_values = pset_valid_mask

              # Extract numpy arrays for bincount operation
              pointing_projected_values = map_utils.bin_single_array_at_indices(
                  value_array=data_bc.values,
                  projection_grid_shape=self.binning_grid_shape,
                  projection_indices=indices_bc.values,
-                 input_valid_mask=pset_valid_mask,
+                 input_valid_mask=pset_valid_mask_values,
              )
              # TODO: we may need to allow for unweighted/weighted means here by
              # dividing pointing_projected_values by some binned weights.
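
The stack-then-broadcast pattern above is easiest to see on a toy example that is independent of the IMAP classes (all names below are invented):

    import numpy as np
    import xarray as xr

    # 2-D boolean mask over the spatial dims (az, el)
    mask = xr.DataArray(
        np.array([[True, False], [True, True]]),
        dims=("az", "el"),
    )
    # Data with an extra non-spatial dim (energy); the spatial dims are
    # stacked into one "pixel" dim, mirroring CoordNames.GENERIC_PIXEL above.
    data = xr.DataArray(
        np.arange(12.0).reshape(3, 2, 2),
        dims=("energy", "az", "el"),
    ).stack(pixel=("az", "el"))
    stacked_mask = mask.stack(pixel=("az", "el"))
    # Broadcasting against the data gives the mask the energy dim as well.
    _, mask_bc = xr.broadcast(data, stacked_mask)
    assert mask_bc.shape == data.shape  # (3, 4)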
@@ -934,10 +937,6 @@
              self.data_1d[value_key].values[..., valid_map_mask] += (
                  pointing_projected_values
              )
-         else:
-             raise NotImplementedError(
-                 "Only PUSH and PULL index matching methods are supported."
-             )

          # TODO: The max epoch needs to include the pset duration. Right now it
          # is just capturing the start epoch. See issue #1747
@@ -1266,12 +1265,13 @@ class RectangularSkyMap(AbstractSkyMap):
              coords={**self.non_spatial_coords, **self.spatial_coords},
          )

-     def build_cdf_dataset(
+     def build_cdf_dataset(  # noqa: PLR0912
          self,
          instrument: str,
          level: str,
          descriptor: str,
          sensor: str | None = None,
+         drop_vars_with_no_attributes: bool = True,
      ) -> xr.Dataset:
          """
          Format the data into a xarray.Dataset and add required CDF variables.
@@ -1286,6 +1286,12 @@
          Descriptor for filename.
      sensor : str, optional
          Sensor number "45" or "90".
+     drop_vars_with_no_attributes : bool, optional
+         Default behavior is to drop any dataset variables that don't have
+         attributes defined in the CDF attribute manager. This ensures that
+         the output CDF doesn't have any of the intermediate variables left
+         over from computations. Sometimes, it is useful to output the
+         intermediate variables. To do so, set this to False.

      Returns
      -------
@@ -1389,13 +1395,18 @@
                          variable_name=name,
                          check_schema=check_schema,
                      )
-             except KeyError as e:
-                 raise KeyError(
-                     f"Attributes for variable {name} not found in "
-                     f"loaded variable attributes."
-                 ) from e
-
-             cdf_ds[name].attrs.update(var_attrs)
+                 cdf_ds[name].attrs.update(var_attrs)
+             except KeyError:
+                 if drop_vars_with_no_attributes:
+                     logger.debug(
+                         f"Dropping variable '{name}' that has no attributes defined."
+                     )
+                     cdf_ds = cdf_ds.drop_vars(name)
+                 else:
+                     logger.debug(
+                         f"Variable '{name}' has no attributes defined. It will "
+                         f"be included in the output dataset with no attributes."
+                     )

          # Manually adjust epoch attributes
          cdf_ds["epoch"].attrs.update(