imap-processing 0.19.0__py3-none-any.whl → 0.19.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (73)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +31 -894
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +279 -255
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +55 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +29 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +32 -0
  8. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +3 -1
  9. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  10. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +28 -16
  11. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -31
  12. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +61 -1
  13. imap_processing/cli.py +62 -71
  14. imap_processing/codice/codice_l0.py +2 -1
  15. imap_processing/codice/codice_l1a.py +47 -49
  16. imap_processing/codice/codice_l1b.py +42 -32
  17. imap_processing/codice/codice_l2.py +105 -7
  18. imap_processing/codice/constants.py +50 -8
  19. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  20. imap_processing/ena_maps/ena_maps.py +39 -18
  21. imap_processing/ena_maps/utils/corrections.py +291 -0
  22. imap_processing/ena_maps/utils/map_utils.py +20 -4
  23. imap_processing/glows/l1b/glows_l1b.py +38 -23
  24. imap_processing/glows/l1b/glows_l1b_data.py +10 -11
  25. imap_processing/hi/hi_l1c.py +4 -109
  26. imap_processing/hi/hi_l2.py +34 -23
  27. imap_processing/hi/utils.py +109 -0
  28. imap_processing/ialirt/l0/ialirt_spice.py +1 -1
  29. imap_processing/ialirt/l0/parse_mag.py +18 -4
  30. imap_processing/ialirt/l0/process_hit.py +9 -4
  31. imap_processing/ialirt/l0/process_swapi.py +9 -4
  32. imap_processing/ialirt/l0/process_swe.py +9 -4
  33. imap_processing/ialirt/utils/create_xarray.py +1 -1
  34. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  35. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  36. imap_processing/lo/l1b/lo_l1b.py +90 -16
  37. imap_processing/lo/l1c/lo_l1c.py +164 -50
  38. imap_processing/lo/l2/lo_l2.py +941 -127
  39. imap_processing/mag/l1d/mag_l1d_data.py +36 -3
  40. imap_processing/mag/l2/mag_l2.py +2 -0
  41. imap_processing/mag/l2/mag_l2_data.py +4 -3
  42. imap_processing/quality_flags.py +14 -0
  43. imap_processing/spice/geometry.py +13 -8
  44. imap_processing/spice/pointing_frame.py +4 -2
  45. imap_processing/spice/repoint.py +49 -0
  46. imap_processing/ultra/constants.py +29 -0
  47. imap_processing/ultra/l0/decom_tools.py +58 -46
  48. imap_processing/ultra/l0/decom_ultra.py +21 -9
  49. imap_processing/ultra/l0/ultra_utils.py +4 -4
  50. imap_processing/ultra/l1b/badtimes.py +35 -11
  51. imap_processing/ultra/l1b/de.py +15 -9
  52. imap_processing/ultra/l1b/extendedspin.py +24 -12
  53. imap_processing/ultra/l1b/goodtimes.py +112 -0
  54. imap_processing/ultra/l1b/lookup_utils.py +1 -1
  55. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  56. imap_processing/ultra/l1b/ultra_l1b_culling.py +8 -4
  57. imap_processing/ultra/l1b/ultra_l1b_extended.py +79 -43
  58. imap_processing/ultra/l1c/helio_pset.py +68 -39
  59. imap_processing/ultra/l1c/l1c_lookup_utils.py +45 -12
  60. imap_processing/ultra/l1c/spacecraft_pset.py +81 -37
  61. imap_processing/ultra/l1c/ultra_l1c.py +27 -22
  62. imap_processing/ultra/l1c/ultra_l1c_culling.py +7 -0
  63. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +41 -41
  64. imap_processing/ultra/l2/ultra_l2.py +75 -18
  65. imap_processing/ultra/utils/ultra_l1_utils.py +10 -5
  66. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/METADATA +2 -2
  67. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/RECORD +71 -69
  68. imap_processing/ultra/l1b/cullingmask.py +0 -90
  69. imap_processing/ultra/l1c/histogram.py +0 -36
  70. /imap_processing/glows/ancillary/{imap_glows_pipeline_settings_20250923_v002.json → imap_glows_pipeline-settings_20250923_v002.json} +0 -0
  71. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/LICENSE +0 -0
  72. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/WHEEL +0 -0
  73. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_l2.py

@@ -12,10 +12,12 @@ dataset = process_codice_l2(l1_filename)
 import logging
 from pathlib import Path
 
+import numpy as np
 import xarray as xr
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
+from imap_processing.codice.constants import HALF_SPIN_LUT
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)

@@ -54,6 +56,14 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
     cdf_attrs = ImapCdfAttributes()
     l2_dataset = add_dataset_attributes(l2_dataset, dataset_name, cdf_attrs)
 
+    # TODO: update list of datasets that need geometric factors (if needed)
+    # Compute geometric factors needed for intensity calculations
+    if dataset_name in [
+        "imap_codice_l2_lo-sw-species",
+        "imap_codice_l2_lo-nsw-species",
+    ]:
+        geometric_factors = compute_geometric_factors(l2_dataset)
+
     if dataset_name in [
         "imap_codice_l2_hi-counters-singles",
         "imap_codice_l2_hi-counters-aggregated",

@@ -63,6 +73,7 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
         "imap_codice_l2_lo-nsw-priority",
     ]:
         # No changes needed. Just save to an L2 CDF file.
+        # TODO: May not even need L2 files for these products
         pass
 
     elif dataset_name == "imap_codice_l2_hi-direct-events":

@@ -117,6 +128,8 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
         # Calculate the pickup ion sunward solar wind intensities using equation
         # described in section 11.2.4 of algorithm document.
         # Hopefully this can also apply to lo-ialirt
+        # TODO: WIP - needs to be completed
+        l2_dataset = process_lo_sw_species(l2_dataset, geometric_factors)
         pass
 
     elif dataset_name == "imap_codice_l2_lo-nsw-species":

@@ -132,14 +145,14 @@
 
 
 def add_dataset_attributes(
-    l2_dataset: xr.Dataset, dataset_name: str, cdf_attrs: ImapCdfAttributes
+    dataset: xr.Dataset, dataset_name: str, cdf_attrs: ImapCdfAttributes
 ) -> xr.Dataset:
     """
     Add the global and variable attributes to the dataset.
 
     Parameters
     ----------
-    l2_dataset : xarray.Dataset
+    dataset : xarray.Dataset
         The dataset to update.
     dataset_name : str
         The name of the dataset.

@@ -155,12 +168,12 @@ def add_dataset_attributes(
     cdf_attrs.add_instrument_variable_attrs("codice", "l2")
 
     # Update the global attributes
-    l2_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
+    dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
 
     # Set the variable attributes
-    for variable_name in l2_dataset.data_vars.keys():
+    for variable_name in dataset.data_vars.keys():
         try:
-            l2_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
+            dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
                 variable_name, check_schema=False
             )
         except KeyError:

@@ -169,7 +182,7 @@ def add_dataset_attributes(
             descriptor = dataset_name.split("imap_codice_l2_")[-1]
            cdf_attrs_key = f"{descriptor}-{variable_name}"
             try:
-                l2_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
+                dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
                     f"{cdf_attrs_key}", check_schema=False
                 )
             except KeyError:

@@ -177,4 +190,89 @@ def add_dataset_attributes(
                     f"Field '{variable_name}' and '{cdf_attrs_key}' not found in "
                     f"attribute manager."
                 )
-    return l2_dataset
+    return dataset
+
+
+def compute_geometric_factors(dataset: xr.Dataset) -> np.ndarray:
+    """
+    Calculate geometric factors needed for intensity calculations.
+
+    Geometric factors are determined by comparing the half-spin values per
+    esa_step in the HALF_SPIN_LUT to the rgfo_half_spin values in the provided
+    L2 dataset.
+
+    If the half-spin value is less than the corresponding rgfo_half_spin value,
+    the geometric factor is set to 0.75 (full mode); otherwise, it is set to 0.5
+    (reduced mode).
+
+    NOTE: Half-spin values are associated with ESA steps, which correspond to
+    indices of the energy_per_charge dimension between 0 and 127.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset containing the rgfo_half_spin data variable.
+
+    Returns
+    -------
+    geometric_factors : np.ndarray
+        A 2D array of geometric factors with shape (epoch, esa_steps).
+    """
+    # Convert the HALF_SPIN_LUT to a reverse mapping of esa_step to half_spin
+    esa_step_to_half_spin_map = {
+        val: key for key, vals in HALF_SPIN_LUT.items() for val in vals
+    }
+
+    # Create a list of half_spin values corresponding to ESA steps (0 to 127)
+    half_spin_values = np.array(
+        [esa_step_to_half_spin_map[step] for step in range(128)]
+    )
+
+    # Expand dimensions to compare each rgfo_half_spin value against
+    # all half_spin_values
+    rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis]  # Shape: (epoch, 1)
+
+    # Perform the comparison and calculate geometric factors
+    geometric_factors = np.where(half_spin_values < rgfo_half_spin, 0.75, 0.5)
+
+    return geometric_factors
+
+
+def process_lo_sw_species(
+    dataset: xr.Dataset, geometric_factors: np.ndarray
+) -> xr.Dataset:
+    """
+    Process the lo-sw-species L2 dataset to calculate species intensities.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset to process.
+    geometric_factors : np.ndarray
+        The geometric factors array with shape (epoch, esa_steps).
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with species intensities calculated.
+    """
+    # TODO: WIP - implement intensity calculations
+    # valid_solar_wind_vars = [
+    #     "hplus",
+    #     "heplusplus",
+    #     "cplus4",
+    #     "cplus5",
+    #     "cplus6",
+    #     "oplus5",
+    #     "oplus6",
+    #     "oplus7",
+    #     "oplus8",
+    #     "ne",
+    #     "mg",
+    #     "si",
+    #     "fe_loq",
+    #     "fe_hiq",
+    # ]
+    # valid_pick_up_ion_vars = ["heplus", "cnoplus"]
+
+    return dataset
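
The broadcast at the heart of compute_geometric_factors is easy to miss in diff form. Below is a minimal sketch on toy data; the 3-entry LUT covering 6 ESA steps is hypothetical and stands in for the real 32-entry, 128-step HALF_SPIN_LUT:

    import numpy as np

    # Invert a toy LUT: for each ESA step, find the half-spin it belongs to
    half_spin_lut = {0: [0, 1], 1: [2, 3], 2: [4, 5]}
    esa_step_to_half_spin = {
        step: half_spin for half_spin, steps in half_spin_lut.items() for step in steps
    }
    half_spin_values = np.array([esa_step_to_half_spin[s] for s in range(6)])  # (6,)

    # One rgfo_half_spin value per epoch; the trailing axis broadcasts the
    # comparison over every (epoch, esa_step) pair at once
    rgfo_half_spin = np.array([1, 3])[:, np.newaxis]  # shape (2, 1)
    geometric_factors = np.where(half_spin_values < rgfo_half_spin, 0.75, 0.5)
    print(geometric_factors.shape)  # (2, 6)
    print(geometric_factors[0])     # [0.75 0.75 0.5  0.5  0.5  0.5 ]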
imap_processing/codice/constants.py

@@ -60,6 +60,7 @@ CODICEAPID_MAPPING = {
 # Numerical constants
 SPIN_PERIOD_CONVERSION = 0.00032
 K_FACTOR = 5.76  # This is used to convert voltages to energies in L2
+HI_ACQUISITION_TIME = 0.59916
 
 # CDF variable names used for lo data products
 LO_COUNTERS_SINGLES_VARIABLE_NAMES = ["apd_singles"]

@@ -172,8 +173,6 @@ CODICE_HI_IAL_DATA_FIELDS = ["h"]
 
 # lo- and hi-counters-aggregated data product variables are dynamically
 # determined based on the number of active counters
-# TODO: Try to convince Joey to move to lower case variable names with
-# underscores?
 LO_COUNTERS_AGGREGATED_ACTIVE_VARIABLES = {
     "tcr": True,
     "dcr": True,

@@ -438,7 +437,7 @@ DATA_PRODUCT_CONFIGURATIONS: dict[CODICEAPID | int, dict] = {
         "instrument": "hi",
         "num_counters": len(
             HI_COUNTERS_AGGREGATED_VARIABLE_NAMES
-        ),  # The number of counters depends on the number of active counters
+        ),  # The number of counters depends on the number of *active* counters
         "support_variables": ["data_quality", "spin_period"],
         "variable_names": HI_COUNTERS_AGGREGATED_VARIABLE_NAMES,
     },

@@ -527,7 +526,7 @@ DATA_PRODUCT_CONFIGURATIONS: dict[CODICEAPID | int, dict] = {
         "instrument": "lo",
         "num_counters": len(
             LO_COUNTERS_AGGREGATED_VARIABLE_NAMES
-        ),  # The number of counters depends on the number of active counters
+        ),  # The number of counters depends on the number of *active* counters
         "support_variables": [
             "energy_table",
             "acquisition_time_per_step",

@@ -689,9 +688,9 @@ L1B_DATA_PRODUCT_CONFIGURATIONS: dict[str, dict] = {
         "num_spin_sectors": 24,
         "num_spins": 4,
     },
-    "hi-priority": {  # TODO: Ask Joey to define these
-        "num_spin_sectors": 1,
-        "num_spins": 1,
+    "hi-priority": {
+        "num_spin_sectors": 24,
+        "num_spins": 16,
     },
     "hi-sectored": {
         "num_spin_sectors": 2,

@@ -849,7 +848,7 @@ DE_DATA_PRODUCT_CONFIGURATIONS: dict[Any, dict[str, Any]] = {
 }
 
 # Define the packet fields needed to be stored in segmented data and their
-# corresponding bit lengths for direct event data products
+# corresponding bit lengths for I-ALiRT data products
 IAL_BIT_STRUCTURE = {
     "SHCOARSE": 32,
     "PACKET_VERSION": 16,

@@ -1657,6 +1656,8 @@ PIXEL_ORIENTATIONS = {
 # processing. These are taken from the "Acq Time" column in the "Lo Stepping"
 # tab of the "*-SCI-LUT-*.xml" spreadsheet that largely defines CoDICE
 # processing.
+# TODO: Do away with this lookup table and instead calculate the acquisition
+# times. See GitHub issue #1945.
 ACQUISITION_TIMES = {
     0: [
         578.70833333,

@@ -2179,3 +2180,44 @@ ACQUISITION_TIMES = {
         96.45138889,
     ],
 }
+
+# TODO: Update EFFICIENCY value when better information is available.
+# Constant for CoDICE intensity calculations.
+EFFICIENCY = 1
+
+# Lookup table mapping half-spin values (keys) to ESA steps (values).
+# This is used to determine geometric factors in L2 processing.
+HALF_SPIN_LUT = {
+    0: [0],
+    1: [1],
+    2: [2],
+    3: [3],
+    4: [4, 5],
+    5: [6, 7],
+    6: [8, 9],
+    7: [10, 11],
+    8: [12, 13, 14],
+    9: [15, 16, 17],
+    10: [18, 19, 20],
+    11: [21, 22, 23],
+    12: [24, 25, 26, 27],
+    13: [28, 29, 30, 31],
+    14: [32, 33, 34, 35],
+    15: [36, 37, 38, 39],
+    16: [40, 41, 42, 43, 44],
+    17: [45, 46, 47, 48, 49],
+    18: [50, 51, 52, 53, 54],
+    19: [55, 56, 57, 58, 59],
+    20: [60, 61, 62, 63, 64],
+    21: [65, 66, 67, 68, 69],
+    22: [70, 71, 72, 73, 74],
+    23: [75, 76, 77, 78, 79],
+    24: [80, 81, 82, 83, 84, 85],
+    25: [86, 87, 88, 89, 90, 91],
+    26: [92, 93, 94, 95, 96, 97],
+    27: [98, 99, 100, 101, 102, 103],
+    28: [104, 105, 106, 107, 108, 109],
+    29: [110, 111, 112, 113, 114, 115],
+    30: [116, 117, 118, 119, 120, 121],
+    31: [122, 123, 124, 125, 126, 127],
+}
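
A quick sanity check on the new table: its value lists partition ESA steps 0-127 exactly once (runs of group sizes 1 through 6 across the 32 half-spins), so the reverse lookup built in codice_l2.py is defined for every step. A minimal sketch:

    from imap_processing.codice.constants import HALF_SPIN_LUT

    all_steps = [step for steps in HALF_SPIN_LUT.values() for step in steps]
    assert sorted(all_steps) == list(range(128))  # complete, non-overlapping coverage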
imap_processing/codice/data/lo_stepping_values.csv

@@ -1,4 +1,4 @@
-table_num,row_num,num_reps,store_data,e1,e2,e3,e4,e5,e6,e7,e8,acq_time
+table_num,half_spin_num,num_reps,store_data,e1,e2,e3,e4,e5,e6,e7,e8,acq_time
 0,0,1,12,0,-,-,-,-,-,-,-,578.708333
 0,1,1,12,1,-,-,-,-,-,-,-,578.708333
 0,2,1,12,2,-,-,-,-,-,-,-,578.708333
imap_processing/ena_maps/ena_maps.py

@@ -768,6 +768,7 @@ class AbstractSkyMap(ABC):
         pointing_set: PointingSet,
         value_keys: list[str] | None = None,
         index_match_method: IndexMatchMethod = IndexMatchMethod.PUSH,
+        pset_valid_mask: NDArray | None = None,
     ) -> None:
         """
         Project a pointing set's values to the map grid.

@@ -789,6 +790,10 @@
         index_match_method : IndexMatchMethod, optional
             The method of index matching to use for all values.
             Default is IndexMatchMethod.PUSH.
+        pset_valid_mask : NDArray, optional
+            A boolean mask of shape (number of pointing set pixels,) indicating
+            which pixels in the pointing set should be considered valid for projection.
+            If None, all pixels are considered valid. Default is None.
 
         Raises
         ------

@@ -801,6 +806,9 @@
             if value_key not in pointing_set.data.data_vars:
                 raise ValueError(f"Value key {value_key} not found in pointing set.")
 
+        if pset_valid_mask is None:
+            pset_valid_mask = np.ones(pointing_set.num_points, dtype=bool)
+
         if index_match_method is IndexMatchMethod.PUSH:
             # Determine the indices of the sky map grid that correspond to
             # each pixel in the pointing set.

@@ -860,22 +868,32 @@
                 value_array=raveled_pset_data,
                 projection_grid_shape=self.binning_grid_shape,
                 projection_indices=matched_indices_push,
+                input_valid_mask=pset_valid_mask,
             )
+            # TODO: we may need to allow for unweighted/weighted means here by
+            # dividing pointing_projected_values by some binned weights.
+            # For unweighted means, we could use the number of pointing set pixels
+            # that correspond to each map pixel as the weights.
+            self.data_1d[value_key] += pointing_projected_values
         elif index_match_method is IndexMatchMethod.PULL:
+            valid_map_mask = pset_valid_mask[matched_indices_pull]
             # We know that there will only be one value per sky map pixel,
             # so we can use the matched indices directly
-            pointing_projected_values = raveled_pset_data[..., matched_indices_pull]
+            pointing_projected_values = raveled_pset_data[
+                ..., matched_indices_pull[valid_map_mask]
+            ]
+            # TODO: we may need to allow for unweighted/weighted means here by
+            # dividing pointing_projected_values by some binned weights.
+            # For unweighted means, we could use the number of pointing set pixels
+            # that correspond to each map pixel as the weights.
+            self.data_1d[value_key].values[..., valid_map_mask] += (
+                pointing_projected_values
+            )
         else:
             raise NotImplementedError(
                 "Only PUSH and PULL index matching methods are supported."
             )
 
-        # TODO: we may need to allow for unweighted/weighted means here by
-        # dividing pointing_projected_values by some binned weights.
-        # For unweighted means, we could use the number of pointing set pixels
-        # that correspond to each map pixel as the weights.
-        self.data_1d[value_key] += pointing_projected_values
-
         # TODO: The max epoch needs to include the pset duration. Right now it
         # is just capturing the start epoch. See issue #1747
         self.min_epoch = min(self.min_epoch, pointing_set.epoch)
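
The effect of pset_valid_mask in the PUSH branch can be sketched with toy arrays. Here np.add.at stands in for map_utils.bin_values_at_indices, whose new input_valid_mask argument is assumed to drop masked input pixels before binning:

    import numpy as np

    # Six pointing-set pixels projected onto a four-pixel map grid
    pset_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    matched_indices = np.array([0, 1, 1, 2, 3, 3])  # map pixel for each pset pixel
    pset_valid_mask = np.array([True, True, False, True, True, True])

    # PUSH-style accumulation: only valid pset pixels contribute to map bins
    map_values = np.zeros(4)
    np.add.at(
        map_values, matched_indices[pset_valid_mask], pset_values[pset_valid_mask]
    )
    print(map_values)  # [ 1.  2.  4. 11.] -- the masked pixel (value 3.0) is dropped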
@@ -1169,6 +1187,10 @@ class RectangularSkyMap(AbstractSkyMap):
         # Rewrap each data array in the data_1d to the original 2D grid shape
         rewrapped_data = {}
         for key in self.data_1d.data_vars:
+            # Don't rewrap non-spatial variables
+            if CoordNames.GENERIC_PIXEL.value not in self.data_1d[key].coords:
+                rewrapped_data[key] = self.data_1d[key]
+                continue
             # drop pixel dim from the end, and add the spatial coords as dims
             rewrapped_dims = [
                 dim

@@ -1274,18 +1296,17 @@
                 name=f"{coord_name}_delta",
                 dims=[coord_name],
             )
-        # Add energy delta_minus and delta_plus variables
         elif coord_name == CoordNames.ENERGY_L2.value:
-            cdf_ds[f"{coord_name}_delta_minus"] = xr.DataArray(
-                xr.full_like(cdf_ds[coord_name], np.nan),
-                name=f"{coord_name}_delta",
-                dims=[coord_name],
-            )
-            cdf_ds[f"{coord_name}_delta_plus"] = xr.DataArray(
-                xr.full_like(cdf_ds[coord_name], np.nan),
-                name=f"{coord_name}_delta",
-                dims=[coord_name],
-            )
+            if f"{coord_name}_delta_minus" not in cdf_ds:
+                raise KeyError(
+                    f"Required variable '{coord_name}_delta_minus' "
+                    f"not found in cdf Dataset."
+                )
+            if f"{coord_name}_delta_plus" not in cdf_ds:
+                raise KeyError(
+                    f"Required variable '{coord_name}_delta_plus' "
+                    f"not found in cdf Dataset."
+                )
 
         # Object which holds CDF attributes for the map
         cdf_attrs = ImapCdfAttributes()
imap_processing/ena_maps/utils/corrections.py

@@ -0,0 +1,291 @@
+"""L2 corrections common to multiple IMAP ENA instruments."""
+
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+from numpy.polynomial import Polynomial
+
+
+class PowerLawFluxCorrector:
+    """
+    IMAP-Lo flux correction algorithm implementation.
+
+    Based on Section 5 of the Mapping Algorithm Document. Applies corrections for
+    ESA transmission integration over the energy bandpass, using an iterative
+    predictor-corrector scheme to estimate source fluxes from observed fluxes.
+
+    Parameters
+    ----------
+    coeffs_file : str or Path
+        Location of CSV file containing ESA transmission coefficients.
+    """
+
+    def __init__(self, coeffs_file: str | Path):
+        """Initialize PowerLawFluxCorrector."""
+        # Load the CSV file
+        eta_coeffs_df = pd.read_csv(coeffs_file, index_col="esa_step")
+        # Create a lookup dictionary to get the correct np.polynomial.Polynomial
+        # for a given esa_step
+        coeff_columns = ["M0", "M1", "M2", "M3", "M4", "M5"]
+        self.polynomial_lookup = {
+            row.name: Polynomial(row[coeff_columns].values)
+            for _, row in eta_coeffs_df.iterrows()
+        }
+
+    def eta_esa(self, k: np.ndarray, gamma: np.ndarray) -> np.ndarray:
+        """
+        Calculate ESA transmission scale factor η_esa,k(γ) for each energy level.
+
+        Parameters
+        ----------
+        k : np.ndarray
+            Energy levels.
+        gamma : np.ndarray
+            Power-law slopes.
+
+        Returns
+        -------
+        np.ndarray
+            ESA transmission scale factors.
+        """
+        k = np.atleast_1d(k)
+        gamma = np.atleast_1d(gamma)
+        eta = np.empty_like(gamma)
+        for i, esa_step in enumerate(k):
+            eta[i] = self.polynomial_lookup[esa_step](gamma[i])
+            # Negative transmissions get set to 1
+            if eta[i] < 0:
+                eta[i] = 1
+
+        return eta
+
+    @staticmethod
+    def estimate_power_law_slope(
+        fluxes: np.ndarray,
+        energies: np.ndarray,
+        uncertainties: np.ndarray | None = None,
+    ) -> tuple[np.ndarray, np.ndarray | None]:
+        """
+        Estimate power-law slopes γ_k for each energy level using vectorized operations.
+
+        Implements equations (36)-(41) from the Mapping Algorithm Document v7
+        with proper boundary handling. Uses extended arrays with repeated
+        endpoints for unified calculation, and handles zero fluxes by falling
+        back to linear differencing or returning NaN where both central and
+        linear differencing fail.
+
+        Parameters
+        ----------
+        fluxes : np.ndarray
+            Array of differential fluxes [J_1, J_2, ..., J_7].
+        energies : np.ndarray
+            Array of energy levels [E_1, E_2, ..., E_7].
+        uncertainties : np.ndarray, optional
+            Array of flux uncertainties [δJ_1, δJ_2, ..., δJ_7].
+
+        Returns
+        -------
+        gamma : np.ndarray
+            Array of power-law slopes.
+        delta_gamma : np.ndarray or None
+            Array of uncertainty slopes (if uncertainties provided).
+        """
+        n_levels = len(fluxes)
+        gamma = np.full(n_levels, 0, dtype=float)
+        delta_gamma = (
+            np.full(n_levels, 0, dtype=float) if uncertainties is not None else None
+        )
+
+        # Create an array of indices that can be used to create a padded array where
+        # the padding duplicates the first element on the front and the last element
+        # on the end of the array
+        extended_inds = np.pad(np.arange(n_levels), 1, mode="edge")
+
+        # Compute logs, setting non-positive fluxes to NaN
+        log_fluxes = np.log(np.where(fluxes > 0, fluxes, np.nan))
+        log_energies = np.log(energies)
+        # Create extended arrays by repeating first and last values. This allows
+        # for linear differencing to be used on the ends and central differencing
+        # to be used on the interior of the array with a single vectorized equation.
+        # Interior points use central differencing equation:
+        #     gamma_k = ln(J_{k+1}/J_{k-1}) / ln(E_{k+1}/E_{k-1})
+        # Left boundary uses linear forward differencing:
+        #     gamma_k = ln(J_{k+1}/J_{k}) / ln(E_{k+1}/E_{k})
+        # Right boundary uses linear backward differencing:
+        #     gamma_k = ln(J_{k}/J_{k-1}) / ln(E_{k}/E_{k-1})
+        log_extended_fluxes = log_fluxes[extended_inds]
+        log_extended_energies = log_energies[extended_inds]
+
+        # Extract the left and right log values to use in slope calculation
+        left_log_fluxes = log_extended_fluxes[:-2]  # indices 0 to n_levels-1
+        right_log_fluxes = log_extended_fluxes[2:]  # indices 2 to n_levels+1
+        left_log_energies = log_extended_energies[:-2]
+        right_log_energies = log_extended_energies[2:]
+
+        # Compute power-law slopes for valid indices
+        central_valid = np.isfinite(left_log_fluxes) & np.isfinite(right_log_fluxes)
+        gamma[central_valid] = (
+            (right_log_fluxes - left_log_fluxes)
+            / (right_log_energies - left_log_energies)
+        )[central_valid]
+
+        # Compute uncertainty slopes
+        if uncertainties is not None:
+            with np.errstate(divide="ignore"):
+                rel_unc_sq = (uncertainties / fluxes) ** 2
+            extended_rel_unc_sq = rel_unc_sq[extended_inds]
+            delta_gamma = np.sqrt(
+                extended_rel_unc_sq[:-2] + extended_rel_unc_sq[2:]
+            ) / (log_extended_energies[2:] - log_extended_energies[:-2])
+            delta_gamma[~central_valid] = 0
+
+        # Handle one-sided differencing for points where central differencing failed
+        need_fallback = ~central_valid & np.isfinite(log_fluxes)
+        # Exclude first and last points since they already use the correct
+        # one-sided differencing
+        interior_fallback = np.zeros_like(need_fallback, dtype=bool)
+        interior_fallback[1:-1] = need_fallback[1:-1]
+
+        if np.any(interior_fallback):
+            indices = np.where(interior_fallback)[0]
+
+            for k in indices:
+                # For interior points: try forward first, then backward
+                if k < n_levels - 1 and np.isfinite(log_fluxes[k + 1]):
+                    gamma[k] = (log_fluxes[k + 1] - log_fluxes[k]) / (
+                        log_energies[k + 1] - log_energies[k]
+                    )
+
+                    # Compute uncertainty slope using same differencing
+                    if isinstance(delta_gamma, np.ndarray):
+                        delta_gamma[k] = np.sqrt(rel_unc_sq[k + 1] + rel_unc_sq[k]) / (
+                            log_energies[k + 1] - log_energies[k]
+                        )
+
+                elif k > 0 and np.isfinite(log_fluxes[k - 1]):
+                    gamma[k] = (log_fluxes[k] - log_fluxes[k - 1]) / (
+                        log_energies[k] - log_energies[k - 1]
+                    )
+
+                    # Compute uncertainty slope using same differencing
+                    if isinstance(delta_gamma, np.ndarray):
+                        delta_gamma[k] = np.sqrt(rel_unc_sq[k] + rel_unc_sq[k - 1]) / (
+                            log_energies[k] - log_energies[k - 1]
+                        )
+
+        return gamma, delta_gamma
+
+    def predictor_corrector_iteration(
+        self,
+        observed_fluxes: np.ndarray,
+        observed_uncertainties: np.ndarray,
+        energies: np.ndarray,
+        max_iterations: int = 20,
+        convergence_threshold: float = 0.005,
+    ) -> tuple[np.ndarray, np.ndarray, int]:
+        """
+        Estimate source fluxes using an iterative predictor-corrector scheme.
+
+        Implements the algorithm from Appendix A of the Mapping Algorithm Document.
+
+        Parameters
+        ----------
+        observed_fluxes : np.ndarray
+            Array of observed fluxes.
+        observed_uncertainties : np.ndarray
+            Array of observed uncertainties.
+        energies : np.ndarray
+            Array of energy levels.
+        max_iterations : int, optional
+            Maximum number of iterations, by default 20.
+        convergence_threshold : float, optional
+            RMS convergence criterion, by default 0.005 (0.5%).
+
+        Returns
+        -------
+        source_fluxes : np.ndarray
+            Final estimate of source fluxes.
+        source_uncertainties : np.ndarray
+            Final estimate of source uncertainties.
+        n_iterations : int
+            Number of iterations run.
+        """
+        n_levels = len(observed_fluxes)
+        energy_levels = np.arange(n_levels) + 1
+
+        # Initial power-law estimate from observed fluxes
+        gamma_initial, _ = self.estimate_power_law_slope(observed_fluxes, energies)
+
+        # Initial source flux estimate
+        eta_initial = self.eta_esa(energy_levels, gamma_initial)
+        source_fluxes_n = observed_fluxes / eta_initial
+
+        for _iteration in range(max_iterations):
+            # Store previous iteration
+            source_fluxes_prev = source_fluxes_n.copy()
+
+            # Predictor step
+            gamma_pred, _ = self.estimate_power_law_slope(source_fluxes_n, energies)
+            gamma_half = 0.5 * (gamma_initial + gamma_pred)
+
+            # Predictor source flux estimate
+            eta_half = self.eta_esa(energy_levels, gamma_half)
+            source_fluxes_half = observed_fluxes / eta_half
+
+            # Corrector step
+            gamma_corr, _ = self.estimate_power_law_slope(source_fluxes_half, energies)
+            gamma_n = 0.5 * (gamma_pred + gamma_corr)
+
+            # Final source flux estimate for this iteration
+            eta_final = self.eta_esa(energy_levels, gamma_n)
+            source_fluxes_n = observed_fluxes / eta_final
+            source_uncertainties = observed_uncertainties / eta_final
+
+            # Check convergence
+            ratios_sq = (source_fluxes_n / source_fluxes_prev) ** 2
+            chi_n = np.sqrt(np.mean(ratios_sq)) - 1
+
+            if chi_n < convergence_threshold:
+                break
+
+        return source_fluxes_n, source_uncertainties, _iteration + 1
+
+    def apply_flux_correction(
+        self, flux: np.ndarray, flux_stat_unc: np.ndarray, energies: np.ndarray
+    ) -> tuple[np.ndarray, np.ndarray]:
+        """
+        Apply flux correction to observed fluxes.
+
+        The iterative predictor-corrector scheme is run on each spatial pixel
+        individually to correct fluxes and statistical uncertainties. This method
+        is intended to be used with the unwrapped data in the ena_maps.AbstractSkyMap
+        class or child classes.
+
+        Parameters
+        ----------
+        flux : np.ndarray
+            Input flux with shape (n_energy, n_spatial_pixels).
+        flux_stat_unc : np.ndarray
+            Statistical uncertainty for input fluxes. Shape must match the shape
+            of flux.
+        energies : np.ndarray
+            Array of energy levels in units of eV or keV.
+
+        Returns
+        -------
+        tuple[np.ndarray, np.ndarray]
+            Corrected fluxes and flux uncertainties.
+        """
+        corrected_flux = np.empty_like(flux)
+        corrected_flux_stat_unc = np.empty_like(flux_stat_unc)
+
+        # Loop over spatial pixels (last dimension)
+        for i_pixel in range(flux.shape[-1]):
+            corrected_flux[:, i_pixel], corrected_flux_stat_unc[:, i_pixel], _ = (
+                self.predictor_corrector_iteration(
+                    flux[:, i_pixel], flux_stat_unc[:, i_pixel], energies
+                )
+            )
+
+        return corrected_flux, corrected_flux_stat_unc
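
Two behaviors of the new PowerLawFluxCorrector can be checked with a small, self-contained sketch. The slope estimator is a staticmethod, so it needs no coefficients file; the end-to-end check uses a toy in-memory coefficients table (hypothetical values, assuming pandas accepts a buffer in place of the file path):

    import io

    import numpy as np

    from imap_processing.ena_maps.utils.corrections import PowerLawFluxCorrector

    # 1) For a pure power law J(E) = A * E**g, log-log differencing is exact,
    #    so every level should recover g = -2 (central differences in the
    #    interior, one-sided differences at the endpoints).
    energies = np.array([10.0, 20.0, 40.0, 80.0, 160.0, 320.0, 640.0])
    fluxes = 1e4 * energies**-2.0
    gamma, _ = PowerLawFluxCorrector.estimate_power_law_slope(fluxes, energies)
    print(gamma)  # approximately [-2. -2. -2. -2. -2. -2. -2.]

    # 2) A toy coefficients table with M0=1 and M1..M5=0 makes eta_esa == 1
    #    for every ESA step, so corrected fluxes equal observed fluxes and
    #    the predictor-corrector converges immediately.
    toy_coeffs = "esa_step,M0,M1,M2,M3,M4,M5\n" + "".join(
        f"{k},1,0,0,0,0,0\n" for k in range(1, 8)
    )
    corrector = PowerLawFluxCorrector(io.StringIO(toy_coeffs))
    flux_2d = fluxes[:, np.newaxis]  # shape (n_energy, n_spatial_pixels=1)
    unc_2d = 0.1 * flux_2d
    corrected, corrected_unc = corrector.apply_flux_correction(
        flux_2d, unc_2d, energies
    )
    assert np.allclose(corrected, flux_2d)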