imap-processing 0.19.0__py3-none-any.whl → 0.19.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (64)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +31 -894
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +279 -255
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
  6. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +3 -1
  7. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  8. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
  9. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -31
  10. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +61 -1
  11. imap_processing/cli.py +62 -71
  12. imap_processing/codice/codice_l0.py +2 -1
  13. imap_processing/codice/codice_l1a.py +47 -49
  14. imap_processing/codice/codice_l1b.py +42 -32
  15. imap_processing/codice/codice_l2.py +105 -7
  16. imap_processing/codice/constants.py +50 -8
  17. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  18. imap_processing/ena_maps/ena_maps.py +39 -18
  19. imap_processing/ena_maps/utils/corrections.py +291 -0
  20. imap_processing/ena_maps/utils/map_utils.py +20 -4
  21. imap_processing/glows/l1b/glows_l1b.py +38 -23
  22. imap_processing/glows/l1b/glows_l1b_data.py +10 -11
  23. imap_processing/hi/hi_l1c.py +4 -109
  24. imap_processing/hi/hi_l2.py +34 -23
  25. imap_processing/hi/utils.py +109 -0
  26. imap_processing/ialirt/l0/ialirt_spice.py +1 -0
  27. imap_processing/ialirt/utils/create_xarray.py +1 -1
  28. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  29. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  30. imap_processing/lo/l1b/lo_l1b.py +90 -16
  31. imap_processing/lo/l1c/lo_l1c.py +164 -50
  32. imap_processing/lo/l2/lo_l2.py +941 -127
  33. imap_processing/mag/l1d/mag_l1d_data.py +36 -3
  34. imap_processing/mag/l2/mag_l2.py +2 -0
  35. imap_processing/mag/l2/mag_l2_data.py +4 -3
  36. imap_processing/quality_flags.py +14 -0
  37. imap_processing/spice/geometry.py +15 -8
  38. imap_processing/spice/pointing_frame.py +4 -2
  39. imap_processing/spice/repoint.py +49 -0
  40. imap_processing/ultra/constants.py +29 -0
  41. imap_processing/ultra/l1b/badtimes.py +35 -11
  42. imap_processing/ultra/l1b/de.py +15 -9
  43. imap_processing/ultra/l1b/extendedspin.py +24 -12
  44. imap_processing/ultra/l1b/goodtimes.py +112 -0
  45. imap_processing/ultra/l1b/lookup_utils.py +1 -1
  46. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  47. imap_processing/ultra/l1b/ultra_l1b_culling.py +8 -4
  48. imap_processing/ultra/l1b/ultra_l1b_extended.py +79 -43
  49. imap_processing/ultra/l1c/helio_pset.py +68 -39
  50. imap_processing/ultra/l1c/l1c_lookup_utils.py +45 -12
  51. imap_processing/ultra/l1c/spacecraft_pset.py +81 -37
  52. imap_processing/ultra/l1c/ultra_l1c.py +27 -22
  53. imap_processing/ultra/l1c/ultra_l1c_culling.py +7 -0
  54. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +41 -41
  55. imap_processing/ultra/l2/ultra_l2.py +54 -10
  56. imap_processing/ultra/utils/ultra_l1_utils.py +10 -5
  57. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +1 -1
  58. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +62 -60
  59. imap_processing/ultra/l1b/cullingmask.py +0 -90
  60. imap_processing/ultra/l1c/histogram.py +0 -36
  61. /imap_processing/glows/ancillary/{imap_glows_pipeline_settings_20250923_v002.json → imap_glows_pipeline-settings_20250923_v002.json} +0 -0
  62. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
  63. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
  64. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0
@@ -768,6 +768,7 @@ class AbstractSkyMap(ABC):
         pointing_set: PointingSet,
         value_keys: list[str] | None = None,
         index_match_method: IndexMatchMethod = IndexMatchMethod.PUSH,
+        pset_valid_mask: NDArray | None = None,
     ) -> None:
         """
         Project a pointing set's values to the map grid.
@@ -789,6 +790,10 @@ class AbstractSkyMap(ABC):
         index_match_method : IndexMatchMethod, optional
             The method of index matching to use for all values.
             Default is IndexMatchMethod.PUSH.
+        pset_valid_mask : NDArray, optional
+            A boolean mask of shape (number of pointing set pixels,) indicating
+            which pixels in the pointing set should be considered valid for projection.
+            If None, all pixels are considered valid. Default is None.

         Raises
         ------
@@ -801,6 +806,9 @@ class AbstractSkyMap(ABC):
             if value_key not in pointing_set.data.data_vars:
                 raise ValueError(f"Value key {value_key} not found in pointing set.")

+        if pset_valid_mask is None:
+            pset_valid_mask = np.ones(pointing_set.num_points, dtype=bool)
+
         if index_match_method is IndexMatchMethod.PUSH:
             # Determine the indices of the sky map grid that correspond to
             # each pixel in the pointing set.
@@ -860,22 +868,32 @@ class AbstractSkyMap(ABC):
                 value_array=raveled_pset_data,
                 projection_grid_shape=self.binning_grid_shape,
                 projection_indices=matched_indices_push,
+                input_valid_mask=pset_valid_mask,
             )
+            # TODO: we may need to allow for unweighted/weighted means here by
+            # dividing pointing_projected_values by some binned weights.
+            # For unweighted means, we could use the number of pointing set pixels
+            # that correspond to each map pixel as the weights.
+            self.data_1d[value_key] += pointing_projected_values
         elif index_match_method is IndexMatchMethod.PULL:
+            valid_map_mask = pset_valid_mask[matched_indices_pull]
             # We know that there will only be one value per sky map pixel,
             # so we can use the matched indices directly
-            pointing_projected_values = raveled_pset_data[..., matched_indices_pull]
+            pointing_projected_values = raveled_pset_data[
+                ..., matched_indices_pull[valid_map_mask]
+            ]
+            # TODO: we may need to allow for unweighted/weighted means here by
+            # dividing pointing_projected_values by some binned weights.
+            # For unweighted means, we could use the number of pointing set pixels
+            # that correspond to each map pixel as the weights.
+            self.data_1d[value_key].values[..., valid_map_mask] += (
+                pointing_projected_values
+            )
         else:
             raise NotImplementedError(
                 "Only PUSH and PULL index matching methods are supported."
             )

-        # TODO: we may need to allow for unweighted/weighted means here by
-        # dividing pointing_projected_values by some binned weights.
-        # For unweighted means, we could use the number of pointing set pixels
-        # that correspond to each map pixel as the weights.
-        self.data_1d[value_key] += pointing_projected_values
-
         # TODO: The max epoch needs to include the pset duration. Right now it
         # is just capturing the start epoch. See issue #1747
         self.min_epoch = min(self.min_epoch, pointing_set.epoch)
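
To see what the PULL branch above does with the new mask, here is a standalone numpy sketch (toy arrays, not package code): map pixels that would pull from an invalid pointing-set pixel are simply left untouched.

    import numpy as np

    # Toy PULL projection: each map pixel pulls from one pointing-set pixel.
    pset_values = np.array([10.0, 20.0, 30.0])
    matched_indices_pull = np.array([0, 2, 2, 1])    # pset pixel feeding each map pixel
    pset_valid_mask = np.array([True, False, True])  # pset pixel 1 is invalid

    valid_map_mask = pset_valid_mask[matched_indices_pull]  # per-map-pixel validity
    map_values = np.zeros(4)
    map_values[valid_map_mask] += pset_values[matched_indices_pull[valid_map_mask]]
    # map_values -> [10., 30., 30., 0.]: the map pixel fed by pset pixel 1 is skipped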
@@ -1169,6 +1187,10 @@ class RectangularSkyMap(AbstractSkyMap):
         # Rewrap each data array in the data_1d to the original 2D grid shape
         rewrapped_data = {}
         for key in self.data_1d.data_vars:
+            # Don't rewrap non-spatial variables
+            if CoordNames.GENERIC_PIXEL.value not in self.data_1d[key].coords:
+                rewrapped_data[key] = self.data_1d[key]
+                continue
             # drop pixel dim from the end, and add the spatial coords as dims
             rewrapped_dims = [
                 dim
@@ -1274,18 +1296,17 @@ class RectangularSkyMap(AbstractSkyMap):
                 name=f"{coord_name}_delta",
                 dims=[coord_name],
             )
-        # Add energy delta_minus and delta_plus variables
         elif coord_name == CoordNames.ENERGY_L2.value:
-            cdf_ds[f"{coord_name}_delta_minus"] = xr.DataArray(
-                xr.full_like(cdf_ds[coord_name], np.nan),
-                name=f"{coord_name}_delta",
-                dims=[coord_name],
-            )
-            cdf_ds[f"{coord_name}_delta_plus"] = xr.DataArray(
-                xr.full_like(cdf_ds[coord_name], np.nan),
-                name=f"{coord_name}_delta",
-                dims=[coord_name],
-            )
+            if f"{coord_name}_delta_minus" not in cdf_ds:
+                raise KeyError(
+                    f"Required variable '{coord_name}_delta_minus' "
+                    f"not found in cdf Dataset."
+                )
+            if f"{coord_name}_delta_plus" not in cdf_ds:
+                raise KeyError(
+                    f"Required variable '{coord_name}_delta_plus' "
+                    f"not found in cdf Dataset."
+                )

         # Object which holds CDF attributes for the map
         cdf_attrs = ImapCdfAttributes()
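
Note the contract change: the writer no longer fills the energy delta variables with NaN, it raises KeyError if they are missing. A minimal sketch of what a producer must now supply (the coordinate name "energy" stands in for CoordNames.ENERGY_L2.value and the half-width values are assumptions):

    import numpy as np
    import xarray as xr

    # Hypothetical upstream step: populate the now-required delta variables.
    energy = xr.DataArray(np.array([1.0, 2.1, 4.4]), dims=["energy"])
    cdf_ds = xr.Dataset(
        {
            "energy_delta_minus": xr.full_like(energy, 0.5),
            "energy_delta_plus": xr.full_like(energy, 0.5),
        },
        coords={"energy": energy},
    )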
@@ -0,0 +1,291 @@
+"""L2 corrections common to multiple IMAP ENA instruments."""
+
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+from numpy.polynomial import Polynomial
+
+
+class PowerLawFluxCorrector:
+    """
+    IMAP-Lo flux correction algorithm implementation.
+
+    Based on Section 5 of the Mapping Algorithm Document. Applies corrections for
+    ESA transmission integration over energy bandpass using iterative
+    predictor-corrector scheme to estimate source fluxes from observed fluxes.
+
+    Parameters
+    ----------
+    coeffs_file : str or Path
+        Location of CSV file containing ESA transmission coefficients.
+    """
+
+    def __init__(self, coeffs_file: str | Path):
+        """Initialize PowerLawFluxCorrector."""
+        # Load the csv file
+        eta_coeffs_df = pd.read_csv(coeffs_file, index_col="esa_step")
+        # Create a lookup dictionary to get the correct np.polynomial.Polynomial
+        # for a given esa_step
+        coeff_columns = ["M0", "M1", "M2", "M3", "M4", "M5"]
+        self.polynomial_lookup = {
+            row.name: Polynomial(row[coeff_columns].values)
+            for _, row in eta_coeffs_df.iterrows()
+        }
+
+    def eta_esa(self, k: np.ndarray, gamma: np.ndarray) -> np.ndarray:
+        """
+        Calculate ESA transmission scale factor η_esa,k(γ) for each energy level.
+
+        Parameters
+        ----------
+        k : np.ndarray
+            Energy levels.
+        gamma : np.ndarray
+            Power-law slopes.
+
+        Returns
+        -------
+        np.ndarray
+            ESA transmission scale factors.
+        """
+        k = np.atleast_1d(k)
+        gamma = np.atleast_1d(gamma)
+        eta = np.empty_like(gamma)
+        for i, esa_step in enumerate(k):
+            eta[i] = self.polynomial_lookup[esa_step](gamma[i])
+            # Negative transmissions get set to 1
+            if eta[i] < 0:
+                eta[i] = 1
+
+        return eta
+
+    @staticmethod
+    def estimate_power_law_slope(
+        fluxes: np.ndarray,
+        energies: np.ndarray,
+        uncertainties: np.ndarray | None = None,
+    ) -> tuple[np.ndarray, np.ndarray | None]:
+        """
+        Estimate power-law slopes γ_k for each energy level using vectorized operations.
+
+        Implements equations (36)-(41) from the Mapping Algorithm Document v7
+        with proper boundary handling. Uses extended arrays with repeated
+        endpoints for unified calculation, and handles zero fluxes by falling
+        back to linear differencing or returning NaN where both central and
+        linear differencing fail.
+
+        Parameters
+        ----------
+        fluxes : np.ndarray
+            Array of differential fluxes [J_1, J_2, ..., J_7].
+        energies : np.ndarray
+            Array of energy levels [E_1, E_2, ..., E_7].
+        uncertainties : np.ndarray, optional
+            Array of flux uncertainties [δJ_1, δJ_2, ..., δJ_7].
+
+        Returns
+        -------
+        gamma : np.ndarray
+            Array of power-law slopes.
+        delta_gamma : np.ndarray or None
+            Array of uncertainty slopes (if uncertainties provided).
+        """
+        n_levels = len(fluxes)
+        gamma = np.full(n_levels, 0, dtype=float)
+        delta_gamma = (
+            np.full(n_levels, 0, dtype=float) if uncertainties is not None else None
+        )
+
+        # Create an array of indices that can be used to create a padded array where
+        # the padding duplicates the first element on the front and the last element
+        # on the end of the array
+        extended_inds = np.pad(np.arange(n_levels), 1, mode="edge")
+
+        # Compute logs, setting non-positive fluxes to NaN
+        log_fluxes = np.log(np.where(fluxes > 0, fluxes, np.nan))
+        log_energies = np.log(energies)
+        # Create extended arrays by repeating first and last values. This allows
+        # for linear differencing to be used on the ends and central differencing
+        # to be used on the interior of the array with a single vectorized equation.
+        # Interior points use central differencing equation:
+        #     gamma_k = ln(J_{k+1}/J_{k-1}) / ln(E_{k+1}/E_{k-1})
+        # Left boundary uses linear forward differencing:
+        #     gamma_k = ln(J_{k+1}/J_{k}) / ln(E_{k+1}/E_{k})
+        # Right boundary uses linear backward differencing:
+        #     gamma_k = ln(J_{k}/J_{k-1}) / ln(E_{k}/E_{k-1})
+        log_extended_fluxes = log_fluxes[extended_inds]
+        log_extended_energies = log_energies[extended_inds]
+
+        # Extract the left and right log values to use in slope calculation
+        left_log_fluxes = log_extended_fluxes[:-2]  # indices 0 to n_levels-1
+        right_log_fluxes = log_extended_fluxes[2:]  # indices 2 to n_levels+1
+        left_log_energies = log_extended_energies[:-2]
+        right_log_energies = log_extended_energies[2:]
+
+        # Compute power-law slopes for valid indices
+        central_valid = np.isfinite(left_log_fluxes) & np.isfinite(right_log_fluxes)
+        gamma[central_valid] = (
+            (right_log_fluxes - left_log_fluxes)
+            / (right_log_energies - left_log_energies)
+        )[central_valid]
+
+        # Compute uncertainty slopes
+        if uncertainties is not None:
+            with np.errstate(divide="ignore"):
+                rel_unc_sq = (uncertainties / fluxes) ** 2
+            extended_rel_unc_sq = rel_unc_sq[extended_inds]
+            delta_gamma = np.sqrt(
+                extended_rel_unc_sq[:-2] + extended_rel_unc_sq[2:]
+            ) / (log_extended_energies[2:] - log_extended_energies[:-2])
+            delta_gamma[~central_valid] = 0
+
+        # Handle one-sided differencing for points where central differencing failed
+        need_fallback = ~central_valid & np.isfinite(log_fluxes)
+        # Exclude first and last points since they already use the correct
+        # one-sided differencing
+        interior_fallback = np.zeros_like(need_fallback, dtype=bool)
+        interior_fallback[1:-1] = need_fallback[1:-1]
+
+        if np.any(interior_fallback):
+            indices = np.where(interior_fallback)[0]
+
+            for k in indices:
+                # For interior points: try forward first, then backward
+                if k < n_levels - 1 and np.isfinite(log_fluxes[k + 1]):
+                    gamma[k] = (log_fluxes[k + 1] - log_fluxes[k]) / (
+                        log_energies[k + 1] - log_energies[k]
+                    )
+
+                    # Compute uncertainty slope using same differencing
+                    if isinstance(delta_gamma, np.ndarray):
+                        delta_gamma[k] = np.sqrt(rel_unc_sq[k + 1] + rel_unc_sq[k]) / (
+                            log_energies[k + 1] - log_energies[k]
+                        )
+
+                elif k > 0 and np.isfinite(log_fluxes[k - 1]):
+                    gamma[k] = (log_fluxes[k] - log_fluxes[k - 1]) / (
+                        log_energies[k] - log_energies[k - 1]
+                    )
+
+                    # Compute uncertainty slope using same differencing
+                    if isinstance(delta_gamma, np.ndarray):
+                        delta_gamma[k] = np.sqrt(rel_unc_sq[k] + rel_unc_sq[k - 1]) / (
+                            log_energies[k] - log_energies[k - 1]
+                        )
+
+        return gamma, delta_gamma
+
+    def predictor_corrector_iteration(
+        self,
+        observed_fluxes: np.ndarray,
+        observed_uncertainties: np.ndarray,
+        energies: np.ndarray,
+        max_iterations: int = 20,
+        convergence_threshold: float = 0.005,
+    ) -> tuple[np.ndarray, np.ndarray, int]:
+        """
+        Estimate source fluxes using iterative predictor-corrector scheme.
+
+        Implements the algorithm from Appendix A of the Mapping Algorithm Document.
+
+        Parameters
+        ----------
+        observed_fluxes : np.ndarray
+            Array of observed fluxes.
+        observed_uncertainties : numpy.ndarray
+            Array of observed uncertainties.
+        energies : np.ndarray
+            Array of energy levels.
+        max_iterations : int, optional
+            Maximum number of iterations, by default 20.
+        convergence_threshold : float, optional
+            RMS convergence criterion, by default 0.005 (0.5%).
+
+        Returns
+        -------
+        source_fluxes : np.ndarray
+            Final estimate of source fluxes.
+        source_uncertainties : np.ndarray
+            Final estimate of source uncertainties.
+        n_iterations : int
+            Number of iterations run.
+        """
+        n_levels = len(observed_fluxes)
+        energy_levels = np.arange(n_levels) + 1
+
+        # Initial power-law estimate from observed fluxes
+        gamma_initial, _ = self.estimate_power_law_slope(observed_fluxes, energies)
+
+        # Initial source flux estimate
+        eta_initial = self.eta_esa(energy_levels, gamma_initial)
+        source_fluxes_n = observed_fluxes / eta_initial
+
+        for _iteration in range(max_iterations):
+            # Store previous iteration
+            source_fluxes_prev = source_fluxes_n.copy()
+
+            # Predictor step
+            gamma_pred, _ = self.estimate_power_law_slope(source_fluxes_n, energies)
+            gamma_half = 0.5 * (gamma_initial + gamma_pred)
+
+            # Predictor source flux estimate
+            eta_half = self.eta_esa(energy_levels, gamma_half)
+            source_fluxes_half = observed_fluxes / eta_half
+
+            # Corrector step
+            gamma_corr, _ = self.estimate_power_law_slope(source_fluxes_half, energies)
+            gamma_n = 0.5 * (gamma_pred + gamma_corr)
+
+            # Final source flux estimate for this iteration
+            eta_final = self.eta_esa(energy_levels, gamma_n)
+            source_fluxes_n = observed_fluxes / eta_final
+            source_uncertainties = observed_uncertainties / eta_final
+
+            # Check convergence
+            ratios_sq = (source_fluxes_n / source_fluxes_prev) ** 2
+            chi_n = np.sqrt(np.mean(ratios_sq)) - 1
+
+            if chi_n < convergence_threshold:
+                break
+
+        return source_fluxes_n, source_uncertainties, _iteration + 1
+
+    def apply_flux_correction(
+        self, flux: np.ndarray, flux_stat_unc: np.ndarray, energies: np.ndarray
+    ) -> tuple[np.ndarray, np.ndarray]:
+        """
+        Apply flux correction to observed fluxes.
+
+        Iterative predictor-corrector scheme is run on each spatial pixel
+        individually to correct fluxes and statistical uncertainties. This method
+        is intended to be used with the unwrapped data in the ena_maps.AbstractSkyMap
+        class or child classes.
+
+        Parameters
+        ----------
+        flux : numpy.ndarray
+            Input flux with shape (n_energy, n_spatial_pixels).
+        flux_stat_unc : np.ndarray
+            Statistical uncertainty for input fluxes. Shape must match the shape
+            of flux.
+        energies : numpy.ndarray
+            Array of energy levels in units of eV or keV.
+
+        Returns
+        -------
+        tuple[numpy.ndarray, numpy.ndarray]
+            Corrected fluxes and flux uncertainties.
+        """
+        corrected_flux = np.empty_like(flux)
+        corrected_flux_stat_unc = np.empty_like(flux_stat_unc)
+
+        # loop over spatial pixels (last dimension)
+        for i_pixel in range(flux.shape[-1]):
+            corrected_flux[:, i_pixel], corrected_flux_stat_unc[:, i_pixel], _ = (
+                self.predictor_corrector_iteration(
+                    flux[:, i_pixel], flux_stat_unc[:, i_pixel], energies
+                )
+            )
+
+        return corrected_flux, corrected_flux_stat_unc
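
A minimal end-to-end sketch of the new corrector (the coefficient values, energy grid, and temp-file location are assumptions; only the esa_step/M0..M5 CSV layout follows the __init__ above):

    import tempfile
    from pathlib import Path

    import numpy as np

    from imap_processing.ena_maps.utils.corrections import PowerLawFluxCorrector

    # Hypothetical coefficient table: one row per esa_step with a constant
    # transmission polynomial (M0=0.9, higher orders zero), just to exercise
    # the interface; real coefficients ship as an ancillary CSV.
    csv_text = "esa_step,M0,M1,M2,M3,M4,M5\n" + "".join(
        f"{k},0.9,0,0,0,0,0\n" for k in range(1, 8)
    )
    coeffs_file = Path(tempfile.gettempdir()) / "eta_coeffs_example.csv"
    coeffs_file.write_text(csv_text)

    corrector = PowerLawFluxCorrector(coeffs_file)
    energies = np.array([0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0])  # assumed energy grid
    flux = np.ones((7, 3))      # (n_energy, n_spatial_pixels)
    flux_unc = 0.1 * flux
    corrected, corrected_unc = corrector.apply_flux_correction(flux, flux_unc, energies)
    # With a constant eta of 0.9, every corrected flux is observed / 0.9.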
@@ -15,6 +15,7 @@ def bin_single_array_at_indices(
     projection_grid_shape: tuple[int, ...],
     projection_indices: NDArray,
     input_indices: NDArray | None = None,
+    input_valid_mask: NDArray | None = None,
 ) -> NDArray:
     """
     Bin an array of values at the given indices.
@@ -39,6 +40,9 @@ def bin_single_array_at_indices(
         1 dimensional. May be non-unique, depending on the projection method.
         If None (default), an arange of the same length as the
         final axis of value_array is used.
+    input_valid_mask : NDArray, optional
+        Boolean mask array for valid values in input grid.
+        If None, all pixels are considered valid. Default is None.

     Returns
     -------
@@ -55,6 +59,8 @@ def bin_single_array_at_indices(
     """
     if input_indices is None:
         input_indices = np.arange(value_array.shape[-1])
+    if input_valid_mask is None:
+        input_valid_mask = np.ones(value_array.shape[-1], dtype=bool)

     # Both sets of indices must be 1D with the same number of elements
     if input_indices.ndim != 1 or projection_indices.ndim != 1:
@@ -69,20 +75,25 @@ def bin_single_array_at_indices(
             " projection indices."
         )

+    input_valid_mask = np.asarray(input_valid_mask, dtype=bool)
+    mask_idx = input_valid_mask[input_indices]
+
     num_projection_indices = np.prod(projection_grid_shape)

+    # Only valid values are summed into bins.
     if value_array.ndim == 1:
+        values = value_array[input_indices]
         binned_values = np.bincount(
-            projection_indices,
-            weights=value_array[input_indices],
+            projection_indices[mask_idx],
+            weights=values[mask_idx],
             minlength=num_projection_indices,
         )
     elif value_array.ndim >= 2:
         # Apply bincount to each row independently
         binned_values = np.apply_along_axis(
             lambda x: np.bincount(
-                projection_indices,
-                weights=x[..., input_indices],
+                projection_indices[mask_idx],
+                weights=x[..., input_indices][mask_idx],
                 minlength=num_projection_indices,
             ),
             axis=-1,
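
The masking pattern above composes directly with np.bincount, as in this standalone sketch (toy data, not from the package):

    import numpy as np

    values = np.array([1.0, 2.0, 3.0, 4.0])
    projection_indices = np.array([0, 1, 1, 2])   # map bin for each input pixel
    valid = np.array([True, False, True, True])   # second pixel excluded

    binned = np.bincount(
        projection_indices[valid], weights=values[valid], minlength=4
    )
    # binned -> [1., 3., 4., 0.]: the invalid pixel contributes nothing to bin 1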
@@ -96,6 +107,7 @@ def bin_values_at_indices(
     projection_grid_shape: tuple[int, ...],
     projection_indices: NDArray,
     input_indices: NDArray | None = None,
+    input_valid_mask: NDArray | None = None,
 ) -> dict[str, NDArray]:
     """
     Project values from input grid to projection grid based on matched indices.
@@ -118,6 +130,9 @@ def bin_values_at_indices(
         Ordered indices for input grid, corresponding to indices in projection grid.
         1 dimensional. May be non-unique, depending on the projection method.
         If None (default), behavior is determined by bin_single_array_at_indices.
+    input_valid_mask : NDArray, optional
+        Boolean mask array for valid values in input grid.
+        If None, all pixels are considered valid. Default is None.

     Returns
     -------
@@ -137,6 +152,7 @@ def bin_values_at_indices(
             projection_grid_shape=projection_grid_shape,
             projection_indices=projection_indices,
             input_indices=input_indices,
+            input_valid_mask=input_valid_mask,
         )

     return binned_values_dict
@@ -14,7 +14,9 @@ from imap_processing.glows.l1b.glows_l1b_data import (
     AncillaryParameters,
     DirectEventL1B,
     HistogramL1B,
+    PipelineSettings,
 )
+from imap_processing.spice.time import et_to_datetime64, ttj2000ns_to_et


 def glows_l1b(
@@ -23,6 +25,7 @@ def glows_l1b(
     uv_sources: xr.Dataset,
     suspected_transients: xr.Dataset,
     exclusions_by_instr_team: xr.Dataset,
+    pipeline_settings_dataset: xr.Dataset,
 ) -> xr.Dataset:
     """
     Will process the GLOWS L1B data and format the output datasets.
@@ -43,6 +46,9 @@ def glows_l1b(
     exclusions_by_instr_team : xr.Dataset
         Dataset containing manual exclusions by instrument team with time-based masks.
         This is the output from GlowsAncillaryCombiner.
+    pipeline_settings_dataset : xr.Dataset
+        Dataset containing pipeline settings, including the L1B conversion table and
+        other ancillary parameters.

     Returns
     -------
@@ -53,6 +59,8 @@ def glows_l1b(
     cdf_attrs.add_instrument_global_attrs("glows")
     cdf_attrs.add_instrument_variable_attrs("glows", "l1b")

+    day = et_to_datetime64(ttj2000ns_to_et(input_dataset["epoch"].data[0]))
+
     # Create ancillary exclusions object from passed-in datasets
     ancillary_exclusions = AncillaryExclusions(
         excluded_regions=excluded_regions,
@@ -60,6 +68,9 @@ def glows_l1b(
         suspected_transients=suspected_transients,
         exclusions_by_instr_team=exclusions_by_instr_team,
     )
+    pipeline_settings = PipelineSettings(
+        pipeline_settings_dataset.sel(epoch=day, method="nearest"),
+    )

     with open(
         Path(__file__).parents[1] / "ancillary" / "l1b_conversion_table_v001.json"
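
The nearest-epoch lookup used for the pipeline settings behaves like this standalone xarray sketch (toy dataset; only .sel(epoch=day, method="nearest") comes from the diff):

    import numpy as np
    import xarray as xr

    settings = xr.Dataset(
        {"flag_active": ("epoch", [0, 1])},
        coords={"epoch": np.array(["2025-09-01", "2025-10-01"], dtype="datetime64[ns]")},
    )
    day = np.datetime64("2025-09-28", "ns")
    print(settings.sel(epoch=day, method="nearest")["flag_active"].item())  # -> 1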
@@ -73,8 +84,11 @@ def glows_l1b(
     )

     if "hist" in logical_source:
+        output_dataarrays = process_histogram(
+            input_dataset, ancillary_exclusions, ancillary_parameters, pipeline_settings
+        )
         output_dataset = create_l1b_hist_output(
-            input_dataset, cdf_attrs, ancillary_parameters, ancillary_exclusions
+            output_dataarrays, input_dataset["epoch"], input_dataset["bins"], cdf_attrs
         )

     elif "de" in logical_source:
@@ -158,6 +172,7 @@ def process_histogram(
     l1a: xr.Dataset,
     ancillary_exclusions: AncillaryExclusions,
     ancillary_parameters: AncillaryParameters,
+    pipeline_settings: PipelineSettings,
 ) -> xr.Dataset:
     """
     Will process the histogram data from the L1A dataset and return the L1B dataset.
@@ -176,6 +191,8 @@ def process_histogram(
         The ancillary exclusions data for bad-angle flag processing.
     ancillary_parameters : AncillaryParameters
         The ancillary parameters for decoding histogram data.
+    pipeline_settings : PipelineSettings
+        The pipeline settings including flag activation.

     Returns
     -------
@@ -231,7 +248,7 @@ def process_histogram(
             Tuple of processed L1B data arrays from HistogramL1B.output_data().
         """
         return HistogramL1B(  # type: ignore[call-arg]
-            *args, ancillary_exclusions, ancillary_parameters
+            *args, ancillary_exclusions, ancillary_parameters, pipeline_settings
         ).output_data()

     l1b_fields = xr.apply_ufunc(
@@ -248,37 +265,39 @@ def process_histogram(


 def create_l1b_hist_output(
-    input_dataset: xr.Dataset,
+    l1b_dataarrays: tuple[xr.DataArray],
+    epoch: xr.DataArray,
+    bin_coord: xr.DataArray,
     cdf_attrs: ImapCdfAttributes,
-    ancillary_parameters: AncillaryParameters,
-    ancillary_exclusions: AncillaryExclusions,
 ) -> xr.Dataset:
     """
     Create the output dataset for the L1B histogram data.

-    This function processes the input dataset and creates a new dataset with the
-    appropriate attributes and data variables. It uses the `process_histogram` function
-    to process the histogram data.
+    This function takes in the output from `process_histogram`, which is a tuple of
+    DataArrays matching the output L1B data variables, and assembles them into a
+    Dataset with the appropriate coordinates.

     Parameters
     ----------
-    input_dataset : xr.Dataset
-        The input L1A GLOWS Histogram dataset to process.
+    l1b_dataarrays : tuple[xr.DataArray]
+        The DataArrays for each variable in the L1B dataset. These align with the
+        fields in the HistogramL1B dataclass, which also describes each variable.
+    epoch : xr.DataArray
+        The epoch DataArray to use as a coordinate in the output dataset. Generally
+        equal to the L1A epoch.
+    bin_coord : xr.DataArray
+        An arange DataArray for the bins coordinate. Nominally expected to be equal to
+        `xr.DataArray(np.arange(number_of_bins_per_histogram), name="bins",
+        dims=["bins"])`. Pulled up from L1A.
     cdf_attrs : ImapCdfAttributes
         The CDF attributes to use for the output dataset.
-    ancillary_parameters : AncillaryParameters
-        The ancillary parameters to use for the output dataset. Generated from the
-        l1b conversion table and pipeline setting ancillary files.
-    ancillary_exclusions : AncillaryExclusions
-        The ancillary exclusions to use for the output dataset. Generated from
-        ancillary files.

     Returns
     -------
     output_dataset : xr.Dataset
         The output dataset with the processed histogram data and all attributes.
     """
-    data_epoch = input_dataset["epoch"]
+    data_epoch = epoch
     data_epoch.attrs = cdf_attrs.get_variable_attributes("epoch", check_schema=False)

     flag_data = xr.DataArray(
@@ -318,7 +337,7 @@ def create_l1b_hist_output(
     )

     bin_data = xr.DataArray(
-        input_dataset["bins"].data,
+        bin_coord.data,
         name="bins",
         dims=["bins"],
         attrs=cdf_attrs.get_variable_attributes("bins_attrs", check_schema=False),
@@ -331,10 +350,6 @@ def create_l1b_hist_output(
         attrs=cdf_attrs.get_variable_attributes("bins_label", check_schema=False),
     )

-    output_dataarrays = process_histogram(
-        input_dataset, ancillary_exclusions, ancillary_parameters
-    )
-
     output_dataset = xr.Dataset(
         coords={
             "epoch": data_epoch,
@@ -352,7 +367,7 @@ def create_l1b_hist_output(
     # HistogramL1B dataclass, we can use dataclasses.fields to get the field names.

     fields = dataclasses.fields(HistogramL1B)
-    for index, dataarray in enumerate(output_dataarrays):
+    for index, dataarray in enumerate(l1b_dataarrays):
         # Dataarray is already an xr.DataArray type, so we can just assign it
         output_dataset[fields[index].name] = dataarray
         output_dataset[fields[index].name].attrs = cdf_attrs.get_variable_attributes(
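
The field-by-field assignment above follows a generic dataclass-to-Dataset pattern; a self-contained sketch with toy names (not GLOWS data):

    import dataclasses

    import numpy as np
    import xarray as xr

    @dataclasses.dataclass
    class ToyL1B:
        histogram: np.ndarray
        total_counts: np.ndarray

    arrays = (
        xr.DataArray(np.zeros((2, 4)), dims=["epoch", "bins"]),
        xr.DataArray(np.zeros(2), dims=["epoch"]),
    )
    ds = xr.Dataset()
    # Positional outputs map onto the dataclass fields, in declaration order.
    for field, dataarray in zip(dataclasses.fields(ToyL1B), arrays):
        ds[field.name] = dataarray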