imap-processing 0.19.0__py3-none-any.whl → 0.19.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (73)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +31 -894
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +279 -255
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +55 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +29 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +32 -0
  8. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +3 -1
  9. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  10. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +28 -16
  11. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -31
  12. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +61 -1
  13. imap_processing/cli.py +62 -71
  14. imap_processing/codice/codice_l0.py +2 -1
  15. imap_processing/codice/codice_l1a.py +47 -49
  16. imap_processing/codice/codice_l1b.py +42 -32
  17. imap_processing/codice/codice_l2.py +105 -7
  18. imap_processing/codice/constants.py +50 -8
  19. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  20. imap_processing/ena_maps/ena_maps.py +39 -18
  21. imap_processing/ena_maps/utils/corrections.py +291 -0
  22. imap_processing/ena_maps/utils/map_utils.py +20 -4
  23. imap_processing/glows/l1b/glows_l1b.py +38 -23
  24. imap_processing/glows/l1b/glows_l1b_data.py +10 -11
  25. imap_processing/hi/hi_l1c.py +4 -109
  26. imap_processing/hi/hi_l2.py +34 -23
  27. imap_processing/hi/utils.py +109 -0
  28. imap_processing/ialirt/l0/ialirt_spice.py +1 -1
  29. imap_processing/ialirt/l0/parse_mag.py +18 -4
  30. imap_processing/ialirt/l0/process_hit.py +9 -4
  31. imap_processing/ialirt/l0/process_swapi.py +9 -4
  32. imap_processing/ialirt/l0/process_swe.py +9 -4
  33. imap_processing/ialirt/utils/create_xarray.py +1 -1
  34. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  35. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  36. imap_processing/lo/l1b/lo_l1b.py +90 -16
  37. imap_processing/lo/l1c/lo_l1c.py +164 -50
  38. imap_processing/lo/l2/lo_l2.py +941 -127
  39. imap_processing/mag/l1d/mag_l1d_data.py +36 -3
  40. imap_processing/mag/l2/mag_l2.py +2 -0
  41. imap_processing/mag/l2/mag_l2_data.py +4 -3
  42. imap_processing/quality_flags.py +14 -0
  43. imap_processing/spice/geometry.py +13 -8
  44. imap_processing/spice/pointing_frame.py +4 -2
  45. imap_processing/spice/repoint.py +49 -0
  46. imap_processing/ultra/constants.py +29 -0
  47. imap_processing/ultra/l0/decom_tools.py +58 -46
  48. imap_processing/ultra/l0/decom_ultra.py +21 -9
  49. imap_processing/ultra/l0/ultra_utils.py +4 -4
  50. imap_processing/ultra/l1b/badtimes.py +35 -11
  51. imap_processing/ultra/l1b/de.py +15 -9
  52. imap_processing/ultra/l1b/extendedspin.py +24 -12
  53. imap_processing/ultra/l1b/goodtimes.py +112 -0
  54. imap_processing/ultra/l1b/lookup_utils.py +1 -1
  55. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  56. imap_processing/ultra/l1b/ultra_l1b_culling.py +8 -4
  57. imap_processing/ultra/l1b/ultra_l1b_extended.py +79 -43
  58. imap_processing/ultra/l1c/helio_pset.py +68 -39
  59. imap_processing/ultra/l1c/l1c_lookup_utils.py +45 -12
  60. imap_processing/ultra/l1c/spacecraft_pset.py +81 -37
  61. imap_processing/ultra/l1c/ultra_l1c.py +27 -22
  62. imap_processing/ultra/l1c/ultra_l1c_culling.py +7 -0
  63. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +41 -41
  64. imap_processing/ultra/l2/ultra_l2.py +75 -18
  65. imap_processing/ultra/utils/ultra_l1_utils.py +10 -5
  66. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/METADATA +2 -2
  67. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/RECORD +71 -69
  68. imap_processing/ultra/l1b/cullingmask.py +0 -90
  69. imap_processing/ultra/l1c/histogram.py +0 -36
  70. /imap_processing/glows/ancillary/{imap_glows_pipeline_settings_20250923_v002.json → imap_glows_pipeline-settings_20250923_v002.json} +0 -0
  71. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/LICENSE +0 -0
  72. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/WHEEL +0 -0
  73. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/entry_points.txt +0 -0
imap_processing/ena_maps/utils/map_utils.py

@@ -15,6 +15,7 @@ def bin_single_array_at_indices(
     projection_grid_shape: tuple[int, ...],
     projection_indices: NDArray,
     input_indices: NDArray | None = None,
+    input_valid_mask: NDArray | None = None,
 ) -> NDArray:
     """
     Bin an array of values at the given indices.
@@ -39,6 +40,9 @@ def bin_single_array_at_indices(
         1 dimensional. May be non-unique, depending on the projection method.
         If None (default), an arange of the same length as the
         final axis of value_array is used.
+    input_valid_mask : NDArray, optional
+        Boolean mask array for valid values in input grid.
+        If None, all pixels are considered valid. Default is None.

     Returns
     -------
@@ -55,6 +59,8 @@ def bin_single_array_at_indices(
     """
     if input_indices is None:
         input_indices = np.arange(value_array.shape[-1])
+    if input_valid_mask is None:
+        input_valid_mask = np.ones(value_array.shape[-1], dtype=bool)

     # Both sets of indices must be 1D with the same number of elements
     if input_indices.ndim != 1 or projection_indices.ndim != 1:
@@ -69,20 +75,25 @@ def bin_single_array_at_indices(
             " projection indices."
         )

+    input_valid_mask = np.asarray(input_valid_mask, dtype=bool)
+    mask_idx = input_valid_mask[input_indices]
+
     num_projection_indices = np.prod(projection_grid_shape)

+    # Only valid values are summed into bins.
     if value_array.ndim == 1:
+        values = value_array[input_indices]
         binned_values = np.bincount(
-            projection_indices,
-            weights=value_array[input_indices],
+            projection_indices[mask_idx],
+            weights=values[mask_idx],
             minlength=num_projection_indices,
         )
     elif value_array.ndim >= 2:
         # Apply bincount to each row independently
         binned_values = np.apply_along_axis(
             lambda x: np.bincount(
-                projection_indices,
-                weights=x[..., input_indices],
+                projection_indices[mask_idx],
+                weights=x[..., input_indices][mask_idx],
                 minlength=num_projection_indices,
             ),
             axis=-1,
@@ -96,6 +107,7 @@ def bin_values_at_indices(
     projection_grid_shape: tuple[int, ...],
     projection_indices: NDArray,
     input_indices: NDArray | None = None,
+    input_valid_mask: NDArray | None = None,
 ) -> dict[str, NDArray]:
     """
     Project values from input grid to projection grid based on matched indices.
@@ -118,6 +130,9 @@ def bin_values_at_indices(
         Ordered indices for input grid, corresponding to indices in projection grid.
         1 dimensional. May be non-unique, depending on the projection method.
         If None (default), behavior is determined by bin_single_array_at_indices.
+    input_valid_mask : NDArray, optional
+        Boolean mask array for valid values in input grid.
+        If None, all pixels are considered valid. Default is None.

     Returns
     -------
@@ -137,6 +152,7 @@ def bin_values_at_indices(
         projection_grid_shape=projection_grid_shape,
         projection_indices=projection_indices,
         input_indices=input_indices,
+        input_valid_mask=input_valid_mask,
     )

     return binned_values_dict
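
The hunk above threads an optional validity mask through the binning: invalid input pixels are dropped from both the projection indices and the weights before np.bincount sums values into bins. A minimal standalone sketch of that pattern in plain NumPy (toy values, not the imap_processing API):

import numpy as np

# Target bin for each input pixel, and a mask marking the second pixel invalid.
values = np.array([1.0, 2.0, 4.0, 8.0])
projection_indices = np.array([0, 1, 1, 2])
valid = np.array([True, False, True, True])

# Masked pixels contribute nothing to their bin, mirroring the diff above.
binned = np.bincount(
    projection_indices[valid],
    weights=values[valid],
    minlength=3,
)
print(binned)  # [1. 4. 8.] -- the masked 2.0 never reaches bin 1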
imap_processing/glows/l1b/glows_l1b.py

@@ -14,7 +14,9 @@ from imap_processing.glows.l1b.glows_l1b_data import (
     AncillaryParameters,
     DirectEventL1B,
     HistogramL1B,
+    PipelineSettings,
 )
+from imap_processing.spice.time import et_to_datetime64, ttj2000ns_to_et


 def glows_l1b(
@@ -23,6 +25,7 @@ def glows_l1b(
     uv_sources: xr.Dataset,
     suspected_transients: xr.Dataset,
     exclusions_by_instr_team: xr.Dataset,
+    pipeline_settings_dataset: xr.Dataset,
 ) -> xr.Dataset:
     """
     Will process the GLOWS L1B data and format the output datasets.
@@ -43,6 +46,9 @@ def glows_l1b(
     exclusions_by_instr_team : xr.Dataset
         Dataset containing manual exclusions by instrument team with time-based masks.
         This is the output from GlowsAncillaryCombiner.
+    pipeline_settings_dataset : xr.Dataset
+        Dataset containing pipeline settings, including the L1B conversion table and
+        other ancillary parameters.

     Returns
     -------
@@ -53,6 +59,8 @@ def glows_l1b(
     cdf_attrs.add_instrument_global_attrs("glows")
     cdf_attrs.add_instrument_variable_attrs("glows", "l1b")

+    day = et_to_datetime64(ttj2000ns_to_et(input_dataset["epoch"].data[0]))
+
     # Create ancillary exclusions object from passed-in datasets
     ancillary_exclusions = AncillaryExclusions(
         excluded_regions=excluded_regions,
@@ -60,6 +68,9 @@ def glows_l1b(
         suspected_transients=suspected_transients,
         exclusions_by_instr_team=exclusions_by_instr_team,
     )
+    pipeline_settings = PipelineSettings(
+        pipeline_settings_dataset.sel(epoch=day, method="nearest"),
+    )

     with open(
         Path(__file__).parents[1] / "ancillary" / "l1b_conversion_table_v001.json"
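
The new pipeline_settings lookup selects the ancillary record closest in time to the first epoch of the input dataset via xarray's nearest-neighbor selection. A toy sketch of that selection (invented data, not GLOWS ancillary content):

import numpy as np
import xarray as xr

# Ancillary-style dataset with one record per settings epoch.
settings = xr.Dataset(
    {"active_flags": ("epoch", [10, 20, 30])},
    coords={
        "epoch": np.array(
            ["2025-01-01", "2025-06-01", "2026-01-01"], dtype="datetime64[ns]"
        )
    },
)
day = np.datetime64("2025-05-20", "ns")
# method="nearest" picks the record whose epoch is closest to `day`.
print(settings.sel(epoch=day, method="nearest")["active_flags"].item())  # 20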
@@ -73,8 +84,11 @@ def glows_l1b(
     )

     if "hist" in logical_source:
+        output_dataarrays = process_histogram(
+            input_dataset, ancillary_exclusions, ancillary_parameters, pipeline_settings
+        )
         output_dataset = create_l1b_hist_output(
-            input_dataset, cdf_attrs, ancillary_parameters, ancillary_exclusions
+            output_dataarrays, input_dataset["epoch"], input_dataset["bins"], cdf_attrs
         )

     elif "de" in logical_source:
@@ -158,6 +172,7 @@ def process_histogram(
     l1a: xr.Dataset,
     ancillary_exclusions: AncillaryExclusions,
     ancillary_parameters: AncillaryParameters,
+    pipeline_settings: PipelineSettings,
 ) -> xr.Dataset:
     """
     Will process the histogram data from the L1A dataset and return the L1B dataset.
@@ -176,6 +191,8 @@ def process_histogram(
         The ancillary exclusions data for bad-angle flag processing.
     ancillary_parameters : AncillaryParameters
         The ancillary parameters for decoding histogram data.
+    pipeline_settings : PipelineSettings
+        The pipeline settings including flag activation.

     Returns
     -------
@@ -231,7 +248,7 @@ def process_histogram(
             Tuple of processed L1B data arrays from HistogramL1B.output_data().
         """
         return HistogramL1B(  # type: ignore[call-arg]
-            *args, ancillary_exclusions, ancillary_parameters
+            *args, ancillary_exclusions, ancillary_parameters, pipeline_settings
         ).output_data()

     l1b_fields = xr.apply_ufunc(
@@ -248,37 +265,39 @@ def process_histogram(


 def create_l1b_hist_output(
-    input_dataset: xr.Dataset,
+    l1b_dataarrays: tuple[xr.DataArray],
+    epoch: xr.DataArray,
+    bin_coord: xr.DataArray,
     cdf_attrs: ImapCdfAttributes,
-    ancillary_parameters: AncillaryParameters,
-    ancillary_exclusions: AncillaryExclusions,
 ) -> xr.Dataset:
     """
     Create the output dataset for the L1B histogram data.

-    This function processes the input dataset and creates a new dataset with the
-    appropriate attributes and data variables. It uses the `process_histogram` function
-    to process the histogram data.
+    This function takes in the output from `process_histogram`, which is a tuple of
+    DataArrays matching the output L1B data variables, and assembles them into a
+    Dataset with the appropriate coordinates.

     Parameters
     ----------
-    input_dataset : xr.Dataset
-        The input L1A GLOWS Histogram dataset to process.
+    l1b_dataarrays : tuple[xr.DataArray]
+        The DataArrays for each variable in the L1B dataset. These align with the
+        fields in the HistogramL1B dataclass, which also describes each variable.
+    epoch : xr.DataArray
+        The epoch DataArray to use as a coordinate in the output dataset. Generally
+        equal to the L1A epoch.
+    bin_coord : xr.DataArray
+        An arange DataArray for the bins coordinate. Nominally expected to be equal to
+        `xr.DataArray(np.arange(number_of_bins_per_histogram), name="bins",
+        dims=["bins"])`. Pulled up from L1A.
     cdf_attrs : ImapCdfAttributes
         The CDF attributes to use for the output dataset.
-    ancillary_parameters : AncillaryParameters
-        The ancillary parameters to use for the output dataset. Generated from the
-        l1b conversion table and pipeline setting ancillary files.
-    ancillary_exclusions : AncillaryExclusions
-        The ancillary exclusions to use for the output dataset. Generated from
-        ancillary files.

     Returns
     -------
     output_dataset : xr.Dataset
         The output dataset with the processed histogram data and all attributes.
     """
-    data_epoch = input_dataset["epoch"]
+    data_epoch = epoch
     data_epoch.attrs = cdf_attrs.get_variable_attributes("epoch", check_schema=False)

     flag_data = xr.DataArray(
@@ -318,7 +337,7 @@ def create_l1b_hist_output(
     )

     bin_data = xr.DataArray(
-        input_dataset["bins"].data,
+        bin_coord.data,
         name="bins",
         dims=["bins"],
         attrs=cdf_attrs.get_variable_attributes("bins_attrs", check_schema=False),
@@ -331,10 +350,6 @@ def create_l1b_hist_output(
         attrs=cdf_attrs.get_variable_attributes("bins_label", check_schema=False),
     )

-    output_dataarrays = process_histogram(
-        input_dataset, ancillary_exclusions, ancillary_parameters
-    )
-
     output_dataset = xr.Dataset(
         coords={
             "epoch": data_epoch,
@@ -352,7 +367,7 @@ def create_l1b_hist_output(
     # HistogramL1B dataclass, we can use dataclasses.fields to get the field names.

     fields = dataclasses.fields(HistogramL1B)
-    for index, dataarray in enumerate(output_dataarrays):
+    for index, dataarray in enumerate(l1b_dataarrays):
         # Dataarray is already an xr.DataArray type, so we can just assign it
         output_dataset[fields[index].name] = dataarray
         output_dataset[fields[index].name].attrs = cdf_attrs.get_variable_attributes(
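
The rewritten create_l1b_hist_output relies on the field order of the HistogramL1B dataclass to name each incoming DataArray. A small sketch of that pairing with a stand-in dataclass (ExampleL1B is hypothetical; HistogramL1B itself has far more fields):

import dataclasses

import numpy as np
import xarray as xr

@dataclasses.dataclass
class ExampleL1B:  # hypothetical stand-in for HistogramL1B
    histogram: np.ndarray
    first_spin_id: int

dataarrays = (
    xr.DataArray(np.zeros(4), dims=["bins"]),
    xr.DataArray(np.array([7]), dims=["epoch"]),
)
ds = xr.Dataset()
# Field order in the dataclass supplies the variable name for each array.
for field, dataarray in zip(dataclasses.fields(ExampleL1B), dataarrays):
    ds[field.name] = dataarray
print(list(ds.data_vars))  # ['histogram', 'first_spin_id']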
imap_processing/glows/l1b/glows_l1b_data.py

@@ -681,7 +681,6 @@ class HistogramL1B:
     histogram: np.ndarray
     flight_software_version: str
     seq_count_in_pkts_file: int
-    # ancillary_data_files: np.ndarray TODO Add this
     first_spin_id: int
     last_spin_id: int
     flags_set_onboard: int  # TODO: this should be renamed in L1B
@@ -705,9 +704,7 @@ class HistogramL1B:
     imap_time_offset: np.double  # No conversion needed from l1a->l1b
     glows_start_time: np.double  # No conversion needed from l1a->l1b
     glows_time_offset: np.double  # No conversion needed from l1a->l1b
-    # unique_block_identifier: str = field(
-    #     init=False
-    # )  # Could be datetime TODO: Can't put a string in data
+    unique_block_identifier: str = field(init=False)
     imap_spin_angle_bin_cntr: np.ndarray = field(init=False)  # Same size as bins
     histogram_flag_array: np.ndarray = field(init=False)
     # These two are retrieved from spin data
@@ -724,10 +721,9 @@ class HistogramL1B:
     flags: np.ndarray = field(init=False)
     ancillary_exclusions: InitVar[AncillaryExclusions]
     ancillary_parameters: InitVar[AncillaryParameters]
+    pipeline_settings: InitVar[PipelineSettings]
     # TODO:
     # - Determine a good way to output flags as "human readable"
-    # - Add spice pieces
-    # - also unique identifiers
     # - Bad angle algorithm using SPICE locations
     # - Move ancillary file to AWS

@@ -739,6 +735,7 @@ class HistogramL1B:
         pulse_length_variance: np.double,
         ancillary_exclusions: AncillaryExclusions,
         ancillary_parameters: AncillaryParameters,
+        pipeline_settings: PipelineSettings,
     ) -> None:
         """
         Will process data.
@@ -759,6 +756,8 @@ class HistogramL1B:
             Ancillary exclusions data for bad-angle flag processing.
         ancillary_parameters : AncillaryParameters
             Ancillary parameters for decoding histogram data.
+        pipeline_settings : PipelineSettings
+            Pipeline settings for processing thresholds and flags.
         """
         # self.histogram_flag_array = np.zeros((2,))
         day = met_to_datetime64(self.imap_start_time)
@@ -804,9 +803,9 @@ class HistogramL1B:
         # is_inside_excluded_region, is_excluded_by_instr_team,
        # is_suspected_transient] x 3600 bins
         self.histogram_flag_array = self._compute_histogram_flag_array(day_exclusions)
-        # self.unique_block_identifier = np.datetime_as_string(
-        #     np.datetime64(int(self.imap_start_time), "ns"), "s"
-        # )
+        # Generate ISO datetime string using SPICE functions
+        datetime64_time = met_to_datetime64(self.imap_start_time)
+        self.unique_block_identifier = np.datetime_as_string(datetime64_time, "s")
         self.flags = np.ones((FLAG_LENGTH,), dtype=np.uint8)

     def update_spice_parameters(self) -> None:
@@ -852,7 +851,7 @@ class HistogramL1B:
             geometry.frame_transform(
                 time_range,
                 np.array([0, 0, 1]),
-                SpiceFrame.IMAP_DPS,
+                SpiceFrame.IMAP_SPACECRAFT,
                 SpiceFrame.ECLIPJ2000,
             )
         )
@@ -873,7 +872,7 @@ class HistogramL1B:
         )
         position = imap_state[:, :3]
         velocity = imap_state[:, 3:]
-        # averange and standard deviation over time (rows)
+        # average and standard deviation over time (rows)
         self.spacecraft_location_average = np.average(position, axis=0)
         self.spacecraft_location_std_dev = np.std(position, axis=0)
         self.spacecraft_velocity_average = np.average(velocity, axis=0)
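
The uncommented unique_block_identifier is now an ISO-8601 string derived from the MET start time. A sketch of the string construction, with a literal datetime64 standing in for the output of the project's met_to_datetime64 helper:

import numpy as np

# Stand-in for met_to_datetime64(self.imap_start_time).
datetime64_time = np.datetime64("2025-09-23T01:02:03", "ns")
unique_block_identifier = np.datetime_as_string(datetime64_time, "s")
print(unique_block_identifier)  # 2025-09-23T01:02:03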
imap_processing/hi/hi_l1c.py

@@ -19,7 +19,7 @@ from imap_processing.hi.hi_l1a import (
     HALF_CLOCK_TICK_S,
 )
 from imap_processing.hi.utils import (
-    CoincidenceBitmap,
+    CalibrationProductConfig,
     create_dataset_variables,
     full_dataarray,
     parse_sensor_number,
@@ -378,8 +378,9 @@ def pset_counts(
         filtered_de_df["spin_phase"].to_numpy() * N_SPIN_BINS
     ).astype(int)
     # When iterating over rows of a dataframe, the names of the multi-index
-    # are not preserved. Below, `config_row.Index[0]` gets the cal_prod_num
-    # value from the namedtuple representing the dataframe row.
+    # are not preserved. Below, `config_row.Index[0]` gets the
+    # calibration_prod value from the namedtuple representing the
+    # dataframe row.
     np.add.at(
         counts_var["counts"].data[0, i_esa, config_row.Index[0]],
         spin_bin_indices,
@@ -684,109 +685,3 @@ def good_time_and_phase_mask(
     """
     # TODO: Implement this once we have Goodtimes data product defined.
     return np.full_like(tick_mets, True, dtype=bool)
-
-
-@pd.api.extensions.register_dataframe_accessor("cal_prod_config")
-class CalibrationProductConfig:
-    """
-    Register custom accessor for calibration product configuration DataFrames.
-
-    Parameters
-    ----------
-    pandas_obj : pandas.DataFrame
-        Object to run validation and use accessor functions on.
-    """
-
-    index_columns = (
-        "cal_prod_num",
-        "esa_energy_step",
-    )
-    tof_detector_pairs = ("ab", "ac1", "bc1", "c1c2")
-    required_columns = (
-        "coincidence_type_list",
-        *[
-            f"tof_{det_pair}_{limit}"
-            for det_pair in tof_detector_pairs
-            for limit in ["low", "high"]
-        ],
-    )
-
-    def __init__(self, pandas_obj: pd.DataFrame) -> None:
-        self._validate(pandas_obj)
-        self._obj = pandas_obj
-        self._add_coincidence_values_column()
-
-    def _validate(self, df: pd.DataFrame) -> None:
-        """
-        Validate the current configuration.
-
-        Parameters
-        ----------
-        df : pandas.DataFrame
-            Object to validate.
-
-        Raises
-        ------
-        AttributeError : If the dataframe does not pass validation.
-        """
-        for index_name in self.index_columns:
-            if index_name in df.index:
-                raise AttributeError(
-                    f"Required index {index_name} not present in dataframe."
-                )
-        # Verify that the Dataframe has all the required columns
-        for col in self.required_columns:
-            if col not in df.columns:
-                raise AttributeError(f"Required column {col} not present in dataframe.")
-        # TODO: Verify that the same ESA energy steps exist in all unique calibration
-        # product numbers
-
-    def _add_coincidence_values_column(self) -> None:
-        """Generate and add the coincidence_type_values column to the dataframe."""
-        # Add a column that consists of the coincidence type strings converted
-        # to integer values
-        self._obj["coincidence_type_values"] = self._obj.apply(
-            lambda row: tuple(
-                CoincidenceBitmap.detector_hit_str_to_int(entry)
-                for entry in row["coincidence_type_list"]
-            ),
-            axis=1,
-        )
-
-    @classmethod
-    def from_csv(cls, path: Path) -> pd.DataFrame:
-        """
-        Read configuration CSV file into a pandas.DataFrame.
-
-        Parameters
-        ----------
-        path : pathlib.Path
-            Location of the Calibration Product configuration CSV file.
-
-        Returns
-        -------
-        dataframe : pandas.DataFrame
-            Validated calibration product configuration data frame.
-        """
-        df = pd.read_csv(
-            path,
-            index_col=cls.index_columns,
-            converters={"coincidence_type_list": lambda s: tuple(s.split("|"))},
-            comment="#",
-        )
-        # Force the __init__ method to run by using the namespace
-        _ = df.cal_prod_config.number_of_products
-        return df
-
-    @property
-    def number_of_products(self) -> int:
-        """
-        Get the number of calibration products in the current configuration.
-
-        Returns
-        -------
-        number_of_products : int
-            The maximum number of calibration products defined in the list of
-            calibration product definitions.
-        """
-        return len(self._obj.index.unique(level="cal_prod_num"))
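
pset_counts accumulates direct events into spin bins with np.add.at, which, unlike fancy-indexed +=, applies every occurrence of a repeated index. A minimal sketch (toy indices, not Hi data):

import numpy as np

counts = np.zeros(4, dtype=int)
spin_bin_indices = np.array([0, 2, 2, 2, 3])  # bin 2 hit three times

# counts[spin_bin_indices] += 1 would count bin 2 only once due to buffering;
# np.add.at performs unbuffered in-place addition for each repeated index.
np.add.at(counts, spin_bin_indices, 1)
print(counts)  # [1 0 3 1]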
imap_processing/hi/hi_l2.py

@@ -13,6 +13,7 @@ from imap_processing.ena_maps.ena_maps import (
     RectangularSkyMap,
 )
 from imap_processing.ena_maps.utils.naming import MapDescriptor
+from imap_processing.hi.utils import CalibrationProductConfig

 logger = logging.getLogger(__name__)

@@ -153,14 +154,25 @@ def generate_hi_map(
     output_map.data_1d["obs_date_range"] = xr.zeros_like(output_map.data_1d["obs_date"])

     # Rename and convert coordinate from esa_energy_step energy
-    esa_energies = esa_energy_lookup(
+    esa_df = esa_energy_df(
         esa_energies_path, output_map.data_1d["esa_energy_step"].data
     )
     output_map.data_1d = output_map.data_1d.rename({"esa_energy_step": "energy"})
-    output_map.data_1d = output_map.data_1d.assign_coords(energy=esa_energies)
-    # Set the energy_step_delta values
-    # TODO: get the correct energy delta values (they are set to NaN) in
-    # output_map.build_cdf_dataset()
+    output_map.data_1d = output_map.data_1d.assign_coords(
+        energy=esa_df["nominal_central_energy"].values
+    )
+    # Set the energy_step_delta values to the energy bandpass half-width-half-max
+    energy_delta = esa_df["bandpass_fwhm"].values / 2
+    output_map.data_1d["energy_delta_minus"] = xr.DataArray(
+        energy_delta,
+        name="energy_delta_minus",
+        dims=["energy"],
+    )
+    output_map.data_1d["energy_delta_plus"] = xr.DataArray(
+        energy_delta,
+        name="energy_delta_plus",
+        dims=["energy"],
+    )

     output_map.data_1d = output_map.data_1d.drop("esa_energy_step_label")

@@ -223,25 +235,26 @@ def calculate_ena_intensity(
     geometric_factors_path : str or pathlib.Path
         Where to get the geometric factors from.
     esa_energies_path : str or pathlib.Path
-        Where to get the energies from.
+        Where to get the esa energies, energy deltas, and geometric factors.

     Returns
     -------
     intensity_vars : dict[str, xarray.DataArray]
         ENA Intensity with statistical and systematic uncertainties.
     """
-    # TODO: Implement geometric factor lookup
-    if geometric_factors_path:
-        raise NotImplementedError
-    geometric_factor = xr.DataArray(
-        np.ones((map_ds["esa_energy_step"].size, map_ds["calibration_prod"].size)),
-        coords=[map_ds["esa_energy_step"], map_ds["calibration_prod"]],
+    # read calibration product configuration file
+    cal_prod_df = CalibrationProductConfig.from_csv(geometric_factors_path)
+    # reindex_like removes esa_energy_steps and calibration products not in the
+    # map_ds esa_energy_step and calibration_product coordinates
+    geometric_factor = cal_prod_df.to_xarray().reindex_like(map_ds)["geometric_factor"]
+    geometric_factor = geometric_factor.transpose(
+        *[coord for coord in map_ds.coords if coord in geometric_factor.coords]
     )
-
-    esa_energy = esa_energy_lookup(esa_energies_path, map_ds["esa_energy_step"].data)
+    energy_df = esa_energy_df(esa_energies_path, map_ds["esa_energy_step"].data)
+    esa_energy = energy_df.to_xarray()["nominal_central_energy"]

     # Convert ENA Signal Rate to Flux
-    flux_conversion_divisor = geometric_factor * esa_energy[:, np.newaxis]
+    flux_conversion_divisor = geometric_factor * esa_energy
     intensity_vars = {
         "ena_intensity": map_ds["ena_signal_rates"] / flux_conversion_divisor,
         "ena_intensity_stat_unc": map_ds["ena_signal_rate_stat_unc"]
@@ -267,9 +280,9 @@ def calculate_ena_intensity(
     return intensity_vars


-def esa_energy_lookup(
+def esa_energy_df(
     esa_energies_path: str | Path, esa_energy_steps: np.ndarray
-) -> np.ndarray:
+) -> pd.DataFrame:
     """
     Lookup the nominal central energy values for given esa energy steps.

@@ -282,13 +295,11 @@ def esa_energy_df(

     Returns
     -------
-    esa_energies: numpy.ndarray
-        The nominal central energy for the given esa energy steps.
+    esa_energies_df: pandas.DataFrame
+        Full data frame from the csv file filtered to only include the
+        esa_energy_steps input.
     """
     esa_energies_lut = pd.read_csv(
         esa_energies_path, comment="#", index_col="esa_energy_step"
     )
-    esa_energies = esa_energies_lut.loc[esa_energy_steps][
-        "nominal_central_energy"
-    ].values
-    return esa_energies
+    return esa_energies_lut.loc[esa_energy_steps]
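
The geometric-factor lookup above converts the MultiIndex configuration DataFrame to an xarray Dataset and aligns it to the map's coordinates with reindex_like. A toy sketch of that alignment (invented values; column and coordinate names follow the diff):

import pandas as pd
import xarray as xr

df = pd.DataFrame(
    {"geometric_factor": [1.0, 2.0, 3.0, 4.0]},
    index=pd.MultiIndex.from_product(
        [[0, 1], [10, 20]], names=["calibration_prod", "esa_energy_step"]
    ),
)
# The map only carries esa_energy_step 20; reindex_like drops step 10.
map_ds = xr.Dataset(coords={"calibration_prod": [0, 1], "esa_energy_step": [20]})
gf = df.to_xarray().reindex_like(map_ds)["geometric_factor"]
print(gf.values)  # [[2.] [4.]]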
imap_processing/hi/utils.py

@@ -1,9 +1,12 @@
 """IMAP-Hi utils functions."""

+from __future__ import annotations
+
 import re
 from collections.abc import Iterable, Sequence
 from dataclasses import dataclass
 from enum import IntEnum
+from pathlib import Path

 import numpy as np
 import pandas as pd
@@ -392,3 +395,109 @@ class EsaEnergyStepLookupTable:
             return results.astype(self._esa_energy_step_dtype)[0]
         else:
             return results.astype(self._esa_energy_step_dtype)
+
+
+@pd.api.extensions.register_dataframe_accessor("cal_prod_config")
+class CalibrationProductConfig:
+    """
+    Register custom accessor for calibration product configuration DataFrames.
+
+    Parameters
+    ----------
+    pandas_obj : pandas.DataFrame
+        Object to run validation and use accessor functions on.
+    """
+
+    index_columns = (
+        "calibration_prod",
+        "esa_energy_step",
+    )
+    tof_detector_pairs = ("ab", "ac1", "bc1", "c1c2")
+    required_columns = (
+        "coincidence_type_list",
+        *[
+            f"tof_{det_pair}_{limit}"
+            for det_pair in tof_detector_pairs
+            for limit in ["low", "high"]
+        ],
+    )
+
+    def __init__(self, pandas_obj: pd.DataFrame) -> None:
+        self._validate(pandas_obj)
+        self._obj = pandas_obj
+        self._add_coincidence_values_column()
+
+    def _validate(self, df: pd.DataFrame) -> None:
+        """
+        Validate the current configuration.
+
+        Parameters
+        ----------
+        df : pandas.DataFrame
+            Object to validate.
+
+        Raises
+        ------
+        AttributeError : If the dataframe does not pass validation.
+        """
+        for index_name in self.index_columns:
+            if index_name in df.index:
+                raise AttributeError(
+                    f"Required index {index_name} not present in dataframe."
+                )
+        # Verify that the Dataframe has all the required columns
+        for col in self.required_columns:
+            if col not in df.columns:
+                raise AttributeError(f"Required column {col} not present in dataframe.")
+        # TODO: Verify that the same ESA energy steps exist in all unique calibration
+        # product numbers
+
+    def _add_coincidence_values_column(self) -> None:
+        """Generate and add the coincidence_type_values column to the dataframe."""
+        # Add a column that consists of the coincidence type strings converted
+        # to integer values
+        self._obj["coincidence_type_values"] = self._obj.apply(
+            lambda row: tuple(
+                CoincidenceBitmap.detector_hit_str_to_int(entry)
+                for entry in row["coincidence_type_list"]
+            ),
+            axis=1,
+        )
+
+    @classmethod
+    def from_csv(cls, path: str | Path) -> pd.DataFrame:
+        """
+        Read configuration CSV file into a pandas.DataFrame.
+
+        Parameters
+        ----------
+        path : str or pathlib.Path
+            Location of the Calibration Product configuration CSV file.
+
+        Returns
+        -------
+        dataframe : pandas.DataFrame
+            Validated calibration product configuration data frame.
+        """
+        df = pd.read_csv(
+            path,
+            index_col=cls.index_columns,
+            converters={"coincidence_type_list": lambda s: tuple(s.split("|"))},
+            comment="#",
+        )
+        # Force the __init__ method to run by using the namespace
+        _ = df.cal_prod_config.number_of_products
+        return df
+
+    @property
+    def number_of_products(self) -> int:
+        """
+        Get the number of calibration products in the current configuration.
+
+        Returns
+        -------
+        number_of_products : int
+            The maximum number of calibration products defined in the list of
+            calibration product definitions.
+        """
+        return len(self._obj.index.unique(level="calibration_prod"))
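
CalibrationProductConfig uses pandas' accessor-registration API: the accessor class is constructed lazily on first attribute access, which is why from_csv touches df.cal_prod_config once to force validation. A minimal sketch of the same pattern with a hypothetical accessor name:

import pandas as pd

@pd.api.extensions.register_dataframe_accessor("demo")
class DemoAccessor:
    def __init__(self, pandas_obj: pd.DataFrame) -> None:
        # Runs (and validates) the first time `df.demo` is accessed.
        if "x" not in pandas_obj.columns:
            raise AttributeError("Required column x not present.")
        self._obj = pandas_obj

    @property
    def total(self) -> float:
        return float(self._obj["x"].sum())

df = pd.DataFrame({"x": [1, 2, 3]})
print(df.demo.total)  # 6.0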