imap-processing 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
  4. imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
  5. imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +29 -22
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
  9. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  10. imap_processing/cdf/utils.py +2 -2
  11. imap_processing/cli.py +10 -27
  12. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  13. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  14. imap_processing/codice/codice_l1b.py +62 -97
  15. imap_processing/codice/codice_l2.py +801 -174
  16. imap_processing/codice/codice_new_l1a.py +64 -0
  17. imap_processing/codice/constants.py +96 -0
  18. imap_processing/codice/utils.py +270 -0
  19. imap_processing/ena_maps/ena_maps.py +157 -95
  20. imap_processing/ena_maps/utils/coordinates.py +5 -0
  21. imap_processing/ena_maps/utils/corrections.py +450 -0
  22. imap_processing/ena_maps/utils/map_utils.py +143 -42
  23. imap_processing/ena_maps/utils/naming.py +3 -1
  24. imap_processing/hi/hi_l1c.py +34 -12
  25. imap_processing/hi/hi_l2.py +82 -44
  26. imap_processing/ialirt/constants.py +7 -1
  27. imap_processing/ialirt/generate_coverage.py +3 -1
  28. imap_processing/ialirt/l0/parse_mag.py +1 -0
  29. imap_processing/ialirt/l0/process_codice.py +66 -0
  30. imap_processing/ialirt/l0/process_hit.py +1 -0
  31. imap_processing/ialirt/l0/process_swapi.py +1 -0
  32. imap_processing/ialirt/l0/process_swe.py +2 -0
  33. imap_processing/ialirt/process_ephemeris.py +6 -2
  34. imap_processing/ialirt/utils/create_xarray.py +4 -2
  35. imap_processing/idex/idex_l2a.py +2 -2
  36. imap_processing/idex/idex_l2b.py +1 -1
  37. imap_processing/lo/l1c/lo_l1c.py +62 -4
  38. imap_processing/lo/l2/lo_l2.py +85 -15
  39. imap_processing/mag/l1a/mag_l1a.py +2 -2
  40. imap_processing/mag/l1a/mag_l1a_data.py +71 -13
  41. imap_processing/mag/l1c/interpolation_methods.py +34 -13
  42. imap_processing/mag/l1c/mag_l1c.py +117 -67
  43. imap_processing/mag/l1d/mag_l1d_data.py +3 -1
  44. imap_processing/quality_flags.py +1 -0
  45. imap_processing/spice/geometry.py +11 -9
  46. imap_processing/spice/pointing_frame.py +77 -50
  47. imap_processing/swapi/constants.py +4 -0
  48. imap_processing/swapi/l1/swapi_l1.py +59 -24
  49. imap_processing/swapi/l2/swapi_l2.py +17 -3
  50. imap_processing/swe/utils/swe_constants.py +7 -7
  51. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  52. imap_processing/ultra/l1b/de.py +57 -1
  53. imap_processing/ultra/l1b/extendedspin.py +1 -1
  54. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  55. imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
  56. imap_processing/ultra/l1b/ultra_l1b_extended.py +25 -12
  57. imap_processing/ultra/l1c/helio_pset.py +29 -6
  58. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  59. imap_processing/ultra/l1c/spacecraft_pset.py +10 -6
  60. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  61. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
  62. imap_processing/ultra/l2/ultra_l2.py +2 -2
  63. imap_processing-1.0.2.dist-info/METADATA +121 -0
  64. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/RECORD +67 -61
  65. imap_processing-1.0.0.dist-info/METADATA +0 -120
  66. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
  67. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
  68. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
@@ -10,190 +10,128 @@ dataset = process_codice_l2(l1_filename)
  """

  import logging
- from pathlib import Path

  import numpy as np
+ import pandas as pd
  import xarray as xr
+ from imap_data_access import ProcessingInputCollection, ScienceFilePath

  from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
  from imap_processing.cdf.utils import load_cdf
- from imap_processing.codice.constants import HALF_SPIN_LUT
+ from imap_processing.codice.constants import (
+     HALF_SPIN_LUT,
+     HI_L2_ELEVATION_ANGLE,
+     HI_OMNI_VARIABLE_NAMES,
+     HI_SECTORED_VARIABLE_NAMES,
+     L2_GEOMETRIC_FACTOR,
+     L2_HI_NUMBER_OF_SSD,
+     L2_HI_SECTORED_ANGLE,
+     LO_NSW_ANGULAR_VARIABLE_NAMES,
+     LO_NSW_SPECIES_VARIABLE_NAMES,
+     LO_POSITION_TO_ELEVATION_ANGLE,
+     LO_SW_ANGULAR_VARIABLE_NAMES,
+     LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES,
+     LO_SW_SPECIES_VARIABLE_NAMES,
+     NSW_POSITIONS,
+     PUI_POSITIONS,
+     SOLAR_WIND_POSITIONS,
+     SW_POSITIONS,
+ )

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)


- def process_codice_l2(file_path: Path) -> xr.Dataset:
+ def get_geometric_factor_lut(dependencies: ProcessingInputCollection) -> dict:
      """
-     Will process CoDICE l1 data to create l2 data products.
+     Get the geometric factor lookup table.

      Parameters
      ----------
-     file_path : pathlib.Path
-         Path to the CoDICE L1 file to process.
+     dependencies : ProcessingInputCollection
+         The collection of processing input files.

      Returns
      -------
-     l2_dataset : xarray.Dataset
-         The``xarray`` dataset containing the science data and supporting metadata.
+     geometric_factor_lut : dict
+         A dict with a full and reduced mode array with shape (esa_steps, position).
      """
-     logger.info(f"Processing {file_path}")
-
-     # Open the l1 file
-     l1_dataset = load_cdf(file_path)
-
-     # Use the logical source as a way to distinguish between data products and
-     # set some useful distinguishing variables
-     # TODO: Could clean this up by using imap-data-access methods?
-     dataset_name = l1_dataset.attrs["Logical_source"]
-     data_level = dataset_name.removeprefix("imap_codice_").split("_")[0]
-     dataset_name = dataset_name.replace(data_level, "l2")
-
-     # Use the L1 data product as a starting point for L2
-     l2_dataset = l1_dataset.copy()
-
-     # Get the L2 CDF attributes
-     cdf_attrs = ImapCdfAttributes()
-     l2_dataset = add_dataset_attributes(l2_dataset, dataset_name, cdf_attrs)
-
-     # TODO: update list of datasets that need geometric factors (if needed)
-     # Compute geometric factors needed for intensity calculations
-     if dataset_name in [
-         "imap_codice_l2_lo-sw-species",
-         "imap_codice_l2_lo-nsw-species",
-     ]:
-         geometric_factors = compute_geometric_factors(l2_dataset)
-
-     if dataset_name in [
-         "imap_codice_l2_hi-counters-singles",
-         "imap_codice_l2_hi-counters-aggregated",
-         "imap_codice_l2_lo-counters-singles",
-         "imap_codice_l2_lo-counters-aggregated",
-         "imap_codice_l2_lo-sw-priority",
-         "imap_codice_l2_lo-nsw-priority",
-     ]:
-         # No changes needed. Just save to an L2 CDF file.
-         # TODO: May not even need L2 files for these products
-         pass
-
-     elif dataset_name == "imap_codice_l2_hi-direct-events":
-         # Convert the following data variables to physical units using
-         # calibration data:
-         # - ssd_energy
-         # - tof
-         # - elevation_angle
-         # - spin_angle
-         # These converted variables are *in addition* to the existing L1 variables
-         # The other data variables require no changes
-         # See section 11.1.2 of algorithm document
-         pass
-
-     elif dataset_name == "imap_codice_l2_hi-sectored":
-         # Convert the sectored count rates using equation described in section
-         # 11.1.3 of algorithm document.
-         pass
-
-     elif dataset_name == "imap_codice_l2_hi-omni":
-         # Calculate the omni-directional intensity for each species using
-         # equation described in section 11.1.4 of algorithm document
-         # hopefully this can also apply to hi-ialirt
-         pass
+     geometric_factors = pd.read_csv(
+         dependencies.get_file_paths(descriptor="l2-lo-gfactor")[0]
+     )

-     elif dataset_name == "imap_codice_l2_lo-direct-events":
-         # Convert the following data variables to physical units using
-         # calibration data:
-         # - apd_energy
-         # - elevation_angle
-         # - tof
-         # - spin_sector
-         # - esa_step
-         # These converted variables are *in addition* to the existing L1 variables
-         # The other data variables require no changes
-         # See section 11.1.2 of algorithm document
-         pass
+     # sort by esa step. They should already be sorted, but just in case
+     full = geometric_factors[geometric_factors["mode"] == "full"].sort_values(
+         by="esa_step"
+     )
+     reduced = geometric_factors[geometric_factors["mode"] == "reduced"].sort_values(
+         by="esa_step"
+     )

-     elif dataset_name == "imap_codice_l2_lo-sw-angular":
-         # Calculate the sunward angular intensities using equation described in
-         # section 11.2.3 of algorithm document.
-         pass
+     # Sort position columns to ensure the correct order
+     position_names_sorted = sorted(
+         [col for col in full if col.startswith("position")],
+         key=lambda x: int(x.split("_")[-1]),
+     )

-     elif dataset_name == "imap_codice_l2_lo-nsw-angular":
-         # Calculate the non-sunward angular intensities using equation described
-         # in section 11.2.3 of algorithm document.
-         pass
+     return {
+         "full": full[position_names_sorted].to_numpy(),
+         "reduced": reduced[position_names_sorted].to_numpy(),
+     }

-     elif dataset_name == "imap_codice_l2_lo-sw-species":
-         # Calculate the sunward solar wind species intensities using equation
-         # described in section 11.2.4 of algorithm document.
-         # Calculate the pickup ion sunward solar wind intensities using equation
-         # described in section 11.2.4 of algorithm document.
-         # Hopefully this can also apply to lo-ialirt
-         # TODO: WIP - needs to be completed
-         l2_dataset = process_lo_sw_species(l2_dataset, geometric_factors)
-         pass

-     elif dataset_name == "imap_codice_l2_lo-nsw-species":
-         # Calculate the non-sunward solar wind species intensities using
-         # equation described in section 11.2.4 of algorithm document.
-         # Calculate the pickup ion non-sunward solar wind intensities using
-         # equation described in section 11.2.4 of algorithm document.
-         pass
+ def get_efficiency_lut(dependencies: ProcessingInputCollection) -> pd.DataFrame:
+     """
+     Get the efficiency lookup table.

-     logger.info(f"\nFinal data product:\n{l2_dataset}\n")
+     Parameters
+     ----------
+     dependencies : ProcessingInputCollection
+         The collection of processing input files.

-     return l2_dataset
+     Returns
+     -------
+     efficiency_lut : pandas.DataFrame
+         Contains the efficiency lookup table. Columns are:
+         species, product, esa_step, position_1, position_2, ..., position_24.
+     """
+     return pd.read_csv(dependencies.get_file_paths(descriptor="l2-lo-efficiency")[0])


- def add_dataset_attributes(
-     dataset: xr.Dataset, dataset_name: str, cdf_attrs: ImapCdfAttributes
- ) -> xr.Dataset:
+ def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> xr.DataArray:
      """
-     Add the global and variable attributes to the dataset.
+     Get the efficiency values for a given species.

      Parameters
      ----------
-     dataset : xarray.Dataset
-         The dataset to update.
-     dataset_name : str
-         The name of the dataset.
-     cdf_attrs : ImapCdfAttributes
-         The attribute manager for CDF attributes.
+     species : str
+         The species name.
+     efficiency : pandas.DataFrame
+         The efficiency lookup table.

      Returns
      -------
-     xarray.Dataset
-         The updated dataset.
+     efficiency : xarray.DataArray
+         A 2D array of efficiencies with shape (epoch, esa_steps).
      """
-     cdf_attrs.add_instrument_global_attrs("codice")
-     cdf_attrs.add_instrument_variable_attrs("codice", "l2")
-
-     # Update the global attributes
-     dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
-
-     # Set the variable attributes
-     for variable_name in dataset.data_vars.keys():
-         try:
-             dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-                 variable_name, check_schema=False
-             )
-         except KeyError:
-             # Some variables may have a product descriptor prefix in the
-             # cdf attributes key if they are common to multiple products.
-             descriptor = dataset_name.split("imap_codice_l2_")[-1]
-             cdf_attrs_key = f"{descriptor}-{variable_name}"
-             try:
-                 dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-                     f"{cdf_attrs_key}", check_schema=False
-                 )
-             except KeyError:
-                 logger.error(
-                     f"Field '{variable_name}' and '{cdf_attrs_key}' not found in "
-                     f"attribute manager."
-                 )
-     return dataset
+     species_efficiency = efficiency[efficiency["species"] == species].sort_values(
+         by="esa_step"
+     )
+     # Sort position columns to ensure the correct order
+     position_names_sorted = sorted(
+         [col for col in species_efficiency if col.startswith("position")],
+         key=lambda x: int(x.split("_")[-1]),
+     )
+     # Shape: (energy_table, inst_az)
+     return xr.DataArray(
+         species_efficiency[position_names_sorted].to_numpy(),
+         dims=("energy_table", "inst_az"),
+     )


- def compute_geometric_factors(dataset: xr.Dataset) -> np.ndarray:
+ def compute_geometric_factors(
+     dataset: xr.Dataset, geometric_factor_lookup: dict
+ ) -> xr.DataArray:
      """
      Calculate geometric factors needed for intensity calculations.

@@ -212,11 +150,13 @@ def compute_geometric_factors(dataset: xr.Dataset) -> np.ndarray:
      ----------
      dataset : xarray.Dataset
          The L2 dataset containing rgfo_half_spin data variable.
+     geometric_factor_lookup : dict
+         A dict with a full and reduced mode array with shape (esa_steps, position).

      Returns
      -------
-     geometric_factors : np.ndarray
-         A 2D array of geometric factors with shape (epoch, esa_steps).
+     geometric_factors : xarray.DataArray
+         A 3D array of geometric factors with shape (epoch, esa_steps, positions).
      """
      # Convert the HALF_SPIN_LUT to a reverse mapping of esa_step to half_spin
      esa_step_to_half_spin_map = {
@@ -227,52 +167,739 @@ def compute_geometric_factors(dataset: xr.Dataset) -> np.ndarray:
      half_spin_values = np.array(
          [esa_step_to_half_spin_map[step] for step in range(128)]
      )
-
      # Expand dimensions to compare each rgfo_half_spin value against
      # all half_spin_values
      rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis]  # Shape: (epoch, 1)
+     # Perform the comparison and calculate modes
+     # Modes will be true (reduced mode) anywhere half_spin >= rgfo_half_spin otherwise
+     # false (full mode)
+     modes = half_spin_values >= rgfo_half_spin
+
+     # Get the geometric factors based on the modes
+     gf = np.where(
+         modes[:, :, np.newaxis],  # Shape (epoch, energy_table, 1)
+         geometric_factor_lookup[
+             "reduced"
+         ],  # Shape (1, energy_table, 24) - reduced mode
+         geometric_factor_lookup["full"],  # Shape (1, energy_table, 24) - full mode
+     )  # Shape: (epoch, energy_table, inst_az)
+
+     return xr.DataArray(gf, dims=("epoch", "energy_table", "inst_az"))
+
+
+ def calculate_intensity(
+     dataset: xr.Dataset,
+     species_list: list,
+     geometric_factors: xr.DataArray,
+     efficiency: pd.DataFrame,
+     positions: list,
+     average_across_positions: bool = False,
+ ) -> xr.Dataset:
+     """
+     Calculate species or angular intensities.

-     # Perform the comparison and calculate geometric factors
-     geometric_factors = np.where(half_spin_values < rgfo_half_spin, 0.75, 0.5)
+     Parameters
+     ----------
+     dataset : xarray.Dataset
+         The L2 dataset to process.
+     species_list : list
+         List of species variable names to calculate intensity.
+     geometric_factors : np.ndarray
+         The geometric factors array with shape (epoch, esa_steps).
+     efficiency : pandas.DataFrame
+         The efficiency lookup table.
+     positions : list
+         A list of position indices to select from the geometric factor and
+         efficiency lookup tables.
+     average_across_positions : bool
+         Whether to average the efficiencies and geometric factors across the selected
+         positions. Default is False.
+
+     Returns
+     -------
+     xarray.Dataset
+         The updated L2 dataset with species intensities calculated.
+     """
+     # Select the relevant positions from the geometric factors
+     geometric_factors = geometric_factors.isel(inst_az=positions)
+     if average_across_positions:
+         # take the mean geometric factor across positions
+         geometric_factors = geometric_factors.mean(dim="inst_az")
+         scalar = len(positions)
+     else:
+         scalar = 1
+     # Calculate the angular intensities using the provided geometric factors and
+     # efficiency.
+     # intensity = species_rate / (gm * eff * esa_step) for position and spin angle
+     for species in species_list:
+         # Select the relevant positions for the species from the efficiency LUT
+         # Shape: (epoch, energy_table, inst_az)
+         species_eff = get_species_efficiency(species, efficiency).isel(
+             inst_az=positions
+         )
+         if species_eff.size == 0:
+             logger.warning(f"No efficiency data found for species {species}. Skipping.")
+             continue
+
+         if average_across_positions:
+             # Take the mean efficiency across positions
+             species_eff = species_eff.mean(dim="inst_az")
+
+         # Shape: (epoch, energy_table, inst_az) or
+         # (epoch, energy_table) if averaged
+         denominator = scalar * geometric_factors * species_eff * dataset["energy_table"]
+         if species not in dataset:
+             logger.warning(
+                 f"Species {species} not found in dataset. Filling with NaNS."
+             )
+             dataset[species] = np.full(dataset["energy_table"].data.shape, np.nan)
+         else:
+             dataset[species] = dataset[species] / denominator
+
+         # Also calculate uncertainty if available
+         species_uncertainty = f"unc_{species}"
+         if species_uncertainty not in dataset:
+             logger.warning(
+                 f"Uncertainty {species_uncertainty} not found in dataset."
+                 f" Filling with NaNS."
+             )
+             dataset[species_uncertainty] = np.full(
+                 dataset["energy_table"].data.shape, np.nan
+             )
+         else:
+             dataset[species_uncertainty] = dataset[species_uncertainty] / denominator

-     return geometric_factors
+     return dataset


- def process_lo_sw_species(
-     dataset: xr.Dataset, geometric_factors: np.ndarray
+ def process_lo_species_intensity(
+     dataset: xr.Dataset,
+     species_list: list,
+     geometric_factors: xr.DataArray,
+     efficiency: pd.DataFrame,
+     positions: list,
  ) -> xr.Dataset:
      """
-     Process the lo-sw-species L2 dataset to calculate species intensities.
+     Process the lo-species L2 dataset to calculate species intensities.

      Parameters
      ----------
      dataset : xarray.Dataset
          The L2 dataset to process.
-     geometric_factors : np.ndarray
+     species_list : list
+         List of species variable names to calculate intensity.
+     geometric_factors : xarray.DataArray
          The geometric factors array with shape (epoch, esa_steps).
+     efficiency : pandas.DataFrame
+         The efficiency lookup table.
+     positions : list
+         A list of position indices to select from the geometric factor and
+         efficiency lookup tables.

      Returns
      -------
      xarray.Dataset
          The updated L2 dataset with species intensities calculated.
      """
-     # TODO: WIP - implement intensity calculations
-     # valid_solar_wind_vars = [
-     # "hplus",
-     # "heplusplus",
-     # "cplus4",
-     # "cplus5",
-     # "cplus6",
-     # "oplus5",
-     # "oplus6",
-     # "oplus7",
-     # "oplus8",
-     # "ne",
-     # "mg",
-     # "si",
-     # "fe_loq",
-     # "fe_hiq",
-     # ]
-     # valid_pick_up_ion_vars = ["heplus", "cnoplus"]
+     # Calculate the species intensities using the provided geometric factors and
+     # efficiency.
+     dataset = calculate_intensity(
+         dataset,
+         species_list,
+         geometric_factors,
+         efficiency,
+         positions,
+         average_across_positions=True,
+     )

      return dataset
+
+
+ def process_lo_angular_intensity(
+     dataset: xr.Dataset,
+     species_list: list,
+     geometric_factors: xr.DataArray,
+     efficiency: pd.DataFrame,
+     positions: list,
+ ) -> xr.Dataset:
+     """
+     Process the lo-species L2 dataset to calculate angular intensities.
+
+     Parameters
+     ----------
+     dataset : xarray.Dataset
+         The L2 dataset to process.
+     species_list : list
+         List of species variable names to calculate intensity.
+     geometric_factors : xarray.DataArray
+         The geometric factors array with shape (epoch, esa_steps).
+     efficiency : pandas.DataFrame
+         The efficiency lookup table.
+     positions : list
+         A list of position indices to select from the geometric factor and
+         efficiency lookup tables.
+
+     Returns
+     -------
+     xarray.Dataset
+         The updated L2 dataset with angular intensities calculated.
+     """
+     # Calculate the angular intensities using the provided geometric factors and
+     # efficiency.
+     dataset = calculate_intensity(
+         dataset,
+         species_list,
+         geometric_factors,
+         efficiency,
+         positions,
+         average_across_positions=False,
+     )
+     # transform positions to elevation angles
+     if positions == SW_POSITIONS:
+         pos_to_el = LO_POSITION_TO_ELEVATION_ANGLE["sw"]
+     elif positions == NSW_POSITIONS:
+         pos_to_el = LO_POSITION_TO_ELEVATION_ANGLE["nsw"]
+     else:
+         raise ValueError("Unknown positions for elevation angle mapping.")
+
+     # Create a new coordinate for elevation_angle based on inst_az
+     dataset = dataset.assign_coords(
+         elevation_angle=(
+             "inst_az",
+             [pos_to_el[pos] for pos in dataset["inst_az"].data],
+         )
+     )
+     # Take the mean across elevation angles and restore the original dimension order
+     dataset_converted = (
+         dataset[species_list]
+         .groupby("elevation_angle")
+         .sum(keep_attrs=True)  # One position should always contain zeros so sum is safe
+         # Restore original dimension order because groupby moves the grouped
+         # dimension to the front
+         .transpose("epoch", "energy_table", "spin_sector", "elevation_angle", ...)
+     )
+     # Create a new coordinate for spin angle based on spin_sector
+     # Use equation from section 11.2.2 of algorithm document
+     dataset = dataset.assign_coords(
+         spin_angle=("spin_sector", dataset["spin_sector"].data * 15.0 + 7.5)
+     )
+
+     dataset = dataset.drop_vars(species_list).merge(dataset_converted)
+     return dataset
+
+
+ def process_hi_omni(dependencies: ProcessingInputCollection) -> xr.Dataset:
+     """
+     Process the hi-omni L1B dataset to calculate omni-directional intensities.
+
+     See section 11.1.3 of the CoDICE algorithm document for details.
+
+     The formula for omni-directional intensities is::
+
+         l1B species data / (
+             geometric_factor * number_of_ssd * efficiency * energy_passband
+         )
+
+     Geometric factor is constant for all species which is 0.013.
+     Number of SSD is constant for all species which is 12.
+     Efficiency is provided in a CSV file for each species and energy bin.
+     Energy passband is calculated from L1B variables energy_bin_minus + energy_bin_plus
+
+     Parameters
+     ----------
+     dependencies : ProcessingInputCollection
+         The collection of processing input files.
+
+     Returns
+     -------
+     xarray.Dataset
+         The updated L2 dataset with omni-directional intensities calculated.
+     """
+     l1b_file = dependencies.get_file_paths(descriptor="hi-omni")[0]
+     l1b_dataset = load_cdf(l1b_file)
+
+     # Read the efficiencies data from the CSV file
+     efficiencies_file = dependencies.get_file_paths(descriptor="l2-hi-omni-efficiency")[
+         0
+     ]
+     efficiencies_df = pd.read_csv(efficiencies_file)
+     # Omni product has 8 species and each species has different shape.
+     # Eg.
+     # h - (epoch, 15)
+     # c - (epoch, 18)
+     # uh - (epoch, 5)
+     # etc.
+     # Because of that, we need to loop over each species and calculate
+     # omni-directional intensities separately.
+     for species in HI_OMNI_VARIABLE_NAMES:
+         species_data = efficiencies_df[efficiencies_df["species"] == species]
+         # Read current species' effificiency
+         species_efficiencies = species_data["average_efficiency"].values[np.newaxis, :]
+         # Calculate energy passband from L1B data
+         energy_passbands = (
+             l1b_dataset[f"energy_{species}_plus"]
+             + l1b_dataset[f"energy_{species}_minus"]
+         ).values[np.newaxis, :]
+         # Calculate omni-directional intensities
+         omni_direction_intensities = l1b_dataset[species] / (
+             L2_GEOMETRIC_FACTOR
+             * L2_HI_NUMBER_OF_SSD
+             * species_efficiencies
+             * energy_passbands
+         )
+         # Store by replacing existing species data with omni-directional intensities
+         l1b_dataset[species].values = omni_direction_intensities
+
+     # TODO: this may go away once Joey and I fix L1B CDF
+     # Update global CDF attributes
+     cdf_attrs = ImapCdfAttributes()
+     cdf_attrs.add_instrument_global_attrs("codice")
+     cdf_attrs.add_instrument_variable_attrs("codice", "l2-hi-omni")
+     l1b_dataset.attrs = cdf_attrs.get_global_attributes("imap_codice_l2_hi-omni")
+
+     # TODO: ask Joey to add attrs for epoch_delta_plus and epoch_delta_minus
+     # and update dimension to be 'epoch' in L1B data
+     for variable in l1b_dataset.data_vars:
+         if variable in ["epoch_delta_plus", "epoch_delta_minus", "data_quality"]:
+             l1b_dataset[variable].attrs = cdf_attrs.get_variable_attributes(
+                 variable, check_schema=False
+             )
+         else:
+             l1b_dataset[variable].attrs = cdf_attrs.get_variable_attributes(
+                 variable, check_schema=False
+             )
+
+     # Add these new coordinates
+     new_coords = {
+         "energy_h": l1b_dataset["energy_h"],
+         "energy_h_label": xr.DataArray(
+             l1b_dataset["energy_h"].values.astype(str),
+             dims=("energy_h",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_h_label", check_schema=False
+             ),
+         ),
+         "energy_he3": l1b_dataset["energy_he3"],
+         "energy_he3_label": xr.DataArray(
+             l1b_dataset["energy_he3"].values.astype(str),
+             dims=("energy_he3",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_he3_label", check_schema=False
+             ),
+         ),
+         "energy_he4": l1b_dataset["energy_he4"],
+         "energy_he4_label": xr.DataArray(
+             l1b_dataset["energy_he4"].values.astype(str),
+             dims=("energy_he4",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_he4_label", check_schema=False
+             ),
+         ),
+         "energy_c": l1b_dataset["energy_c"],
+         "energy_c_label": xr.DataArray(
+             l1b_dataset["energy_c"].values.astype(str),
+             dims=("energy_c",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_c_label", check_schema=False
+             ),
+         ),
+         "energy_o": l1b_dataset["energy_o"],
+         "energy_o_label": xr.DataArray(
+             l1b_dataset["energy_o"].values.astype(str),
+             dims=("energy_o",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_o_label", check_schema=False
+             ),
+         ),
+         "energy_ne_mg_si": l1b_dataset["energy_ne_mg_si"],
+         "energy_ne_mg_si_label": xr.DataArray(
+             l1b_dataset["energy_ne_mg_si"].values.astype(str),
+             dims=("energy_ne_mg_si",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_ne_mg_si_label", check_schema=False
+             ),
+         ),
+         "energy_fe": l1b_dataset["energy_fe"],
+         "energy_fe_label": xr.DataArray(
+             l1b_dataset["energy_fe"].values.astype(str),
+             dims=("energy_fe",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_fe_label", check_schema=False
+             ),
+         ),
+         "energy_uh": l1b_dataset["energy_uh"],
+         "energy_uh_label": xr.DataArray(
+             l1b_dataset["energy_uh"].values.astype(str),
+             dims=("energy_uh",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_uh_label", check_schema=False
+             ),
+         ),
+         "energy_junk": l1b_dataset["energy_junk"],
+         "energy_junk_label": xr.DataArray(
+             l1b_dataset["energy_junk"].values.astype(str),
+             dims=("energy_junk",),
+             attrs=cdf_attrs.get_variable_attributes(
+                 "energy_junk_label", check_schema=False
+             ),
+         ),
+         "epoch": xr.DataArray(
+             l1b_dataset["epoch"].data,
+             dims=("epoch",),
+             attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
+         ),
+     }
+     l1b_dataset = l1b_dataset.assign_coords(new_coords)
+
+     return l1b_dataset
+
+
+ def process_hi_sectored(dependencies: ProcessingInputCollection) -> xr.Dataset:
+     """
+     Process the hi-omni L1B dataset to calculate omni-directional intensities.
+
+     See section 11.1.2 of the CoDICE algorithm document for details.
+
+     The formula for omni-directional intensities is::
+
+         l1b species data / (geometric_factor * efficiency * energy_passband)
+
+     Geometric factor is constant for all species and is 0.013.
+     Efficiency is provided in a CSV file for each species and energy bin and
+     position.
+     Energy passband is calculated from energy_bin_minus + energy_bin_plus
+
+     Parameters
+     ----------
+     dependencies : ProcessingInputCollection
+         The collection of processing input files.
+
+     Returns
+     -------
+     xarray.Dataset
+         The updated L2 dataset with omni-directional intensities calculated.
+     """
+     file_path = dependencies.get_file_paths(descriptor="hi-sectored")[0]
+     l1b_dataset = load_cdf(file_path)
+
+     # Update global CDF attributes
+     cdf_attrs = ImapCdfAttributes()
+     cdf_attrs.add_instrument_global_attrs("codice")
+     cdf_attrs.add_instrument_variable_attrs("codice", "l2-hi-sectored")
+
+     # Overwrite L1B variable attributes with L2 variable attributes
+     l2_dataset = xr.Dataset(
+         coords={
+             "spin_sector": l1b_dataset["spin_sector"],
+             "spin_sector_label": xr.DataArray(
+                 l1b_dataset["spin_sector"].values.astype(str),
+                 dims=("spin_sector",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "spin_sector_label", check_schema=False
+                 ),
+             ),
+             "energy_h": l1b_dataset["energy_h"],
+             "energy_h_label": xr.DataArray(
+                 l1b_dataset["energy_h"].values.astype(str),
+                 dims=("energy_h",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "energy_h_label", check_schema=False
+                 ),
+             ),
+             "energy_he3he4": l1b_dataset["energy_he3he4"],
+             "energy_he3he4_label": xr.DataArray(
+                 l1b_dataset["energy_he3he4"].values.astype(str),
+                 dims=("energy_he3he4",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "energy_he3he4_label", check_schema=False
+                 ),
+             ),
+             "energy_cno": l1b_dataset["energy_cno"],
+             "energy_cno_label": xr.DataArray(
+                 l1b_dataset["energy_cno"].values.astype(str),
+                 dims=("energy_cno",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "energy_cno_label", check_schema=False
+                 ),
+             ),
+             "energy_fe": l1b_dataset["energy_fe"],
+             "energy_fe_label": xr.DataArray(
+                 l1b_dataset["energy_fe"].values.astype(str),
+                 dims=("energy_fe",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "energy_fe_label", check_schema=False
+                 ),
+             ),
+             "epoch": l1b_dataset["epoch"],
+             "elevation_angle": xr.DataArray(
+                 HI_L2_ELEVATION_ANGLE,
+                 dims=("elevation_angle",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "elevation_angle", check_schema=False
+                 ),
+             ),
+             "elevation_angle_label": xr.DataArray(
+                 HI_L2_ELEVATION_ANGLE.astype(str),
+                 dims=("elevation_angle",),
+                 attrs=cdf_attrs.get_variable_attributes(
+                     "elevation_angle_label", check_schema=False
+                 ),
+             ),
+         },
+         attrs=cdf_attrs.get_global_attributes("imap_codice_l2_hi-sectored"),
+     )
+
+     efficiencies_file = dependencies.get_file_paths(
+         descriptor="l2-hi-sectored-efficiency"
+     )[0]
+
+     # Calculate sectored intensities
+     efficiencies_df = pd.read_csv(efficiencies_file)
+     # Similar to hi-omni, each species has different shape.
+     # Because of that, we need to loop over each species and calculate
+     # sectored intensities separately.
+     for species in HI_SECTORED_VARIABLE_NAMES:
+         # Efficiencies from dataframe maps to different dimension in L1B data.
+         # For example:
+         # l1b species 'h' has shape:
+         # (epoch, 8, 12, 12) -> (time, energy, spin_sector, inst_az)
+         # efficiencies 'h' has shape after reading from CSV:
+         # (8, 12) -> (energy, inst_az)
+         # NOTE: 12 here maps to last 12 in above l1b dimension.
+         # Because of this, it's easier to work with the data in xarray.
+         # Xarray automatically aligns dimensions and coordinates, making it easier
+         # to work with multi-dimensional data. Thus, we convert the efficiencies
+         # to xarray.DataArray with dimensions (energy, inst_az)
+         species_data = efficiencies_df[efficiencies_df["species"] == species].values
+         species_efficiencies = xr.DataArray(
+             species_data[:, 2:].astype(
+                 float
+             ),  # Skip first two columns (species, energy_bin)
+             dims=(f"energy_{species}", "inst_az"),
+             coords=l1b_dataset[[f"energy_{species}", "inst_az"]],
+         )
+
+         # energy_passbands has shape:
+         # (8,) -> (energy)
+         energy_passbands = xr.DataArray(
+             l1b_dataset[f"energy_{species}_minus"]
+             + l1b_dataset[f"energy_{species}_plus"],
+             dims=(f"energy_{species}",),
+             coords=l2_dataset[[f"energy_{species}"]],
+             name="passband",
+         )
+
+         sectored_intensities = l1b_dataset[species] / (
+             L2_GEOMETRIC_FACTOR * species_efficiencies * energy_passbands
+         )
+
+         # Replace existing species data with omni-directional intensities
+         l2_dataset[species] = xr.DataArray(
+             sectored_intensities.data,
+             dims=("epoch", f"energy_{species}", "spin_sector", "elevation_angle"),
+             attrs=cdf_attrs.get_variable_attributes(species, check_schema=False),
+         )
+
+     # Calculate spin angle
+     # Formula:
+     # θ_(k,n) = (θ_(k,0)+30°* n) mod 360°
+     # where
+     # n is size of L2_HI_SECTORED_ANGLE, 0 to 11,
+     # k is size of inst_az from l1b, 0 to 11,
+     # Calculate spin angle by adding a base angle from L2_HI_SECTORED_ANGLE
+     # for each SSD index and then adding multiple of 30 degrees for each elevation.
+     # Then mod by 360 to keep it within 0-360 range.
+     elevation_angles = np.arange(len(l2_dataset["elevation_angle"].values)) * 30.0
+     spin_angles = (L2_HI_SECTORED_ANGLE[:, np.newaxis] + elevation_angles) % 360.0
+
+     # Add spin angle variable using the new elevation_angle dimension
+     l2_dataset["spin_angles"] = (("spin_sector", "elevation_angle"), spin_angles)
+     l2_dataset["spin_angles"].attrs = cdf_attrs.get_variable_attributes(
+         "spin_angles", check_schema=False
+     )
+
+     # Now carry over other variables from L1B to L2 dataset
+     for variable in l1b_dataset.data_vars:
+         if variable.startswith("epoch_") and variable != "epoch":
+             # get attrs with just that name
+             l2_dataset[variable] = xr.DataArray(
+                 l1b_dataset[variable].data,
+                 dims=("epoch",),
+                 attrs=cdf_attrs.get_variable_attributes(variable, check_schema=False),
+             )
+         elif variable.startswith("energy_"):
+             l2_dataset[variable] = xr.DataArray(
+                 l1b_dataset[variable].data,
+                 dims=(f"energy_{variable.split('_')[1]}",),
+                 attrs=cdf_attrs.get_variable_attributes(variable, check_schema=False),
+             )
+         elif variable.startswith("unc_"):
+             l2_dataset[variable] = xr.DataArray(
+                 l1b_dataset[variable].data,
+                 dims=(
+                     "epoch",
+                     f"energy_{variable.split('_')[1]}",
+                     "spin_sector",
+                     "elevation_angle",
+                 ),
+                 attrs=cdf_attrs.get_variable_attributes(variable),
+             )
+         elif variable == "data_quality":
+             l2_dataset[variable] = l1b_dataset[variable]
+             l2_dataset[variable].attrs.update(
+                 cdf_attrs.get_variable_attributes(variable, check_schema=False)
+             )
+
+     l2_dataset["epoch"].attrs.update(
+         cdf_attrs.get_variable_attributes("epoch", check_schema=False)
+     )
+     return l2_dataset
+
+
+ def process_codice_l2(
+     descriptor: str, dependencies: ProcessingInputCollection
+ ) -> xr.Dataset:
+     """
+     Will process CoDICE l1 data to create l2 data products.
+
+     Parameters
+     ----------
+     descriptor : str
+         The descriptor for the CoDICE L1 file to process.
+     dependencies : ProcessingInputCollection
+         Collection of processing inputs such as ancillary data files.
+
+     Returns
+     -------
+     l2_dataset : xarray.Dataset
+         The``xarray`` dataset containing the science data and supporting metadata.
+     """
+     # This should get science files since ancillary or spice doesn't have data_type
+     # as data level.
+     file_path = dependencies.get_file_paths(descriptor=descriptor)[0]
+
+     # Now form product name from descriptor
+     descriptor = ScienceFilePath(file_path).descriptor
+     dataset_name = f"imap_codice_l2_{descriptor}"
+
+     # TODO: update list of datasets that need geometric factors (if needed)
+     # Compute geometric factors needed for intensity calculations
+     if dataset_name in [
+         "imap_codice_l2_lo-sw-species",
+         "imap_codice_l2_lo-nsw-species",
+         "imap_codice_l2_lo-nsw-angular",
+         "imap_codice_l2_lo-sw-angular",
+     ]:
+         l2_dataset = load_cdf(file_path).copy()
+
+         geometric_factor_lookup = get_geometric_factor_lut(dependencies)
+         efficiency_lookup = get_efficiency_lut(dependencies)
+         geometric_factors = compute_geometric_factors(
+             l2_dataset, geometric_factor_lookup
+         )
+         if dataset_name == "imap_codice_l2_lo-sw-species":
+             # Filter the efficiency lookup table for solar wind efficiencies
+             efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"]
+             # Calculate the pickup ion sunward solar wind intensities using equation
+             # described in section 11.2.3 of algorithm document.
+             process_lo_species_intensity(
+                 l2_dataset,
+                 LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES,
+                 geometric_factors,
+                 efficiencies,
+                 PUI_POSITIONS,
+             )
+             # Calculate the sunward solar wind species intensities using equation
+             # described in section 11.2.3 of algorithm document.
+             process_lo_species_intensity(
+                 l2_dataset,
+                 LO_SW_SPECIES_VARIABLE_NAMES,
+                 geometric_factors,
+                 efficiencies,
+                 SOLAR_WIND_POSITIONS,
+             )
+         elif dataset_name == "imap_codice_l2_lo-nsw-species":
+             # Filter the efficiency lookup table for non-solar wind efficiencies
+             efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"]
+             # Calculate the non-sunward species intensities using equation
+             # described in section 11.2.3 of algorithm document.
+             process_lo_species_intensity(
+                 l2_dataset,
+                 LO_NSW_SPECIES_VARIABLE_NAMES,
+                 geometric_factors,
+                 efficiencies,
+                 NSW_POSITIONS,
+             )
+         elif dataset_name == "imap_codice_l2_lo-sw-angular":
+             efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"]
+             # Calculate the sunward solar wind angular intensities using equation
+             # described in section 11.2.2 of algorithm document.
+             l2_dataset = process_lo_angular_intensity(
+                 l2_dataset,
+                 LO_SW_ANGULAR_VARIABLE_NAMES,
+                 geometric_factors,
+                 efficiencies,
+                 SW_POSITIONS,
+             )
+         if dataset_name == "imap_codice_l2_lo-nsw-angular":
+             # Calculate the non sunward angular intensities
+             efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"]
+             l2_dataset = process_lo_angular_intensity(
+                 l2_dataset,
+                 LO_NSW_ANGULAR_VARIABLE_NAMES,
+                 geometric_factors,
+                 efficiencies,
+                 NSW_POSITIONS,
+             )
+
+     if dataset_name in [
+         "imap_codice_l2_hi-counters-singles",
+         "imap_codice_l2_hi-counters-aggregated",
+         "imap_codice_l2_lo-counters-singles",
+         "imap_codice_l2_lo-counters-aggregated",
+         "imap_codice_l2_lo-sw-priority",
+         "imap_codice_l2_lo-nsw-priority",
+     ]:
+         # No changes needed. Just save to an L2 CDF file.
+         # TODO: May not even need L2 files for these products
+         pass
+
+     elif dataset_name == "imap_codice_l2_hi-direct-events":
+         # Convert the following data variables to physical units using
+         # calibration data:
+         # - ssd_energy
+         # - tof
+         # - elevation_angle
+         # - spin_angle
+         # These converted variables are *in addition* to the existing L1 variables
+         # The other data variables require no changes
+         # See section 11.1.2 of algorithm document
+         pass
+
+     elif dataset_name == "imap_codice_l2_hi-sectored":
+         # Convert the sectored count rates using equation described in section
+         # 11.1.3 of algorithm document.
+         l2_dataset = process_hi_sectored(dependencies)
+
+     elif dataset_name == "imap_codice_l2_hi-omni":
+         # Calculate the omni-directional intensity for each species using
+         # equation described in section 11.1.4 of algorithm document
+         # hopefully this can also apply to hi-ialirt
+         l2_dataset = process_hi_omni(dependencies)
+
+     elif dataset_name == "imap_codice_l2_lo-direct-events":
+         # Convert the following data variables to physical units using
+         # calibration data:
+         # - apd_energy
+         # - elevation_angle
+         # - tof
+         # - spin_sector
+         # - esa_step
+         # These converted variables are *in addition* to the existing L1 variables
+         # The other data variables require no changes
+         # See section 11.1.2 of algorithm document
+         pass
+
+     # logger.info(f"\nFinal data product:\n{l2_dataset}\n")
+
+     return l2_dataset
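Usage sketch for the new L2 entry point: as this diff shows, process_codice_l2 no longer takes a file path; it takes a product descriptor plus a ProcessingInputCollection that supplies the L1 science file and the L2 lookup-table ancillaries (geometric factor and efficiency CSVs). The snippet below is only a sketch of how a caller might assemble those inputs; the ScienceInput/AncillaryInput helpers, their import location, and the file names are assumptions based on the imap_data_access input-collection pattern and are not part of this diff.

    # Hedged caller sketch -- only ProcessingInputCollection and process_codice_l2
    # are taken from this diff; everything else below is an assumption.
    from imap_data_access import ProcessingInputCollection
    from imap_data_access.processing_input import (  # assumed module path
        AncillaryInput,
        ScienceInput,
    )

    from imap_processing.codice.codice_l2 import process_codice_l2

    dependencies = ProcessingInputCollection(
        # Hypothetical file names following the IMAP naming convention
        ScienceInput("imap_codice_l1a_lo-sw-species_20250421_v001.cdf"),
        AncillaryInput("imap_codice_l2-lo-gfactor_20250421_v001.csv"),
        AncillaryInput("imap_codice_l2-lo-efficiency_20250421_v001.csv"),
    )
    l2_dataset = process_codice_l2("lo-sw-species", dependencies)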