imap-processing 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +18 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +101 -258
  4. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
  5. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +12 -2
  6. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +1 -8
  7. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +16 -5
  8. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +27 -25
  9. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +16 -16
  10. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +2 -2
  11. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  12. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +12 -0
  13. imap_processing/cdf/utils.py +2 -2
  14. imap_processing/cli.py +4 -16
  15. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  16. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  17. imap_processing/codice/codice_l1b.py +80 -97
  18. imap_processing/codice/codice_l2.py +270 -103
  19. imap_processing/codice/codice_new_l1a.py +64 -0
  20. imap_processing/codice/constants.py +37 -2
  21. imap_processing/codice/utils.py +270 -0
  22. imap_processing/ena_maps/ena_maps.py +51 -39
  23. imap_processing/ena_maps/utils/corrections.py +196 -14
  24. imap_processing/ena_maps/utils/naming.py +3 -1
  25. imap_processing/hi/hi_l1c.py +57 -19
  26. imap_processing/hi/hi_l2.py +89 -36
  27. imap_processing/ialirt/calculate_ingest.py +19 -1
  28. imap_processing/ialirt/constants.py +12 -6
  29. imap_processing/ialirt/generate_coverage.py +6 -1
  30. imap_processing/ialirt/l0/parse_mag.py +1 -0
  31. imap_processing/ialirt/l0/process_hit.py +1 -0
  32. imap_processing/ialirt/l0/process_swapi.py +1 -0
  33. imap_processing/ialirt/l0/process_swe.py +2 -0
  34. imap_processing/ialirt/process_ephemeris.py +6 -2
  35. imap_processing/ialirt/utils/create_xarray.py +3 -2
  36. imap_processing/lo/l1b/lo_l1b.py +12 -2
  37. imap_processing/lo/l1c/lo_l1c.py +4 -4
  38. imap_processing/lo/l2/lo_l2.py +101 -8
  39. imap_processing/quality_flags.py +1 -0
  40. imap_processing/swapi/constants.py +4 -0
  41. imap_processing/swapi/l1/swapi_l1.py +47 -20
  42. imap_processing/swapi/l2/swapi_l2.py +17 -3
  43. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  44. imap_processing/ultra/l1b/de.py +57 -1
  45. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  46. imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
  47. imap_processing/ultra/l1c/helio_pset.py +34 -8
  48. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  49. imap_processing/ultra/l1c/spacecraft_pset.py +13 -7
  50. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  51. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +79 -20
  52. imap_processing/ultra/l2/ultra_l2.py +2 -2
  53. imap_processing/ultra/utils/ultra_l1_utils.py +6 -0
  54. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/METADATA +1 -1
  55. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/RECORD +58 -54
  56. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/LICENSE +0 -0
  57. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/WHEEL +0 -0
  58. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/entry_points.txt +0 -0
@@ -11,14 +11,26 @@ from imap_processing.ena_maps.ena_maps import (
     HiPointingSet,
     RectangularSkyMap,
 )
-from imap_processing.ena_maps.utils.corrections import PowerLawFluxCorrector
+from imap_processing.ena_maps.utils.corrections import (
+    PowerLawFluxCorrector,
+    apply_compton_getting_correction,
+    interpolate_map_flux_to_helio_frame,
+)
 from imap_processing.ena_maps.utils.naming import MapDescriptor
 from imap_processing.hi.utils import CalibrationProductConfig

 logger = logging.getLogger(__name__)

+SC_FRAME_VARS_TO_PROJECT = {
+    "counts",
+    "exposure_factor",
+    "bg_rates",
+    "bg_rates_unc",
+    "obs_date",
+}
+HELIO_FRAME_VARS_TO_PROJECT = SC_FRAME_VARS_TO_PROJECT | {"energy_sc"}
 # TODO: is an exposure time weighted average for obs_date appropriate?
-VARS_TO_EXPOSURE_TIME_AVERAGE = ["bg_rates", "bg_rates_unc", "obs_date"]
+FULL_EXPOSURE_TIME_AVERAGE_SET = {"bg_rates", "bg_rates_unc", "obs_date", "energy_sc"}


 def hi_l2(
@@ -98,33 +110,74 @@ def generate_hi_map(
         The sky map with all the PSET data projected into the map.
     """
     output_map = descriptor.to_empty_map()
+    vars_to_bin = (
+        HELIO_FRAME_VARS_TO_PROJECT
+        if descriptor.frame_descriptor == "hf"
+        else SC_FRAME_VARS_TO_PROJECT
+    )
+    vars_to_exposure_time_average = FULL_EXPOSURE_TIME_AVERAGE_SET & vars_to_bin

     if not isinstance(output_map, RectangularSkyMap):
         raise NotImplementedError("Healpix map output not supported for Hi")

-    # TODO: Implement Compton-Getting correction
-    if descriptor.frame_descriptor != "sf":
-        raise NotImplementedError("CG correction not implemented for Hi")
+    cached_esa_steps = None

     for pset_path in psets:
         logger.info(f"Processing {pset_path}")
-        pset = HiPointingSet(pset_path, spin_phase=descriptor.spin_phase)
-
-        # Background rate and uncertainty are exposure time weighted means in
-        # the map.
-        for var in VARS_TO_EXPOSURE_TIME_AVERAGE:
-            pset.data[var] *= pset.data["exposure_factor"]
+        pset = HiPointingSet(pset_path)
+
+        # Store the first PSET esa_energy_step values and make sure every PSET
+        # contains the same set of esa_energy_step values.
+        # TODO: Correctly handle PSETs with different esa_energy_step values.
+        if cached_esa_steps is None:
+            cached_esa_steps = pset.data["esa_energy_step"].values.copy()
+            esa_ds = esa_energy_df(
+                l2_ancillary_path_dict["esa-energies"],
+                pset.data["esa_energy_step"].values,
+            ).to_xarray()
+            energy_kev = esa_ds["nominal_central_energy"]
+        if not np.array_equal(cached_esa_steps, pset.data["esa_energy_step"].values):
+            raise ValueError(
+                "All PSETs must have the same set of esa_energy_step values."
+            )
+
+        if descriptor.frame_descriptor == "hf":
+            # convert esa nominal central energy from keV to eV
+            esa_energy_ev = energy_kev * 1000
+            pset = apply_compton_getting_correction(pset, esa_energy_ev)
+
+        # Multiply variables that need to be exposure time weighted average by
+        # exposure factor.
+        for var in vars_to_exposure_time_average:
+            if var in pset.data:
+                pset.data[var] *= pset.data["exposure_factor"]
+
+        # Set the mask used to filter ram/anti-ram pixels
+        pset_valid_mask = None  # Default to no mask (full spin)
+        if descriptor.spin_phase == "ram":
+            pset_valid_mask = pset.data["ram_mask"]
+            logger.debug(
+                f"Using ram mask with shape: {pset_valid_mask.shape} "
+                f"containing {np.prod(pset_valid_mask.shape)} pixels,"
+                f"{np.sum(pset_valid_mask.values)} of which are True."
+            )
+        elif descriptor.spin_phase == "anti":
+            pset_valid_mask = ~pset.data["ram_mask"]
+            logger.debug(
+                f"Using anti-ram mask with shape: {pset_valid_mask.shape} "
+                f"containing {np.prod(pset_valid_mask.shape)} pixels,"
+                f"{np.sum(pset_valid_mask.values)} of which are True."
+            )

         # Project (bin) the PSET variables into the map pixels
         output_map.project_pset_values_to_map(
-            pset,
-            ["counts", "exposure_factor", "bg_rates", "bg_rates_unc", "obs_date"],
+            pset, list(vars_to_bin), pset_valid_mask=pset_valid_mask
         )

     # Finish the exposure time weighted mean calculation of backgrounds
     # Allow divide by zero to fill set pixels with zero exposure time to NaN
     with np.errstate(divide="ignore"):
-        for var in VARS_TO_EXPOSURE_TIME_AVERAGE:
+        for var in vars_to_exposure_time_average:
             output_map.data_1d[var] /= output_map.data_1d["exposure_factor"]

     output_map.data_1d.update(calculate_ena_signal_rates(output_map.data_1d))
@@ -138,30 +191,27 @@ def generate_hi_map(
     # TODO: Figure out how to compute obs_date_range (stddev of obs_date)
     output_map.data_1d["obs_date_range"] = xr.zeros_like(output_map.data_1d["obs_date"])

+    # Set the energy_step_delta values to the energy bandpass half-width-half-max
+    energy_delta = esa_ds["bandpass_fwhm"] / 2
+    output_map.data_1d["energy_delta_minus"] = energy_delta
+    output_map.data_1d["energy_delta_plus"] = energy_delta
+
     # Rename and convert coordinate from esa_energy_step energy
-    esa_df = esa_energy_df(
-        l2_ancillary_path_dict["esa-energies"],
-        output_map.data_1d["esa_energy_step"].data,
-    )
     output_map.data_1d = output_map.data_1d.rename({"esa_energy_step": "energy"})
-    output_map.data_1d = output_map.data_1d.assign_coords(
-        energy=esa_df["nominal_central_energy"].values
-    )
-    # Set the energy_step_delta values to the energy bandpass half-width-half-max
-    energy_delta = esa_df["bandpass_fwhm"].values / 2
-    output_map.data_1d["energy_delta_minus"] = xr.DataArray(
-        energy_delta,
-        name="energy_delta_minus",
-        dims=["energy"],
-    )
-    output_map.data_1d["energy_delta_plus"] = xr.DataArray(
-        energy_delta,
-        name="energy_delta_plus",
-        dims=["energy"],
-    )
+    output_map.data_1d = output_map.data_1d.assign_coords(energy=energy_kev.values)

     output_map.data_1d = output_map.data_1d.drop("esa_energy_step_label")

+    # Apply Compton-Getting interpolation for heliocentric frame maps
+    if descriptor.frame_descriptor == "hf":
+        esa_energy_ev = esa_energy_ev.rename({"esa_energy_step": "energy"})
+        esa_energy_ev = esa_energy_ev.assign_coords(energy=energy_kev.values)
+        output_map.data_1d = interpolate_map_flux_to_helio_frame(
+            output_map.data_1d,
+            output_map.data_1d["energy"] * 1000,  # Convert ESA energies to eV
+            esa_energy_ev,  # heliocentric energies (same as ESA energies)
+        )
+
     return output_map


@@ -420,7 +470,7 @@ def _calculate_improved_stat_variance(


 def esa_energy_df(
-    esa_energies_path: str | Path, esa_energy_steps: np.ndarray
+    esa_energies_path: str | Path, esa_energy_steps: np.ndarray | slice | None = None
 ) -> pd.DataFrame:
     """
     Lookup the nominal central energy values for given esa energy steps.
@@ -429,8 +479,9 @@ def esa_energy_df(
     ----------
     esa_energies_path : str or pathlib.Path
         Location of the calibration csv file containing the lookup data.
-    esa_energy_steps : numpy.ndarray
-        The ESA energy steps to get energies for.
+    esa_energy_steps : numpy.ndarray, slice, or None
+        The ESA energy steps to get energies for. If not provided (default is None),
+        the full dataframe is returned.

     Returns
     -------
@@ -438,6 +489,8 @@ def esa_energy_df(
         Full data frame from the csv file filtered to only include the
         esa_energy_steps input.
     """
+    if esa_energy_steps is None:
+        esa_energy_steps = slice(None)
     esa_energies_lut = pd.read_csv(
         esa_energies_path, comment="#", index_col="esa_energy_step"
     )
@@ -9,7 +9,7 @@ from imap_processing.ialirt.constants import STATIONS
 logger = logging.getLogger(__name__)


-def find_tcp_connections(
+def find_tcp_connections(  # noqa: PLR0912
     start_file_creation: datetime,
     end_file_creation: datetime,
     lines: list,
@@ -35,8 +35,16 @@ def find_tcp_connections(
        Output dictionary with tcp connection info.
    """
    current_starts: dict[str, datetime | None] = {}
+    partners_opened = set()

    for line in lines:
+        # Note if this line appears.
+        if "Opened raw record file" in line:
+            station = line.split("Opened raw record file for ")[1].split(
+                " antenna_partner"
+            )[0]
+            partners_opened.add(station)
+
        if "antenna partner connection is" not in line:
            continue

@@ -84,6 +92,16 @@ def find_tcp_connections(
            }
        )

+    # Handle stations with only "Opened raw record file" (no up/down)
+    for station in partners_opened:
+        if not realtime_summary["connection_times"][station]:
+            realtime_summary["connection_times"][station].append(
+                {
+                    "start": datetime.isoformat(start_file_creation),
+                    "end": datetime.isoformat(end_file_creation),
+                }
+            )
+
    # Filter out connection windows that are completely outside the time window
    for station in realtime_summary["connection_times"]:
        realtime_summary["connection_times"][station] = [
@@ -53,12 +53,6 @@ class StationProperties(NamedTuple):
 # Verified by Kiel and KSWC Observatory staff.
 # Notes: the KSWC station is not yet operational,
 # but will have the following properties:
-# "KSWC": StationProperties(
-#     longitude=126.2958,  # degrees East
-#     latitude=33.4273,  # degrees North
-#     altitude=0.1,  # approx 100 meters
-#     min_elevation_deg=5,  # 5 degrees is the requirement
-# ),
 STATIONS = {
     "Kiel": StationProperties(
         longitude=10.1808,  # degrees East
@@ -66,10 +60,22 @@ STATIONS = {
         altitude=0.1,  # approx 100 meters
         min_elevation_deg=5,  # 5 degrees is the requirement
     ),
+    "Korea": StationProperties(
+        longitude=126.2958,  # degrees East
+        latitude=33.4273,  # degrees North
+        altitude=0.1,  # approx 100 meters
+        min_elevation_deg=5,  # 5 degrees is the requirement
+    ),
     "Manaus": StationProperties(
         longitude=-59.969334,  # degrees East (negative = West)
         latitude=-2.891257,  # degrees North (negative = South)
         altitude=0.1,  # approx 100 meters
         min_elevation_deg=5,  # 5 degrees is the requirement
     ),
+    "SANSA": StationProperties(
+        longitude=27.714,  # degrees East (negative = West)
+        latitude=-25.888,  # degrees North (negative = South)
+        altitude=1.542,  # approx 1542 meters
+        min_elevation_deg=2,  # 5 degrees is the requirement
+    ),
 }
@@ -57,6 +57,9 @@ def generate_coverage(

    stations = {
        "Kiel": STATIONS["Kiel"],
+        "Korea": STATIONS["Korea"],
+        "Manaus": STATIONS["Manaus"],
+        "SANSA": STATIONS["SANSA"],
    }
    coverage_dict = {}
    outage_dict = {}
@@ -77,7 +80,9 @@ def generate_coverage(
        dsn_outage_mask |= (time_range >= start_et) & (time_range <= end_et)

    for station_name, (lon, lat, alt, min_elevation) in stations.items():
-        _azimuth, elevation = calculate_azimuth_and_elevation(lon, lat, alt, time_range)
+        _azimuth, elevation = calculate_azimuth_and_elevation(
+            lon, lat, alt, time_range, obsref="IAU_EARTH"
+        )
        visible = elevation > min_elevation

        outage_mask = np.zeros(time_range.shape, dtype=bool)
@@ -710,6 +710,7 @@ def process_packet(
                "met": int(met_all[i]),
                "met_in_utc": met_to_utc(met_all[i]).split(".")[0],
                "ttj2000ns": int(met_to_ttj2000ns(met_all[i])),
+                "instrument": "mag",
                "mag_epoch": int(mago_times_all[i]),
                "mag_B_GSE": [Decimal(str(v)) for v in gse_vector[i]],
                "mag_B_GSM": [Decimal(str(v)) for v in gsm_vector[i]],
@@ -171,6 +171,7 @@ def process_hit(xarray_data: xr.Dataset) -> list[dict]:
            "met": int(met),
            "met_in_utc": met_to_utc(met).split(".")[0],
            "ttj2000ns": int(met_to_ttj2000ns(met)),
+            "instrument": "hit",
            "hit_e_a_side_low_en": int(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"]),
            "hit_e_a_side_med_en": int(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"]),
            "hit_e_a_side_high_en": int(l1["IALRT_RATE_7"]),
@@ -226,6 +226,7 @@ def process_swapi_ialirt(
            "met": int(met_values[entry]),
            "met_in_utc": met_to_utc(met_values[entry]).split(".")[0],
            "ttj2000ns": int(met_to_ttj2000ns(met_values[entry])),
+            "instrument": "swapi",
            "swapi_pseudo_proton_speed": Decimal(solution["pseudo_speed"][entry]),
            "swapi_pseudo_proton_density": Decimal(
                solution["pseudo_density"][entry]
@@ -553,6 +553,7 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
                "met": met_first_half,
                "met_in_utc": met_to_utc(met_first_half).split(".")[0],
                "ttj2000ns": int(met_to_ttj2000ns(met_first_half)),
+                "instrument": "swe",
                "swe_normalized_counts": [int(val) for val in summed_first],
                "swe_counterstreaming_electrons": bde_first_half,
            },
@@ -563,6 +564,7 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
                "met": met_second_half,
                "met_in_utc": met_to_utc(met_second_half).split(".")[0],
                "ttj2000ns": int(met_to_ttj2000ns(met_second_half)),
+                "instrument": "swe",
                "swe_normalized_counts": [int(val) for val in summed_second],
                "swe_counterstreaming_electrons": bde_second_half,
            },
@@ -72,6 +72,7 @@ def calculate_azimuth_and_elevation(
    altitude: float,
    observation_time: float | np.ndarray,
    target: str = SpiceBody.IMAP.name,
+    obsref: str = "ITRF93",
) -> tuple:
    """
    Calculate azimuth and elevation.
@@ -91,6 +92,9 @@ def calculate_azimuth_and_elevation(
        is to be computed. Expressed as ephemeris time, seconds past J2000 TDB.
    target : str (Optional)
        The target body. Default is "IMAP".
+    obsref : str (Optional)
+        Body-fixed, body-centered reference frame wrt
+        observer's center.

    Returns
    -------
@@ -120,7 +124,7 @@ def calculate_azimuth_and_elevation(
            elplsz=True,  # Elevation increases from the XY plane toward +Z
            obspos=ground_station_position_ecef,  # observer pos. to center of motion
            obsctr="EARTH",  # Name of the center of motion
-            obsref="IAU_EARTH",  # Body-fixed, body-centered reference frame wrt
+            obsref=obsref,  # Body-fixed, body-centered reference frame wrt
            # observer's center
        )
        azimuth.append(np.rad2deg(azel_results[0][1]))
@@ -223,7 +227,7 @@ def build_output(

    # For now, assume that kernel management will be handled by ensure_spice
    azimuth, elevation = calculate_azimuth_and_elevation(
-        longitude, latitude, altitude, time_range
+        longitude, latitude, altitude, time_range, obsref="ITRF93"
    )

    output_dict["time"] = et_to_utc(time_range, format_str="ISOC")
@@ -52,7 +52,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
        ["radial", "tangential", "normal"],
        name="RTN_component",
        dims=["RTN_component"],
-        attrs=cdf_manager.get_variable_attributes("RTN_componentt", check_schema=False),
+        attrs=cdf_manager.get_variable_attributes("RTN_component", check_schema=False),
    )

    esa_step = xr.DataArray(
@@ -85,7 +85,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
        name="codice_hi_h_spin_angle",
        dims=["codice_hi_h_spin_angle"],
        attrs=cdf_manager.get_variable_attributes(
-            "codice_hi_h_spin_anglen", check_schema=False
+            "codice_hi_h_spin_angle", check_schema=False
        ),
    )

@@ -156,6 +156,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
            "sc_velocity_GSE",
            "mag_hk_status",
            "spice_kernels",
+            "instrument",
        ]:
            continue
        elif key in ["mag_B_GSE", "mag_B_GSM", "mag_B_RTN"]:
@@ -19,6 +19,7 @@ from imap_processing.lo.l1b.tof_conversions import (
 from imap_processing.spice.geometry import (
     SpiceFrame,
     cartesian_to_latitudinal,
+    frame_transform,
     instrument_pointing,
 )
 from imap_processing.spice.repoint import get_pointing_times
@@ -760,8 +761,10 @@ def set_bad_or_goodtimes(
    # the bin_start and bin_end are 6 degree bins and need to be converted to
    # 0.1 degree bins to align with the spin_bins, so multiply by 60
    time_mask = (epochs[:, None] >= times_start) & (epochs[:, None] <= times_end)
+    # The ancillary file binning uses 0-59 for the 6 degree bins, so add 1 to bin_end
+    # so the upper bound is inclusive of the full bin range.
    bin_mask = (spin_bins[:, None] >= times_df["bin_start"].values * 60) & (
-        spin_bins[:, None] <= times_df["bin_end"].values * 60
+        spin_bins[:, None] < (times_df["bin_end"].values + 1) * 60
    )

    # Combined mask for epochs that fall within the time and bin ranges
@@ -853,8 +856,15 @@ def set_pointing_bin(l1b_de: xr.Dataset) -> xr.Dataset:
    x = l1b_de["hae_x"]
    y = l1b_de["hae_y"]
    z = l1b_de["hae_z"]
+    # Convert from HAE to DPS coordinates
+    dps_xyz = frame_transform(
+        ttj2000ns_to_et(l1b_de["epoch"]),
+        np.column_stack((x, y, z)),
+        SpiceFrame.IMAP_HAE,
+        SpiceFrame.IMAP_DPS,
+    )
    # convert the pointing direction to latitudinal coordinates
-    direction = cartesian_to_latitudinal(np.column_stack((x, y, z)))
+    direction = cartesian_to_latitudinal(dps_xyz)
    # first column: radius (Not needed)
    # second column: longitude
    lons = direction[:, 1]
@@ -295,14 +295,14 @@ def create_pset_counts(
    data = np.column_stack(
        (
            de_filtered["esa_step"],
-            de_filtered["pointing_bin_lon"],
-            de_filtered["pointing_bin_lat"],
+            de_filtered["spin_bin"],
+            de_filtered["off_angle_bin"],
        )
    )
    # Create the histogram with 3600 longitude bins, 40 latitude bins, and 7 energy bins
    lon_edges = np.arange(3601)
    lat_edges = np.arange(41)
-    energy_edges = np.arange(8)
+    energy_edges = np.arange(1, 9)

    hist, _edges = np.histogramdd(
        data,
@@ -341,7 +341,7 @@ def calculate_exposure_times(counts: xr.DataArray, l1b_de: xr.Dataset) -> xr.Dat
        The exposure times for the L1B Direct Event dataset.
    """
    data = np.column_stack(
-        (l1b_de["esa_step"], l1b_de["pointing_bin_lon"], l1b_de["pointing_bin_lat"])
+        (l1b_de["esa_step"], l1b_de["spin_bin"], l1b_de["off_angle_bin"])
    )

    result = binned_statistic_dd(
@@ -10,6 +10,7 @@ import xarray as xr
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.ena_maps import ena_maps
 from imap_processing.ena_maps.ena_maps import AbstractSkyMap, RectangularSkyMap
+from imap_processing.ena_maps.utils.corrections import PowerLawFluxCorrector
 from imap_processing.ena_maps.utils.naming import MapDescriptor
 from imap_processing.lo import lo_ancillary
 from imap_processing.spice.time import et_to_datetime64, ttj2000ns_to_et
@@ -77,7 +78,13 @@ def lo_l2(
    logger.info("Step 4: Calculating rates and intensities")

    # Determine if corrections are needed and prepare oxygen data if required
-    sputtering_correction, bootstrap_correction, o_map_dataset = _prepare_corrections(
+    (
+        sputtering_correction,
+        bootstrap_correction,
+        flux_correction,
+        o_map_dataset,
+        flux_factors,
+    ) = _prepare_corrections(
        map_descriptor, descriptor, sci_dependencies, anc_dependencies
    )

@@ -85,7 +92,9 @@ def lo_l2(
        dataset,
        sputtering_correction=sputtering_correction,
        bootstrap_correction=bootstrap_correction,
+        flux_correction=flux_correction,
        o_map_dataset=o_map_dataset,
+        flux_factors=flux_factors,
    )

    logger.info("Step 5: Finalizing dataset with attributes")
@@ -100,7 +109,7 @@ def _prepare_corrections(
    descriptor: str,
    sci_dependencies: dict,
    anc_dependencies: list,
-) -> tuple[bool, bool, xr.Dataset | None]:
+) -> tuple[bool, bool, bool, xr.Dataset | None, Path | None]:
    """
    Determine what corrections are needed and prepare oxygen dataset if required.

@@ -130,7 +139,9 @@ def _prepare_corrections(
    # Default values - no corrections needed
    sputtering_correction = False
    bootstrap_correction = False
+    flux_correction = False
    o_map_dataset = None
+    flux_factors: None | Path = None

    # Sputtering and bootstrap corrections are only applied to hydrogen ENA data
    # Guard against recursion: don't process oxygen for oxygen maps
@@ -145,7 +156,24 @@ def _prepare_corrections(
        sputtering_correction = True
        bootstrap_correction = True

-    return sputtering_correction, bootstrap_correction, o_map_dataset
+    if "raw" not in map_descriptor.principal_data:
+        flux_correction = True
+        try:
+            flux_factors = next(
+                x for x in anc_dependencies if "esa-eta-fit-factors" in str(x)
+            )
+        except StopIteration:
+            raise ValueError(
+                "No flux correction factor file found in ancillary dependencies"
+            ) from None
+
+    return (
+        sputtering_correction,
+        bootstrap_correction,
+        flux_correction,
+        o_map_dataset,
+        flux_factors,
+    )


 # =============================================================================
@@ -664,7 +692,9 @@ def calculate_all_rates_and_intensities(
    dataset: xr.Dataset,
    sputtering_correction: bool = False,
    bootstrap_correction: bool = False,
+    flux_correction: bool = False,
    o_map_dataset: xr.Dataset | None = None,
+    flux_factors: Path | None = None,
) -> xr.Dataset:
    """
    Calculate rates and intensities with proper error propagation.
@@ -679,8 +709,13 @@ def calculate_all_rates_and_intensities(
    bootstrap_correction : bool, optional
        Whether to apply bootstrap corrections to intensities.
        Default is False.
+    flux_correction : bool, optional
+        Whether to apply flux corrections to intensities.
+        Default is False.
    o_map_dataset : xr.Dataset, optional
        Dataset specifically for oxygen, needed for sputtering corrections.
+    flux_factors : Path, optional
+        Path to flux factor file for flux corrections.

    Returns
    -------
@@ -705,7 +740,13 @@ def calculate_all_rates_and_intensities(
    if bootstrap_correction:
        dataset = calculate_bootstrap_corrections(dataset)

-    # Step 6: Clean up intermediate variables
+    # Optional Step 6: Calculate flux corrections
+    if flux_correction:
+        if flux_factors is None:
+            raise ValueError("Flux factors file must be provided for flux corrections")
+        dataset = calculate_flux_corrections(dataset, flux_factors)
+
+    # Step 7: Clean up intermediate variables
    dataset = cleanup_intermediate_variables(dataset)

    return dataset
@@ -938,8 +979,10 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:
        bootstrap_factor_array,
        dims=["energy_i", "energy_k"],
        coords={
-            "energy_i": list(range(7)),
-            "energy_k": list(range(8)),  # Include virtual channel 7 (index 7)
+            "energy_i": dataset["energy"].values,
+            # Add an extra coordinate for the virtual E8 channel, unused
+            # in the broadcasting calculations
+            "energy_k": np.concatenate([dataset["energy"].values, [np.nan]]),
        },
    )

@@ -1001,7 +1044,7 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:
        # NOTE: The paper uses 1-based indexing and we use 0-based indexing
        # so there is an off-by-one difference in the indices.
        bootstrap_intensity_i[:] = (
-            j_c_prime_i - bootstrap_factor.sel(energy_i=i, energy_k=7) * j_8_b[0, ...]
+            j_c_prime_i - bootstrap_factor.isel(energy_i=i, energy_k=7) * j_8_b[0, ...]
        )
        # NOTE: We will square root at the end to get the uncertainty, but
        # all equations are with variances
@@ -1013,7 +1056,7 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:

        # Get bootstrap factors for this i and the relevant k values
        # Rename energy_k dimension to energy for alignment with intensity
-        bootstrap_factors_k = bootstrap_factor.sel(
+        bootstrap_factors_k = bootstrap_factor.isel(
            energy_i=i, energy_k=k_indices
        ).rename({"energy_k": "energy"})

@@ -1082,6 +1125,56 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:
    return dataset


+def calculate_flux_corrections(dataset: xr.Dataset, flux_factors: Path) -> xr.Dataset:
+    """
+    Calculate flux corrections for intensities.
+
+    Uses the shared ena maps ``PowerLawFluxCorrector`` class to do the
+    correction calculations.
+
+    Parameters
+    ----------
+    dataset : xr.Dataset
+        Dataset with count rates, geometric factors, and center energies.
+    flux_factors : Path
+        Path to the eta flux factor file to use for corrections. Read in as
+        an ancillary file in the preprocessing step.
+
+    Returns
+    -------
+    xr.Dataset
+        Dataset with calculated flux-corrected intensities and their
+        uncertainties for the specified species.
+    """
+    logger.info("Applying flux corrections")
+
+    # Flux correction
+    corrector = PowerLawFluxCorrector(flux_factors)
+    # FluxCorrector works on (energy, :) arrays, so we need to flatten the map
+    # spatial dimensions for the correction and then reshape back after.
+    input_shape = dataset["ena_intensity"].shape[1:]  # Exclude epoch dimension
+    intensity = dataset["ena_intensity"].values[0].reshape(len(dataset["energy"]), -1)
+    stat_uncert = (
+        dataset["ena_intensity_stat_uncert"]
+        .values[0]
+        .reshape(len(dataset["energy"]), -1)
+    )
+    corrected_intensity, corrected_stat_unc = corrector.apply_flux_correction(
+        intensity,
+        stat_uncert,
+        dataset["energy"].data,
+    )
+    # Add the size 1 epoch dimension back in to the corrected fluxes.
+    dataset["ena_intensity"].data = corrected_intensity.reshape(input_shape)[
+        np.newaxis, ...
+    ]
+    dataset["ena_intensity_stat_uncert"].data = corrected_stat_unc.reshape(input_shape)[
+        np.newaxis, ...
+    ]
+
+    return dataset
+
+
 def cleanup_intermediate_variables(dataset: xr.Dataset) -> xr.Dataset:
    """
    Remove intermediate variables that were only needed for calculations.
@@ -64,6 +64,7 @@ class ImapAttitudeUltraFlags(FlagNameMixin):
    AUXMISMATCH = 2**1  # bit 1 # aux packet does not match Universal Spin Table
    SPINPHASE = 2**2  # bit 2 # spin phase flagged by Universal Spin Table
    SPINPERIOD = 2**3  # bit 3 # spin period flagged by Universal Spin Table
+    DURINGREPOINT = 2**4  # bit 4 # spin during a repointing


class ImapRatesUltraFlags(FlagNameMixin):