imap-processing 0.19.0__py3-none-any.whl → 0.19.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of imap-processing might be problematic.

Files changed (64)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +31 -894
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +279 -255
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
  6. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +3 -1
  7. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  8. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
  9. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -31
  10. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +61 -1
  11. imap_processing/cli.py +62 -71
  12. imap_processing/codice/codice_l0.py +2 -1
  13. imap_processing/codice/codice_l1a.py +47 -49
  14. imap_processing/codice/codice_l1b.py +42 -32
  15. imap_processing/codice/codice_l2.py +105 -7
  16. imap_processing/codice/constants.py +50 -8
  17. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  18. imap_processing/ena_maps/ena_maps.py +39 -18
  19. imap_processing/ena_maps/utils/corrections.py +291 -0
  20. imap_processing/ena_maps/utils/map_utils.py +20 -4
  21. imap_processing/glows/l1b/glows_l1b.py +38 -23
  22. imap_processing/glows/l1b/glows_l1b_data.py +10 -11
  23. imap_processing/hi/hi_l1c.py +4 -109
  24. imap_processing/hi/hi_l2.py +34 -23
  25. imap_processing/hi/utils.py +109 -0
  26. imap_processing/ialirt/l0/ialirt_spice.py +1 -0
  27. imap_processing/ialirt/utils/create_xarray.py +1 -1
  28. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  29. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  30. imap_processing/lo/l1b/lo_l1b.py +90 -16
  31. imap_processing/lo/l1c/lo_l1c.py +164 -50
  32. imap_processing/lo/l2/lo_l2.py +941 -127
  33. imap_processing/mag/l1d/mag_l1d_data.py +36 -3
  34. imap_processing/mag/l2/mag_l2.py +2 -0
  35. imap_processing/mag/l2/mag_l2_data.py +4 -3
  36. imap_processing/quality_flags.py +14 -0
  37. imap_processing/spice/geometry.py +15 -8
  38. imap_processing/spice/pointing_frame.py +4 -2
  39. imap_processing/spice/repoint.py +49 -0
  40. imap_processing/ultra/constants.py +29 -0
  41. imap_processing/ultra/l1b/badtimes.py +35 -11
  42. imap_processing/ultra/l1b/de.py +15 -9
  43. imap_processing/ultra/l1b/extendedspin.py +24 -12
  44. imap_processing/ultra/l1b/goodtimes.py +112 -0
  45. imap_processing/ultra/l1b/lookup_utils.py +1 -1
  46. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  47. imap_processing/ultra/l1b/ultra_l1b_culling.py +8 -4
  48. imap_processing/ultra/l1b/ultra_l1b_extended.py +79 -43
  49. imap_processing/ultra/l1c/helio_pset.py +68 -39
  50. imap_processing/ultra/l1c/l1c_lookup_utils.py +45 -12
  51. imap_processing/ultra/l1c/spacecraft_pset.py +81 -37
  52. imap_processing/ultra/l1c/ultra_l1c.py +27 -22
  53. imap_processing/ultra/l1c/ultra_l1c_culling.py +7 -0
  54. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +41 -41
  55. imap_processing/ultra/l2/ultra_l2.py +54 -10
  56. imap_processing/ultra/utils/ultra_l1_utils.py +10 -5
  57. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +1 -1
  58. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +62 -60
  59. imap_processing/ultra/l1b/cullingmask.py +0 -90
  60. imap_processing/ultra/l1c/histogram.py +0 -36
  61. /imap_processing/glows/ancillary/{imap_glows_pipeline_settings_20250923_v002.json → imap_glows_pipeline-settings_20250923_v002.json} +0 -0
  62. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
  63. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
  64. {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0

imap_processing/mag/l1d/mag_l1d_data.py

@@ -1,6 +1,7 @@
 # mypy: disable-error-code="unused-ignore"
 """Data classes for MAG L1D processing."""
 
+import logging
 from dataclasses import InitVar, dataclass
 
 import numpy as np
@@ -14,7 +15,9 @@ from imap_processing.mag.l2.mag_l2 import retrieve_matrix_from_l2_calibration
 from imap_processing.mag.l2.mag_l2_data import MagL2L1dBase, ValidFrames
 from imap_processing.spice import spin
 from imap_processing.spice.geometry import frame_transform
-from imap_processing.spice.time import ttj2000ns_to_met
+from imap_processing.spice.time import ttj2000ns_to_et, ttj2000ns_to_met
+
+logger = logging.getLogger(__name__)
 
 
 @dataclass
@@ -166,6 +169,9 @@ class MagL1d(MagL2L1dBase): # type: ignore[misc]
             The day we are processing, in np.datetime64[D] format. This is used to
             truncate the data to exactly 24 hours.
         """
+        # The main data frame is MAGO, even though we have MAGI data included.
+        self.frame = ValidFrames.MAGO
+
         # set the magnitude before truncating
         self.magnitude = np.zeros(self.vectors.shape[0], dtype=np.float64)  # type: ignore[has-type]
         self.truncate_to_24h(day)
@@ -272,15 +278,42 @@ class MagL1d(MagL2L1dBase): # type: ignore[misc]
         end_frame : ValidFrames
             The frame to rotate to. Should be one of the ValidFrames enum.
         """
+        # Self.frame should refer to the main data in self.vectors, which is MAGO
+        # data. For most frames, MAGO and MAGI are in the same frame, except the
+        # instrument reference frame.
+        if ValidFrames.MAGI in (self.frame, end_frame):
+            raise ValueError(
+                "MAGL1d.frame should never be equal to MAGI frame. If the "
+                "data is in the instrument frame, use MAGO."
+            )
+
         start_frame = self.frame
-        super().rotate_frame(end_frame)
+
+        if self.epoch_et is None:
+            self.epoch_et: np.ndarray = ttj2000ns_to_et(self.epoch)
+            self.magi_epoch_et: np.ndarray = ttj2000ns_to_et(self.magi_epoch)
+
+        self.vectors = frame_transform(
+            self.epoch_et,
+            self.vectors,
+            from_frame=start_frame.value,
+            to_frame=end_frame.value,
+        )
+
+        # If we were in MAGO frame, we need to rotate MAGI vectors from MAGI to
+        # end_frame
+        if start_frame == ValidFrames.MAGO:
+            start_frame = ValidFrames.MAGI
+
         self.magi_vectors = frame_transform(
-            self.magi_epoch,
+            self.magi_epoch_et,
             self.magi_vectors,
             from_frame=start_frame.value,
             to_frame=end_frame.value,
         )
 
+        self.frame = end_frame
+
     def _calibrate_and_offset_vectors(
         self,
         mago_calibration: np.ndarray,
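
For orientation, here is a minimal standalone sketch of the control flow the rewritten rotate_frame follows: the MAGO vectors are rotated from the current frame, while the MAGI vectors are rotated from the MAGI frame whenever the data is still in the instrument frame. The frame_transform stand-in below is an identity placeholder, not the package's SPICE-backed implementation, and the frame names are taken from the hunks above.

```python
import numpy as np

MAGO, MAGI, DSRF = "IMAP_MAG_O", "IMAP_MAG_I", "IMAP_DPS"


def frame_transform(et, vectors, from_frame, to_frame):
    """Placeholder for imap_processing.spice.geometry.frame_transform (identity here)."""
    return vectors


def rotate_vectors(frame, end_frame, epoch_et, vectors, magi_epoch_et, magi_vectors):
    """Sketch of MagL1d.rotate_frame: MAGO vectors drive `frame`; MAGI is rotated separately."""
    if MAGI in (frame, end_frame):
        raise ValueError("frame should never be MAGI; instrument-frame data uses MAGO")
    vectors = frame_transform(epoch_et, vectors, from_frame=frame, to_frame=end_frame)
    # MAGI vectors start out in the MAGI frame only while the data is still instrument-frame.
    magi_from = MAGI if frame == MAGO else frame
    magi_vectors = frame_transform(
        magi_epoch_et, magi_vectors, from_frame=magi_from, to_frame=end_frame
    )
    return end_frame, vectors, magi_vectors


frame, v, vi = rotate_vectors(
    MAGO, DSRF, np.zeros(2), np.zeros((2, 3)), np.zeros(2), np.zeros((2, 3))
)
print(frame, v.shape, vi.shape)  # IMAP_DPS (2, 3) (2, 3)
```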

imap_processing/mag/l2/mag_l2.py

@@ -90,6 +90,7 @@ def mag_l2(
     )
     # level 2 vectors don't include range
     vectors = cal_vectors[:, :3]
+    instrument_frame = ValidFrames.MAGO if always_output_mago else ValidFrames.MAGI
 
     l2_data = MagL2(
         vectors=vectors,
@@ -101,6 +102,7 @@
         data_mode=mode,
         offsets=offsets_dataset["offsets"].data,
         timedelta=offsets_dataset["timedeltas"].data,
+        frame=instrument_frame,
     )
 
     attributes = ImapCdfAttributes()

imap_processing/mag/l2/mag_l2_data.py

@@ -20,7 +20,8 @@ from imap_processing.spice.time import (
 class ValidFrames(Enum):
     """SPICE reference frames for output."""
 
-    MAG = SpiceFrame.IMAP_MAG
+    MAGO = SpiceFrame.IMAP_MAG_O
+    MAGI = SpiceFrame.IMAP_MAG_I
     DSRF = SpiceFrame.IMAP_DPS
     SRF = SpiceFrame.IMAP_SPACECRAFT
     GSE = SpiceFrame.IMAP_GSE
@@ -56,7 +57,7 @@ class MagL2L1dBase:
         Quality bitmask for each vector. Should be of length n. Copied from offset
         file in L2, marked as good always in L1D.
     frame:
-        The reference frame of the input vectors. Starts as the MAG instrument frame.
+        The reference frame of the input vectors. Defaults to the MAGO instrument frame.
     epoch_et: np.ndarray
         The epoch timestamps converted to ET format. Used for frame transformations.
         Calculated on first use and then saved. Should not be passed in.
@@ -70,7 +71,7 @@ class MagL2L1dBase:
     quality_bitmask: np.ndarray
     data_mode: DataMode
     magnitude: np.ndarray = field(init=False)
-    frame: ValidFrames = ValidFrames.MAG
+    frame: ValidFrames = ValidFrames.MAGO
     epoch_et: np.ndarray | None = field(init=False, default=None)
 
     def generate_dataset(

imap_processing/quality_flags.py

@@ -43,6 +43,7 @@ class ImapDEOutliersUltraFlags(FlagNameMixin):
     NONE = CommonFlags.NONE
     FOV = 2**0  # bit 0
     PHCORR = 2**1  # bit 1
+    COINPH = 2**2  # bit 4  # Event validity
 
 
 class ImapHkUltraFlags(FlagNameMixin):
@@ -83,6 +84,13 @@ class ImapDEScatteringUltraFlags(FlagNameMixin):
     NAN_PHI_OR_THETA = 2**1  # bit 1
 
 
+class ImapPSETUltraFlags(FlagNameMixin):
+    """IMAP Ultra Rates flags."""
+
+    NONE = CommonFlags.NONE
+    EARTH_FOV = 2**0  # bit 0
+
+
 class ImapInstrumentUltraFlags(FlagNameMixin):
     """IMAP Ultra flags using other instruments."""
 
@@ -131,3 +139,9 @@ class SWAPIFlags(
     SCEM_V_ST = 2**12  # bit 12
     SCEM_I_ST = 2**13  # bit 13
     SCEM_INT_ST = 2**14  # bit 14
+
+
+class GLOWSL1bFlags(FlagNameMixin):
+    """Glows L1b flags."""
+
+    NONE = CommonFlags.NONE
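
The new flag classes follow the module's existing bitmask convention, so downstream code tests a flag with a bitwise AND. A small illustrative sketch (a plain IntFlag stands in for the package's FlagNameMixin/CommonFlags plumbing; the EARTH_FOV bit value comes from the hunk above):

```python
from enum import IntFlag

import numpy as np


class ImapPSETUltraFlags(IntFlag):
    """Illustrative stand-in for the new PSET quality flags."""

    NONE = 0
    EARTH_FOV = 2**0  # bit 0


quality = np.array([0, 1, 0, 1], dtype=np.uint16)  # per-bin PSET quality bitmask
earth_in_fov = (quality & ImapPSETUltraFlags.EARTH_FOV) != 0
print(earth_in_fov)  # [False  True False  True]
```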

imap_processing/spice/geometry.py

@@ -21,7 +21,7 @@ from numpy.typing import NDArray
 class SpiceBody(IntEnum):
     """Enum containing SPICE IDs for bodies that we use."""
 
-    # A subset of IMAP Specific bodies as defined in imap_wkcp.tf
+    # A subset of IMAP Specific bodies as defined in imap_001.tf
     IMAP = -43
     IMAP_SPACECRAFT = -43000
     # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
@@ -33,7 +33,7 @@ class SpiceBody(IntEnum):
 
 
 class SpiceFrame(IntEnum):
-    """SPICE IDs for reference frames in imap_wkcp.tf and imap_science_xxx.tf."""
+    """SPICE IDs for reference frames in imap_###.tf and imap_science_xxx.tf."""
 
     # Standard SPICE Frames
     J2000 = spiceypy.irfnum("J2000")
@@ -41,7 +41,7 @@ class SpiceFrame(IntEnum):
     ITRF93 = 13000
     # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
     IMAP_DPS = -43901
-    # IMAP specific as defined in imap_wkcp.tf
+    # IMAP specific as defined in imap_###.tf
     IMAP_SPACECRAFT = -43000
     IMAP_LO_BASE = -43100
     IMAP_LO_STAR_SENSOR = -43103
@@ -50,13 +50,17 @@ class SpiceFrame(IntEnum):
     IMAP_HI_90 = -43160
     IMAP_ULTRA_45 = -43200
     IMAP_ULTRA_90 = -43210
-    IMAP_MAG = -43250
+    # TODO: remove IMAP_MAG frame once all usages have been removed
+    IMAP_MAG = -43999
+    IMAP_MAG_BOOM = -43250
+    IMAP_MAG_I = -43251
+    IMAP_MAG_O = -43252
     IMAP_SWE = -43300
     IMAP_SWAPI = -43350
     IMAP_CODICE = -43400
     IMAP_HIT = -43500
     IMAP_IDEX = -43700
-    IMAP_GLOWS = -43750
+    IMAP_GLOWS = -43751
 
     # IMAP Science Frames (new additions from imap_science_xxx.tf)
     IMAP_OMD = -43900
@@ -87,7 +91,8 @@ BORESIGHT_LOOKUP = {
     SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
     SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
     SpiceFrame.IMAP_ULTRA_90: np.array([0, 0, 1]),
-    SpiceFrame.IMAP_MAG: np.array([0, 0, 1]),
+    SpiceFrame.IMAP_MAG_I: np.array([0, 0, 1]),
+    SpiceFrame.IMAP_MAG_O: np.array([0, 0, 1]),
     SpiceFrame.IMAP_SWE: np.array([-1, 0, 0]),
     SpiceFrame.IMAP_SWAPI: np.array([0, 1, 0]),
     SpiceFrame.IMAP_CODICE: np.array([0, 0, 1]),
@@ -162,7 +167,8 @@ def get_instrument_mounting_az_el(instrument: SpiceFrame) -> np.ndarray:
         SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
         SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
         SpiceFrame.IMAP_ULTRA_90: np.array([0, 0, 1]),
-        SpiceFrame.IMAP_MAG: np.array([-1, 0, 0]),
+        SpiceFrame.IMAP_MAG_I: np.array([-1, 0, 0]),
+        SpiceFrame.IMAP_MAG_O: np.array([-1, 0, 0]),
         SpiceFrame.IMAP_SWE: np.array([-1, 0, 0]),
         SpiceFrame.IMAP_SWAPI: np.array([0, 0, -1]),
         SpiceFrame.IMAP_CODICE: np.array([-1, 0, 0]),
@@ -214,7 +220,8 @@ def get_spacecraft_to_instrument_spin_phase_offset(instrument: SpiceFrame) -> fl
         SpiceFrame.IMAP_HIT: 120 / 360,  # 30 + 90 = 120
         SpiceFrame.IMAP_SWE: 243 / 360,  # 153 + 90 = 243
         SpiceFrame.IMAP_GLOWS: 217 / 360,  # 127 + 90 = 217
-        SpiceFrame.IMAP_MAG: 90 / 360,  # 0 + 90 = 90
+        SpiceFrame.IMAP_MAG_I: 90 / 360,  # 0 + 90 = 90
+        SpiceFrame.IMAP_MAG_O: 90 / 360,  # 0 + 90 = 90
     }
     return phase_offset_lookup[instrument]
 

imap_processing/spice/pointing_frame.py

@@ -195,7 +195,7 @@ def calculate_pointing_attitude_segments(
 
     - Latest NAIF leapseconds kernel (naif0012.tls)
     - The latest IMAP sclk (imap_sclk_NNNN.tsc)
-    - The latest IMAP frame kernel (imap_wkcp.tf)
+    - The latest IMAP frame kernel (imap_###.tf)
     - IMAP DPS frame kernel (imap_science_100.tf)
     - IMAP historical attitude kernel from which the pointing frame kernel will
       be generated.
@@ -210,7 +210,9 @@
     count = spiceypy.ktotal("ck")
     loaded_ck_kernel, _, _, _ = spiceypy.kdata(count - 1, "ck")
     if str(ck_path) != loaded_ck_kernel:
-        raise ValueError(f"Error: Expected CK kernel {ck_path}")
+        raise ValueError(
+            f"Error: Expected CK kernel {ck_path} but loaded {loaded_ck_kernel}"
+        )
 
     id_imap_spacecraft = spiceypy.gipool("FRAME_IMAP_SPACECRAFT", 0, 1)
 

imap_processing/spice/repoint.py

@@ -9,6 +9,8 @@ import pandas as pd
 from numpy import typing as npt
 
 from imap_processing.spice import config
+from imap_processing.spice.geometry import imap_state
+from imap_processing.spice.time import met_to_sclkticks, sct_to_et
 
 logger = logging.getLogger(__name__)
 
@@ -221,3 +223,50 @@ def get_pointing_times(met_time: float) -> tuple[float, float]:
     ][0]
     pointing_end_met = repoint_df["repoint_start_met"].iloc[pointing_idx + 1].item()
     return pointing_start_met, pointing_end_met
+
+
+def get_pointing_mid_time(met_time: float) -> float:
+    """
+    Get mid-point of the pointing for the given MET time.
+
+    Get the mid-point time between the end of one repoint and
+    start of the next. Input could be a MET time.
+
+    Parameters
+    ----------
+    met_time : float
+        The MET time in a repoint.
+
+    Returns
+    -------
+    repoint_mid_time : float
+        The mid MET time of the repoint maneuver.
+    """
+    pointing_start_met, pointing_end_met = get_pointing_times(met_time)
+    return (pointing_start_met + pointing_end_met) / 2
+
+
+def get_mid_point_state(met_time: float) -> npt.NDArray:
+    """
+    Get IMAP state for the mid-point.
+
+    Get IMAP state for the mid-point of the pointing in
+    reference frame, ECLIPJ2000 and observer, SUN.
+
+    Parameters
+    ----------
+    met_time : float
+        The MET time in a pointing.
+
+    Returns
+    -------
+    mid_point_state : numpy.ndarray
+        The mid state of the pointing maneuver.
+    """
+    # Get mid point time in ET
+    mid_point_time = get_pointing_mid_time(met_time)
+    mid_point_time_et = sct_to_et(met_to_sclkticks(mid_point_time))
+
+    # Convert mid point time to state
+    pointing_state = imap_state(mid_point_time_et)
+    return pointing_state
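
A hedged sketch of the mid-point logic added above: the pointing spans from the end of one repoint to the start of the next, and the midpoint of that interval is what gets converted to ephemeris time and passed to imap_state. The toy repoint table and column handling below are illustrative assumptions, not the package's actual loader.

```python
import numpy as np
import pandas as pd

# Toy repoint table; the real one is loaded from the repoint ancillary file.
repoint_df = pd.DataFrame(
    {
        "repoint_start_met": [1000.0, 2000.0, 3000.0],
        "repoint_end_met": [1100.0, 2100.0, 3100.0],
    }
)


def pointing_mid_time(met_time: float) -> float:
    """Midpoint between the end of one repoint and the start of the next (sketch)."""
    starts = repoint_df["repoint_start_met"].to_numpy()
    ends = repoint_df["repoint_end_met"].to_numpy()
    idx = int(np.searchsorted(starts, met_time, side="right")) - 1
    pointing_start_met, pointing_end_met = ends[idx], starts[idx + 1]
    return (pointing_start_met + pointing_end_met) / 2


print(pointing_mid_time(1500.0))  # 1550.0: halfway between MET 1100 and MET 2000
```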

imap_processing/ultra/constants.py

@@ -80,6 +80,14 @@ class UltraConstants:
 
     # Thresholds for culling based on counts (keV).
     CULLING_ENERGY_BIN_EDGES: ClassVar[list] = [
+        3.0,
+        10.0,
+        20.0,
+        50.0,
+        300.0,
+        1e5,
+    ]
+    PSET_ENERGY_BIN_EDGES: ClassVar[list] = [
         3.385,
         4.13722222222222,
         5.05660493827161,
@@ -106,3 +114,24 @@ class UltraConstants:
         341.989454569026,
         1e5,
     ]
+
+    # Valid event filter constants
+    # Note these appear similar to image params constants
+    # but they should be used only for the valid event filter.
+    ETOFOFF1_EVENTFILTER = 100
+    ETOFOFF2_EVENTFILTER = -50
+    ETOFSLOPE1_EVENTFILTER = 6667
+    ETOFSLOPE2_EVENTFILTER = 7500
+    ETOFMAX_EVENTFILTER = 90
+    ETOFMIN_EVENTFILTER = -400
+    TOFDIFFTPMIN_EVENTFILTER = 226
+    TOFDIFFTPMAX_EVENTFILTER = 266
+
+    TOFXE_SPECIES_GROUPS: ClassVar[dict[str, list[int]]] = {
+        "proton": [3],
+        "non_proton": [20, 28, 36],
+    }
+    TOFXPH_SPECIES_GROUPS: ClassVar[dict[str, list[int]]] = {
+        "proton": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+        "non_proton": [20, 21, 22, 23, 24, 25, 26],
+    }
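
For reference, edge lists like the new CULLING_ENERGY_BIN_EDGES are typically consumed by binning event energies against them; the np.digitize call below is an illustrative pattern, not necessarily the exact call the package makes.

```python
import numpy as np

# keV edges copied from the hunk above.
CULLING_ENERGY_BIN_EDGES = [3.0, 10.0, 20.0, 50.0, 300.0, 1e5]

energies_kev = np.array([4.2, 15.0, 75.0, 5000.0])
# Bin i spans edges[i] <= E < edges[i + 1]; energies below 3 keV would map to -1.
bin_index = np.digitize(energies_kev, CULLING_ENERGY_BIN_EDGES) - 1
print(bin_index)  # [0 1 3 4]
```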

imap_processing/ultra/l1b/badtimes.py

@@ -7,13 +7,14 @@
 from imap_processing.ultra.utils.ultra_l1_utils import create_dataset, extract_data_dict
 
 FILLVAL_UINT16 = 65535
+FILLVAL_FLOAT32 = -1.0e31
 FILLVAL_FLOAT64 = -1.0e31
 FILLVAL_UINT32 = 4294967295
 
 
 def calculate_badtimes(
     extendedspin_dataset: xr.Dataset,
-    cullingmask_spins: NDArray,
+    goodtimes_spins: NDArray,
     name: str,
 ) -> xr.Dataset:
     """
@@ -23,7 +24,7 @@
     ----------
     extendedspin_dataset : xarray.Dataset
         Dataset containing the data.
-    cullingmask_spins : np.ndarray
+    goodtimes_spins : np.ndarray
         Dataset containing the culled data.
     name : str
         Name of the dataset.
@@ -33,11 +34,9 @@
     badtimes_dataset : xarray.Dataset
         Dataset containing the extendedspin data that has been culled.
     """
+    n_bins = extendedspin_dataset.dims["energy_bin_geometric_mean"]
     culled_spins = np.setdiff1d(
-        extendedspin_dataset["spin_number"].values, cullingmask_spins
-    )
-    extendedspin_dataset = extendedspin_dataset.assign_coords(
-        epoch=("spin_number", extendedspin_dataset["epoch"].values)
+        extendedspin_dataset["spin_number"].values, goodtimes_spins
     )
     filtered_dataset = extendedspin_dataset.sel(spin_number=culled_spins)
 
@@ -48,9 +47,6 @@
     if badtimes_dataset["spin_number"].size == 0:
         badtimes_dataset = badtimes_dataset.drop_dims("spin_number")
         badtimes_dataset = badtimes_dataset.expand_dims(spin_number=[FILLVAL_UINT32])
-        badtimes_dataset = badtimes_dataset.assign_coords(
-            epoch=("spin_number", [extendedspin_dataset["epoch"].values[0]])
-        )
         badtimes_dataset["spin_start_time"] = xr.DataArray(
             np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
         )
@@ -60,16 +56,44 @@
         badtimes_dataset["spin_rate"] = xr.DataArray(
             np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
         )
+        badtimes_dataset["start_pulses_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT32], dtype="float32"),
+            dims=["spin_number"],
+        )
+        badtimes_dataset["stop_pulses_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT32], dtype="float32"),
+            dims=["spin_number"],
+        )
+        badtimes_dataset["coin_pulses_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT32], dtype="float32"),
+            dims=["spin_number"],
+        )
+        badtimes_dataset["rejected_events_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_UINT32], dtype="uint32"),
+            dims=["spin_number"],
+        )
         badtimes_dataset["quality_attitude"] = xr.DataArray(
             np.array([FILLVAL_UINT16], dtype="uint16"), dims=["spin_number"]
         )
+        badtimes_dataset["quality_hk"] = xr.DataArray(
+            np.array([FILLVAL_UINT16], dtype="uint16"),
+            dims=["spin_number"],
+        )
+        badtimes_dataset["quality_instruments"] = xr.DataArray(
+            np.array([FILLVAL_UINT16], dtype="uint16"),
+            dims=["spin_number"],
+        )
         badtimes_dataset["quality_ena_rates"] = (
             ("energy_bin_geometric_mean", "spin_number"),
-            np.full((3, 1), FILLVAL_UINT16, dtype="uint16"),
+            np.full((n_bins, 1), FILLVAL_UINT16, dtype="uint16"),
         )
         badtimes_dataset["ena_rates"] = (
             ("energy_bin_geometric_mean", "spin_number"),
-            np.full((3, 1), FILLVAL_FLOAT64, dtype="float64"),
+            np.full((n_bins, 1), FILLVAL_FLOAT64, dtype="float64"),
+        )
+        badtimes_dataset["ena_rates_threshold"] = (
+            ("energy_bin_geometric_mean", "spin_number"),
+            np.full((n_bins, 1), FILLVAL_FLOAT32, dtype="float32"),
         )
 
     return badtimes_dataset
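
The culling logic above reduces to a set difference followed by a coordinate selection: spins present in the extended-spin product but absent from the goodtimes list become the badtimes spins. A small self-contained sketch of that selection (the dataset layout is a simplified stand-in, not the real product):

```python
import numpy as np
import xarray as xr

# Simplified stand-in for the extended-spin dataset.
extendedspin = xr.Dataset(
    {"spin_rate": ("spin_number", [4.1, 4.2, 4.0, 4.3])},
    coords={"spin_number": [10, 11, 12, 13]},
)
goodtimes_spins = np.array([10, 12])

# Spins that were culled are everything not in the goodtimes list.
culled_spins = np.setdiff1d(extendedspin["spin_number"].values, goodtimes_spins)
badtimes = extendedspin.sel(spin_number=culled_spins)
print(badtimes["spin_number"].values)  # [11 13]
```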

imap_processing/ultra/l1b/de.py

@@ -42,6 +42,7 @@ from imap_processing.ultra.l1b.ultra_l1b_extended import (
 from imap_processing.ultra.utils.ultra_l1_utils import create_dataset
 
 FILLVAL_UINT8 = 255
+FILLVAL_UINT32 = 4294967295
 FILLVAL_FLOAT32 = -1.0e31
 
 
@@ -82,7 +83,6 @@ def calculate_de(
         "event_type",
         "de_event_met",
         "phase_angle",
-        "spin",
     ]
     dataset_keys = [
         "coin_type",
@@ -90,7 +90,6 @@
         "stop_type",
         "shcoarse",
         "phase_angle",
-        "spin",
     ]
 
     de_dict.update(
@@ -127,6 +126,7 @@
     magnitude_v = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     energy = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     e_bin = np.full(len(de_dataset["epoch"]), FILLVAL_UINT8, dtype=np.uint8)
+    e_bin_l1a = np.full(len(de_dataset["epoch"]), FILLVAL_UINT8, dtype=np.uint8)
     species_bin = np.full(len(de_dataset["epoch"]), FILLVAL_UINT8, dtype=np.uint8)
     t2 = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     event_times = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
@@ -143,6 +143,7 @@
     quality_flags = np.full(
         de_dataset["epoch"].shape, ImapDEOutliersUltraFlags.NONE.value, dtype=np.uint16
     )
+
     scattering_quality_flags = np.full(
         de_dataset["epoch"].shape,
         ImapDEScatteringUltraFlags.NONE.value,
@@ -196,7 +197,6 @@
         (xb[ph_indices], yb[ph_indices]),
         d[ph_indices],
     )
-    species_bin[ph_indices] = determine_species(tof[ph_indices], r[ph_indices], "PH")
     etof[ph_indices], xc[ph_indices] = get_coincidence_positions(
         de_dataset.isel(epoch=ph_indices),
         t2[ph_indices],
@@ -213,8 +213,13 @@
         etof[ph_indices],
         xc[ph_indices],
         xb[ph_indices],
+        de_dataset["stop_north_tdc"][ph_indices].values,
+        de_dataset["stop_south_tdc"][ph_indices].values,
+        de_dataset["stop_east_tdc"][ph_indices].values,
+        de_dataset["stop_west_tdc"][ph_indices].values,
         f"ultra{sensor}",
         ancillary_files,
+        quality_flags[ph_indices],
     )
     e_bin[ph_indices] = determine_ebin_pulse_height(
         energy[ph_indices],
@@ -224,6 +229,7 @@
         coinphvalid,
         ancillary_files,
     )
+    species_bin[ph_indices] = determine_species(e_bin[ph_indices], "PH")
    ctof[ph_indices], magnitude_v[ph_indices] = get_ctof(
        tof[ph_indices], r[ph_indices], "PH"
    )
@@ -257,9 +263,7 @@
         f"ultra{sensor}",
         ancillary_files,
     )
-    species_bin[ssd_indices] = determine_species(
-        tof[ssd_indices], r[ssd_indices], "SSD"
-    )
+    species_bin[ssd_indices] = determine_species(e_bin[ssd_indices], "SSD")
     ctof[ssd_indices], magnitude_v[ssd_indices] = get_ctof(
         tof[ssd_indices], r[ssd_indices], "SSD"
     )
@@ -289,7 +293,6 @@
             de_dict["tof_start_stop"][valid_indices],
         )
     )
-    de_dict["direct_event_velocity"] = velocities.astype(np.float32)
     de_dict["direct_event_unit_velocity"] = v_hat.astype(np.float32)
     de_dict["direct_event_unit_position"] = r_hat.astype(np.float32)
 
@@ -298,7 +301,10 @@
     )
     de_dict["tof_energy"] = tof_energy
     de_dict["energy"] = energy
-    de_dict["ebin"] = e_bin
+    de_dict["computed_ebin"] = e_bin
+    valid_ebin = de_dataset["bin"].values != FILLVAL_UINT32
+    e_bin_l1a[valid_ebin] = de_dataset["bin"].values[valid_ebin]
+    de_dict["ebin"] = e_bin_l1a
     de_dict["species"] = species_bin
 
     # Annotated Events.
@@ -313,7 +319,7 @@
         helio_velocity[valid_events],
     ) = get_annotated_particle_velocity(
         event_times[valid_events],
-        de_dict["direct_event_velocity"][valid_events],
+        velocities.astype(np.float32)[valid_events],
         ultra_frame,
         SpiceFrame.IMAP_DPS,
         SpiceFrame.IMAP_SPACECRAFT,
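
The ebin change above makes the L1A-telemetered bin authoritative: the value computed in L1B is kept under computed_ebin, while ebin is filled from the L1A bin wherever it is not the uint32 fill value. A minimal numpy sketch of that masking (array contents are made up for illustration):

```python
import numpy as np

FILLVAL_UINT8 = 255
FILLVAL_UINT32 = 4294967295

computed_ebin = np.array([2, 5, 7, 1], dtype=np.uint8)          # derived in L1B
l1a_bin = np.array([2, FILLVAL_UINT32, 6, 1], dtype=np.uint32)  # telemetered in L1A

e_bin_l1a = np.full(l1a_bin.shape, FILLVAL_UINT8, dtype=np.uint8)
valid_ebin = l1a_bin != FILLVAL_UINT32
e_bin_l1a[valid_ebin] = l1a_bin[valid_ebin]

print(e_bin_l1a)  # [2 255 6 1]: uint8 fill value where L1A provided no bin
```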

imap_processing/ultra/l1b/extendedspin.py

@@ -2,6 +2,7 @@
 
 import numpy as np
 import xarray as xr
+from numpy.typing import NDArray
 
 from imap_processing.ultra.l1b.ultra_l1b_culling import (
     count_rejected_events_per_spin,
@@ -15,6 +16,7 @@ from imap_processing.ultra.l1b.ultra_l1b_culling import (
 from imap_processing.ultra.utils.ultra_l1_utils import create_dataset
 
 FILLVAL_UINT16 = 65535
+FILLVAL_FLOAT32 = -1.0e31
 
 
 def calculate_extendedspin(
@@ -44,7 +46,7 @@
     de_dataset = dict_datasets[f"imap_ultra_l1b_{instrument_id}sensor-de"]
 
     extendedspin_dict = {}
-    rates_qf, spin, energy_midpoints, n_sigma_per_energy = flag_rates(
+    rates_qf, spin, energy_bin_geometric_mean, n_sigma_per_energy = flag_rates(
         de_dataset["spin"].values,
         de_dataset["energy"].values,
     )
@@ -58,12 +60,6 @@
     hk_qf = flag_hk(de_dataset["spin"].values)
     inst_qf = flag_imap_instruments(de_dataset["spin"].values)
 
-    # Get the first epoch for each spin.
-    mask = xr.DataArray(np.isin(de_dataset["spin"], spin), dims="epoch")
-    filtered_dataset = de_dataset.where(mask, drop=True)
-    _, first_indices = np.unique(filtered_dataset["spin"].values, return_index=True)
-    first_epochs = filtered_dataset["epoch"].values[first_indices]
-
     # Get the number of pulses per spin.
     pulses = get_pulses_per_spin(rates_dataset)
 
@@ -75,18 +71,34 @@
         de_dataset["quality_outliers"].values,
     )
     # These will be the coordinates.
-    extendedspin_dict["epoch"] = first_epochs
     extendedspin_dict["spin_number"] = spin
-    extendedspin_dict["energy_bin_geometric_mean"] = energy_midpoints
+    extendedspin_dict["energy_bin_geometric_mean"] = energy_bin_geometric_mean
 
     extendedspin_dict["ena_rates"] = count_rates
     extendedspin_dict["ena_rates_threshold"] = n_sigma_per_energy
     extendedspin_dict["spin_start_time"] = spin_starttime
     extendedspin_dict["spin_period"] = spin_period
     extendedspin_dict["spin_rate"] = spin_rates
-    extendedspin_dict["start_pulses_per_spin"] = pulses.start_per_spin
-    extendedspin_dict["stop_pulses_per_spin"] = pulses.stop_per_spin
-    extendedspin_dict["coin_pulses_per_spin"] = pulses.coin_per_spin
+
+    # Get index of pulses.unique_spins corresponding to each spin.
+    idx: NDArray[np.intp] = np.searchsorted(pulses.unique_spins, spin)
+
+    # Validate that the spin values match
+    valid = (idx < pulses.unique_spins.size) & (pulses.unique_spins[idx] == spin)
+
+    start_per_spin = np.full(len(spin), FILLVAL_FLOAT32, dtype=np.float32)
+    stop_per_spin = np.full(len(spin), FILLVAL_FLOAT32, dtype=np.float32)
+    coin_per_spin = np.full(len(spin), FILLVAL_FLOAT32, dtype=np.float32)
+
+    # Fill only the valid ones
+    start_per_spin[valid] = pulses.start_per_spin[idx[valid]]
+    stop_per_spin[valid] = pulses.stop_per_spin[idx[valid]]
+    coin_per_spin[valid] = pulses.coin_per_spin[idx[valid]]
+
+    # account for rates spins which are not in the direct event spins
+    extendedspin_dict["start_pulses_per_spin"] = start_per_spin
+    extendedspin_dict["stop_pulses_per_spin"] = stop_per_spin
+    extendedspin_dict["coin_pulses_per_spin"] = coin_per_spin
     extendedspin_dict["rejected_events_per_spin"] = rejected_counts
     extendedspin_dict["quality_attitude"] = attitude_qf
     extendedspin_dict["quality_ena_rates"] = rates_qf
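
The pulses-per-spin change above is a sorted-lookup alignment: each extended-spin number is located in pulses.unique_spins with searchsorted, counts are copied only where the spin numbers actually match, and everything else keeps the fill value. A standalone sketch of the same pattern (the np.clip guard is an extra safety added for the sketch, not taken from the package):

```python
import numpy as np

FILLVAL_FLOAT32 = -1.0e31

spin = np.array([10, 11, 12])             # spins present in the direct-event data
unique_spins = np.array([10, 12, 13])     # spins that have rates/pulse data
start_counts = np.array([5.0, 7.0, 9.0])  # start pulses per spin, aligned with unique_spins

# Locate each spin in the sorted rates spins, then keep only exact matches.
idx = np.searchsorted(unique_spins, spin)
safe_idx = np.clip(idx, 0, unique_spins.size - 1)
valid = (idx < unique_spins.size) & (unique_spins[safe_idx] == spin)

start_per_spin = np.full(spin.size, FILLVAL_FLOAT32, dtype=np.float32)
start_per_spin[valid] = start_counts[idx[valid]]
print(start_per_spin)  # spin 11 has no rates data, so index 1 keeps the fill value
```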