imap-processing 0.19.0__py3-none-any.whl → 0.19.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (73)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +31 -894
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +279 -255
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +55 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +29 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +32 -0
  8. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +3 -1
  9. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  10. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +28 -16
  11. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -31
  12. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +61 -1
  13. imap_processing/cli.py +62 -71
  14. imap_processing/codice/codice_l0.py +2 -1
  15. imap_processing/codice/codice_l1a.py +47 -49
  16. imap_processing/codice/codice_l1b.py +42 -32
  17. imap_processing/codice/codice_l2.py +105 -7
  18. imap_processing/codice/constants.py +50 -8
  19. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  20. imap_processing/ena_maps/ena_maps.py +39 -18
  21. imap_processing/ena_maps/utils/corrections.py +291 -0
  22. imap_processing/ena_maps/utils/map_utils.py +20 -4
  23. imap_processing/glows/l1b/glows_l1b.py +38 -23
  24. imap_processing/glows/l1b/glows_l1b_data.py +10 -11
  25. imap_processing/hi/hi_l1c.py +4 -109
  26. imap_processing/hi/hi_l2.py +34 -23
  27. imap_processing/hi/utils.py +109 -0
  28. imap_processing/ialirt/l0/ialirt_spice.py +1 -1
  29. imap_processing/ialirt/l0/parse_mag.py +18 -4
  30. imap_processing/ialirt/l0/process_hit.py +9 -4
  31. imap_processing/ialirt/l0/process_swapi.py +9 -4
  32. imap_processing/ialirt/l0/process_swe.py +9 -4
  33. imap_processing/ialirt/utils/create_xarray.py +1 -1
  34. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  35. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  36. imap_processing/lo/l1b/lo_l1b.py +90 -16
  37. imap_processing/lo/l1c/lo_l1c.py +164 -50
  38. imap_processing/lo/l2/lo_l2.py +941 -127
  39. imap_processing/mag/l1d/mag_l1d_data.py +36 -3
  40. imap_processing/mag/l2/mag_l2.py +2 -0
  41. imap_processing/mag/l2/mag_l2_data.py +4 -3
  42. imap_processing/quality_flags.py +14 -0
  43. imap_processing/spice/geometry.py +13 -8
  44. imap_processing/spice/pointing_frame.py +4 -2
  45. imap_processing/spice/repoint.py +49 -0
  46. imap_processing/ultra/constants.py +29 -0
  47. imap_processing/ultra/l0/decom_tools.py +58 -46
  48. imap_processing/ultra/l0/decom_ultra.py +21 -9
  49. imap_processing/ultra/l0/ultra_utils.py +4 -4
  50. imap_processing/ultra/l1b/badtimes.py +35 -11
  51. imap_processing/ultra/l1b/de.py +15 -9
  52. imap_processing/ultra/l1b/extendedspin.py +24 -12
  53. imap_processing/ultra/l1b/goodtimes.py +112 -0
  54. imap_processing/ultra/l1b/lookup_utils.py +1 -1
  55. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  56. imap_processing/ultra/l1b/ultra_l1b_culling.py +8 -4
  57. imap_processing/ultra/l1b/ultra_l1b_extended.py +79 -43
  58. imap_processing/ultra/l1c/helio_pset.py +68 -39
  59. imap_processing/ultra/l1c/l1c_lookup_utils.py +45 -12
  60. imap_processing/ultra/l1c/spacecraft_pset.py +81 -37
  61. imap_processing/ultra/l1c/ultra_l1c.py +27 -22
  62. imap_processing/ultra/l1c/ultra_l1c_culling.py +7 -0
  63. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +41 -41
  64. imap_processing/ultra/l2/ultra_l2.py +75 -18
  65. imap_processing/ultra/utils/ultra_l1_utils.py +10 -5
  66. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/METADATA +2 -2
  67. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/RECORD +71 -69
  68. imap_processing/ultra/l1b/cullingmask.py +0 -90
  69. imap_processing/ultra/l1c/histogram.py +0 -36
  70. /imap_processing/glows/ancillary/{imap_glows_pipeline_settings_20250923_v002.json → imap_glows_pipeline-settings_20250923_v002.json} +0 -0
  71. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/LICENSE +0 -0
  72. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/WHEEL +0 -0
  73. {imap_processing-0.19.0.dist-info → imap_processing-0.19.3.dist-info}/entry_points.txt +0 -0
@@ -1,6 +1,7 @@
  # mypy: disable-error-code="unused-ignore"
  """Data classes for MAG L1D processing."""

+ import logging
  from dataclasses import InitVar, dataclass

  import numpy as np
@@ -14,7 +15,9 @@ from imap_processing.mag.l2.mag_l2 import retrieve_matrix_from_l2_calibration
  from imap_processing.mag.l2.mag_l2_data import MagL2L1dBase, ValidFrames
  from imap_processing.spice import spin
  from imap_processing.spice.geometry import frame_transform
- from imap_processing.spice.time import ttj2000ns_to_met
+ from imap_processing.spice.time import ttj2000ns_to_et, ttj2000ns_to_met
+
+ logger = logging.getLogger(__name__)


  @dataclass
@@ -166,6 +169,9 @@ class MagL1d(MagL2L1dBase): # type: ignore[misc]
  The day we are processing, in np.datetime64[D] format. This is used to
  truncate the data to exactly 24 hours.
  """
+ # The main data frame is MAGO, even though we have MAGI data included.
+ self.frame = ValidFrames.MAGO
+
  # set the magnitude before truncating
  self.magnitude = np.zeros(self.vectors.shape[0], dtype=np.float64) # type: ignore[has-type]
  self.truncate_to_24h(day)
@@ -272,15 +278,42 @@ class MagL1d(MagL2L1dBase): # type: ignore[misc]
  end_frame : ValidFrames
  The frame to rotate to. Should be one of the ValidFrames enum.
  """
+ # Self.frame should refer to the main data in self.vectors, which is MAGO
+ # data. For most frames, MAGO and MAGI are in the same frame, except the
+ # instrument reference frame.
+ if ValidFrames.MAGI in (self.frame, end_frame):
+ raise ValueError(
+ "MAGL1d.frame should never be equal to MAGI frame. If the "
+ "data is in the instrument frame, use MAGO."
+ )
+
  start_frame = self.frame
- super().rotate_frame(end_frame)
+
+ if self.epoch_et is None:
+ self.epoch_et: np.ndarray = ttj2000ns_to_et(self.epoch)
+ self.magi_epoch_et: np.ndarray = ttj2000ns_to_et(self.magi_epoch)
+
+ self.vectors = frame_transform(
+ self.epoch_et,
+ self.vectors,
+ from_frame=start_frame.value,
+ to_frame=end_frame.value,
+ )
+
+ # If we were in MAGO frame, we need to rotate MAGI vectors from MAGI to
+ # end_frame
+ if start_frame == ValidFrames.MAGO:
+ start_frame = ValidFrames.MAGI
+
  self.magi_vectors = frame_transform(
- self.magi_epoch,
+ self.magi_epoch_et,
  self.magi_vectors,
  from_frame=start_frame.value,
  to_frame=end_frame.value,
  )

+ self.frame = end_frame
+
  def _calibrate_and_offset_vectors(
  self,
  mago_calibration: np.ndarray,
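
The reworked rotate_frame transforms the MAGO vectors from the current frame and the MAGI vectors from the MAGI frame into the same end_frame before updating self.frame. A minimal numpy sketch of the batched per-epoch rotation that frame_transform performs; identity matrices stand in for the real SPICE rotations, so this is illustrative only and not the package's implementation:

import numpy as np

def batched_rotate(epochs_et: np.ndarray, vectors: np.ndarray) -> np.ndarray:
    # One 3x3 rotation matrix per epoch; identities are used here so the sketch
    # runs without SPICE kernels (real matrices depend on from/to frame and ET).
    matrices = np.broadcast_to(np.eye(3), (epochs_et.size, 3, 3))
    return np.einsum("nij,nj->ni", matrices, vectors)

epochs_et = np.linspace(0.0, 10.0, 5)
mago_vectors = np.random.default_rng(0).normal(size=(5, 3))
print(batched_rotate(epochs_et, mago_vectors).shape)  # (5, 3)
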
@@ -90,6 +90,7 @@ def mag_l2(
  )
  # level 2 vectors don't include range
  vectors = cal_vectors[:, :3]
+ instrument_frame = ValidFrames.MAGO if always_output_mago else ValidFrames.MAGI

  l2_data = MagL2(
  vectors=vectors,
@@ -101,6 +102,7 @@ def mag_l2(
  data_mode=mode,
  offsets=offsets_dataset["offsets"].data,
  timedelta=offsets_dataset["timedeltas"].data,
+ frame=instrument_frame,
  )

  attributes = ImapCdfAttributes()
@@ -20,7 +20,8 @@ from imap_processing.spice.time import (
  class ValidFrames(Enum):
  """SPICE reference frames for output."""

- MAG = SpiceFrame.IMAP_MAG
+ MAGO = SpiceFrame.IMAP_MAG_O
+ MAGI = SpiceFrame.IMAP_MAG_I
  DSRF = SpiceFrame.IMAP_DPS
  SRF = SpiceFrame.IMAP_SPACECRAFT
  GSE = SpiceFrame.IMAP_GSE
@@ -56,7 +57,7 @@ class MagL2L1dBase:
  Quality bitmask for each vector. Should be of length n. Copied from offset
  file in L2, marked as good always in L1D.
  frame:
- The reference frame of the input vectors. Starts as the MAG instrument frame.
+ The reference frame of the input vectors. Defaults to the MAGO instrument frame.
  epoch_et: np.ndarray
  The epoch timestamps converted to ET format. Used for frame transformations.
  Calculated on first use and then saved. Should not be passed in.
@@ -70,7 +71,7 @@ class MagL2L1dBase:
  quality_bitmask: np.ndarray
  data_mode: DataMode
  magnitude: np.ndarray = field(init=False)
- frame: ValidFrames = ValidFrames.MAG
+ frame: ValidFrames = ValidFrames.MAGO
  epoch_et: np.ndarray | None = field(init=False, default=None)

  def generate_dataset(
@@ -43,6 +43,7 @@ class ImapDEOutliersUltraFlags(FlagNameMixin):
  NONE = CommonFlags.NONE
  FOV = 2**0 # bit 0
  PHCORR = 2**1 # bit 1
+ COINPH = 2**2 # bit 4 # Event validity


  class ImapHkUltraFlags(FlagNameMixin):
@@ -83,6 +84,13 @@ class ImapDEScatteringUltraFlags(FlagNameMixin):
  NAN_PHI_OR_THETA = 2**1 # bit 1


+ class ImapPSETUltraFlags(FlagNameMixin):
+ """IMAP Ultra Rates flags."""
+
+ NONE = CommonFlags.NONE
+ EARTH_FOV = 2**0 # bit 0
+
+
  class ImapInstrumentUltraFlags(FlagNameMixin):
  """IMAP Ultra flags using other instruments."""

@@ -131,3 +139,9 @@ class SWAPIFlags(
  SCEM_V_ST = 2**12 # bit 12
  SCEM_I_ST = 2**13 # bit 13
  SCEM_INT_ST = 2**14 # bit 14
+
+
+ class GLOWSL1bFlags(FlagNameMixin):
+ """Glows L1b flags."""
+
+ NONE = CommonFlags.NONE
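
The flag classes above assign power-of-two bit values, so quality bitmasks are combined and tested with bitwise operations. A hedged sketch using plain integers in place of the FlagNameMixin enums:

# Hypothetical integer bit values mirroring the definitions above.
FOV = 2**0
PHCORR = 2**1
COINPH = 2**2

bitmask = FOV | COINPH         # an event flagged for FOV and coincidence pulse height
print(bool(bitmask & COINPH))  # True
print(bool(bitmask & PHCORR))  # False
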
@@ -21,7 +21,7 @@ from numpy.typing import NDArray
  class SpiceBody(IntEnum):
  """Enum containing SPICE IDs for bodies that we use."""

- # A subset of IMAP Specific bodies as defined in imap_wkcp.tf
+ # A subset of IMAP Specific bodies as defined in imap_001.tf
  IMAP = -43
  IMAP_SPACECRAFT = -43000
  # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
@@ -33,7 +33,7 @@ class SpiceBody(IntEnum):


  class SpiceFrame(IntEnum):
- """SPICE IDs for reference frames in imap_wkcp.tf and imap_science_xxx.tf."""
+ """SPICE IDs for reference frames in imap_###.tf and imap_science_xxx.tf."""

  # Standard SPICE Frames
  J2000 = spiceypy.irfnum("J2000")
@@ -41,7 +41,7 @@ class SpiceFrame(IntEnum):
  ITRF93 = 13000
  # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
  IMAP_DPS = -43901
- # IMAP specific as defined in imap_wkcp.tf
+ # IMAP specific as defined in imap_###.tf
  IMAP_SPACECRAFT = -43000
  IMAP_LO_BASE = -43100
  IMAP_LO_STAR_SENSOR = -43103
@@ -50,13 +50,15 @@ class SpiceFrame(IntEnum):
  IMAP_HI_90 = -43160
  IMAP_ULTRA_45 = -43200
  IMAP_ULTRA_90 = -43210
- IMAP_MAG = -43250
+ IMAP_MAG_BOOM = -43250
+ IMAP_MAG_I = -43251
+ IMAP_MAG_O = -43252
  IMAP_SWE = -43300
  IMAP_SWAPI = -43350
  IMAP_CODICE = -43400
  IMAP_HIT = -43500
  IMAP_IDEX = -43700
- IMAP_GLOWS = -43750
+ IMAP_GLOWS = -43751

  # IMAP Science Frames (new additions from imap_science_xxx.tf)
  IMAP_OMD = -43900
@@ -87,7 +89,8 @@ BORESIGHT_LOOKUP = {
  SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
  SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
  SpiceFrame.IMAP_ULTRA_90: np.array([0, 0, 1]),
- SpiceFrame.IMAP_MAG: np.array([0, 0, 1]),
+ SpiceFrame.IMAP_MAG_I: np.array([0, 0, 1]),
+ SpiceFrame.IMAP_MAG_O: np.array([0, 0, 1]),
  SpiceFrame.IMAP_SWE: np.array([-1, 0, 0]),
  SpiceFrame.IMAP_SWAPI: np.array([0, 1, 0]),
  SpiceFrame.IMAP_CODICE: np.array([0, 0, 1]),
@@ -162,7 +165,8 @@ def get_instrument_mounting_az_el(instrument: SpiceFrame) -> np.ndarray:
  SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
  SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
  SpiceFrame.IMAP_ULTRA_90: np.array([0, 0, 1]),
- SpiceFrame.IMAP_MAG: np.array([-1, 0, 0]),
+ SpiceFrame.IMAP_MAG_I: np.array([-1, 0, 0]),
+ SpiceFrame.IMAP_MAG_O: np.array([-1, 0, 0]),
  SpiceFrame.IMAP_SWE: np.array([-1, 0, 0]),
  SpiceFrame.IMAP_SWAPI: np.array([0, 0, -1]),
  SpiceFrame.IMAP_CODICE: np.array([-1, 0, 0]),
@@ -214,7 +218,8 @@ def get_spacecraft_to_instrument_spin_phase_offset(instrument: SpiceFrame) -> fl
  SpiceFrame.IMAP_HIT: 120 / 360, # 30 + 90 = 120
  SpiceFrame.IMAP_SWE: 243 / 360, # 153 + 90 = 243
  SpiceFrame.IMAP_GLOWS: 217 / 360, # 127 + 90 = 217
- SpiceFrame.IMAP_MAG: 90 / 360, # 0 + 90 = 90
+ SpiceFrame.IMAP_MAG_I: 90 / 360, # 0 + 90 = 90
+ SpiceFrame.IMAP_MAG_O: 90 / 360, # 0 + 90 = 90
  }
  return phase_offset_lookup[instrument]

@@ -195,7 +195,7 @@ def calculate_pointing_attitude_segments(

  - Latest NAIF leapseconds kernel (naif0012.tls)
  - The latest IMAP sclk (imap_sclk_NNNN.tsc)
- - The latest IMAP frame kernel (imap_wkcp.tf)
+ - The latest IMAP frame kernel (imap_###.tf)
  - IMAP DPS frame kernel (imap_science_100.tf)
  - IMAP historical attitude kernel from which the pointing frame kernel will
  be generated.
@@ -210,7 +210,9 @@ def calculate_pointing_attitude_segments(
  count = spiceypy.ktotal("ck")
  loaded_ck_kernel, _, _, _ = spiceypy.kdata(count - 1, "ck")
  if str(ck_path) != loaded_ck_kernel:
- raise ValueError(f"Error: Expected CK kernel {ck_path}")
+ raise ValueError(
+ f"Error: Expected CK kernel {ck_path} but loaded {loaded_ck_kernel}"
+ )

  id_imap_spacecraft = spiceypy.gipool("FRAME_IMAP_SPACECRAFT", 0, 1)

@@ -9,6 +9,8 @@ import pandas as pd
  from numpy import typing as npt

  from imap_processing.spice import config
+ from imap_processing.spice.geometry import imap_state
+ from imap_processing.spice.time import met_to_sclkticks, sct_to_et

  logger = logging.getLogger(__name__)

@@ -221,3 +223,50 @@ def get_pointing_times(met_time: float) -> tuple[float, float]:
  ][0]
  pointing_end_met = repoint_df["repoint_start_met"].iloc[pointing_idx + 1].item()
  return pointing_start_met, pointing_end_met
+
+
+ def get_pointing_mid_time(met_time: float) -> float:
+ """
+ Get mid-point of the pointing for the given MET time.
+
+ Get the mid-point time between the end of one repoint and
+ start of the next. Input could be a MET time.
+
+ Parameters
+ ----------
+ met_time : float
+ The MET time in a repoint.
+
+ Returns
+ -------
+ repoint_mid_time : float
+ The mid MET time of the repoint maneuver.
+ """
+ pointing_start_met, pointing_end_met = get_pointing_times(met_time)
+ return (pointing_start_met + pointing_end_met) / 2
+
+
+ def get_mid_point_state(met_time: float) -> npt.NDArray:
+ """
+ Get IMAP state for the mid-point.
+
+ Get IMAP state for the mid-point of the pointing in
+ reference frame, ECLIPJ2000 and observer, SUN.
+
+ Parameters
+ ----------
+ met_time : float
+ The MET time in a pointing.
+
+ Returns
+ -------
+ mid_point_state : numpy.ndarray
+ The mid state of the pointing maneuver.
+ """
+ # Get mid point time in ET
+ mid_point_time = get_pointing_mid_time(met_time)
+ mid_point_time_et = sct_to_et(met_to_sclkticks(mid_point_time))
+
+ # Convert mid point time to state
+ pointing_state = imap_state(mid_point_time_et)
+ return pointing_state
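
get_pointing_mid_time is the arithmetic midpoint of the window returned by get_pointing_times, and get_mid_point_state converts that MET midpoint to ET before querying imap_state. A standalone sketch of the midpoint step only, since the real functions need the repoint table and SPICE kernels:

def pointing_mid_time(pointing_start_met: float, pointing_end_met: float) -> float:
    # Midpoint of the pointing window, in MET seconds.
    return (pointing_start_met + pointing_end_met) / 2

print(pointing_mid_time(1000.0, 1864.0))  # 1432.0
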
@@ -80,6 +80,14 @@ class UltraConstants:

  # Thresholds for culling based on counts (keV).
  CULLING_ENERGY_BIN_EDGES: ClassVar[list] = [
+ 3.0,
+ 10.0,
+ 20.0,
+ 50.0,
+ 300.0,
+ 1e5,
+ ]
+ PSET_ENERGY_BIN_EDGES: ClassVar[list] = [
  3.385,
  4.13722222222222,
  5.05660493827161,
@@ -106,3 +114,24 @@ class UltraConstants:
  341.989454569026,
  1e5,
  ]
+
+ # Valid event filter constants
+ # Note these appear similar to image params constants
+ # but they should be used only for the valid event filter.
+ ETOFOFF1_EVENTFILTER = 100
+ ETOFOFF2_EVENTFILTER = -50
+ ETOFSLOPE1_EVENTFILTER = 6667
+ ETOFSLOPE2_EVENTFILTER = 7500
+ ETOFMAX_EVENTFILTER = 90
+ ETOFMIN_EVENTFILTER = -400
+ TOFDIFFTPMIN_EVENTFILTER = 226
+ TOFDIFFTPMAX_EVENTFILTER = 266
+
+ TOFXE_SPECIES_GROUPS: ClassVar[dict[str, list[int]]] = {
+ "proton": [3],
+ "non_proton": [20, 28, 36],
+ }
+ TOFXPH_SPECIES_GROUPS: ClassVar[dict[str, list[int]]] = {
+ "proton": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+ "non_proton": [20, 21, 22, 23, 24, 25, 26],
+ }
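
The new species-group dictionaries map TOF species codes onto proton and non-proton groups. A hedged sketch of a lookup built on the values added above; classify_species is an illustrative helper, not part of the package:

# Values copied from the TOFXE_SPECIES_GROUPS addition above.
TOFXE_SPECIES_GROUPS = {
    "proton": [3],
    "non_proton": [20, 28, 36],
}

def classify_species(code: int, groups: dict[str, list[int]]) -> str | None:
    # Return the group whose species codes contain this code, if any.
    for name, codes in groups.items():
        if code in codes:
            return name
    return None

print(classify_species(28, TOFXE_SPECIES_GROUPS))  # non_proton
print(classify_species(99, TOFXE_SPECIES_GROUPS))  # None
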
@@ -157,6 +157,7 @@ def decompress_image(
  pixel0: int,
  binary_data: str,
  packet_props: PacketProperties,
+ planes_per_packet: int = 1,
  ) -> NDArray:
  """
  Will decompress a binary string representing an image into a matrix of pixel values.
@@ -174,11 +175,15 @@ def decompress_image(
  packet_props : PacketProperties
  Properties of the packet, including width bit, mantissa bit length and pixel
  window dimensions.
+ planes_per_packet : int
+ Number of image planes in the packet. Default is 1.

  Returns
  -------
- p_decom : NDArray
- A 2D numpy array representing pixel values.
+ planes : NDArray
+ A 3D numpy array representing pixel values.
+ The last two dimensions correspond to the image dimensions, and the first
+ is the number of image planes.
  Each pixel is stored as an unsigned 16-bit integer (uint16).

  Notes
@@ -199,51 +204,58 @@ def decompress_image(
  )

  blocks_per_row = cols // pixels_per_block
-
- # Compressed pixel matrix
- p = np.zeros((rows, cols), dtype=np.uint16)
- # Decompressed pixel matrix
- p_decom = np.zeros((rows, cols), dtype=np.int16)
-
+ current_pixel0 = pixel0 # Use the parameter for first plane
+ planes = []
+ plane_num = 0
  pos = 0 # Starting position in the binary string
-
- for i in range(rows):
- for j in range(blocks_per_row):
- # Read the width for the block.
- w, pos = read_and_advance(binary_data, width_bit, pos)
- for k in range(pixels_per_block):
- # Handle the special case in which the width is 0
- if w == 0:
- value = 0
- else:
- # Find the value of each pixel in the block
- value, pos = read_and_advance(binary_data, w, pos)
-
- # if the least significant bit of value is set (odd)
- if value & 0x01:
- # value >> 1: shifts bits of value one place to the right
- # ~: bitwise NOT operator (flips bits)
- delta_f = ~(value >> 1)
- else:
- delta_f = value >> 1
-
- # Calculate the new pixel value and update pixel0
- column_index = j * pixels_per_block + k
- # 0xff is the hexadecimal representation of the number 255,
- # Keeps only the last 8 bits of the result of pixel0 - delta_f
- # This operation ensures that the result is within the range
- # of an 8-bit byte (0-255)
- # Use np.int16 for the arithmetic operation to avoid overflow
- # Then implicitly cast back to the p's uint16 dtype for storage
- p[i][column_index] = np.int16(pixel0) - delta_f
- # Perform logarithmic decompression on the pixel value
- p_decom[i][column_index] = log_decompression(
- p[i][column_index], mantissa_bit_length
- )
- pixel0 = p[i][column_index]
- pixel0 = p[i][0]
-
- return p_decom
+ while plane_num < planes_per_packet:
+ # Compressed pixel matrix
+ p = np.zeros((rows, cols), dtype=np.uint16)
+ # Decompressed pixel matrix
+ p_decom = np.zeros((rows, cols), dtype=np.int16)
+
+ for i in range(rows):
+ for j in range(blocks_per_row):
+ # Read the width for the block.
+ w, pos = read_and_advance(binary_data, width_bit, pos)
+ for k in range(pixels_per_block):
+ # Handle the special case in which the width is 0
+ if w == 0:
+ value = 0
+ else:
+ # Find the value of each pixel in the block
+ value, pos = read_and_advance(binary_data, w, pos)
+
+ # if the least significant bit of value is set (odd)
+ if value & 0x01:
+ # value >> 1: shifts bits of value one place to the right
+ # ~: bitwise NOT operator (flips bits)
+ delta_f = ~(value >> 1)
+ else:
+ delta_f = value >> 1
+
+ # Calculate the new pixel value and update pixel0
+ column_index = j * pixels_per_block + k
+ # 0xff is the hexadecimal representation of the number 255,
+ # Keeps only the last 8 bits of the result of pixel0 - delta_f
+ # This operation ensures that the result is within the range
+ # of an 8-bit byte (0-255)
+ # Use np.int16 for the arithmetic operation to avoid overflow
+ # Then implicitly cast back to the p's uint16 dtype for storage
+ p[i][column_index] = np.int16(current_pixel0) - delta_f
+ # Perform logarithmic decompression on the pixel value
+ p_decom[i][column_index] = log_decompression(
+ p[i][column_index], mantissa_bit_length
+ )
+ current_pixel0 = p[i][column_index]
+ current_pixel0 = p[i][0]
+ planes.append(p_decom)
+ plane_num += 1
+ # Read P00 for the next plane (if not the last plane)
+ if plane_num < planes_per_packet:
+ current_pixel0, pos = read_and_advance(binary_data, 8, pos)
+
+ return np.stack(planes)


  def read_image_raw_events_binary(
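
The inner loop above decodes each code word into a signed delta (odd values map to ~(value >> 1), even values to value >> 1) and subtracts it from the running pixel0, which carries across pixels and planes. A standalone sketch of just that delta-decode step, with the uint16 wrapping, bit reading, and log decompression omitted:

def decode_delta(value: int) -> int:
    # Odd code words map to ~(value >> 1), even ones to value >> 1.
    return ~(value >> 1) if value & 0x01 else value >> 1

def decode_pixels(codes: list[int], pixel0: int) -> list[int]:
    # Each pixel is the previous pixel value minus the decoded delta.
    pixels = []
    for code in codes:
        pixel0 = pixel0 - decode_delta(code)
        pixels.append(pixel0)
    return pixels

print(decode_pixels([0, 3, 4], pixel0=100))  # [100, 102, 100]
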
@@ -81,54 +81,66 @@ def process_ultra_tof(ds: xr.Dataset, packet_props: PacketProperties) -> xr.Data
  decom_data: defaultdict[str, list[np.ndarray]] = defaultdict(list)
  decom_data["packetdata"] = []
  valid_epoch = []
-
  for val, group in ds.groupby("epoch"):
  if set(group["sid"].values) >= set(
  np.arange(0, image_planes, planes_per_packet)
  ):
+ plane_count = 0
  valid_epoch.append(val)
  group.sortby("sid")

  for key in scalar_keys:
- decom_data[key].append(group[key].values)
+ # Repeat the scalar values for each image plane. There may be cases
+ # where the last packet has fewer planes than the planes_per_packet, so
+ # we slice to ensure the correct length.
+ decom_data[key].append(
+ np.tile(group[key].values, planes_per_packet)[:image_planes]
+ )

  image = []
  for i in range(num_image_packets):
  binary = convert_to_binary_string(group["packetdata"].values[i])
+ # Determine how many planes to decompress in this packet.
+ # the last packet might have fewer planes than planes_per_packet.
+ # Take the minimum of the remaining planes or the max planes per packet
+ # value.
+ planes_in_packet = min(image_planes - plane_count, planes_per_packet)
  decompressed = decompress_image(
  group["p00"].values[i],
  binary,
  packet_props,
+ planes_in_packet,
  )
  image.append(decompressed)
+ plane_count += planes_in_packet

- decom_data["packetdata"].append(np.stack(image))
+ decom_data["packetdata"].append(np.concatenate(image, axis=0))

  for key in scalar_keys:
- decom_data[key] = np.stack(decom_data[key])
+ decom_data[key] = np.stack(decom_data[key], axis=0)

- decom_data["packetdata"] = np.stack(decom_data["packetdata"])
+ decom_data["packetdata"] = np.stack(decom_data["packetdata"], axis=0)

  coords = {
  "epoch": np.array(valid_epoch, dtype=np.uint64),
- "sid": xr.DataArray(np.arange(num_image_packets), dims=["sid"], name="sid"),
+ "plane": xr.DataArray(np.arange(image_planes), dims=["plane"], name="plane"),
  "row": xr.DataArray(np.arange(rows), dims=["row"], name="row"),
  "column": xr.DataArray(np.arange(cols), dims=["column"], name="column"),
  }

  dataset = xr.Dataset(coords=coords)

- # Add scalar keys (2D: epoch x sid)
+ # Add scalar keys (2D: epoch x packets)
  for key in scalar_keys:
  dataset[key] = xr.DataArray(
  decom_data[key],
- dims=["epoch", "sid"],
+ dims=["epoch", "plane"],
  )

  # Add PACKETDATA (4D: epoch x sid x row x column)
  dataset["packetdata"] = xr.DataArray(
  decom_data["packetdata"],
- dims=["epoch", "sid", "row", "column"],
+ dims=["epoch", "plane", "row", "column"],
  )

  return dataset
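
The scalar handling above tiles each per-packet value planes_per_packet times and slices to image_planes so a final, shorter packet does not overrun the plane count. A small numpy sketch of that tile-and-slice with hypothetical sizes:

import numpy as np

planes_per_packet = 2
image_planes = 5
per_packet_scalar = np.array([10, 20, 30])  # one value per packet (hypothetical)

per_plane = np.tile(per_packet_scalar, planes_per_packet)[:image_planes]
print(per_plane)  # [10 20 30 10 20]
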
@@ -137,8 +137,8 @@ ULTRA_EXTOF_HIGH_ANGULAR = PacketProperties(
  ULTRA_EXTOF_HIGH_TIME = PacketProperties(
  apid=[888, 952],
  logical_source=[
- "imap_ultra_l1a_45sensor-histogram-ena-extof-hi-time",
- "imap_ultra_l1a_90sensor-histogram-ena-extof-hi-time",
+ "imap_ultra_l1a_45sensor-histogram-ion-extof-hi-time",
+ "imap_ultra_l1a_90sensor-histogram-ion-extof-hi-time",
  ],
  addition_to_logical_desc="Energy By Time of Flight High Time Images",
  width=4,
@@ -153,8 +153,8 @@ ULTRA_EXTOF_HIGH_TIME = PacketProperties(
  ULTRA_EXTOF_HIGH_ENERGY = PacketProperties(
  apid=[887, 951],
  logical_source=[
- "imap_ultra_l1a_45sensor-histogram-ena-extof-hi-nrg",
- "imap_ultra_l1a_90sensor-histogram-ena-extof-hi-nrg",
+ "imap_ultra_l1a_45sensor-histogram-ion-extof-hi-nrg",
+ "imap_ultra_l1a_90sensor-histogram-ion-extof-hi-nrg",
  ],
  addition_to_logical_desc="Energy By Time of Flight High Energy Images",
  width=4,
@@ -7,13 +7,14 @@ from numpy.typing import NDArray
  from imap_processing.ultra.utils.ultra_l1_utils import create_dataset, extract_data_dict

  FILLVAL_UINT16 = 65535
+ FILLVAL_FLOAT32 = -1.0e31
  FILLVAL_FLOAT64 = -1.0e31
  FILLVAL_UINT32 = 4294967295


  def calculate_badtimes(
  extendedspin_dataset: xr.Dataset,
- cullingmask_spins: NDArray,
+ goodtimes_spins: NDArray,
  name: str,
  ) -> xr.Dataset:
  """
@@ -23,7 +24,7 @@ def calculate_badtimes(
  ----------
  extendedspin_dataset : xarray.Dataset
  Dataset containing the data.
- cullingmask_spins : np.ndarray
+ goodtimes_spins : np.ndarray
  Dataset containing the culled data.
  name : str
  Name of the dataset.
@@ -33,11 +34,9 @@ def calculate_badtimes(
  badtimes_dataset : xarray.Dataset
  Dataset containing the extendedspin data that has been culled.
  """
+ n_bins = extendedspin_dataset.dims["energy_bin_geometric_mean"]
  culled_spins = np.setdiff1d(
- extendedspin_dataset["spin_number"].values, cullingmask_spins
- )
- extendedspin_dataset = extendedspin_dataset.assign_coords(
- epoch=("spin_number", extendedspin_dataset["epoch"].values)
+ extendedspin_dataset["spin_number"].values, goodtimes_spins
  )
  filtered_dataset = extendedspin_dataset.sel(spin_number=culled_spins)

@@ -48,9 +47,6 @@ def calculate_badtimes(
  if badtimes_dataset["spin_number"].size == 0:
  badtimes_dataset = badtimes_dataset.drop_dims("spin_number")
  badtimes_dataset = badtimes_dataset.expand_dims(spin_number=[FILLVAL_UINT32])
- badtimes_dataset = badtimes_dataset.assign_coords(
- epoch=("spin_number", [extendedspin_dataset["epoch"].values[0]])
- )
  badtimes_dataset["spin_start_time"] = xr.DataArray(
  np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
  )
@@ -60,16 +56,44 @@ def calculate_badtimes(
  badtimes_dataset["spin_rate"] = xr.DataArray(
  np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
  )
+ badtimes_dataset["start_pulses_per_spin"] = xr.DataArray(
+ np.array([FILLVAL_FLOAT32], dtype="float32"),
+ dims=["spin_number"],
+ )
+ badtimes_dataset["stop_pulses_per_spin"] = xr.DataArray(
+ np.array([FILLVAL_FLOAT32], dtype="float32"),
+ dims=["spin_number"],
+ )
+ badtimes_dataset["coin_pulses_per_spin"] = xr.DataArray(
+ np.array([FILLVAL_FLOAT32], dtype="float32"),
+ dims=["spin_number"],
+ )
+ badtimes_dataset["rejected_events_per_spin"] = xr.DataArray(
+ np.array([FILLVAL_UINT32], dtype="uint32"),
+ dims=["spin_number"],
+ )
  badtimes_dataset["quality_attitude"] = xr.DataArray(
  np.array([FILLVAL_UINT16], dtype="uint16"), dims=["spin_number"]
  )
+ badtimes_dataset["quality_hk"] = xr.DataArray(
+ np.array([FILLVAL_UINT16], dtype="uint16"),
+ dims=["spin_number"],
+ )
+ badtimes_dataset["quality_instruments"] = xr.DataArray(
+ np.array([FILLVAL_UINT16], dtype="uint16"),
+ dims=["spin_number"],
+ )
  badtimes_dataset["quality_ena_rates"] = (
  ("energy_bin_geometric_mean", "spin_number"),
- np.full((3, 1), FILLVAL_UINT16, dtype="uint16"),
+ np.full((n_bins, 1), FILLVAL_UINT16, dtype="uint16"),
  )
  badtimes_dataset["ena_rates"] = (
  ("energy_bin_geometric_mean", "spin_number"),
- np.full((3, 1), FILLVAL_FLOAT64, dtype="float64"),
+ np.full((n_bins, 1), FILLVAL_FLOAT64, dtype="float64"),
+ )
+ badtimes_dataset["ena_rates_threshold"] = (
+ ("energy_bin_geometric_mean", "spin_number"),
+ np.full((n_bins, 1), FILLVAL_FLOAT32, dtype="float32"),
  )

  return badtimes_dataset
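
calculate_badtimes derives the bad spins as the set difference between every spin_number in the extended-spin dataset and the goodtimes spins. A minimal numpy sketch of that selection with hypothetical spin numbers:

import numpy as np

all_spins = np.array([10, 11, 12, 13, 14])  # spin_number values in the extendedspin dataset
goodtimes_spins = np.array([10, 12, 14])    # spins that pass the goodtimes selection

culled_spins = np.setdiff1d(all_spins, goodtimes_spins)
print(culled_spins)  # [11 13]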