imap-processing 0.17.0__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic; see the registry page for more details.

Files changed (89)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ccsds/excel_to_xtce.py +12 -0
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +11 -0
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +11 -0
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
  7. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
  8. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +4 -4
  9. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
  10. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +119 -36
  11. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
  12. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
  13. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
  14. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +60 -0
  15. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
  16. imap_processing/cli.py +28 -5
  17. imap_processing/codice/codice_l1a.py +36 -48
  18. imap_processing/codice/codice_l1b.py +1 -1
  19. imap_processing/codice/codice_l2.py +0 -9
  20. imap_processing/codice/constants.py +481 -498
  21. imap_processing/hit/l0/decom_hit.py +2 -2
  22. imap_processing/hit/l1a/hit_l1a.py +64 -24
  23. imap_processing/hit/l1b/constants.py +5 -0
  24. imap_processing/hit/l1b/hit_l1b.py +18 -16
  25. imap_processing/hit/l2/constants.py +1 -1
  26. imap_processing/hit/l2/hit_l2.py +4 -5
  27. imap_processing/ialirt/constants.py +21 -0
  28. imap_processing/ialirt/generate_coverage.py +188 -0
  29. imap_processing/ialirt/l0/parse_mag.py +62 -5
  30. imap_processing/ialirt/l0/process_swapi.py +1 -1
  31. imap_processing/ialirt/l0/process_swe.py +23 -7
  32. imap_processing/ialirt/utils/constants.py +22 -16
  33. imap_processing/ialirt/utils/create_xarray.py +42 -19
  34. imap_processing/idex/idex_constants.py +1 -5
  35. imap_processing/idex/idex_l2b.py +246 -67
  36. imap_processing/idex/idex_l2c.py +30 -196
  37. imap_processing/lo/l0/lo_apid.py +1 -0
  38. imap_processing/lo/l1a/lo_l1a.py +44 -0
  39. imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
  40. imap_processing/mag/constants.py +1 -0
  41. imap_processing/mag/l1d/__init__.py +0 -0
  42. imap_processing/mag/l1d/mag_l1d.py +133 -0
  43. imap_processing/mag/l1d/mag_l1d_data.py +588 -0
  44. imap_processing/mag/l2/__init__.py +0 -0
  45. imap_processing/mag/l2/mag_l2.py +25 -20
  46. imap_processing/mag/l2/mag_l2_data.py +191 -130
  47. imap_processing/quality_flags.py +20 -2
  48. imap_processing/spice/geometry.py +25 -3
  49. imap_processing/spice/pointing_frame.py +1 -1
  50. imap_processing/spice/spin.py +4 -0
  51. imap_processing/spice/time.py +51 -0
  52. imap_processing/swapi/l2/swapi_l2.py +52 -8
  53. imap_processing/swapi/swapi_utils.py +1 -1
  54. imap_processing/swe/l1b/swe_l1b.py +2 -4
  55. imap_processing/ultra/constants.py +49 -1
  56. imap_processing/ultra/l0/decom_tools.py +15 -8
  57. imap_processing/ultra/l0/decom_ultra.py +35 -11
  58. imap_processing/ultra/l0/ultra_utils.py +97 -5
  59. imap_processing/ultra/l1a/ultra_l1a.py +25 -4
  60. imap_processing/ultra/l1b/cullingmask.py +3 -3
  61. imap_processing/ultra/l1b/de.py +53 -15
  62. imap_processing/ultra/l1b/extendedspin.py +26 -2
  63. imap_processing/ultra/l1b/lookup_utils.py +171 -50
  64. imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
  65. imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
  66. imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
  67. imap_processing/ultra/l1c/helio_pset.py +54 -7
  68. imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
  69. imap_processing/ultra/l1c/ultra_l1c.py +2 -0
  70. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
  71. imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
  72. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
  73. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/RECORD +76 -83
  74. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
  75. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
  76. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
  77. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
  78. imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
  79. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  80. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  81. imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
  82. imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
  83. imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
  84. imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
  85. imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
  86. imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
  87. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
  88. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
  89. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
@@ -7,7 +7,9 @@ import numpy as np
7
7
  import xarray as xr
8
8
 
9
9
  from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
10
- from imap_processing.mag.constants import DataMode
10
+ from imap_processing.mag.constants import FILLVAL, DataMode
11
+ from imap_processing.mag.l1b.mag_l1b import calibrate_vector
12
+ from imap_processing.spice.geometry import SpiceFrame, frame_transform
11
13
  from imap_processing.spice.time import (
12
14
  et_to_ttj2000ns,
13
15
  str_to_et,
@@ -17,26 +19,29 @@ from imap_processing.spice.time import (
17
19
  class ValidFrames(Enum):
18
20
  """SPICE reference frames for output."""
19
21
 
20
- dsrf = "dsrf"
21
- srf = "srf"
22
- rtn = "rtn"
23
- gse = "gse"
22
+ MAG = SpiceFrame.IMAP_MAG
23
+ DSRF = SpiceFrame.IMAP_DPS
24
+ SRF = SpiceFrame.IMAP_SPACECRAFT
25
+ # TODO: include RTN and GSE as valid frames
24
26
 
25
27
 
26
- @dataclass
27
- class MagL2:
28
+ @dataclass(kw_only=True)
29
+ class MagL2L1dBase:
28
30
  """
29
- Dataclass for MAG L2 data.
31
+ Base class for MAG L2 and L1D data.
30
32
 
31
- Since L2 and L1D should have the same structure, this can be used for either level.
33
+ Since these two data levels output identical files, and share some methods, this
34
+ superclass captures the tools in common, while allowing each subclass to define
35
+ individual attributes and algorithms.
32
36
 
33
- Some of the methods are also static, so they can be used in i-ALiRT processing.
37
+ May also be extended for I-ALiRT.
34
38
 
35
39
  Attributes
36
40
  ----------
37
41
  vectors: np.ndarray
38
42
  Magnetic field vectors of size (n, 3) where n is the number of vectors.
39
- Describes (x, y, z) components of the magnetic field.
43
+ Describes (x, y, z) components of the magnetic field. This field is the output
44
+ vectors, which are nominally from the MAGo sensor.
40
45
  epoch: np.ndarray
41
46
  Time of each vector in J2000 seconds. Should be of length n.
42
47
  range: np.ndarray
@@ -48,10 +53,8 @@ class MagL2:
48
53
  quality_bitmask: np.ndarray
49
54
  Quality bitmask for each vector. Should be of length n. Copied from offset
50
55
  file in L2, marked as good always in L1D.
51
- magnitude: np.ndarray
52
- Magnitude of each vector. Should be of length n. Calculated from L2 vectors.
53
- is_l1d: bool
54
- Flag to indicate if the data is L1D. Defaults to False.
56
+ frame:
57
+ The reference frame of the input vectors. Starts as the MAG instrument frame.
55
58
  """
56
59
 
57
60
  vectors: np.ndarray
@@ -62,120 +65,12 @@ class MagL2:
62
65
  quality_bitmask: np.ndarray
63
66
  data_mode: DataMode
64
67
  magnitude: np.ndarray = field(init=False)
65
- is_l1d: bool = False
66
- offsets: InitVar[np.ndarray] = None
67
- timedelta: InitVar[np.ndarray] = None
68
-
69
- def __post_init__(self, offsets: np.ndarray, timedelta: np.ndarray) -> None:
70
- """
71
- Calculate the magnitude of the vectors after initialization.
72
-
73
- Parameters
74
- ----------
75
- offsets : np.ndarray
76
- Offsets to apply to the vectors. Should be of shape (n, 3) where n is the
77
- number of vectors.
78
- timedelta : np.ndarray
79
- Time deltas to shift the timestamps by. Should be of length n.
80
- Given in seconds.
81
- """
82
- if offsets is not None:
83
- self.vectors = self.apply_offsets(self.vectors, offsets)
84
- if timedelta is not None:
85
- self.epoch = self.shift_timestamps(self.epoch, timedelta)
86
-
87
- self.magnitude = self.calculate_magnitude(self.vectors)
88
-
89
- @staticmethod
90
- def calculate_magnitude(
91
- vectors: np.ndarray,
92
- ) -> np.ndarray:
93
- """
94
- Given a list of vectors (x, y, z), calculate the magnitude of each vector.
95
-
96
- For an input list of vectors of size (n, 3) returns a list of magnitudes of
97
- size (n,).
98
-
99
- Parameters
100
- ----------
101
- vectors : np.ndarray
102
- Array of vectors to calculate the magnitude of.
103
-
104
- Returns
105
- -------
106
- np.ndarray
107
- Array of magnitudes of the input vectors.
108
- """
109
- return np.linalg.norm(vectors, axis=1) # type: ignore
110
-
111
- @staticmethod
112
- def apply_offsets(vectors: np.ndarray, offsets: np.ndarray) -> np.ndarray:
113
- """
114
- Apply the offsets to the vectors by adding them together.
115
-
116
- These offsets are used to shift the vectors in the x, y, and z directions.
117
- They can either be provided through a custom offsets datafile, or calculated
118
- using a gradiometry algorithm.
119
-
120
- Parameters
121
- ----------
122
- vectors : np.ndarray
123
- Array of vectors to apply the offsets to. Should be of shape (n, 3) where n
124
- is the number of vectors.
125
- offsets : np.ndarray
126
- Array of offsets to apply to the vectors. Should be of shape (n, 3) where n
127
- is the number of vectors.
128
-
129
- Returns
130
- -------
131
- np.ndarray
132
- Array of vectors with offsets applied. Should be of shape (n, 3).
133
- """
134
- if vectors.shape[0] != offsets.shape[0]:
135
- raise ValueError("Vectors and offsets must have the same length.")
136
-
137
- offset_vectors: np.ndarray = vectors[:, :3] + offsets
138
-
139
- # TODO: CDF files don't have NaNs. Emailed MAG to ask what this will look like.
140
- # Any values where offsets is nan must also be nan
141
- offset_vectors[np.isnan(offsets).any(axis=1)] = np.nan
142
-
143
- return offset_vectors
144
-
145
- @staticmethod
146
- def shift_timestamps(epoch: np.ndarray, timedelta: np.ndarray) -> np.ndarray:
147
- """
148
- Shift the timestamps by the given timedelta.
149
-
150
- If timedelta is positive, the epochs are shifted forward in time.
151
-
152
- Parameters
153
- ----------
154
- epoch : np.ndarray
155
- Array of timestamps to shift. Should be of length n.
156
- timedelta : np.ndarray
157
- Array of time deltas to shift the timestamps by. Should be the same length
158
- as epoch. Given in seconds.
159
-
160
- Returns
161
- -------
162
- np.ndarray
163
- Shifted timestamps.
164
- """
165
- if epoch.shape[0] != timedelta.shape[0]:
166
- raise ValueError(
167
- "Input Epoch and offsets timedeltas must be the same length."
168
- )
169
-
170
- timedelta_ns = timedelta * 1e9
171
- shifted_timestamps = epoch + timedelta_ns
172
- return shifted_timestamps
68
+ frame: ValidFrames = ValidFrames.MAG
173
69
 
174
70
  def generate_dataset(
175
71
  self,
176
72
  attribute_manager: ImapCdfAttributes,
177
73
  day: np.datetime64,
178
- frame: ValidFrames = ValidFrames.dsrf,
179
74
  ) -> xr.Dataset:
180
75
  """
181
76
  Generate an xarray dataset from the dataclass.
@@ -189,8 +84,6 @@ class MagL2:
189
84
  CDF attributes object for the correct level.
190
85
  day : np.datetime64
191
86
  The 24 hour day to process, as a numpy datetime format.
192
- frame : ValidFrames
193
- SPICE reference frame to rotate the data into.
194
87
 
195
88
  Returns
196
89
  -------
@@ -199,7 +92,9 @@ class MagL2:
199
92
  """
200
93
  self.truncate_to_24h(day)
201
94
 
202
- logical_source_id = f"imap_mag_l2_{self.data_mode.value.lower()}-{frame.name}"
95
+ logical_source_id = (
96
+ f"imap_mag_l2_{self.data_mode.value.lower()}-{self.frame.name.lower()}"
97
+ )
203
98
  direction = xr.DataArray(
204
99
  np.arange(3),
205
100
  name="direction",
@@ -242,8 +137,8 @@ class MagL2:
242
137
  )
243
138
 
244
139
  quality_bitmask = xr.DataArray(
245
- self.quality_flags,
246
- name="quality_flags",
140
+ self.quality_bitmask,
141
+ name="quality_bitmask",
247
142
  dims=["epoch"],
248
143
  attrs=attribute_manager.get_variable_attributes("qf"),
249
144
  )
@@ -298,7 +193,6 @@ class MagL2:
298
193
  """
299
194
  if self.epoch.shape[0] != self.vectors.shape[0]:
300
195
  raise ValueError("Timestamps and vectors are not the same shape!")
301
-
302
196
  start_timestamp_j2000 = et_to_ttj2000ns(str_to_et(str(timestamp)))
303
197
  end_timestamp_j2000 = et_to_ttj2000ns(
304
198
  str_to_et(str(timestamp + np.timedelta64(1, "D")))
@@ -313,3 +207,170 @@ class MagL2:
313
207
  self.magnitude = self.magnitude[day_start_index:day_end_index]
314
208
  self.quality_flags = self.quality_flags[day_start_index:day_end_index]
315
209
  self.quality_bitmask = self.quality_bitmask[day_start_index:day_end_index]
210
+
211
+ @staticmethod
212
+ def calculate_magnitude(
213
+ vectors: np.ndarray,
214
+ ) -> np.ndarray:
215
+ """
216
+ Given a list of vectors (x, y, z), calculate the magnitude of each vector.
217
+
218
+ For an input list of vectors of size (n, 3) returns a list of magnitudes of
219
+ size (n,).
220
+
221
+ Parameters
222
+ ----------
223
+ vectors : np.ndarray
224
+ Array of vectors to calculate the magnitude of.
225
+
226
+ Returns
227
+ -------
228
+ np.ndarray
229
+ Array of magnitudes of the input vectors.
230
+ """
231
+ return np.linalg.norm(vectors, axis=1)
232
+
233
+ @staticmethod
234
+ def apply_calibration(
235
+ vectors: np.ndarray, calibration_matrix: np.ndarray
236
+ ) -> np.ndarray:
237
+ """
238
+ Apply the calibration matrix to the vectors.
239
+
240
+ This works by repeatedly calling the function calibrate_vector on the vectors
241
+ input.
242
+
243
+ Parameters
244
+ ----------
245
+ vectors : np.ndarray
246
+ Array of vectors to apply the calibration to, including x,y,z and range.
247
+ Should be of shape (n, 4) where n is the number of vectors.
248
+ calibration_matrix : np.ndarray
249
+ Calibration matrix to apply to the vectors. Should be of shape (3, 3, 4).
250
+
251
+ Returns
252
+ -------
253
+ np.ndarray
254
+ Array of calibrated vectors. Should be of shape (n, 4).
255
+ """
256
+ calibrated_vectors = np.apply_along_axis(
257
+ func1d=calibrate_vector,
258
+ axis=1,
259
+ arr=vectors,
260
+ calibration_matrix=calibration_matrix,
261
+ )
262
+
263
+ return calibrated_vectors
264
+
265
+ @staticmethod
266
+ def shift_timestamps(epoch: np.ndarray, timedelta: np.ndarray) -> np.ndarray:
267
+ """
268
+ Shift the timestamps by the given timedelta.
269
+
270
+ If timedelta is positive, the epochs are shifted forward in time.
271
+
272
+ Parameters
273
+ ----------
274
+ epoch : np.ndarray
275
+ Array of timestamps to shift. Should be of length n.
276
+ timedelta : np.ndarray
277
+ Array of time deltas to shift the timestamps by. Should be the same length
278
+ as epoch. Given in seconds.
279
+
280
+ Returns
281
+ -------
282
+ np.ndarray
283
+ Shifted timestamps.
284
+ """
285
+ if epoch.shape[0] != timedelta.shape[0]:
286
+ raise ValueError(
287
+ "Input Epoch and offsets timedeltas must be the same length."
288
+ )
289
+
290
+ timedelta_ns = timedelta * 1e9
291
+ shifted_timestamps = epoch + timedelta_ns
292
+ return shifted_timestamps
293
+
294
+ def rotate_frame(self, end_frame: ValidFrames) -> None:
295
+ """
296
+ Rotate the vector data in the class to the output frame.
297
+
298
+ Parameters
299
+ ----------
300
+ end_frame : ValidFrames
301
+ The frame to rotate the data to. Must be one of the ValidFrames enum
302
+ values.
303
+ """
304
+ self.vectors = frame_transform(
305
+ self.epoch,
306
+ self.vectors,
307
+ from_frame=self.frame.value,
308
+ to_frame=end_frame.value,
309
+ )
310
+ self.frame = end_frame
311
+
312
+
313
+ @dataclass(kw_only=True)
314
+ class MagL2(MagL2L1dBase):
315
+ """
316
+ Dataclass for MAG L2 data.
317
+
318
+ Since L2 and L1D should have the same structure, this can be used for either level.
319
+
320
+ Some of the methods are also static, so they can be used in i-ALiRT processing.
321
+ """
322
+
323
+ offsets: InitVar[np.ndarray] = None
324
+ timedelta: InitVar[np.ndarray] = None
325
+
326
+ def __post_init__(self, offsets: np.ndarray, timedelta: np.ndarray) -> None:
327
+ """
328
+ Calculate the magnitude of the vectors after initialization.
329
+
330
+ Parameters
331
+ ----------
332
+ offsets : np.ndarray
333
+ Offsets to apply to the vectors. Should be of shape (n, 3) where n is the
334
+ number of vectors.
335
+ timedelta : np.ndarray
336
+ Time deltas to shift the timestamps by. Should be of length n.
337
+ Given in seconds.
338
+ """
339
+ if offsets is not None:
340
+ self.vectors = self.apply_offsets(self.vectors, offsets)
341
+ if timedelta is not None:
342
+ self.epoch = self.shift_timestamps(self.epoch, timedelta)
343
+
344
+ self.magnitude = self.calculate_magnitude(self.vectors)
345
+
346
+ @staticmethod
347
+ def apply_offsets(vectors: np.ndarray, offsets: np.ndarray) -> np.ndarray:
348
+ """
349
+ Apply the offsets to the vectors by adding them together.
350
+
351
+ These offsets are used to shift the vectors in the x, y, and z directions.
352
+ They can either be provided through a custom offsets datafile, or calculated
353
+ using a gradiometry algorithm.
354
+
355
+ Parameters
356
+ ----------
357
+ vectors : np.ndarray
358
+ Array of vectors to apply the offsets to. Should be of shape (n, 3) where n
359
+ is the number of vectors.
360
+ offsets : np.ndarray
361
+ Array of offsets to apply to the vectors. Should be of shape (n, 3) where n
362
+ is the number of vectors.
363
+
364
+ Returns
365
+ -------
366
+ np.ndarray
367
+ Array of vectors with offsets applied. Should be of shape (n, 3).
368
+ """
369
+ if vectors.shape[0] != offsets.shape[0]:
370
+ raise ValueError("Vectors and offsets must have the same length.")
371
+
372
+ offset_vectors: np.ndarray = vectors + offsets
373
+
374
+ # Any values where offsets is FILLVAL must also be FILLVAL
375
+ offset_vectors[(offsets == FILLVAL).any(axis=1), :] = FILLVAL
376
+ return offset_vectors
@@ -37,6 +37,14 @@ class ENAFlags(FlagNameMixin):
37
37
  BADSPIN = 2**2 # bit 2, Bad spin
38
38
 
39
39
 
40
+ class ImapDEUltraFlags(FlagNameMixin):
41
+ """IMAP Ultra flags."""
42
+
43
+ NONE = CommonFlags.NONE
44
+ FOV = 2**0 # bit 0
45
+ PHCORR = 2**1 # bit 1
46
+
47
+
40
48
  class ImapHkUltraFlags(FlagNameMixin):
41
49
  """IMAP Ultra flags."""
42
50
 
@@ -53,14 +61,24 @@ class ImapAttitudeUltraFlags(FlagNameMixin):
53
61
  NONE = CommonFlags.NONE
54
62
  SPINRATE = 2**0 # bit 0
55
63
  AUXMISMATCH = 2**1 # bit 1 # aux packet does not match Universal Spin Table
64
+ SPINPHASE = 2**2 # bit 2 # spin phase flagged by Universal Spin Table
65
+ SPINPERIOD = 2**3 # bit 3 # spin period flagged by Universal Spin Table
56
66
 
57
67
 
58
68
  class ImapRatesUltraFlags(FlagNameMixin):
59
69
  """IMAP Ultra Rates flags."""
60
70
 
61
71
  NONE = CommonFlags.NONE
62
- ZEROCOUNTS = 2**0 # bit 0
63
- HIGHRATES = 2**1 # bit 1
72
+ HIGHRATES = 2**0 # bit 0
73
+ FIRSTSPIN = 2**1 # bit 1
74
+ LASTSPIN = 2**2 # bit 2
75
+ PARTIALSPIN = 2**2 # bit 2
76
+
77
+
78
+ class ImapInstrumentUltraFlags(FlagNameMixin):
79
+ """IMAP Ultra flags using other instruments."""
80
+
81
+ NONE = CommonFlags.NONE
64
82
 
65
83
 
66
84
  class ImapLoFlags(FlagNameMixin):
@@ -27,7 +27,7 @@ class SpiceBody(IntEnum):
27
27
  # A subset of IMAP Specific bodies as defined in imap_wkcp.tf
28
28
  IMAP = -43
29
29
  IMAP_SPACECRAFT = -43000
30
- # IMAP Pointing Frame (Despun) as defined in imap_science_0001.tf
30
+ # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
31
31
  IMAP_DPS = -43901
32
32
  # Standard NAIF bodies
33
33
  SOLAR_SYSTEM_BARYCENTER = spiceypy.bodn2c("SOLAR_SYSTEM_BARYCENTER")
@@ -36,13 +36,13 @@ class SpiceBody(IntEnum):
36
36
 
37
37
 
38
38
  class SpiceFrame(IntEnum):
39
- """Enum containing SPICE IDs for reference frames, defined in imap_wkcp.tf."""
39
+ """SPICE IDs for reference frames in imap_wkcp.tf and imap_science_xxx.tf."""
40
40
 
41
41
  # Standard SPICE Frames
42
42
  J2000 = spiceypy.irfnum("J2000")
43
43
  ECLIPJ2000 = spiceypy.irfnum("ECLIPJ2000")
44
44
  ITRF93 = 13000
45
- # IMAP Pointing Frame (Despun) as defined in imap_science_0001.tf
45
+ # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
46
46
  IMAP_DPS = -43901
47
47
  # IMAP specific as defined in imap_wkcp.tf
48
48
  IMAP_SPACECRAFT = -43000
@@ -61,6 +61,28 @@ class SpiceFrame(IntEnum):
61
61
  IMAP_IDEX = -43700
62
62
  IMAP_GLOWS = -43750
63
63
 
64
+ # IMAP Science Frames (new additions from imap_science_xxx.tf)
65
+ IMAP_OMD = -43900
66
+ IMAP_EARTHFIXED = -43910
67
+ IMAP_ECLIPDATE = -43911
68
+ IMAP_MDI = -43912
69
+ IMAP_MDR = -43913
70
+ IMAP_GMC = -43914
71
+ IMAP_GEI = -43915
72
+ IMAP_GSE = -43916
73
+ IMAP_GSM = -43917
74
+ IMAP_SMD = -43918
75
+ IMAP_RTN = -43920
76
+ IMAP_HCI = -43921 # HGI_J2K
77
+ IMAP_HCD = -43922 # HGI_D
78
+ IMAP_HGC = -43923 # HGS_D
79
+ IMAP_HAE = -43924
80
+ IMAP_HAED = -43925
81
+ IMAP_HEE = -43926
82
+ IMAP_HRE = -43927
83
+ IMAP_HNU = -43928
84
+ IMAP_GCS = -43929
85
+
64
86
 
65
87
  BORESIGHT_LOOKUP = {
66
88
  SpiceFrame.IMAP_LO_BASE: np.array([0, -1, 0]),
@@ -200,7 +200,7 @@ def calculate_pointing_attitude_segments(
200
200
  - Latest NAIF leapseconds kernel (naif0012.tls)
201
201
  - The latest IMAP sclk (imap_sclk_NNNN.tsc)
202
202
  - The latest IMAP frame kernel (imap_wkcp.tf)
203
- - IMAP DPS frame kernel (imap_science_0001.tf)
203
+ - IMAP DPS frame kernel (imap_science_100.tf)
204
204
  - IMAP historical attitude kernel from which the pointing frame kernel will
205
205
  be generated.
206
206
  """
@@ -197,6 +197,10 @@ def interpolate_spin_data(query_met_times: Union[float, npt.NDArray]) -> pd.Data
197
197
  # spin_period_valid columns.
198
198
  invalid_spin_phase_range = (spin_phases < 0) | (spin_phases >= 1)
199
199
 
200
+ # TODO: add optional to filter this if this flag means
201
+ # that repointing is happening. otherwise, then keep it.
202
+ # This needs to be discussed and receive guidance at
203
+ # the project level.
200
204
  invalid_spins = (out_df["spin_phase_valid"].values == 0) | (
201
205
  out_df["spin_period_valid"].values == 0
202
206
  )
@@ -220,6 +220,57 @@ def et_to_datetime64(
220
220
  return np.array(et_to_utc(et), dtype=np.datetime64)[()]
221
221
 
222
222
 
223
+ @typing.no_type_check
224
+ @ensure_spice
225
+ def et_to_met(
226
+ et: Union[float, Collection[float]],
227
+ ) -> Union[float, np.ndarray]:
228
+ """
229
+ Convert ephemeris time to mission elapsed time (MET).
230
+
231
+ This function converts ET to spacecraft clock ticks and then to MET seconds.
232
+ This is the inverse of the MET to ET conversion process.
233
+
234
+ Parameters
235
+ ----------
236
+ et : Union[float, Collection[float]]
237
+ Input ephemeris time value(s) to be converted to MET.
238
+
239
+ Returns
240
+ -------
241
+ met: np.ndarray
242
+ Mission elapsed time in seconds.
243
+ """
244
+ vectorized_sce2c = _vectorize(spiceypy.sce2c, otypes=[float], excluded=[0])
245
+ sclk_ticks = vectorized_sce2c(IMAP_SC_ID, et)
246
+ met = np.asarray(sclk_ticks, dtype=float) * TICK_DURATION
247
+ return met
248
+
249
+
250
+ def ttj2000ns_to_met(
251
+ tt_ns: npt.ArrayLike,
252
+ ) -> npt.NDArray[float]:
253
+ """
254
+ Convert terrestrial time nanoseconds since J2000 to mission elapsed time (MET).
255
+
256
+ This is the inverse of met_to_ttj2000ns. The conversion process is:
257
+ TTJ2000ns -> ET -> MET
258
+
259
+ Parameters
260
+ ----------
261
+ tt_ns : float, numpy.ndarray
262
+ Number of nanoseconds since the J2000 epoch in the TT timescale.
263
+
264
+ Returns
265
+ -------
266
+ numpy.ndarray[float]
267
+ The mission elapsed time in seconds.
268
+ """
269
+ et = ttj2000ns_to_et(tt_ns)
270
+ met = et_to_met(et)
271
+ return met
272
+
273
+
223
274
  @typing.no_type_check
224
275
  @ensure_spice
225
276
  def sct_to_et(
@@ -16,27 +16,73 @@ TIME_PER_BIN = 0.167 # seconds
16
16
 
17
17
 
18
18
  def solve_full_sweep_energy(
19
- esa_lvl5_data: np.ndarray, esa_table_df: pd.DataFrame, lut_notes_df: pd.DataFrame
19
+ esa_lvl5_data: np.ndarray,
20
+ sweep_table: np.ndarray,
21
+ esa_table_df: pd.DataFrame,
22
+ lut_notes_df: pd.DataFrame,
23
+ data_time: npt.NDArray[np.datetime64],
20
24
  ) -> npt.NDArray:
21
25
  """
22
26
  Calculate the energy of each full sweep data.
23
27
 
28
+ Get the fixed energy values for steps 0-62 using the
29
+ esa_table_df information. It's important to ensure
30
+ that the correct fixed energy values are selected for
31
+ the specified time, as the sweep table can contain
32
+ different values depending on the operational phase
33
+ (e.g., I+T, pre-launch, post-launch). There may be
34
+ more fixed energy added in the future. TODO: add
35
+ document section once SWAPI document is updated.
36
+
37
+ Now, find the last 9 fine energy values using steps
38
+ noted in section x of the algorithm document.
39
+
24
40
  Parameters
25
41
  ----------
26
42
  esa_lvl5_data : numpy.ndarray
27
43
  The L1 data input.
44
+ sweep_table : numpy.ndarray
45
+ Sweep table information.
28
46
  esa_table_df : pandas.DataFrame
29
47
  The ESA unit conversion table that contains first 63 energies.
30
48
  lut_notes_df : pandas.DataFrame
31
49
  The LUT notes table that contains the last 9 fine energies.
50
+ data_time : numpy.ndarray
51
+ The collection time of the data.
32
52
 
33
53
  Returns
34
54
  -------
35
55
  energy : numpy.ndarray
36
56
  The energy of each full sweep data.
37
57
  """
38
- # Read 0 - 62 energy steps' fixed energy value
39
- fixed_energy_values = esa_table_df["Energy"].values[:63]
58
+ # Convert timestamp from string to datetime
59
+ # and to the same format as data_time
60
+ esa_table_df["timestamp"] = pd.to_datetime(
61
+ esa_table_df["timestamp"], format="%m/%d/%Y %H:%M"
62
+ )
63
+ esa_table_df["timestamp"] = esa_table_df["timestamp"].to_numpy(
64
+ dtype="datetime64[ns]"
65
+ )
66
+
67
+ first_63_energies = []
68
+
69
+ for time, sweep_id in zip(data_time, sweep_table):
70
+ # Find the sweep's ESA data for the given time and sweep_id
71
+ subset = esa_table_df[
72
+ (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
73
+ ]
74
+ if subset.empty:
75
+ first_63_energies.append(np.full(63, np.nan, dtype=np.float64))
76
+ continue
77
+
78
+ # Subset data can contain multiple 72 energy values with last 9 fine energies
79
+ # with 'Solve' value. We need to sort by time and ESA step to maintain correct
80
+ # order. Then take the last group of 72 steps values and select first 63
81
+ # values only.
82
+ subset = subset.sort_values(["timestamp", "ESA Step #"])
83
+ grouped = subset["Energy"].values.reshape(-1, 72)
84
+ first_63 = grouped[-1, :63]
85
+ first_63_energies.append(first_63)
40
86
 
41
87
  # Find last 9 fine energy values of all sweeps data
42
88
  # -------------------------------------------------
@@ -96,13 +142,9 @@ def solve_full_sweep_energy(
96
142
  # order it should be in:
97
143
  # [64, 65, 66, 67, 68, 69, 70, 71, 72]
98
144
  energy_values = np.flip(energy_values, axis=1)
99
- # Expand to match the number of rows in energy_values
100
- first_63_values = np.tile(
101
- fixed_energy_values, (energy_values.shape[0], 1)
102
- ) # (epoch, 63)
103
145
 
104
146
  # Append the first_63_values in front of energy_values
105
- sweeps_energy_value = np.hstack((first_63_values, energy_values))
147
+ sweeps_energy_value = np.hstack([first_63_energies, energy_values])
106
148
 
107
149
  return sweeps_energy_value
108
150
 
@@ -169,8 +211,10 @@ def swapi_l2(
169
211
  esa_lvl5_hex = np.vectorize(lambda x: format(x, "X"))(l1_dataset["esa_lvl5"].values)
170
212
  esa_energy = solve_full_sweep_energy(
171
213
  esa_lvl5_hex,
214
+ l1_dataset["sweep_table"].data,
172
215
  esa_table_df=esa_table_df,
173
216
  lut_notes_df=lut_notes_df,
217
+ data_time=np.array(l1_dataset["epoch"].data, dtype="datetime64[ns]"),
174
218
  )
175
219
 
176
220
  l2_dataset["swp_esa_energy"] = xr.DataArray(
@@ -51,7 +51,7 @@ def read_swapi_lut_table(file_path: Path) -> pd.DataFrame:
51
51
  .astype(str)
52
52
  .str.replace(",", "", regex=False)
53
53
  .replace("Solve", -1)
54
- .astype(np.int64)
54
+ .astype(np.float64)
55
55
  )
56
56
 
57
57
  return df