imap-processing 0.17.0__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing has been flagged as potentially problematic; consult the registry's advisory for details.

Files changed (89):
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ccsds/excel_to_xtce.py +12 -0
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +11 -0
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +11 -0
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
  7. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
  8. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +4 -4
  9. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
  10. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +119 -36
  11. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
  12. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
  13. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
  14. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +60 -0
  15. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
  16. imap_processing/cli.py +28 -5
  17. imap_processing/codice/codice_l1a.py +36 -48
  18. imap_processing/codice/codice_l1b.py +1 -1
  19. imap_processing/codice/codice_l2.py +0 -9
  20. imap_processing/codice/constants.py +481 -498
  21. imap_processing/hit/l0/decom_hit.py +2 -2
  22. imap_processing/hit/l1a/hit_l1a.py +64 -24
  23. imap_processing/hit/l1b/constants.py +5 -0
  24. imap_processing/hit/l1b/hit_l1b.py +18 -16
  25. imap_processing/hit/l2/constants.py +1 -1
  26. imap_processing/hit/l2/hit_l2.py +4 -5
  27. imap_processing/ialirt/constants.py +21 -0
  28. imap_processing/ialirt/generate_coverage.py +188 -0
  29. imap_processing/ialirt/l0/parse_mag.py +62 -5
  30. imap_processing/ialirt/l0/process_swapi.py +1 -1
  31. imap_processing/ialirt/l0/process_swe.py +23 -7
  32. imap_processing/ialirt/utils/constants.py +22 -16
  33. imap_processing/ialirt/utils/create_xarray.py +42 -19
  34. imap_processing/idex/idex_constants.py +1 -5
  35. imap_processing/idex/idex_l2b.py +246 -67
  36. imap_processing/idex/idex_l2c.py +30 -196
  37. imap_processing/lo/l0/lo_apid.py +1 -0
  38. imap_processing/lo/l1a/lo_l1a.py +44 -0
  39. imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
  40. imap_processing/mag/constants.py +1 -0
  41. imap_processing/mag/l1d/__init__.py +0 -0
  42. imap_processing/mag/l1d/mag_l1d.py +133 -0
  43. imap_processing/mag/l1d/mag_l1d_data.py +588 -0
  44. imap_processing/mag/l2/__init__.py +0 -0
  45. imap_processing/mag/l2/mag_l2.py +25 -20
  46. imap_processing/mag/l2/mag_l2_data.py +191 -130
  47. imap_processing/quality_flags.py +20 -2
  48. imap_processing/spice/geometry.py +25 -3
  49. imap_processing/spice/pointing_frame.py +1 -1
  50. imap_processing/spice/spin.py +4 -0
  51. imap_processing/spice/time.py +51 -0
  52. imap_processing/swapi/l2/swapi_l2.py +52 -8
  53. imap_processing/swapi/swapi_utils.py +1 -1
  54. imap_processing/swe/l1b/swe_l1b.py +2 -4
  55. imap_processing/ultra/constants.py +49 -1
  56. imap_processing/ultra/l0/decom_tools.py +15 -8
  57. imap_processing/ultra/l0/decom_ultra.py +35 -11
  58. imap_processing/ultra/l0/ultra_utils.py +97 -5
  59. imap_processing/ultra/l1a/ultra_l1a.py +25 -4
  60. imap_processing/ultra/l1b/cullingmask.py +3 -3
  61. imap_processing/ultra/l1b/de.py +53 -15
  62. imap_processing/ultra/l1b/extendedspin.py +26 -2
  63. imap_processing/ultra/l1b/lookup_utils.py +171 -50
  64. imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
  65. imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
  66. imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
  67. imap_processing/ultra/l1c/helio_pset.py +54 -7
  68. imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
  69. imap_processing/ultra/l1c/ultra_l1c.py +2 -0
  70. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
  71. imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
  72. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
  73. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/RECORD +76 -83
  74. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
  75. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
  76. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
  77. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
  78. imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
  79. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  80. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  81. imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
  82. imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
  83. imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
  84. imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
  85. imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
  86. imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
  87. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
  88. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
  89. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,588 @@
1
+ # mypy: disable-error-code="unused-ignore"
2
+ """Data classes for MAG L1D processing."""
3
+
4
+ from dataclasses import InitVar, dataclass
5
+
6
+ import numpy as np
7
+ import xarray as xr
8
+
9
+ from imap_processing.mag.constants import FILLVAL, DataMode
10
+ from imap_processing.mag.l1c.interpolation_methods import linear
11
+ from imap_processing.mag.l2.mag_l2 import retrieve_matrix_from_l2_calibration
12
+ from imap_processing.mag.l2.mag_l2_data import MagL2L1dBase, ValidFrames
13
+ from imap_processing.spice import spin
14
+ from imap_processing.spice.geometry import frame_transform
15
+
16
+
17
+ @dataclass
18
+ class MagL1dConfiguration:
19
+ """
20
+ Configuration for MAG L1d processing.
21
+
22
+ Constructed from the combined ancillary dataset inputs from the L1D calibration
23
+ files and the day we are processing.
24
+
25
+ Parameters
26
+ ----------
27
+ calibration_dataset : xr.Dataset
28
+ The combined calibration dataset from the ancillary files. Created as the
29
+ output from MagAncillaryCombiner, which has day values pointing to the
30
+ calibration file for the given day.
31
+ day : np.datetime64
32
+ The day we are processing, in np.datetime64[D] format.
33
+
34
+ Attributes
35
+ ----------
36
+ calibration_offsets : np.ndarray
37
+ The offsets for the correct day. Should be size (2, 4, 3) where the first index
38
+ is 0 for MAGo and 1 for MAGi, the second index is the range (0-3), and the
39
+ third index is the axis (0-2).
40
+ mago_calibration : np.ndarray
41
+ Calibration matrix for the correct day for MAGo. Should be size (3, 3, 4).
42
+ magi_calibration : np.ndarray
43
+ Calibration matrix for the correct day for MAGi. Should be size (3, 3, 4).
44
+ spin_count_calibration : int
45
+ The number of spins to average over when calculating spin offsets.
46
+ quality_flag_threshold : np.float64
47
+ The quality flag threshold for the correct day.
48
+ spin_average_application_factor : np.float64
49
+ The spin average application factor for the correct day.
50
+ gradiometer_factor : np.ndarray
51
+ The gradiometer factor for the correct day. Should be size (3,).
52
+ apply_gradiometry : bool
53
+ Whether to apply gradiometry or not. Default is True.
54
+ """
55
+
56
+ calibration_offsets: np.ndarray
57
+ mago_calibration: np.ndarray
58
+ magi_calibration: np.ndarray
59
+ spin_count_calibration: int
60
+ quality_flag_threshold: np.float64
61
+ spin_average_application_factor: np.float64
62
+ gradiometer_factor: np.ndarray
63
+ apply_gradiometry: bool = True
64
+
65
+ def __init__(self, calibration_dataset: xr.Dataset, day: np.datetime64) -> None:
66
+ """
67
+ Create a MagL1dConfiguration from a calibration dataset and day.
68
+
69
+ Parameters
70
+ ----------
71
+ calibration_dataset : xr.Dataset
72
+ The combined calibration dataset from the ancillary files. Created as the
73
+ output from MagAncillaryCombiner, which has day values pointing to the
74
+ calibration file for the given day.
75
+ day : np.datetime64
76
+ The day we are processing, in np.datetime64[D] format.
77
+
78
+ """
79
+ self.mago_calibration = retrieve_matrix_from_l2_calibration(
80
+ calibration_dataset, day, use_mago=True
81
+ )
82
+
83
+ self.magi_calibration = retrieve_matrix_from_l2_calibration(
84
+ calibration_dataset, day, use_mago=False
85
+ )
86
+ self.calibration_offsets = calibration_dataset.sel(epoch=day)["offsets"].data
87
+ self.spin_count_calibration = calibration_dataset.sel(epoch=day)[
88
+ "number_of_spins"
89
+ ].data
90
+ self.quality_flag_threshold = calibration_dataset.sel(epoch=day)[
91
+ "quality_flag_threshold"
92
+ ].data
93
+ self.spin_average_application_factor = calibration_dataset.sel(epoch=day)[
94
+ "spin_average_application_factor"
95
+ ].data
96
+ self.gradiometer_factor = calibration_dataset.sel(epoch=day)[
97
+ "gradiometer_factor"
98
+ ].data
99
+
100
+
101
@dataclass(kw_only=True)
class MagL1d(MagL2L1dBase):  # type: ignore[misc]
    """
    Class for handling IMAP MAG L1d data.

    When the class is created, all the methods are called in the correct order to
    run MAG L1d processing. The resulting instance can then be used to generate an
    xarray dataset with the `generate_dataset` method.

    Example:
    ```
    l1d_norm = MagL1d(
        vectors=mago_vectors,
        epoch=input_mago_norm["epoch"].data,
        range=input_mago_norm["vectors"].data[:, 3],
        global_attributes={},
        quality_flags=np.zeros(len(input_mago_norm["epoch"].data)),
        quality_bitmask=np.zeros(len(input_mago_norm["epoch"].data)),
        data_mode=DataMode.NORM,
        magi_vectors=magi_vectors,
        magi_range=input_magi_norm["vectors"].data[:, 3],
        config=config
    )
    output_dataset = l1d_norm.generate_dataset(attributes, day_to_process)
    ```

    Attributes
    ----------
    magi_vectors : np.ndarray
        The MAGi vectors, shape (N, 3).
    magi_range : np.ndarray
        The MAGi range values, shape (N,).
    magi_epoch : np.ndarray
        The MAGi epoch values, shape (N,).
    config : MagL1dConfiguration
        The configuration for L1d processing, including calibration matrices and
        offsets. This is generated from the input ancillary file and the
        MagL1dConfiguration class.
    spin_offsets : xr.Dataset, optional
        The spin offsets dataset, if already calculated. If not provided, it will
        be calculated during processing if in NORM mode.
    day : np.datetime64
        The day we are processing, in np.datetime64[D] format. This is used to
        truncate the data to exactly 24 hours.
    """

    # TODO Quality flags
    # TODO generate and output ancillary files
    magi_vectors: np.ndarray
    magi_range: np.ndarray
    magi_epoch: np.ndarray
    config: MagL1dConfiguration
    # None until computed (NORM mode) or supplied by the caller (burst mode).
    spin_offsets: xr.Dataset | None = None
    day: InitVar[np.datetime64]

    def __post_init__(self, day: np.datetime64) -> None:
        """
        Run all processing steps to generate L1d data.

        This updates class variables to match L1D outputs.

        Parameters
        ----------
        day : np.datetime64
            The day we are processing, in np.datetime64[D] format. This is used to
            truncate the data to exactly 24 hours.
        """
        # set the magnitude before truncating so truncation can slice it too
        self.magnitude = np.zeros(self.vectors.shape[0], dtype=np.float64)  # type: ignore[has-type]
        self.truncate_to_24h(day)

        self.vectors, self.magi_vectors = self._calibrate_and_offset_vectors(
            self.config.mago_calibration,
            self.config.magi_calibration,
            self.config.calibration_offsets,
        )
        # We need to be in SRF for the spin offsets application and calculation
        self.rotate_frame(ValidFrames.SRF)

        if self.spin_offsets is None and self.data_mode == DataMode.NORM:
            self.spin_offsets = self.calculate_spin_offsets()

        self.vectors = self.apply_spin_offsets(
            self.spin_offsets,
            self.epoch,
            self.vectors,
            self.config.spin_average_application_factor,
        )
        self.magi_vectors = self.apply_spin_offsets(
            self.spin_offsets,
            self.magi_epoch,
            self.magi_vectors,
            self.config.spin_average_application_factor,
        )

        # we need to be in DSRF for the gradiometry offsets calculation and
        # application
        self.rotate_frame(ValidFrames.DSRF)

        if self.config.apply_gradiometry:
            self.gradiometry_offsets = self.calculate_gradiometry_offsets(
                self.vectors, self.epoch, self.magi_vectors, self.magi_epoch
            )
            self.vectors = self.apply_gradiometry_offsets(
                self.gradiometry_offsets, self.vectors, self.config.gradiometer_factor
            )

        self.magnitude = MagL2L1dBase.calculate_magnitude(vectors=self.vectors)
        self.is_l1d = True

    def rotate_frame(self, end_frame: ValidFrames) -> None:
        """
        Rotate the vectors to the desired frame.

        Rotates both the mago vectors (self.vectors) and the magi vectors
        (self.magi_vectors), then set self.frame to end_frame.

        Parameters
        ----------
        end_frame : ValidFrames
            The frame to rotate to. Should be one of the ValidFrames enum.
        """
        # Capture the current frame before super() updates self.frame.
        start_frame = self.frame
        super().rotate_frame(end_frame)
        self.magi_vectors = frame_transform(
            self.magi_epoch,
            self.magi_vectors,
            from_frame=start_frame.value,
            to_frame=end_frame.value,
        )

    def _calibrate_and_offset_vectors(
        self,
        mago_calibration: np.ndarray,
        magi_calibration: np.ndarray,
        offsets: np.ndarray,
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Apply calibration and initial offset calculations from the configuration file.

        Parameters
        ----------
        mago_calibration : np.ndarray
            Calibration matrix for the correct day for MAGo. Should be size (3, 3, 4).
        magi_calibration : np.ndarray
            Calibration matrix for the correct day for MAGi. Should be size (3, 3, 4).
        offsets : np.ndarray
            Offsets for the correct day. Should be size (2, 4, 3) where the first
            index is 0 for MAGo and 1 for MAGi, the second index is the range
            (0-3), and the third index is the axis (0-2).

        Returns
        -------
        tuple[np.ndarray, np.ndarray]
            The calibrated and offset MAGo and MAGi vectors, each shape (N, 3)
            (not including range).
        """
        # The calibration and offset steps both need the range as a fourth
        # column, so append it before applying either.
        vectors_plus_range_mago = np.concatenate(
            (self.vectors, self.range[:, np.newaxis]), axis=1
        )
        vectors_plus_range_magi = np.concatenate(
            (self.magi_vectors, self.magi_range[:, np.newaxis]), axis=1
        )

        mago_vectors = MagL2L1dBase.apply_calibration(
            vectors_plus_range_mago, mago_calibration
        )
        magi_vectors = MagL2L1dBase.apply_calibration(
            vectors_plus_range_magi, magi_calibration
        )

        # Subtract the per-range, per-axis offsets one vector at a time.
        mago_vectors = np.apply_along_axis(
            func1d=self.apply_calibration_offset_single_vector,
            axis=1,
            arr=mago_vectors,
            offsets=offsets,
            is_magi=False,
        )
        magi_vectors = np.apply_along_axis(
            func1d=self.apply_calibration_offset_single_vector,
            axis=1,
            arr=magi_vectors,
            offsets=offsets,
            is_magi=True,
        )

        # Drop the range column again for the returned (N, 3) vectors.
        return mago_vectors[:, :3], magi_vectors[:, :3]

    @staticmethod
    def apply_calibration_offset_single_vector(
        input_vector: np.ndarray, offsets: np.ndarray, is_magi: bool = False
    ) -> np.ndarray:
        """
        Apply the offset to a single vector.

        Parameters
        ----------
        input_vector : np.ndarray
            The input vector to offset, shape (4,) where the last element is the
            range.
        offsets : np.ndarray
            The offsets array, shape (2, 4, 3) where the first index is 0 for MAGo
            and 1 for MAGi, the second index is the range (0-3), and the third
            index is the axis (0-2).
        is_magi : bool
            Whether the input vector is from MAGi (True) or MAGo (False).

        Returns
        -------
        np.ndarray
            The offset vector, shape (4,) where the last element is unchanged.
        """
        # Offsets are in shape (sensor, range, axis); is_magi selects the sensor
        # row (False -> 0 -> MAGo, True -> 1 -> MAGi).
        updated_vector = input_vector.copy().astype(np.int64)
        rng = int(input_vector[3])
        x_y_z = input_vector[:3]
        updated_vector[:3] = x_y_z - offsets[int(is_magi), rng, :]
        return updated_vector

    def calculate_spin_offsets(self) -> xr.Dataset:
        """
        Calculate the spin offsets for the current data.

        Algorithm determined by section 7.3.5, step 6 of the algorithm document.

        This should only be called on normal mode data in the SRF frame. It
        computes the average spin during a chunk as specified in the config by
        spin_count_calibration (nominally 240 spins), then creates a dataset
        containing timestamps which correspond to the start of the validity for
        the offset.

        This is only computed for the x and y axes (indices 0 and 1 of vectors) as
        the z axis is the spinning axis in SRF and should not be affected by
        spins.

        Any invalid spins are skipped and not included.

        Returns
        -------
        spin_offsets : xr.Dataset
            The spin offsets dataset, with dimensions:
            - epoch: the timestamp where the offset becomes valid
            - x_offset: the x offset values
            - y_offset: the y offset values

        Raises
        ------
        ValueError
            If the data is not normal mode data in the SRF frame.
        """
        # This needs to only happen for NM data in SRF: reject if EITHER
        # condition is violated.
        if self.data_mode != DataMode.NORM or self.frame != ValidFrames.SRF:
            raise ValueError(
                "Spin offsets can only be calculated in NORM mode and SRF frame."
            )

        # TODO: get the spin numbers which correspond to the epoch values for output
        sc_spin_phase: np.ndarray = spin.get_spacecraft_spin_phase(self.epoch)  # type: ignore
        # mark vectors as nan where they are nan in sc_spin_phase
        vectors = self.vectors.copy().astype(np.float64)

        vectors[np.isnan(sc_spin_phase), :] = np.nan

        # TODO: currently fully skipping spins with no valid data (not including
        # them in the averaging OR IN SPIN COUNTING!) is this correct?

        # first timestamp where spin phase is less than the previous value
        # this is when the spin crosses zero
        spin_starts = np.where(np.diff(sc_spin_phase) < 0)[0] + 1

        # if the value switches from nan to a number, that is also a spin start
        # (for an invalid spin)
        nan_to_number = (
            np.where(np.isnan(sc_spin_phase[:-1]) & ~np.isnan(sc_spin_phase[1:]))[0] + 1
        )

        # find the places spins start while skipping over invalid or missing data
        # (marked as nan by get_spacecraft_spin_phase)
        spin_starts = np.sort(np.concatenate((spin_starts, nan_to_number)))

        chunk_start = 0
        offset_epochs = []
        x_avg = []
        y_avg = []
        while chunk_start < len(spin_starts):
            # Take self.spin_count_calibration number of spins and put them into
            # a chunk
            chunk_indices = spin_starts[
                chunk_start : chunk_start + self.config.spin_count_calibration + 1
            ]
            chunk_start = chunk_start + self.config.spin_count_calibration

            # If we are in the end of the chunk, just grab all remaining data
            if chunk_start >= len(spin_starts):
                chunk_indices = np.append(chunk_indices, len(self.epoch))

            chunk_vectors = self.vectors[chunk_indices[0] : chunk_indices[-1]]
            chunk_epoch = self.epoch[chunk_indices[0] : chunk_indices[-1]]

            # average the x and y axes (z is fixed, as the spin axis)
            # TODO: is z the correct axis here?
            avg_x = np.nanmean(chunk_vectors[:, 0])
            avg_y = np.nanmean(chunk_vectors[:, 1])

            # Chunks that contain no valid samples produce nan averages and are
            # dropped entirely.
            if not np.isnan(avg_x) and not np.isnan(avg_y):
                offset_epochs.append(chunk_epoch[0])
                x_avg.append(avg_x)
                y_avg.append(avg_y)

        spin_epoch_dataarray = xr.DataArray(np.array(offset_epochs))

        spin_offsets = xr.Dataset(coords={"epoch": spin_epoch_dataarray})

        spin_offsets["x_offset"] = xr.DataArray(np.array(x_avg), dims=["epoch"])
        spin_offsets["y_offset"] = xr.DataArray(np.array(y_avg), dims=["epoch"])

        return spin_offsets

    def generate_spin_offset_dataset(self) -> xr.Dataset | None:
        """
        Output the spin offsets file as a dataset.

        Returns
        -------
        xr.Dataset | None
            The spin offsets dataset. This function can be used to control the
            output structure of the offsets dataset ancillary file, without
            affecting how the offsets are used inside the class.
        """
        return self.spin_offsets

    @staticmethod
    def apply_spin_offsets(
        spin_offsets: xr.Dataset,
        epoch: np.ndarray,
        vectors: np.ndarray,
        spin_average_application_factor: np.float64,
    ) -> np.ndarray:
        """
        Apply the spin offsets to the input vectors.

        This uses the spin offsets calculated by `calculate_spin_offsets` (or
        passed in to the class in burst mode) to apply the offsets to the input
        vectors.

        For each vector, we take the offset whose validity window contains the
        vector's timestamp, multiply it by the spin_average_application_factor
        calibration value, and subtract the offset from the x and y axes.

        These spin offsets act as an automatic smoothing effect on the data over
        each series of spins.

        Parameters
        ----------
        spin_offsets : xr.Dataset
            The spin offsets dataset.
        epoch : np.ndarray
            The epoch values for the input vectors, shape (N,).
        vectors : np.ndarray
            The input vectors to apply offsets to, shape (N, 3). Can be Mago,
            magi, burst or norm. The same offsets file is applied to all.
        spin_average_application_factor : np.float64
            The spin average application factor from the configuration file.

        Returns
        -------
        np.ndarray
            The output vectors with spin offsets applied, shape (N, 3).

        Raises
        ------
        ValueError
            If spin_offsets is None.
        """
        if spin_offsets is None:
            raise ValueError("No spin offsets calculated to apply.")

        offset_epochs = spin_offsets["epoch"].data
        n_offsets = offset_epochs.shape[0]

        output_vectors = np.full(vectors.shape, FILLVAL, dtype=np.int64)

        # With n offsets there are n - 1 validity windows (each offset is valid
        # until the next offset's timestamp). A single offset covers the whole
        # time span.
        n_windows = n_offsets - 1 if n_offsets > 1 else n_offsets
        for index in range(n_windows):
            # for the first window, catch all the beginning vectors
            timestamp = epoch[0] if index == 0 else offset_epochs[index]

            # for the last window (or a single offset), catch all the ending
            # vectors
            if index + 2 >= n_offsets:
                end_timestamp = epoch[-1] + 1
            else:
                end_timestamp = offset_epochs[index + 1]

            mask = (epoch >= timestamp) & (epoch < end_timestamp)

            # never apply offsets on top of fill values
            mask = mask & (vectors[:, 0] != FILLVAL)

            if not np.any(mask):
                continue

            # TODO: should vectors be a float?
            x_offset = (
                spin_offsets["x_offset"].data[index] * spin_average_application_factor
            )
            y_offset = (
                spin_offsets["y_offset"].data[index] * spin_average_application_factor
            )

            output_vectors[mask, 0] = vectors[mask, 0] - x_offset
            output_vectors[mask, 1] = vectors[mask, 1] - y_offset

        # z is the spin axis in SRF and is passed through unchanged
        output_vectors[:, 2] = vectors[:, 2]

        return output_vectors

    @staticmethod
    def calculate_gradiometry_offsets(
        mago_vectors: np.ndarray,
        mago_epoch: np.ndarray,
        magi_vectors: np.ndarray,
        magi_epoch: np.ndarray,
    ) -> xr.Dataset:
        """
        Calculate the gradiometry offsets between MAGo and MAGi.

        This uses linear interpolation to align the MAGi data to the MAGo
        timestamps, then calculates the difference between the two sensors on
        each axis.

        All vectors must be in the DSRF frame before starting.

        Static method that can be used by i-ALiRT.

        Parameters
        ----------
        mago_vectors : np.ndarray
            The MAGo vectors, shape (N, 3).
        mago_epoch : np.ndarray
            The MAGo epoch values, shape (N,).
        magi_vectors : np.ndarray
            The MAGi vectors, shape (N, 3).
        magi_epoch : np.ndarray
            The MAGi epoch values, shape (N,).

        Returns
        -------
        xr.Dataset
            The gradiometer offsets dataset, with variables:
            - epoch: the timestamp of the MAGo data
            - gradiometer_offsets: the offset values (MAGi - MAGo) for each axis
        """
        # Resample MAGi onto the MAGo timestamps so the sensors can be
        # differenced sample-by-sample.
        aligned_magi = linear(
            magi_vectors,
            magi_epoch,
            mago_epoch,
        )

        diff = aligned_magi - mago_vectors

        grad_epoch = xr.DataArray(mago_epoch, dims=["epoch"])
        direction = xr.DataArray(["x", "y", "z"], dims=["axis"])
        grad_ds = xr.Dataset(coords={"epoch": grad_epoch, "direction": direction})
        grad_ds["gradiometer_offsets"] = xr.DataArray(diff, dims=["epoch", "direction"])

        return grad_ds

    @staticmethod
    def apply_gradiometry_offsets(
        gradiometry_offsets: xr.Dataset,
        vectors: np.ndarray,
        gradiometer_factor: np.ndarray,
    ) -> np.ndarray:
        """
        Apply the gradiometry offsets to the input vectors.

        Gradiometry epoch and vectors epoch should align (i.e. the vectors should
        be from mago).

        The vectors should be in the DSRF frame.

        Parameters
        ----------
        gradiometry_offsets : xr.Dataset
            The gradiometry offsets dataset, as output by
            calculate_gradiometry_offsets.
        vectors : np.ndarray
            The input vectors to apply offsets to, shape (N, 3). Should be on the
            same epoch as the gradiometry offsets.
        gradiometer_factor : np.ndarray
            A (3,3) element matrix to scale and rotate the gradiometer offsets.

        Returns
        -------
        np.ndarray
            The output vectors with gradiometry offsets applied, shape (N, 3).
        """
        # Scale/rotate each per-sample offset row by the gradiometer factor
        # matrix before subtracting it from the vectors.
        offset_value = gradiometry_offsets["gradiometer_offsets"].data
        offset_value = np.apply_along_axis(
            np.dot,
            1,
            offset_value,
            gradiometer_factor,
        )

        return vectors - offset_value
File without changes
@@ -6,8 +6,7 @@ import xarray as xr
6
6
  from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
7
7
  from imap_processing.mag import imap_mag_sdc_configuration_v001 as configuration
8
8
  from imap_processing.mag.constants import DataMode
9
- from imap_processing.mag.l1b.mag_l1b import calibrate_vector
10
- from imap_processing.mag.l2.mag_l2_data import MagL2
9
+ from imap_processing.mag.l2.mag_l2_data import MagL2, ValidFrames
11
10
 
12
11
 
13
12
  def mag_l2(
@@ -77,8 +76,8 @@ def mag_l2(
77
76
  always_output_mago = configuration.ALWAYS_OUTPUT_MAGO
78
77
 
79
78
  # TODO Check that the input file matches the offsets file
80
- # if not np.array_equal(input_data["epoch"].data, offsets_dataset["epoch"].data):
81
- # raise ValueError("Input file and offsets file must have the same timestamps.")
79
+ if not np.array_equal(input_data["epoch"].data, offsets_dataset["epoch"].data):
80
+ raise ValueError("Input file and offsets file must have the same timestamps.")
82
81
 
83
82
  day: np.datetime64 = day_to_process.astype("datetime64[D]")
84
83
 
@@ -86,29 +85,35 @@ def mag_l2(
86
85
  calibration_dataset, day, always_output_mago
87
86
  )
88
87
 
89
- vectors = np.apply_along_axis(
90
- func1d=calibrate_vector,
91
- axis=1,
92
- arr=input_data["vectors"].data,
93
- calibration_matrix=calibration_matrix,
88
+ cal_vectors = MagL2.apply_calibration(
89
+ vectors=input_data["vectors"].data, calibration_matrix=calibration_matrix
94
90
  )
95
-
96
- input_data = MagL2(
97
- vectors[:, :3], # level 2 vectors don't include range
98
- input_data["epoch"].data,
99
- input_data["vectors"].data[:, 3],
100
- {},
101
- np.zeros(len(input_data["epoch"].data)),
102
- np.zeros(len(input_data["epoch"].data)),
103
- mode,
91
+ # level 2 vectors don't include range
92
+ vectors = cal_vectors[:, :3]
93
+
94
+ l2_data = MagL2(
95
+ vectors=vectors,
96
+ epoch=input_data["epoch"].data,
97
+ range=input_data["vectors"].data[:, 3],
98
+ global_attributes={},
99
+ quality_flags=offsets_dataset["quality_flag"].data,
100
+ quality_bitmask=offsets_dataset["quality_bitmask"].data,
101
+ data_mode=mode,
104
102
  offsets=offsets_dataset["offsets"].data,
105
103
  timedelta=offsets_dataset["timedeltas"].data,
106
104
  )
105
+
107
106
  attributes = ImapCdfAttributes()
108
107
  attributes.add_instrument_global_attrs("mag")
109
- # temporarily point to l1c
110
108
  attributes.add_instrument_variable_attrs("mag", "l2")
111
- return [input_data.generate_dataset(attributes, day)]
109
+
110
+ # Rotate from the MAG frame into the SRF frame
111
+ l2_data.rotate_frame(ValidFrames.SRF)
112
+ imap_srf = l2_data.generate_dataset(attributes, day)
113
+ l2_data.rotate_frame(ValidFrames.DSRF)
114
+ imap_dsrf = l2_data.generate_dataset(attributes, day)
115
+
116
+ return [imap_dsrf, imap_srf]
112
117
 
113
118
 
114
119
  def retrieve_matrix_from_l2_calibration(