imap-processing 0.17.0__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (141) hide show
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
  3. imap_processing/ccsds/excel_to_xtce.py +12 -0
  4. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
  5. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +312 -274
  6. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +39 -28
  7. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1048 -183
  8. imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
  9. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +12 -0
  10. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
  11. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
  12. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
  13. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +4 -4
  14. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
  15. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
  16. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +44 -44
  17. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +77 -61
  18. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
  19. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
  20. imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
  21. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +99 -2
  22. imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
  23. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +60 -0
  24. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +99 -11
  25. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +50 -7
  26. imap_processing/cli.py +121 -44
  27. imap_processing/codice/codice_l1a.py +165 -77
  28. imap_processing/codice/codice_l1b.py +1 -1
  29. imap_processing/codice/codice_l2.py +118 -19
  30. imap_processing/codice/constants.py +1217 -1089
  31. imap_processing/decom.py +1 -4
  32. imap_processing/ena_maps/ena_maps.py +32 -25
  33. imap_processing/ena_maps/utils/naming.py +8 -2
  34. imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
  35. imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
  36. imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
  37. imap_processing/glows/ancillary/imap_glows_pipeline_settings_20250923_v002.json +54 -0
  38. imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
  39. imap_processing/glows/l1b/glows_l1b.py +99 -9
  40. imap_processing/glows/l1b/glows_l1b_data.py +350 -38
  41. imap_processing/glows/l2/glows_l2.py +11 -0
  42. imap_processing/hi/hi_l1a.py +124 -3
  43. imap_processing/hi/hi_l1b.py +154 -71
  44. imap_processing/hi/hi_l2.py +84 -51
  45. imap_processing/hi/utils.py +153 -8
  46. imap_processing/hit/l0/constants.py +3 -0
  47. imap_processing/hit/l0/decom_hit.py +5 -8
  48. imap_processing/hit/l1a/hit_l1a.py +375 -45
  49. imap_processing/hit/l1b/constants.py +5 -0
  50. imap_processing/hit/l1b/hit_l1b.py +61 -131
  51. imap_processing/hit/l2/constants.py +1 -1
  52. imap_processing/hit/l2/hit_l2.py +10 -11
  53. imap_processing/ialirt/calculate_ingest.py +219 -0
  54. imap_processing/ialirt/constants.py +32 -1
  55. imap_processing/ialirt/generate_coverage.py +201 -0
  56. imap_processing/ialirt/l0/ialirt_spice.py +5 -2
  57. imap_processing/ialirt/l0/parse_mag.py +337 -29
  58. imap_processing/ialirt/l0/process_hit.py +5 -3
  59. imap_processing/ialirt/l0/process_swapi.py +41 -25
  60. imap_processing/ialirt/l0/process_swe.py +23 -7
  61. imap_processing/ialirt/process_ephemeris.py +70 -14
  62. imap_processing/ialirt/utils/constants.py +22 -16
  63. imap_processing/ialirt/utils/create_xarray.py +42 -19
  64. imap_processing/idex/idex_constants.py +1 -5
  65. imap_processing/idex/idex_l0.py +2 -2
  66. imap_processing/idex/idex_l1a.py +2 -3
  67. imap_processing/idex/idex_l1b.py +2 -3
  68. imap_processing/idex/idex_l2a.py +130 -4
  69. imap_processing/idex/idex_l2b.py +313 -119
  70. imap_processing/idex/idex_utils.py +1 -3
  71. imap_processing/lo/l0/lo_apid.py +1 -0
  72. imap_processing/lo/l0/lo_science.py +25 -24
  73. imap_processing/lo/l1a/lo_l1a.py +44 -0
  74. imap_processing/lo/l1b/lo_l1b.py +3 -3
  75. imap_processing/lo/l1c/lo_l1c.py +116 -50
  76. imap_processing/lo/l2/lo_l2.py +29 -29
  77. imap_processing/lo/lo_ancillary.py +55 -0
  78. imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
  79. imap_processing/mag/constants.py +1 -0
  80. imap_processing/mag/l1a/mag_l1a.py +1 -0
  81. imap_processing/mag/l1a/mag_l1a_data.py +26 -0
  82. imap_processing/mag/l1b/mag_l1b.py +3 -2
  83. imap_processing/mag/l1c/interpolation_methods.py +14 -15
  84. imap_processing/mag/l1c/mag_l1c.py +23 -6
  85. imap_processing/mag/l1d/__init__.py +0 -0
  86. imap_processing/mag/l1d/mag_l1d.py +176 -0
  87. imap_processing/mag/l1d/mag_l1d_data.py +725 -0
  88. imap_processing/mag/l2/__init__.py +0 -0
  89. imap_processing/mag/l2/mag_l2.py +25 -20
  90. imap_processing/mag/l2/mag_l2_data.py +199 -130
  91. imap_processing/quality_flags.py +28 -2
  92. imap_processing/spice/geometry.py +101 -36
  93. imap_processing/spice/pointing_frame.py +1 -7
  94. imap_processing/spice/repoint.py +29 -2
  95. imap_processing/spice/spin.py +32 -8
  96. imap_processing/spice/time.py +60 -19
  97. imap_processing/swapi/l1/swapi_l1.py +10 -4
  98. imap_processing/swapi/l2/swapi_l2.py +66 -24
  99. imap_processing/swapi/swapi_utils.py +1 -1
  100. imap_processing/swe/l1b/swe_l1b.py +3 -6
  101. imap_processing/ultra/constants.py +28 -3
  102. imap_processing/ultra/l0/decom_tools.py +15 -8
  103. imap_processing/ultra/l0/decom_ultra.py +35 -11
  104. imap_processing/ultra/l0/ultra_utils.py +102 -12
  105. imap_processing/ultra/l1a/ultra_l1a.py +26 -6
  106. imap_processing/ultra/l1b/cullingmask.py +6 -3
  107. imap_processing/ultra/l1b/de.py +122 -26
  108. imap_processing/ultra/l1b/extendedspin.py +29 -2
  109. imap_processing/ultra/l1b/lookup_utils.py +424 -50
  110. imap_processing/ultra/l1b/quality_flag_filters.py +23 -0
  111. imap_processing/ultra/l1b/ultra_l1b_culling.py +356 -5
  112. imap_processing/ultra/l1b/ultra_l1b_extended.py +534 -90
  113. imap_processing/ultra/l1c/helio_pset.py +127 -7
  114. imap_processing/ultra/l1c/l1c_lookup_utils.py +256 -0
  115. imap_processing/ultra/l1c/spacecraft_pset.py +90 -15
  116. imap_processing/ultra/l1c/ultra_l1c.py +6 -0
  117. imap_processing/ultra/l1c/ultra_l1c_culling.py +85 -0
  118. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +446 -341
  119. imap_processing/ultra/l2/ultra_l2.py +0 -1
  120. imap_processing/ultra/utils/ultra_l1_utils.py +40 -3
  121. imap_processing/utils.py +3 -4
  122. {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/METADATA +3 -3
  123. {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/RECORD +126 -126
  124. imap_processing/idex/idex_l2c.py +0 -250
  125. imap_processing/spice/kernels.py +0 -187
  126. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
  127. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
  128. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
  129. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
  130. imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
  131. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  132. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  133. imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
  134. imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
  135. imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
  136. imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
  137. imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
  138. imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
  139. {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/LICENSE +0 -0
  140. {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/WHEEL +0 -0
  141. {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,725 @@
1
+ # mypy: disable-error-code="unused-ignore"
2
+ """Data classes for MAG L1D processing."""
3
+
4
+ from dataclasses import InitVar, dataclass
5
+
6
+ import numpy as np
7
+ import xarray as xr
8
+
9
+ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
10
+ from imap_processing.mag import imap_mag_sdc_configuration_v001 as configuration
11
+ from imap_processing.mag.constants import FILLVAL, DataMode
12
+ from imap_processing.mag.l1c.interpolation_methods import linear
13
+ from imap_processing.mag.l2.mag_l2 import retrieve_matrix_from_l2_calibration
14
+ from imap_processing.mag.l2.mag_l2_data import MagL2L1dBase, ValidFrames
15
+ from imap_processing.spice import spin
16
+ from imap_processing.spice.geometry import frame_transform
17
+ from imap_processing.spice.time import ttj2000ns_to_met
18
+
19
+
20
@dataclass
class MagL1dConfiguration:
    """
    Configuration for MAG L1d processing.

    Constructed from the combined ancillary dataset inputs from the L1D calibration
    files and the day we are processing.

    Parameters
    ----------
    calibration_dataset : xr.Dataset
        The combined calibration dataset from the ancillary files. Created as the
        output from MagAncillaryCombiner, which has day values pointing to the
        calibration file for the given day.
    day : np.datetime64
        The day we are processing, in np.datetime64[D] format.

    Attributes
    ----------
    calibration_offsets : np.ndarray
        The offsets for the correct day. Should be size (2, 4, 3) where the first
        index is 0 for MAGo and 1 for MAGi, the second index is the range (0-3),
        and the third index is the axis (0-2).
    mago_calibration : np.ndarray
        Calibration matrix for the correct day for MAGo. Should be size (3, 3, 4).
    magi_calibration : np.ndarray
        Calibration matrix for the correct day for MAGi. Should be size (3, 3, 4).
    spin_count_calibration : int
        The number of spins to average over when calculating spin offsets.
    quality_flag_threshold : np.float64
        The quality flag threshold for the correct day.
    spin_average_application_factor : np.float64
        The spin average application factor for the correct day.
    gradiometer_factor : np.ndarray
        The gradiometer factor for the correct day.
        NOTE(review): previously documented as size (3,), but
        apply_gradiometry_offsets dots each offset row with it as a (3, 3)
        matrix — confirm the actual ancillary shape.
    apply_gradiometry : bool
        Whether to apply gradiometry or not. Default is True.
    """

    calibration_offsets: np.ndarray
    mago_calibration: np.ndarray
    magi_calibration: np.ndarray
    spin_count_calibration: int
    quality_flag_threshold: float
    spin_average_application_factor: np.float64
    gradiometer_factor: np.ndarray
    apply_gradiometry: bool = True

    def __init__(self, calibration_dataset: xr.Dataset, day: np.datetime64) -> None:
        """
        Create a MagL1dConfiguration from a calibration dataset and day.

        Parameters
        ----------
        calibration_dataset : xr.Dataset
            The combined calibration dataset from the ancillary files. Created as
            the output from MagAncillaryCombiner, which has day values pointing to
            the calibration file for the given day.
        day : np.datetime64
            The day we are processing, in np.datetime64[D] format.
        """
        # Both sensor calibration matrices come from the same combined dataset,
        # selected per sensor by the use_mago flag.
        self.mago_calibration = retrieve_matrix_from_l2_calibration(
            calibration_dataset, day, use_mago=True
        )
        self.magi_calibration = retrieve_matrix_from_l2_calibration(
            calibration_dataset, day, use_mago=False
        )

        # Select this day's calibration values once, then read each field off
        # the single selection rather than re-selecting per field.
        day_values = calibration_dataset.sel(epoch=day)
        self.calibration_offsets = day_values["offsets"].data
        self.spin_count_calibration = day_values["number_of_spins"].data
        self.quality_flag_threshold = day_values["quality_flag_threshold"].data
        self.spin_average_application_factor = day_values[
            "spin_average_application_factor"
        ].data
        self.gradiometer_factor = day_values["gradiometer_factor"].data
102
+
103
+
104
@dataclass(kw_only=True)
class MagL1d(MagL2L1dBase):  # type: ignore[misc]
    """
    Class for handling IMAP MAG L1d data.

    When the class is created, all the methods are called in the correct order to
    run MAG L1d processing. The resulting instance can then be used to generate an
    xarray dataset with the `generate_dataset` method.

    Example:
    ```
    l1d_norm = MagL1d(
        vectors=mago_vectors,
        epoch=input_mago_norm["epoch"].data,
        range=input_mago_norm["vectors"].data[:, 3],
        global_attributes={},
        quality_flags=np.zeros(len(input_mago_norm["epoch"].data)),
        quality_bitmask=np.zeros(len(input_mago_norm["epoch"].data)),
        data_mode=DataMode.NORM,
        magi_vectors=magi_vectors,
        magi_range=input_magi_norm["vectors"].data[:, 3],
        config=config
    )
    output_dataset = l1d_norm.generate_dataset(attributes, day_to_process)
    ```

    Attributes
    ----------
    magi_vectors : np.ndarray
        The MAGi vectors, shape (N, 3).
    magi_range : np.ndarray
        The MAGi range values, shape (N,).
    magi_epoch : np.ndarray
        The MAGi epoch values, shape (N,).
    config : MagL1dConfiguration
        The configuration for L1d processing, including calibration matrices and
        offsets. This is generated from the input ancillary file and the
        MagL1dConfiguration class.
    spin_offsets : xr.Dataset, optional
        The spin offsets dataset, if already calculated. If not provided, it will be
        calculated during processing if in NORM mode.
    day : np.datetime64
        The day we are processing, in np.datetime64[D] format. This is used to
        truncate the data to exactly 24 hours.
    """

    # MAGi sensor data; the corresponding MAGo data lives on the base-class
    # fields (vectors, range, epoch).
    magi_vectors: np.ndarray
    magi_range: np.ndarray
    magi_epoch: np.ndarray
    # Per-day calibration values, built by MagL1dConfiguration.
    config: MagL1dConfiguration
    # Pre-computed spin offsets; when None, __post_init__ computes them for
    # NORM mode data (apply_spin_offsets raises if still None at apply time).
    spin_offsets: xr.Dataset | None = None
    # Init-only field: consumed by __post_init__, not stored on the instance.
    day: InitVar[np.datetime64]
156
+
157
    def __post_init__(self, day: np.datetime64) -> None:
        """
        Run all processing steps to generate L1d data.

        This updates class variables to match L1D outputs. Processing order
        matters: calibration and per-range offsets are applied first, spin
        offsets are calculated/applied in the SRF frame, then gradiometry
        offsets are calculated/applied in the DSRF frame, and finally the
        magnitude is recomputed from the corrected vectors.

        Parameters
        ----------
        day : np.datetime64
            The day we are processing, in np.datetime64[D] format. This is used to
            truncate the data to exactly 24 hours.
        """
        # set the magnitude before truncating so truncate_to_24h can operate on
        # a fully-populated instance; it is recomputed at the end.
        self.magnitude = np.zeros(self.vectors.shape[0], dtype=np.float64)  # type: ignore[has-type]
        self.truncate_to_24h(day)

        # Per-range calibration matrices and static offsets from the ancillary
        # configuration, applied to both sensors.
        self.vectors, self.magi_vectors = self._calibrate_and_offset_vectors(
            self.config.mago_calibration,
            self.config.magi_calibration,
            self.config.calibration_offsets,
        )
        # We need to be in SRF for the spin offsets application and calculation
        self.rotate_frame(ValidFrames.SRF)

        # NORM mode computes its own spin offsets; other modes must have them
        # supplied by the caller (apply_spin_offsets raises ValueError if None).
        if self.spin_offsets is None and self.data_mode == DataMode.NORM:
            self.spin_offsets = self.calculate_spin_offsets()

        # The same offsets dataset is applied to both sensors, each on its own
        # epoch axis.
        self.vectors = self.apply_spin_offsets(
            self.spin_offsets,
            self.epoch,  # type: ignore[has-type]
            self.vectors,
            self.config.spin_average_application_factor,
        )
        self.magi_vectors = self.apply_spin_offsets(
            self.spin_offsets,
            self.magi_epoch,
            self.magi_vectors,
            self.config.spin_average_application_factor,
        )

        # we need to be in DSRF for the gradiometry offsets calculation and application
        self.rotate_frame(ValidFrames.DSRF)

        if self.config.apply_gradiometry:
            # MAGi - MAGo differences on the MAGo epochs, then applied (scaled
            # by the gradiometer factor) to the MAGo vectors only.
            self.gradiometry_offsets = self.calculate_gradiometry_offsets(
                self.vectors,
                self.epoch,  # type: ignore[has-type]
                self.magi_vectors,
                self.magi_epoch,
                self.config.quality_flag_threshold,
            )
            self.vectors = self.apply_gradiometry_offsets(
                self.gradiometry_offsets, self.vectors, self.config.gradiometer_factor
            )

        # Recompute the magnitude from the fully corrected vectors.
        self.magnitude = MagL2L1dBase.calculate_magnitude(vectors=self.vectors)
        self.is_l1d = True
214
+
215
+ def generate_dataset(
216
+ self,
217
+ attribute_manager: ImapCdfAttributes,
218
+ day: np.datetime64,
219
+ ) -> xr.Dataset:
220
+ """
221
+ Generate an xarray dataset from the dataclass.
222
+
223
+ This overrides the parent method to conditionally swap MAGO/MAGI data
224
+ based on the always_output_mago configuration setting.
225
+
226
+ Parameters
227
+ ----------
228
+ attribute_manager : ImapCdfAttributes
229
+ CDF attributes object for the correct level.
230
+ day : np.datetime64
231
+ The 24 hour day to process, as a numpy datetime format.
232
+
233
+ Returns
234
+ -------
235
+ xr.Dataset
236
+ Complete dataset ready to write to CDF file.
237
+ """
238
+ always_output_mago = configuration.ALWAYS_OUTPUT_MAGO
239
+
240
+ if not always_output_mago:
241
+ # Swap vectors and epochs to use MAGI data instead of MAGO
242
+ original_vectors: np.ndarray = self.vectors.copy()
243
+ original_epoch: np.ndarray = self.epoch.copy() # type: ignore[has-type]
244
+ original_range: np.ndarray = self.range.copy() # type: ignore[has-type]
245
+
246
+ self.vectors = self.magi_vectors # type: ignore[no-redef]
247
+ self.epoch = self.magi_epoch # type: ignore[no-redef]
248
+ self.range = self.magi_range # type: ignore[no-redef]
249
+
250
+ # Call parent generate_dataset method
251
+ dataset = super().generate_dataset(attribute_manager, day)
252
+
253
+ # Restore original vectors for any further processing
254
+ self.vectors = original_vectors
255
+ self.epoch = original_epoch
256
+ self.range = original_range
257
+ else:
258
+ # Use MAGO data (default behavior)
259
+ dataset = super().generate_dataset(attribute_manager, day)
260
+
261
+ return dataset
262
+
263
+ def rotate_frame(self, end_frame: ValidFrames) -> None:
264
+ """
265
+ Rotate the vectors to the desired frame.
266
+
267
+ Rotates both the mago vectors (self.vectors) and the magi vectors
268
+ (self.magi_vectors), then set self.frame to end_frame.
269
+
270
+ Parameters
271
+ ----------
272
+ end_frame : ValidFrames
273
+ The frame to rotate to. Should be one of the ValidFrames enum.
274
+ """
275
+ start_frame = self.frame
276
+ super().rotate_frame(end_frame)
277
+ self.magi_vectors = frame_transform(
278
+ self.magi_epoch,
279
+ self.magi_vectors,
280
+ from_frame=start_frame.value,
281
+ to_frame=end_frame.value,
282
+ )
283
+
284
+ def _calibrate_and_offset_vectors(
285
+ self,
286
+ mago_calibration: np.ndarray,
287
+ magi_calibration: np.ndarray,
288
+ offsets: np.ndarray,
289
+ ) -> tuple[np.ndarray, np.ndarray]:
290
+ """
291
+ Apply calibration and initial offset calculations from the configuration file.
292
+
293
+ Parameters
294
+ ----------
295
+ mago_calibration : np.ndarray
296
+ Calibration matrix for the correct day for MAGo. Should be size (3, 3, 4).
297
+ magi_calibration : np.ndarray
298
+ Calibration matrix for the correct day for MAGi. Should be size (3, 3, 4).
299
+ offsets : np.ndarray
300
+ Offsets for the correct day. Should be size (2, 4, 3) where the first index
301
+ is 0 for MAGo and 1 for MAGi, the second index is the range (0-3), and the
302
+ third index is the axis (0-2).
303
+
304
+ Returns
305
+ -------
306
+ tuple[np.ndarray, np.ndarray]
307
+ The calibrated and offset MAGo and MAGi vectors, each shape (N, 3)
308
+ (not including range).
309
+ """
310
+ vectors_plus_range_mago = np.concatenate(
311
+ (self.vectors, self.range[:, np.newaxis]), axis=1
312
+ )
313
+
314
+ vectors_plus_range_magi = np.concatenate(
315
+ (self.magi_vectors, self.magi_range[:, np.newaxis]), axis=1
316
+ )
317
+
318
+ mago_vectors = MagL2L1dBase.apply_calibration(
319
+ vectors_plus_range_mago, mago_calibration
320
+ )
321
+ magi_vectors = MagL2L1dBase.apply_calibration(
322
+ vectors_plus_range_magi, magi_calibration
323
+ )
324
+
325
+ mago_vectors = np.apply_along_axis(
326
+ func1d=self.apply_calibration_offset_single_vector,
327
+ axis=1,
328
+ arr=mago_vectors,
329
+ offsets=offsets,
330
+ is_magi=False,
331
+ )
332
+
333
+ magi_vectors = np.apply_along_axis(
334
+ func1d=self.apply_calibration_offset_single_vector,
335
+ axis=1,
336
+ arr=magi_vectors,
337
+ offsets=offsets,
338
+ is_magi=True,
339
+ )
340
+
341
+ return mago_vectors[:, :3], magi_vectors[:, :3]
342
+
343
+ @staticmethod
344
+ def apply_calibration_offset_single_vector(
345
+ input_vector: np.ndarray, offsets: np.ndarray, is_magi: bool = False
346
+ ) -> np.ndarray:
347
+ """
348
+ Apply the offset to a single vector.
349
+
350
+ Parameters
351
+ ----------
352
+ input_vector : np.ndarray
353
+ The input vector to offset, shape (4,) where the last element is the range.
354
+ offsets : np.ndarray
355
+ The offsets array, shape (2, 4, 3) where the first index is 0 for MAGo and
356
+ 1 for MAGi, the second index is the range (0-3), and the third index is the
357
+ axis (0-2).
358
+
359
+ is_magi : bool
360
+ Whether the input vector is from MAGi (True) or MAGo (False).
361
+
362
+ Returns
363
+ -------
364
+ np.ndarray
365
+ The offset vector, shape (4,) where the last element is unchanged.
366
+ """
367
+ # Offsets are in shape (sensor, range, axis)
368
+ updated_vector = input_vector.copy()
369
+ rng = int(input_vector[3])
370
+ x_y_z = input_vector[:3]
371
+ updated_vector[:3] = x_y_z - offsets[int(is_magi), rng, :]
372
+ return updated_vector
373
+
374
+ def calculate_spin_offsets(self) -> xr.Dataset:
375
+ """
376
+ Calculate the spin offsets for the current data.
377
+
378
+ Algorithm determined by section 7.3.5, step 6 of the algorithm document.
379
+
380
+ This should only be called on normal mode data in the SRF frame. It computes
381
+ the average spin during a chunk as specified in the config by
382
+ spin_count_calibration (nominally 240 spins), then creates a dataset containing
383
+ timestamps which correspond to the start of the validity for the offset.
384
+
385
+ This is only computed for the x and y axes (indices 0 and 1 of vectors) as the
386
+ z axis is the spinning axis in SRF and should not be affected by spins.
387
+
388
+ Any invalid spins are skipped and not included.
389
+
390
+ Returns
391
+ -------
392
+ spin_offsets : xr.Dataset
393
+ The spin offsets dataset, with dimensions:
394
+ - epoch: the timestamp where the offset becomes valid
395
+ - x_offset: the x offset values
396
+ - y_offset: the y offset values
397
+ """
398
+ # This needs to only happen for NM data
399
+ if self.data_mode != DataMode.NORM and self.frame != ValidFrames.SRF:
400
+ raise ValueError(
401
+ "Spin offsets can only be calculated in NORM mode and SRF frame."
402
+ )
403
+
404
+ epoch_met = ttj2000ns_to_met(self.epoch)
405
+ sc_spin_phase = spin.get_spacecraft_spin_phase(epoch_met)
406
+ # mark vectors as nan where they are nan in sc_spin_phase
407
+ vectors = self.vectors.copy().astype(np.float64)
408
+
409
+ vectors[np.isnan(sc_spin_phase), :] = np.nan
410
+
411
+ # first timestamp where spin phase is less than the previous value
412
+ # this is when the spin crosses zero
413
+ spin_starts = np.where(np.diff(sc_spin_phase) < 0)[0] + 1
414
+
415
+ # if the value switches from nan to a number, or from a number to nan, that
416
+ # is also a spin start
417
+ nan_to_number = np.where(np.diff(np.isnan(sc_spin_phase)) != 0)[0] + 1
418
+
419
+ # find the places spins start while skipping over invalid or missing data
420
+ # (marked as nan by get_spacecraft_spin_phase)
421
+ spin_starts = np.sort(np.concatenate((spin_starts, nan_to_number)))
422
+
423
+ # Get the expected spin period from the spin table
424
+ # Convert to nanoseconds to match epoch
425
+ spin_data = spin.get_spin_data()
426
+ # Use the median spin period as the expected value
427
+ expected_spin = np.median(spin_data["spin_period_sec"]) * 1e9
428
+
429
+ paired_nans = nan_to_number.reshape(-1, 2)
430
+
431
+ for start_of_gap, end_of_gap in paired_nans:
432
+ # in nan_to_number, we have the start and end for every nan gap
433
+ # if this gap spans more than 1 spin period, we need to insert
434
+ # additional spin_starts into spin_starts.
435
+
436
+ gap_start_time = self.epoch[start_of_gap]
437
+ gap_end_time = self.epoch[end_of_gap]
438
+
439
+ # Calculate the number of spins in this gap
440
+ number_of_spins = int((gap_end_time - gap_start_time) // expected_spin)
441
+ if number_of_spins > 1:
442
+ # Insert new spin starts into spin_starts
443
+ for i in range(1, number_of_spins):
444
+ estimated_start = gap_start_time + i * expected_spin
445
+ new_spin_index = (np.abs(self.epoch - estimated_start)).argmin()
446
+
447
+ spin_starts = np.append(spin_starts, new_spin_index)
448
+
449
+ # Now spin_starts contains all the indices where spins begin, including
450
+ # estimating skipped or missing spins.
451
+ spin_starts = np.sort(spin_starts)
452
+
453
+ chunk_start = 0
454
+ offset_epochs = []
455
+ x_avg_calcs: list[np.float64] = []
456
+ y_avg_calcs: list[np.float64] = []
457
+ validity_start_times = []
458
+ validity_end_times = []
459
+ start_spin_counters = []
460
+ end_spin_counters = []
461
+
462
+ while chunk_start < len(spin_starts):
463
+ # Take self.spin_count_calibration number of spins and put them into a chunk
464
+ chunk_indices = spin_starts[
465
+ chunk_start : chunk_start + self.config.spin_count_calibration + 1
466
+ ]
467
+ chunk_start_idx = chunk_start
468
+
469
+ chunk_vectors = self.vectors[chunk_indices[0] : chunk_indices[-1]]
470
+ chunk_epoch = self.epoch[chunk_indices[0] : chunk_indices[-1]]
471
+
472
+ # Check if more than half of the chunk data is NaN before processing
473
+ x_valid_count: int = int(np.sum(~np.isnan(chunk_vectors[:, 0])))
474
+ y_valid_count: int = int(np.sum(~np.isnan(chunk_vectors[:, 1])))
475
+ total_points = len(chunk_vectors)
476
+
477
+ # average the x and y axes (z is fixed, as the spin axis)
478
+ avg_x = np.nanmean(chunk_vectors[:, 0])
479
+ avg_y = np.nanmean(chunk_vectors[:, 1])
480
+
481
+ # Skip chunk if more than half of x or y data is NaN, or if we have less
482
+ # than half a spin.
483
+ # in this case, we should reuse the previous averages.
484
+ if (
485
+ x_valid_count <= total_points / 2
486
+ or y_valid_count <= total_points / 2
487
+ or total_points <= self.config.spin_count_calibration / 2
488
+ ):
489
+ avg_x = x_avg_calcs[-1] if x_avg_calcs else np.float64(FILLVAL)
490
+ avg_y = y_avg_calcs[-1] if y_avg_calcs else np.float64(FILLVAL)
491
+
492
+ if not np.isnan(avg_x) and not np.isnan(avg_y):
493
+ offset_epochs.append(chunk_epoch[0])
494
+ x_avg_calcs.append(avg_x)
495
+ y_avg_calcs.append(avg_y)
496
+
497
+ # Add validity time range for this chunk
498
+ validity_start_times.append(chunk_epoch[0])
499
+ validity_end_times.append(chunk_epoch[-1])
500
+
501
+ # Add spin counter information
502
+ start_spin_counters.append(chunk_start_idx)
503
+ end_spin_counters.append(
504
+ min(
505
+ chunk_start_idx + self.config.spin_count_calibration - 1,
506
+ len(spin_starts) - 1,
507
+ )
508
+ )
509
+
510
+ chunk_start = chunk_start + self.config.spin_count_calibration
511
+
512
+ spin_epoch_dataarray = xr.DataArray(np.array(offset_epochs))
513
+
514
+ spin_offsets = xr.Dataset(coords={"epoch": spin_epoch_dataarray})
515
+
516
+ spin_offsets["x_offset"] = xr.DataArray(np.array(x_avg_calcs), dims=["epoch"])
517
+ spin_offsets["y_offset"] = xr.DataArray(np.array(y_avg_calcs), dims=["epoch"])
518
+ spin_offsets["validity_start_time"] = xr.DataArray(
519
+ np.array(validity_start_times), dims=["epoch"]
520
+ )
521
+ spin_offsets["validity_end_time"] = xr.DataArray(
522
+ np.array(validity_end_times), dims=["epoch"]
523
+ )
524
+ spin_offsets["start_spin_counter"] = xr.DataArray(
525
+ np.array(start_spin_counters), dims=["epoch"]
526
+ )
527
+ spin_offsets["end_spin_counter"] = xr.DataArray(
528
+ np.array(end_spin_counters), dims=["epoch"]
529
+ )
530
+
531
+ return spin_offsets
532
+
533
    def generate_spin_offset_dataset(self) -> xr.Dataset | None:
        """
        Output the spin offsets file as a dataset.

        Returns
        -------
        xr.Dataset | None
            The spin offsets dataset (None if offsets were never calculated or
            supplied). This function can be used to control the output
            structure of the offsets dataset ancillary file, without affecting
            how the offsets are used inside the class.
        """
        return self.spin_offsets
545
+
546
    @staticmethod
    def apply_spin_offsets(
        spin_offsets: xr.Dataset,
        epoch: np.ndarray,
        vectors: np.ndarray,
        spin_average_application_factor: np.float64,
    ) -> np.ndarray:
        """
        Apply the spin offsets to the input vectors.

        This uses the spin offsets calculated by `calculate_spin_offsets` (or passed in
        to the class in burst mode) to apply the offsets to the input vectors.

        For each vector, we take the nearest offset, multiply it by the
        spin_average_application_factor calibration value, and subtract the offset from
        the appropriate axis.

        These spin offsets act as an automatic smoothing effect on the data over each
        series of spins.

        Parameters
        ----------
        spin_offsets : xr.Dataset
            The spin offsets dataset.
        epoch : np.ndarray
            The epoch values for the input vectors, shape (N,).
        vectors : np.ndarray
            The input vectors to apply offsets to, shape (N, 3). Can be Mago, magi,
            burst or norm. The same offsets file is applied to all.
        spin_average_application_factor : np.float64
            The spin average application factor from the configuration file.

        Returns
        -------
        np.ndarray
            The output vectors with spin offsets applied, shape (N, 3).

        Raises
        ------
        ValueError
            If ``spin_offsets`` is None.
        """
        if spin_offsets is None:
            raise ValueError("No spin offsets calculated to apply.")

        # Start from fill values: x/y stay FILLVAL wherever no offset interval
        # covers them or the input vector itself is fill.
        output_vectors = np.full(vectors.shape, FILLVAL, dtype=np.float64)

        # Each offset entry is applied on the half-open interval
        # [its epoch, next entry's epoch).
        # NOTE(review): the loop runs over entries 0..n-2 only; the final
        # offset entry is never applied — its time span is instead covered by
        # entry n-2, whose interval is extended to the end of the data below.
        # With a single-entry offsets dataset, no x/y offsets are applied at
        # all. Confirm this is intended.
        for index in range(spin_offsets["epoch"].data.shape[0] - 1):
            timestamp = spin_offsets["epoch"].data[index]
            # for the first timestamp, catch all the beginning vectors
            if index == 0:
                timestamp = epoch[0]

            end_timestamp = spin_offsets["epoch"].data[index + 1]

            # for the last timestamp, catch all the ending vectors
            if index + 2 >= len(spin_offsets["epoch"].data):
                end_timestamp = epoch[-1] + 1

            mask = (epoch >= timestamp) & (epoch < end_timestamp)

            # Leave fill-value input vectors untouched (detected on the x axis).
            mask = mask & (vectors[:, 0] != FILLVAL)

            if not np.any(mask):
                continue

            # Scale the stored chunk averages by the configured application
            # factor before subtracting.
            x_offset = (
                spin_offsets["x_offset"].data[index] * spin_average_application_factor
            )
            y_offset = (
                spin_offsets["y_offset"].data[index] * spin_average_application_factor
            )

            output_vectors[mask, 0] = vectors[mask, 0] - x_offset
            output_vectors[mask, 1] = vectors[mask, 1] - y_offset

        # z is the spin axis in SRF and is passed through unchanged.
        output_vectors[:, 2] = vectors[:, 2]

        return output_vectors
620
+
621
+ @staticmethod
622
+ def calculate_gradiometry_offsets(
623
+ mago_vectors: np.ndarray,
624
+ mago_epoch: np.ndarray,
625
+ magi_vectors: np.ndarray,
626
+ magi_epoch: np.ndarray,
627
+ quality_flag_threshold: float = np.inf,
628
+ ) -> xr.Dataset:
629
+ """
630
+ Calculate the gradiometry offsets between MAGo and MAGi.
631
+
632
+ This uses linear interpolation to align the MAGi data to the MAGo timestamps,
633
+ then calculates the difference between the two sensors on each axis.
634
+
635
+ All vectors must be in the DSRF frame before starting.
636
+
637
+ Static method that can be used by i-ALiRT.
638
+
639
+ Parameters
640
+ ----------
641
+ mago_vectors : np.ndarray
642
+ The MAGo vectors, shape (N, 3).
643
+ mago_epoch : np.ndarray
644
+ The MAGo epoch values, shape (N,).
645
+ magi_vectors : np.ndarray
646
+ The MAGi vectors, shape (N, 3).
647
+ magi_epoch : np.ndarray
648
+ The MAGi epoch values, shape (N,).
649
+ quality_flag_threshold : np.float64, optional
650
+ Threshold for quality flags. If the magnitude of gradiometer offset
651
+ exceeds this threshold, quality flag will be set. Default is np.inf
652
+ (no quality flags set).
653
+
654
+ Returns
655
+ -------
656
+ xr.Dataset
657
+ The gradiometer offsets dataset, with variables:
658
+ - epoch: the timestamp of the MAGo data
659
+ - gradiometer_offsets: the offset values (MAGi - MAGo) for each axis
660
+ - gradiometer_offset_magnitude: magnitude of the offset vector
661
+ - quality_flags: quality flags (1 if magnitude > threshold, 0 otherwise)
662
+ """
663
+ aligned_magi = linear(
664
+ magi_vectors,
665
+ magi_epoch,
666
+ mago_epoch,
667
+ )
668
+
669
+ diff = aligned_magi - mago_vectors
670
+
671
+ # Calculate magnitude of gradiometer offset for each vector
672
+ magnitude = np.linalg.norm(diff, axis=1)
673
+
674
+ # Set quality flags: 0 = good data (below threshold), 1 = bad data
675
+ quality_flags = (magnitude > quality_flag_threshold).astype(int)
676
+
677
+ grad_epoch = xr.DataArray(mago_epoch, dims=["epoch"])
678
+ direction = xr.DataArray(["x", "y", "z"], dims=["axis"])
679
+ grad_ds = xr.Dataset(coords={"epoch": grad_epoch, "direction": direction})
680
+ grad_ds["gradiometer_offsets"] = xr.DataArray(diff, dims=["epoch", "direction"])
681
+ grad_ds["gradiometer_offset_magnitude"] = xr.DataArray(
682
+ magnitude, dims=["epoch"]
683
+ )
684
+ grad_ds["quality_flags"] = xr.DataArray(quality_flags, dims=["epoch"])
685
+
686
+ return grad_ds
687
+
688
+ @staticmethod
689
+ def apply_gradiometry_offsets(
690
+ gradiometry_offsets: xr.Dataset,
691
+ vectors: np.ndarray,
692
+ gradiometer_factor: np.ndarray,
693
+ ) -> np.ndarray:
694
+ """
695
+ Apply the gradiometry offsets to the input vectors.
696
+
697
+ Gradiometry epoch and vectors epoch should align (i.e. the vectors should be
698
+ from mago).
699
+
700
+ The vectors should be in the DSRF frame.
701
+
702
+ Parameters
703
+ ----------
704
+ gradiometry_offsets : xr.Dataset
705
+ The gradiometry offsets dataset, as output by calculate_gradiometry_offsets.
706
+ vectors : np.ndarray
707
+ The input vectors to apply offsets to, shape (N, 3). Should be on the same
708
+ epoch as the gradiometry offsets.
709
+ gradiometer_factor : np.ndarray
710
+ A (3,3) element matrix to scale and rotate the gradiometer offsets.
711
+
712
+ Returns
713
+ -------
714
+ np.ndarray
715
+ The output vectors with gradiometry offsets applied, shape (N, 3).
716
+ """
717
+ offset_value = gradiometry_offsets["gradiometer_offsets"].data
718
+ offset_value = np.apply_along_axis(
719
+ np.dot,
720
+ 1,
721
+ offset_value,
722
+ gradiometer_factor,
723
+ )
724
+
725
+ return vectors - offset_value