imap-processing 0.16.2__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/ccsds/excel_to_xtce.py +12 -0
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +35 -0
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +35 -0
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +8 -8
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
- imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +398 -415
- imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +9 -9
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +233 -57
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +19 -0
- imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +20 -0
- imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml +39 -0
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +168 -0
- imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +103 -2
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
- imap_processing/cdf/utils.py +7 -1
- imap_processing/cli.py +42 -13
- imap_processing/codice/codice_l1a.py +125 -78
- imap_processing/codice/codice_l1b.py +1 -1
- imap_processing/codice/codice_l2.py +0 -9
- imap_processing/codice/constants.py +481 -498
- imap_processing/hi/hi_l1a.py +4 -4
- imap_processing/hi/hi_l1b.py +2 -2
- imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +218 -38
- imap_processing/hit/hit_utils.py +2 -2
- imap_processing/hit/l0/decom_hit.py +4 -3
- imap_processing/hit/l1a/hit_l1a.py +64 -24
- imap_processing/hit/l1b/constants.py +5 -0
- imap_processing/hit/l1b/hit_l1b.py +18 -16
- imap_processing/hit/l2/constants.py +1 -1
- imap_processing/hit/l2/hit_l2.py +4 -4
- imap_processing/ialirt/constants.py +21 -0
- imap_processing/ialirt/generate_coverage.py +188 -0
- imap_processing/ialirt/l0/parse_mag.py +62 -5
- imap_processing/ialirt/l0/process_swapi.py +1 -1
- imap_processing/ialirt/l0/process_swe.py +23 -7
- imap_processing/ialirt/utils/constants.py +22 -16
- imap_processing/ialirt/utils/create_xarray.py +42 -19
- imap_processing/idex/idex_constants.py +8 -5
- imap_processing/idex/idex_l2b.py +554 -58
- imap_processing/idex/idex_l2c.py +30 -196
- imap_processing/lo/l0/lo_apid.py +1 -0
- imap_processing/lo/l0/lo_star_sensor.py +48 -0
- imap_processing/lo/l1a/lo_l1a.py +74 -30
- imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
- imap_processing/mag/constants.py +1 -0
- imap_processing/mag/l0/decom_mag.py +9 -6
- imap_processing/mag/l0/mag_l0_data.py +46 -0
- imap_processing/mag/l1d/__init__.py +0 -0
- imap_processing/mag/l1d/mag_l1d.py +133 -0
- imap_processing/mag/l1d/mag_l1d_data.py +588 -0
- imap_processing/mag/l2/__init__.py +0 -0
- imap_processing/mag/l2/mag_l2.py +25 -20
- imap_processing/mag/l2/mag_l2_data.py +191 -130
- imap_processing/quality_flags.py +20 -2
- imap_processing/spice/geometry.py +25 -3
- imap_processing/spice/pointing_frame.py +1 -1
- imap_processing/spice/spin.py +4 -0
- imap_processing/spice/time.py +51 -0
- imap_processing/swapi/l1/swapi_l1.py +12 -2
- imap_processing/swapi/l2/swapi_l2.py +59 -14
- imap_processing/swapi/swapi_utils.py +1 -1
- imap_processing/swe/l1b/swe_l1b.py +11 -4
- imap_processing/swe/l2/swe_l2.py +111 -17
- imap_processing/ultra/constants.py +49 -1
- imap_processing/ultra/l0/decom_tools.py +28 -14
- imap_processing/ultra/l0/decom_ultra.py +225 -15
- imap_processing/ultra/l0/ultra_utils.py +281 -8
- imap_processing/ultra/l1a/ultra_l1a.py +77 -8
- imap_processing/ultra/l1b/cullingmask.py +3 -3
- imap_processing/ultra/l1b/de.py +53 -15
- imap_processing/ultra/l1b/extendedspin.py +26 -2
- imap_processing/ultra/l1b/lookup_utils.py +171 -50
- imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
- imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
- imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
- imap_processing/ultra/l1c/helio_pset.py +54 -7
- imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
- imap_processing/ultra/l1c/ultra_l1c.py +2 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
- imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +3 -3
- imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
- imap_processing/utils.py +20 -42
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/RECORD +95 -103
- imap_processing/lo/l0/data_classes/star_sensor.py +0 -98
- imap_processing/lo/l0/utils/lo_base.py +0 -57
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
- imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
- imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
- imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
- imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
- imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
- imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
- imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
- imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
- imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
imap_processing/mag/l2/mag_l2_data.py CHANGED
@@ -7,7 +7,9 @@ import numpy as np
 import xarray as xr
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.mag.constants import DataMode
+from imap_processing.mag.constants import FILLVAL, DataMode
+from imap_processing.mag.l1b.mag_l1b import calibrate_vector
+from imap_processing.spice.geometry import SpiceFrame, frame_transform
 from imap_processing.spice.time import (
     et_to_ttj2000ns,
     str_to_et,
@@ -17,26 +19,29 @@ from imap_processing.spice.time import (
 class ValidFrames(Enum):
     """SPICE reference frames for output."""
 
-
-
-
-
+    MAG = SpiceFrame.IMAP_MAG
+    DSRF = SpiceFrame.IMAP_DPS
+    SRF = SpiceFrame.IMAP_SPACECRAFT
+    # TODO: include RTN and GSE as valid frames
 
 
-@dataclass
-class MagL2:
+@dataclass(kw_only=True)
+class MagL2L1dBase:
     """
-
+    Base class for MAG L2 and L1D data.
 
-    Since
+    Since these two data levels output identical files, and share some methods, this
+    superclass captures the tools in common, while allowing each subclass to define
+    individual attributes and algorithms.
 
-
+    May also be extended for I-ALiRT.
 
     Attributes
     ----------
     vectors: np.ndarray
         Magnetic field vectors of size (n, 3) where n is the number of vectors.
-        Describes (x, y, z) components of the magnetic field.
+        Describes (x, y, z) components of the magnetic field. This field is the output
+        vectors, which are nominally from the MAGo sensor.
     epoch: np.ndarray
         Time of each vector in J2000 seconds. Should be of length n.
     range: np.ndarray
@@ -48,10 +53,8 @@ class MagL2:
     quality_bitmask: np.ndarray
         Quality bitmask for each vector. Should be of length n. Copied from offset
        file in L2, marked as good always in L1D.
-
-
-    is_l1d: bool
-        Flag to indicate if the data is L1D. Defaults to False.
+    frame:
+        The reference frame of the input vectors. Starts as the MAG instrument frame.
     """
 
     vectors: np.ndarray
@@ -62,120 +65,12 @@ class MagL2:
     quality_bitmask: np.ndarray
     data_mode: DataMode
     magnitude: np.ndarray = field(init=False)
-
-    offsets: InitVar[np.ndarray] = None
-    timedelta: InitVar[np.ndarray] = None
-
-    def __post_init__(self, offsets: np.ndarray, timedelta: np.ndarray) -> None:
-        """
-        Calculate the magnitude of the vectors after initialization.
-
-        Parameters
-        ----------
-        offsets : np.ndarray
-            Offsets to apply to the vectors. Should be of shape (n, 3) where n is the
-            number of vectors.
-        timedelta : np.ndarray
-            Time deltas to shift the timestamps by. Should be of length n.
-            Given in seconds.
-        """
-        if offsets is not None:
-            self.vectors = self.apply_offsets(self.vectors, offsets)
-        if timedelta is not None:
-            self.epoch = self.shift_timestamps(self.epoch, timedelta)
-
-        self.magnitude = self.calculate_magnitude(self.vectors)
-
-    @staticmethod
-    def calculate_magnitude(
-        vectors: np.ndarray,
-    ) -> np.ndarray:
-        """
-        Given a list of vectors (x, y, z), calculate the magnitude of each vector.
-
-        For an input list of vectors of size (n, 3) returns a list of magnitudes of
-        size (n,).
-
-        Parameters
-        ----------
-        vectors : np.ndarray
-            Array of vectors to calculate the magnitude of.
-
-        Returns
-        -------
-        np.ndarray
-            Array of magnitudes of the input vectors.
-        """
-        return np.linalg.norm(vectors, axis=1)  # type: ignore
-
-    @staticmethod
-    def apply_offsets(vectors: np.ndarray, offsets: np.ndarray) -> np.ndarray:
-        """
-        Apply the offsets to the vectors by adding them together.
-
-        These offsets are used to shift the vectors in the x, y, and z directions.
-        They can either be provided through a custom offsets datafile, or calculated
-        using a gradiometry algorithm.
-
-        Parameters
-        ----------
-        vectors : np.ndarray
-            Array of vectors to apply the offsets to. Should be of shape (n, 3) where n
-            is the number of vectors.
-        offsets : np.ndarray
-            Array of offsets to apply to the vectors. Should be of shape (n, 3) where n
-            is the number of vectors.
-
-        Returns
-        -------
-        np.ndarray
-            Array of vectors with offsets applied. Should be of shape (n, 3).
-        """
-        if vectors.shape[0] != offsets.shape[0]:
-            raise ValueError("Vectors and offsets must have the same length.")
-
-        offset_vectors: np.ndarray = vectors[:, :3] + offsets
-
-        # TODO: CDF files don't have NaNs. Emailed MAG to ask what this will look like.
-        # Any values where offsets is nan must also be nan
-        offset_vectors[np.isnan(offsets).any(axis=1)] = np.nan
-
-        return offset_vectors
-
-    @staticmethod
-    def shift_timestamps(epoch: np.ndarray, timedelta: np.ndarray) -> np.ndarray:
-        """
-        Shift the timestamps by the given timedelta.
-
-        If timedelta is positive, the epochs are shifted forward in time.
-
-        Parameters
-        ----------
-        epoch : np.ndarray
-            Array of timestamps to shift. Should be of length n.
-        timedelta : np.ndarray
-            Array of time deltas to shift the timestamps by. Should be the same length
-            as epoch. Given in seconds.
-
-        Returns
-        -------
-        np.ndarray
-            Shifted timestamps.
-        """
-        if epoch.shape[0] != timedelta.shape[0]:
-            raise ValueError(
-                "Input Epoch and offsets timedeltas must be the same length."
-            )
-
-        timedelta_ns = timedelta * 1e9
-        shifted_timestamps = epoch + timedelta_ns
-        return shifted_timestamps
+    frame: ValidFrames = ValidFrames.MAG
 
     def generate_dataset(
         self,
         attribute_manager: ImapCdfAttributes,
         day: np.datetime64,
-        frame: ValidFrames = ValidFrames.dsrf,
     ) -> xr.Dataset:
         """
         Generate an xarray dataset from the dataclass.
@@ -189,8 +84,6 @@ class MagL2:
             CDF attributes object for the correct level.
         day : np.datetime64
             The 24 hour day to process, as a numpy datetime format.
-        frame : ValidFrames
-            SPICE reference frame to rotate the data into.
 
         Returns
         -------
@@ -199,7 +92,9 @@ class MagL2:
         """
         self.truncate_to_24h(day)
 
-        logical_source_id =
+        logical_source_id = (
+            f"imap_mag_l2_{self.data_mode.value.lower()}-{self.frame.name.lower()}"
+        )
         direction = xr.DataArray(
             np.arange(3),
             name="direction",
@@ -242,8 +137,8 @@ class MagL2:
         )
 
         quality_bitmask = xr.DataArray(
-            self.
-            name="
+            self.quality_bitmask,
+            name="quality_bitmask",
             dims=["epoch"],
             attrs=attribute_manager.get_variable_attributes("qf"),
         )
@@ -298,7 +193,6 @@ class MagL2:
         """
         if self.epoch.shape[0] != self.vectors.shape[0]:
             raise ValueError("Timestamps and vectors are not the same shape!")
-
         start_timestamp_j2000 = et_to_ttj2000ns(str_to_et(str(timestamp)))
         end_timestamp_j2000 = et_to_ttj2000ns(
             str_to_et(str(timestamp + np.timedelta64(1, "D")))
@@ -313,3 +207,170 @@ class MagL2:
         self.magnitude = self.magnitude[day_start_index:day_end_index]
         self.quality_flags = self.quality_flags[day_start_index:day_end_index]
         self.quality_bitmask = self.quality_bitmask[day_start_index:day_end_index]
+
+    @staticmethod
+    def calculate_magnitude(
+        vectors: np.ndarray,
+    ) -> np.ndarray:
+        """
+        Given a list of vectors (x, y, z), calculate the magnitude of each vector.
+
+        For an input list of vectors of size (n, 3) returns a list of magnitudes of
+        size (n,).
+
+        Parameters
+        ----------
+        vectors : np.ndarray
+            Array of vectors to calculate the magnitude of.
+
+        Returns
+        -------
+        np.ndarray
+            Array of magnitudes of the input vectors.
+        """
+        return np.linalg.norm(vectors, axis=1)
+
+    @staticmethod
+    def apply_calibration(
+        vectors: np.ndarray, calibration_matrix: np.ndarray
+    ) -> np.ndarray:
+        """
+        Apply the calibration matrix to the vectors.
+
+        This works by repeatedly calling the function calibrate_vector on the vectors
+        input.
+
+        Parameters
+        ----------
+        vectors : np.ndarray
+            Array of vectors to apply the calibration to, including x,y,z and range.
+            Should be of shape (n, 4) where n is the number of vectors.
+        calibration_matrix : np.ndarray
+            Calibration matrix to apply to the vectors. Should be of shape (3, 3, 4).
+
+        Returns
+        -------
+        np.ndarray
+            Array of calibrated vectors. Should be of shape (n, 4).
+        """
+        calibrated_vectors = np.apply_along_axis(
+            func1d=calibrate_vector,
+            axis=1,
+            arr=vectors,
+            calibration_matrix=calibration_matrix,
+        )
+
+        return calibrated_vectors
+
+    @staticmethod
+    def shift_timestamps(epoch: np.ndarray, timedelta: np.ndarray) -> np.ndarray:
+        """
+        Shift the timestamps by the given timedelta.
+
+        If timedelta is positive, the epochs are shifted forward in time.
+
+        Parameters
+        ----------
+        epoch : np.ndarray
+            Array of timestamps to shift. Should be of length n.
+        timedelta : np.ndarray
+            Array of time deltas to shift the timestamps by. Should be the same length
+            as epoch. Given in seconds.
+
+        Returns
+        -------
+        np.ndarray
+            Shifted timestamps.
+        """
+        if epoch.shape[0] != timedelta.shape[0]:
+            raise ValueError(
+                "Input Epoch and offsets timedeltas must be the same length."
+            )
+
+        timedelta_ns = timedelta * 1e9
+        shifted_timestamps = epoch + timedelta_ns
+        return shifted_timestamps
+
+    def rotate_frame(self, end_frame: ValidFrames) -> None:
+        """
+        Rotate the vector data in the class to the output frame.
+
+        Parameters
+        ----------
+        end_frame : ValidFrames
+            The frame to rotate the data to. Must be one of the ValidFrames enum
+            values.
+        """
+        self.vectors = frame_transform(
+            self.epoch,
+            self.vectors,
+            from_frame=self.frame.value,
+            to_frame=end_frame.value,
+        )
+        self.frame = end_frame
+
+
+@dataclass(kw_only=True)
+class MagL2(MagL2L1dBase):
+    """
+    Dataclass for MAG L2 data.
+
+    Since L2 and L1D should have the same structure, this can be used for either level.
+
+    Some of the methods are also static, so they can be used in i-ALiRT processing.
+    """
+
+    offsets: InitVar[np.ndarray] = None
+    timedelta: InitVar[np.ndarray] = None
+
+    def __post_init__(self, offsets: np.ndarray, timedelta: np.ndarray) -> None:
+        """
+        Calculate the magnitude of the vectors after initialization.
+
+        Parameters
+        ----------
+        offsets : np.ndarray
+            Offsets to apply to the vectors. Should be of shape (n, 3) where n is the
+            number of vectors.
+        timedelta : np.ndarray
+            Time deltas to shift the timestamps by. Should be of length n.
+            Given in seconds.
+        """
+        if offsets is not None:
+            self.vectors = self.apply_offsets(self.vectors, offsets)
+        if timedelta is not None:
+            self.epoch = self.shift_timestamps(self.epoch, timedelta)
+
+        self.magnitude = self.calculate_magnitude(self.vectors)
+
+    @staticmethod
+    def apply_offsets(vectors: np.ndarray, offsets: np.ndarray) -> np.ndarray:
+        """
+        Apply the offsets to the vectors by adding them together.
+
+        These offsets are used to shift the vectors in the x, y, and z directions.
+        They can either be provided through a custom offsets datafile, or calculated
+        using a gradiometry algorithm.
+
+        Parameters
+        ----------
+        vectors : np.ndarray
+            Array of vectors to apply the offsets to. Should be of shape (n, 3) where n
+            is the number of vectors.
+        offsets : np.ndarray
+            Array of offsets to apply to the vectors. Should be of shape (n, 3) where n
+            is the number of vectors.
+
+        Returns
+        -------
+        np.ndarray
+            Array of vectors with offsets applied. Should be of shape (n, 3).
+        """
+        if vectors.shape[0] != offsets.shape[0]:
+            raise ValueError("Vectors and offsets must have the same length.")
+
+        offset_vectors: np.ndarray = vectors + offsets
+
+        # Any values where offsets is FILLVAL must also be FILLVAL
+        offset_vectors[(offsets == FILLVAL).any(axis=1), :] = FILLVAL
+        return offset_vectors
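For orientation, the refactor above moves calculate_magnitude onto the shared base class and changes apply_offsets so that rows with fill-valued offsets propagate the FILLVAL constant instead of NaN. The following is a minimal, self-contained numpy sketch of that arithmetic; the FILLVAL value here is an assumed placeholder, not the constant defined in imap_processing.mag.constants.

```python
import numpy as np

FILLVAL = -1.0e31  # assumed placeholder; the real MAG fill value lives in the package constants


def calculate_magnitude(vectors: np.ndarray) -> np.ndarray:
    """Per-row |B| for an (n, 3) array of field vectors."""
    return np.linalg.norm(vectors, axis=1)


def apply_offsets(vectors: np.ndarray, offsets: np.ndarray) -> np.ndarray:
    """Add (n, 3) offsets to (n, 3) vectors; any row with a fill offset becomes all fill."""
    if vectors.shape[0] != offsets.shape[0]:
        raise ValueError("Vectors and offsets must have the same length.")
    out = vectors + offsets
    out[(offsets == FILLVAL).any(axis=1), :] = FILLVAL
    return out


vectors = np.array([[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]])
offsets = np.array([[0.1, -0.1, 0.0], [FILLVAL, 0.0, 0.0]])
print(calculate_magnitude(vectors))    # [3. 5.]
print(apply_offsets(vectors, offsets))  # second row becomes FILLVAL in every component
```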
imap_processing/quality_flags.py CHANGED
@@ -37,6 +37,14 @@ class ENAFlags(FlagNameMixin):
     BADSPIN = 2**2  # bit 2, Bad spin
 
 
+class ImapDEUltraFlags(FlagNameMixin):
+    """IMAP Ultra flags."""
+
+    NONE = CommonFlags.NONE
+    FOV = 2**0  # bit 0
+    PHCORR = 2**1  # bit 1
+
+
 class ImapHkUltraFlags(FlagNameMixin):
     """IMAP Ultra flags."""
 
@@ -53,14 +61,24 @@ class ImapAttitudeUltraFlags(FlagNameMixin):
     NONE = CommonFlags.NONE
     SPINRATE = 2**0  # bit 0
     AUXMISMATCH = 2**1  # bit 1  # aux packet does not match Universal Spin Table
+    SPINPHASE = 2**2  # bit 2  # spin phase flagged by Universal Spin Table
+    SPINPERIOD = 2**3  # bit 3  # spin period flagged by Universal Spin Table
 
 
 class ImapRatesUltraFlags(FlagNameMixin):
     """IMAP Ultra Rates flags."""
 
     NONE = CommonFlags.NONE
-
-
+    HIGHRATES = 2**0  # bit 0
+    FIRSTSPIN = 2**1  # bit 1
+    LASTSPIN = 2**2  # bit 2
+    PARTIALSPIN = 2**2  # bit 2
+
+
+class ImapInstrumentUltraFlags(FlagNameMixin):
+    """IMAP Ultra flags using other instruments."""
+
+    NONE = CommonFlags.NONE
 
 
 class ImapLoFlags(FlagNameMixin):
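The new Ultra flag classes above define power-of-two bit values that are combined into per-event quality bitmasks. A minimal sketch of that pattern, using the standard-library IntFlag as a stand-in for the package's FlagNameMixin base (which is not shown in this diff), mirroring the new ImapRatesUltraFlags members:

```python
from enum import IntFlag


class RatesUltraFlags(IntFlag):
    """Stand-in for ImapRatesUltraFlags; the real base class adds name handling."""

    NONE = 0
    HIGHRATES = 2**0   # bit 0
    FIRSTSPIN = 2**1   # bit 1
    LASTSPIN = 2**2    # bit 2


# OR bits together into a bitmask, then test individual bits.
mask = RatesUltraFlags.HIGHRATES | RatesUltraFlags.LASTSPIN
print(int(mask))                               # 5
print(bool(mask & RatesUltraFlags.FIRSTSPIN))  # False: bit 1 is not set
```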
imap_processing/spice/geometry.py CHANGED
@@ -27,7 +27,7 @@ class SpiceBody(IntEnum):
     # A subset of IMAP Specific bodies as defined in imap_wkcp.tf
     IMAP = -43
     IMAP_SPACECRAFT = -43000
-    # IMAP Pointing Frame (Despun) as defined in
+    # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
     IMAP_DPS = -43901
     # Standard NAIF bodies
     SOLAR_SYSTEM_BARYCENTER = spiceypy.bodn2c("SOLAR_SYSTEM_BARYCENTER")
@@ -36,13 +36,13 @@ class SpiceBody(IntEnum):
 
 
 class SpiceFrame(IntEnum):
-    """
+    """SPICE IDs for reference frames in imap_wkcp.tf and imap_science_xxx.tf."""
 
     # Standard SPICE Frames
     J2000 = spiceypy.irfnum("J2000")
     ECLIPJ2000 = spiceypy.irfnum("ECLIPJ2000")
     ITRF93 = 13000
-    # IMAP Pointing Frame (Despun) as defined in
+    # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
     IMAP_DPS = -43901
     # IMAP specific as defined in imap_wkcp.tf
     IMAP_SPACECRAFT = -43000
@@ -61,6 +61,28 @@ class SpiceFrame(IntEnum):
     IMAP_IDEX = -43700
     IMAP_GLOWS = -43750
 
+    # IMAP Science Frames (new additions from imap_science_xxx.tf)
+    IMAP_OMD = -43900
+    IMAP_EARTHFIXED = -43910
+    IMAP_ECLIPDATE = -43911
+    IMAP_MDI = -43912
+    IMAP_MDR = -43913
+    IMAP_GMC = -43914
+    IMAP_GEI = -43915
+    IMAP_GSE = -43916
+    IMAP_GSM = -43917
+    IMAP_SMD = -43918
+    IMAP_RTN = -43920
+    IMAP_HCI = -43921  # HGI_J2K
+    IMAP_HCD = -43922  # HGI_D
+    IMAP_HGC = -43923  # HGS_D
+    IMAP_HAE = -43924
+    IMAP_HAED = -43925
+    IMAP_HEE = -43926
+    IMAP_HRE = -43927
+    IMAP_HNU = -43928
+    IMAP_GCS = -43929
+
 
 BORESIGHT_LOOKUP = {
     SpiceFrame.IMAP_LO_BASE: np.array([0, -1, 0]),
imap_processing/spice/pointing_frame.py CHANGED
@@ -200,7 +200,7 @@ def calculate_pointing_attitude_segments(
     - Latest NAIF leapseconds kernel (naif0012.tls)
     - The latest IMAP sclk (imap_sclk_NNNN.tsc)
     - The latest IMAP frame kernel (imap_wkcp.tf)
-    - IMAP DPS frame kernel (
+    - IMAP DPS frame kernel (imap_science_100.tf)
     - IMAP historical attitude kernel from which the pointing frame kernel will
       be generated.
     """
imap_processing/spice/spin.py CHANGED
@@ -197,6 +197,10 @@ def interpolate_spin_data(query_met_times: Union[float, npt.NDArray]) -> pd.Data
     # spin_period_valid columns.
     invalid_spin_phase_range = (spin_phases < 0) | (spin_phases >= 1)
 
+    # TODO: add optional to filter this if this flag means
+    # that repointing is happening. otherwise, then keep it.
+    # This needs to be discussed and receive guidance at
+    # the project level.
     invalid_spins = (out_df["spin_phase_valid"].values == 0) | (
         out_df["spin_period_valid"].values == 0
     )
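The new TODO sits next to logic that marks spins invalid when the interpolated phase falls outside [0, 1) or when either validity column is zero. A small sketch of that masking pattern on a synthetic DataFrame (whether such rows should actually be dropped is, per the TODO, still awaiting project-level guidance; the column names match the hunk, the data is made up):

```python
import numpy as np
import pandas as pd

out_df = pd.DataFrame(
    {
        "spin_phase": [0.25, 1.10, 0.75, -0.05],
        "spin_phase_valid": [1, 1, 0, 1],
        "spin_period_valid": [1, 1, 1, 1],
    }
)

spin_phases = out_df["spin_phase"].values
# Phase must be in [0, 1); anything else is flagged.
invalid_spin_phase_range = (spin_phases < 0) | (spin_phases >= 1)
# Either validity flag being 0 also marks the spin invalid.
invalid_spins = (out_df["spin_phase_valid"].values == 0) | (
    out_df["spin_period_valid"].values == 0
)

keep = ~(invalid_spin_phase_range | invalid_spins)
print(out_df[keep])  # only the first row survives
```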
imap_processing/spice/time.py CHANGED
@@ -220,6 +220,57 @@ def et_to_datetime64(
     return np.array(et_to_utc(et), dtype=np.datetime64)[()]
 
 
+@typing.no_type_check
+@ensure_spice
+def et_to_met(
+    et: Union[float, Collection[float]],
+) -> Union[float, np.ndarray]:
+    """
+    Convert ephemeris time to mission elapsed time (MET).
+
+    This function converts ET to spacecraft clock ticks and then to MET seconds.
+    This is the inverse of the MET to ET conversion process.
+
+    Parameters
+    ----------
+    et : Union[float, Collection[float]]
+        Input ephemeris time value(s) to be converted to MET.
+
+    Returns
+    -------
+    met: np.ndarray
+        Mission elapsed time in seconds.
+    """
+    vectorized_sce2c = _vectorize(spiceypy.sce2c, otypes=[float], excluded=[0])
+    sclk_ticks = vectorized_sce2c(IMAP_SC_ID, et)
+    met = np.asarray(sclk_ticks, dtype=float) * TICK_DURATION
+    return met
+
+
+def ttj2000ns_to_met(
+    tt_ns: npt.ArrayLike,
+) -> npt.NDArray[float]:
+    """
+    Convert terrestrial time nanoseconds since J2000 to mission elapsed time (MET).
+
+    This is the inverse of met_to_ttj2000ns. The conversion process is:
+    TTJ2000ns -> ET -> MET
+
+    Parameters
+    ----------
+    tt_ns : float, numpy.ndarray
+        Number of nanoseconds since the J2000 epoch in the TT timescale.
+
+    Returns
+    -------
+    numpy.ndarray[float]
+        The mission elapsed time in seconds.
+    """
+    et = ttj2000ns_to_et(tt_ns)
+    met = et_to_met(et)
+    return met
+
+
 @typing.no_type_check
 @ensure_spice
 def sct_to_et(
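The new et_to_met converts ET to spacecraft clock ticks with spiceypy.sce2c and scales the ticks to seconds via TICK_DURATION, and ttj2000ns_to_met chains TTJ2000ns -> ET -> MET. The SPICE call needs loaded kernels, so the sketch below only illustrates the final tick-to-seconds scaling step; the TICK_DURATION value here is an assumed placeholder, not the module constant (which this diff does not show).

```python
import numpy as np

TICK_DURATION = 1 / 50_000  # assumed placeholder tick length in seconds, NOT the real IMAP value


def ticks_to_met(sclk_ticks: np.ndarray) -> np.ndarray:
    """MET seconds from raw spacecraft clock ticks (the last step of et_to_met)."""
    return np.asarray(sclk_ticks, dtype=float) * TICK_DURATION


print(ticks_to_met(np.array([0, 50_000, 100_000])))  # [0. 1. 2.] with this placeholder tick rate
```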
imap_processing/swapi/l1/swapi_l1.py CHANGED
@@ -558,8 +558,11 @@ def process_swapi_science(
     # Step 3: Create xarray.Dataset
     # ===================================================================
 
-    # epoch time. Should be same dimension as number of good sweeps
-
+    # epoch time. Should be same dimension as number of good sweeps.
+    # Use center time for epoch to line up with mission requests. Center time
+    # of SWAPI is time of 7th packet(aka SEQ_NUMBER == 6) creation time at the
+    # beginning of 7th packet.
+    epoch_values = good_sweep_sci["epoch"].data.reshape(total_full_sweeps, 12)[:, 6]
 
     epoch_time = xr.DataArray(
         epoch_values,
@@ -628,6 +631,13 @@
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("plan_id"),
     )
+    # Store start time for L3 purposes per SWAPI requests
+    dataset["sci_start_time"] = xr.DataArray(
+        good_sweep_sci["epoch"].data.reshape(total_full_sweeps, 12)[:, 0],
+        name="sci_start_time",
+        dims=["epoch"],
+        attrs=cdf_manager.get_variable_attributes("sci_start_time"),
+    )
     # Add ESA_LVL5 for L2 and L3 purposes.
     # We need to store ESA_LVL5 at SEQ_NUMBER==11
     # which is 71 energy step's ESA_LVL5 value. ESA_LVL5 gets