imap-processing 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/ccsds/excel_to_xtce.py +2 -0
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +100 -1
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +14 -0
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +63 -1
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +7 -0
- imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +574 -231
- imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +326 -0
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +33 -23
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +7 -4
- imap_processing/cdf/utils.py +3 -5
- imap_processing/cli.py +13 -4
- imap_processing/codice/codice_l1a.py +5 -5
- imap_processing/codice/constants.py +9 -9
- imap_processing/codice/decompress.py +6 -2
- imap_processing/glows/l1a/glows_l1a.py +1 -2
- imap_processing/hi/l1a/hi_l1a.py +4 -4
- imap_processing/hi/l1a/histogram.py +106 -108
- imap_processing/hi/l1a/science_direct_event.py +91 -224
- imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +3994 -0
- imap_processing/hit/l0/constants.py +2 -2
- imap_processing/hit/l0/decom_hit.py +12 -101
- imap_processing/hit/l1a/hit_l1a.py +164 -23
- imap_processing/ialirt/l0/process_codicelo.py +153 -0
- imap_processing/ialirt/l0/process_hit.py +5 -5
- imap_processing/ialirt/packet_definitions/ialirt_codicelo.xml +281 -0
- imap_processing/ialirt/process_ephemeris.py +212 -0
- imap_processing/idex/idex_l1a.py +55 -75
- imap_processing/idex/idex_l1b.py +192 -0
- imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +33 -0
- imap_processing/idex/packet_definitions/idex_packet_definition.xml +97 -595
- imap_processing/lo/l0/decompression_tables/decompression_tables.py +16 -0
- imap_processing/lo/l0/lo_science.py +44 -12
- imap_processing/lo/l1a/lo_l1a.py +76 -8
- imap_processing/lo/packet_definitions/lo_xtce.xml +9877 -87
- imap_processing/mag/l1a/mag_l1a.py +1 -2
- imap_processing/mag/l1a/mag_l1a_data.py +1 -2
- imap_processing/mag/l1b/mag_l1b.py +2 -1
- imap_processing/spice/geometry.py +37 -19
- imap_processing/spice/time.py +144 -2
- imap_processing/swapi/l1/swapi_l1.py +3 -3
- imap_processing/swapi/packet_definitions/swapi_packet_definition.xml +1535 -446
- imap_processing/swe/l2/swe_l2.py +134 -17
- imap_processing/tests/ccsds/test_data/expected_output.xml +1 -1
- imap_processing/tests/codice/test_codice_l1a.py +8 -8
- imap_processing/tests/codice/test_decompress.py +4 -4
- imap_processing/tests/conftest.py +46 -43
- imap_processing/tests/hi/test_data/l0/H90_NHK_20241104.bin +0 -0
- imap_processing/tests/hi/test_data/l0/H90_sci_cnt_20241104.bin +0 -0
- imap_processing/tests/hi/test_data/l0/H90_sci_de_20241104.bin +0 -0
- imap_processing/tests/hi/test_hi_l1b.py +2 -2
- imap_processing/tests/hi/test_l1a.py +31 -58
- imap_processing/tests/hi/test_science_direct_event.py +58 -0
- imap_processing/tests/hit/test_data/sci_sample1.ccsds +0 -0
- imap_processing/tests/hit/test_decom_hit.py +60 -50
- imap_processing/tests/hit/test_hit_l1a.py +327 -12
- imap_processing/tests/hit/test_hit_l1b.py +76 -0
- imap_processing/tests/hit/validation_data/hskp_sample_eu.csv +89 -0
- imap_processing/tests/hit/validation_data/sci_sample_raw1.csv +29 -0
- imap_processing/tests/ialirt/test_data/l0/apid01152.tlm +0 -0
- imap_processing/tests/ialirt/test_data/l0/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/ialirt/unit/test_process_codicelo.py +106 -0
- imap_processing/tests/ialirt/unit/test_process_ephemeris.py +109 -0
- imap_processing/tests/ialirt/unit/test_process_hit.py +9 -6
- imap_processing/tests/idex/conftest.py +1 -1
- imap_processing/tests/idex/test_idex_l0.py +1 -1
- imap_processing/tests/idex/test_idex_l1a.py +7 -1
- imap_processing/tests/idex/test_idex_l1b.py +126 -0
- imap_processing/tests/lo/test_lo_l1a.py +7 -16
- imap_processing/tests/lo/test_lo_science.py +67 -3
- imap_processing/tests/lo/test_pkts/imap_lo_l0_raw_20240803_v002.pkts +0 -0
- imap_processing/tests/lo/validation_data/Instrument_FM1_T104_R129_20240803_ILO_SCI_DE_dec_DN_with_fills.csv +1999 -0
- imap_processing/tests/mag/test_mag_l1b.py +39 -5
- imap_processing/tests/spice/test_geometry.py +32 -6
- imap_processing/tests/spice/test_time.py +135 -6
- imap_processing/tests/swapi/test_swapi_decom.py +75 -69
- imap_processing/tests/swapi/test_swapi_l1.py +4 -4
- imap_processing/tests/swe/test_swe_l2.py +64 -8
- imap_processing/tests/test_utils.py +1 -1
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3314 -3314
- imap_processing/tests/ultra/unit/test_de.py +8 -3
- imap_processing/tests/ultra/unit/test_spatial_utils.py +125 -0
- imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +39 -29
- imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +2 -25
- imap_processing/ultra/constants.py +4 -0
- imap_processing/ultra/l1b/de.py +8 -14
- imap_processing/ultra/l1b/ultra_l1b_extended.py +29 -70
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +1 -36
- imap_processing/ultra/utils/spatial_utils.py +221 -0
- {imap_processing-0.8.0.dist-info → imap_processing-0.9.0.dist-info}/METADATA +1 -1
- {imap_processing-0.8.0.dist-info → imap_processing-0.9.0.dist-info}/RECORD +94 -76
- imap_processing/hi/l0/__init__.py +0 -0
- imap_processing/hi/l0/decom_hi.py +0 -24
- imap_processing/hi/packet_definitions/hi_packet_definition.xml +0 -482
- imap_processing/tests/hi/test_decom.py +0 -55
- imap_processing/tests/hi/test_l1a_sci_de.py +0 -72
- {imap_processing-0.8.0.dist-info → imap_processing-0.9.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.8.0.dist-info → imap_processing-0.9.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.8.0.dist-info → imap_processing-0.9.0.dist-info}/entry_points.txt +0 -0

imap_processing/tests/mag/test_mag_l1b.py

@@ -5,7 +5,11 @@ import pytest
 import xarray as xr
 
 from imap_processing.cdf.utils import load_cdf, write_cdf
-from imap_processing.mag.l1b.mag_l1b import
+from imap_processing.mag.l1b.mag_l1b import (
+    calibrate_vector,
+    mag_l1b,
+    mag_l1b_processing,
+)
 
 
 @pytest.fixture(scope="module")
@@ -44,6 +48,7 @@ def mag_l1a_dataset():
     output_dataset["compression_flags"] = compression_flags
     output_dataset["direction_label"] = direction_label
     output_dataset["compression_label"] = compression_label
+    output_dataset.attrs["Logical_source"] = ["imap_mag_l1a_norm-mago"]
 
     return output_dataset
 
@@ -52,9 +57,8 @@ def test_mag_processing(mag_l1a_dataset):
     mag_l1a_dataset.attrs["Logical_source"] = ["imap_mag_l1a_norm-mago"]
 
     mag_l1b = mag_l1b_processing(mag_l1a_dataset)
-
     np.testing.assert_allclose(
-        mag_l1b["vectors"][0].values, [2.
+        mag_l1b["vectors"][0].values, [2.2972, 2.2415, 2.2381, 0], atol=1e-4
     )
     np.testing.assert_allclose(mag_l1b["vectors"][1].values, [0, 0, 0, 0])
 
@@ -65,7 +69,7 @@ def test_mag_processing(mag_l1a_dataset):
     mag_l1b = mag_l1b_processing(mag_l1a_dataset)
 
     np.testing.assert_allclose(
-        mag_l1b["vectors"][0].values, [2.
+        mag_l1b["vectors"][0].values, [2.27538, 2.23416, 2.23682, 0], atol=1e-5
     )
     np.testing.assert_allclose(mag_l1b["vectors"][1].values, [0, 0, 0, 0])
 
@@ -118,7 +122,7 @@ def test_mag_compression_scale(mag_l1a_dataset):
     mag_l1a_dataset.attrs["Logical_source"] = ["imap_mag_l1a_norm-mago"]
     output = mag_l1b(mag_l1a_dataset, "v001")
 
-    calibrated_vectors = np.matmul(np.array([1, 1, 1])
+    calibrated_vectors = np.matmul(test_calibration, np.array([1, 1, 1]))
     # 16 bit width is the standard
     assert np.allclose(output["vectors"].data[0][:3], calibrated_vectors)
     # uncompressed data is uncorrected
@@ -132,3 +136,33 @@ def test_mag_compression_scale(mag_l1a_dataset):
     # width of 14 should be multiplied by 4
     scaled_vectors = calibrated_vectors * 4
     assert np.allclose(output["vectors"].data[3][:3], scaled_vectors)
+
+
+def test_calibrate_vector():
+    # from MFOTOURFO
+    cal_array = np.array(
+        [
+            [
+                [2.29722020e00, 7.38200160e-02, 1.88479865e-02, 4.59777333e-03],
+                [0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00],
+                [0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00],
+            ],
+            [
+                [3.48624576e-03, 1.09224000e-04, 3.26118600e-05, 5.02830000e-06],
+                [2.23802879e00, 7.23781440e-02, 1.84842873e-02, 4.50744060e-03],
+                [0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00],
+            ],
+            [
+                [-2.50787532e-03, -8.33760000e-05, -2.71240200e-05, 2.50509000e-06],
+                [-8.88437262e-03, -2.84256000e-04, -7.41600000e-05, -2.29399200e-05],
+                [2.24950008e00, 7.23836160e-02, 1.84847323e-02, 4.50945192e-03],
+            ],
+        ]
+    )
+
+    calibration_matrix = xr.DataArray(cal_array)
+
+    cal_vector = calibrate_vector(np.array([1.0, 1.0, 1.0, 0]), calibration_matrix)
+    expected_vector = np.array([2.2972, 2.2415, 2.2381, 0])
+
+    assert np.allclose(cal_vector, expected_vector, atol=1e-4)
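
Note: the new test_calibrate_vector case above also documents the arithmetic behind the updated expected vectors in test_mag_processing. A minimal NumPy sketch that reproduces those numbers from the first slice of cal_array (the assumption that the trailing element of the input vector selects that slice is ours, not stated in this diff):

    import numpy as np

    # Range-0 calibration values, i.e. the [..., 0] slice of the test's cal_array.
    cal_page = np.array(
        [
            [2.29722020e00, 0.00000000e00, 0.00000000e00],
            [3.48624576e-03, 2.23802879e00, 0.00000000e00],
            [-2.50787532e-03, -8.88437262e-03, 2.24950008e00],
        ]
    )
    # Multiplying the matrix into a unit vector reproduces the test's
    # expected_vector of [2.2972, 2.2415, 2.2381] to ~1e-4.
    calibrated = cal_page @ np.array([1.0, 1.0, 1.0])
    print(np.round(calibrated, 4))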

imap_processing/tests/spice/test_geometry.py

@@ -204,6 +204,19 @@ def test_get_spacecraft_to_instrument_spin_phase_offset(instrument, expected_off
             SpiceFrame.IMAP_SPACECRAFT,
             SpiceFrame.IMAP_DPS,
         ),
+        # single et, multiple position vectors
+        (
+            ["2025-04-30T12:00:00.000"],
+            np.array(
+                [
+                    [1, 0, 0],
+                    [0, 1, 0],
+                    [0, 0, 1],
+                ]
+            ),
+            SpiceFrame.IMAP_SPACECRAFT,
+            SpiceFrame.IMAP_DPS,
+        ),
     ],
 )
 def test_frame_transform(et_strings, position, from_frame, to_frame, furnish_kernels):
@@ -223,11 +236,24 @@ def test_frame_transform(et_strings, position, from_frame, to_frame, furnish_ker
         et = np.array([spice.utc2et(et_str) for et_str in et_strings])
         et_arg = et[0] if len(et) == 1 else et
         result = frame_transform(et_arg, position, from_frame, to_frame)
-        # check the result shape before modifying for value checking
-
-
-
-
+        # check the result shape before modifying for value checking.
+        # There are 3 cases to consider:
+
+        # 1 event time, multiple position vectors:
+        if len(et) == 1 and position.ndim > 1:
+            assert result.shape == position.shape
+        # multiple event times, single position vector:
+        elif len(et) > 1 and position.ndim == 1:
+            assert result.shape == (len(et), 3)
+        # multiple event times, multiple position vectors (same number of each)
+        elif len(et) > 1 and position.ndim > 1:
+            assert result.shape == (len(et), 3)
+
+        # compare against pure SPICE calculation.
+        # If the result is a single position vector, broadcast it to first.
+        if position.ndim == 1:
+            position = np.broadcast_to(position, (len(et), 3))
+            result = np.broadcast_to(result, (len(et), 3))
         for spice_et, spice_position, test_result in zip(et, position, result):
             rotation_matrix = spice.pxform(from_frame.name, to_frame.name, spice_et)
             spice_result = spice.mxv(rotation_matrix, spice_position)
@@ -254,7 +280,7 @@ def test_frame_transform_exceptions():
         match="Mismatch in number of position vectors and Ephemeris times provided.",
     ):
         frame_transform(
-            1,
+            [1, 2],
             np.arange(9).reshape((3, 3)),
             SpiceFrame.ECLIPJ2000,
             SpiceFrame.IMAP_HIT,
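
Note: the new parametrize case and shape assertions above spell out the broadcasting contract that frame_transform is expected to honour. A small standalone restatement of just those shape rules, mirroring the test's three cases (this does not call frame_transform; it only encodes the shapes the test asserts):

    import numpy as np

    def expected_result_shape(n_et: int, position: np.ndarray) -> tuple:
        """Output shape test_frame_transform asserts for each input combination."""
        if n_et == 1 and position.ndim > 1:
            # one ephemeris time, many position vectors
            return position.shape
        elif n_et > 1 and position.ndim == 1:
            # many ephemeris times, one shared position vector
            return (n_et, 3)
        elif n_et > 1 and position.ndim > 1:
            # one position vector per ephemeris time
            return (n_et, 3)
        raise ValueError("combination not covered by the test above")

    print(expected_result_shape(1, np.eye(3)))             # (3, 3)
    print(expected_result_shape(5, np.array([1, 0, 0])))   # (5, 3)
    print(expected_result_shape(5, np.zeros((5, 3))))      # (5, 3)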

imap_processing/tests/spice/test_time.py

@@ -2,22 +2,41 @@
 
 import numpy as np
 import pytest
-import spiceypy
+import spiceypy
 
 from imap_processing.spice import IMAP_SC_ID
-from imap_processing.spice.time import
+from imap_processing.spice.time import (
+    TICK_DURATION,
+    _sct2e_wrapper,
+    et_to_utc,
+    j2000ns_to_j2000s,
+    met_to_datetime64,
+    met_to_j2000ns,
+    met_to_sclkticks,
+    met_to_utc,
+    str_to_et,
+)
+
+
+@pytest.mark.parametrize("met", [1, np.arange(10)])
+def test_met_to_sclkticks(met):
+    """Test coverage for met_to_sclkticks."""
+    # Tick duration is 20us as specified in imap_sclk_0000.tsc
+    expected = met * 1 / 20e-6
+    ticks = met_to_sclkticks(met)
+    np.testing.assert_array_equal(ticks, expected)
 
 
 def test_met_to_j2000ns(furnish_time_kernels):
     """Test coverage for met_to_j2000ns function."""
     utc = "2026-01-01T00:00:00.125"
-    et =
-    sclk_str =
+    et = spiceypy.str2et(utc)
+    sclk_str = spiceypy.sce2s(IMAP_SC_ID, et)
     seconds, ticks = sclk_str.split("/")[1].split(":")
     # There is some floating point error calculating tick duration from 1 clock
     # tick so average over many clock ticks for better accuracy
     spice_tick_duration = (
-
+        spiceypy.sct2e(IMAP_SC_ID, 1e12) - spiceypy.sct2e(IMAP_SC_ID, 0)
     ) / 1e12
     met = float(seconds) + float(ticks) * spice_tick_duration
     j2000ns = met_to_j2000ns(met)
@@ -30,7 +49,7 @@ def test_j2000ns_to_j2000s(furnish_time_kernels):
     # Use spice to come up with reasonable J2000 values
     utc = "2025-09-23T00:00:00.000"
     # Test single value input
-    et =
+    et = spiceypy.str2et(utc)
     epoch = int(et * 1e9)
     j2000s = j2000ns_to_j2000s(epoch)
     assert j2000s == et
@@ -42,6 +61,61 @@ def test_j2000ns_to_j2000s(furnish_time_kernels):
     )
 
 
+@pytest.mark.parametrize(
+    "expected_utc, precision",
+    [
+        ("2024-01-01T00:00:00.000", 3),
+        (
+            [
+                "2024-01-01T00:00:00.000555",
+                "2025-09-23T00:00:00.000111",
+                "2040-11-14T10:23:48.156980",
+            ],
+            6,
+        ),
+    ],
+)
+def test_met_to_utc(furnish_time_kernels, expected_utc, precision):
+    """Test coverage for met_to_utc function."""
+    if isinstance(expected_utc, list):
+        et_arr = spiceypy.str2et(expected_utc)
+        sclk_ticks = np.array([spiceypy.sce2c(IMAP_SC_ID, et) for et in et_arr])
+    else:
+        et = spiceypy.str2et(expected_utc)
+        sclk_ticks = spiceypy.sce2c(IMAP_SC_ID, et)
+    met = sclk_ticks * TICK_DURATION
+    utc = met_to_utc(met, precision=precision)
+    np.testing.assert_array_equal(utc, expected_utc)
+
+
+@pytest.mark.parametrize(
+    "utc",
+    [
+        "2024-01-01T00:00:00.000",
+        [
+            "2024-01-01T00:00:00.000",
+            "2025-09-23T00:00:00.000",
+            "2040-11-14T10:23:48.15698",
+        ],
+    ],
+)
+def test_met_to_datetime64(furnish_time_kernels, utc):
+    """Test coverage for met_to_datetime64 function."""
+    if isinstance(utc, list):
+        expected_dt64 = np.array([np.datetime64(utc_str) for utc_str in utc])
+        et_arr = spiceypy.str2et(utc)
+        sclk_ticks = np.array([spiceypy.sce2c(IMAP_SC_ID, et) for et in et_arr])
+    else:
+        expected_dt64 = np.asarray(np.datetime64(utc))
+        et = spiceypy.str2et(utc)
+        sclk_ticks = spiceypy.sce2c(IMAP_SC_ID, et)
+    met = sclk_ticks * TICK_DURATION
+    dt64 = met_to_datetime64(met)
+    np.testing.assert_array_equal(
+        dt64.astype("datetime64[us]"), expected_dt64.astype("datetime64[us]")
+    )
+
+
 @pytest.mark.parametrize("sclk_ticks", [0.0, np.arange(10)])
 def test_sct2e_wrapper(sclk_ticks):
     """Test for `_sct2e_wrapper` function."""
@@ -50,3 +124,58 @@ def test_sct2e_wrapper(sclk_ticks):
         assert isinstance(et, float)
     else:
         assert len(et) == len(sclk_ticks)
+
+
+def test_str_to_et(furnish_time_kernels):
+    """Test coverage for string to et conversion function."""
+    utc = "2017-07-14T19:46:00"
+    # Test single value input
+    expected_et = 553333629.1837274
+    actual_et = str_to_et(utc)
+    assert expected_et == actual_et
+
+    # Test list input
+    list_of_utc = [
+        "2017-08-14T19:46:00.000",
+        "2017-09-14T19:46:00.000",
+        "2017-10-14T19:46:00.000",
+    ]
+
+    expected_et_array = np.array(
+        (556012029.1829445, 558690429.1824446, 561282429.1823651)
+    )
+    actual_et_array = str_to_et(list_of_utc)
+    assert np.array_equal(expected_et_array, actual_et_array)
+
+    # Test array input
+    array_of_utc = np.array(
+        [
+            "2017-08-14T19:46:00.000",
+            "2017-09-14T19:46:00.000",
+            "2017-10-14T19:46:00.000",
+        ]
+    )
+
+    actual_et_array = str_to_et(array_of_utc)
+    assert np.array_equal(expected_et_array, actual_et_array)
+
+
+def test_et_to_utc(furnish_time_kernels):
+    """Test coverage for et to utc conversion function."""
+    et = 553333629.1837274
+    # Test single value input
+    expected_utc = "2017-07-14T19:46:00.000"
+    actual_utc = et_to_utc(et)
+    assert expected_utc == actual_utc
+
+    # Test array input
+    array_of_et = np.array((556012029.1829445, 558690429.1824446, 561282429.1823651))
+    expected_utc_array = np.array(
+        (
+            "2017-08-14T19:46:00.000",
+            "2017-09-14T19:46:00.000",
+            "2017-10-14T19:46:00.000",
+        )
+    )
+    actual_utc_array = et_to_utc(array_of_et)
+    assert np.array_equal(expected_utc_array, actual_utc_array)
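
Note: test_met_to_sclkticks above pins the IMAP spacecraft clock tick at 20 microseconds (per the imap_sclk_0000.tsc comment), so converting mission elapsed time to SCLK ticks is a plain scaling. A worked example of that arithmetic, independent of any SPICE kernels:

    import numpy as np

    TICK_DURATION = 20e-6  # seconds per SCLK tick, as stated in the test comment

    met_seconds = np.array([1.0, 2.5, 100.0])
    sclk_ticks = met_seconds / TICK_DURATION
    print(sclk_ticks)  # [50000. 125000. 5000000.] -> 50,000 ticks per MET second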

imap_processing/tests/swapi/test_swapi_decom.py

@@ -2,36 +2,34 @@ import pandas as pd
 import pytest
 
 from imap_processing import imap_module_directory
-from imap_processing.decom import decom_packets
 from imap_processing.swapi.l1.swapi_l1 import (
     SWAPIAPID,
 )
-from imap_processing.utils import
+from imap_processing.utils import packet_file_to_datasets
 
 
 @pytest.fixture(scope="session")
 def decom_test_data(swapi_l0_test_data_path):
-    """Read test data from file"""
+    """Read test data from file with derived values"""
     test_file = "imap_swapi_l0_raw_20240924_v001.pkts"
     packet_file = imap_module_directory / swapi_l0_test_data_path / test_file
     packet_definition = (
         f"{imap_module_directory}/swapi/packet_definitions/swapi_packet_definition.xml"
     )
-
-
-
+    return packet_file_to_datasets(
+        packet_file, packet_definition, use_derived_value=False
+    )
 
 
 def test_number_of_packets(decom_test_data):
     """This test and validate number of packets."""
-
-    sci_packets = grouped_data[SWAPIAPID.SWP_SCI]
+    sci_packets = decom_test_data[SWAPIAPID.SWP_SCI]
     expected_sci_packets = 153
-    assert len(sci_packets) == expected_sci_packets
+    assert len(sci_packets["epoch"]) == expected_sci_packets
 
-    hk_packets =
+    hk_packets = decom_test_data[SWAPIAPID.SWP_HK]
     expected_hk_packets = 17
-    assert len(hk_packets) == expected_hk_packets
+    assert len(hk_packets["epoch"]) == expected_hk_packets
 
 
 def test_swapi_sci_data(decom_test_data, swapi_l0_validation_data_path):
@@ -42,32 +40,30 @@ def test_swapi_sci_data(decom_test_data, swapi_l0_validation_data_path):
         index_col="SHCOARSE",
     )
 
-
-
-
-    validation_data = raw_validation_data.loc[first_data["SHCOARSE"]]
+    sci_packets = decom_test_data[SWAPIAPID.SWP_SCI]
+    first_data = sci_packets.isel(epoch=0)
+    validation_data = raw_validation_data.loc[first_data["shcoarse"].values]
 
     # compare raw values of validation data
-    for key
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )
+    for key in raw_validation_data.columns:
+        if key in [
+            "PHAPID",
+            "timestamp",
+            "PHGROUPF",
+            "PHSHF",
+            "PHVERNO",
+            "PHSEQCNT",
+            "PHDLEN",
+            "PHTYPE",
+        ]:
+            continue
+
+        # for SHCOARSE we need the name of the column.
+        # This is done because pandas removed it from the
+        # main columns to make it the index.
+        assert first_data[key.lower()].values == (
+            validation_data[key] if key != "SHCOARSE" else validation_data.name
+        )
 
 
 def test_swapi_hk_data(decom_test_data, swapi_l0_validation_data_path):
@@ -78,39 +74,49 @@ def test_swapi_hk_data(decom_test_data, swapi_l0_validation_data_path):
         index_col="SHCOARSE",
    )
 
-
-
-
-
-    bad_keys = [
-        "N5_V",
-        "SCEM_I",
-        "P5_I",
-        "PHD_LLD1_V",
-        "SPARE_4",
-        "P_CEM_CMD_LVL_MON",
-        "S_CEM_CMD_LVL_MON",
-        "ESA_CMD_LVL_MON",
-        "PHD_LLD2_V",
-        "CHKSUM",
-    ]
+    hk_packets = decom_test_data[SWAPIAPID.SWP_HK]
+    first_data = hk_packets.isel(epoch=0)
+    validation_data = raw_validation_data.loc[first_data["shcoarse"].values]
+
     # compare raw values of validation data
-    for key
-        if key
-
-
-
-
-
-
-
-
-
-
-
-
-
+    for key in raw_validation_data.columns:
+        if key in [
+            "PHAPID",
+            "timestamp",
+            "PHGROUPF",
+            "PHSHF",
+            "PHVERNO",
+            "PHSEQCNT",
+            "PHDLEN",
+            "PHTYPE",
+        ]:
+            continue
+
+        value_mismatching_keys = [
+            "SCEM_I",
+            "N5_V",
+            "P5_I",
+            "PHD_LLD1_V",
+            "P_CEM_CMD_LVL_MON",
+            "S_CEM_CMD_LVL_MON",
+            "ESA_CMD_LVL_MON",
+            "PHD_LLD2_V",
+            "CHKSUM",
+        ]
+
+        extra_keys_val_data = [
+            "ESA_GATE_SET",
+            "P5V_ESA_V_MON",
+            "M5V_ESA_V_MON",
+            "P5V_ESA_I_MON",
+            "M5V_ESA_I_MON",
+        ]
+
+        if key in extra_keys_val_data or key in value_mismatching_keys:
             continue
-
-
+        # for SHCOARSE we need the name of the column.
+        # This is done because pandas removed it from the
+        # main columns to make it the index.
+        assert first_data[key.lower()].values == (
+            validation_data[key] if key != "SHCOARSE" else validation_data.name
+        )
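
Note: the rewritten fixture and assertions above show what packet_file_to_datasets returns: a mapping from APID to an xarray Dataset with an epoch dimension and lower-cased packet field names. A minimal consumption sketch based only on the accesses visible in this diff (the file paths here are placeholders, not real test assets):

    from imap_processing.swapi.l1.swapi_l1 import SWAPIAPID
    from imap_processing.utils import packet_file_to_datasets

    datasets_by_apid = packet_file_to_datasets(
        "imap_swapi_l0_raw_20240924_v001.pkts",  # packet file (placeholder path)
        "swapi_packet_definition.xml",           # XTCE definition (placeholder path)
        use_derived_value=False,
    )

    sci = datasets_by_apid[SWAPIAPID.SWP_SCI]  # one xarray.Dataset per APID
    print(len(sci["epoch"]))                   # packet count (153 in the test data)
    first = sci.isel(epoch=0)                  # fields are lower-cased, e.g. first["shcoarse"]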

imap_processing/tests/swapi/test_swapi_l1.py

@@ -39,7 +39,7 @@ def test_filter_good_data():
     total_sweeps = 3
     ds = xr.Dataset(
         {
-            "
+            "plan_id": xr.DataArray(np.full((total_sweeps * 12), 1)),
             "sweep_table": xr.DataArray(np.repeat(np.arange(total_sweeps), 12)),
             "mode": xr.DataArray(np.full((total_sweeps * 12), SWAPIMODE.HVSCI.value)),
         },
@@ -69,9 +69,9 @@ def test_filter_good_data():
     expected = [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
     np.testing.assert_array_equal(filter_good_data(ds), expected)
 
-    # Check for bad
+    # Check for bad plan_id data.
     ds["sweep_table"] = xr.DataArray(np.repeat(np.arange(total_sweeps), 12))
-    ds["
+    ds["plan_id"][24 : total_sweeps * 12] = np.arange(0, 12)
     np.testing.assert_array_equal(filter_good_data(ds), np.arange(0, 24))
 
 
@@ -157,7 +157,7 @@ def test_process_swapi_science(decom_test_data):
     )
 
     # make PLAN_ID data incorrect. Now processed data should have less sweeps
-    ds_data["
+    ds_data["plan_id"].data[:24] = np.arange(24)
     processed_data = process_swapi_science(
         ds_data, decom_test_data[SWAPIAPID.SWP_HK], data_version="001"
     )

imap_processing/tests/swe/test_swe_l2.py

@@ -2,13 +2,19 @@ from unittest.mock import patch
 
 import numpy as np
 import pandas as pd
+import pytest
 import xarray as xr
 
+from imap_processing import imap_module_directory
+from imap_processing.swe.l1a.swe_l1a import swe_l1a
+from imap_processing.swe.l1b.swe_l1b import swe_l1b
 from imap_processing.swe.l2.swe_l2 import (
-    ELECTRON_MASS,
     ENERGY_CONVERSION_FACTOR,
+    VELOCITY_CONVERSION_FACTOR,
+    calculate_flux,
     calculate_phase_space_density,
     get_particle_energy,
+    swe_l2,
 )
 from imap_processing.swe.utils.swe_utils import read_lookup_table
 
@@ -17,7 +23,7 @@ def test_get_particle_energy():
     """Test get_particle_energy function."""
     all_energy = get_particle_energy()
     expected_energy = read_lookup_table()["esa_v"].values * ENERGY_CONVERSION_FACTOR
-
+    np.testing.assert_array_equal(all_energy["energy"], expected_energy)
 
 
 @patch("imap_processing.swe.l2.swe_l2.GEOMETRIC_FACTORS", new=np.full(7, 1))
@@ -50,20 +56,70 @@ def test_calculate_phase_space_density(patch_get_particle_energy):
             ),
         }
     )
-
-    assert
+    phase_space_density_ds = calculate_phase_space_density(l1b_dataset)
+    assert phase_space_density_ds["phase_space_density"].shape == (
+        total_sweeps,
+        24,
+        30,
+        7,
+    )
 
     # Test that first sweep has correct values. In patch,
     # 1. we have set GEOMETRIC_FACTORS to 1.
     # 2. we have set energy to 1.
     # 3. we have set science_data to 1.
     # Using this in the formula, we calculate expected density value.
-    expected_calculated_density = (2 * 1) / (1 *
+    expected_calculated_density = (2 * 1) / (1 * VELOCITY_CONVERSION_FACTOR * 1**2)
     expected_density = np.full((24, 30, 7), expected_calculated_density)
-
+    np.testing.assert_array_equal(
+        phase_space_density_ds["phase_space_density"][0].data, expected_density
+    )
 
     # Test that second sweep has correct values, similar to first sweep,
     # but with energy 2.
-    expected_calculated_density = (2 * 1) / (1 *
+    expected_calculated_density = (2 * 1) / (1 * VELOCITY_CONVERSION_FACTOR * 2**2)
     expected_density = np.full((24, 30, 7), expected_calculated_density)
-
+    np.testing.assert_array_equal(
+        phase_space_density_ds["phase_space_density"][1].data, expected_density
+    )
+    assert type(phase_space_density_ds) == xr.Dataset
+
+
+def test_calculate_flux():
+    """Test calculate_flux function."""
+    # Create a dummy l1b dataset
+    total_sweeps = 2
+    l1b_dataset = xr.Dataset(
+        {
+            "science_data": (
+                ["epoch", "energy", "angle", "cem"],
+                np.full((total_sweeps, 24, 30, 7), 1),
+            ),
+            "acq_duration": (["epoch", "cycle"], np.full((total_sweeps, 4), 80.0)),
+            "esa_table_num": (
+                ["epoch", "cycle"],
+                np.repeat([0, 1], 4).reshape(total_sweeps, 4),
+            ),
+        }
+    )
+
+    flux = calculate_flux(l1b_dataset)
+    assert flux.shape == (total_sweeps, 24, 30, 7)
+    assert type(flux) == np.ndarray
+
+
+@pytest.mark.usefixtures("use_fake_spin_data_for_time")
+def test_swe_l2(use_fake_spin_data_for_time):
+    """Test L2 processing."""
+    data_start_time = 453051293.099714
+    data_end_time = 453066734
+    use_fake_spin_data_for_time(data_start_time, data_end_time)
+
+    test_data_path = "tests/swe/l0_data/2024051010_SWE_SCIENCE_packet.bin"
+    l1a_datasets = swe_l1a(imap_module_directory / test_data_path, "002")
+
+    l1b_dataset = swe_l1b(l1a_datasets, "002")
+    l2_dataset = swe_l2(l1b_dataset, "002")
+
+    assert type(l2_dataset) == xr.Dataset
+    assert l2_dataset["spin_phase"].shape == (6, 24, 30, 7)
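
Note: the updated assertions above encode the relation the L2 code is expected to satisfy per (energy, angle, cem) bin: phase_space_density = 2 * counts / (geometric_factor * VELOCITY_CONVERSION_FACTOR * energy**2). A small sketch of that arithmetic with the unit values the test patches in (the value used for the conversion factor here is a placeholder; the real constant is imported from swe_l2):

    def expected_phase_space_density(counts, geometric_factor, energy, velocity_conversion_factor):
        """Relation asserted by test_calculate_phase_space_density for each bin."""
        return (2 * counts) / (geometric_factor * velocity_conversion_factor * energy**2)

    # With counts = geometric_factor = 1 (as patched in the test), the sweep at
    # energy 2 is a quarter of the sweep at energy 1, whatever the factor is:
    vcf = 1.0  # placeholder for VELOCITY_CONVERSION_FACTOR
    print(expected_phase_space_density(1, 1, 1, vcf))  # 2.0
    print(expected_phase_space_density(1, 1, 2, vcf))  # 0.5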

imap_processing/tests/test_utils.py

@@ -91,7 +91,7 @@ def test_packet_file_to_datasets(use_derived_value, expected_mode):
         packet_files, packet_definition, use_derived_value=use_derived_value
     )
     # 3 apids in the test data
-    assert len(datasets_by_apid) ==
+    assert len(datasets_by_apid) == 2
     data = datasets_by_apid[1188]
     assert data["sec_hdr_flg"].dtype == np.uint8
     assert data["pkt_apid"].dtype == np.uint16