imap-processing 0.7.0-py3-none-any.whl → 0.9.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- imap_processing/__init__.py +1 -1
- imap_processing/_version.py +2 -2
- imap_processing/ccsds/excel_to_xtce.py +36 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +145 -30
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +36 -36
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +136 -9
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +14 -0
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +63 -1
- imap_processing/cdf/config/imap_hit_l1b_variable_attrs.yaml +9 -0
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +14 -7
- imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +577 -235
- imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +326 -0
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +33 -23
- imap_processing/cdf/config/imap_mag_l1_variable_attrs.yaml +24 -28
- imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +1 -0
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +137 -79
- imap_processing/cdf/config/imap_variable_schema.yaml +13 -0
- imap_processing/cdf/imap_cdf_manager.py +31 -27
- imap_processing/cdf/utils.py +3 -5
- imap_processing/cli.py +25 -14
- imap_processing/codice/codice_l1a.py +153 -63
- imap_processing/codice/constants.py +10 -10
- imap_processing/codice/decompress.py +10 -11
- imap_processing/codice/utils.py +1 -0
- imap_processing/glows/l1a/glows_l1a.py +1 -2
- imap_processing/glows/l1b/glows_l1b.py +3 -3
- imap_processing/glows/l1b/glows_l1b_data.py +59 -37
- imap_processing/glows/l2/glows_l2_data.py +123 -0
- imap_processing/hi/l1a/hi_l1a.py +4 -4
- imap_processing/hi/l1a/histogram.py +107 -109
- imap_processing/hi/l1a/science_direct_event.py +92 -225
- imap_processing/hi/l1b/hi_l1b.py +85 -11
- imap_processing/hi/l1c/hi_l1c.py +23 -1
- imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +3994 -0
- imap_processing/hi/utils.py +1 -1
- imap_processing/hit/hit_utils.py +221 -0
- imap_processing/hit/l0/constants.py +118 -0
- imap_processing/hit/l0/decom_hit.py +100 -156
- imap_processing/hit/l1a/hit_l1a.py +170 -184
- imap_processing/hit/l1b/hit_l1b.py +33 -153
- imap_processing/ialirt/l0/process_codicelo.py +153 -0
- imap_processing/ialirt/l0/process_hit.py +5 -5
- imap_processing/ialirt/packet_definitions/ialirt_codicelo.xml +281 -0
- imap_processing/ialirt/process_ephemeris.py +212 -0
- imap_processing/idex/idex_l1a.py +65 -84
- imap_processing/idex/idex_l1b.py +192 -0
- imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +33 -0
- imap_processing/idex/packet_definitions/idex_packet_definition.xml +97 -595
- imap_processing/lo/l0/decompression_tables/decompression_tables.py +17 -1
- imap_processing/lo/l0/lo_science.py +45 -13
- imap_processing/lo/l1a/lo_l1a.py +76 -8
- imap_processing/lo/packet_definitions/lo_xtce.xml +8344 -1849
- imap_processing/mag/l0/decom_mag.py +4 -3
- imap_processing/mag/l1a/mag_l1a.py +12 -13
- imap_processing/mag/l1a/mag_l1a_data.py +1 -2
- imap_processing/mag/l1b/mag_l1b.py +90 -7
- imap_processing/spice/geometry.py +156 -16
- imap_processing/spice/time.py +144 -2
- imap_processing/swapi/l1/swapi_l1.py +4 -4
- imap_processing/swapi/l2/swapi_l2.py +1 -1
- imap_processing/swapi/packet_definitions/swapi_packet_definition.xml +1535 -446
- imap_processing/swe/l1b/swe_l1b_science.py +8 -8
- imap_processing/swe/l2/swe_l2.py +134 -17
- imap_processing/tests/ccsds/test_data/expected_output.xml +2 -1
- imap_processing/tests/ccsds/test_excel_to_xtce.py +4 -4
- imap_processing/tests/cdf/test_imap_cdf_manager.py +0 -10
- imap_processing/tests/codice/conftest.py +1 -17
- imap_processing/tests/codice/data/imap_codice_l0_raw_20241110_v001.pkts +0 -0
- imap_processing/tests/codice/test_codice_l0.py +8 -2
- imap_processing/tests/codice/test_codice_l1a.py +127 -107
- imap_processing/tests/codice/test_codice_l1b.py +1 -0
- imap_processing/tests/codice/test_decompress.py +7 -7
- imap_processing/tests/conftest.py +100 -58
- imap_processing/tests/glows/conftest.py +6 -0
- imap_processing/tests/glows/test_glows_l1b.py +9 -9
- imap_processing/tests/glows/test_glows_l1b_data.py +9 -9
- imap_processing/tests/hi/test_data/l0/H90_NHK_20241104.bin +0 -0
- imap_processing/tests/hi/test_data/l0/H90_sci_cnt_20241104.bin +0 -0
- imap_processing/tests/hi/test_data/l0/H90_sci_de_20241104.bin +0 -0
- imap_processing/tests/hi/test_data/l1a/imap_hi_l1a_45sensor-de_20250415_v000.cdf +0 -0
- imap_processing/tests/hi/test_hi_l1b.py +73 -3
- imap_processing/tests/hi/test_hi_l1c.py +10 -2
- imap_processing/tests/hi/test_l1a.py +31 -58
- imap_processing/tests/hi/test_science_direct_event.py +58 -0
- imap_processing/tests/hi/test_utils.py +4 -3
- imap_processing/tests/hit/test_data/sci_sample1.ccsds +0 -0
- imap_processing/tests/hit/{test_hit_decom.py → test_decom_hit.py} +95 -36
- imap_processing/tests/hit/test_hit_l1a.py +299 -179
- imap_processing/tests/hit/test_hit_l1b.py +231 -24
- imap_processing/tests/hit/test_hit_utils.py +218 -0
- imap_processing/tests/hit/validation_data/hskp_sample_eu.csv +89 -0
- imap_processing/tests/hit/validation_data/sci_sample_raw1.csv +29 -0
- imap_processing/tests/ialirt/test_data/l0/apid01152.tlm +0 -0
- imap_processing/tests/ialirt/test_data/l0/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/ialirt/unit/test_process_codicelo.py +106 -0
- imap_processing/tests/ialirt/unit/test_process_ephemeris.py +109 -0
- imap_processing/tests/ialirt/unit/test_process_hit.py +9 -6
- imap_processing/tests/idex/conftest.py +2 -2
- imap_processing/tests/idex/imap_idex_l0_raw_20231214_v001.pkts +0 -0
- imap_processing/tests/idex/impact_14_tof_high_data.txt +4444 -4444
- imap_processing/tests/idex/test_idex_l0.py +4 -4
- imap_processing/tests/idex/test_idex_l1a.py +8 -2
- imap_processing/tests/idex/test_idex_l1b.py +126 -0
- imap_processing/tests/lo/test_lo_l1a.py +7 -16
- imap_processing/tests/lo/test_lo_science.py +69 -5
- imap_processing/tests/lo/test_pkts/imap_lo_l0_raw_20240803_v002.pkts +0 -0
- imap_processing/tests/lo/validation_data/Instrument_FM1_T104_R129_20240803_ILO_SCI_DE_dec_DN_with_fills.csv +1999 -0
- imap_processing/tests/mag/imap_mag_l1a_norm-magi_20251017_v001.cdf +0 -0
- imap_processing/tests/mag/test_mag_l1b.py +97 -7
- imap_processing/tests/spice/test_data/imap_ena_sim_metakernel.template +3 -1
- imap_processing/tests/spice/test_geometry.py +115 -9
- imap_processing/tests/spice/test_time.py +135 -6
- imap_processing/tests/swapi/test_swapi_decom.py +75 -69
- imap_processing/tests/swapi/test_swapi_l1.py +4 -4
- imap_processing/tests/swe/conftest.py +33 -0
- imap_processing/tests/swe/l1_validation/swe_l0_unpacked-data_20240510_v001_VALIDATION_L1B_v3.dat +4332 -0
- imap_processing/tests/swe/test_swe_l1b.py +29 -8
- imap_processing/tests/swe/test_swe_l2.py +64 -8
- imap_processing/tests/test_utils.py +2 -2
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3314 -3314
- imap_processing/tests/ultra/test_data/l1/dps_exposure_helio_45_E12.cdf +0 -0
- imap_processing/tests/ultra/test_data/l1/dps_exposure_helio_45_E24.cdf +0 -0
- imap_processing/tests/ultra/unit/test_de.py +113 -0
- imap_processing/tests/ultra/unit/test_spatial_utils.py +125 -0
- imap_processing/tests/ultra/unit/test_ultra_l1b.py +27 -3
- imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +31 -10
- imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +55 -35
- imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +10 -68
- imap_processing/ultra/constants.py +12 -3
- imap_processing/ultra/l1b/de.py +168 -30
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +24 -10
- imap_processing/ultra/l1b/ultra_l1b_extended.py +46 -80
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +60 -144
- imap_processing/ultra/utils/spatial_utils.py +221 -0
- {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/METADATA +15 -14
- {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/RECORD +142 -139
- imap_processing/cdf/cdf_attribute_manager.py +0 -322
- imap_processing/cdf/config/shared/default_global_cdf_attrs_schema.yaml +0 -246
- imap_processing/cdf/config/shared/default_variable_cdf_attrs_schema.yaml +0 -466
- imap_processing/hi/l0/decom_hi.py +0 -24
- imap_processing/hi/packet_definitions/hi_packet_definition.xml +0 -482
- imap_processing/hit/l0/data_classes/housekeeping.py +0 -240
- imap_processing/hit/l0/data_classes/science_packet.py +0 -259
- imap_processing/hit/l0/utils/hit_base.py +0 -57
- imap_processing/tests/cdf/shared/default_global_cdf_attrs_schema.yaml +0 -246
- imap_processing/tests/cdf/shared/default_variable_cdf_attrs_schema.yaml +0 -466
- imap_processing/tests/cdf/test_cdf_attribute_manager.py +0 -353
- imap_processing/tests/codice/data/imap_codice_l0_hi-counters-aggregated_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-counters-singles_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-omni_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-pha_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-sectored_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hskp_20100101_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-counters-aggregated_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-counters-singles_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-angular_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-priority_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-species_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-pha_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-sw-angular_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-sw-priority_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-sw-species_20240429_v001.pkts +0 -0
- imap_processing/tests/hi/test_decom.py +0 -55
- imap_processing/tests/hi/test_l1a_sci_de.py +0 -72
- imap_processing/tests/idex/imap_idex_l0_raw_20230725_v001.pkts +0 -0
- imap_processing/tests/mag/imap_mag_l1a_burst-magi_20231025_v001.cdf +0 -0
- /imap_processing/{hi/l0/__init__.py → tests/glows/test_glows_l2_data.py} +0 -0
- /imap_processing/tests/hit/test_data/{imap_hit_l0_hk_20100105_v001.pkts → imap_hit_l0_raw_20100105_v001.pkts} +0 -0
- {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_l1a.py
CHANGED

@@ -1,18 +1,17 @@
 """
 Perform CoDICE l1a processing.
 
-This module processes
+This module processes CoDICE L0 files and creates L1a data products.
 
 Notes
 -----
-    from imap_processing.codice.codice_l0 import decom_packets
     from imap_processing.codice.codice_l1a import process_codice_l1a
-
-    dataset = process_codice_l1a(packets)
+    processed_datasets = process_codice_l1a(path_to_l0_file)
 """
 
 from __future__ import annotations
 
+import ast
 import logging
 from pathlib import Path
 from typing import Any
@@ -28,12 +27,10 @@ from imap_processing.codice import constants
 from imap_processing.codice.codice_l0 import decom_packets
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import CODICEAPID
-from imap_processing.utils import convert_to_binary_string
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
 
-# TODO: Add support for decomming multiple APIDs from a single file
 # TODO: Determine what should go in event data CDF and how it should be
 # structured.
 
@@ -87,23 +84,39 @@ class CoDICEL1aPipeline:
         self.plan_step = plan_step
         self.view_id = view_id
 
-    def decompress_data(self, science_values: str) -> None:
+    def decompress_data(self, science_values: list[str]) -> None:
         """
         Perform decompression on the data.
 
-        The science data within the packet is a compressed
+        The science data within the packet is a compressed byte string of
         values. Apply the appropriate decompression algorithm to get an array
         of decompressed values.
 
         Parameters
         ----------
-        science_values : str
-            A
+        science_values : list[str]
+            A list of byte strings representing the science values of the data
+            for each packet.
         """
-
+        # The compression algorithm depends on the instrument and view ID
+        if self.config["instrument"] == "lo":
+            compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[self.view_id]
+        elif self.config["instrument"] == "hi":
+            compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[self.view_id]
+
+        self.raw_data = []
+        for packet_data, byte_count in zip(
+            science_values, self.dataset.byte_count.data
+        ):
+            # Convert from numpy array to byte object
+            values = ast.literal_eval(str(packet_data))
+
+            # Only use the values up to the byte count. Bytes after this are
+            # used as padding and are not needed
+            values = values[:byte_count]
 
-
-
+            decompressed_values = decompress(values, compression_algorithm)
+            self.raw_data.append(decompressed_values)
 
     def define_coordinates(self) -> None:
         """
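For reference, a minimal sketch of the byte-recovery step introduced above in decompress_data: it assumes each packet's science field round-trips through str() as a Python bytes literal, which ast.literal_eval converts back to bytes before the padding is trimmed. The sample values below are made up; in the pipeline they come from the decommed L0 dataset.

    import ast

    # Hypothetical stand-ins for one packet's science field and its byte_count.
    packet_data = b"\x01\x02\x03\x00\x00"  # last two bytes are padding
    byte_count = 3

    # str() of the stored element yields a literal such as "b'\\x01\\x02\\x03\\x00\\x00'",
    # which ast.literal_eval evaluates back into a bytes object.
    values = ast.literal_eval(str(packet_data))

    # Keep only the bytes up to the reported byte count; the rest is padding.
    values = values[:byte_count]
    assert values == b"\x01\x02\x03"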
@@ -115,13 +128,13 @@ class CoDICEL1aPipeline:
 
         for name in self.config["coords"]:
             if name == "epoch":
-                values = self.
+                values = self.dataset.epoch.data
+            elif name == "esa_step":
+                values = np.arange(self.config["num_energy_steps"])
             elif name == "inst_az":
                 values = np.arange(self.config["num_positions"])
             elif name == "spin_sector":
                 values = np.arange(self.config["num_spin_sectors"])
-            elif name == "esa_step":
-                values = np.arange(self.config["num_energy_steps"])
             else:
                 # TODO: Need to implement other types of coords
                 continue
@@ -145,7 +158,7 @@ class CoDICEL1aPipeline:
 
         Returns
         -------
-
+        processed_dataset : xarray.Dataset
             The 'final' ``xarray`` dataset.
         """
         # Create the main dataset to hold all the variables
@@ -154,12 +167,18 @@ class CoDICEL1aPipeline:
             attrs=self.cdf_attrs.get_global_attributes(self.config["dataset_name"]),
         )
 
-        #
-
-
+        # Stack the data so that it is easier to reshape and iterate over
+        all_data = np.stack(self.data)
+
+        # The dimension of all data is (epoch, num_counters, num_energy_steps,
+        # num_positions, num_spin_sectors) (or may be slightly different
+        # depending on the data product). In any case, iterate over the
+        # num_counters dimension to isolate the data for each counter so
+        # that it can be placed in a CDF data variable.
+        for counter, variable_name in zip(
+            range(all_data.shape[1]), self.config["variable_names"]
         ):
-
-            reshaped_variable_data = np.expand_dims(variable_data, axis=0)
+            counter_data = all_data[:, counter, :, :, :]
 
             # Get the CDF attributes
             descriptor = self.config["dataset_name"].split("imap_codice_l1a_")[-1]
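A small illustration of the stack-and-slice pattern used in define_data_variables above, with toy shapes and hypothetical variable names standing in for the per-APID configuration:

    import numpy as np
    import xarray as xr

    # Toy dimensions standing in for (epoch, counter, esa_step, inst_az, spin_sector).
    n_epochs, n_counters = 2, 3
    per_packet = [np.zeros((n_counters, 4, 5, 6)) for _ in range(n_epochs)]

    # Stack per-packet arrays along a new leading epoch axis, then slice out one
    # counter at a time so each counter becomes its own data variable.
    all_data = np.stack(per_packet)  # shape (epoch, counter, esa_step, inst_az, spin_sector)
    dataset = xr.Dataset()
    for counter, name in enumerate(["counter_a", "counter_b", "counter_c"]):  # hypothetical names
        dataset[name] = xr.DataArray(
            all_data[:, counter, :, :, :],
            dims=["epoch", "esa_step", "inst_az", "spin_sector"],
        )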
@@ -168,7 +187,7 @@ class CoDICEL1aPipeline:
 
             # Create the CDF data variable
             dataset[variable_name] = xr.DataArray(
-
+                counter_data,
                 name=variable_name,
                 dims=self.config["dims"],
                 attrs=attrs,
@@ -322,32 +341,51 @@ class CoDICEL1aPipeline:
         3D arrays representing dimensions such as spin sectors, positions, and
         energies (depending on the data product).
         """
+        self.data = []
+
         # For CoDICE-lo, data are a 3D arrays with a shape representing
         # [<num_positions>,<num_spin_sectors>,<num_energy_steps>]
         if self.config["instrument"] == "lo":
-
-
-
-
-
-
-
-
+            for packet_data in self.raw_data:
+                if packet_data:
+                    reshaped_packet_data = np.array(
+                        packet_data, dtype=np.uint32
+                    ).reshape(
+                        (
+                            self.config["num_counters"],
+                            self.config["num_energy_steps"],
+                            self.config["num_positions"],
+                            self.config["num_spin_sectors"],
+                        )
+                    )
+                    self.data.append(reshaped_packet_data)
+                else:
+                    self.data.append(None)
 
         # For CoDICE-hi, data are a 3D array with a shape representing
         # [<num_energy_steps>,<num_positions>,<num_spin_sectors>]
         elif self.config["instrument"] == "hi":
-
-
-
-
-
-
-
-
+            for packet_data in self.raw_data:
+                if packet_data:
+                    reshaped_packet_data = np.array(
+                        packet_data, dtype=np.uint32
+                    ).reshape(
+                        (
+                            self.config["num_counters"],
+                            self.config["num_energy_steps"],
+                            self.config["num_positions"],
+                            self.config["num_spin_sectors"],
+                        )
+                    )
+                    self.data.append(reshaped_packet_data)
+                else:
+                    self.data.append(None)
+
+        # No longer need to keep the raw data around
+        del self.raw_data
 
     def set_data_product_config(
-        self, apid: int,
+        self, apid: int, dataset: xr.Dataset, data_version: str
     ) -> None:
         """
         Set the various settings for defining the data products.
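The reshape itself is a single NumPy call over each packet's flat decompressed counts; a sketch with a hypothetical configuration (the real shapes live in constants.DATA_PRODUCT_CONFIGURATIONS):

    import numpy as np

    config = {
        "num_counters": 4,
        "num_energy_steps": 128,
        "num_positions": 24,     # hypothetical value
        "num_spin_sectors": 12,  # hypothetical value
    }

    # One packet's decompressed counts arrive as a flat list; reshaping puts the
    # counter index on the leading axis.
    flat_counts = list(range(4 * 128 * 24 * 12))
    reshaped = np.array(flat_counts, dtype=np.uint32).reshape(
        (
            config["num_counters"],
            config["num_energy_steps"],
            config["num_positions"],
            config["num_spin_sectors"],
        )
    )
    assert reshaped.shape == (4, 128, 24, 12)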
@@ -356,14 +394,14 @@ class CoDICEL1aPipeline:
         ----------
         apid : int
             The APID of interest.
-
-
+        dataset : xarray.Dataset
+            The dataset for the APID of interest.
         data_version : str
             Version of the data product being created.
         """
         # Set the packet dataset so that it can be easily called from various
         # methods
-        self.
+        self.dataset = dataset
 
         # Set various configurations of the data product
         self.config: dict[str, Any] = constants.DATA_PRODUCT_CONFIGURATIONS.get(apid)  # type: ignore
@@ -473,7 +511,7 @@ def create_hskp_dataset(
     return dataset
 
 
-def get_params(packet: xr.Dataset) -> tuple[int, int, int, int]:
+def get_params(dataset: xr.Dataset) -> tuple[int, int, int, int]:
     """
     Return the four 'main' parameters used for l1a processing.
 
@@ -483,8 +521,10 @@ def get_params(packet: xr.Dataset) -> tuple[int, int, int, int]:
 
     Parameters
    ----------
-
-
+    dataset : xarray.Dataset
+        The dataset for the APID of interest. We expect each packet in the
+        dataset to have the same values for the four main parameters, so the
+        first index of the dataset can be used to determine them.
 
     Returns
     -------
@@ -502,15 +542,37 @@ def get_params(packet: xr.Dataset) -> tuple[int, int, int, int]:
     view_id : int
         Provides information about how data was collapsed and/or compressed.
     """
-    table_id = int(
-    plan_id = int(
-    plan_step = int(
-    view_id = int(
+    table_id = int(dataset.table_id.data[0])
+    plan_id = int(dataset.plan_id.data[0])
+    plan_step = int(dataset.plan_step.data[0])
+    view_id = int(dataset.view_id.data[0])
 
     return table_id, plan_id, plan_step, view_id
 
 
-def process_codice_l1a(file_path: Path, data_version: str) -> xr.Dataset:
+def log_dataset_info(datasets: dict[int, xr.Dataset]) -> None:
+    """
+    Log info about the input data to help with tracking and/or debugging.
+
+    Parameters
+    ----------
+    datasets : dict[int, xarray.Dataset]
+        Mapping from apid to ``xarray`` dataset, one dataset per apid.
+    """
+    launch_time = np.datetime64("2010-01-01T00:01:06.184", "ns")
+    logger.info("\nThis input file contains the following APIDs:\n")
+    for apid in datasets:
+        num_packets = len(datasets[apid].epoch.data)
+        time_deltas = [np.timedelta64(item, "ns") for item in datasets[apid].epoch.data]
+        times = [launch_time + delta for delta in time_deltas]
+        start = np.datetime_as_string(times[0])
+        end = np.datetime_as_string(times[-1])
+        logger.info(
+            f"{CODICEAPID(apid).name}: {num_packets} packets spanning {start} to {end}"
+        )
+
+
+def process_codice_l1a(file_path: Path, data_version: str) -> list[xr.Dataset]:
     """
     Will process CoDICE l0 data to create l1a data products.
 
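The logging helper above treats each epoch value as a nanosecond offset added to the fixed reference time it defines; a worked example with made-up offsets:

    import numpy as np

    launch_time = np.datetime64("2010-01-01T00:01:06.184", "ns")
    epoch_ns = [0, 4_000_000_000]  # hypothetical offsets for two packets

    times = [launch_time + np.timedelta64(ns, "ns") for ns in epoch_ns]
    # Prints 2010-01-01T00:01:06.184000000 to 2010-01-01T00:01:10.184000000
    print(np.datetime_as_string(times[0]), "to", np.datetime_as_string(times[-1]))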
@@ -523,38 +585,66 @@ def process_codice_l1a(file_path: Path, data_version: str) -> xr.Dataset:
 
     Returns
     -------
-
-
+    processed_datasets : list[xarray.Dataset]
+        A list of the ``xarray`` datasets containing the science data and
+        supporting metadata.
     """
     # Decom the packets, group data by APID, and sort by time
     datasets = decom_packets(file_path)
 
+    # Log some information about the contents of the data
+    log_dataset_info(datasets)
+
+    # Placeholder to hold the final, processed datasets
+    processed_datasets = []
+
+    # Process each APID separately
     for apid in datasets:
-
+        dataset = datasets[apid]
         logger.info(f"\nProcessing {CODICEAPID(apid).name} packet")
 
+        # Housekeeping data
         if apid == CODICEAPID.COD_NHK:
-
+            processed_dataset = create_hskp_dataset(dataset, data_version)
+            logger.info(f"\nFinal data product:\n{processed_dataset}\n")
 
+        # Event data
         elif apid in [CODICEAPID.COD_LO_PHA, CODICEAPID.COD_HI_PHA]:
-
+            processed_dataset = create_event_dataset(apid, dataset, data_version)
+            logger.info(f"\nFinal data product:\n{processed_dataset}\n")
 
+        # Everything else
         elif apid in constants.APIDS_FOR_SCIENCE_PROCESSING:
             # Extract the data
-            science_values =
-            science_values = convert_to_binary_string(science_values)
+            science_values = [packet.data for packet in dataset.data]
 
             # Get the four "main" parameters for processing
-            table_id, plan_id, plan_step, view_id = get_params(
+            table_id, plan_id, plan_step, view_id = get_params(dataset)
 
             # Run the pipeline to create a dataset for the product
             pipeline = CoDICEL1aPipeline(table_id, plan_id, plan_step, view_id)
-            pipeline.set_data_product_config(apid,
+            pipeline.set_data_product_config(apid, dataset, data_version)
             pipeline.decompress_data(science_values)
             pipeline.reshape_data()
             pipeline.define_coordinates()
-
+            processed_dataset = pipeline.define_data_variables()
 
-
+            logger.info(f"\nFinal data product:\n{processed_dataset}\n")
 
-
+        # TODO: Still need to implement I-ALiRT and hi-priorities data products
+        elif apid in [
+            CODICEAPID.COD_HI_INST_COUNTS_PRIORITIES,
+            CODICEAPID.COD_HI_IAL,
+            CODICEAPID.COD_LO_IAL,
+        ]:
+            logger.info("\tStill need to properly implement")
+            processed_dataset = None
+
+        # For APIDs that don't require processing
+        else:
+            logger.info(f"\t{apid} does not require processing")
+            continue
+
+        processed_datasets.append(processed_dataset)
+
+    return processed_datasets
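Putting the new entry point together, a hedged usage sketch (the file name and version string are placeholders, and the version format is an assumption; the return value is now a list that may contain None for APIDs that are not yet implemented):

    from pathlib import Path

    from imap_processing.codice.codice_l1a import process_codice_l1a

    l0_file = Path("imap_codice_l0_raw_20241110_v001.pkts")  # hypothetical input file
    processed_datasets = process_codice_l1a(l0_file, "001")  # version string format assumed

    for dataset in processed_datasets:
        if dataset is not None:  # unimplemented APIDs come back as None
            print(dataset)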
imap_processing/codice/constants.py
CHANGED

@@ -124,7 +124,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
         "instrument": "hi",
         "num_counters": 3,
         "num_energy_steps": 1,  # TODO: Double check with Joey
-        "num_positions":
+        "num_positions": 12,  # TODO: Double check with Joey
         "num_spin_sectors": 1,
         "support_variables": [],  # No support variables for this one
         "variable_names": HI_INST_COUNTS_SINGLES_VARIABLE_NAMES,
@@ -184,7 +184,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
             "energy_label",
         ],  # TODO: These will likely change
         "dataset_name": "imap_codice_l1a_lo-counters-aggregated",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 1,
         "num_energy_steps": 128,
@@ -205,7 +205,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
             "energy_label",
         ],  # TODO: These will likely change
         "dataset_name": "imap_codice_l1a_lo-counters-singles",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 1,
         "num_energy_steps": 128,
@@ -225,9 +225,9 @@ DATA_PRODUCT_CONFIGURATIONS = {
         "variable_names": LO_INST_COUNTS_SINGLES_VARIABLE_NAMES,
     },
     CODICEAPID.COD_LO_SW_ANGULAR_COUNTS: {
-        "coords": ["epoch", "
+        "coords": ["epoch", "energy_label", "esa_step", "inst_az", "spin_sector"],
         "dataset_name": "imap_codice_l1a_lo-sw-angular",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 4,
         "num_energy_steps": 128,
@@ -248,7 +248,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
     CODICEAPID.COD_LO_NSW_ANGULAR_COUNTS: {
         "coords": ["epoch", "inst_az", "spin_sector", "esa_step", "energy_label"],
         "dataset_name": "imap_codice_l1a_lo-nsw-angular",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 1,
         "num_energy_steps": 128,
@@ -269,7 +269,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
     CODICEAPID.COD_LO_SW_PRIORITY_COUNTS: {
         "coords": ["epoch", "inst_az", "spin_sector", "esa_step", "energy_label"],
         "dataset_name": "imap_codice_l1a_lo-sw-priority",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 5,
         "num_energy_steps": 128,
@@ -290,7 +290,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
     CODICEAPID.COD_LO_NSW_PRIORITY_COUNTS: {
         "coords": ["epoch", "inst_az", "spin_sector", "esa_step", "energy_label"],
         "dataset_name": "imap_codice_l1a_lo-nsw-priority",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 2,
         "num_energy_steps": 128,
@@ -311,7 +311,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
     CODICEAPID.COD_LO_SW_SPECIES_COUNTS: {
         "coords": ["epoch", "inst_az", "spin_sector", "esa_step", "energy_label"],
         "dataset_name": "imap_codice_l1a_lo-sw-species",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 16,
         "num_energy_steps": 128,
@@ -332,7 +332,7 @@ DATA_PRODUCT_CONFIGURATIONS = {
     CODICEAPID.COD_LO_NSW_SPECIES_COUNTS: {
         "coords": ["epoch", "inst_az", "spin_sector", "esa_step", "energy_label"],
         "dataset_name": "imap_codice_l1a_lo-nsw-species",
-        "dims": ["epoch", "
+        "dims": ["epoch", "esa_step", "inst_az", "spin_sector"],
         "instrument": "lo",
         "num_counters": 8,
         "num_energy_steps": 128,
imap_processing/codice/decompress.py
CHANGED

@@ -50,7 +50,9 @@ def _apply_lossy_a(compressed_bytes: bytes) -> list[int]:
         The 24- or 32-bit decompressed values.
     """
     compressed_values = list(compressed_bytes)
-    decompressed_values = [
+    decompressed_values = [
+        LOSSY_A_TABLE[item - 1] if item > 0 else 0 for item in compressed_values
+    ]
     return decompressed_values
 
 
@@ -71,7 +73,9 @@ def _apply_lossy_b(compressed_bytes: bytes) -> list[int]:
         The 24- or 32-bit decompressed values.
     """
     compressed_values = list(compressed_bytes)
-    decompressed_values = [
+    decompressed_values = [
+        LOSSY_B_TABLE[item - 1] if item > 0 else 0 for item in compressed_values
+    ]
     return decompressed_values
 
 
@@ -94,9 +98,9 @@ def _apply_lzma_lossless(compressed_bytes: bytes) -> bytes:
     return lzma_decompressed_values
 
 
-def decompress(
+def decompress(compressed_bytes: bytes, algorithm: IntEnum) -> list[int]:
     """
-    Perform decompression on a
+    Perform decompression on a byte stream into a list of integers.
 
     Apply the appropriate decompression algorithm(s) based on the value
     of the ``algorithm`` attribute. One or more individual algorithms may be
@@ -104,8 +108,8 @@ def decompress(compressed_binary: str, algorithm: IntEnum) -> list[int]:
 
     Parameters
     ----------
-
-        The compressed
+    compressed_bytes : bytes
+        The compressed byte stream.
     algorithm : int
         The algorithm to apply. Supported algorithms are provided in the
         ``codice_utils.CoDICECompression`` class.
@@ -115,11 +119,6 @@ def decompress(compressed_binary: str, algorithm: IntEnum) -> list[int]:
     decompressed_values : list[int]
         The 24- or 32-bit decompressed values.
     """
-    # Convert the binary string to a byte stream
-    compressed_bytes = int(compressed_binary, 2).to_bytes(
-        (len(compressed_binary) + 7) // 8, byteorder="big"
-    )
-
     # Apply the appropriate decompression algorithm
     if algorithm == CoDICECompression.NO_COMPRESSION:
         decompressed_values = list(compressed_bytes)
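The LOSSY A/B step shown above is a 1-based table lookup with zero preserved; a toy version with a made-up 4-entry table standing in for LOSSY_A_TABLE/LOSSY_B_TABLE:

    TOY_TABLE = [1, 2, 4, 8]  # stand-in for the real 8-bit-to-24/32-bit tables

    def apply_lossy_toy(compressed_bytes: bytes) -> list[int]:
        # Each compressed byte indexes the table (1-based); a zero byte maps to zero.
        compressed_values = list(compressed_bytes)
        return [TOY_TABLE[item - 1] if item > 0 else 0 for item in compressed_values]

    assert apply_lossy_toy(b"\x00\x01\x03") == [0, 1, 4]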
imap_processing/codice/utils.py
CHANGED

imap_processing/glows/l1a/glows_l1a.py
CHANGED

@@ -7,11 +7,10 @@ import numpy as np
 import xarray as xr
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.cdf.utils import J2000_EPOCH
 from imap_processing.glows.l0.decom_glows import decom_packets
 from imap_processing.glows.l0.glows_l0_data import DirectEventL0
 from imap_processing.glows.l1a.glows_l1a_data import DirectEventL1A, HistogramL1A
-from imap_processing.spice.time import met_to_j2000ns
+from imap_processing.spice.time import J2000_EPOCH, met_to_j2000ns
 
 
 def create_glows_attr_obj(data_version: str) -> ImapCdfAttributes:
imap_processing/glows/l1b/glows_l1b.py
CHANGED

@@ -255,10 +255,10 @@ def process_histogram(l1a: xr.Dataset) -> xr.Dataset:
         "imap_spin_angle_bin_cntr": ["bins"],
         "histogram_flag_array": ["bad_angle_flags", "bins"],
         "spacecraft_location_average": ["ecliptic"],
-        "
+        "spacecraft_location_std_dev": ["ecliptic"],
         "spacecraft_velocity_average": ["ecliptic"],
-        "
-        "flags": ["flag_dim"
+        "spacecraft_velocity_std_dev": ["ecliptic"],
+        "flags": ["flag_dim"],
     }
 
     # For each attribute, retrieve the dims from output_dimension_mapping or use an
|