imap-processing 1.0.0-py3-none-any.whl → 1.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
- imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
- imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +29 -22
- imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
- imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +10 -27
- imap_processing/codice/codice_l1a_lo_angular.py +362 -0
- imap_processing/codice/codice_l1a_lo_species.py +282 -0
- imap_processing/codice/codice_l1b.py +62 -97
- imap_processing/codice/codice_l2.py +801 -174
- imap_processing/codice/codice_new_l1a.py +64 -0
- imap_processing/codice/constants.py +96 -0
- imap_processing/codice/utils.py +270 -0
- imap_processing/ena_maps/ena_maps.py +157 -95
- imap_processing/ena_maps/utils/coordinates.py +5 -0
- imap_processing/ena_maps/utils/corrections.py +450 -0
- imap_processing/ena_maps/utils/map_utils.py +143 -42
- imap_processing/ena_maps/utils/naming.py +3 -1
- imap_processing/hi/hi_l1c.py +34 -12
- imap_processing/hi/hi_l2.py +82 -44
- imap_processing/ialirt/constants.py +7 -1
- imap_processing/ialirt/generate_coverage.py +3 -1
- imap_processing/ialirt/l0/parse_mag.py +1 -0
- imap_processing/ialirt/l0/process_codice.py +66 -0
- imap_processing/ialirt/l0/process_hit.py +1 -0
- imap_processing/ialirt/l0/process_swapi.py +1 -0
- imap_processing/ialirt/l0/process_swe.py +2 -0
- imap_processing/ialirt/process_ephemeris.py +6 -2
- imap_processing/ialirt/utils/create_xarray.py +4 -2
- imap_processing/idex/idex_l2a.py +2 -2
- imap_processing/idex/idex_l2b.py +1 -1
- imap_processing/lo/l1c/lo_l1c.py +62 -4
- imap_processing/lo/l2/lo_l2.py +85 -15
- imap_processing/mag/l1a/mag_l1a.py +2 -2
- imap_processing/mag/l1a/mag_l1a_data.py +71 -13
- imap_processing/mag/l1c/interpolation_methods.py +34 -13
- imap_processing/mag/l1c/mag_l1c.py +117 -67
- imap_processing/mag/l1d/mag_l1d_data.py +3 -1
- imap_processing/quality_flags.py +1 -0
- imap_processing/spice/geometry.py +11 -9
- imap_processing/spice/pointing_frame.py +77 -50
- imap_processing/swapi/constants.py +4 -0
- imap_processing/swapi/l1/swapi_l1.py +59 -24
- imap_processing/swapi/l2/swapi_l2.py +17 -3
- imap_processing/swe/utils/swe_constants.py +7 -7
- imap_processing/ultra/l1a/ultra_l1a.py +121 -72
- imap_processing/ultra/l1b/de.py +57 -1
- imap_processing/ultra/l1b/extendedspin.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
- imap_processing/ultra/l1b/ultra_l1b_extended.py +25 -12
- imap_processing/ultra/l1c/helio_pset.py +29 -6
- imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
- imap_processing/ultra/l1c/spacecraft_pset.py +10 -6
- imap_processing/ultra/l1c/ultra_l1c.py +6 -6
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
- imap_processing/ultra/l2/ultra_l2.py +2 -2
- imap_processing-1.0.2.dist-info/METADATA +121 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/RECORD +67 -61
- imap_processing-1.0.0.dist-info/METADATA +0 -120
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
--- /dev/null
+++ imap_processing/codice/codice_l1a_lo_species.py
@@ -0,0 +1,282 @@
+"""CoDICE Lo Species L1A processing functions."""
+
+import logging
+from pathlib import Path
+
+import numpy as np
+import xarray as xr
+
+from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
+from imap_processing.codice import constants
+from imap_processing.codice.decompress import decompress
+from imap_processing.codice.utils import (
+    CODICEAPID,
+    ViewTabInfo,
+    calculate_acq_time_per_step,
+    get_codice_epoch_time,
+    get_collapse_pattern_shape,
+    get_view_tab_info,
+    read_sci_lut,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
+    """
+    L1A processing code.
+
+    Parameters
+    ----------
+    unpacked_dataset : xarray.Dataset
+        The decompressed and unpacked data from the packet file.
+    lut_file : pathlib.Path
+        Path to the LUT (Lookup Table) file used for processing.
+
+    Returns
+    -------
+    xarray.Dataset
+        The processed L1A dataset for the given species product.
+    """
+    # Get these values from unpacked data. These are used to
+    # lookup in LUT table.
+    table_id = unpacked_dataset["table_id"].values[0]
+    view_id = unpacked_dataset["view_id"].values[0]
+    apid = unpacked_dataset["pkt_apid"].values[0]
+    plan_id = unpacked_dataset["plan_id"].values[0]
+    plan_step = unpacked_dataset["plan_step"].values[0]
+
+    logger.info(
+        f"Processing species with - APID: {apid}, View ID: {view_id}, "
+        f"Table ID: {table_id}, Plan ID: {plan_id}, Plan Step: {plan_step}"
+    )
+
+    # ========== Get LUT Data ===========
+    # Read information from LUT
+    sci_lut_data = read_sci_lut(lut_file, table_id)
+
+    view_tab_info = get_view_tab_info(sci_lut_data, view_id, apid)
+    view_tab_obj = ViewTabInfo(
+        apid=apid,
+        view_id=view_id,
+        sensor=view_tab_info["sensor"],
+        three_d_collapsed=view_tab_info["3d_collapse"],
+        collapse_table=view_tab_info["collapse_table"],
+    )
+
+    if view_tab_obj.sensor != 0:
+        raise ValueError("Unsupported sensor ID for Lo species processing.")
+
+    # ========= Decompress and Reshape Data ===========
+    # Lookup SW or NSW species based on APID
+    if view_tab_obj.apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS:
+        species_names = sci_lut_data["data_product_lo_tab"]["0"]["species"]["sw"][
+            "species_names"
+        ]
+        logical_source_id = "imap_codice_l1a_lo-sw-species"
+    elif view_tab_obj.apid == CODICEAPID.COD_LO_NSW_SPECIES_COUNTS:
+        species_names = sci_lut_data["data_product_lo_tab"]["0"]["species"]["nsw"][
+            "species_names"
+        ]
+        logical_source_id = "imap_codice_l1a_lo-nsw-species"
+    else:
+        raise ValueError(f"Unknown apid {view_tab_obj.apid} in Lo species processing.")
+
+    compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id]
+    # Decompress data using byte count information from decommed data
+    binary_data_list = unpacked_dataset["data"].values
+    byte_count_list = unpacked_dataset["byte_count"].values
+
+    # The decompressed data in the shape of (epoch, n). Then reshape later.
+    decompressed_data = [
+        decompress(
+            packet_data[:byte_count],
+            compression_algorithm,
+        )
+        for (packet_data, byte_count) in zip(
+            binary_data_list, byte_count_list, strict=False
+        )
+    ]
+
+    # Look up collapse pattern using LUT table. This should return collapsed shape.
+    # For Lo species, it will be (1,)
+    collapsed_shape = get_collapse_pattern_shape(
+        sci_lut_data, view_tab_obj.sensor, view_tab_obj.collapse_table
+    )
+
+    # Reshape decompressed data to:
+    # (num_packets, num_species, esa_steps, *collapsed_shape)
+    # where collapsed_shape is usually (1,) for Lo species.
+    num_packets = len(binary_data_list)
+    num_species = len(species_names)
+    esa_steps = constants.NUM_ESA_STEPS
+    species_data = np.array(decompressed_data).reshape(
+        num_packets, num_species, esa_steps, *collapsed_shape
+    )
+
+    # ========== Get Voltage Data from LUT ===========
+    # Use plan id and plan step to get voltage data's table_number in ESA sweep table.
+    # Voltage data is (128,)
+    esa_table_number = sci_lut_data["plan_tab"][f"({plan_id}, {plan_step})"][
+        "lo_stepping"
+    ]
+    voltage_data = sci_lut_data["esa_sweep_tab"][f"{esa_table_number}"]
+
+    # ========= Get Epoch Time Data ===========
+    # Epoch center time and delta
+    epoch_center, deltas = get_codice_epoch_time(
+        unpacked_dataset["acq_start_seconds"].values,
+        unpacked_dataset["acq_start_subseconds"].values,
+        unpacked_dataset["spin_period"].values,
+        view_tab_obj,
+    )
+
+    # ========== Create CDF Dataset with Metadata ===========
+    cdf_attrs = ImapCdfAttributes()
+    cdf_attrs.add_instrument_global_attrs("codice")
+    cdf_attrs.add_instrument_variable_attrs("codice", "l1a")
+
+    l1a_dataset = xr.Dataset(
+        coords={
+            "epoch": xr.DataArray(
+                epoch_center,
+                dims=("epoch",),
+                attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
+            ),
+            "epoch_delta_minus": xr.DataArray(
+                deltas,
+                dims=("epoch",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "epoch_delta_minus", check_schema=False
+                ),
+            ),
+            "epoch_delta_plus": xr.DataArray(
+                deltas,
+                dims=("epoch",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "epoch_delta_plus", check_schema=False
+                ),
+            ),
+            "esa_step": xr.DataArray(
+                np.arange(128),
+                dims=("esa_step",),
+                attrs=cdf_attrs.get_variable_attributes("esa_step", check_schema=False),
+            ),
+            "esa_step_label": xr.DataArray(
+                np.arange(128).astype(str),
+                dims=("esa_step",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "esa_step_label", check_schema=False
+                ),
+            ),
+            "k_factor": xr.DataArray(
+                np.array([constants.K_FACTOR]),
+                dims=("k_factor",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "k_factor_attrs", check_schema=False
+                ),
+            ),
+            "spin_sector": xr.DataArray(
+                np.array([0], dtype=np.uint8),
+                dims=("spin_sector",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "spin_sector", check_schema=False
+                ),
+            ),
+            "spin_sector_label": xr.DataArray(
+                np.array(["0"]).astype(str),
+                dims=("spin_sector",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "spin_sector_label", check_schema=False
+                ),
+            ),
+        },
+        attrs=cdf_attrs.get_global_attributes(logical_source_id),
+    )
+    # Add first few unique variables
+    l1a_dataset["spin_period"] = xr.DataArray(
+        unpacked_dataset["spin_period"].values * constants.SPIN_PERIOD_CONVERSION,
+        dims=("epoch",),
+        attrs=cdf_attrs.get_variable_attributes("spin_period"),
+    )
+    l1a_dataset["k_factor"] = xr.DataArray(
+        np.array([constants.K_FACTOR]),
+        dims=("k_factor",),
+        attrs=cdf_attrs.get_variable_attributes("k_factor_attrs", check_schema=False),
+    )
+    l1a_dataset["voltage_table"] = xr.DataArray(
+        np.array(voltage_data),
+        dims=("esa_step",),
+        attrs=cdf_attrs.get_variable_attributes("voltage_table", check_schema=False),
+    )
+    l1a_dataset["data_quality"] = xr.DataArray(
+        unpacked_dataset["suspect"].values,
+        dims=("epoch",),
+        attrs=cdf_attrs.get_variable_attributes("data_quality"),
+    )
+    l1a_dataset["acquisition_time_per_step"] = xr.DataArray(
+        calculate_acq_time_per_step(sci_lut_data["lo_stepping_tab"]),
+        dims=("esa_step",),
+        attrs=cdf_attrs.get_variable_attributes(
+            "acquisition_time_per_step", check_schema=False
+        ),
+    )
+
+    # Carry over these variables from unpacked data to l1a_dataset
+    l1a_carryover_vars = [
+        "sw_bias_gain_mode",
+        "st_bias_gain_mode",
+        "rgfo_half_spin",
+        "nso_half_spin",
+    ]
+    # Loop through them since we need to set their attrs too
+    for var in l1a_carryover_vars:
+        l1a_dataset[var] = xr.DataArray(
+            unpacked_dataset[var].values,
+            dims=("epoch",),
+            attrs=cdf_attrs.get_variable_attributes(var),
+        )
+
+    # Finally, add species data variables and their uncertainties
+    for idx, species in enumerate(species_names):
+        if view_tab_obj.apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS and species in [
+            "heplus",
+            "cnoplus",
+        ]:
+            species_attrs = cdf_attrs.get_variable_attributes("lo-pui-species-attrs")
+            unc_attrs = cdf_attrs.get_variable_attributes("lo-pui-species-unc-attrs")
+        else:
+            species_attrs = cdf_attrs.get_variable_attributes("lo-species-attrs")
+            unc_attrs = cdf_attrs.get_variable_attributes("lo-species-unc-attrs")
+
+        direction = (
+            "Sunward"
+            if view_tab_obj.apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS
+            else "Non-Sunward"
+        )
+        # Replace {species} and {direction} in attrs
+        species_attrs["CATDESC"] = species_attrs["CATDESC"].format(
+            species=species, direction=direction
+        )
+        species_attrs["FIELDNAM"] = species_attrs["FIELDNAM"].format(
+            species=species, direction=direction
+        )
+        l1a_dataset[species] = xr.DataArray(
+            species_data[:, idx, :, :],
+            dims=("epoch", "esa_step", "spin_sector"),
+            attrs=species_attrs,
+        )
+        # Uncertainty data
+        unc_attrs["CATDESC"] = unc_attrs["CATDESC"].format(
+            species=species, direction=direction
+        )
+        unc_attrs["FIELDNAM"] = unc_attrs["FIELDNAM"].format(
+            species=species, direction=direction
+        )
+        l1a_dataset[f"unc_{species}"] = xr.DataArray(
+            np.sqrt(species_data[:, idx, :, :]),
+            dims=("epoch", "esa_step", "spin_sector"),
+            attrs=unc_attrs,
+        )
+
+    return l1a_dataset
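For readers skimming the new L1A module above, here is a small, self-contained sketch of the reshape and Poisson-uncertainty pattern it applies to the decompressed packet counts. This is not taken from the package; the sizes and values are hypothetical toy data standing in for the real decompressed arrays.

```python
import numpy as np

# Hypothetical sizes: 2 packets (epochs), 3 species, 128 ESA steps,
# and a collapsed spin-sector axis of length 1 (as for Lo species).
num_packets, num_species, esa_steps = 2, 3, 128
collapsed_shape = (1,)

# Stand-in for the per-packet decompressed count arrays.
decompressed_data = [
    np.arange(num_species * esa_steps * collapsed_shape[0]) for _ in range(num_packets)
]

# Same reshape as the new module: (epoch, species, esa_step, spin_sector).
species_data = np.array(decompressed_data).reshape(
    num_packets, num_species, esa_steps, *collapsed_shape
)

counts = species_data[:, 0, :, :]   # one species -> (epoch, esa_step, spin_sector)
uncertainty = np.sqrt(counts)       # Poisson counting uncertainty, as in unc_<species>
print(counts.shape, uncertainty.shape)  # (2, 128, 1) (2, 128, 1)
```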
--- imap_processing/codice/codice_l1b.py
+++ imap_processing/codice/codice_l1b.py
@@ -15,20 +15,15 @@ from pathlib import Path
 import numpy as np
 import xarray as xr
 
-from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.codice import constants
-from imap_processing.codice.utils import CODICEAPID
-from imap_processing.utils import packet_file_to_datasets
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
 
 
-def convert_to_rates(
-    dataset: xr.Dataset, descriptor: str, variable_name: str
-) -> np.ndarray:
+def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray:
     """
     Apply a conversion from counts to rates.
 
@@ -41,14 +36,17 @@ def convert_to_rates(
         The L1b dataset containing the data to convert.
     descriptor : str
         The descriptor of the data product of interest.
-    variable_name : str
-        The variable name to apply the conversion to.
 
     Returns
     -------
     rates_data : np.ndarray
         The converted data array.
     """
+    # Variables to convert based on descriptor
+    variables_to_convert = getattr(
+        constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
+    )
+
     if descriptor in [
         "lo-counters-aggregated",
         "lo-counters-singles",
@@ -58,41 +56,49 @@ def convert_to_rates(
         "lo-sw-priority",
         "lo-ialirt",
     ]:
-        #
-
-
-        # time data array to match the data variable shape
-        dims = [1] * dataset[variable_name].data.ndim
-        dims[1] = 128
-        acq_times = dataset.acquisition_time_per_step.data.reshape(dims)  # (128)
-        # Now perform the calculation
-        rates_data = dataset[variable_name].data / (
-            acq_times
-            * 1e-3  # Converting from milliseconds to seconds
+        # Denominator to convert counts to rates
+        denominator = (
+            dataset.acquisition_time_per_step
             * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
         )
+
+        # Do not carry these variable attributes from L1a to L1b for above products
+        drop_variables = [
+            "k_factor",
+            "nso_half_spin",
+            "sw_bias_gain_mode",
+            "st_bias_gain_mode",
+            "spin_period",
+        ]
+        dataset = dataset.drop_vars(drop_variables)
     elif descriptor in [
         "lo-nsw-species",
         "lo-sw-species",
     ]:
-        #
-        #
-        #
-        # time data array to match the data variable shape (epoch, esa_step, sector)
-        dims = [1] * dataset[variable_name].data.ndim
-        dims[1] = 128
-        acq_times = dataset.acquisition_time_per_step.data.reshape(dims)  # (128)
-        # acquisition time have an array of shape (128,). We match n_sector to that.
+        # Create n_sector with 'esa_step' dimension. This is done by xr.full_like
+        # with input dataset.acquisition_time_per_step. This ensures that the resulting
+        # n_sector has the same dimensions as acquisition_time_per_step.
         # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11.
-        n_sector =
-
-
-        # Now perform the calculation
-        rates_data = dataset[variable_name].data / (
-            acq_times
-            * 1e-3  # Converting from milliseconds to seconds
-            * n_sector[:, np.newaxis]  # Spin sectors
+        n_sector = xr.full_like(
+            dataset.acquisition_time_per_step, 12.0, dtype=np.float64
         )
+        n_sector[-1] = 11.0
+
+        # Denominator to convert counts to rates
+        denominator = dataset.acquisition_time_per_step * n_sector
+
+        # Do not carry these variable attributes from L1a to L1b for above products
+        drop_variables = [
+            "k_factor",
+            "nso_half_spin",
+            "sw_bias_gain_mode",
+            "st_bias_gain_mode",
+            "spin_period",
+            "voltage_table",
+            "acquisition_time_per_step",
+        ]
+        dataset = dataset.drop_vars(drop_variables)
+
     elif descriptor in [
         "hi-counters-aggregated",
         "hi-counters-singles",
@@ -101,15 +107,27 @@ def convert_to_rates(
         "hi-sectored",
         "hi-ialirt",
     ]:
-        #
-
-        rates_data = dataset[variable_name].data / (
+        # Denominator to convert counts to rates
+        denominator = (
            constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
            * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spins"]
            * constants.HI_ACQUISITION_TIME
        )
 
-
+    # For each variable, convert counts and uncertainty to rates
+    for variable in variables_to_convert:
+        dataset[variable].data = dataset[variable].astype(np.float64) / denominator
+        # Carry over attrs and update as needed
+        dataset[variable].attrs["UNITS"] = "counts/s"
+
+        # Uncertainty calculation
+        unc_variable = f"unc_{variable}"
+        dataset[unc_variable].data = (
+            dataset[unc_variable].astype(np.float64) / denominator
+        )
+        dataset[unc_variable].attrs["UNITS"] = "1/s"
+
+    return dataset
 
 
 def process_codice_l1b(file_path: Path) -> xr.Dataset:
@@ -136,70 +154,17 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
     dataset_name = l1a_dataset.attrs["Logical_source"].replace("_l1a_", "_l1b_")
     descriptor = dataset_name.removeprefix("imap_codice_l1b_")
 
-    # Direct event data products do not have a level L1B
-    if descriptor in ["lo-direct-events", "hi-direct-events"]:
-        logger.warning("Encountered direct event data product. Skipping L1b processing")
-        return None
-
     # Get the L1b CDF attributes
     cdf_attrs = ImapCdfAttributes()
     cdf_attrs.add_instrument_global_attrs("codice")
     cdf_attrs.add_instrument_variable_attrs("codice", "l1b")
 
     # Use the L1a data product as a starting point for L1b
-    l1b_dataset = l1a_dataset.copy()
+    l1b_dataset = l1a_dataset.copy(deep=True)
 
     # Update the global attributes
     l1b_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
-
-
-
-
-        xtce_packet_definition = Path(
-            f"{imap_module_directory}/codice/packet_definitions/{xtce_filename}"
-        )
-        packet_file = (
-            imap_module_directory
-            / "tests"
-            / "codice"
-            / "data"
-            / "imap_codice_l0_raw_20241110_v001.pkts"
-        )
-        datasets: dict[int, xr.Dataset] = packet_file_to_datasets(
-            packet_file, xtce_packet_definition, use_derived_value=True
-        )
-        l1b_dataset = datasets[CODICEAPID.COD_NHK]
-
-        # TODO: Drop the same variables as we do in L1a? (see line 1103 in
-        # codice_l1a.py
-
-    else:
-        variables_to_convert = getattr(
-            constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
-        )
-
-        # Apply the conversion to rates
-        for variable_name in variables_to_convert:
-            l1b_dataset[variable_name].data = convert_to_rates(
-                l1b_dataset, descriptor, variable_name
-            )
-            # Set the variable attributes
-            cdf_attrs_key = f"{descriptor}-{variable_name}"
-            l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-                cdf_attrs_key, check_schema=False
-            )
-
-        if descriptor in ["lo-sw-species", "lo-nsw-species"]:
-            # Do not carry these variable attributes from L1a to L1b
-            drop_variables = [
-                "k_factor",
-                "nso_half_spin",
-                "sw_bias_gain_mode",
-                "st_bias_gain_mode",
-                "spin_period",
-            ]
-            l1b_dataset = l1b_dataset.drop_vars(drop_variables)
-
-    logger.info(f"\nFinal data product:\n{l1b_dataset}\n")
-
-    return l1b_dataset
+    return convert_to_rates(
+        l1b_dataset,
+        descriptor,
+    )