imap-processing 0.19.0 → 0.19.2 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +31 -894
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +279 -255
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +3 -1
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -31
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +61 -1
- imap_processing/cli.py +62 -71
- imap_processing/codice/codice_l0.py +2 -1
- imap_processing/codice/codice_l1a.py +47 -49
- imap_processing/codice/codice_l1b.py +42 -32
- imap_processing/codice/codice_l2.py +105 -7
- imap_processing/codice/constants.py +50 -8
- imap_processing/codice/data/lo_stepping_values.csv +1 -1
- imap_processing/ena_maps/ena_maps.py +39 -18
- imap_processing/ena_maps/utils/corrections.py +291 -0
- imap_processing/ena_maps/utils/map_utils.py +20 -4
- imap_processing/glows/l1b/glows_l1b.py +38 -23
- imap_processing/glows/l1b/glows_l1b_data.py +10 -11
- imap_processing/hi/hi_l1c.py +4 -109
- imap_processing/hi/hi_l2.py +34 -23
- imap_processing/hi/utils.py +109 -0
- imap_processing/ialirt/l0/ialirt_spice.py +1 -0
- imap_processing/ialirt/utils/create_xarray.py +1 -1
- imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
- imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
- imap_processing/lo/l1b/lo_l1b.py +90 -16
- imap_processing/lo/l1c/lo_l1c.py +164 -50
- imap_processing/lo/l2/lo_l2.py +941 -127
- imap_processing/mag/l1d/mag_l1d_data.py +36 -3
- imap_processing/mag/l2/mag_l2.py +2 -0
- imap_processing/mag/l2/mag_l2_data.py +4 -3
- imap_processing/quality_flags.py +14 -0
- imap_processing/spice/geometry.py +15 -8
- imap_processing/spice/pointing_frame.py +4 -2
- imap_processing/spice/repoint.py +49 -0
- imap_processing/ultra/constants.py +29 -0
- imap_processing/ultra/l1b/badtimes.py +35 -11
- imap_processing/ultra/l1b/de.py +15 -9
- imap_processing/ultra/l1b/extendedspin.py +24 -12
- imap_processing/ultra/l1b/goodtimes.py +112 -0
- imap_processing/ultra/l1b/lookup_utils.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b.py +7 -7
- imap_processing/ultra/l1b/ultra_l1b_culling.py +8 -4
- imap_processing/ultra/l1b/ultra_l1b_extended.py +79 -43
- imap_processing/ultra/l1c/helio_pset.py +68 -39
- imap_processing/ultra/l1c/l1c_lookup_utils.py +45 -12
- imap_processing/ultra/l1c/spacecraft_pset.py +81 -37
- imap_processing/ultra/l1c/ultra_l1c.py +27 -22
- imap_processing/ultra/l1c/ultra_l1c_culling.py +7 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +41 -41
- imap_processing/ultra/l2/ultra_l2.py +54 -10
- imap_processing/ultra/utils/ultra_l1_utils.py +10 -5
- {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +1 -1
- {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +62 -60
- imap_processing/ultra/l1b/cullingmask.py +0 -90
- imap_processing/ultra/l1c/histogram.py +0 -36
- /imap_processing/glows/ancillary/{imap_glows_pipeline_settings_20250923_v002.json → imap_glows_pipeline-settings_20250923_v002.json} +0 -0
- {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
- {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
- {imap_processing-0.19.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_l1a.py

@@ -125,7 +125,7 @@ class CoDICEL1aPipeline:
         # orientation and the azimuth determine which spin sector the data
         # gets stored in.
         # TODO: All these nested for-loops are bad. Try to find a better
-        # solution.
+        # solution. See GitHub issue #2136.
         for i, epoch_data in enumerate(self.data):
             for energy_index in range(num_energies):
                 pixel_orientation = constants.PIXEL_ORIENTATIONS[energy_index]
@@ -345,7 +345,7 @@ class CoDICEL1aPipeline:
         # energy dimension
         # TODO: This bit of code may no longer be needed once I can figure
         # out how to run hi-sectored product through the
-        # create_binned_dataset function
+        # create_binned_dataset function. See GitHub issue #2137.
         if self.config["dataset_name"] == "imap_codice_l1a_hi-sectored":
             dims = [
                 f"energy_{variable_name}" if item == "esa_step" else item
@@ -367,7 +367,7 @@ class CoDICEL1aPipeline:
         # longer need the "esa_step" coordinate
         # TODO: This bit of code may no longer be needed once I can figure
         # out how to run hi-sectored product through the
-        # create_binned_dataset function
+        # create_binned_dataset function. See GitHub issue #2137.
         if self.config["dataset_name"] == "imap_codice_l1a_hi-sectored":
             for species in self.config["energy_table"]:
                 dataset = self.define_energy_bins(dataset, species)
@@ -822,9 +822,6 @@ def group_ialirt_data(

     # Workaround to get this function working for both I-ALiRT spacecraft
     # data and CoDICE-specific I-ALiRT test data from Joey
-    # TODO: Once CoDICE I-ALiRT processing is more established, we can probably
-    # do away with processing the test data from Joey and just use the
-    # I-ALiRT data that is constructed closer to what we expect in-flight.
     if hasattr(packets, "acquisition_time"):
         time_key = "acquisition_time"
         counter_key = "counter"
@@ -880,7 +877,7 @@ def create_binned_dataset(
         Xarray dataset containing the final processed dataset.
     """
     # TODO: hi-sectored data product should be processed similar to hi-omni,
-    # so I should be able to use this method.
+    # so I should be able to use this method. See GitHub issue #2137.

     # Get the four "main" parameters for processing
     table_id, plan_id, plan_step, view_id = get_params(dataset)
@@ -901,7 +898,7 @@ def create_binned_dataset(
         attrs=pipeline.cdf_attrs.get_variable_attributes("epoch", check_schema=False),
     )
     # TODO: Figure out how to calculate epoch centers and deltas and store them
-    # in variables here
+    # in variables here. See GitHub issue #1501.
     dataset = xr.Dataset(
         coords={"epoch": coord},
         attrs=pipeline.cdf_attrs.get_global_attributes(pipeline.config["dataset_name"]),
@@ -941,7 +938,7 @@ def create_binned_dataset(
     return dataset


-def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
+def create_direct_event_dataset(apid: int, unpacked_dataset: xr.Dataset) -> xr.Dataset:
     """
     Create dataset for direct event data.

@@ -955,7 +952,7 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     dictionary. Padding is added to any fields that have less than 10000 events.

     In order to process these data, we must take the decommed raw data, group
-    the packets appropriately based on their `seq_flgs`, decompress the data,
+    the unpacked_dataset appropriately based on their `seq_flgs`, decompress the data,
     then arrange the data into CDF data variables for each priority and bit
     field. For example, P2_SpinAngle represents the spin angles for the 2nd
     priority data.
@@ -964,8 +961,8 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     ----------
     apid : int
         The APID of the packet.
-    packets : xarray.Dataset
-        The …
+    unpacked_dataset : xarray.Dataset
+        The unpacked dataset to process.

     Returns
     -------
@@ -973,13 +970,13 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
         Xarray dataset containing the direct event data.
     """
     # Group and decompress the data
-    grouped_data = group_data(packets)
+    grouped_data = group_data(unpacked_dataset)
     decompressed_data = [
         decompress(group, CoDICECompression.LOSSLESS) for group in grouped_data
     ]

     # Reshape the packet data into CDF-ready variables
-    reshaped_de_data = reshape_de_data(packets, decompressed_data, apid)
+    reshaped_de_data = reshape_de_data(unpacked_dataset, decompressed_data, apid)

     # Gather the CDF attributes
     cdf_attrs = ImapCdfAttributes()
@@ -989,11 +986,11 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     # Determine the epochs to use in the dataset, which are the epochs whenever
     # there is a start of a segment and the priority is 0
     epoch_indices = np.where(
-        ((packets.seq_flgs.data == 3) | (packets.seq_flgs.data == 1))
-        & (packets.priority.data == 0)
+        ((unpacked_dataset.seq_flgs.data == 3) | (unpacked_dataset.seq_flgs.data == 1))
+        & (unpacked_dataset.priority.data == 0)
     )[0]
-    acq_start_seconds = packets.acq_start_seconds[epoch_indices]
-    acq_start_subseconds = packets.acq_start_subseconds[epoch_indices]
+    acq_start_seconds = unpacked_dataset.acq_start_seconds[epoch_indices]
+    acq_start_subseconds = unpacked_dataset.acq_start_subseconds[epoch_indices]

     # Calculate epoch variables
     epochs, epochs_delta_minus, epochs_delta_plus = calculate_epoch_values(
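For context on the `seq_flgs` mask in this hunk: in CCSDS packet headers the 2-bit sequence flag is 1 for the first packet of a segmented group and 3 for an unsegmented packet, so the condition keeps exactly the packets that begin a new group. A minimal standalone sketch of the same selection, using made-up arrays rather than real CoDICE data:

```python
import numpy as np

# Toy CCSDS sequence flags (1 = first segment, 0 = continuation,
# 2 = last segment, 3 = unsegmented) and priorities for six packets
seq_flgs = np.array([1, 0, 2, 3, 1, 0])
priority = np.array([0, 0, 0, 1, 0, 0])

# Keep segment starts (first or unsegmented) that carry priority 0
epoch_indices = np.where(((seq_flgs == 3) | (seq_flgs == 1)) & (priority == 0))[0]
print(epoch_indices)  # [0 4] -- packet 3 is unsegmented but has priority 1
```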
@@ -1051,20 +1048,19 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     )

     # Create the CDF data variables for each Priority and Field
-    for …
-    …
-    )
+    for field in constants.DE_DATA_PRODUCT_CONFIGURATIONS[apid]["cdf_fields"]:
+        if field in ["num_events", "data_quality"]:
+            attrs = cdf_attrs.get_variable_attributes("de_2d_attrs")
+            dims = ["epoch", "priority"]
+        else:
+            attrs = cdf_attrs.get_variable_attributes("de_3d_attrs")
+            dims = ["epoch", "priority", "event_num"]
+        dataset[field] = xr.DataArray(
+            np.array(reshaped_de_data[field]),
+            name=field,
+            dims=dims,
+            attrs=attrs,
+        )

     return dataset

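For readers less familiar with xarray: the new loop builds one `xr.DataArray` per CDF field, choosing 2-D or 3-D dims by field type. A self-contained sketch of the same pattern, with toy data and a hypothetical field name and attribute dict:

```python
import numpy as np
import xarray as xr

# Toy 3-D direct-event field: 2 epochs x 4 priorities x 10 events
data = np.zeros((2, 4, 10), dtype=np.uint16)
da = xr.DataArray(
    data,
    name="P0_SpinAngle",  # hypothetical field name
    dims=["epoch", "priority", "event_num"],
    attrs={"FIELDNAM": "Spin Angle"},  # stand-in for the real CDF attributes
)
print(da.dims, da.shape)  # ('epoch', 'priority', 'event_num') (2, 4, 10)
```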
@@ -1490,7 +1486,7 @@ def reshape_de_data(
         CDF variable names, and the values represent the data.
     """
     # Dictionary to hold all the (soon to be restructured) direct event data
-    de_data = {}
+    de_data: dict[str, np.ndarray] = {}

     # Extract some useful variables
     num_priorities = constants.DE_DATA_PRODUCT_CONFIGURATIONS[apid]["num_priorities"]
@@ -1510,18 +1506,20 @@ def reshape_de_data(

     # Initialize data arrays for each priority and field to store the data
     # We also need arrays to hold number of events and data quality
-    for …
-    …
-    )
-
+    for field in bit_structure:
+        # if these two, no need to store
+        if field not in ["Priority", "Spare"]:
+            de_data[f"{field}"] = np.full(
+                (num_epochs, num_priorities, 10000),
+                bit_structure[field]["fillval"],
+                dtype=bit_structure[field]["dtype"],
+            )
+    # Add other additional fields of l1a
+    de_data["num_events"] = np.full(
+        (num_epochs, num_priorities), 65535, dtype=np.uint16
+    )
+
+    de_data["data_quality"] = np.full((num_epochs, num_priorities), 255, dtype=np.uint8)

     # decompressed_data is one large list of values of length
     # (<number of epochs> * <number of priorities>)
@@ -1545,8 +1543,8 @@ def reshape_de_data(

         # Number of events and data quality can be determined at this stage
         num_events = num_events_arr[epoch_start:epoch_end][i]
-        …
-        …
+        de_data["num_events"][epoch_index, priority_num] = num_events
+        de_data["data_quality"][epoch_index, priority_num] = data_quality[i]

         # Iterate over each event
         for event_index in range(num_events):
@@ -1577,12 +1575,12 @@ def reshape_de_data(
                 )

                 # Set the value into the data array
-                …
+                de_data[f"{field_name}"][epoch_index, priority_num, event_index] = (
                     value
                 )
                 bit_position += field_components["bit_length"]

-    return …
+    return de_data


 def process_codice_l1a(file_path: Path) -> list[xr.Dataset]:
imap_processing/codice/codice_l1b.py

@@ -9,18 +9,18 @@ from imap_processing.codice.codice_l1b import process_codice_l1b
 dataset = process_codice_l1b(l1a_filenanme)
 """

-# TODO: Figure out how to convert hi-priority data product. Need an updated
-# algorithm document that describes this.
-
 import logging
 from pathlib import Path

 import numpy as np
 import xarray as xr

+from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.codice import constants
+from imap_processing.codice.utils import CODICEAPID
+from imap_processing.utils import packet_file_to_datasets

 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
@@ -49,9 +49,6 @@ def convert_to_rates(
     rates_data : np.ndarray
         The converted data array.
     """
-    # TODO: Temporary workaround to create CDFs for SIT-4. Revisit after SIT-4.
-    acq_times = 1
-
     if descriptor in [
         "lo-counters-aggregated",
         "lo-counters-singles",
|
|
|
65
62
|
]:
|
|
66
63
|
# Applying rate calculation described in section 10.2 of the algorithm
|
|
67
64
|
# document
|
|
65
|
+
# In order to divide by acquisition times, we must reshape the acq
|
|
66
|
+
# time data array to match the data variable shape
|
|
67
|
+
dims = [1] * dataset[variable_name].data.ndim
|
|
68
|
+
dims[1] = 128
|
|
69
|
+
acq_times = dataset.acquisition_time_per_step.data.reshape(dims)
|
|
70
|
+
|
|
71
|
+
# Now perform the calculation
|
|
68
72
|
rates_data = dataset[variable_name].data / (
|
|
69
73
|
acq_times
|
|
70
74
|
* 1e-6 # Converting from microseconds to seconds
|
|
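The reshape added here is a standard NumPy broadcasting trick: the 128 per-ESA-step acquisition times are viewed with singleton dimensions everywhere except axis 1, so the division lines up with whatever shape the data variable has. A sketch with invented shapes (the real shapes come from the L1A/L1B datasets):

```python
import numpy as np

# Hypothetical counts variable shaped (epoch, esa_step, spin_sector)
counts = np.ones((5, 128, 24))
acq_times = np.full(128, 100.0)  # microseconds per ESA step

# View the 1-D acquisition times as (1, 128, 1) so they broadcast along axis 1
dims = [1] * counts.ndim
dims[1] = 128
rates = counts / (acq_times.reshape(dims) * 1e-6)  # counts per second

print(rates.shape)     # (5, 128, 24)
print(rates[0, 0, 0])  # 10000.0 -> 1 count per 100 microseconds
```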
@@ -83,10 +87,8 @@ def convert_to_rates(
         rates_data = dataset[variable_name].data / (
             constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
             * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spins"]
-            * …
+            * constants.HI_ACQUISITION_TIME
         )
-    elif descriptor == "hskp":
-        rates_data = dataset[variable_name].data / acq_times

     return rates_data

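As a worked example of the hi-product normalization above, using the hi-priority configuration added elsewhere in this diff (24 spin sectors, 16 spins) and the new HI_ACQUISITION_TIME constant:

```python
# 3840 raw counts over 16 spins of 24 sectors at 0.59916 s acquisition each
rate = 3840 / (24 * 16 * 0.59916)
print(round(rate, 2))  # 16.69 counts per second
```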
@@ -131,35 +133,43 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
     # Update the global attributes
     l1b_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)

-    # …
-    # TODO: Figure out exactly which hskp variables need to be converted
-    # Housekeeping and binned datasets are treated a bit differently since
-    # not all variables need to be converted
+    # TODO: This was thrown together quickly and should be double-checked
     if descriptor == "hskp":
-        …
+        xtce_filename = "codice_packet_definition.xml"
+        xtce_packet_definition = Path(
+            f"{imap_module_directory}/codice/packet_definitions/{xtce_filename}"
+        )
+        packet_file = (
+            imap_module_directory
+            / "tests"
+            / "codice"
+            / "data"
+            / "imap_codice_l0_raw_20241110_v001.pkts"
+        )
+        datasets: dict[int, xr.Dataset] = packet_file_to_datasets(
+            packet_file, xtce_packet_definition, use_derived_value=True
+        )
+        l1b_dataset = datasets[CODICEAPID.COD_NHK]
+
+        # TODO: Drop the same variables as we do in L1a? (see line 1103 in
+        # codice_l1a.py
+
     else:
         variables_to_convert = getattr(
             constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
         )

-        …
+        # Apply the conversion to rates
+        for variable_name in variables_to_convert:
+            l1b_dataset[variable_name].data = convert_to_rates(
+                l1b_dataset, descriptor, variable_name
+            )
+
+            # Set the variable attributes
+            cdf_attrs_key = f"{descriptor}-{variable_name}"
+            l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
+                cdf_attrs_key, check_schema=False
+            )

     logger.info(f"\nFinal data product:\n{l1b_dataset}\n")

imap_processing/codice/codice_l2.py

@@ -12,10 +12,12 @@ dataset = process_codice_l2(l1_filename)
 import logging
 from pathlib import Path

+import numpy as np
 import xarray as xr

 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
+from imap_processing.codice.constants import HALF_SPIN_LUT

 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
@@ -54,6 +56,14 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
     cdf_attrs = ImapCdfAttributes()
     l2_dataset = add_dataset_attributes(l2_dataset, dataset_name, cdf_attrs)

+    # TODO: update list of datasets that need geometric factors (if needed)
+    # Compute geometric factors needed for intensity calculations
+    if dataset_name in [
+        "imap_codice_l2_lo-sw-species",
+        "imap_codice_l2_lo-nsw-species",
+    ]:
+        geometric_factors = compute_geometric_factors(l2_dataset)
+
     if dataset_name in [
         "imap_codice_l2_hi-counters-singles",
         "imap_codice_l2_hi-counters-aggregated",
@@ -63,6 +73,7 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
         "imap_codice_l2_lo-nsw-priority",
     ]:
         # No changes needed. Just save to an L2 CDF file.
+        # TODO: May not even need L2 files for these products
         pass

     elif dataset_name == "imap_codice_l2_hi-direct-events":
@@ -117,6 +128,8 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
         # Calculate the pickup ion sunward solar wind intensities using equation
         # described in section 11.2.4 of algorithm document.
         # Hopefully this can also apply to lo-ialirt
+        # TODO: WIP - needs to be completed
+        l2_dataset = process_lo_sw_species(l2_dataset, geometric_factors)
         pass

     elif dataset_name == "imap_codice_l2_lo-nsw-species":
@@ -132,14 +145,14 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:


 def add_dataset_attributes(
-    …
+    dataset: xr.Dataset, dataset_name: str, cdf_attrs: ImapCdfAttributes
 ) -> xr.Dataset:
     """
     Add the global and variable attributes to the dataset.

     Parameters
     ----------
-    …
+    dataset : xarray.Dataset
         The dataset to update.
     dataset_name : str
         The name of the dataset.
@@ -155,12 +168,12 @@ def add_dataset_attributes(
     cdf_attrs.add_instrument_variable_attrs("codice", "l2")

     # Update the global attributes
-    …
+    dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)

     # Set the variable attributes
-    for variable_name in …
+    for variable_name in dataset.data_vars.keys():
         try:
-            …
+            dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
                 variable_name, check_schema=False
             )
         except KeyError:
@@ -169,7 +182,7 @@ def add_dataset_attributes(
         descriptor = dataset_name.split("imap_codice_l2_")[-1]
         cdf_attrs_key = f"{descriptor}-{variable_name}"
         try:
-            …
+            dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
                 f"{cdf_attrs_key}", check_schema=False
             )
         except KeyError:
@@ -177,4 +190,89 @@ def add_dataset_attributes(
                 f"Field '{variable_name}' and '{cdf_attrs_key}' not found in "
                 f"attribute manager."
             )
-    return …
+    return dataset
+
+
+def compute_geometric_factors(dataset: xr.Dataset) -> np.ndarray:
+    """
+    Calculate geometric factors needed for intensity calculations.
+
+    Geometric factors are determined by comparing the half-spin values per
+    esa_step in the HALF_SPIN_LUT to the rgfo_half_spin values in the provided
+    L2 dataset.
+
+    If the half-spin value is less than the corresponding rgfo_half_spin value,
+    the geometric factor is set to 0.75 (full mode); otherwise, it is set to 0.5
+    (reduced mode).
+
+    NOTE: Half spin values are associated with ESA steps which corresponds to the
+    index of the energy_per_charge dimension that is between 0 and 127.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset containing rgfo_half_spin data variable.
+
+    Returns
+    -------
+    geometric_factors : np.ndarray
+        A 2D array of geometric factors with shape (epoch, esa_steps).
+    """
+    # Convert the HALF_SPIN_LUT to a reverse mapping of esa_step to half_spin
+    esa_step_to_half_spin_map = {
+        val: key for key, vals in HALF_SPIN_LUT.items() for val in vals
+    }
+
+    # Create a list of half_spin values corresponding to ESA steps (0 to 127)
+    half_spin_values = np.array(
+        [esa_step_to_half_spin_map[step] for step in range(128)]
+    )
+
+    # Expand dimensions to compare each rgfo_half_spin value against
+    # all half_spin_values
+    rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis]  # Shape: (epoch, 1)
+
+    # Perform the comparison and calculate geometric factors
+    geometric_factors = np.where(half_spin_values < rgfo_half_spin, 0.75, 0.5)
+
+    return geometric_factors
+
+
+def process_lo_sw_species(
+    dataset: xr.Dataset, geometric_factors: np.ndarray
+) -> xr.Dataset:
+    """
+    Process the lo-sw-species L2 dataset to calculate species intensities.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset to process.
+    geometric_factors : np.ndarray
+        The geometric factors array with shape (epoch, esa_steps).
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with species intensities calculated.
+    """
+    # TODO: WIP - implement intensity calculations
+    # valid_solar_wind_vars = [
+    #     "hplus",
+    #     "heplusplus",
+    #     "cplus4",
+    #     "cplus5",
+    #     "cplus6",
+    #     "oplus5",
+    #     "oplus6",
+    #     "oplus7",
+    #     "oplus8",
+    #     "ne",
+    #     "mg",
+    #     "si",
+    #     "fe_loq",
+    #     "fe_hiq",
+    # ]
+    # valid_pick_up_ion_vars = ["heplus", "cnoplus"]
+
+    return dataset
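To see the comparison in `compute_geometric_factors` concretely, here is a minimal sketch with a shrunken lookup table and fabricated per-epoch `rgfo_half_spin` thresholds (the real inputs come from HALF_SPIN_LUT and the L2 CDF):

```python
import numpy as np

# Tiny stand-in for HALF_SPIN_LUT: half-spin -> ESA steps
lut = {0: [0], 1: [1], 2: [2, 3]}
step_to_half_spin = {s: hs for hs, steps in lut.items() for s in steps}
half_spin_values = np.array([step_to_half_spin[s] for s in range(4)])  # [0 1 2 2]

# Fabricated thresholds; shape (epoch, 1) broadcasts against (esa_step,)
rgfo_half_spin = np.array([[1], [3]])

# 0.75 (full mode) where half-spin < threshold, else 0.5 (reduced mode)
gf = np.where(half_spin_values < rgfo_half_spin, 0.75, 0.5)
print(gf.shape)  # (2, 4)
print(gf)        # [[0.75 0.5  0.5  0.5 ]
                 #  [0.75 0.75 0.75 0.75]]
```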
imap_processing/codice/constants.py

@@ -60,6 +60,7 @@ CODICEAPID_MAPPING = {
 # Numerical constants
 SPIN_PERIOD_CONVERSION = 0.00032
 K_FACTOR = 5.76  # This is used to convert voltages to energies in L2
+HI_ACQUISITION_TIME = 0.59916

 # CDF variable names used for lo data products
 LO_COUNTERS_SINGLES_VARIABLE_NAMES = ["apd_singles"]
@@ -172,8 +173,6 @@ CODICE_HI_IAL_DATA_FIELDS = ["h"]

 # lo- and hi-counters-aggregated data product variables are dynamically
 # determined based on the number of active counters
-# TODO: Try to convince Joey to move to lower case variable names with
-# underscores?
 LO_COUNTERS_AGGREGATED_ACTIVE_VARIABLES = {
     "tcr": True,
     "dcr": True,
@@ -438,7 +437,7 @@ DATA_PRODUCT_CONFIGURATIONS: dict[CODICEAPID | int, dict] = {
         "instrument": "hi",
         "num_counters": len(
             HI_COUNTERS_AGGREGATED_VARIABLE_NAMES
-        ),  # The number of counters depends on the number of active counters
+        ),  # The number of counters depends on the number of *active* counters
         "support_variables": ["data_quality", "spin_period"],
         "variable_names": HI_COUNTERS_AGGREGATED_VARIABLE_NAMES,
     },
@@ -527,7 +526,7 @@ DATA_PRODUCT_CONFIGURATIONS: dict[CODICEAPID | int, dict] = {
         "instrument": "lo",
         "num_counters": len(
             LO_COUNTERS_AGGREGATED_VARIABLE_NAMES
-        ),  # The number of counters depends on the number of active counters
+        ),  # The number of counters depends on the number of *active* counters
         "support_variables": [
             "energy_table",
             "acquisition_time_per_step",
@@ -689,9 +688,9 @@ L1B_DATA_PRODUCT_CONFIGURATIONS: dict[str, dict] = {
         "num_spin_sectors": 24,
         "num_spins": 4,
     },
-    "hi-priority": {
-        "num_spin_sectors": …
-        "num_spins": …
+    "hi-priority": {
+        "num_spin_sectors": 24,
+        "num_spins": 16,
     },
     "hi-sectored": {
         "num_spin_sectors": 2,
@@ -849,7 +848,7 @@ DE_DATA_PRODUCT_CONFIGURATIONS: dict[Any, dict[str, Any]] = {
 }

 # Define the packet fields needed to be stored in segmented data and their
-# corresponding bit lengths for …
+# corresponding bit lengths for I-ALiRT data products
 IAL_BIT_STRUCTURE = {
     "SHCOARSE": 32,
     "PACKET_VERSION": 16,
@@ -1657,6 +1656,8 @@ PIXEL_ORIENTATIONS = {
 # processing. These are taken from the "Acq Time" column in the "Lo Stepping"
 # tab of the "*-SCI-LUT-*.xml" spreadsheet that largely defines CoDICE
 # processing.
+# TODO: Do away with this lookup table and instead calculate the acquisition
+# times. See GitHub issue #1945.
 ACQUISITION_TIMES = {
     0: [
         578.70833333,
@@ -2179,3 +2180,44 @@ ACQUISITION_TIMES = {
         96.45138889,
     ],
 }
+
+# TODO: Update EFFICIENCY value when better information is available.
+# Constant for CoDICE Intensity calculations.
+EFFICIENCY = 1
+
+# Lookup table for mapping half-spin (keys) to esa steps (values)
+# This is used to determine geometry factors L2
+HALF_SPIN_LUT = {
+    0: [0],
+    1: [1],
+    2: [2],
+    3: [3],
+    4: [4, 5],
+    5: [6, 7],
+    6: [8, 9],
+    7: [10, 11],
+    8: [12, 13, 14],
+    9: [15, 16, 17],
+    10: [18, 19, 20],
+    11: [21, 22, 23],
+    12: [24, 25, 26, 27],
+    13: [28, 29, 30, 31],
+    14: [32, 33, 34, 35],
+    15: [36, 37, 38, 39],
+    16: [40, 41, 42, 43, 44],
+    17: [45, 46, 47, 48, 49],
+    18: [50, 51, 52, 53, 54],
+    19: [55, 56, 57, 58, 59],
+    20: [60, 61, 62, 63, 64],
+    21: [65, 66, 67, 68, 69],
+    22: [70, 71, 72, 73, 74],
+    23: [75, 76, 77, 78, 79],
+    24: [80, 81, 82, 83, 84, 85],
+    25: [86, 87, 88, 89, 90, 91],
+    26: [92, 93, 94, 95, 96, 97],
+    27: [98, 99, 100, 101, 102, 103],
+    28: [104, 105, 106, 107, 108, 109],
+    29: [110, 111, 112, 113, 114, 115],
+    30: [116, 117, 118, 119, 120, 121],
+    31: [122, 123, 124, 125, 126, 127],
+}
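A quick consistency check on the new lookup table (an editor's sketch, not part of the package; assumes imap-processing ≥ 0.19.2 is installed): the step lists should tile ESA steps 0-127 exactly once, in order, across the 32 half-spins.

```python
from imap_processing.codice.constants import HALF_SPIN_LUT

# Verify the table covers ESA steps 0-127 exactly once and in order
all_steps = [step for steps in HALF_SPIN_LUT.values() for step in steps]
assert all_steps == list(range(128))
assert set(HALF_SPIN_LUT) == set(range(32))  # 32 half-spins per spin
```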