imap-processing 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +4 -16
- imap_processing/codice/codice_l1a_lo_angular.py +362 -0
- imap_processing/codice/codice_l1a_lo_species.py +282 -0
- imap_processing/codice/codice_l1b.py +62 -97
- imap_processing/codice/codice_l2.py +210 -96
- imap_processing/codice/codice_new_l1a.py +64 -0
- imap_processing/codice/constants.py +37 -2
- imap_processing/codice/utils.py +270 -0
- imap_processing/ena_maps/ena_maps.py +50 -39
- imap_processing/ena_maps/utils/corrections.py +196 -14
- imap_processing/ena_maps/utils/naming.py +3 -1
- imap_processing/hi/hi_l1c.py +34 -12
- imap_processing/hi/hi_l2.py +79 -36
- imap_processing/ialirt/generate_coverage.py +3 -1
- imap_processing/ialirt/l0/parse_mag.py +1 -0
- imap_processing/ialirt/l0/process_hit.py +1 -0
- imap_processing/ialirt/l0/process_swapi.py +1 -0
- imap_processing/ialirt/l0/process_swe.py +2 -0
- imap_processing/ialirt/process_ephemeris.py +6 -2
- imap_processing/ialirt/utils/create_xarray.py +3 -2
- imap_processing/lo/l1c/lo_l1c.py +1 -1
- imap_processing/lo/l2/lo_l2.py +6 -4
- imap_processing/quality_flags.py +1 -0
- imap_processing/swapi/constants.py +4 -0
- imap_processing/swapi/l1/swapi_l1.py +47 -20
- imap_processing/swapi/l2/swapi_l2.py +17 -3
- imap_processing/ultra/l1a/ultra_l1a.py +121 -72
- imap_processing/ultra/l1b/de.py +57 -1
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
- imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
- imap_processing/ultra/l1c/helio_pset.py +28 -5
- imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
- imap_processing/ultra/l1c/spacecraft_pset.py +9 -5
- imap_processing/ultra/l1c/ultra_l1c.py +6 -6
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
- imap_processing/ultra/l2/ultra_l2.py +2 -2
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/METADATA +1 -1
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/RECORD +46 -42
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_l1b.py

@@ -15,20 +15,15 @@ from pathlib import Path
 import numpy as np
 import xarray as xr
 
-from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.codice import constants
-from imap_processing.codice.utils import CODICEAPID
-from imap_processing.utils import packet_file_to_datasets
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
 
 
-def convert_to_rates(
-    dataset: xr.Dataset, descriptor: str, variable_name: str
-) -> np.ndarray:
+def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray:
     """
     Apply a conversion from counts to rates.
 
@@ -41,14 +36,17 @@ def convert_to_rates(
         The L1b dataset containing the data to convert.
     descriptor : str
         The descriptor of the data product of interest.
-    variable_name : str
-        The variable name to apply the conversion to.
 
     Returns
     -------
     rates_data : np.ndarray
         The converted data array.
     """
+    # Variables to convert based on descriptor
+    variables_to_convert = getattr(
+        constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
+    )
+
     if descriptor in [
         "lo-counters-aggregated",
         "lo-counters-singles",
@@ -58,41 +56,49 @@ def convert_to_rates(
         "lo-sw-priority",
         "lo-ialirt",
     ]:
-        #
-
-
-        # time data array to match the data variable shape
-        dims = [1] * dataset[variable_name].data.ndim
-        dims[1] = 128
-        acq_times = dataset.acquisition_time_per_step.data.reshape(dims)  # (128)
-        # Now perform the calculation
-        rates_data = dataset[variable_name].data / (
-            acq_times
-            * 1e-3  # Converting from milliseconds to seconds
+        # Denominator to convert counts to rates
+        denominator = (
+            dataset.acquisition_time_per_step
             * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
         )
+
+        # Do not carry these variable attributes from L1a to L1b for above products
+        drop_variables = [
+            "k_factor",
+            "nso_half_spin",
+            "sw_bias_gain_mode",
+            "st_bias_gain_mode",
+            "spin_period",
+        ]
+        dataset = dataset.drop_vars(drop_variables)
     elif descriptor in [
         "lo-nsw-species",
         "lo-sw-species",
     ]:
-        #
-        #
-        #
-        # time data array to match the data variable shape (epoch, esa_step, sector)
-        dims = [1] * dataset[variable_name].data.ndim
-        dims[1] = 128
-        acq_times = dataset.acquisition_time_per_step.data.reshape(dims)  # (128)
-        # acquisition time have an array of shape (128,). We match n_sector to that.
+        # Create n_sector with 'esa_step' dimension. This is done by xr.full_like
+        # with input dataset.acquisition_time_per_step. This ensures that the resulting
+        # n_sector has the same dimensions as acquisition_time_per_step.
         # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11.
-        n_sector =
-
-
-        # Now perform the calculation
-        rates_data = dataset[variable_name].data / (
-            acq_times
-            * 1e-3  # Converting from milliseconds to seconds
-            * n_sector[:, np.newaxis]  # Spin sectors
+        n_sector = xr.full_like(
+            dataset.acquisition_time_per_step, 12.0, dtype=np.float64
        )
+        n_sector[-1] = 11.0
+
+        # Denominator to convert counts to rates
+        denominator = dataset.acquisition_time_per_step * n_sector
+
+        # Do not carry these variable attributes from L1a to L1b for above products
+        drop_variables = [
+            "k_factor",
+            "nso_half_spin",
+            "sw_bias_gain_mode",
+            "st_bias_gain_mode",
+            "spin_period",
+            "voltage_table",
+            "acquisition_time_per_step",
+        ]
+        dataset = dataset.drop_vars(drop_variables)
+
     elif descriptor in [
         "hi-counters-aggregated",
         "hi-counters-singles",
@@ -101,15 +107,27 @@ def convert_to_rates(
         "hi-sectored",
         "hi-ialirt",
     ]:
-        #
-
-        rates_data = dataset[variable_name].data / (
+        # Denominator to convert counts to rates
+        denominator = (
             constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
             * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spins"]
             * constants.HI_ACQUISITION_TIME
         )
 
-
+    # For each variable, convert counts and uncertainty to rates
+    for variable in variables_to_convert:
+        dataset[variable].data = dataset[variable].astype(np.float64) / denominator
+        # Carry over attrs and update as needed
+        dataset[variable].attrs["UNITS"] = "counts/s"
+
+        # Uncertainty calculation
+        unc_variable = f"unc_{variable}"
+        dataset[unc_variable].data = (
+            dataset[unc_variable].astype(np.float64) / denominator
+        )
+        dataset[unc_variable].attrs["UNITS"] = "1/s"
+
+    return dataset
 
 
 def process_codice_l1b(file_path: Path) -> xr.Dataset:
@@ -136,70 +154,17 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
     dataset_name = l1a_dataset.attrs["Logical_source"].replace("_l1a_", "_l1b_")
     descriptor = dataset_name.removeprefix("imap_codice_l1b_")
 
-    # Direct event data products do not have a level L1B
-    if descriptor in ["lo-direct-events", "hi-direct-events"]:
-        logger.warning("Encountered direct event data product. Skipping L1b processing")
-        return None
-
     # Get the L1b CDF attributes
     cdf_attrs = ImapCdfAttributes()
     cdf_attrs.add_instrument_global_attrs("codice")
     cdf_attrs.add_instrument_variable_attrs("codice", "l1b")
 
     # Use the L1a data product as a starting point for L1b
-    l1b_dataset = l1a_dataset.copy()
+    l1b_dataset = l1a_dataset.copy(deep=True)
 
     # Update the global attributes
     l1b_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
-
-
-
-
-        xtce_packet_definition = Path(
-            f"{imap_module_directory}/codice/packet_definitions/{xtce_filename}"
-        )
-        packet_file = (
-            imap_module_directory
-            / "tests"
-            / "codice"
-            / "data"
-            / "imap_codice_l0_raw_20241110_v001.pkts"
-        )
-        datasets: dict[int, xr.Dataset] = packet_file_to_datasets(
-            packet_file, xtce_packet_definition, use_derived_value=True
-        )
-        l1b_dataset = datasets[CODICEAPID.COD_NHK]
-
-        # TODO: Drop the same variables as we do in L1a? (see line 1103 in
-        # codice_l1a.py
-
-    else:
-        variables_to_convert = getattr(
-            constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
-        )
-
-        # Apply the conversion to rates
-        for variable_name in variables_to_convert:
-            l1b_dataset[variable_name].data = convert_to_rates(
-                l1b_dataset, descriptor, variable_name
-            )
-            # Set the variable attributes
-            cdf_attrs_key = f"{descriptor}-{variable_name}"
-            l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-                cdf_attrs_key, check_schema=False
-            )
-
-    if descriptor in ["lo-sw-species", "lo-nsw-species"]:
-        # Do not carry these variable attributes from L1a to L1b
-        drop_variables = [
-            "k_factor",
-            "nso_half_spin",
-            "sw_bias_gain_mode",
-            "st_bias_gain_mode",
-            "spin_period",
-        ]
-        l1b_dataset = l1b_dataset.drop_vars(drop_variables)
-
-    logger.info(f"\nFinal data product:\n{l1b_dataset}\n")
-
-    return l1b_dataset
+    return convert_to_rates(
+        l1b_dataset,
+        descriptor,
+    )
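For orientation, the reworked convert_to_rates in codice_l1b.py now divides every counter variable and its unc_* companion by a single descriptor-dependent denominator instead of reshaping acquisition-time arrays per variable. Below is a minimal, self-contained sketch of that arithmetic on toy xarray objects; the sizes, the acquisition-time value, and the 12-sector figure are hypothetical stand-ins, not the real CoDICE configuration.

import numpy as np
import xarray as xr

# Toy stand-ins: 2 epochs, 128 ESA steps, 12 spin sectors (hypothetical sizes/values)
rng = np.random.default_rng(0)
acq_time = xr.DataArray(np.full(128, 600.0), dims="esa_step")
counts = xr.DataArray(
    rng.poisson(50, size=(2, 128, 12)).astype(np.float64),
    dims=("epoch", "esa_step", "spin_sector"),
)
unc = np.sqrt(counts)  # Poisson-style uncertainty carried alongside the counts

# lo-counters style denominator: acquisition time per ESA step times the number
# of spin sectors configured for the product
denominator = acq_time * 12

rates = counts / denominator   # broadcasts over epoch and spin_sector by dim name
unc_rates = unc / denominator  # the uncertainty is scaled by the same factor
rates.attrs["UNITS"] = "counts/s"
unc_rates.attrs["UNITS"] = "1/s"
print(dict(rates.sizes))  # {'epoch': 2, 'esa_step': 128, 'spin_sector': 12}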
imap_processing/codice/codice_l2.py

@@ -26,11 +26,15 @@ from imap_processing.codice.constants import (
     L2_GEOMETRIC_FACTOR,
     L2_HI_NUMBER_OF_SSD,
     L2_HI_SECTORED_ANGLE,
+    LO_NSW_ANGULAR_VARIABLE_NAMES,
     LO_NSW_SPECIES_VARIABLE_NAMES,
+    LO_POSITION_TO_ELEVATION_ANGLE,
+    LO_SW_ANGULAR_VARIABLE_NAMES,
     LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES,
     LO_SW_SPECIES_VARIABLE_NAMES,
     NSW_POSITIONS,
     PUI_POSITIONS,
+    SOLAR_WIND_POSITIONS,
     SW_POSITIONS,
 )
 
@@ -94,7 +98,7 @@ def get_efficiency_lut(dependencies: ProcessingInputCollection) -> pd.DataFrame:
     return pd.read_csv(dependencies.get_file_paths(descriptor="l2-lo-efficiency")[0])
 
 
-def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> np.ndarray:
+def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> xr.DataArray:
     """
     Get the efficiency values for a given species.
 
@@ -107,7 +111,7 @@ def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> np.ndarray
 
     Returns
     -------
-    efficiency :
+    efficiency : xarray.DataArray
         A 2D array of efficiencies with shape (epoch, esa_steps).
     """
     species_efficiency = efficiency[efficiency["species"] == species].sort_values(
@@ -118,13 +122,16 @@ def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> np.ndarray
         [col for col in species_efficiency if col.startswith("position")],
         key=lambda x: int(x.split("_")[-1]),
     )
-    # Shape: (
-    return
+    # Shape: (energy_table, inst_az)
+    return xr.DataArray(
+        species_efficiency[position_names_sorted].to_numpy(),
+        dims=("energy_table", "inst_az"),
+    )
 
 
 def compute_geometric_factors(
     dataset: xr.Dataset, geometric_factor_lookup: dict
-) ->
+) -> xr.DataArray:
     """
     Calculate geometric factors needed for intensity calculations.
 
@@ -148,7 +155,7 @@ def compute_geometric_factors(
 
     Returns
     -------
-    geometric_factors :
+    geometric_factors : xarray.DataArray
         A 3D array of geometric factors with shape (epoch, esa_steps, positions).
     """
     # Convert the HALF_SPIN_LUT to a reverse mapping of esa_step to half_spin
@@ -170,22 +177,26 @@ def compute_geometric_factors(
 
     # Get the geometric factors based on the modes
     gf = np.where(
-        modes[:, :, np.newaxis],  # Shape (epoch,
-        geometric_factor_lookup[
-
-
-
+        modes[:, :, np.newaxis],  # Shape (epoch, energy_table, 1)
+        geometric_factor_lookup[
+            "reduced"
+        ],  # Shape (1, energy_table, 24) - reduced mode
+        geometric_factor_lookup["full"],  # Shape (1, energy_table, 24) - full mode
+    )  # Shape: (epoch, energy_table, inst_az)
 
+    return xr.DataArray(gf, dims=("epoch", "energy_table", "inst_az"))
 
-def process_lo_species_intensity(
+
+def calculate_intensity(
     dataset: xr.Dataset,
     species_list: list,
-    geometric_factors:
+    geometric_factors: xr.DataArray,
     efficiency: pd.DataFrame,
     positions: list,
+    average_across_positions: bool = False,
 ) -> xr.Dataset:
     """
-
+    Calculate species or angular intensities.
 
     Parameters
     ----------
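The np.where selection above is ordinary NumPy broadcasting: a boolean mask shaped (epoch, energy_table, 1) picks, element by element, between a reduced-mode and a full-mode lookup table shaped (1, energy_table, 24). A small self-contained illustration with invented shapes and values (the real tables come from L2_GEOMETRIC_FACTOR):

import numpy as np
import xarray as xr

rng = np.random.default_rng(1)
n_epoch, n_energy, n_az = 3, 128, 24  # hypothetical sizes

# True where an ESA step was acquired in reduced mode (made-up pattern)
modes = rng.integers(0, 2, size=(n_epoch, n_energy)).astype(bool)

lookup = {
    "full": rng.uniform(1e-4, 1e-3, size=(1, n_energy, n_az)),
    "reduced": rng.uniform(1e-5, 1e-4, size=(1, n_energy, n_az)),
}

# (epoch, energy, 1) condition broadcast against two (1, energy, 24) tables
gf = np.where(modes[:, :, np.newaxis], lookup["reduced"], lookup["full"])
gf = xr.DataArray(gf, dims=("epoch", "energy_table", "inst_az"))
print(gf.shape)  # (3, 128, 24)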
@@ -200,6 +211,9 @@ def process_lo_species_intensity(
     positions : list
         A list of position indices to select from the geometric factor and
         efficiency lookup tables.
+    average_across_positions : bool
+        Whether to average the efficiencies and geometric factors across the selected
+        positions. Default is False.
 
     Returns
     -------
@@ -207,37 +221,173 @@ def process_lo_species_intensity(
         The updated L2 dataset with species intensities calculated.
     """
     # Select the relevant positions from the geometric factors
-    geometric_factors = geometric_factors
-
-
-
-
-
+    geometric_factors = geometric_factors.isel(inst_az=positions)
+    if average_across_positions:
+        # take the mean geometric factor across positions
+        geometric_factors = geometric_factors.mean(dim="inst_az")
+        scalar = len(positions)
+    else:
+        scalar = 1
+    # Calculate the angular intensities using the provided geometric factors and
+    # efficiency.
+    # intensity = species_rate / (gm * eff * esa_step) for position and spin angle
     for species in species_list:
         # Select the relevant positions for the species from the efficiency LUT
-        # Shape: (epoch,
-        species_eff = get_species_efficiency(species, efficiency)
-
-
+        # Shape: (epoch, energy_table, inst_az)
+        species_eff = get_species_efficiency(species, efficiency).isel(
+            inst_az=positions
+        )
         if species_eff.size == 0:
-            logger.warning("No efficiency data found for species {species}. Skipping.")
+            logger.warning(f"No efficiency data found for species {species}. Skipping.")
             continue
-
-
-
-
-
+
+        if average_across_positions:
+            # Take the mean efficiency across positions
+            species_eff = species_eff.mean(dim="inst_az")
+
+        # Shape: (epoch, energy_table, inst_az) or
+        # (epoch, energy_table) if averaged
+        denominator = scalar * geometric_factors * species_eff * dataset["energy_table"]
         if species not in dataset:
             logger.warning(
                 f"Species {species} not found in dataset. Filling with NaNS."
             )
             dataset[species] = np.full(dataset["energy_table"].data.shape, np.nan)
         else:
-            dataset[species] = dataset[species] / denominator
+            dataset[species] = dataset[species] / denominator
+
+        # Also calculate uncertainty if available
+        species_uncertainty = f"unc_{species}"
+        if species_uncertainty not in dataset:
+            logger.warning(
+                f"Uncertainty {species_uncertainty} not found in dataset."
+                f" Filling with NaNS."
+            )
+            dataset[species_uncertainty] = np.full(
+                dataset["energy_table"].data.shape, np.nan
+            )
+        else:
+            dataset[species_uncertainty] = dataset[species_uncertainty] / denominator
 
     return dataset
 
 
+def process_lo_species_intensity(
+    dataset: xr.Dataset,
+    species_list: list,
+    geometric_factors: xr.DataArray,
+    efficiency: pd.DataFrame,
+    positions: list,
+) -> xr.Dataset:
+    """
+    Process the lo-species L2 dataset to calculate species intensities.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset to process.
+    species_list : list
+        List of species variable names to calculate intensity.
+    geometric_factors : xarray.DataArray
+        The geometric factors array with shape (epoch, esa_steps).
+    efficiency : pandas.DataFrame
+        The efficiency lookup table.
+    positions : list
+        A list of position indices to select from the geometric factor and
+        efficiency lookup tables.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with species intensities calculated.
+    """
+    # Calculate the species intensities using the provided geometric factors and
+    # efficiency.
+    dataset = calculate_intensity(
+        dataset,
+        species_list,
+        geometric_factors,
+        efficiency,
+        positions,
+        average_across_positions=True,
+    )
+
+    return dataset
+
+
+def process_lo_angular_intensity(
+    dataset: xr.Dataset,
+    species_list: list,
+    geometric_factors: xr.DataArray,
+    efficiency: pd.DataFrame,
+    positions: list,
+) -> xr.Dataset:
+    """
+    Process the lo-species L2 dataset to calculate angular intensities.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset to process.
+    species_list : list
+        List of species variable names to calculate intensity.
+    geometric_factors : xarray.DataArray
+        The geometric factors array with shape (epoch, esa_steps).
+    efficiency : pandas.DataFrame
+        The efficiency lookup table.
+    positions : list
+        A list of position indices to select from the geometric factor and
+        efficiency lookup tables.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with angular intensities calculated.
+    """
+    # Calculate the angular intensities using the provided geometric factors and
+    # efficiency.
+    dataset = calculate_intensity(
+        dataset,
+        species_list,
+        geometric_factors,
+        efficiency,
+        positions,
+        average_across_positions=False,
+    )
+    # transform positions to elevation angles
+    if positions == SW_POSITIONS:
+        pos_to_el = LO_POSITION_TO_ELEVATION_ANGLE["sw"]
+    elif positions == NSW_POSITIONS:
+        pos_to_el = LO_POSITION_TO_ELEVATION_ANGLE["nsw"]
+    else:
+        raise ValueError("Unknown positions for elevation angle mapping.")
+
+    # Create a new coordinate for elevation_angle based on inst_az
+    dataset = dataset.assign_coords(
+        elevation_angle=(
+            "inst_az",
+            [pos_to_el[pos] for pos in dataset["inst_az"].data],
+        )
+    )
+    # Take the mean across elevation angles and restore the original dimension order
+    dataset_converted = (
+        dataset[species_list]
+        .groupby("elevation_angle")
+        .sum(keep_attrs=True)  # One position should always contain zeros so sum is safe
+        # Restore original dimension order because groupby moves the grouped
+        # dimension to the front
+        .transpose("epoch", "energy_table", "spin_sector", "elevation_angle", ...)
+    )
+    # Create a new coordinate for spin angle based on spin_sector
+    # Use equation from section 11.2.2 of algorithm document
+    dataset = dataset.assign_coords(
+        spin_angle=("spin_sector", dataset["spin_sector"].data * 15.0 + 7.5)
+    )
+
+    dataset = dataset.drop_vars(species_list).merge(dataset_converted)
+    return dataset
+
+
 def process_hi_omni(dependencies: ProcessingInputCollection) -> xr.Dataset:
     """
     Process the hi-omni L1B dataset to calculate omni-directional intensities.
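To make the two L2 conversions above concrete: intensities are the rates divided by geometric factor times efficiency times energy (scaled by the number of positions when averaging), and the angular products relabel instrument position and spin sector as elevation angle and spin angle, with sector k centered at k * 15 + 7.5 degrees (sector 0 is 7.5, sector 23 is 352.5). A toy sketch of that bookkeeping follows; the position-to-elevation mapping, sizes, and numeric values are invented for illustration and are not the contents of LO_POSITION_TO_ELEVATION_ANGLE or the efficiency lookup.

import numpy as np
import xarray as xr

# Hypothetical 24 spin sectors of 15 degrees: sector k is centered at k * 15 + 7.5
spin_sector = np.arange(24)
spin_angle = spin_sector * 15.0 + 7.5  # 7.5, 22.5, ..., 352.5

rate = xr.DataArray(
    np.ones((2, 128, 24, 4)),  # (epoch, energy_table, spin_sector, inst_az)
    dims=("epoch", "energy_table", "spin_sector", "inst_az"),
    coords={"spin_sector": spin_sector, "inst_az": [8, 9, 10, 11]},  # made-up positions
)

# intensity = rate / (geometric_factor * efficiency * energy), element-wise
geometric_factor = xr.DataArray(np.full(4, 2.0e-4), dims="inst_az")
efficiency = xr.DataArray(np.full(4, 0.3), dims="inst_az")
energy = xr.DataArray(np.linspace(1.0, 80.0, 128), dims="energy_table")
intensity = rate / (geometric_factor * efficiency * energy)

# Relabel: positions sharing an elevation angle are combined, spin sectors get
# spin-angle labels (the mapping below is invented for the sketch)
pos_to_el = {8: -30.0, 9: -30.0, 10: 30.0, 11: 30.0}
intensity = intensity.assign_coords(
    elevation_angle=("inst_az", [pos_to_el[p] for p in intensity["inst_az"].data]),
    spin_angle=("spin_sector", spin_angle),
)
combined = intensity.groupby("elevation_angle").sum(dim="inst_az").transpose(
    "epoch", "energy_table", "spin_sector", "elevation_angle"
)
print(dict(combined.sizes))  # epoch: 2, energy_table: 128, spin_sector: 24, elevation_angle: 2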
@@ -637,6 +787,8 @@ def process_codice_l2(
     if dataset_name in [
         "imap_codice_l2_lo-sw-species",
         "imap_codice_l2_lo-nsw-species",
+        "imap_codice_l2_lo-nsw-angular",
+        "imap_codice_l2_lo-sw-angular",
     ]:
         l2_dataset = load_cdf(file_path).copy()
 
@@ -649,7 +801,7 @@ def process_codice_l2(
             # Filter the efficiency lookup table for solar wind efficiencies
             efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"]
             # Calculate the pickup ion sunward solar wind intensities using equation
-            # described in section 11.2.
+            # described in section 11.2.3 of algorithm document.
             process_lo_species_intensity(
                 l2_dataset,
                 LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES,
@@ -658,19 +810,19 @@ def process_codice_l2(
                 PUI_POSITIONS,
             )
             # Calculate the sunward solar wind species intensities using equation
-            # described in section 11.2.
+            # described in section 11.2.3 of algorithm document.
             process_lo_species_intensity(
                 l2_dataset,
                 LO_SW_SPECIES_VARIABLE_NAMES,
                 geometric_factors,
                 efficiencies,
-
+                SOLAR_WIND_POSITIONS,
             )
-
-            # Filter the efficiency lookup table for non
+        elif dataset_name == "imap_codice_l2_lo-nsw-species":
+            # Filter the efficiency lookup table for non-solar wind efficiencies
             efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"]
             # Calculate the non-sunward species intensities using equation
-            # described in section 11.2.
+            # described in section 11.2.3 of algorithm document.
             process_lo_species_intensity(
                 l2_dataset,
                 LO_NSW_SPECIES_VARIABLE_NAMES,
@@ -678,6 +830,27 @@ def process_codice_l2(
                 efficiencies,
                 NSW_POSITIONS,
             )
+        elif dataset_name == "imap_codice_l2_lo-sw-angular":
+            efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"]
+            # Calculate the sunward solar wind angular intensities using equation
+            # described in section 11.2.2 of algorithm document.
+            l2_dataset = process_lo_angular_intensity(
+                l2_dataset,
+                LO_SW_ANGULAR_VARIABLE_NAMES,
+                geometric_factors,
+                efficiencies,
+                SW_POSITIONS,
+            )
+        if dataset_name == "imap_codice_l2_lo-nsw-angular":
+            # Calculate the non sunward angular intensities
+            efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"]
+            l2_dataset = process_lo_angular_intensity(
+                l2_dataset,
+                LO_NSW_ANGULAR_VARIABLE_NAMES,
+                geometric_factors,
+                efficiencies,
+                NSW_POSITIONS,
+            )
 
     if dataset_name in [
         "imap_codice_l2_hi-counters-singles",
@@ -727,65 +900,6 @@ def process_codice_l2(
         # See section 11.1.2 of algorithm document
         pass
 
-    elif dataset_name == "imap_codice_l2_lo-sw-angular":
-        # Calculate the sunward angular intensities using equation described in
-        # section 11.2.3 of algorithm document.
-        pass
-
-    elif dataset_name == "imap_codice_l2_lo-nsw-angular":
-        # Calculate the non-sunward angular intensities using equation described
-        # in section 11.2.3 of algorithm document.
-        pass
-
     # logger.info(f"\nFinal data product:\n{l2_dataset}\n")
 
     return l2_dataset
-
-
-def add_dataset_attributes(
-    dataset: xr.Dataset, dataset_name: str, cdf_attrs: ImapCdfAttributes
-) -> xr.Dataset:
-    """
-    Add the global and variable attributes to the dataset.
-
-    Parameters
-    ----------
-    dataset : xarray.Dataset
-        The dataset to update.
-    dataset_name : str
-        The name of the dataset.
-    cdf_attrs : ImapCdfAttributes
-        The attribute manager for CDF attributes.
-
-    Returns
-    -------
-    xarray.Dataset
-        The updated dataset.
-    """
-    cdf_attrs.add_instrument_global_attrs("codice")
-    cdf_attrs.add_instrument_variable_attrs("codice", "l2")
-
-    # Update the global attributes
-    dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
-
-    # Set the variable attributes
-    for variable_name in dataset.data_vars.keys():
-        try:
-            dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-                variable_name, check_schema=False
-            )
-        except KeyError:
-            # Some variables may have a product descriptor prefix in the
-            # cdf attributes key if they are common to multiple products.
-            descriptor = dataset_name.split("imap_codice_l2_")[-1]
-            cdf_attrs_key = f"{descriptor}-{variable_name}"
-            try:
-                dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-                    f"{cdf_attrs_key}", check_schema=False
-                )
-            except KeyError:
-                logger.error(
-                    f"Field '{variable_name}' and '{cdf_attrs_key}' not found in "
-                    f"attribute manager."
-                )
-    return dataset