imap_processing-1.0.0-py3-none-any.whl → imap_processing-1.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
- imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
- imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +29 -22
- imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
- imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +10 -27
- imap_processing/codice/codice_l1a_lo_angular.py +362 -0
- imap_processing/codice/codice_l1a_lo_species.py +282 -0
- imap_processing/codice/codice_l1b.py +62 -97
- imap_processing/codice/codice_l2.py +801 -174
- imap_processing/codice/codice_new_l1a.py +64 -0
- imap_processing/codice/constants.py +96 -0
- imap_processing/codice/utils.py +270 -0
- imap_processing/ena_maps/ena_maps.py +157 -95
- imap_processing/ena_maps/utils/coordinates.py +5 -0
- imap_processing/ena_maps/utils/corrections.py +450 -0
- imap_processing/ena_maps/utils/map_utils.py +143 -42
- imap_processing/ena_maps/utils/naming.py +3 -1
- imap_processing/hi/hi_l1c.py +34 -12
- imap_processing/hi/hi_l2.py +82 -44
- imap_processing/ialirt/constants.py +7 -1
- imap_processing/ialirt/generate_coverage.py +3 -1
- imap_processing/ialirt/l0/parse_mag.py +1 -0
- imap_processing/ialirt/l0/process_codice.py +66 -0
- imap_processing/ialirt/l0/process_hit.py +1 -0
- imap_processing/ialirt/l0/process_swapi.py +1 -0
- imap_processing/ialirt/l0/process_swe.py +2 -0
- imap_processing/ialirt/process_ephemeris.py +6 -2
- imap_processing/ialirt/utils/create_xarray.py +4 -2
- imap_processing/idex/idex_l2a.py +2 -2
- imap_processing/idex/idex_l2b.py +1 -1
- imap_processing/lo/l1c/lo_l1c.py +62 -4
- imap_processing/lo/l2/lo_l2.py +85 -15
- imap_processing/mag/l1a/mag_l1a.py +2 -2
- imap_processing/mag/l1a/mag_l1a_data.py +71 -13
- imap_processing/mag/l1c/interpolation_methods.py +34 -13
- imap_processing/mag/l1c/mag_l1c.py +117 -67
- imap_processing/mag/l1d/mag_l1d_data.py +3 -1
- imap_processing/quality_flags.py +1 -0
- imap_processing/spice/geometry.py +11 -9
- imap_processing/spice/pointing_frame.py +77 -50
- imap_processing/swapi/constants.py +4 -0
- imap_processing/swapi/l1/swapi_l1.py +59 -24
- imap_processing/swapi/l2/swapi_l2.py +17 -3
- imap_processing/swe/utils/swe_constants.py +7 -7
- imap_processing/ultra/l1a/ultra_l1a.py +121 -72
- imap_processing/ultra/l1b/de.py +57 -1
- imap_processing/ultra/l1b/extendedspin.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
- imap_processing/ultra/l1b/ultra_l1b_extended.py +25 -12
- imap_processing/ultra/l1c/helio_pset.py +29 -6
- imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
- imap_processing/ultra/l1c/spacecraft_pset.py +10 -6
- imap_processing/ultra/l1c/ultra_l1c.py +6 -6
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
- imap_processing/ultra/l2/ultra_l2.py +2 -2
- imap_processing-1.0.2.dist-info/METADATA +121 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/RECORD +67 -61
- imap_processing-1.0.0.dist-info/METADATA +0 -120
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
imap_processing/ena_maps/utils/naming.py
CHANGED

@@ -342,7 +342,9 @@ class MapDescriptor:
         elif frame_str == "gcs":
             return SpiceFrame.IMAP_GCS
         else:
-            raise NotImplementedError(
+            raise NotImplementedError(
+                f"Coordinate frame {frame_str} is not yet implemented."
+            )

    def to_empty_map(
        self,
imap_processing/hi/hi_l1c.py
CHANGED

@@ -102,7 +102,7 @@ def generate_pset_dataset(

     pset_dataset = empty_pset_dataset(
         de_dataset.epoch.data[0],
-        de_dataset.esa_energy_step
+        de_dataset.esa_energy_step,
         config_df.cal_prod_config.number_of_products,
         logical_source_parts["sensor"],
     )

@@ -121,7 +121,7 @@ def generate_pset_dataset(


 def empty_pset_dataset(
-    epoch_val: int, l1b_energy_steps:
+    epoch_val: int, l1b_energy_steps: xr.DataArray, n_cal_prods: int, sensor_str: str
 ) -> xr.Dataset:
     """
     Allocate an empty xarray.Dataset with appropriate pset coordinates.

@@ -130,7 +130,7 @@ def empty_pset_dataset(
     ----------
     epoch_val : int
         The starting epoch in J2000 TT nanoseconds for data in the PSET.
-    l1b_energy_steps :
+    l1b_energy_steps : xarray.DataArray
         The array of esa_energy_step data from the L1B DE product.
     n_cal_prods : int
         Number of calibration products to allocate.

@@ -164,8 +164,12 @@ def empty_pset_dataset(
         "hi_pset_esa_energy_step", check_schema=False
     ).copy()
     dtype = attrs.pop("dtype")
-    # Find the unique
-
+    # Find the unique esa_energy_steps from the L1B data
+    # Exclude 0 and FILLVAL
+    esa_energy_steps = np.array(
+        sorted(set(l1b_energy_steps.values) - {0, l1b_energy_steps.attrs["FILLVAL"]}),
+        dtype=dtype,
+    )
     coords["esa_energy_step"] = xr.DataArray(
         esa_energy_steps,
         name="esa_energy_step",
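
The dedup added in `empty_pset_dataset` leans on Python set arithmetic to drop sentinel values before sorting. A minimal standalone sketch of the same pattern, with a made-up fill value standing in for the one that really comes from the variable's CDF attributes:

    import numpy as np

    FILLVAL = -9223372036854775808  # hypothetical sentinel; the real one comes from CDF attrs

    l1b_energy_steps = np.array([3, 1, 0, 2, FILLVAL, 1, 3])
    esa_energy_steps = np.array(
        sorted(set(l1b_energy_steps.tolist()) - {0, FILLVAL}),
        dtype=np.int64,
    )
    print(esa_energy_steps)  # [1 2 3]
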
@@ -571,11 +575,26 @@ def find_second_de_packet_data(l1b_dataset: xr.Dataset) -> xr.Dataset:
     # We should get two CCSDS packets per 8-spin ESA step.
     # Get the indices of the packet before each ESA change.
     esa_step = epoch_dataset["esa_step"].values
+    esa_energy_step = epoch_dataset["esa_energy_step"].values
+    # A change in esa_step should indicate the location of the second packet in
+    # each pair of DE packets at an esa_energy_step. In practice, during some
+    # calibration activities, it was observed that the esa_energy_step can change
+    # when the esa_step did not. So, we look for either to change and use the
+    # indices of those changes to identify the second packet in each pair. We
+    # also need to add the last packet index and assume an energy step change
+    # occurs after the last packet.
     second_esa_packet_idx = np.append(
-        np.flatnonzero(np.diff(esa_step) != 0)
+        np.flatnonzero((np.diff(esa_step) != 0) | (np.diff(esa_energy_step) != 0)),
+        len(esa_step) - 1,
+    )
+    # Remove esa energy steps at 0 - these are calibrations
+    keep_mask = esa_energy_step[second_esa_packet_idx] != 0
+    # Remove esa energy steps at FILLVAL - these are unidentified
+    keep_mask &= (
+        esa_energy_step[second_esa_packet_idx]
+        != l1b_dataset["esa_energy_step"].attrs["FILLVAL"]
     )
-
-    second_esa_packet_idx = second_esa_packet_idx[esa_step[second_esa_packet_idx] != 0]
+    second_esa_packet_idx = second_esa_packet_idx[keep_mask]
     # Remove indices where we don't have two consecutive packets at the same ESA
     if second_esa_packet_idx[0] == 0:
         logger.warning(

@@ -584,7 +603,8 @@ def find_second_de_packet_data(l1b_dataset: xr.Dataset) -> xr.Dataset:
     )
     second_esa_packet_idx = second_esa_packet_idx[1:]
     missing_esa_pair_mask = (
-
+        esa_energy_step[second_esa_packet_idx - 1]
+        != esa_energy_step[second_esa_packet_idx]
     )
     if missing_esa_pair_mask.any():
         logger.warning(
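
The packet-pairing change in `find_second_de_packet_data` is a change-point search: `np.diff` flags samples where either stepping value changes, `np.flatnonzero` turns the flags into indices, and the final index is appended so the last group is closed too. A toy sketch of that pattern on synthetic step arrays:

    import numpy as np

    esa_step = np.array([1, 1, 2, 2, 3, 3])
    esa_energy_step = np.array([10, 10, 10, 20, 20, 20])

    # Index of the last packet in each group; a group ends when either value changes
    last_in_group = np.append(
        np.flatnonzero((np.diff(esa_step) != 0) | (np.diff(esa_energy_step) != 0)),
        len(esa_step) - 1,
    )
    print(last_in_group)  # [1 2 3 5]
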
@@ -629,9 +649,11 @@ def get_de_clock_ticks_for_esa_step(
     # ESA step group so this match is the end time. The start time is
     # 8-spins earlier.
     spin_start_mets = spin_df.spin_start_met.to_numpy()
-    # CCSDS MET has one second resolution, add
-    # greater than the spin start time it ended on.
-
+    # CCSDS MET has one second resolution, add two to it to make sure it is
+    # greater than the spin start time it ended on. Theoretically, adding
+    # one second should be sufficient, but in practice, with flight data, adding
+    # two seconds was found to be necessary.
+    end_time_ind = np.flatnonzero(ccsds_met + 2 >= spin_start_mets).max()

     # If the minimum absolute difference is greater than 1/2 the spin-phase
     # we have a problem.
imap_processing/hi/hi_l2.py
CHANGED

@@ -11,14 +11,26 @@ from imap_processing.ena_maps.ena_maps import (
     HiPointingSet,
     RectangularSkyMap,
 )
-from imap_processing.ena_maps.utils.corrections import
+from imap_processing.ena_maps.utils.corrections import (
+    PowerLawFluxCorrector,
+    apply_compton_getting_correction,
+    interpolate_map_flux_to_helio_frame,
+)
 from imap_processing.ena_maps.utils.naming import MapDescriptor
 from imap_processing.hi.utils import CalibrationProductConfig

 logger = logging.getLogger(__name__)

+SC_FRAME_VARS_TO_PROJECT = {
+    "counts",
+    "exposure_factor",
+    "bg_rates",
+    "bg_rates_unc",
+    "obs_date",
+}
+HELIO_FRAME_VARS_TO_PROJECT = SC_FRAME_VARS_TO_PROJECT | {"energy_sc"}
 # TODO: is an exposure time weighted average for obs_date appropriate?
-
+FULL_EXPOSURE_TIME_AVERAGE_SET = {"bg_rates", "bg_rates_unc", "obs_date", "energy_sc"}


 def hi_l2(
@@ -98,33 +110,64 @@ def generate_hi_map(
     The sky map with all the PSET data projected into the map.
     """
     output_map = descriptor.to_empty_map()
+    vars_to_bin = (
+        HELIO_FRAME_VARS_TO_PROJECT
+        if descriptor.frame_descriptor == "hf"
+        else SC_FRAME_VARS_TO_PROJECT
+    )
+    vars_to_exposure_time_average = FULL_EXPOSURE_TIME_AVERAGE_SET & vars_to_bin

     if not isinstance(output_map, RectangularSkyMap):
         raise NotImplementedError("Healpix map output not supported for Hi")

-
-    if descriptor.frame_descriptor != "sf":
-        raise NotImplementedError("CG correction not implemented for Hi")
+    cached_esa_steps = None

     for pset_path in psets:
         logger.info(f"Processing {pset_path}")
-        pset = HiPointingSet(pset_path
-
-        #
-        # the
-
-
+        pset = HiPointingSet(pset_path)
+
+        # Store the first PSET esa_energy_step values and make sure every PSET
+        # contains the same set of esa_energy_step values.
+        # TODO: Correctly handle PSETs with different esa_energy_step values.
+        if cached_esa_steps is None:
+            cached_esa_steps = pset.data["esa_energy_step"].values.copy()
+            esa_ds = esa_energy_df(
+                l2_ancillary_path_dict["esa-energies"],
+                pset.data["esa_energy_step"].values,
+            ).to_xarray()
+            energy_kev = esa_ds["nominal_central_energy"]
+        if not np.array_equal(cached_esa_steps, pset.data["esa_energy_step"].values):
+            raise ValueError(
+                "All PSETs must have the same set of esa_energy_step values."
+            )
+
+        if descriptor.frame_descriptor == "hf":
+            # convert esa nominal central energy from keV to eV
+            esa_energy_ev = energy_kev * 1000
+            pset = apply_compton_getting_correction(pset, esa_energy_ev)
+
+        # Multiply variables that need to be exposure time weighted average by
+        # exposure factor.
+        for var in vars_to_exposure_time_average:
+            if var in pset.data:
+                pset.data[var] *= pset.data["exposure_factor"]
+
+        # Set the mask used to filter ram/anti-ram pixels
+        pset_valid_mask = None  # Default to no mask (full spin)
+        if descriptor.spin_phase == "ram":
+            pset_valid_mask = pset.data["ram_mask"]
+        elif descriptor.spin_phase == "anti":
+            pset_valid_mask = ~pset.data["ram_mask"]

         # Project (bin) the PSET variables into the map pixels
         output_map.project_pset_values_to_map(
-            pset,
-            ["counts", "exposure_factor", "bg_rates", "bg_rates_unc", "obs_date"],
+            pset, list(vars_to_bin), pset_valid_mask=pset_valid_mask
         )

     # Finish the exposure time weighted mean calculation of backgrounds
     # Allow divide by zero to fill set pixels with zero exposure time to NaN
     with np.errstate(divide="ignore"):
-        for var in
+        for var in vars_to_exposure_time_average:
             output_map.data_1d[var] /= output_map.data_1d["exposure_factor"]

     output_map.data_1d.update(calculate_ena_signal_rates(output_map.data_1d))
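
The multiply-then-divide bookkeeping above is the usual way to accumulate an exposure-time weighted mean over many inputs: each PSET contributes value * exposure, the map accumulates both sums, and a single division at the end recovers the weighted average. A toy sketch of the same bookkeeping, assuming plain numpy arrays in place of the PSET and map objects:

    import numpy as np

    # Two "PSETs" contributing to the same two map pixels
    values = [np.array([1.0, 4.0]), np.array([3.0, 0.0])]
    exposures = [np.array([2.0, 0.0]), np.array([2.0, 0.0])]

    weighted_sum = sum(v * e for v, e in zip(values, exposures))
    exposure_sum = sum(exposures)
    with np.errstate(divide="ignore", invalid="ignore"):
        weighted_mean = weighted_sum / exposure_sum  # zero-exposure pixels become NaN
    print(weighted_mean)  # [ 2. nan]
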
@@ -138,30 +181,27 @@ def generate_hi_map(
     # TODO: Figure out how to compute obs_date_range (stddev of obs_date)
     output_map.data_1d["obs_date_range"] = xr.zeros_like(output_map.data_1d["obs_date"])

+    # Set the energy_step_delta values to the energy bandpass half-width-half-max
+    energy_delta = esa_ds["bandpass_fwhm"] / 2
+    output_map.data_1d["energy_delta_minus"] = energy_delta
+    output_map.data_1d["energy_delta_plus"] = energy_delta
+
     # Rename and convert coordinate from esa_energy_step to energy
-    esa_df = esa_energy_df(
-        l2_ancillary_path_dict["esa-energies"],
-        output_map.data_1d["esa_energy_step"].data,
-    )
     output_map.data_1d = output_map.data_1d.rename({"esa_energy_step": "energy"})
-    output_map.data_1d = output_map.data_1d.assign_coords(
-        energy=esa_df["nominal_central_energy"].values
-    )
-    # Set the energy_step_delta values to the energy bandpass half-width-half-max
-    energy_delta = esa_df["bandpass_fwhm"].values / 2
-    output_map.data_1d["energy_delta_minus"] = xr.DataArray(
-        energy_delta,
-        name="energy_delta_minus",
-        dims=["energy"],
-    )
-    output_map.data_1d["energy_delta_plus"] = xr.DataArray(
-        energy_delta,
-        name="energy_delta_plus",
-        dims=["energy"],
-    )
+    output_map.data_1d = output_map.data_1d.assign_coords(energy=energy_kev.values)

     output_map.data_1d = output_map.data_1d.drop("esa_energy_step_label")

+    # Apply Compton-Getting interpolation for heliocentric frame maps
+    if descriptor.frame_descriptor == "hf":
+        esa_energy_ev = esa_energy_ev.rename({"esa_energy_step": "energy"})
+        esa_energy_ev = esa_energy_ev.assign_coords(energy=energy_kev.values)
+        output_map.data_1d = interpolate_map_flux_to_helio_frame(
+            output_map.data_1d,
+            output_map.data_1d["energy"] * 1000,  # Convert ESA energies to eV
+            esa_energy_ev,  # heliocentric energies (same as ESA energies)
+        )
+
     return output_map
@@ -323,20 +363,15 @@ def combine_calibration_products(
     # Perform inverse-variance weighted averaging
     # Handle divide by zero and invalid values
     with np.errstate(divide="ignore", invalid="ignore"):
-        # Calculate weights for statistical variance combination using only
-        # statistical variance
-        stat_weights = 1.0 / improved_stat_variance
-
-        # Combined statistical uncertainty from inverse-variance formula
-        combined_stat_unc = np.sqrt(1.0 / stat_weights.sum(dim="calibration_prod"))
-
         # Use total variance weights for flux combination
         flux_weights = 1.0 / total_variance
         weighted_flux_sum = (ena_flux * flux_weights).sum(dim="calibration_prod")
         combined_flux = weighted_flux_sum / flux_weights.sum(dim="calibration_prod")

     map_ds["ena_intensity"] = combined_flux
-    map_ds["ena_intensity_stat_uncert"] =
+    map_ds["ena_intensity_stat_uncert"] = np.sqrt(
+        (map_ds["ena_intensity_stat_uncert"] ** 2).sum(dim="calibration_prod")
+    )
     # For systematic error, just do quadrature sum over the systematic error for
     # each calibration product.
     map_ds["ena_intensity_sys_err"] = np.sqrt((sys_err**2).sum(dim="calibration_prod"))
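
The combination step in `combine_calibration_products` uses inverse-variance weighting: each calibration product is weighted by 1/variance, which minimizes the variance of the combined flux, and uncorrelated error terms are summed in quadrature. A numpy-only sketch of both formulas for a single pixel:

    import numpy as np

    flux = np.array([10.0, 12.0, 11.0])   # same pixel, three calibration products
    variance = np.array([1.0, 4.0, 2.0])  # total variance per product

    weights = 1.0 / variance
    combined_flux = (flux * weights).sum() / weights.sum()

    sys_err = np.array([0.3, 0.4, 0.0])
    combined_sys_err = np.sqrt((sys_err**2).sum())  # quadrature sum

    print(round(combined_flux, 3), round(combined_sys_err, 3))  # 10.571 0.5
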
@@ -425,7 +460,7 @@ def _calculate_improved_stat_variance(


 def esa_energy_df(
-    esa_energies_path: str | Path, esa_energy_steps: np.ndarray
+    esa_energies_path: str | Path, esa_energy_steps: np.ndarray | slice | None = None
 ) -> pd.DataFrame:
     """
     Lookup the nominal central energy values for given esa energy steps.

@@ -434,8 +469,9 @@ def esa_energy_df(
     ----------
     esa_energies_path : str or pathlib.Path
         Location of the calibration csv file containing the lookup data.
-    esa_energy_steps : numpy.ndarray
-        The ESA energy steps to get energies for.
+    esa_energy_steps : numpy.ndarray, slice, or None
+        The ESA energy steps to get energies for. If not provided (default is None),
+        the full dataframe is returned.

     Returns
     -------

@@ -443,6 +479,8 @@ def esa_energy_df(
     Full data frame from the csv file filtered to only include the
     esa_energy_steps input.
     """
+    if esa_energy_steps is None:
+        esa_energy_steps = slice(None)
     esa_energies_lut = pd.read_csv(
         esa_energies_path, comment="#", index_col="esa_energy_step"
     )
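
The `slice(None)` default added to `esa_energy_df` is a compact way to make "no selection" mean "select everything" in pandas label indexing, since `df.loc[slice(None)]` returns the full frame. A small sketch of the pattern, with a hypothetical lookup table:

    import pandas as pd

    lut = pd.DataFrame(
        {"nominal_central_energy": [0.5, 0.9, 1.6]},
        index=pd.Index([1, 2, 3], name="esa_energy_step"),
    )

    def lookup(steps=None):
        # None means "all steps": slice(None) selects every row in .loc
        if steps is None:
            steps = slice(None)
        return lut.loc[steps]

    print(lookup([1, 3]))  # two selected rows
    print(lookup())        # full table
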
imap_processing/ialirt/constants.py
CHANGED

@@ -65,5 +65,11 @@ STATIONS = {
         latitude=54.2632,  # degrees North
         altitude=0.1,  # approx 100 meters
         min_elevation_deg=5,  # 5 degrees is the requirement
-    )
+    ),
+    "Manaus": StationProperties(
+        longitude=-59.969334,  # degrees East (negative = West)
+        latitude=-2.891257,  # degrees North (negative = South)
+        altitude=0.1,  # approx 100 meters
+        min_elevation_deg=5,  # 5 degrees is the requirement
+    ),
 }
imap_processing/ialirt/generate_coverage.py
CHANGED

@@ -77,7 +77,9 @@ def generate_coverage(
         dsn_outage_mask |= (time_range >= start_et) & (time_range <= end_et)

     for station_name, (lon, lat, alt, min_elevation) in stations.items():
-
+        _azimuth, elevation = calculate_azimuth_and_elevation(
+            lon, lat, alt, time_range, obsref="IAU_EARTH"
+        )
         visible = elevation > min_elevation

         outage_mask = np.zeros(time_range.shape, dtype=bool)
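
The loop unpacks each station's property tuple directly in the `for` statement and reduces the per-sample elevation series to a boolean visibility mask. A compact sketch of that masking step with hypothetical numbers:

    import numpy as np

    stations = {"Manaus": (-59.969334, -2.891257, 0.1, 5)}  # lon, lat, alt, min_elev

    elevation = np.array([2.0, 7.5, 30.0, 4.9])  # hypothetical elevations in degrees
    for station_name, (lon, lat, alt, min_elevation) in stations.items():
        visible = elevation > min_elevation
        print(station_name, visible)  # Manaus [False  True  True False]
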
imap_processing/ialirt/l0/parse_mag.py
CHANGED

@@ -710,6 +710,7 @@ def process_packet(
         "met": int(met_all[i]),
         "met_in_utc": met_to_utc(met_all[i]).split(".")[0],
         "ttj2000ns": int(met_to_ttj2000ns(met_all[i])),
+        "instrument": "mag",
         "mag_epoch": int(mago_times_all[i]),
         "mag_B_GSE": [Decimal(str(v)) for v in gse_vector[i]],
         "mag_B_GSM": [Decimal(str(v)) for v in gsm_vector[i]],
imap_processing/ialirt/l0/process_codice.py
CHANGED

@@ -4,11 +4,56 @@ import logging
 from decimal import Decimal
 from typing import Any

+import numpy as np
 import xarray as xr

+from imap_processing.codice import decompress
+from imap_processing.ialirt.utils.grouping import find_groups
+
 logger = logging.getLogger(__name__)

+FILLVAL_UINT8 = 255
 FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
+COD_LO_COUNTER = 232
+COD_HI_COUNTER = 197
+COD_LO_RANGE = range(0, 15)
+COD_HI_RANGE = range(0, 5)
+
+
+def concatenate_bytes(grouped_data: xr.Dataset, group: int, sensor: str) -> bytearray:
+    """
+    Concatenate all data fields for a specific group into a single bytearray.
+
+    Parameters
+    ----------
+    grouped_data : xr.Dataset
+        The grouped CoDICE dataset containing cod_{sensor}_data_XX variables.
+    group : int
+        The group number to extract.
+    sensor : str
+        The sensor type, either 'lo' or 'hi'.
+
+    Returns
+    -------
+    current_data_stream: bytearray
+        The concatenated data stream for the selected group.
+    """
+    current_data_stream = bytearray()
+    group_mask = (grouped_data["group"] == group).values
+
+    cod_ranges = {
+        "lo": COD_LO_RANGE,
+        "hi": COD_HI_RANGE,
+    }
+
+    # Loop through all data fields.
+    for field in cod_ranges[sensor]:
+        data_array = grouped_data[f"cod_{sensor}_data_{field:02}"].values[group_mask]
+
+        # Convert each value to uint8 and extend the byte stream
+        current_data_stream.extend(np.uint8(data_array).tobytes())
+
+    return current_data_stream


 def process_codice(

@@ -35,6 +80,27 @@ def process_codice(
     - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
     - Calculate the public data products
     """
+    grouped_cod_lo_data = find_groups(
+        dataset, (0, COD_LO_COUNTER), "cod_lo_counter", "cod_lo_acq"
+    )
+    grouped_cod_hi_data = find_groups(
+        dataset, (0, COD_HI_COUNTER), "cod_hi_counter", "cod_hi_acq"
+    )
+    unique_cod_lo_groups = np.unique(grouped_cod_lo_data["group"])
+    unique_cod_hi_groups = np.unique(grouped_cod_hi_data["group"])
+
+    for group in unique_cod_lo_groups:
+        cod_lo_data_stream = concatenate_bytes(grouped_cod_lo_data, group, "lo")
+
+        # Decompress binary stream
+        decompressed_data = decompress._apply_pack_24_bit(bytes(cod_lo_data_stream))
+
+    for group in unique_cod_hi_groups:
+        cod_hi_data_stream = concatenate_bytes(grouped_cod_hi_data, group, "lo")
+
+        # Decompress binary stream
+        decompressed_data = decompress._apply_lossy_a(bytes(cod_hi_data_stream))  # noqa
+
     # For I-ALiRT SIT, the test data being used has all zeros and thus no
     # groups can be found, thus there is no data to process
     # TODO: Once I-ALiRT test data is acquired that actually has data in it,
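
`concatenate_bytes` rebuilds a contiguous byte stream from per-field uint8 arrays by appending each field's raw buffer. The core numpy-to-bytes pattern, sketched standalone with made-up field names:

    import numpy as np

    fields = {
        "data_00": np.array([1, 2, 3], dtype=np.uint8),
        "data_01": np.array([250, 251], dtype=np.uint8),
    }

    stream = bytearray()
    for name in sorted(fields):
        # tobytes() yields the raw uint8 buffer, preserving field order
        stream.extend(fields[name].tobytes())

    print(bytes(stream))  # b'\x01\x02\x03\xfa\xfb'
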
imap_processing/ialirt/l0/process_hit.py
CHANGED

@@ -171,6 +171,7 @@ def process_hit(xarray_data: xr.Dataset) -> list[dict]:
         "met": int(met),
         "met_in_utc": met_to_utc(met).split(".")[0],
         "ttj2000ns": int(met_to_ttj2000ns(met)),
+        "instrument": "hit",
         "hit_e_a_side_low_en": int(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"]),
         "hit_e_a_side_med_en": int(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"]),
         "hit_e_a_side_high_en": int(l1["IALRT_RATE_7"]),
imap_processing/ialirt/l0/process_swapi.py
CHANGED

@@ -226,6 +226,7 @@ def process_swapi_ialirt(
         "met": int(met_values[entry]),
         "met_in_utc": met_to_utc(met_values[entry]).split(".")[0],
         "ttj2000ns": int(met_to_ttj2000ns(met_values[entry])),
+        "instrument": "swapi",
         "swapi_pseudo_proton_speed": Decimal(solution["pseudo_speed"][entry]),
         "swapi_pseudo_proton_density": Decimal(
             solution["pseudo_density"][entry]
imap_processing/ialirt/l0/process_swe.py
CHANGED

@@ -553,6 +553,7 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
         "met": met_first_half,
         "met_in_utc": met_to_utc(met_first_half).split(".")[0],
         "ttj2000ns": int(met_to_ttj2000ns(met_first_half)),
+        "instrument": "swe",
         "swe_normalized_counts": [int(val) for val in summed_first],
         "swe_counterstreaming_electrons": bde_first_half,
     },

@@ -563,6 +564,7 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
         "met": met_second_half,
         "met_in_utc": met_to_utc(met_second_half).split(".")[0],
         "ttj2000ns": int(met_to_ttj2000ns(met_second_half)),
+        "instrument": "swe",
         "swe_normalized_counts": [int(val) for val in summed_second],
         "swe_counterstreaming_electrons": bde_second_half,
     },
imap_processing/ialirt/process_ephemeris.py
CHANGED

@@ -72,6 +72,7 @@ def calculate_azimuth_and_elevation(
     altitude: float,
     observation_time: float | np.ndarray,
     target: str = SpiceBody.IMAP.name,
+    obsref: str = "ITRF93",
 ) -> tuple:
     """
     Calculate azimuth and elevation.

@@ -91,6 +92,9 @@ def calculate_azimuth_and_elevation(
         is to be computed. Expressed as ephemeris time, seconds past J2000 TDB.
     target : str (Optional)
         The target body. Default is "IMAP".
+    obsref : str (Optional)
+        Body-fixed, body-centered reference frame wrt
+        observer's center.

     Returns
     -------

@@ -120,7 +124,7 @@ def calculate_azimuth_and_elevation(
            elplsz=True,  # Elevation increases from the XY plane toward +Z
            obspos=ground_station_position_ecef,  # observer pos. to center of motion
            obsctr="EARTH",  # Name of the center of motion
-           obsref=
+           obsref=obsref,  # Body-fixed, body-centered reference frame wrt
                            # observer's center
        )
        azimuth.append(np.rad2deg(azel_results[0][1]))

@@ -223,7 +227,7 @@ def build_output(

     # For now, assume that kernel management will be handled by ensure_spice
     azimuth, elevation = calculate_azimuth_and_elevation(
-        longitude, latitude, altitude, time_range
+        longitude, latitude, altitude, time_range, obsref="ITRF93"
     )

     output_dict["time"] = et_to_utc(time_range, format_str="ISOC")
imap_processing/ialirt/utils/create_xarray.py
CHANGED

@@ -52,7 +52,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
         ["radial", "tangential", "normal"],
         name="RTN_component",
         dims=["RTN_component"],
-        attrs=cdf_manager.get_variable_attributes("
+        attrs=cdf_manager.get_variable_attributes("RTN_component", check_schema=False),
     )

     esa_step = xr.DataArray(

@@ -85,7 +85,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
         name="codice_hi_h_spin_angle",
         dims=["codice_hi_h_spin_angle"],
         attrs=cdf_manager.get_variable_attributes(
-            "
+            "codice_hi_h_spin_angle", check_schema=False
         ),
     )

@@ -155,6 +155,8 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
             "sc_velocity_GSM",
             "sc_velocity_GSE",
             "mag_hk_status",
+            "spice_kernels",
+            "instrument",
         ]:
             continue
         elif key in ["mag_B_GSE", "mag_B_GSM", "mag_B_RTN"]:
imap_processing/idex/idex_l2a.py
CHANGED

@@ -118,7 +118,7 @@ def idex_l2a(l1b_dataset: xr.Dataset, ancillary_files: dict) -> xr.Dataset:
     atomic_masses_path = f"{imap_module_directory}/idex/atomic_masses.csv"
     atomic_masses = pd.read_csv(atomic_masses_path)
     masses = atomic_masses["Mass"]
-
+    _stretches, _shifts, mass_scales = time_to_mass(tof_high.data, hs_time.data, masses)

     # TODO use correct fillval
     mass_scales_da = xr.DataArray(

@@ -379,7 +379,7 @@ def log_smooth_powerlaw(log_v: float, log_a: float, params: np.ndarray) -> float
     # segments.
     # vb and vc are the characteristic speeds where the slope transition happens, and k
     # sets the sharpness of the transitions.
-    a1, a2, a3, vb, vc,
+    a1, a2, a3, vb, vc, _k, m = params
     v = 10**log_v
     base = log_a + a1 * log_v
     transition1 = (1 + (v / vb) ** m) ** ((a2 - a1) / m)
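
The `transition1` factor in `log_smooth_powerlaw` is the standard smoothly-broken power law: the slope is `a1` well below `vb`, tends to `a2` well above it, and the exponent `m` sets how sharp the break is. A self-contained sketch of a single-break version of the formula:

    import numpy as np

    def smooth_broken_powerlaw(v, a, a1, a2, vb, m):
        # log10(flux): slope a1 below vb, bending toward a2 above vb; m sets sharpness
        return a + a1 * np.log10(v) + np.log10((1 + (v / vb) ** m) ** ((a2 - a1) / m))

    v = np.array([0.1, 1.0, 10.0, 100.0])
    # slope is ~a1 (-1) below vb and ~a2 (-2) above vb
    print(smooth_broken_powerlaw(v, a=0.0, a1=-1.0, a2=-2.0, vb=10.0, m=4.0))
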
imap_processing/idex/idex_l2b.py
CHANGED

@@ -645,7 +645,7 @@ def get_science_acquisition_on_percentage(evt_dataset: xr.Dataset) -> dict:
     of year.
     """
     # Get science acquisition start and stop times
-
+    _evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
     if len(evt_time) == 0:
         logger.warning(
             "No science acquisition events found in event dataset. Returning empty "
imap_processing/lo/l1c/lo_l1c.py
CHANGED

@@ -11,9 +11,14 @@ from scipy.stats import binned_statistic_dd
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.lo import lo_ancillary
 from imap_processing.lo.l1b.lo_l1b import set_bad_or_goodtimes
+from imap_processing.spice.geometry import SpiceFrame, frame_transform_az_el
 from imap_processing.spice.repoint import get_pointing_times
 from imap_processing.spice.spin import get_spin_number
-from imap_processing.spice.time import
+from imap_processing.spice.time import (
+    met_to_ttj2000ns,
+    ttj2000ns_to_et,
+    ttj2000ns_to_met,
+)

 N_ESA_ENERGY_STEPS = 7
 N_SPIN_ANGLE_BINS = 3600

@@ -164,6 +169,10 @@ def lo_l1c(sci_dependencies: dict, anc_dependencies: list) -> list[xr.Dataset]:
         attr_mgr,
     )

+    pset["hae_longitude"], pset["hae_latitude"] = set_pointing_directions(
+        pset["epoch"].item()
+    )
+
     pset.attrs = attr_mgr.get_global_attributes(logical_source)

     pset = pset.assign_coords(

@@ -293,9 +302,9 @@ def create_pset_counts(
     # Create the histogram with 3600 longitude bins, 40 latitude bins, and 7 energy bins
     lon_edges = np.arange(3601)
     lat_edges = np.arange(41)
-    energy_edges = np.arange(
+    energy_edges = np.arange(1, 9)

-    hist,
+    hist, _edges = np.histogramdd(
         data,
         bins=[energy_edges, lon_edges, lat_edges],
     )
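
`np.histogramdd` bins N-dimensional samples in one call; with the edges above, a (7, 3600, 40) counts cube falls out directly. A reduced sketch of the same call with tiny bin counts:

    import numpy as np

    # columns: energy step, longitude bin, latitude bin
    data = np.array([[1, 0.5, 0.5], [1, 1.5, 0.5], [2, 0.5, 1.5]])

    energy_edges = np.arange(1, 4)  # 2 energy bins
    lon_edges = np.arange(3)        # 2 longitude bins
    lat_edges = np.arange(3)        # 2 latitude bins

    hist, _edges = np.histogramdd(data, bins=[energy_edges, lon_edges, lat_edges])
    print(hist.shape)  # (2, 2, 2)
    print(hist[0])     # counts for the first energy step
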
@@ -572,7 +581,7 @@ def set_background_rates(
         if row["type"] == "rate":
             bg_rates[esa_step, bin_start:bin_end, :] = value
         elif row["type"] == "sigma":
-
+            bg_sys_err[esa_step, bin_start:bin_end, :] = value
         else:
             raise ValueError("Unknown background type in ancillary file.")
     # set the background rates, uncertainties, and systematic errors
@@ -597,3 +606,52 @@ def set_background_rates(
     )

     return bg_rates_data, bg_stat_uncert_data, bg_sys_err_data
+
+
+def set_pointing_directions(epoch: float) -> tuple[xr.DataArray, xr.DataArray]:
+    """
+    Set the pointing directions for the given epoch.
+
+    The pointing directions are calculated by transforming spin and off angles
+    to HAE longitude and latitude using SPICE. This returns the HAE longitude
+    and latitude, each as a (3600, 40) array.
+
+    Parameters
+    ----------
+    epoch : float
+        The epoch time in TTJ2000ns.
+
+    Returns
+    -------
+    hae_longitude : xr.DataArray
+        The HAE longitude for each spin and off angle bin.
+    hae_latitude : xr.DataArray
+        The HAE latitude for each spin and off angle bin.
+    """
+    et = ttj2000ns_to_et(epoch)
+    # create a meshgrid of spin and off angles using the bin centers
+    spin, off = np.meshgrid(
+        SPIN_ANGLE_BIN_CENTERS, OFF_ANGLE_BIN_CENTERS, indexing="ij"
+    )
+    dps_az_el = np.stack([spin, off], axis=-1)
+
+    # Transform from DPS Az/El to HAE lon/lat
+    hae_az_el = frame_transform_az_el(
+        et, dps_az_el, SpiceFrame.IMAP_DPS, SpiceFrame.IMAP_HAE, degrees=True
+    )
+
+    return xr.DataArray(
+        data=hae_az_el[:, :, 0].astype(np.float64),
+        dims=["spin_angle", "off_angle"],
+        # TODO: Add hae_longitude to yaml
+        # attrs=attr_mgr.get_variable_attributes(
+        #     "hae_longitude"
+        # )
+    ), xr.DataArray(
+        data=hae_az_el[:, :, 1].astype(np.float64),
+        dims=["spin_angle", "off_angle"],
+        # TODO: Add hae_latitude to yaml
+        # attrs=attr_mgr.get_variable_attributes(
+        #     "hae_latitude"
+        # )
+    )