imap-processing 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +4 -16
- imap_processing/codice/codice_l1a_lo_angular.py +362 -0
- imap_processing/codice/codice_l1a_lo_species.py +282 -0
- imap_processing/codice/codice_l1b.py +62 -97
- imap_processing/codice/codice_l2.py +210 -96
- imap_processing/codice/codice_new_l1a.py +64 -0
- imap_processing/codice/constants.py +37 -2
- imap_processing/codice/utils.py +270 -0
- imap_processing/ena_maps/ena_maps.py +50 -39
- imap_processing/ena_maps/utils/corrections.py +196 -14
- imap_processing/ena_maps/utils/naming.py +3 -1
- imap_processing/hi/hi_l1c.py +34 -12
- imap_processing/hi/hi_l2.py +79 -36
- imap_processing/ialirt/generate_coverage.py +3 -1
- imap_processing/ialirt/l0/parse_mag.py +1 -0
- imap_processing/ialirt/l0/process_hit.py +1 -0
- imap_processing/ialirt/l0/process_swapi.py +1 -0
- imap_processing/ialirt/l0/process_swe.py +2 -0
- imap_processing/ialirt/process_ephemeris.py +6 -2
- imap_processing/ialirt/utils/create_xarray.py +3 -2
- imap_processing/lo/l1c/lo_l1c.py +1 -1
- imap_processing/lo/l2/lo_l2.py +6 -4
- imap_processing/quality_flags.py +1 -0
- imap_processing/swapi/constants.py +4 -0
- imap_processing/swapi/l1/swapi_l1.py +47 -20
- imap_processing/swapi/l2/swapi_l2.py +17 -3
- imap_processing/ultra/l1a/ultra_l1a.py +121 -72
- imap_processing/ultra/l1b/de.py +57 -1
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
- imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
- imap_processing/ultra/l1c/helio_pset.py +28 -5
- imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
- imap_processing/ultra/l1c/spacecraft_pset.py +9 -5
- imap_processing/ultra/l1c/ultra_l1c.py +6 -6
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
- imap_processing/ultra/l2/ultra_l2.py +2 -2
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/METADATA +1 -1
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/RECORD +46 -42
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
imap_processing/ialirt/process_ephemeris.py
CHANGED

@@ -72,6 +72,7 @@ def calculate_azimuth_and_elevation(
     altitude: float,
     observation_time: float | np.ndarray,
     target: str = SpiceBody.IMAP.name,
+    obsref: str = "ITRF93",
 ) -> tuple:
     """
     Calculate azimuth and elevation.
@@ -91,6 +92,9 @@ def calculate_azimuth_and_elevation(
        is to be computed. Expressed as ephemeris time, seconds past J2000 TDB.
    target : str (Optional)
        The target body. Default is "IMAP".
+    obsref : str (Optional)
+        Body-fixed, body-centered reference frame wrt
+        observer's center.

    Returns
    -------
@@ -120,7 +124,7 @@
            elplsz=True,  # Elevation increases from the XY plane toward +Z
            obspos=ground_station_position_ecef,  # observer pos. to center of motion
            obsctr="EARTH",  # Name of the center of motion
-            obsref=
+            obsref=obsref,  # Body-fixed, body-centered reference frame wrt
                             # observer's center
        )
        azimuth.append(np.rad2deg(azel_results[0][1]))
@@ -223,7 +227,7 @@ def build_output(

    # For now, assume that kernel management will be handled by ensure_spice
    azimuth, elevation = calculate_azimuth_and_elevation(
-        longitude, latitude, altitude, time_range
+        longitude, latitude, altitude, time_range, obsref="ITRF93"
    )

    output_dict["time"] = et_to_utc(time_range, format_str="ISOC")
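The hunks above add an `obsref` keyword so the reference frame used for the azimuth/elevation solve is no longer hardcoded inside the function; `build_output` now passes "ITRF93" explicitly. A minimal usage sketch follows (the station coordinates and time are illustrative placeholders, not values from the source, and the relevant SPICE kernels are assumed to be loaded):

```python
from imap_processing.ialirt.process_ephemeris import calculate_azimuth_and_elevation

# Sketch only: coordinate and time values below are placeholders.
azimuth, elevation = calculate_azimuth_and_elevation(
    longitude=255.0,       # ground-station longitude (illustrative)
    latitude=40.0,         # ground-station latitude (illustrative)
    altitude=1.6,          # ground-station altitude (illustrative)
    observation_time=0.0,  # ephemeris time, seconds past J2000 TDB (illustrative)
    obsref="ITRF93",       # body-fixed Earth frame forwarded to the SPICE call
)
```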
imap_processing/ialirt/utils/create_xarray.py
CHANGED

@@ -52,7 +52,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset: # noqa: PLR0
        ["radial", "tangential", "normal"],
        name="RTN_component",
        dims=["RTN_component"],
-        attrs=cdf_manager.get_variable_attributes("
+        attrs=cdf_manager.get_variable_attributes("RTN_component", check_schema=False),
    )

    esa_step = xr.DataArray(
@@ -85,7 +85,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset: # noqa: PLR0
        name="codice_hi_h_spin_angle",
        dims=["codice_hi_h_spin_angle"],
        attrs=cdf_manager.get_variable_attributes(
-            "
+            "codice_hi_h_spin_angle", check_schema=False
        ),
    )

@@ -156,6 +156,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset: # noqa: PLR0
            "sc_velocity_GSE",
            "mag_hk_status",
            "spice_kernels",
+            "instrument",
        ]:
            continue
        elif key in ["mag_B_GSE", "mag_B_GSM", "mag_B_RTN"]:
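Both restored lookups above pass the variable name together with `check_schema=False`, and the returned attributes land directly on the coordinate's `attrs`. A standalone sketch of that pattern, with a plain dict standing in for whatever `cdf_manager.get_variable_attributes("RTN_component", check_schema=False)` returns in the real pipeline:

```python
import xarray as xr

# Stand-in attrs; the real values come from the CDF attribute manager.
rtn_attrs = {"CATDESC": "RTN vector component", "FIELDNAM": "RTN_component"}

rtn_component = xr.DataArray(
    ["radial", "tangential", "normal"],
    name="RTN_component",
    dims=["RTN_component"],
    attrs=rtn_attrs,
)
print(rtn_component.attrs["FIELDNAM"])  # RTN_component
```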
imap_processing/lo/l1c/lo_l1c.py
CHANGED

@@ -302,7 +302,7 @@ def create_pset_counts(
    # Create the histogram with 3600 longitude bins, 40 latitude bins, and 7 energy bins
    lon_edges = np.arange(3601)
    lat_edges = np.arange(41)
-    energy_edges = np.arange(
+    energy_edges = np.arange(1, 9)

    hist, _edges = np.histogramdd(
        data,
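For reference, `np.arange(1, 9)` yields the eight edges 1 through 8, which is what produces the seven energy bins named in the comment. A small self-contained sketch with made-up samples:

```python
import numpy as np

lon_edges = np.arange(3601)     # 3600 longitude bins
lat_edges = np.arange(41)       # 40 latitude bins
energy_edges = np.arange(1, 9)  # edges [1, 2, ..., 8] -> 7 energy bins

# Made-up samples: (longitude bin, latitude bin, energy step)
data = np.array([[10.5, 3.2, 1.0], [2000.1, 20.7, 7.0]])
hist, _edges = np.histogramdd(data, bins=[lon_edges, lat_edges, energy_edges])
print(hist.shape)  # (3600, 40, 7)
```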
imap_processing/lo/l2/lo_l2.py
CHANGED

@@ -938,8 +938,10 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:
        bootstrap_factor_array,
        dims=["energy_i", "energy_k"],
        coords={
-            "energy_i":
-
+            "energy_i": dataset["energy"].values,
+            # Add an extra coordinate for the virtual E8 channel, unused
+            # in the broadcasting calculations
+            "energy_k": np.concatenate([dataset["energy"].values, [np.nan]]),
        },
    )

@@ -1001,7 +1003,7 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:
    # NOTE: The paper uses 1-based indexing and we use 0-based indexing
    # so there is an off-by-one difference in the indices.
    bootstrap_intensity_i[:] = (
-        j_c_prime_i - bootstrap_factor.
+        j_c_prime_i - bootstrap_factor.isel(energy_i=i, energy_k=7) * j_8_b[0, ...]
    )
    # NOTE: We will square root at the end to get the uncertainty, but
    # all equations are with variances
@@ -1013,7 +1015,7 @@ def calculate_bootstrap_corrections(dataset: xr.Dataset) -> xr.Dataset:

    # Get bootstrap factors for this i and the relevant k values
    # Rename energy_k dimension to energy for alignment with intensity
-    bootstrap_factors_k = bootstrap_factor.
+    bootstrap_factors_k = bootstrap_factor.isel(
        energy_i=i, energy_k=k_indices
    ).rename({"energy_k": "energy"})

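The rebuilt coordinate block pairs the real energy channels on `energy_i` with those same channels plus one NaN placeholder on `energy_k`; that trailing placeholder is the "virtual E8 channel" that `isel(energy_k=7)` picks out in the later hunk. A toy sketch assuming seven real channels (values made up):

```python
import numpy as np
import xarray as xr

energies = np.linspace(0.5, 6.5, 7)             # toy energy channel values
bootstrap_factor_array = np.ones((7, 8)) * 0.1  # toy (energy_i, energy_k) factors

bootstrap_factor = xr.DataArray(
    bootstrap_factor_array,
    dims=["energy_i", "energy_k"],
    coords={
        "energy_i": energies,
        # Extra NaN coordinate stands in for the virtual E8 channel
        "energy_k": np.concatenate([energies, [np.nan]]),
    },
)

# Column 7 (0-based) is the virtual E8 term used in the correction above
print(bootstrap_factor.isel(energy_k=7).shape)  # (7,)
```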
imap_processing/quality_flags.py
CHANGED

@@ -64,6 +64,7 @@ class ImapAttitudeUltraFlags(FlagNameMixin):
    AUXMISMATCH = 2**1  # bit 1 # aux packet does not match Universal Spin Table
    SPINPHASE = 2**2  # bit 2 # spin phase flagged by Universal Spin Table
    SPINPERIOD = 2**3  # bit 3 # spin period flagged by Universal Spin Table
+    DURINGREPOINT = 2**4  # bit 4 # spin during a repointing


class ImapRatesUltraFlags(FlagNameMixin):
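The new bit is consumed by the Ultra L1B direct-event code further down (`de.py`) to mark events that arrive during a repointing. A minimal sketch of how such a bitmask behaves, using a plain `IntFlag` as a stand-in for the project's `FlagNameMixin` enum:

```python
from enum import IntFlag

import numpy as np


class AttitudeFlags(IntFlag):
    """Stand-in for ImapAttitudeUltraFlags; values mirror the diff above."""

    AUXMISMATCH = 2**1
    SPINPHASE = 2**2
    SPINPERIOD = 2**3
    DURINGREPOINT = 2**4  # new in 1.0.2


quality_flags = np.zeros(5, dtype=np.uint16)
during_repoint = np.array([False, True, True, False, False])

# Set bit 4 on the affected events, then test for it afterwards
quality_flags[during_repoint] |= AttitudeFlags.DURINGREPOINT.value
print((quality_flags & AttitudeFlags.DURINGREPOINT.value) != 0)
```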
imap_processing/swapi/l1/swapi_l1.py
CHANGED

@@ -12,6 +12,8 @@ from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.quality_flags import SWAPIFlags
+from imap_processing.spice.time import met_to_utc, ttj2000ns_to_met
+from imap_processing.swapi.constants import NUM_ENERGY_STEPS, NUM_PACKETS_PER_SWEEP
 from imap_processing.swapi.swapi_utils import SWAPIAPID, SWAPIMODE
 from imap_processing.utils import packet_file_to_datasets

@@ -41,10 +43,10 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
    """
    # PLAN_ID for current sweep should all be one value and
    # SWEEP_TABLE should all be one value.
-    plan_id = full_sweep_sci["plan_id"].data.reshape(-1,
-    sweep_table = full_sweep_sci["sweep_table"].data.reshape(-1,
+    plan_id = full_sweep_sci["plan_id"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)
+    sweep_table = full_sweep_sci["sweep_table"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)

-    mode = full_sweep_sci["mode"].data.reshape(-1,
+    mode = full_sweep_sci["mode"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)

    sweep_indices = (sweep_table == sweep_table[:, 0, None]).all(axis=1)
    plan_id_indices = (plan_id == plan_id[:, 0, None]).all(axis=1)
@@ -62,10 +64,10 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
    # From this: [0 24]
    # To this: [[ 0  1  2  3  4  5  6  7  8  9 10 11]
    #           [24 25 26 27 28 29 30 31 32 33 34 35]]
-    cycle_start_indices = np.where(bad_data_indices == 0)[0] *
-    bad_cycle_indices = cycle_start_indices[..., None] + np.arange(
-
-    ].reshape(-1)
+    cycle_start_indices = np.where(bad_data_indices == 0)[0] * NUM_PACKETS_PER_SWEEP
+    bad_cycle_indices = cycle_start_indices[..., None] + np.arange(
+        NUM_PACKETS_PER_SWEEP
+    )[None, ...].reshape(-1)

    logger.debug("Cycle data was bad due to one of below reasons:")
    logger.debug(
@@ -162,7 +164,7 @@ def find_sweep_starts(packets: xr.Dataset) -> npt.NDArray:
    indices_start : numpy.ndarray
        Array of indices of start cycle.
    """
-    if packets["shcoarse"].size <
+    if packets["shcoarse"].size < NUM_PACKETS_PER_SWEEP:
        return np.array([], np.int64)

    # calculate time difference between consecutive sweep
@@ -387,7 +389,7 @@ def process_sweep_data(full_sweep_sci: xr.Dataset, cem_prefix: str) -> xr.Datase
    # [ 2  3  4  5  6  7  8  9 10 11 12 13]]]
    # In other word, we grouped each cem's
    # data by full sweep.
-    current_cem_counts = current_cem_counts.reshape(6, -1,
+    current_cem_counts = current_cem_counts.reshape(6, -1, NUM_PACKETS_PER_SWEEP)

    # Then, we go from above to
    # to this final output:
@@ -421,7 +423,7 @@ def process_sweep_data(full_sweep_sci: xr.Dataset, cem_prefix: str) -> xr.Datase
    all_cem_data = np.stack(current_cem_counts, axis=-1)
    # This line just flatten the inner most array to
    # (total_full_sweeps x 72)
-    all_cem_data = all_cem_data.reshape(-1,
+    all_cem_data = all_cem_data.reshape(-1, NUM_ENERGY_STEPS)
    return all_cem_data


@@ -490,7 +492,9 @@ def process_swapi_science(
    # ===================================================================
    # Quality flags
    # ===================================================================
-    quality_flags_data = np.zeros(
+    quality_flags_data = np.zeros(
+        (total_full_sweeps, NUM_ENERGY_STEPS), dtype=np.uint16
+    )

    # Add science data quality flags
    # Have to match datatype to bitwise OR
@@ -547,7 +551,7 @@ def process_swapi_science(

    for flag_name in hk_flags_name:
        current_flag = np.repeat(good_sweep_hk_data[flag_name.lower()].data, 6).reshape(
-            -1,
+            -1, NUM_ENERGY_STEPS
        )
        # Use getattr to dynamically access the flag in SWAPIFlags class
        flag_to_set = getattr(SWAPIFlags, flag_name)
@@ -568,7 +572,9 @@ def process_swapi_science(
    # Use center time for epoch to line up with mission requests. Center time
    # of SWAPI is time of 7th packet(aka SEQ_NUMBER == 6) creation time at the
    # beginning of 7th packet.
-    epoch_values = good_sweep_sci["epoch"].data.reshape(
+    epoch_values = good_sweep_sci["epoch"].data.reshape(
+        total_full_sweeps, NUM_PACKETS_PER_SWEEP
+    )[:, 6]

    epoch_time = xr.DataArray(
        epoch_values,
@@ -626,20 +632,33 @@ def process_swapi_science(

    # Add other support data
    dataset["sweep_table"] = xr.DataArray(
-        good_sweep_sci["sweep_table"].data.reshape(
+        good_sweep_sci["sweep_table"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
        name="sweep_table",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("sweep_table"),
    )
    dataset["plan_id"] = xr.DataArray(
-        good_sweep_sci["plan_id"].data.reshape(
+        good_sweep_sci["plan_id"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
        name="plan_id",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("plan_id"),
    )
    # Store start time for L3 purposes per SWAPI requests
+    # Per SWAPI request, convert start time of sweep to UTC time.
+    sci_start_time = met_to_utc(
+        ttj2000ns_to_met(
+            good_sweep_sci["epoch"].data.reshape(
+                total_full_sweeps, NUM_PACKETS_PER_SWEEP
+            )[:, 0]
+        ),
+        precision=0,
+    )
    dataset["sci_start_time"] = xr.DataArray(
-
+        sci_start_time,
        name="sci_start_time",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("sci_start_time"),
@@ -650,7 +669,9 @@ def process_swapi_science(
    # updated every 6th step. This is used in L2 to calculate last 9 fine
    # energy steps.
    dataset["esa_lvl5"] = xr.DataArray(
-        good_sweep_sci["esa_lvl5"].data.reshape(
+        good_sweep_sci["esa_lvl5"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 11],
        name="esa_lvl5",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("esa_lvl5"),
@@ -661,19 +682,25 @@ def process_swapi_science(
    # SWP_HK.FPGA_TYPE - Type number of the FPGA
    # SWP_HK.FPGA_REV - Revision number of the FPGA
    dataset["lut_choice"] = xr.DataArray(
-        good_sweep_hk_data["lut_choice"].data.reshape(
+        good_sweep_hk_data["lut_choice"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
        name="lut_choice",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("lut_choice"),
    )
    dataset["fpga_type"] = xr.DataArray(
-        good_sweep_hk_data["fpga_type"].data.reshape(
+        good_sweep_hk_data["fpga_type"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
        name="fpga_type",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("fpga_type"),
    )
    dataset["fpga_rev"] = xr.DataArray(
-        good_sweep_hk_data["fpga_rev"].data.reshape(
+        good_sweep_hk_data["fpga_rev"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
        name="fpga_rev",
        dims=["epoch"],
        attrs=cdf_manager.get_variable_attributes("fpga_rev"),
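Most of the SWAPI L1 changes swap bare magic numbers in `reshape` calls for the two constants newly imported from `imap_processing.swapi.constants` (the +4-line file in the list above). From the surrounding comments (12 packets per sweep, the 7th packet at index 6, a 72-value sweep built from 6 CEMs), those constants are presumably `NUM_PACKETS_PER_SWEEP = 12` and `NUM_ENERGY_STEPS = 72`; the sketch below uses those assumed values to show the per-sweep reshape/select pattern:

```python
import numpy as np

# Assumed values; the constants module itself is not shown in this diff.
NUM_PACKETS_PER_SWEEP = 12
NUM_ENERGY_STEPS = 72

# Toy epoch data: 2 full sweeps x 12 packets
epoch = np.arange(2 * NUM_PACKETS_PER_SWEEP)
total_full_sweeps = epoch.size // NUM_PACKETS_PER_SWEEP

per_sweep = epoch.reshape(total_full_sweeps, NUM_PACKETS_PER_SWEEP)
center_epoch = per_sweep[:, 6]  # 7th packet -> sweep center time (epoch)
start_epoch = per_sweep[:, 0]   # 1st packet -> sweep start time (sci_start_time)
print(center_epoch, start_epoch)  # [ 6 18] [ 0 12]
```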
imap_processing/swapi/l2/swapi_l2.py
CHANGED

@@ -8,6 +8,7 @@ import pandas as pd
 import xarray as xr

 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
+from imap_processing.swapi.constants import NUM_ENERGY_STEPS

 logger = logging.getLogger(__name__)

@@ -72,15 +73,28 @@ def solve_full_sweep_energy(
        (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
    ]
    if subset.empty:
-
-
+        # Get the earliest timestamp available
+        earliest_time = esa_table_df["timestamp"].min()
+
+        # Find the sweep's ESA data for the earliest time and sweep_id
+        earliest_subset = esa_table_df[
+            (esa_table_df["timestamp"] == earliest_time)
+            & (esa_table_df["Sweep #"] == sweep_id)
+        ]
+        if earliest_subset.empty:
+            raise ValueError(
+                f"No matching ESA table entry found for sweep ID {sweep_id} "
+                f"at time {time}, and no entries found for earliest time "
+                f"{earliest_time}."
+            )
+        subset = earliest_subset

    # Subset data can contain multiple 72 energy values with last 9 fine energies
    # with 'Solve' value. We need to sort by time and ESA step to maintain correct
    # order. Then take the last group of 72 steps values and select first 63
    # values only.
    subset = subset.sort_values(["timestamp", "ESA Step #"])
-    grouped = subset["Energy"].values.reshape(-1,
+    grouped = subset["Energy"].values.reshape(-1, NUM_ENERGY_STEPS)
    first_63 = grouped[-1, :63]
    first_63_energies.append(first_63)

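The new branch in `solve_full_sweep_energy` falls back to the earliest ESA table entry for the requested sweep ID when no entry is old enough for the given time, and only raises if even that lookup comes back empty. A toy pandas illustration of the same selection logic (column names mirror the diff; the data is made up):

```python
import pandas as pd

esa_table_df = pd.DataFrame(
    {
        "timestamp": [100, 100, 200, 200],
        "Sweep #": [1, 2, 1, 2],
        "Energy": [10.0, 20.0, 11.0, 21.0],
    }
)
time, sweep_id = 50, 1  # requested time predates every table entry

subset = esa_table_df[
    (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
]
if subset.empty:
    # Fall back to the earliest timestamp available for this sweep ID
    earliest_time = esa_table_df["timestamp"].min()
    subset = esa_table_df[
        (esa_table_df["timestamp"] == earliest_time)
        & (esa_table_df["Sweep #"] == sweep_id)
    ]
print(subset)  # the timestamp-100, sweep-1 row
```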
imap_processing/ultra/l1a/ultra_l1a.py
CHANGED

@@ -43,7 +43,7 @@ logger = logging.getLogger(__name__)


 def ultra_l1a(  # noqa: PLR0912
-    packet_file: str, apid_input: int | None = None
+    packet_file: str, apid_input: int | None = None, create_derived_l1b: bool = False
 ) -> list[xr.Dataset]:
    """
    Will process ULTRA L0 data into L1A CDF files at output_filepath.
@@ -54,6 +54,8 @@ def ultra_l1a(  # noqa: PLR0912
        Path to the CCSDS data packet file.
    apid_input : Optional[int]
        Optional apid.
+    create_derived_l1b : bool
+        Whether to create the l1b datasets with derived values.

    Returns
    -------
@@ -64,7 +66,17 @@ def ultra_l1a(  # noqa: PLR0912
        f"{imap_module_directory}/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml"
    )

+    # Keep a list to track the two versions, l1a and l1b with the derived values.
+    decommutated_packet_datasets = []
    datasets_by_apid = packet_file_to_datasets(packet_file, xtce)
+    decommutated_packet_datasets.append(datasets_by_apid)
+    if create_derived_l1b:
+        # For the housekeeping products, we can create the l1b at the same time
+        # as the l1a since there is no additional processing needed.
+        datasets_by_apid = packet_file_to_datasets(
+            packet_file, xtce, use_derived_value=True
+        )
+        decommutated_packet_datasets.append(datasets_by_apid)

    output_datasets = []

@@ -109,77 +121,114 @@ def ultra_l1a(  # noqa: PLR0912
    attr_mgr.add_instrument_global_attrs("ultra")
    attr_mgr.add_instrument_variable_attrs("ultra", "l1a")

-    for
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    ULTRA_ENERGY_RATES.
-
-
-
-
+    for i, datasets_by_apid in enumerate(decommutated_packet_datasets):
+        for apid in apids:
+            if apid in ULTRA_AUX.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                gattr_key = ULTRA_AUX.logical_source[ULTRA_AUX.apid.index(apid)]
+            elif apid in all_l1a_image_apids:
+                packet_props = all_l1a_image_apids[apid]
+                decom_ultra_dataset = process_ultra_tof(
+                    datasets_by_apid[apid], packet_props
+                )
+                gattr_key = packet_props.logical_source[packet_props.apid.index(apid)]
+            elif apid in ULTRA_RATES.apid:
+                decom_ultra_dataset = process_ultra_rates(datasets_by_apid[apid])
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("fastdata_00")
+                gattr_key = ULTRA_RATES.logical_source[ULTRA_RATES.apid.index(apid)]
+            elif apid in ULTRA_ENERGY_RATES.apid:
+                decom_ultra_dataset = process_ultra_energy_rates(datasets_by_apid[apid])
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("ratedata")
+                gattr_key = ULTRA_ENERGY_RATES.logical_source[
+                    ULTRA_ENERGY_RATES.apid.index(apid)
+                ]
+            elif apid in all_event_apids:
+                # We don't want to process the event l1b datasets since those l1b
+                # products need more information
+                if i == 1:
+                    continue
+                decom_ultra_dataset = process_ultra_events(datasets_by_apid[apid], apid)
+                gattr_key = all_event_apids[apid]
+                # Add coordinate attributes
+                attrs = attr_mgr.get_variable_attributes("event_id")
+                decom_ultra_dataset.coords["event_id"].attrs.update(attrs)
+            elif apid in ULTRA_ENERGY_SPECTRA.apid:
+                decom_ultra_dataset = process_ultra_energy_spectra(
+                    datasets_by_apid[apid]
+                )
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("compdata")
+                gattr_key = ULTRA_ENERGY_SPECTRA.logical_source[
+                    ULTRA_ENERGY_SPECTRA.apid.index(apid)
+                ]
+            elif apid in ULTRA_MACROS_CHECKSUM.apid:
+                decom_ultra_dataset = process_ultra_macros_checksum(
+                    datasets_by_apid[apid]
+                )
+                gattr_key = ULTRA_MACROS_CHECKSUM.logical_source[
+                    ULTRA_MACROS_CHECKSUM.apid.index(apid)
+                ]
+            elif apid in ULTRA_HK.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                gattr_key = ULTRA_HK.logical_source[ULTRA_HK.apid.index(apid)]
+            elif apid in ULTRA_CMD_TEXT.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                decoded_strings = [
+                    s.decode("ascii").rstrip("\x00")
+                    for s in decom_ultra_dataset["text"].values
+                ]
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("text")
+                decom_ultra_dataset["text"] = xr.DataArray(
+                    decoded_strings,
+                    dims=["epoch"],
+                    coords={"epoch": decom_ultra_dataset["epoch"]},
+                )
+                gattr_key = ULTRA_CMD_TEXT.logical_source[
+                    ULTRA_CMD_TEXT.apid.index(apid)
+                ]
+            elif apid in ULTRA_CMD_ECHO.apid:
+                decom_ultra_dataset = process_ultra_cmd_echo(datasets_by_apid[apid])
+                gattr_key = ULTRA_CMD_ECHO.logical_source[
+                    ULTRA_CMD_ECHO.apid.index(apid)
+                ]
+            else:
+                logger.error(f"APID {apid} not recognized.")
+                continue
+
+            decom_ultra_dataset.attrs.update(attr_mgr.get_global_attributes(gattr_key))
+
+            if i == 1:
+                # Derived values dataset at l1b
+                # We already have the l1a attributes, just update the l1a -> l1b
+                # in the metadata.
+                decom_ultra_dataset.attrs["Data_type"] = decom_ultra_dataset.attrs[
+                    "Data_type"
+                ].replace("1A", "1B")
+                decom_ultra_dataset.attrs["Logical_source"] = decom_ultra_dataset.attrs[
+                    "Logical_source"
+                ].replace("l1a", "l1b")
+                decom_ultra_dataset.attrs["Logical_source_description"] = (
+                    decom_ultra_dataset.attrs["Logical_source_description"].replace(
+                        "1A", "1B"
+                    )
+                )
+
+            # Add data variable attributes
+            for key in decom_ultra_dataset.data_vars:
+                attrs = attr_mgr.get_variable_attributes(key.lower())
+                decom_ultra_dataset.data_vars[key].attrs.update(attrs)
+                if i == 1:
+                    # For l1b datasets, the FILLVAL and VALIDMIN/MAX may be
+                    # different datatypes, so we can't use them directly from l1a.
+                    # just remove them for now since we don't really have a need for
+                    # for them currently.
+                    for attr_key in ["FILLVAL", "VALIDMIN", "VALIDMAX"]:
+                        if attr_key in decom_ultra_dataset.data_vars[key].attrs:
+                            decom_ultra_dataset.data_vars[key].attrs.pop(attr_key)
+
            # Add coordinate attributes
-            attrs = attr_mgr.get_variable_attributes("
-            decom_ultra_dataset.coords["
-
-            decom_ultra_dataset
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("compdata")
-            gattr_key = ULTRA_ENERGY_SPECTRA.logical_source[
-                ULTRA_ENERGY_SPECTRA.apid.index(apid)
-            ]
-        elif apid in ULTRA_MACROS_CHECKSUM.apid:
-            decom_ultra_dataset = process_ultra_macros_checksum(datasets_by_apid[apid])
-            gattr_key = ULTRA_MACROS_CHECKSUM.logical_source[
-                ULTRA_MACROS_CHECKSUM.apid.index(apid)
-            ]
-        elif apid in ULTRA_HK.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            gattr_key = ULTRA_HK.logical_source[ULTRA_HK.apid.index(apid)]
-        elif apid in ULTRA_CMD_TEXT.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            decoded_strings = [
-                s.decode("ascii").rstrip("\x00")
-                for s in decom_ultra_dataset["text"].values
-            ]
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("text")
-            decom_ultra_dataset["text"] = xr.DataArray(
-                decoded_strings,
-                dims=["epoch"],
-                coords={"epoch": decom_ultra_dataset["epoch"]},
-            )
-            gattr_key = ULTRA_CMD_TEXT.logical_source[ULTRA_CMD_TEXT.apid.index(apid)]
-        elif apid in ULTRA_CMD_ECHO.apid:
-            decom_ultra_dataset = process_ultra_cmd_echo(datasets_by_apid[apid])
-            gattr_key = ULTRA_CMD_ECHO.logical_source[ULTRA_CMD_ECHO.apid.index(apid)]
-        else:
-            logger.error(f"APID {apid} not recognized.")
-            continue
-
-        decom_ultra_dataset.attrs.update(attr_mgr.get_global_attributes(gattr_key))
-
-        # Add data variable attributes
-        for key in decom_ultra_dataset.data_vars:
-            attrs = attr_mgr.get_variable_attributes(key.lower())
-            decom_ultra_dataset.data_vars[key].attrs.update(attrs)
-
-        # Add coordinate attributes
-        attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
-        decom_ultra_dataset.coords["epoch"].attrs.update(attrs)
-
-        output_datasets.append(decom_ultra_dataset)
+            attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
+            decom_ultra_dataset.coords["epoch"].attrs.update(attrs)
+
+            output_datasets.append(decom_ultra_dataset)

    return output_datasets
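With the reworked loop, one call can now emit both the raw L1A datasets and, when `create_derived_l1b=True`, companion datasets decoded with `use_derived_value=True` whose `Data_type`/`Logical_source` metadata is rewritten from l1a to l1b (event APIDs are skipped in that second pass). A usage sketch; the packet path is a placeholder:

```python
from imap_processing.ultra.l1a.ultra_l1a import ultra_l1a

# Sketch only: "ultra_packets.pkts" is a placeholder CCSDS packet file.
datasets = ultra_l1a("ultra_packets.pkts", create_derived_l1b=True)

for ds in datasets:
    # L1A entries keep their original Logical_source; derived copies read l1b.
    print(ds.attrs["Logical_source"], ds.attrs["Data_type"])
```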
imap_processing/ultra/l1b/de.py
CHANGED

@@ -5,10 +5,13 @@ import xarray as xr

 from imap_processing.cdf.utils import parse_filename_like
 from imap_processing.quality_flags import (
+    ImapAttitudeUltraFlags,
     ImapDEOutliersUltraFlags,
     ImapDEScatteringUltraFlags,
 )
 from imap_processing.spice.geometry import SpiceFrame
+from imap_processing.spice.repoint import get_repoint_data
+from imap_processing.spice.time import et_to_met
 from imap_processing.ultra.l1b.lookup_utils import get_geometric_factor
 from imap_processing.ultra.l1b.ultra_l1b_annotated import (
     get_annotated_particle_velocity,
@@ -74,6 +77,10 @@ def calculate_de(
    spin_number = get_spin_number(
        de_dataset["shcoarse"].values, de_dataset["spin"].values
    )
+    repoint_id = de_dataset.attrs.get("Repointing", None)
+    if repoint_id is not None:
+        repoint_id = int(repoint_id.replace("repoint", ""))
+
    de_dict["spin"] = spin_number

    # Add already populated fields.
@@ -311,7 +318,19 @@ def calculate_de(
    ultra_frame = getattr(SpiceFrame, f"IMAP_ULTRA_{sensor}")

    # Account for counts=0 (event times have FILL value)
-    valid_events = event_times != FILLVAL_FLOAT32
+    valid_events = (event_times != FILLVAL_FLOAT32).copy()
+    # TODO - find a better solution than filtering out data from repointings?
+    if repoint_id is not None:
+        in_pointing = calculate_events_in_pointing(
+            repoint_id, event_times[valid_events]
+        )
+        # Update quality flags for valid events that are not in the pointing
+        quality_flags[valid_events][~in_pointing] |= (
+            ImapAttitudeUltraFlags.DURINGREPOINT.value
+        )
+        # Update valid_events to only include times within a pointing
+        valid_events[valid_events] &= in_pointing
+
    if np.any(valid_events):
        (
            sc_velocity[valid_events],
@@ -371,3 +390,40 @@ def calculate_de(
    dataset = create_dataset(de_dict, name, "l1b")

    return dataset
+
+
+def calculate_events_in_pointing(
+    repoint_id: int,
+    event_times: np.ndarray,
+) -> np.ndarray:
+    """
+    Calculate boolean array of events within a pointing.
+
+    Parameters
+    ----------
+    repoint_id : int
+        The repointing ID.
+    event_times : np.ndarray
+        Array of event times in ET.
+
+    Returns
+    -------
+    in_pointing : np.ndarray
+        Boolean array indicating whether each event is within the pointing period
+        combined with the valid_events mask.
+    """
+    # TODO add this as a helper function in repoint.py
+    repoint_data = get_repoint_data()
+    # To find the pointing start and stop, get the end of the current repointing
+    # and the start of the next repointing
+    repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id]
+    next_repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id + 1]
+    pointing_start_met = repoint_row["repoint_end_met"].values[0]
+    pointing_end_met = next_repoint_row["repoint_start_met"].values[0]
+
+    # Check which events are within the pointing
+    in_pointing = (et_to_met(event_times) >= pointing_start_met) & (
+        et_to_met(event_times) <= pointing_end_met
+    )
+
+    return in_pointing