imap-processing 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +18 -0
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +101 -258
  4. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
  5. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +12 -2
  6. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +1 -8
  7. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +16 -5
  8. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +27 -25
  9. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +16 -16
  10. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +2 -2
  11. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  12. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +12 -0
  13. imap_processing/cdf/utils.py +2 -2
  14. imap_processing/cli.py +4 -16
  15. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  16. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  17. imap_processing/codice/codice_l1b.py +80 -97
  18. imap_processing/codice/codice_l2.py +270 -103
  19. imap_processing/codice/codice_new_l1a.py +64 -0
  20. imap_processing/codice/constants.py +37 -2
  21. imap_processing/codice/utils.py +270 -0
  22. imap_processing/ena_maps/ena_maps.py +51 -39
  23. imap_processing/ena_maps/utils/corrections.py +196 -14
  24. imap_processing/ena_maps/utils/naming.py +3 -1
  25. imap_processing/hi/hi_l1c.py +57 -19
  26. imap_processing/hi/hi_l2.py +89 -36
  27. imap_processing/ialirt/calculate_ingest.py +19 -1
  28. imap_processing/ialirt/constants.py +12 -6
  29. imap_processing/ialirt/generate_coverage.py +6 -1
  30. imap_processing/ialirt/l0/parse_mag.py +1 -0
  31. imap_processing/ialirt/l0/process_hit.py +1 -0
  32. imap_processing/ialirt/l0/process_swapi.py +1 -0
  33. imap_processing/ialirt/l0/process_swe.py +2 -0
  34. imap_processing/ialirt/process_ephemeris.py +6 -2
  35. imap_processing/ialirt/utils/create_xarray.py +3 -2
  36. imap_processing/lo/l1b/lo_l1b.py +12 -2
  37. imap_processing/lo/l1c/lo_l1c.py +4 -4
  38. imap_processing/lo/l2/lo_l2.py +101 -8
  39. imap_processing/quality_flags.py +1 -0
  40. imap_processing/swapi/constants.py +4 -0
  41. imap_processing/swapi/l1/swapi_l1.py +47 -20
  42. imap_processing/swapi/l2/swapi_l2.py +17 -3
  43. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  44. imap_processing/ultra/l1b/de.py +57 -1
  45. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  46. imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
  47. imap_processing/ultra/l1c/helio_pset.py +34 -8
  48. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  49. imap_processing/ultra/l1c/spacecraft_pset.py +13 -7
  50. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  51. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +79 -20
  52. imap_processing/ultra/l2/ultra_l2.py +2 -2
  53. imap_processing/ultra/utils/ultra_l1_utils.py +6 -0
  54. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/METADATA +1 -1
  55. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/RECORD +58 -54
  56. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/LICENSE +0 -0
  57. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/WHEEL +0 -0
  58. {imap_processing-1.0.1.dist-info → imap_processing-1.0.3.dist-info}/entry_points.txt +0 -0
imap_processing/swapi/constants.py (new file)
@@ -0,0 +1,4 @@
+"""Constants for SWAPI processing."""
+
+NUM_PACKETS_PER_SWEEP = 12
+NUM_ENERGY_STEPS = 72
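For orientation (an inference, not something the diff states directly): SWAPI packs one sweep into NUM_PACKETS_PER_SWEEP packets, and the repeat/reshape logic in swapi_l1.py below implies six samples per packet, which is where the 72 energy steps come from. A minimal sketch of that relationship (SAMPLES_PER_PACKET is a hypothetical name):

```python
NUM_PACKETS_PER_SWEEP = 12
SAMPLES_PER_PACKET = 6  # hypothetical; inferred from np.repeat(..., 6) in swapi_l1.py
NUM_ENERGY_STEPS = NUM_PACKETS_PER_SWEEP * SAMPLES_PER_PACKET  # 72
```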
imap_processing/swapi/l1/swapi_l1.py
@@ -12,6 +12,8 @@ from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.quality_flags import SWAPIFlags
+from imap_processing.spice.time import met_to_utc, ttj2000ns_to_met
+from imap_processing.swapi.constants import NUM_ENERGY_STEPS, NUM_PACKETS_PER_SWEEP
 from imap_processing.swapi.swapi_utils import SWAPIAPID, SWAPIMODE
 from imap_processing.utils import packet_file_to_datasets

@@ -41,10 +43,10 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
     """
     # PLAN_ID for current sweep should all be one value and
     # SWEEP_TABLE should all be one value.
-    plan_id = full_sweep_sci["plan_id"].data.reshape(-1, 12)
-    sweep_table = full_sweep_sci["sweep_table"].data.reshape(-1, 12)
+    plan_id = full_sweep_sci["plan_id"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)
+    sweep_table = full_sweep_sci["sweep_table"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)

-    mode = full_sweep_sci["mode"].data.reshape(-1, 12)
+    mode = full_sweep_sci["mode"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)

     sweep_indices = (sweep_table == sweep_table[:, 0, None]).all(axis=1)
     plan_id_indices = (plan_id == plan_id[:, 0, None]).all(axis=1)
@@ -62,10 +64,10 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
     # From this: [0 24]
     # To this: [[ 0  1  2  3  4  5  6  7  8  9 10 11]
     #           [24 25 26 27 28 29 30 31 32 33 34 35]]
-    cycle_start_indices = np.where(bad_data_indices == 0)[0] * 12
-    bad_cycle_indices = cycle_start_indices[..., None] + np.arange(12)[
-        None, ...
-    ].reshape(-1)
+    cycle_start_indices = np.where(bad_data_indices == 0)[0] * NUM_PACKETS_PER_SWEEP
+    bad_cycle_indices = cycle_start_indices[..., None] + np.arange(
+        NUM_PACKETS_PER_SWEEP
+    )[None, ...].reshape(-1)

     logger.debug("Cycle data was bad due to one of below reasons:")
     logger.debug(
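The rewritten bad_cycle_indices expression is a standard NumPy broadcast: each bad sweep's start index is added to the 0..11 packet offsets, expanding sweep indices into packet indices. A runnable toy version (input values invented; the `[None, ...].reshape(-1)` round trip in the diff is a no-op on a 1-D arange, so it is omitted here):

```python
import numpy as np

NUM_PACKETS_PER_SWEEP = 12
# Suppose the consistency checks marked sweeps 0 and 2 as bad (False = bad).
bad_data_indices = np.array([False, True, False])

# Packet index at which each bad sweep starts: [0, 24]
cycle_start_indices = np.where(bad_data_indices == 0)[0] * NUM_PACKETS_PER_SWEEP

# Broadcast the (2, 1) starts against the (12,) offsets to enumerate every
# packet belonging to a bad sweep.
bad_cycle_indices = cycle_start_indices[..., None] + np.arange(NUM_PACKETS_PER_SWEEP)
print(bad_cycle_indices)
# [[ 0  1  2  3  4  5  6  7  8  9 10 11]
#  [24 25 26 27 28 29 30 31 32 33 34 35]]
```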
@@ -162,7 +164,7 @@ def find_sweep_starts(packets: xr.Dataset) -> npt.NDArray:
     indices_start : numpy.ndarray
         Array of indices of start cycle.
     """
-    if packets["shcoarse"].size < 12:
+    if packets["shcoarse"].size < NUM_PACKETS_PER_SWEEP:
         return np.array([], np.int64)

     # calculate time difference between consecutive sweep
@@ -387,7 +389,7 @@ def process_sweep_data(full_sweep_sci: xr.Dataset, cem_prefix: str) -> xr.Dataset:
     #  [ 2  3  4  5  6  7  8  9 10 11 12 13]]]
     # In other word, we grouped each cem's
     # data by full sweep.
-    current_cem_counts = current_cem_counts.reshape(6, -1, 12)
+    current_cem_counts = current_cem_counts.reshape(6, -1, NUM_PACKETS_PER_SWEEP)

     # Then, we go from above
     # to this final output:
@@ -421,7 +423,7 @@ def process_sweep_data(full_sweep_sci: xr.Dataset, cem_prefix: str) -> xr.Dataset:
     all_cem_data = np.stack(current_cem_counts, axis=-1)
     # This line just flatten the inner most array to
     # (total_full_sweeps x 72)
-    all_cem_data = all_cem_data.reshape(-1, 72)
+    all_cem_data = all_cem_data.reshape(-1, NUM_ENERGY_STEPS)
     return all_cem_data
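The reshape/stack sequence in process_sweep_data is easier to see on toy data: grouping each of the six count streams by sweep, then stacking on the last axis, interleaves the six samples of every packet into one 72-step row per sweep. A self-contained sketch (array contents invented):

```python
import numpy as np

NUM_PACKETS_PER_SWEEP, NUM_ENERGY_STEPS = 12, 72
# Six count streams over 2 full sweeps (24 packets per stream).
current_cem_counts = np.arange(6 * 2 * NUM_PACKETS_PER_SWEEP).reshape(6, -1)

# Group each stream's data by full sweep: (6, total_full_sweeps, 12).
current_cem_counts = current_cem_counts.reshape(6, -1, NUM_PACKETS_PER_SWEEP)

# Stack on the last axis -> (sweeps, 12, 6), then flatten each sweep to 72 steps.
all_cem_data = np.stack(current_cem_counts, axis=-1).reshape(-1, NUM_ENERGY_STEPS)
print(all_cem_data.shape)  # (2, 72)
```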
 
@@ -490,7 +492,9 @@ def process_swapi_science(
     # ===================================================================
     # Quality flags
     # ===================================================================
-    quality_flags_data = np.zeros((total_full_sweeps, 72), dtype=np.uint16)
+    quality_flags_data = np.zeros(
+        (total_full_sweeps, NUM_ENERGY_STEPS), dtype=np.uint16
+    )

     # Add science data quality flags
     # Have to match datatype to bitwise OR
@@ -547,7 +551,7 @@ def process_swapi_science(

     for flag_name in hk_flags_name:
         current_flag = np.repeat(good_sweep_hk_data[flag_name.lower()].data, 6).reshape(
-            -1, 72
+            -1, NUM_ENERGY_STEPS
         )
         # Use getattr to dynamically access the flag in SWAPIFlags class
         flag_to_set = getattr(SWAPIFlags, flag_name)
@@ -568,7 +572,9 @@ def process_swapi_science(
     # Use center time for epoch to line up with mission requests. Center time
     # of SWAPI is time of 7th packet(aka SEQ_NUMBER == 6) creation time at the
     # beginning of 7th packet.
-    epoch_values = good_sweep_sci["epoch"].data.reshape(total_full_sweeps, 12)[:, 6]
+    epoch_values = good_sweep_sci["epoch"].data.reshape(
+        total_full_sweeps, NUM_PACKETS_PER_SWEEP
+    )[:, 6]

     epoch_time = xr.DataArray(
         epoch_values,
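The center-time selection reads cleanly on toy data: reshaping the per-packet epochs to (sweeps, 12) and taking column 6 picks the 7th packet of every sweep. A short illustration (epoch values invented):

```python
import numpy as np

NUM_PACKETS_PER_SWEEP = 12
epochs = np.arange(3 * NUM_PACKETS_PER_SWEEP)  # packet epochs for 3 sweeps
# Column 6 is the 7th packet (SEQ_NUMBER == 6), used as the sweep's center time.
print(epochs.reshape(-1, NUM_PACKETS_PER_SWEEP)[:, 6])  # [ 6 18 30]
```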
@@ -626,20 +632,33 @@ def process_swapi_science(

     # Add other support data
     dataset["sweep_table"] = xr.DataArray(
-        good_sweep_sci["sweep_table"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_sci["sweep_table"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="sweep_table",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("sweep_table"),
     )
     dataset["plan_id"] = xr.DataArray(
-        good_sweep_sci["plan_id"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_sci["plan_id"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="plan_id",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("plan_id"),
     )
     # Store start time for L3 purposes per SWAPI requests
+    # Per SWAPI request, convert start time of sweep to UTC time.
+    sci_start_time = met_to_utc(
+        ttj2000ns_to_met(
+            good_sweep_sci["epoch"].data.reshape(
+                total_full_sweeps, NUM_PACKETS_PER_SWEEP
+            )[:, 0]
+        ),
+        precision=0,
+    )
     dataset["sci_start_time"] = xr.DataArray(
-        good_sweep_sci["epoch"].data.reshape(total_full_sweeps, 12)[:, 0],
+        sci_start_time,
         name="sci_start_time",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("sci_start_time"),
@@ -650,7 +669,9 @@ def process_swapi_science(
     # updated every 6th step. This is used in L2 to calculate last 9 fine
     # energy steps.
     dataset["esa_lvl5"] = xr.DataArray(
-        good_sweep_sci["esa_lvl5"].data.reshape(total_full_sweeps, 12)[:, 11],
+        good_sweep_sci["esa_lvl5"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 11],
         name="esa_lvl5",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("esa_lvl5"),
@@ -661,19 +682,25 @@ def process_swapi_science(
     # SWP_HK.FPGA_TYPE - Type number of the FPGA
     # SWP_HK.FPGA_REV - Revision number of the FPGA
     dataset["lut_choice"] = xr.DataArray(
-        good_sweep_hk_data["lut_choice"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_hk_data["lut_choice"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="lut_choice",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("lut_choice"),
     )
     dataset["fpga_type"] = xr.DataArray(
-        good_sweep_hk_data["fpga_type"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_hk_data["fpga_type"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="fpga_type",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("fpga_type"),
     )
     dataset["fpga_rev"] = xr.DataArray(
-        good_sweep_hk_data["fpga_rev"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_hk_data["fpga_rev"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="fpga_rev",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("fpga_rev"),
imap_processing/swapi/l2/swapi_l2.py
@@ -8,6 +8,7 @@ import pandas as pd
 import xarray as xr

 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
+from imap_processing.swapi.constants import NUM_ENERGY_STEPS

 logger = logging.getLogger(__name__)

@@ -72,15 +73,28 @@ def solve_full_sweep_energy(
             (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
         ]
         if subset.empty:
-            first_63_energies.append(np.full(63, np.nan, dtype=np.float64))
-            continue
+            # Get the earliest timestamp available
+            earliest_time = esa_table_df["timestamp"].min()
+
+            # Find the sweep's ESA data for the earliest time and sweep_id
+            earliest_subset = esa_table_df[
+                (esa_table_df["timestamp"] == earliest_time)
+                & (esa_table_df["Sweep #"] == sweep_id)
+            ]
+            if earliest_subset.empty:
+                raise ValueError(
+                    f"No matching ESA table entry found for sweep ID {sweep_id} "
+                    f"at time {time}, and no entries found for earliest time "
+                    f"{earliest_time}."
+                )
+            subset = earliest_subset

         # Subset data can contain multiple 72 energy values with last 9 fine energies
         # with 'Solve' value. We need to sort by time and ESA step to maintain correct
         # order. Then take the last group of 72 steps values and select first 63
         # values only.
         subset = subset.sort_values(["timestamp", "ESA Step #"])
-        grouped = subset["Energy"].values.reshape(-1, 72)
+        grouped = subset["Energy"].values.reshape(-1, NUM_ENERGY_STEPS)
         first_63 = grouped[-1, :63]
         first_63_energies.append(first_63)
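The behavior change here: where L2 previously emitted a NaN row when no ESA-table entry predated the sweep, it now falls back to the earliest entry for that sweep ID and raises only if none exists at all. A toy pandas sketch of the same fallback pattern (table contents hypothetical, not the flight ESA table):

```python
import pandas as pd

esa_table_df = pd.DataFrame(
    {"timestamp": [10, 10, 20], "Sweep #": [1, 2, 1], "Energy": [100.0, 200.0, 110.0]}
)
time, sweep_id = 5, 1  # query time predates every table entry

subset = esa_table_df[
    (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
]
if subset.empty:
    earliest_time = esa_table_df["timestamp"].min()
    subset = esa_table_df[
        (esa_table_df["timestamp"] == earliest_time)
        & (esa_table_df["Sweep #"] == sweep_id)
    ]
    if subset.empty:
        raise ValueError(f"No ESA table entry for sweep ID {sweep_id}")
print(subset)  # falls back to the timestamp-10 row for sweep 1
```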
 
imap_processing/ultra/l1a/ultra_l1a.py
@@ -43,7 +43,7 @@ logger = logging.getLogger(__name__)


 def ultra_l1a(  # noqa: PLR0912
-    packet_file: str, apid_input: int | None = None
+    packet_file: str, apid_input: int | None = None, create_derived_l1b: bool = False
 ) -> list[xr.Dataset]:
     """
     Will process ULTRA L0 data into L1A CDF files at output_filepath.
@@ -54,6 +54,8 @@ def ultra_l1a(  # noqa: PLR0912
         Path to the CCSDS data packet file.
     apid_input : Optional[int]
         Optional apid.
+    create_derived_l1b : bool
+        Whether to create the l1b datasets with derived values.

     Returns
     -------
@@ -64,7 +66,17 @@ def ultra_l1a(  # noqa: PLR0912
         f"{imap_module_directory}/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml"
     )

+    # Keep a list to track the two versions, l1a and l1b with the derived values.
+    decommutated_packet_datasets = []
     datasets_by_apid = packet_file_to_datasets(packet_file, xtce)
+    decommutated_packet_datasets.append(datasets_by_apid)
+    if create_derived_l1b:
+        # For the housekeeping products, we can create the l1b at the same time
+        # as the l1a since there is no additional processing needed.
+        datasets_by_apid = packet_file_to_datasets(
+            packet_file, xtce, use_derived_value=True
+        )
+        decommutated_packet_datasets.append(datasets_by_apid)

     output_datasets = []
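A usage sketch of the new flag (the packet file path is hypothetical): with create_derived_l1b=True the returned list holds the raw l1a datasets followed by derived-value l1b copies, except for event APIDs, which are skipped on the derived pass.

```python
from imap_processing.ultra.l1a.ultra_l1a import ultra_l1a

datasets = ultra_l1a("imap_ultra_l0_raw.pkts", create_derived_l1b=True)
l1b_like = [ds for ds in datasets if "l1b" in ds.attrs.get("Logical_source", "")]
```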
 
@@ -109,77 +121,114 @@ def ultra_l1a(  # noqa: PLR0912
     attr_mgr.add_instrument_global_attrs("ultra")
     attr_mgr.add_instrument_variable_attrs("ultra", "l1a")

-    for apid in apids:
-        if apid in ULTRA_AUX.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            gattr_key = ULTRA_AUX.logical_source[ULTRA_AUX.apid.index(apid)]
-        elif apid in all_l1a_image_apids:
-            packet_props = all_l1a_image_apids[apid]
-            decom_ultra_dataset = process_ultra_tof(
-                datasets_by_apid[apid], packet_props
-            )
-            gattr_key = packet_props.logical_source[packet_props.apid.index(apid)]
-        elif apid in ULTRA_RATES.apid:
-            decom_ultra_dataset = process_ultra_rates(datasets_by_apid[apid])
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("fastdata_00")
-            gattr_key = ULTRA_RATES.logical_source[ULTRA_RATES.apid.index(apid)]
-        elif apid in ULTRA_ENERGY_RATES.apid:
-            decom_ultra_dataset = process_ultra_energy_rates(datasets_by_apid[apid])
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("ratedata")
-            gattr_key = ULTRA_ENERGY_RATES.logical_source[
-                ULTRA_ENERGY_RATES.apid.index(apid)
-            ]
-        elif apid in all_event_apids:
-            decom_ultra_dataset = process_ultra_events(datasets_by_apid[apid], apid)
-            gattr_key = all_event_apids[apid]
+    for i, datasets_by_apid in enumerate(decommutated_packet_datasets):
+        for apid in apids:
+            if apid in ULTRA_AUX.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                gattr_key = ULTRA_AUX.logical_source[ULTRA_AUX.apid.index(apid)]
+            elif apid in all_l1a_image_apids:
+                packet_props = all_l1a_image_apids[apid]
+                decom_ultra_dataset = process_ultra_tof(
+                    datasets_by_apid[apid], packet_props
+                )
+                gattr_key = packet_props.logical_source[packet_props.apid.index(apid)]
+            elif apid in ULTRA_RATES.apid:
+                decom_ultra_dataset = process_ultra_rates(datasets_by_apid[apid])
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("fastdata_00")
+                gattr_key = ULTRA_RATES.logical_source[ULTRA_RATES.apid.index(apid)]
+            elif apid in ULTRA_ENERGY_RATES.apid:
+                decom_ultra_dataset = process_ultra_energy_rates(datasets_by_apid[apid])
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("ratedata")
+                gattr_key = ULTRA_ENERGY_RATES.logical_source[
+                    ULTRA_ENERGY_RATES.apid.index(apid)
+                ]
+            elif apid in all_event_apids:
+                # We don't want to process the event l1b datasets since those l1b
+                # products need more information
+                if i == 1:
+                    continue
+                decom_ultra_dataset = process_ultra_events(datasets_by_apid[apid], apid)
+                gattr_key = all_event_apids[apid]
+                # Add coordinate attributes
+                attrs = attr_mgr.get_variable_attributes("event_id")
+                decom_ultra_dataset.coords["event_id"].attrs.update(attrs)
+            elif apid in ULTRA_ENERGY_SPECTRA.apid:
+                decom_ultra_dataset = process_ultra_energy_spectra(
+                    datasets_by_apid[apid]
+                )
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("compdata")
+                gattr_key = ULTRA_ENERGY_SPECTRA.logical_source[
+                    ULTRA_ENERGY_SPECTRA.apid.index(apid)
+                ]
+            elif apid in ULTRA_MACROS_CHECKSUM.apid:
+                decom_ultra_dataset = process_ultra_macros_checksum(
+                    datasets_by_apid[apid]
+                )
+                gattr_key = ULTRA_MACROS_CHECKSUM.logical_source[
+                    ULTRA_MACROS_CHECKSUM.apid.index(apid)
+                ]
+            elif apid in ULTRA_HK.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                gattr_key = ULTRA_HK.logical_source[ULTRA_HK.apid.index(apid)]
+            elif apid in ULTRA_CMD_TEXT.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                decoded_strings = [
+                    s.decode("ascii").rstrip("\x00")
+                    for s in decom_ultra_dataset["text"].values
+                ]
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("text")
+                decom_ultra_dataset["text"] = xr.DataArray(
+                    decoded_strings,
+                    dims=["epoch"],
+                    coords={"epoch": decom_ultra_dataset["epoch"]},
+                )
+                gattr_key = ULTRA_CMD_TEXT.logical_source[
+                    ULTRA_CMD_TEXT.apid.index(apid)
+                ]
+            elif apid in ULTRA_CMD_ECHO.apid:
+                decom_ultra_dataset = process_ultra_cmd_echo(datasets_by_apid[apid])
+                gattr_key = ULTRA_CMD_ECHO.logical_source[
+                    ULTRA_CMD_ECHO.apid.index(apid)
+                ]
+            else:
+                logger.error(f"APID {apid} not recognized.")
+                continue
+
+            decom_ultra_dataset.attrs.update(attr_mgr.get_global_attributes(gattr_key))
+
+            if i == 1:
+                # Derived values dataset at l1b
+                # We already have the l1a attributes, just update the l1a -> l1b
+                # in the metadata.
+                decom_ultra_dataset.attrs["Data_type"] = decom_ultra_dataset.attrs[
+                    "Data_type"
+                ].replace("1A", "1B")
+                decom_ultra_dataset.attrs["Logical_source"] = decom_ultra_dataset.attrs[
+                    "Logical_source"
+                ].replace("l1a", "l1b")
+                decom_ultra_dataset.attrs["Logical_source_description"] = (
+                    decom_ultra_dataset.attrs["Logical_source_description"].replace(
+                        "1A", "1B"
+                    )
+                )
+
+            # Add data variable attributes
+            for key in decom_ultra_dataset.data_vars:
+                attrs = attr_mgr.get_variable_attributes(key.lower())
+                decom_ultra_dataset.data_vars[key].attrs.update(attrs)
+                if i == 1:
+                    # For l1b datasets, the FILLVAL and VALIDMIN/MAX may be
+                    # different datatypes, so we can't use them directly from l1a.
+                    # Just remove them for now since we don't really have a need
+                    # for them currently.
+                    for attr_key in ["FILLVAL", "VALIDMIN", "VALIDMAX"]:
+                        if attr_key in decom_ultra_dataset.data_vars[key].attrs:
+                            decom_ultra_dataset.data_vars[key].attrs.pop(attr_key)

             # Add coordinate attributes
-            attrs = attr_mgr.get_variable_attributes("event_id")
-            decom_ultra_dataset.coords["event_id"].attrs.update(attrs)
-        elif apid in ULTRA_ENERGY_SPECTRA.apid:
-            decom_ultra_dataset = process_ultra_energy_spectra(datasets_by_apid[apid])
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("compdata")
-            gattr_key = ULTRA_ENERGY_SPECTRA.logical_source[
-                ULTRA_ENERGY_SPECTRA.apid.index(apid)
-            ]
-        elif apid in ULTRA_MACROS_CHECKSUM.apid:
-            decom_ultra_dataset = process_ultra_macros_checksum(datasets_by_apid[apid])
-            gattr_key = ULTRA_MACROS_CHECKSUM.logical_source[
-                ULTRA_MACROS_CHECKSUM.apid.index(apid)
-            ]
-        elif apid in ULTRA_HK.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            gattr_key = ULTRA_HK.logical_source[ULTRA_HK.apid.index(apid)]
-        elif apid in ULTRA_CMD_TEXT.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            decoded_strings = [
-                s.decode("ascii").rstrip("\x00")
-                for s in decom_ultra_dataset["text"].values
-            ]
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("text")
-            decom_ultra_dataset["text"] = xr.DataArray(
-                decoded_strings,
-                dims=["epoch"],
-                coords={"epoch": decom_ultra_dataset["epoch"]},
-            )
-            gattr_key = ULTRA_CMD_TEXT.logical_source[ULTRA_CMD_TEXT.apid.index(apid)]
-        elif apid in ULTRA_CMD_ECHO.apid:
-            decom_ultra_dataset = process_ultra_cmd_echo(datasets_by_apid[apid])
-            gattr_key = ULTRA_CMD_ECHO.logical_source[ULTRA_CMD_ECHO.apid.index(apid)]
-        else:
-            logger.error(f"APID {apid} not recognized.")
-            continue
-
-        decom_ultra_dataset.attrs.update(attr_mgr.get_global_attributes(gattr_key))
-
-        # Add data variable attributes
-        for key in decom_ultra_dataset.data_vars:
-            attrs = attr_mgr.get_variable_attributes(key.lower())
-            decom_ultra_dataset.data_vars[key].attrs.update(attrs)
-
-        # Add coordinate attributes
-        attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
-        decom_ultra_dataset.coords["epoch"].attrs.update(attrs)
-
-        output_datasets.append(decom_ultra_dataset)
+            attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
+            decom_ultra_dataset.coords["epoch"].attrs.update(attrs)
+
+            output_datasets.append(decom_ultra_dataset)

     return output_datasets
imap_processing/ultra/l1b/de.py
@@ -5,10 +5,13 @@ import xarray as xr

 from imap_processing.cdf.utils import parse_filename_like
 from imap_processing.quality_flags import (
+    ImapAttitudeUltraFlags,
     ImapDEOutliersUltraFlags,
     ImapDEScatteringUltraFlags,
 )
 from imap_processing.spice.geometry import SpiceFrame
+from imap_processing.spice.repoint import get_repoint_data
+from imap_processing.spice.time import et_to_met
 from imap_processing.ultra.l1b.lookup_utils import get_geometric_factor
 from imap_processing.ultra.l1b.ultra_l1b_annotated import (
     get_annotated_particle_velocity,
@@ -74,6 +77,10 @@ def calculate_de(
     spin_number = get_spin_number(
         de_dataset["shcoarse"].values, de_dataset["spin"].values
     )
+    repoint_id = de_dataset.attrs.get("Repointing", None)
+    if repoint_id is not None:
+        repoint_id = int(repoint_id.replace("repoint", ""))
+
     de_dict["spin"] = spin_number

     # Add already populated fields.
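The Repointing global attribute is parsed by stripping a "repoint" prefix; a one-line illustration (the exact attribute value format is assumed, not shown in the diff):

```python
repoint_attr = "repoint00005"  # hypothetical value of de_dataset.attrs["Repointing"]
repoint_id = int(repoint_attr.replace("repoint", ""))  # -> 5
```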
@@ -311,7 +318,19 @@ def calculate_de(
     ultra_frame = getattr(SpiceFrame, f"IMAP_ULTRA_{sensor}")

     # Account for counts=0 (event times have FILL value)
-    valid_events = event_times != FILLVAL_FLOAT32
+    valid_events = (event_times != FILLVAL_FLOAT32).copy()
+    # TODO - find a better solution than filtering out data from repointings?
+    if repoint_id is not None:
+        in_pointing = calculate_events_in_pointing(
+            repoint_id, event_times[valid_events]
+        )
+        # Update quality flags for valid events that are not in the pointing
+        quality_flags[valid_events][~in_pointing] |= (
+            ImapAttitudeUltraFlags.DURINGREPOINT.value
+        )
+        # Update valid_events to only include times within a pointing
+        valid_events[valid_events] &= in_pointing
+
     if np.any(valid_events):
         (
             sc_velocity[valid_events],
@@ -371,3 +390,40 @@ def calculate_de(
     dataset = create_dataset(de_dict, name, "l1b")

     return dataset
+
+
+def calculate_events_in_pointing(
+    repoint_id: int,
+    event_times: np.ndarray,
+) -> np.ndarray:
+    """
+    Calculate boolean array of events within a pointing.
+
+    Parameters
+    ----------
+    repoint_id : int
+        The repointing ID.
+    event_times : np.ndarray
+        Array of event times in ET.
+
+    Returns
+    -------
+    in_pointing : np.ndarray
+        Boolean array indicating whether each event is within the pointing period
+        combined with the valid_events mask.
+    """
+    # TODO add this as a helper function in repoint.py
+    repoint_data = get_repoint_data()
+    # To find the pointing start and stop, get the end of the current repointing
+    # and the start of the next repointing
+    repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id]
+    next_repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id + 1]
+    pointing_start_met = repoint_row["repoint_end_met"].values[0]
+    pointing_end_met = next_repoint_row["repoint_start_met"].values[0]
+
+    # Check which events are within the pointing
+    in_pointing = (et_to_met(event_times) >= pointing_start_met) & (
+        et_to_met(event_times) <= pointing_end_met
+    )
+
+    return in_pointing
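The pointing window used above runs from the end of the current repointing to the start of the next one. A self-contained sketch with a toy repoint table (column names taken from the function; values invented):

```python
import numpy as np
import pandas as pd

repoint_data = pd.DataFrame(
    {
        "repoint_id": [5, 6],
        "repoint_start_met": [1000.0, 2000.0],
        "repoint_end_met": [1100.0, 2100.0],
    }
)
repoint_id = 5
# Pointing 5 spans from the end of repoint 5 to the start of repoint 6.
start = repoint_data.loc[
    repoint_data["repoint_id"] == repoint_id, "repoint_end_met"
].values[0]
end = repoint_data.loc[
    repoint_data["repoint_id"] == repoint_id + 1, "repoint_start_met"
].values[0]

event_met = np.array([1050.0, 1500.0, 2050.0])
print((event_met >= start) & (event_met <= end))  # [False  True False]
```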
imap_processing/ultra/l1b/ultra_l1b_annotated.py
@@ -52,7 +52,6 @@ def get_annotated_particle_velocity(
         from_frame=instrument_frame,
         to_frame=spacecraft_frame,
     )
-
     # Particle velocity in the pointing (DPS) frame wrt spacecraft.
     particle_velocity_dps_spacecraft = frame_transform(
         et=time,
imap_processing/ultra/l1b/ultra_l1b_extended.py
@@ -14,7 +14,7 @@ from scipy.interpolate import LinearNDInterpolator, RegularGridInterpolator

 from imap_processing.quality_flags import ImapDEOutliersUltraFlags
 from imap_processing.spice.spin import get_spin_data
-from imap_processing.spice.time import sct_to_et
+from imap_processing.spice.time import met_to_ttj2000ns, ttj2000ns_to_et
 from imap_processing.ultra.constants import UltraConstants
 from imap_processing.ultra.l1b.lookup_utils import (
     get_angular_profiles,
@@ -711,7 +711,7 @@ def get_energy_pulse_height(
     ylut[indices_bottom] = (yb[indices_bottom] / 100 + 82 / 2) * 32 / 82  # mm

     ph_correction_top, updated_flags_top = get_ph_corrected(
-        "ultra45",
+        sensor,
         "tp",
         ancillary_files,
         np.round(xlut[indices_top]),
@@ -720,7 +720,7 @@ def get_energy_pulse_height(
     )
     quality_flags[indices_top] = updated_flags_top
     ph_correction_bottom, updated_flags_bottom = get_ph_corrected(
-        "ultra45",
+        sensor,
         "bt",
         ancillary_files,
         np.round(xlut[indices_bottom]),
@@ -996,6 +996,7 @@ def get_eventtimes(
         t_spin_period_sec * phase_angle/720
     """
     spin_df = get_spin_data()
+
     index = np.searchsorted(spin_df["spin_number"].values, spin)
     spin_starts = (
         spin_df["spin_start_sec_sclk"].values[index]
@@ -1003,10 +1004,13 @@ def get_eventtimes(
     )

     spin_period_sec = spin_df["spin_period_sec"].values[index]
-
     event_times = spin_starts + spin_period_sec * (phase_angle / 720)

-    return sct_to_et(event_times), sct_to_et(spin_starts), spin_period_sec
+    return (
+        ttj2000ns_to_et(met_to_ttj2000ns(event_times)),
+        ttj2000ns_to_et(met_to_ttj2000ns(spin_starts)),
+        spin_period_sec,
+    )


 def interpolate_fwhm(
@@ -1108,7 +1112,9 @@ def get_fwhm(
     return phi_interp, theta_interp


-def get_efficiency_interpolator(ancillary_files: dict) -> RegularGridInterpolator:
+def get_efficiency_interpolator(
+    ancillary_files: dict,
+) -> tuple[RegularGridInterpolator, tuple, tuple]:
     """
     Return a callable function that interpolates efficiency values for each event.

@@ -1119,8 +1125,12 @@ def get_efficiency_interpolator(

     Returns
     -------
-    efficiency : NDArray
-        Interpolated efficiency values.
+    interpolator : RegularGridInterpolator
+        Callable function to interpolate efficiency values.
+    theta_min_max : tuple
+        Minimum and maximum theta values in the lookup table.
+    phi_min_max : tuple
+        Minimum and maximum phi values in the lookup table.
     """
     lookup_table = get_energy_efficiencies(ancillary_files)

@@ -1133,6 +1143,9 @@ def get_efficiency_interpolator(
     efficiency_grid = efficiency_2d.reshape(
         (len(theta_vals), len(phi_vals), len(energy_vals))
     )
+    # Find the min and max values for theta and phi
+    theta_min_max = (theta_vals.min(), theta_vals.max())
+    phi_min_max = (phi_vals.min(), phi_vals.max())

     interpolator = RegularGridInterpolator(
         (theta_vals, phi_vals, energy_vals),
@@ -1141,7 +1154,7 @@ def get_efficiency_interpolator(
         fill_value=FILLVAL_FLOAT32,
     )

-    return interpolator
+    return interpolator, theta_min_max, phi_min_max


 def get_efficiency(
@@ -1174,7 +1187,7 @@ def get_efficiency(
         Interpolated efficiency values.
     """
     if not interpolator:
-        interpolator = get_efficiency_interpolator(ancillary_files)
+        interpolator, _, _ = get_efficiency_interpolator(ancillary_files)

     return interpolator((theta_inst, phi_inst, energy))
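Callers that only need the interpolator now unpack a 3-tuple; the returned bounds let callers handle out-of-range angles themselves. A hedged usage sketch (ancillary_files, theta_inst, phi_inst, and energy come from the surrounding code; the clipping is illustrative, not necessarily how the package uses the bounds):

```python
import numpy as np

interpolator, theta_min_max, phi_min_max = get_efficiency_interpolator(ancillary_files)
# Illustrative: clamp query angles to the lookup-table domain before interpolating.
efficiency = interpolator(
    (
        np.clip(theta_inst, *theta_min_max),
        np.clip(phi_inst, *phi_min_max),
        energy,
    )
)
```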
 
@@ -1343,7 +1356,7 @@ def is_back_tof_valid(
     From page 33 of the IMAP-Ultra Flight Software Specification document.
     """
     _, _, _, _, tofx, tofy = get_ph_tof_and_back_positions(
-        de_dataset, xf, "ultra45", ancillary_files
+        de_dataset, xf, sensor, ancillary_files
     )
     diff = tofy - tofx