imap-processing 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
  4. imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
  5. imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +29 -22
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
  9. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  10. imap_processing/cdf/utils.py +2 -2
  11. imap_processing/cli.py +10 -27
  12. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  13. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  14. imap_processing/codice/codice_l1b.py +62 -97
  15. imap_processing/codice/codice_l2.py +801 -174
  16. imap_processing/codice/codice_new_l1a.py +64 -0
  17. imap_processing/codice/constants.py +96 -0
  18. imap_processing/codice/utils.py +270 -0
  19. imap_processing/ena_maps/ena_maps.py +157 -95
  20. imap_processing/ena_maps/utils/coordinates.py +5 -0
  21. imap_processing/ena_maps/utils/corrections.py +450 -0
  22. imap_processing/ena_maps/utils/map_utils.py +143 -42
  23. imap_processing/ena_maps/utils/naming.py +3 -1
  24. imap_processing/hi/hi_l1c.py +34 -12
  25. imap_processing/hi/hi_l2.py +82 -44
  26. imap_processing/ialirt/constants.py +7 -1
  27. imap_processing/ialirt/generate_coverage.py +3 -1
  28. imap_processing/ialirt/l0/parse_mag.py +1 -0
  29. imap_processing/ialirt/l0/process_codice.py +66 -0
  30. imap_processing/ialirt/l0/process_hit.py +1 -0
  31. imap_processing/ialirt/l0/process_swapi.py +1 -0
  32. imap_processing/ialirt/l0/process_swe.py +2 -0
  33. imap_processing/ialirt/process_ephemeris.py +6 -2
  34. imap_processing/ialirt/utils/create_xarray.py +4 -2
  35. imap_processing/idex/idex_l2a.py +2 -2
  36. imap_processing/idex/idex_l2b.py +1 -1
  37. imap_processing/lo/l1c/lo_l1c.py +62 -4
  38. imap_processing/lo/l2/lo_l2.py +85 -15
  39. imap_processing/mag/l1a/mag_l1a.py +2 -2
  40. imap_processing/mag/l1a/mag_l1a_data.py +71 -13
  41. imap_processing/mag/l1c/interpolation_methods.py +34 -13
  42. imap_processing/mag/l1c/mag_l1c.py +117 -67
  43. imap_processing/mag/l1d/mag_l1d_data.py +3 -1
  44. imap_processing/quality_flags.py +1 -0
  45. imap_processing/spice/geometry.py +11 -9
  46. imap_processing/spice/pointing_frame.py +77 -50
  47. imap_processing/swapi/constants.py +4 -0
  48. imap_processing/swapi/l1/swapi_l1.py +59 -24
  49. imap_processing/swapi/l2/swapi_l2.py +17 -3
  50. imap_processing/swe/utils/swe_constants.py +7 -7
  51. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  52. imap_processing/ultra/l1b/de.py +57 -1
  53. imap_processing/ultra/l1b/extendedspin.py +1 -1
  54. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  55. imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
  56. imap_processing/ultra/l1b/ultra_l1b_extended.py +25 -12
  57. imap_processing/ultra/l1c/helio_pset.py +29 -6
  58. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  59. imap_processing/ultra/l1c/spacecraft_pset.py +10 -6
  60. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  61. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
  62. imap_processing/ultra/l2/ultra_l2.py +2 -2
  63. imap_processing-1.0.2.dist-info/METADATA +121 -0
  64. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/RECORD +67 -61
  65. imap_processing-1.0.0.dist-info/METADATA +0 -120
  66. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
  67. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
  68. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
@@ -12,6 +12,8 @@ from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.quality_flags import SWAPIFlags
+from imap_processing.spice.time import met_to_utc, ttj2000ns_to_met
+from imap_processing.swapi.constants import NUM_ENERGY_STEPS, NUM_PACKETS_PER_SWEEP
 from imap_processing.swapi.swapi_utils import SWAPIAPID, SWAPIMODE
 from imap_processing.utils import packet_file_to_datasets
 
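The two constants imported above come from imap_processing/swapi/constants.py (item 47 in the files-changed list, +4 lines), whose body is not shown in this diff. Judging by the literals 12 and 72 that they replace in the hunks below, they are presumably equivalent to this sketch:

# Hypothetical sketch of the new imap_processing/swapi/constants.py entries;
# the values are inferred from the magic numbers replaced in this diff.
NUM_PACKETS_PER_SWEEP = 12  # science packets that make up one full SWAPI sweep
NUM_ENERGY_STEPS = 72       # energy steps recorded per full sweep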
@@ -41,10 +43,10 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
     """
     # PLAN_ID for current sweep should all be one value and
     # SWEEP_TABLE should all be one value.
-    plan_id = full_sweep_sci["plan_id"].data.reshape(-1, 12)
-    sweep_table = full_sweep_sci["sweep_table"].data.reshape(-1, 12)
+    plan_id = full_sweep_sci["plan_id"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)
+    sweep_table = full_sweep_sci["sweep_table"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)
 
-    mode = full_sweep_sci["mode"].data.reshape(-1, 12)
+    mode = full_sweep_sci["mode"].data.reshape(-1, NUM_PACKETS_PER_SWEEP)
 
     sweep_indices = (sweep_table == sweep_table[:, 0, None]).all(axis=1)
     plan_id_indices = (plan_id == plan_id[:, 0, None]).all(axis=1)
@@ -62,10 +64,10 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
     # From this: [0 24]
     # To this: [[ 0  1  2  3  4  5  6  7  8  9 10 11]
     #           [24 25 26 27 28 29 30 31 32 33 34 35]]
-    cycle_start_indices = np.where(bad_data_indices == 0)[0] * 12
-    bad_cycle_indices = cycle_start_indices[..., None] + np.arange(12)[
-        None, ...
-    ].reshape(-1)
+    cycle_start_indices = np.where(bad_data_indices == 0)[0] * NUM_PACKETS_PER_SWEEP
+    bad_cycle_indices = cycle_start_indices[..., None] + np.arange(
+        NUM_PACKETS_PER_SWEEP
+    )[None, ...].reshape(-1)
 
     logger.debug("Cycle data was bad due to one of below reasons:")
     logger.debug(
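The rewritten indexing above keeps the behaviour the comments describe: each bad-cycle start index is expanded into the 12 packet indices of that sweep. A standalone numpy sketch of the same broadcast (NUM_PACKETS_PER_SWEEP is the assumed value from the constants sketch above):

import numpy as np

NUM_PACKETS_PER_SWEEP = 12  # assumed value

cycle_start_indices = np.array([0, 24])
bad_cycle_indices = cycle_start_indices[..., None] + np.arange(
    NUM_PACKETS_PER_SWEEP
)[None, ...].reshape(-1)
# bad_cycle_indices:
# [[ 0  1  2  3  4  5  6  7  8  9 10 11]
#  [24 25 26 27 28 29 30 31 32 33 34 35]]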
@@ -162,7 +164,7 @@ def find_sweep_starts(packets: xr.Dataset) -> npt.NDArray:
     indices_start : numpy.ndarray
         Array of indices of start cycle.
     """
-    if packets["shcoarse"].size < 12:
+    if packets["shcoarse"].size < NUM_PACKETS_PER_SWEEP:
         return np.array([], np.int64)
 
     # calculate time difference between consecutive sweep
@@ -387,7 +389,7 @@ def process_sweep_data(full_sweep_sci: xr.Dataset, cem_prefix: str) -> xr.Datase
     #   [ 2  3  4  5  6  7  8  9 10 11 12 13]]]
     # In other word, we grouped each cem's
     # data by full sweep.
-    current_cem_counts = current_cem_counts.reshape(6, -1, 12)
+    current_cem_counts = current_cem_counts.reshape(6, -1, NUM_PACKETS_PER_SWEEP)
 
     # Then, we go from above to
     # to this final output:
@@ -421,7 +423,7 @@ def process_sweep_data(full_sweep_sci: xr.Dataset, cem_prefix: str) -> xr.Datase
     all_cem_data = np.stack(current_cem_counts, axis=-1)
     # This line just flatten the inner most array to
     # (total_full_sweeps x 72)
-    all_cem_data = all_cem_data.reshape(-1, 72)
+    all_cem_data = all_cem_data.reshape(-1, NUM_ENERGY_STEPS)
     return all_cem_data
 
 
@@ -490,7 +492,9 @@ def process_swapi_science(
     # ===================================================================
     # Quality flags
     # ===================================================================
-    quality_flags_data = np.zeros((total_full_sweeps, 72), dtype=np.uint16)
+    quality_flags_data = np.zeros(
+        (total_full_sweeps, NUM_ENERGY_STEPS), dtype=np.uint16
+    )
 
     # Add science data quality flags
     # Have to match datatype to bitwise OR
@@ -547,7 +551,7 @@ def process_swapi_science(
 
     for flag_name in hk_flags_name:
         current_flag = np.repeat(good_sweep_hk_data[flag_name.lower()].data, 6).reshape(
-            -1, 72
+            -1, NUM_ENERGY_STEPS
        )
        # Use getattr to dynamically access the flag in SWAPIFlags class
        flag_to_set = getattr(SWAPIFlags, flag_name)
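The housekeeping flags are folded into the per-sample quality array with a bitwise OR, which is why the comment above stresses matching the datatype. A minimal sketch of that pattern with an invented flag enum (SWAPIFlags itself lives in imap_processing/quality_flags.py and is not shown in this diff):

import numpy as np
from enum import IntFlag

class ExampleFlags(IntFlag):  # hypothetical stand-in for SWAPIFlags
    SENSOR_OVERTEMP = 2**0
    LOW_VOLTAGE = 2**1

quality = np.zeros((3, 4), dtype=np.uint16)
hk_condition = np.array([[True, False, True, False]] * 3)
# OR the flag into every sample where the housekeeping condition is raised;
# casting to uint16 keeps the dtype consistent for the bitwise operation.
quality[hk_condition] |= np.uint16(ExampleFlags.SENSOR_OVERTEMP)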
@@ -568,7 +572,9 @@ def process_swapi_science(
     # Use center time for epoch to line up with mission requests. Center time
     # of SWAPI is time of 7th packet(aka SEQ_NUMBER == 6) creation time at the
     # beginning of 7th packet.
-    epoch_values = good_sweep_sci["epoch"].data.reshape(total_full_sweeps, 12)[:, 6]
+    epoch_values = good_sweep_sci["epoch"].data.reshape(
+        total_full_sweeps, NUM_PACKETS_PER_SWEEP
+    )[:, 6]
 
     epoch_time = xr.DataArray(
         epoch_values,
@@ -626,20 +632,33 @@ def process_swapi_science(
 
     # Add other support data
     dataset["sweep_table"] = xr.DataArray(
-        good_sweep_sci["sweep_table"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_sci["sweep_table"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="sweep_table",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("sweep_table"),
     )
     dataset["plan_id"] = xr.DataArray(
-        good_sweep_sci["plan_id"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_sci["plan_id"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="plan_id",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("plan_id"),
     )
     # Store start time for L3 purposes per SWAPI requests
+    # Per SWAPI request, convert start time of sweep to UTC time.
+    sci_start_time = met_to_utc(
+        ttj2000ns_to_met(
+            good_sweep_sci["epoch"].data.reshape(
+                total_full_sweeps, NUM_PACKETS_PER_SWEEP
+            )[:, 0]
+        ),
+        precision=0,
+    )
     dataset["sci_start_time"] = xr.DataArray(
-        good_sweep_sci["epoch"].data.reshape(total_full_sweeps, 12)[:, 0],
+        sci_start_time,
         name="sci_start_time",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("sci_start_time"),
@@ -650,7 +669,9 @@ def process_swapi_science(
     # updated every 6th step. This is used in L2 to calculate last 9 fine
     # energy steps.
     dataset["esa_lvl5"] = xr.DataArray(
-        good_sweep_sci["esa_lvl5"].data.reshape(total_full_sweeps, 12)[:, 11],
+        good_sweep_sci["esa_lvl5"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 11],
         name="esa_lvl5",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("esa_lvl5"),
@@ -661,19 +682,25 @@ def process_swapi_science(
     # SWP_HK.FPGA_TYPE - Type number of the FPGA
     # SWP_HK.FPGA_REV - Revision number of the FPGA
     dataset["lut_choice"] = xr.DataArray(
-        good_sweep_hk_data["lut_choice"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_hk_data["lut_choice"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="lut_choice",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("lut_choice"),
     )
     dataset["fpga_type"] = xr.DataArray(
-        good_sweep_hk_data["fpga_type"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_hk_data["fpga_type"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="fpga_type",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("fpga_type"),
     )
     dataset["fpga_rev"] = xr.DataArray(
-        good_sweep_hk_data["fpga_rev"].data.reshape(total_full_sweeps, 12)[:, 0],
+        good_sweep_hk_data["fpga_rev"].data.reshape(
+            total_full_sweeps, NUM_PACKETS_PER_SWEEP
+        )[:, 0],
         name="fpga_rev",
         dims=["epoch"],
         attrs=cdf_manager.get_variable_attributes("fpga_rev"),
@@ -727,7 +754,7 @@ def process_swapi_science(
     return dataset
 
 
-def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
+def swapi_l1(dependencies: ProcessingInputCollection, descriptor: str) -> xr.Dataset:
     """
     Will process SWAPI level 0 data to level 1.
 
@@ -735,6 +762,9 @@ def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
     ----------
     dependencies : ProcessingInputCollection
        Input dependencies needed for L1 processing.
+    descriptor : str
+        Descriptor for the type of data to process.
+        Options are 'hk' or 'sci'.
 
     Returns
     -------
@@ -754,9 +784,11 @@ def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
         l0_files[0], xtce_definition, use_derived_value=False
     )
 
-    hk_files = dependencies.get_file_paths(descriptor="hk")
-    if hk_files and l0_unpacked_dict.get(SWAPIAPID.SWP_SCI, None) is not None:
+    if descriptor == "sci":
         logger.info(f"Processing SWAPI science data for {l0_files[0]}.")
+        if SWAPIAPID.SWP_SCI not in l0_unpacked_dict:
+            logger.warning("No SWP_SCI packets found.")
+            return []
         # process science data.
         # First read HK data.
         hk_files = dependencies.get_file_paths(descriptor="hk")
@@ -770,8 +802,11 @@ def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
         )
         return [sci_dataset]
 
-    elif l0_unpacked_dict[SWAPIAPID.SWP_HK]:
+    elif descriptor == "hk":
         logger.info(f"Processing HK data for {l0_files[0]}.")
+        if SWAPIAPID.SWP_HK not in l0_unpacked_dict:
+            logger.warning("No SWP_HK packets found.")
+            return []
         # Get L1A and L1B HK data.
         l1a_hk_data = l0_unpacked_dict[SWAPIAPID.SWP_HK]
         l1b_hk_data = packet_file_to_datasets(
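These swapi_l1 changes (apparently imap_processing/swapi/l1/swapi_l1.py, item 48 in the files-changed list) replace the old packet-presence checks with an explicit descriptor argument, presumably supplied by the caller in imap_processing/cli.py. A hedged usage sketch, assuming a populated ProcessingInputCollection named dependencies:

# Hypothetical call pattern; dependencies must already hold the L0 file
# (plus an "hk" input when descriptor="sci").
sci_datasets = swapi_l1(dependencies, descriptor="sci")  # [] if no SWP_SCI packets
hk_datasets = swapi_l1(dependencies, descriptor="hk")    # [] if no SWP_HK packets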
@@ -8,6 +8,7 @@ import pandas as pd
 import xarray as xr
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
+from imap_processing.swapi.constants import NUM_ENERGY_STEPS
 
 logger = logging.getLogger(__name__)
 
@@ -72,15 +73,28 @@ def solve_full_sweep_energy(
             (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
         ]
         if subset.empty:
-            first_63_energies.append(np.full(63, np.nan, dtype=np.float64))
-            continue
+            # Get the earliest timestamp available
+            earliest_time = esa_table_df["timestamp"].min()
+
+            # Find the sweep's ESA data for the earliest time and sweep_id
+            earliest_subset = esa_table_df[
+                (esa_table_df["timestamp"] == earliest_time)
+                & (esa_table_df["Sweep #"] == sweep_id)
+            ]
+            if earliest_subset.empty:
+                raise ValueError(
+                    f"No matching ESA table entry found for sweep ID {sweep_id} "
+                    f"at time {time}, and no entries found for earliest time "
+                    f"{earliest_time}."
+                )
+            subset = earliest_subset
 
         # Subset data can contain multiple 72 energy values with last 9 fine energies
         # with 'Solve' value. We need to sort by time and ESA step to maintain correct
         # order. Then take the last group of 72 steps values and select first 63
         # values only.
         subset = subset.sort_values(["timestamp", "ESA Step #"])
-        grouped = subset["Energy"].values.reshape(-1, 72)
+        grouped = subset["Energy"].values.reshape(-1, NUM_ENERGY_STEPS)
         first_63 = grouped[-1, :63]
         first_63_energies.append(first_63)
 
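The change above (apparently imap_processing/swapi/l2/swapi_l2.py, item 49) swaps the old NaN fill for a fallback: when no ESA table row is timestamped at or before the sweep, the earliest entry for that sweep ID is used, and only a total miss raises. A self-contained sketch of the same lookup-with-fallback on a toy DataFrame (column names follow the diff; the values are invented):

import pandas as pd

esa_table_df = pd.DataFrame(
    {
        "timestamp": [100, 100, 200, 200],
        "Sweep #": [1, 2, 1, 2],
        "Energy": [10.0, 20.0, 11.0, 21.0],
    }
)

def lookup_energy_rows(time: float, sweep_id: int) -> pd.DataFrame:
    subset = esa_table_df[
        (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
    ]
    if subset.empty:
        # Fall back to the earliest table entry available for this sweep ID.
        earliest_time = esa_table_df["timestamp"].min()
        subset = esa_table_df[
            (esa_table_df["timestamp"] == earliest_time)
            & (esa_table_df["Sweep #"] == sweep_id)
        ]
        if subset.empty:
            raise ValueError(f"No ESA table entry for sweep ID {sweep_id}")
    return subset

print(lookup_energy_rows(time=50, sweep_id=1))   # falls back to timestamp 100
print(lookup_energy_rows(time=250, sweep_id=2))  # normal path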
@@ -16,13 +16,13 @@ ENERGY_CONVERSION_FACTOR = 4.75
 # 7 CEMs geometric factors in cm^2 sr eV/eV units.
 GEOMETRIC_FACTORS = np.array(
     [
-        435e-6,
-        599e-6,
-        808e-6,
-        781e-6,
-        876e-6,
-        548e-6,
-        432e-6,
+        424.4e-6,
+        564.5e-6,
+        763.8e-6,
+        916.9e-6,
+        792.0e-6,
+        667.7e-6,
+        425.2e-6,
     ]
 )
 
@@ -43,7 +43,7 @@ logger = logging.getLogger(__name__)
 
 
 def ultra_l1a(  # noqa: PLR0912
-    packet_file: str, apid_input: int | None = None
+    packet_file: str, apid_input: int | None = None, create_derived_l1b: bool = False
 ) -> list[xr.Dataset]:
     """
     Will process ULTRA L0 data into L1A CDF files at output_filepath.
@@ -54,6 +54,8 @@ def ultra_l1a(  # noqa: PLR0912
         Path to the CCSDS data packet file.
     apid_input : Optional[int]
         Optional apid.
+    create_derived_l1b : bool
+        Whether to create the l1b datasets with derived values.
 
     Returns
     -------
@@ -64,7 +66,17 @@ def ultra_l1a(  # noqa: PLR0912
         f"{imap_module_directory}/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml"
     )
 
+    # Keep a list to track the two versions, l1a and l1b with the derived values.
+    decommutated_packet_datasets = []
     datasets_by_apid = packet_file_to_datasets(packet_file, xtce)
+    decommutated_packet_datasets.append(datasets_by_apid)
+    if create_derived_l1b:
+        # For the housekeeping products, we can create the l1b at the same time
+        # as the l1a since there is no additional processing needed.
+        datasets_by_apid = packet_file_to_datasets(
+            packet_file, xtce, use_derived_value=True
+        )
+        decommutated_packet_datasets.append(datasets_by_apid)
 
     output_datasets = []
 
@@ -109,77 +121,114 @@ def ultra_l1a(  # noqa: PLR0912
     attr_mgr.add_instrument_global_attrs("ultra")
     attr_mgr.add_instrument_variable_attrs("ultra", "l1a")
 
-    for apid in apids:
-        if apid in ULTRA_AUX.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            gattr_key = ULTRA_AUX.logical_source[ULTRA_AUX.apid.index(apid)]
-        elif apid in all_l1a_image_apids:
-            packet_props = all_l1a_image_apids[apid]
-            decom_ultra_dataset = process_ultra_tof(
-                datasets_by_apid[apid], packet_props
-            )
-            gattr_key = packet_props.logical_source[packet_props.apid.index(apid)]
-        elif apid in ULTRA_RATES.apid:
-            decom_ultra_dataset = process_ultra_rates(datasets_by_apid[apid])
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("fastdata_00")
-            gattr_key = ULTRA_RATES.logical_source[ULTRA_RATES.apid.index(apid)]
-        elif apid in ULTRA_ENERGY_RATES.apid:
-            decom_ultra_dataset = process_ultra_energy_rates(datasets_by_apid[apid])
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("ratedata")
-            gattr_key = ULTRA_ENERGY_RATES.logical_source[
-                ULTRA_ENERGY_RATES.apid.index(apid)
-            ]
-        elif apid in all_event_apids:
-            decom_ultra_dataset = process_ultra_events(datasets_by_apid[apid], apid)
-            gattr_key = all_event_apids[apid]
+    for i, datasets_by_apid in enumerate(decommutated_packet_datasets):
+        for apid in apids:
+            if apid in ULTRA_AUX.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                gattr_key = ULTRA_AUX.logical_source[ULTRA_AUX.apid.index(apid)]
+            elif apid in all_l1a_image_apids:
+                packet_props = all_l1a_image_apids[apid]
+                decom_ultra_dataset = process_ultra_tof(
+                    datasets_by_apid[apid], packet_props
+                )
+                gattr_key = packet_props.logical_source[packet_props.apid.index(apid)]
+            elif apid in ULTRA_RATES.apid:
+                decom_ultra_dataset = process_ultra_rates(datasets_by_apid[apid])
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("fastdata_00")
+                gattr_key = ULTRA_RATES.logical_source[ULTRA_RATES.apid.index(apid)]
+            elif apid in ULTRA_ENERGY_RATES.apid:
+                decom_ultra_dataset = process_ultra_energy_rates(datasets_by_apid[apid])
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("ratedata")
+                gattr_key = ULTRA_ENERGY_RATES.logical_source[
+                    ULTRA_ENERGY_RATES.apid.index(apid)
+                ]
+            elif apid in all_event_apids:
+                # We don't want to process the event l1b datasets since those l1b
+                # products need more information
+                if i == 1:
+                    continue
+                decom_ultra_dataset = process_ultra_events(datasets_by_apid[apid], apid)
+                gattr_key = all_event_apids[apid]
+                # Add coordinate attributes
+                attrs = attr_mgr.get_variable_attributes("event_id")
+                decom_ultra_dataset.coords["event_id"].attrs.update(attrs)
+            elif apid in ULTRA_ENERGY_SPECTRA.apid:
+                decom_ultra_dataset = process_ultra_energy_spectra(
+                    datasets_by_apid[apid]
+                )
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("compdata")
+                gattr_key = ULTRA_ENERGY_SPECTRA.logical_source[
+                    ULTRA_ENERGY_SPECTRA.apid.index(apid)
+                ]
+            elif apid in ULTRA_MACROS_CHECKSUM.apid:
+                decom_ultra_dataset = process_ultra_macros_checksum(
+                    datasets_by_apid[apid]
+                )
+                gattr_key = ULTRA_MACROS_CHECKSUM.logical_source[
+                    ULTRA_MACROS_CHECKSUM.apid.index(apid)
+                ]
+            elif apid in ULTRA_HK.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                gattr_key = ULTRA_HK.logical_source[ULTRA_HK.apid.index(apid)]
+            elif apid in ULTRA_CMD_TEXT.apid:
+                decom_ultra_dataset = datasets_by_apid[apid]
+                decoded_strings = [
+                    s.decode("ascii").rstrip("\x00")
+                    for s in decom_ultra_dataset["text"].values
+                ]
+                decom_ultra_dataset = decom_ultra_dataset.drop_vars("text")
+                decom_ultra_dataset["text"] = xr.DataArray(
+                    decoded_strings,
+                    dims=["epoch"],
+                    coords={"epoch": decom_ultra_dataset["epoch"]},
+                )
+                gattr_key = ULTRA_CMD_TEXT.logical_source[
+                    ULTRA_CMD_TEXT.apid.index(apid)
+                ]
+            elif apid in ULTRA_CMD_ECHO.apid:
+                decom_ultra_dataset = process_ultra_cmd_echo(datasets_by_apid[apid])
+                gattr_key = ULTRA_CMD_ECHO.logical_source[
+                    ULTRA_CMD_ECHO.apid.index(apid)
+                ]
+            else:
+                logger.error(f"APID {apid} not recognized.")
+                continue
+
+            decom_ultra_dataset.attrs.update(attr_mgr.get_global_attributes(gattr_key))
+
+            if i == 1:
+                # Derived values dataset at l1b
+                # We already have the l1a attributes, just update the l1a -> l1b
+                # in the metadata.
+                decom_ultra_dataset.attrs["Data_type"] = decom_ultra_dataset.attrs[
+                    "Data_type"
+                ].replace("1A", "1B")
+                decom_ultra_dataset.attrs["Logical_source"] = decom_ultra_dataset.attrs[
+                    "Logical_source"
+                ].replace("l1a", "l1b")
+                decom_ultra_dataset.attrs["Logical_source_description"] = (
+                    decom_ultra_dataset.attrs["Logical_source_description"].replace(
+                        "1A", "1B"
+                    )
+                )
+
+            # Add data variable attributes
+            for key in decom_ultra_dataset.data_vars:
+                attrs = attr_mgr.get_variable_attributes(key.lower())
+                decom_ultra_dataset.data_vars[key].attrs.update(attrs)
+                if i == 1:
+                    # For l1b datasets, the FILLVAL and VALIDMIN/MAX may be
+                    # different datatypes, so we can't use them directly from l1a.
+                    # just remove them for now since we don't really have a need for
+                    # for them currently.
+                    for attr_key in ["FILLVAL", "VALIDMIN", "VALIDMAX"]:
+                        if attr_key in decom_ultra_dataset.data_vars[key].attrs:
+                            decom_ultra_dataset.data_vars[key].attrs.pop(attr_key)
+
             # Add coordinate attributes
-            attrs = attr_mgr.get_variable_attributes("event_id")
-            decom_ultra_dataset.coords["event_id"].attrs.update(attrs)
-        elif apid in ULTRA_ENERGY_SPECTRA.apid:
-            decom_ultra_dataset = process_ultra_energy_spectra(datasets_by_apid[apid])
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("compdata")
-            gattr_key = ULTRA_ENERGY_SPECTRA.logical_source[
-                ULTRA_ENERGY_SPECTRA.apid.index(apid)
-            ]
-        elif apid in ULTRA_MACROS_CHECKSUM.apid:
-            decom_ultra_dataset = process_ultra_macros_checksum(datasets_by_apid[apid])
-            gattr_key = ULTRA_MACROS_CHECKSUM.logical_source[
-                ULTRA_MACROS_CHECKSUM.apid.index(apid)
-            ]
-        elif apid in ULTRA_HK.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            gattr_key = ULTRA_HK.logical_source[ULTRA_HK.apid.index(apid)]
-        elif apid in ULTRA_CMD_TEXT.apid:
-            decom_ultra_dataset = datasets_by_apid[apid]
-            decoded_strings = [
-                s.decode("ascii").rstrip("\x00")
-                for s in decom_ultra_dataset["text"].values
-            ]
-            decom_ultra_dataset = decom_ultra_dataset.drop_vars("text")
-            decom_ultra_dataset["text"] = xr.DataArray(
-                decoded_strings,
-                dims=["epoch"],
-                coords={"epoch": decom_ultra_dataset["epoch"]},
-            )
-            gattr_key = ULTRA_CMD_TEXT.logical_source[ULTRA_CMD_TEXT.apid.index(apid)]
-        elif apid in ULTRA_CMD_ECHO.apid:
-            decom_ultra_dataset = process_ultra_cmd_echo(datasets_by_apid[apid])
-            gattr_key = ULTRA_CMD_ECHO.logical_source[ULTRA_CMD_ECHO.apid.index(apid)]
-        else:
-            logger.error(f"APID {apid} not recognized.")
-            continue
-
-        decom_ultra_dataset.attrs.update(attr_mgr.get_global_attributes(gattr_key))
-
-        # Add data variable attributes
-        for key in decom_ultra_dataset.data_vars:
-            attrs = attr_mgr.get_variable_attributes(key.lower())
-            decom_ultra_dataset.data_vars[key].attrs.update(attrs)
-
-        # Add coordinate attributes
-        attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
-        decom_ultra_dataset.coords["epoch"].attrs.update(attrs)
-
-        output_datasets.append(decom_ultra_dataset)
+            attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
+            decom_ultra_dataset.coords["epoch"].attrs.update(attrs)
+
+            output_datasets.append(decom_ultra_dataset)
 
     return output_datasets
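The restructured loop (apparently imap_processing/ultra/l1a/ultra_l1a.py, item 51) walks up to two decommutated packet sets, raw and derived, so that housekeeping-style products gain an L1B twin whose metadata is relabeled from l1a to l1b, while event APIDs are skipped on the derived pass. A hedged usage sketch of the new flag (the packet file path is a placeholder):

# Hypothetical call; "ultra_packets.pkts" is a placeholder path.
datasets = ultra_l1a("ultra_packets.pkts", create_derived_l1b=True)
# The first pass of the loop yields the l1a datasets, the second pass yields the
# l1b versions with "Logical_source" rewritten from l1a to l1b; event products
# are only produced once.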
@@ -5,10 +5,13 @@ import xarray as xr
 
 from imap_processing.cdf.utils import parse_filename_like
 from imap_processing.quality_flags import (
+    ImapAttitudeUltraFlags,
     ImapDEOutliersUltraFlags,
     ImapDEScatteringUltraFlags,
 )
 from imap_processing.spice.geometry import SpiceFrame
+from imap_processing.spice.repoint import get_repoint_data
+from imap_processing.spice.time import et_to_met
 from imap_processing.ultra.l1b.lookup_utils import get_geometric_factor
 from imap_processing.ultra.l1b.ultra_l1b_annotated import (
     get_annotated_particle_velocity,
@@ -74,6 +77,10 @@ def calculate_de(
     spin_number = get_spin_number(
         de_dataset["shcoarse"].values, de_dataset["spin"].values
     )
+    repoint_id = de_dataset.attrs.get("Repointing", None)
+    if repoint_id is not None:
+        repoint_id = int(repoint_id.replace("repoint", ""))
+
     de_dict["spin"] = spin_number
 
     # Add already populated fields.
@@ -311,7 +318,19 @@ def calculate_de(
     ultra_frame = getattr(SpiceFrame, f"IMAP_ULTRA_{sensor}")
 
     # Account for counts=0 (event times have FILL value)
-    valid_events = event_times != FILLVAL_FLOAT32
+    valid_events = (event_times != FILLVAL_FLOAT32).copy()
+    # TODO - find a better solution than filtering out data from repointings?
+    if repoint_id is not None:
+        in_pointing = calculate_events_in_pointing(
+            repoint_id, event_times[valid_events]
+        )
+        # Update quality flags for valid events that are not in the pointing
+        quality_flags[valid_events][~in_pointing] |= (
+            ImapAttitudeUltraFlags.DURINGREPOINT.value
+        )
+        # Update valid_events to only include times within a pointing
+        valid_events[valid_events] &= in_pointing
+
     if np.any(valid_events):
         (
             sc_velocity[valid_events],
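The masking above narrows an existing boolean mask using a second mask that was computed only over the currently valid entries. A tiny standalone illustration of that pattern (the values are invented):

import numpy as np

valid_events = np.array([True, False, True, True, False])
# in_pointing is evaluated only for the three currently valid events
in_pointing = np.array([True, False, True])

valid_events[valid_events] &= in_pointing
# valid_events is now [ True False False  True False]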
@@ -371,3 +390,40 @@ def calculate_de(
     dataset = create_dataset(de_dict, name, "l1b")
 
     return dataset
+
+
+def calculate_events_in_pointing(
+    repoint_id: int,
+    event_times: np.ndarray,
+) -> np.ndarray:
+    """
+    Calculate boolean array of events within a pointing.
+
+    Parameters
+    ----------
+    repoint_id : int
+        The repointing ID.
+    event_times : np.ndarray
+        Array of event times in ET.
+
+    Returns
+    -------
+    in_pointing : np.ndarray
+        Boolean array indicating whether each event is within the pointing period
+        combined with the valid_events mask.
+    """
+    # TODO add this as a helper function in repoint.py
+    repoint_data = get_repoint_data()
+    # To find the pointing start and stop, get the end of the current repointing
+    # and the start of the next repointing
+    repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id]
+    next_repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id + 1]
+    pointing_start_met = repoint_row["repoint_end_met"].values[0]
+    pointing_end_met = next_repoint_row["repoint_start_met"].values[0]
+
+    # Check which events are within the pointing
+    in_pointing = (et_to_met(event_times) >= pointing_start_met) & (
+        et_to_met(event_times) <= pointing_end_met
+    )
+
+    return in_pointing
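calculate_events_in_pointing derives the pointing window from the repoint table: the window opens when the current repointing ends and closes when the next one starts. A sketch of that window logic on a toy table using the column names above (the real table comes from get_repoint_data(); the MET values and IDs here are invented, and the sketch works directly in MET rather than converting from ET):

import numpy as np
import pandas as pd

repoint_data = pd.DataFrame(
    {
        "repoint_id": [7, 8],
        "repoint_start_met": [1_000.0, 90_000.0],
        "repoint_end_met": [2_000.0, 91_000.0],
    }
)

repoint_id = 7
repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id]
next_repoint_row = repoint_data[repoint_data["repoint_id"] == repoint_id + 1]
pointing_start_met = repoint_row["repoint_end_met"].values[0]       # 2000.0
pointing_end_met = next_repoint_row["repoint_start_met"].values[0]  # 90000.0

event_met = np.array([1_500.0, 2_500.0, 50_000.0, 95_000.0])
in_pointing = (event_met >= pointing_start_met) & (event_met <= pointing_end_met)
# in_pointing -> [False  True  True False]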
@@ -50,7 +50,7 @@ def calculate_extendedspin(
         de_dataset["spin"].values,
         de_dataset["energy"].values,
     )
-    count_rates, _, counts, _ = get_energy_histogram(
+    count_rates, _, _counts, _ = get_energy_histogram(
         de_dataset["spin"].values, de_dataset["energy"].values
     )
     attitude_qf, spin_rates, spin_period, spin_starttime = flag_attitude(
@@ -52,7 +52,6 @@ def get_annotated_particle_velocity(
         from_frame=instrument_frame,
         to_frame=spacecraft_frame,
     )
-
     # Particle velocity in the pointing (DPS) frame wrt spacecraft.
     particle_velocity_dps_spacecraft = frame_transform(
         et=time,
@@ -255,7 +255,7 @@ def flag_rates(
     n_sigma_per_energy_reshape : NDArray
         N sigma per energy.
     """
-    count_rates, spin_edges, counts, duration = get_energy_histogram(
+    count_rates, _spin_edges, _counts, duration = get_energy_histogram(
         spin_number, energy
     )
     quality_flags = np.full(
@@ -440,7 +440,7 @@ def get_pulses_per_spin(rates: xr.Dataset) -> RateResult:
     coin_pulses : NDArray
         Total coincidence pulses.
     """
-    spin_number, duration = get_spin_and_duration(rates["shcoarse"], rates["spin"])
+    spin_number, _duration = get_spin_and_duration(rates["shcoarse"], rates["spin"])
 
     # Top coin pulses
     top_coin_pulses = np.stack(