imap-processing 0.19.2__py3-none-any.whl → 0.19.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of imap-processing might be problematic.

Files changed (45)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +90 -91
  3. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +45 -6
  5. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +29 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +32 -0
  7. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +1 -2
  8. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +1 -0
  9. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +8 -8
  10. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +8 -6
  11. imap_processing/cdf/utils.py +5 -0
  12. imap_processing/cli.py +72 -54
  13. imap_processing/codice/codice_l1a.py +44 -6
  14. imap_processing/codice/codice_l1b.py +35 -6
  15. imap_processing/codice/constants.py +10 -6
  16. imap_processing/ena_maps/ena_maps.py +2 -7
  17. imap_processing/glows/l1b/glows_l1b.py +29 -21
  18. imap_processing/hi/hi_l1a.py +49 -29
  19. imap_processing/hi/hi_l1b.py +34 -0
  20. imap_processing/hi/hi_l1c.py +23 -17
  21. imap_processing/hi/hi_l2.py +225 -81
  22. imap_processing/ialirt/l0/ialirt_spice.py +1 -2
  23. imap_processing/ialirt/l0/parse_mag.py +18 -4
  24. imap_processing/ialirt/l0/process_hit.py +9 -4
  25. imap_processing/ialirt/l0/process_swapi.py +9 -4
  26. imap_processing/ialirt/l0/process_swe.py +9 -4
  27. imap_processing/ialirt/utils/create_xarray.py +11 -1
  28. imap_processing/lo/l1b/lo_l1b.py +111 -77
  29. imap_processing/lo/l1c/lo_l1c.py +10 -11
  30. imap_processing/lo/l2/lo_l2.py +43 -22
  31. imap_processing/mag/l1c/interpolation_methods.py +9 -1
  32. imap_processing/mag/l1c/mag_l1c.py +99 -45
  33. imap_processing/spice/geometry.py +0 -2
  34. imap_processing/ultra/l0/decom_tools.py +58 -46
  35. imap_processing/ultra/l0/decom_ultra.py +21 -9
  36. imap_processing/ultra/l0/ultra_utils.py +4 -4
  37. imap_processing/ultra/l1c/helio_pset.py +2 -2
  38. imap_processing/ultra/l1c/spacecraft_pset.py +7 -4
  39. imap_processing/ultra/l2/ultra_l2.py +63 -23
  40. imap_processing/ultra/utils/ultra_l1_utils.py +4 -4
  41. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/METADATA +2 -2
  42. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/RECORD +45 -45
  43. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/LICENSE +0 -0
  44. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/WHEEL +0 -0
  45. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/entry_points.txt +0 -0
imap_processing/cli.py CHANGED
@@ -51,7 +51,7 @@ from imap_processing.cdf.utils import load_cdf, write_cdf
  # call cdf.utils.write_cdf
  from imap_processing.codice import codice_l1a, codice_l1b, codice_l2
  from imap_processing.glows.l1a.glows_l1a import glows_l1a
- from imap_processing.glows.l1b.glows_l1b import glows_l1b
+ from imap_processing.glows.l1b.glows_l1b import glows_l1b, glows_l1b_de
  from imap_processing.glows.l2.glows_l2 import glows_l2
  from imap_processing.hi import hi_l1a, hi_l1b, hi_l1c, hi_l2
  from imap_processing.hit.l1a.hit_l1a import hit_l1a
@@ -682,55 +682,60 @@ class Glows(ProcessInstrument):
  f"{science_files}."
  )
  input_dataset = load_cdf(science_files[0])
+ if "hist" in self.descriptor:
+ # Create file lists for each ancillary type
+ excluded_regions_files = dependencies.get_processing_inputs(
+ descriptor="map-of-excluded-regions"
+ )[0]
+ uv_sources_files = dependencies.get_processing_inputs(
+ descriptor="map-of-uv-sources"
+ )[0]
+ suspected_transients_files = dependencies.get_processing_inputs(
+ descriptor="suspected-transients"
+ )[0]
+ exclusions_by_instr_team_files = dependencies.get_processing_inputs(
+ descriptor="exclusions-by-instr-team"
+ )[0]
+ pipeline_settings = dependencies.get_processing_inputs(
+ descriptor="pipeline-settings"
+ )[0]

- # Create file lists for each ancillary type
- excluded_regions_files = dependencies.get_processing_inputs(
- descriptor="map-of-excluded-regions"
- )[0]
- uv_sources_files = dependencies.get_processing_inputs(
- descriptor="map-of-uv-sources"
- )[0]
- suspected_transients_files = dependencies.get_processing_inputs(
- descriptor="suspected-transients"
- )[0]
- exclusions_by_instr_team_files = dependencies.get_processing_inputs(
- descriptor="exclusions-by-instr-team"
- )[0]
- pipeline_settings = dependencies.get_processing_inputs(
- descriptor="pipeline-settings"
- )[0]
-
- # Use end date buffer for ancillary data
- current_day = np.datetime64(
- f"{self.start_date[:4]}-{self.start_date[4:6]}-{self.start_date[6:]}"
- )
- day_buffer = current_day + np.timedelta64(3, "D")
-
- # Create combiners for each ancillary dataset
- excluded_regions_combiner = GlowsAncillaryCombiner(
- excluded_regions_files, day_buffer
- )
- uv_sources_combiner = GlowsAncillaryCombiner(uv_sources_files, day_buffer)
- suspected_transients_combiner = GlowsAncillaryCombiner(
- suspected_transients_files, day_buffer
- )
- exclusions_by_instr_team_combiner = GlowsAncillaryCombiner(
- exclusions_by_instr_team_files, day_buffer
- )
- pipeline_settings_combiner = GlowsAncillaryCombiner(
- pipeline_settings, day_buffer
- )
+ # Use end date buffer for ancillary data
+ current_day = np.datetime64(
+ f"{self.start_date[:4]}-{self.start_date[4:6]}-{self.start_date[6:]}"
+ )
+ day_buffer = current_day + np.timedelta64(3, "D")

- datasets = [
- glows_l1b(
- input_dataset,
- excluded_regions_combiner.combined_dataset,
- uv_sources_combiner.combined_dataset,
- suspected_transients_combiner.combined_dataset,
- exclusions_by_instr_team_combiner.combined_dataset,
- pipeline_settings_combiner.combined_dataset,
+ # Create combiners for each ancillary dataset
+ excluded_regions_combiner = GlowsAncillaryCombiner(
+ excluded_regions_files, day_buffer
+ )
+ uv_sources_combiner = GlowsAncillaryCombiner(
+ uv_sources_files, day_buffer
+ )
+ suspected_transients_combiner = GlowsAncillaryCombiner(
+ suspected_transients_files, day_buffer
+ )
+ exclusions_by_instr_team_combiner = GlowsAncillaryCombiner(
+ exclusions_by_instr_team_files, day_buffer
+ )
+ pipeline_settings_combiner = GlowsAncillaryCombiner(
+ pipeline_settings, day_buffer
  )
- ]
+
+ datasets = [
+ glows_l1b(
+ input_dataset,
+ excluded_regions_combiner.combined_dataset,
+ uv_sources_combiner.combined_dataset,
+ suspected_transients_combiner.combined_dataset,
+ exclusions_by_instr_team_combiner.combined_dataset,
+ pipeline_settings_combiner.combined_dataset,
+ )
+ ]
+ else:
+ # Direct events
+ datasets = [glows_l1b_de(input_dataset)]

  if self.data_level == "l2":
  science_files = dependencies.get_file_paths(source="glows")
@@ -804,13 +809,26 @@ class Hi(ProcessInstrument):
  datasets = hi_l1c.hi_l1c(load_cdf(science_paths[0]), anc_paths[0])
  elif self.data_level == "l2":
  science_paths = dependencies.get_file_paths(source="hi", data_type="l1c")
- # TODO get ancillary paths
- geometric_factors_path = ""
- esa_energies_path = ""
+ anc_dependencies = dependencies.get_processing_inputs(data_type="ancillary")
+ if len(anc_dependencies) != 3:
+ raise ValueError(
+ f"Expected three ancillary dependencies for L2 processing including"
+ f"cal-prod, esa-energies, and esa-eta-fit-factors."
+ f"Got {[anc_dep.descriptor for anc_dep in anc_dependencies]}"
+ "."
+ )
+ # Get individual L2 ancillary dependencies
+ # Strip the "45sensor" or "90sensor" off the ancillary descriptor and
+ # create a mapping from descriptor to path
+ l2_ancillary_path_dict = {
+ "-".join(dep.descriptor.split("-")[1:]): dep.imap_file_paths[
+ 0
+ ].construct_path()
+ for dep in anc_dependencies
+ }
  datasets = hi_l2.hi_l2(
  science_paths,
- geometric_factors_path,
- esa_energies_path,
+ l2_ancillary_path_dict,
  self.descriptor,
  )
  else:
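
Note: the new L2 branch keys each ancillary file by its descriptor with the leading sensor token removed. A minimal sketch of that mapping, using illustrative descriptor strings and file names rather than real IMAP ancillary inputs:

    # Illustrative only: descriptors and paths are made up.
    from pathlib import Path

    anc = {
        "90sensor-cal-prod": Path("imap_hi_cal-prod_v001.csv"),
        "90sensor-esa-energies": Path("imap_hi_esa-energies_v001.csv"),
        "90sensor-esa-eta-fit-factors": Path("imap_hi_esa-eta-fit-factors_v001.csv"),
    }
    # Strip the leading "45sensor"/"90sensor" token so the keys become
    # "cal-prod", "esa-energies", and "esa-eta-fit-factors".
    l2_ancillary_path_dict = {
        "-".join(descriptor.split("-")[1:]): path for descriptor, path in anc.items()
    }
    print(l2_ancillary_path_dict["esa-energies"])  # imap_hi_esa-energies_v001.csv
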
@@ -1119,9 +1137,9 @@ class Mag(ProcessInstrument):
  input_data = [load_cdf(dep) for dep in science_files]
  # Input datasets can be in any order, and are validated within mag_l1c
  if len(input_data) == 1:
- datasets = [mag_l1c(input_data[0])]
+ datasets = [mag_l1c(input_data[0], current_day)]
  elif len(input_data) == 2:
- datasets = [mag_l1c(input_data[0], input_data[1])]
+ datasets = [mag_l1c(input_data[0], current_day, input_data[1])]
  else:
  raise ValueError(
  f"Invalid dependencies found for MAG L1C:"
imap_processing/codice/codice_l1a.py CHANGED
@@ -325,11 +325,23 @@ class CoDICEL1aPipeline:
  # different depending on the data product). In any case, iterate over
  # the num_counters dimension to isolate the data for each counter so
  # each counter's data can be placed in a separate CDF data variable.
+ # For Lo SW species, all_data has shape (9, 16, 128, 1) -> (epochs,
+ # num_counters, num_energy_steps, num_spin_sectors)
+ if self._is_lo_species_dataset():
+ # For Lo species datasets, counters are the second dimension (index 1)
+ num_counters = all_data.shape[1]
+ else:
+ # For all other datasets, counters are the last dimension
+ num_counters = all_data.shape[-1]
+
  for counter, variable_name in zip(
- range(all_data.shape[-1]), self.config["variable_names"], strict=False
+ range(num_counters), self.config["variable_names"], strict=False
  ):
  # Extract the counter data
- counter_data = all_data[..., counter]
+ if self._is_lo_species_dataset():
+ counter_data = all_data[:, counter, :, :]
+ else:
+ counter_data = all_data[..., counter]

  # Get the CDF attributes
  descriptor = self.config["dataset_name"].split("imap_codice_l1a_")[-1]
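
Note: a small numpy sketch of the counter-axis handling above. The (9, 16, 128, 1) shape is the Lo SW species example quoted in the diff comment; the other shape is hypothetical.

    import numpy as np

    lo_species = np.zeros((9, 16, 128, 1))  # (epochs, counters, energy steps, spin sectors)
    other = np.zeros((9, 128, 12, 16))      # hypothetical product with counters on the last axis

    counter = 0
    lo_slice = lo_species[:, counter, :, :]  # counters on axis 1 -> (9, 128, 1)
    other_slice = other[..., counter]        # counters on the last axis -> (9, 128, 12)
    print(lo_slice.shape, other_slice.shape)
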
@@ -708,10 +720,18 @@ class CoDICEL1aPipeline:

  # Reshape the data based on how it is written to the data array of
  # the packet data. The number of counters is the last dimension / axis.
- reshape_dims = (
- *self.config["dims"].values(),
- self.config["num_counters"],
- )
+ if self._is_lo_species_dataset():
+ # For Lo species datasets, counters are the first dimension
+ reshape_dims = (
+ self.config["num_counters"],
+ *self.config["dims"].values(),
+ )
+ else:
+ # For all other datasets, counters are the last dimension
+ reshape_dims = (
+ *self.config["dims"].values(),
+ self.config["num_counters"],
+ )
  for packet_data in self.raw_data:
  reshaped_packet_data = np.array(packet_data, dtype=np.uint32).reshape(
  reshape_dims
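
Note: the reshape order matters because the flat packet array is unpacked in row-major order, so the same buffer yields different counter slices depending on whether the counter axis comes first or last. Dimension sizes below are made up for illustration.

    import numpy as np

    num_counters, num_energy_steps, num_spin_sectors = 3, 4, 2
    flat = np.arange(num_counters * num_energy_steps * num_spin_sectors, dtype=np.uint32)

    counters_first = flat.reshape((num_counters, num_energy_steps, num_spin_sectors))
    counters_last = flat.reshape((num_energy_steps, num_spin_sectors, num_counters))

    # Counter 0 is a contiguous block in the first layout, an interleaved one in the second.
    print(counters_first[0].ravel())      # [0 1 2 3 4 5 6 7]
    print(counters_last[..., 0].ravel())  # [ 0  3  6  9 12 15 18 21]
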
@@ -725,6 +745,24 @@ class CoDICEL1aPipeline:
  # No longer need to keep the raw data around
  del self.raw_data

+ def _is_lo_species_dataset(self) -> bool:
+ """
+ Check if the current dataset is a Lo species dataset.
+
+ Lo species datasets have a different data structure where counters are the
+ second dimension (index 1) instead of the last dimension.
+
+ Returns
+ -------
+ bool
+ True if the dataset is a Lo species dataset
+ (lo-sw-species or lo-nsw-species), False otherwise.
+ """
+ return self.config["dataset_name"] in [
+ "imap_codice_l1a_lo-sw-species",
+ "imap_codice_l1a_lo-nsw-species",
+ ]
+
  def set_data_product_config(self, apid: int, dataset: xr.Dataset) -> None:
  """
  Set the various settings for defining the data products.
imap_processing/codice/codice_l1b.py CHANGED
@@ -56,8 +56,6 @@ def convert_to_rates(
  "lo-sw-angular",
  "lo-nsw-priority",
  "lo-sw-priority",
- "lo-nsw-species",
- "lo-sw-species",
  "lo-ialirt",
  ]:
  # Applying rate calculation described in section 10.2 of the algorithm
@@ -66,14 +64,35 @@ def convert_to_rates(
  # time data array to match the data variable shape
  dims = [1] * dataset[variable_name].data.ndim
  dims[1] = 128
- acq_times = dataset.acquisition_time_per_step.data.reshape(dims)
-
+ acq_times = dataset.acquisition_time_per_step.data.reshape(dims)  # (128)
  # Now perform the calculation
  rates_data = dataset[variable_name].data / (
  acq_times
- * 1e-6  # Converting from microseconds to seconds
+ * 1e-3  # Converting from milliseconds to seconds
  * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
  )
+ elif descriptor in [
+ "lo-nsw-species",
+ "lo-sw-species",
+ ]:
+ # Applying rate calculation described in section 10.2 of the algorithm
+ # document
+ # In order to divide by acquisition times, we must reshape the acq
+ # time data array to match the data variable shape (epoch, esa_step, sector)
+ dims = [1] * dataset[variable_name].data.ndim
+ dims[1] = 128
+ acq_times = dataset.acquisition_time_per_step.data.reshape(dims)  # (128)
+ # acquisition time have an array of shape (128,). We match n_sector to that.
+ # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11.
+ n_sector = np.full(128, 12, dtype=int)
+ n_sector[-1] = 11
+
+ # Now perform the calculation
+ rates_data = dataset[variable_name].data / (
+ acq_times
+ * 1e-3  # Converting from milliseconds to seconds
+ * n_sector[:, np.newaxis]  # Spin sectors
+ )
  elif descriptor in [
  "hi-counters-aggregated",
  "hi-counters-singles",
@@ -164,13 +183,23 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
  l1b_dataset[variable_name].data = convert_to_rates(
  l1b_dataset, descriptor, variable_name
  )
-
  # Set the variable attributes
  cdf_attrs_key = f"{descriptor}-{variable_name}"
  l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
  cdf_attrs_key, check_schema=False
  )

+ if descriptor in ["lo-sw-species", "lo-nsw-species"]:
+ # Do not carry these variable attributes from L1a to L1b
+ drop_variables = [
+ "k_factor",
+ "nso_half_spin",
+ "sw_bias_gain_mode",
+ "st_bias_gain_mode",
+ "spin_period",
+ ]
+ l1b_dataset = l1b_dataset.drop_vars(drop_variables)
+
  logger.info(f"\nFinal data product:\n{l1b_dataset}\n")

  return l1b_dataset
imap_processing/codice/constants.py CHANGED
@@ -214,17 +214,17 @@ HI_COUNTERS_AGGREGATED_ACTIVE_VARIABLES = {
  "spo": True,
  "reserved1": False,
  "mst": True,
- "reserved2": False,
- "reserved3": False,
+ "ssdo": True,
+ "stssd": True,
  "reserved4": False,
  "reserved5": False,
- "low_tof_cutoff": False,
+ "low_tof_cutoff": True,
  "reserved6": False,
  "reserved7": False,
  "asic1_flag_invalid": True,
  "asic2_flag_invalid": True,
- "asic1_channel_invalid": False,
- "asic_2_channel_invalid": False,
+ "asic1_channel_invalid": True,
+ "asic2_channel_invalid": True,
  }
  HI_COUNTERS_AGGREGATED_VARIABLE_NAMES = [
  name
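
Note: HI_COUNTERS_AGGREGATED_VARIABLE_NAMES appears to be derived from the flag dictionary above (the comprehension is truncated in this diff), so flipping entries such as low_tof_cutoff to True adds those counters to the derived name list. An illustrative sketch of that pattern, not the module's exact code:

    active_variables = {
        "spo": True,
        "reserved1": False,
        "mst": True,
        "low_tof_cutoff": True,
    }
    variable_names = [name for name, is_active in active_variables.items() if is_active]
    print(variable_names)  # ['spo', 'mst', 'low_tof_cutoff']
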
@@ -961,7 +961,11 @@ ESA_SWEEP_TABLE_ID_LOOKUP = {
  # use. Currently, LO Stepping table 0 is used for every plan_id/plan_step
  # combination, but may change in the future. These are defined in the "Lo
  # Stepping" tab of the "*-SCI-LUT-*.xml" spreadsheet that largely defines CoDICE
- # processing.
+ # processing. Eg.
+ # (plan_id, plan_step) -> id of acquisition time
+ # (0, 0) -> 0
+
+
  LO_STEPPING_TABLE_ID_LOOKUP = {
  (0, 0): 0,
  (0, 1): 0,
imap_processing/ena_maps/ena_maps.py CHANGED
@@ -1222,7 +1222,6 @@ class RectangularSkyMap(AbstractSkyMap):
  self,
  instrument: str,
  level: str,
- frame: str,
  descriptor: str,
  sensor: str | None = None,
  ) -> xr.Dataset:
@@ -1235,8 +1234,6 @@ class RectangularSkyMap(AbstractSkyMap):
  Instrument name. "hi", "lo", "ultra".
  level : str
  Product level. "l2" or "l3".
- frame : str
- Map frame. "sf", "hf" or "hk".
  descriptor : str
  Descriptor for filename.
  sensor : str, optional
1318
1315
  )
1319
1316
 
1320
1317
  # Now set global attributes
1321
- map_attrs = cdf_attrs.get_global_attributes(
1322
- f"imap_{instrument}_{level}_enamap-{frame}"
1323
- )
1318
+ map_attrs = cdf_attrs.get_global_attributes(f"imap_{instrument}_{level}_enamap")
1324
1319
  map_attrs["Spacing_degrees"] = str(self.spacing_deg)
1325
1320
  for key in ["Data_type", "Logical_source", "Logical_source_description"]:
1326
1321
  map_attrs[key] = map_attrs[key].format(
1327
1322
  descriptor=descriptor,
1328
1323
  sensor=sensor,
1329
1324
  )
1330
- # Always add the following attributes to the map
1325
+ # Always add the following attributes to the map
1331
1326
  map_attrs.update(
1332
1327
  {
1333
1328
  "Sky_tiling_type": self.tiling_type.value,
imap_processing/glows/l1b/glows_l1b.py CHANGED
@@ -28,12 +28,12 @@ def glows_l1b(
  pipeline_settings_dataset: xr.Dataset,
  ) -> xr.Dataset:
  """
- Will process the GLOWS L1B data and format the output datasets.
+ Will process the histogram GLOWS L1B data and format the output datasets.

  Parameters
  ----------
  input_dataset : xr.Dataset
- Dataset of input values.
+ Dataset of input values for L1A histogram data.
  excluded_regions : xr.Dataset
  Dataset containing excluded sky regions with ecliptic coordinates. This
  is the output from GlowsAncillaryCombiner.
@@ -77,29 +77,37 @@ def glows_l1b(
  ) as f:
  ancillary_parameters = AncillaryParameters(json.loads(f.read()))

- logical_source = (
- input_dataset.attrs["Logical_source"][0]
- if isinstance(input_dataset.attrs["Logical_source"], list)
- else input_dataset.attrs["Logical_source"]
+ output_dataarrays = process_histogram(
+ input_dataset, ancillary_exclusions, ancillary_parameters, pipeline_settings
+ )
+ output_dataset = create_l1b_hist_output(
+ output_dataarrays, input_dataset["epoch"], input_dataset["bins"], cdf_attrs
  )

- if "hist" in logical_source:
- output_dataarrays = process_histogram(
- input_dataset, ancillary_exclusions, ancillary_parameters, pipeline_settings
- )
- output_dataset = create_l1b_hist_output(
- output_dataarrays, input_dataset["epoch"], input_dataset["bins"], cdf_attrs
- )
+ return output_dataset

- elif "de" in logical_source:
- output_dataset = create_l1b_de_output(input_dataset, cdf_attrs)

- else:
- raise ValueError(
- f"Logical_source {input_dataset.attrs['Logical_source']} for input file "
- f"does not match histogram "
- "('hist') or direct event ('de')."
- )
+ def glows_l1b_de(
+ input_dataset: xr.Dataset,
+ ) -> xr.Dataset:
+ """
+ Process GLOWS L1B direct events data.
+
+ Parameters
+ ----------
+ input_dataset : xr.Dataset
+ The input dataset to process.
+
+ Returns
+ -------
+ xr.Dataset
+ The processed L1B direct events dataset.
+ """
+ cdf_attrs = ImapCdfAttributes()
+ cdf_attrs.add_instrument_global_attrs("glows")
+ cdf_attrs.add_instrument_variable_attrs("glows", "l1b")
+
+ output_dataset = create_l1b_de_output(input_dataset, cdf_attrs)

  return output_dataset

imap_processing/hi/hi_l1a.py CHANGED
@@ -57,23 +57,23 @@ TOTAL_COUNTERS = ("a_total", "b_total", "c_total", "fee_de_recd", "fee_de_sent")
  # This is a mapping of variable name to index when the dump_data in the
  # HVSCI MEMDMP packet is interpreted as an array of uint32 values.
  MEMDMP_DATA_INDS = {
- "lastbin_shorten": 9,
+ "lastbin_shorten": 10,
  "coinc_length": 60,
  "de_timetag": 65,
- "ab_min": 67,
- "ab_max": 68,
- "ac_min": 69,
- "ac_max": 70,
- "ba_min": 71,
- "ba_max": 72,
- "bc_min": 73,
- "bc_max": 74,
- "ca_min": 75,
- "ca_max": 76,
- "cb_min": 77,
- "cb_max": 78,
- "cc_min": 79,
- "cc_max": 80,
+ "ab_max": 67,
+ "ab_min": 68,
+ "ac_max": 69,
+ "ac_min": 70,
+ "ba_max": 71,
+ "ba_min": 72,
+ "bc_max": 73,
+ "bc_min": 74,
+ "ca_max": 75,
+ "ca_min": 76,
+ "cb_max": 77,
+ "cb_min": 78,
+ "cc_max": 79,
+ "cc_min": 80,
  "cfd_dac_a": 82,
  "cfd_dac_b": 83,
  "cfd_dac_c": 84,
@@ -264,25 +264,40 @@ def create_de_dataset(de_data_dict: dict[str, npt.ArrayLike]) -> xr.Dataset:
  attrs=epoch_attrs,
  )

- event_met_attrs = attr_mgr.get_variable_attributes(
- "hi_de_event_met", check_schema=False
- )
- # For L1A DE, event_met is its own dimension, so we remove the DEPEND_0 attribute
- _ = event_met_attrs.pop("DEPEND_0")
-
  # Compute the meta-event MET in seconds
  meta_event_met = (
  np.array(de_data_dict["esa_step_seconds"]).astype(np.float64)
  + np.array(de_data_dict["esa_step_milliseconds"]) * MILLISECOND_TO_S
  )
- # Compute the MET of each event in seconds
- # event MET = meta_event_met + de_clock
- # See Hi Algorithm Document section 2.2.5
- event_met_array = np.array(
- meta_event_met[de_data_dict["ccsds_index"]]
- + np.array(de_data_dict["de_tag"]) * DE_CLOCK_TICK_S,
- dtype=event_met_attrs.pop("dtype"),
+
+ event_met_attrs = attr_mgr.get_variable_attributes(
+ "hi_de_event_met", check_schema=False
  )
+ # For L1A DE, event_met is its own dimension, so we remove the DEPEND_0 attribute
+ _ = event_met_attrs.pop("DEPEND_0")
+ event_met_dtype = event_met_attrs.pop("dtype")
+
+ # If there are no events, add a single event with fill values
+ if len(de_data_dict["de_tag"]) == 0:
+ logger.warning(
+ "No direct events found in SCIDE packets. "
+ "Creating a false DE entry with fill values."
+ )
+ for key in ["de_tag", "trigger_id", "tof_1", "tof_2", "tof_3", "ccsds_index"]:
+ attrs = attr_mgr.get_variable_attributes(f"hi_de_{key}", check_schema=False)
+ de_data_dict[key] = [attrs["FILLVAL"]]
+ event_met_array = np.array([event_met_attrs["FILLVAL"]], dtype=event_met_dtype)
+ else:
+ # Compute the MET of each event in seconds
+ # event MET = meta_event_met + de_clock
+ # See Hi Algorithm Document section 2.2.5
+ event_met_array = np.array(
+ meta_event_met[de_data_dict["ccsds_index"]]
+ + np.array(de_data_dict["de_tag"]) * DE_CLOCK_TICK_S,
+ dtype=event_met_dtype,
+ )
+
+ # Create the event_met coordinate
  event_met = xr.DataArray(
  event_met_array,
  name="event_met",
@@ -290,10 +305,12 @@ def create_de_dataset(de_data_dict: dict[str, npt.ArrayLike]) -> xr.Dataset:
  attrs=event_met_attrs,
  )

+ # Create a dataset with only coordinates
  dataset = xr.Dataset(
  coords={"epoch": epoch, "event_met": event_met},
  )

+ # Add variable to the dataset
  for var_name, data in de_data_dict.items():
  attrs = attr_mgr.get_variable_attributes(
  f"hi_de_{var_name}", check_schema=False
@@ -557,9 +574,12 @@ def finish_memdmp_dataset(input_ds: xr.Dataset) -> xr.Dataset:
  # offset index with a stride of the number of bytes in the dump
  # data divided by 4 (32-bit values).
  new_vars[new_var] = xr.DataArray(
- data=full_uint32_data[offset::index_stride],
+ data=full_uint32_data[offset::index_stride].astype(np.uint32),
  dims=["epoch"],
  )
+ # Need to add one to de_timetag value
+ if new_var == "de_timetag":
+ new_vars[new_var].data += 1

  # Remove binary memory dump data and add parsed variables
  dataset = dataset.drop("dump_data")
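
Note: a sketch of the memory-dump parsing pattern above, where the dump is viewed as 32-bit words and each variable is pulled out at its MEMDMP_DATA_INDS offset with a stride of one dump per epoch. The dump length and values are made up; only the offset-and-stride indexing and the de_timetag +1 adjustment come from the diff.

    import numpy as np

    words_per_dump = 100  # hypothetical dump length in 32-bit words
    n_epochs = 3
    full_uint32_data = np.arange(words_per_dump * n_epochs, dtype=np.uint32)

    offset = 65                 # e.g. MEMDMP_DATA_INDS["de_timetag"]
    index_stride = words_per_dump
    de_timetag = full_uint32_data[offset::index_stride].astype(np.uint32) + 1  # +1 per the diff
    print(de_timetag)  # [ 66 166 266]
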
imap_processing/hi/hi_l1b.py CHANGED
@@ -158,6 +158,27 @@ def annotate_direct_events(
  return [l1b_de_dataset]


+ def any_good_direct_events(dataset: xr.Dataset) -> bool:
+ """
+ Test dataset to see if there are any good direct events.
+
+ Datasets can have no good direct events when there were no DEs in a pointing.
+ In this case, due to restrictions with cdflib, we have to write a single
+ bad DE in the CDF.
+
+ Parameters
+ ----------
+ dataset : xarray.Dataset
+ Run the check on this dataset.
+
+ Returns
+ -------
+ any_good_events : bool
+ True if there is at least one good direct event. False otherwise.
+ """
+ return bool(np.any(dataset["trigger_id"] != dataset["trigger_id"].attrs["FILLVAL"]))
+
+
  def compute_coincidence_type_and_tofs(
  dataset: xr.Dataset,
  ) -> dict[str, xr.DataArray]:
@@ -190,6 +211,9 @@ def compute_coincidence_type_and_tofs(
  len(dataset.event_met),
  att_manager_lookup_str="hi_de_{0}",
  )
+ # Check for no valid direct events.
+ if not any_good_direct_events(dataset):
+ return new_vars

  # compute masks needed for coincidence type and ToF calculations
  a_first = dataset.trigger_id.values == TriggerId.A
@@ -301,6 +325,9 @@ def de_nominal_bin_and_spin_phase(dataset: xr.Dataset) -> dict[str, xr.DataArray
  len(dataset.event_met),
  att_manager_lookup_str="hi_de_{0}",
  )
+ # Check for no valid direct events.
+ if not any_good_direct_events(dataset):
+ return new_vars

  # nominal_bin is the index number of the 90 4-degree bins that each DE would
  # be binned into in the histogram packet. The Hi histogram data is binned by
@@ -345,6 +372,10 @@ def compute_hae_coordinates(dataset: xr.Dataset) -> dict[str, xr.DataArray]:
  len(dataset.event_met),
  att_manager_lookup_str="hi_de_{0}",
  )
+ # Check for no valid direct events.
+ if not any_good_direct_events(dataset):
+ return new_vars
+
  # Per Section 2.2.5 of Algorithm Document, add 1/2 of tick duration
  # to MET before computing pointing.
  sclk_ticks = met_to_sclkticks(dataset.event_met.values + HALF_CLOCK_TICK_S)
@@ -387,6 +418,9 @@ def de_esa_energy_step(
  len(l1b_de_ds.epoch),
  att_manager_lookup_str="hi_de_{0}",
  )
+ # Check for no valid direct events.
+ if not any_good_direct_events(l1b_de_ds):
+ return new_vars

  # Get the LUT object using the HK data and esa-energies ancillary csv
  esa_energies_lut = pd.read_csv(esa_energies_anc, comment="#")