imap-processing 0.19.3__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.


Files changed (39)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +132 -133
  3. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +133 -132
  4. imap_processing/cdf/config/imap_constant_attrs.yaml +1 -1
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +54 -60
  6. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -2
  7. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +5 -3
  8. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +1 -2
  9. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +1 -0
  10. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +8 -6
  11. imap_processing/cdf/utils.py +5 -0
  12. imap_processing/cli.py +72 -54
  13. imap_processing/codice/codice_l1a.py +82 -23
  14. imap_processing/codice/codice_l1b.py +35 -6
  15. imap_processing/codice/constants.py +173 -135
  16. imap_processing/ena_maps/ena_maps.py +15 -17
  17. imap_processing/glows/l1b/glows_l1b.py +29 -21
  18. imap_processing/hi/hi_l1a.py +49 -29
  19. imap_processing/hi/hi_l1b.py +34 -0
  20. imap_processing/hi/hi_l1c.py +23 -17
  21. imap_processing/hi/hi_l2.py +225 -81
  22. imap_processing/ialirt/l0/ialirt_spice.py +1 -1
  23. imap_processing/ialirt/l0/parse_mag.py +33 -0
  24. imap_processing/ialirt/utils/create_xarray.py +12 -1
  25. imap_processing/lo/l1b/lo_l1b.py +111 -77
  26. imap_processing/lo/l1c/lo_l1c.py +10 -11
  27. imap_processing/lo/l2/lo_l2.py +43 -22
  28. imap_processing/mag/l1c/interpolation_methods.py +9 -1
  29. imap_processing/mag/l1c/mag_l1c.py +99 -45
  30. imap_processing/spice/geometry.py +28 -19
  31. imap_processing/ultra/l1c/helio_pset.py +2 -2
  32. imap_processing/ultra/l1c/spacecraft_pset.py +7 -4
  33. imap_processing/ultra/l2/ultra_l2.py +54 -27
  34. imap_processing/ultra/utils/ultra_l1_utils.py +4 -4
  35. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/METADATA +1 -1
  36. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/RECORD +39 -39
  37. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/LICENSE +0 -0
  38. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/WHEEL +0 -0
  39. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/entry_points.txt +0 -0
imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml CHANGED
@@ -52,9 +52,8 @@ ena_intensity:
  DEPEND_2: pixel_index
  LABL_PTR_1: energy_label
  LABL_PTR_2: pixel_index_label
- DISPLAY_TYPE: image

- ena_intensity_stat_unc:
+ ena_intensity_stat_uncert:
  DEPEND_1: energy
  DEPEND_2: pixel_index
  LABL_PTR_1: energy_label
imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml CHANGED
@@ -19,9 +19,10 @@ longitude_label:
  DEPEND_1: longitude

  longitude_delta:
- VAR_TYPE: metadata
+ VAR_TYPE: support_data
  dtype: float32
  CATDESC: Half-width of longitude pixel
+ DEPEND_1: longitude
  FORMAT: F12.6
  UNITS: degrees
  FIELDNAM: longitude delta
@@ -35,9 +36,10 @@ latitude_label:
  DEPEND_1: latitude

  latitude_delta:
- VAR_TYPE: metadata
+ VAR_TYPE: support_data
  dtype: float32
  CATDESC: Half-width of latitude pixel
+ DEPEND_1: latitude
  FORMAT: F12.6
  UNITS: degrees
  FIELDNAM: latitude delta
@@ -65,7 +67,7 @@ ena_intensity:
  LABL_PTR_2: longitude_label
  LABL_PTR_3: latitude_label

- ena_intensity_stat_unc:
+ ena_intensity_stat_uncert:
  DEPEND_1: energy
  DEPEND_2: longitude
  DEPEND_3: latitude
imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml CHANGED
@@ -54,8 +54,7 @@ imap_hi_l1c_pset_attrs:
  Logical_source: imap_hi_l1c_{sensor}-pset
  Logical_source_description: IMAP-Hi Instrument Level-1C Pointing Set Data.

- # TODO: Finalize these global attributes
- imap_hi_l2_enamap-sf:
+ imap_hi_l2_enamap:
  Data_type: L2_{descriptor}>Level-2 ENA Intensity Map for Hi{sensor}
  Logical_source: imap_hi_l2_{descriptor}
  Logical_source_description: IMAP-Hi Instrument Level-2 ENA Intensity Map Data for Hi{sensor} on rectangular tiling.
imap_processing/cdf/config/imap_hi_variable_attrs.yaml CHANGED
@@ -252,6 +252,7 @@ hi_de_ccsds_index:
  # LABL_PTR_i expects VAR_TYPE of metadata with char data type
  hi_hist_angle_label:
  CATDESC: Angle bin centers for histogram data.
+ DEPEND_1: angle
  FIELDNAM: ANGLE
  FORMAT: A5
  VAR_TYPE: metadata
imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml CHANGED
@@ -65,15 +65,15 @@ dead_time_ratio:

  sensitivity:
  <<: *energy_dependent
- CATDESC: Calibration/sensitivity factor.
+ CATDESC: Averaged instrument sensitive area.
  FIELDNAM: sensitivity
  LABLAXIS: sensitivity
  # TODO: come back to format
- UNITS: counts/second
+ UNITS: cm^2

  geometric_function:
  <<: *energy_dependent
- CATDESC: The effective sensitive area as a function of theta and phi in instrument frame (energy independent).
+ CATDESC: Averaged aperture area as seen by the backstop.
  FIELDNAM: geometric_function
  LABLAXIS: Geometric Factor
  UNITS: cm^2
@@ -81,7 +81,7 @@ geometric_function:

  efficiency:
  <<: *energy_dependent
- CATDESC: Estimated event efficiency for particles path through the instrument.
+ CATDESC: Efficiency of the instrument in converting ENAs to valid events.
  FIELDNAM: efficiency
  LABLAXIS: Efficiency
  UNITS: " "
@@ -90,6 +90,7 @@ efficiency:

  exposure_factor:
  <<: *energy_dependent
+ DEPEND_0: epoch
  CATDESC: Exposure time with the deadtime correction applied for a pointing.
  FIELDNAM: exposure_factor
  LABLAXIS: exposure factor
@@ -101,19 +102,20 @@ helio_exposure_factor:
  CATDESC: Exposure time with the deadtime correction applied for a pointing.
  FIELDNAM: exposure_factor
  LABLAXIS: exposure factor
+ DEPEND_0: epoch
  # TODO: come back to format
  UNITS: seconds

  scatter_theta:
  <<: *energy_dependent
- CATDESC: Scattering theta values for a pointing.
+ CATDESC: Scattering theta values for a pointing (reported as gaussian uncertainty).
  FIELDNAM: scatter_theta
  LABLAXIS: scatter theta
  UNITS: degrees

  scatter_phi:
  <<: *energy_dependent
- CATDESC: Scattering phi values for a pointing.
+ CATDESC: Scattering phi values for a pointing (reported as gaussian uncertainty).
  FIELDNAM: scatter_phi
  LABLAXIS: scatter phi
  UNITS: degrees
imap_processing/cdf/utils.py CHANGED
@@ -2,6 +2,7 @@

  from __future__ import annotations

+ import datetime
  import logging
  import re
  import warnings
@@ -148,6 +149,10 @@ def write_cdf(
  dataset.attrs["Logical_file_id"] = file_path.stem
  # Add the processing version to the dataset attributes
  dataset.attrs["ground_software_version"] = imap_processing._version.__version__
+ dataset.attrs["Generation_date"] = datetime.datetime.now(
+ datetime.timezone.utc
+ ).strftime("%Y%m%d")
+ dataset.attrs["Generated_by"] = "IMAP Science Data Center"

  # Convert the xarray object to a CDF
  if "l1" in data_level:
imap_processing/cli.py CHANGED
@@ -51,7 +51,7 @@ from imap_processing.cdf.utils import load_cdf, write_cdf
  # call cdf.utils.write_cdf
  from imap_processing.codice import codice_l1a, codice_l1b, codice_l2
  from imap_processing.glows.l1a.glows_l1a import glows_l1a
- from imap_processing.glows.l1b.glows_l1b import glows_l1b
+ from imap_processing.glows.l1b.glows_l1b import glows_l1b, glows_l1b_de
  from imap_processing.glows.l2.glows_l2 import glows_l2
  from imap_processing.hi import hi_l1a, hi_l1b, hi_l1c, hi_l2
  from imap_processing.hit.l1a.hit_l1a import hit_l1a
@@ -682,55 +682,60 @@ class Glows(ProcessInstrument):
  f"{science_files}."
  )
  input_dataset = load_cdf(science_files[0])
+ if "hist" in self.descriptor:
+ # Create file lists for each ancillary type
+ excluded_regions_files = dependencies.get_processing_inputs(
+ descriptor="map-of-excluded-regions"
+ )[0]
+ uv_sources_files = dependencies.get_processing_inputs(
+ descriptor="map-of-uv-sources"
+ )[0]
+ suspected_transients_files = dependencies.get_processing_inputs(
+ descriptor="suspected-transients"
+ )[0]
+ exclusions_by_instr_team_files = dependencies.get_processing_inputs(
+ descriptor="exclusions-by-instr-team"
+ )[0]
+ pipeline_settings = dependencies.get_processing_inputs(
+ descriptor="pipeline-settings"
+ )[0]

- # Create file lists for each ancillary type
- excluded_regions_files = dependencies.get_processing_inputs(
- descriptor="map-of-excluded-regions"
- )[0]
- uv_sources_files = dependencies.get_processing_inputs(
- descriptor="map-of-uv-sources"
- )[0]
- suspected_transients_files = dependencies.get_processing_inputs(
- descriptor="suspected-transients"
- )[0]
- exclusions_by_instr_team_files = dependencies.get_processing_inputs(
- descriptor="exclusions-by-instr-team"
- )[0]
- pipeline_settings = dependencies.get_processing_inputs(
- descriptor="pipeline-settings"
- )[0]
-
- # Use end date buffer for ancillary data
- current_day = np.datetime64(
- f"{self.start_date[:4]}-{self.start_date[4:6]}-{self.start_date[6:]}"
- )
- day_buffer = current_day + np.timedelta64(3, "D")
-
- # Create combiners for each ancillary dataset
- excluded_regions_combiner = GlowsAncillaryCombiner(
- excluded_regions_files, day_buffer
- )
- uv_sources_combiner = GlowsAncillaryCombiner(uv_sources_files, day_buffer)
- suspected_transients_combiner = GlowsAncillaryCombiner(
- suspected_transients_files, day_buffer
- )
- exclusions_by_instr_team_combiner = GlowsAncillaryCombiner(
- exclusions_by_instr_team_files, day_buffer
- )
- pipeline_settings_combiner = GlowsAncillaryCombiner(
- pipeline_settings, day_buffer
- )
+ # Use end date buffer for ancillary data
+ current_day = np.datetime64(
+ f"{self.start_date[:4]}-{self.start_date[4:6]}-{self.start_date[6:]}"
+ )
+ day_buffer = current_day + np.timedelta64(3, "D")

- datasets = [
- glows_l1b(
- input_dataset,
- excluded_regions_combiner.combined_dataset,
- uv_sources_combiner.combined_dataset,
- suspected_transients_combiner.combined_dataset,
- exclusions_by_instr_team_combiner.combined_dataset,
- pipeline_settings_combiner.combined_dataset,
+ # Create combiners for each ancillary dataset
+ excluded_regions_combiner = GlowsAncillaryCombiner(
+ excluded_regions_files, day_buffer
+ )
+ uv_sources_combiner = GlowsAncillaryCombiner(
+ uv_sources_files, day_buffer
+ )
+ suspected_transients_combiner = GlowsAncillaryCombiner(
+ suspected_transients_files, day_buffer
+ )
+ exclusions_by_instr_team_combiner = GlowsAncillaryCombiner(
+ exclusions_by_instr_team_files, day_buffer
+ )
+ pipeline_settings_combiner = GlowsAncillaryCombiner(
+ pipeline_settings, day_buffer
  )
- ]
+
+ datasets = [
+ glows_l1b(
+ input_dataset,
+ excluded_regions_combiner.combined_dataset,
+ uv_sources_combiner.combined_dataset,
+ suspected_transients_combiner.combined_dataset,
+ exclusions_by_instr_team_combiner.combined_dataset,
+ pipeline_settings_combiner.combined_dataset,
+ )
+ ]
+ else:
+ # Direct events
+ datasets = [glows_l1b_de(input_dataset)]

  if self.data_level == "l2":
  science_files = dependencies.get_file_paths(source="glows")
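In short, the GLOWS L1B step now branches on the product descriptor. A minimal sketch of that dispatch, with the two pipeline functions stubbed out as strings since the real glows_l1b call needs the five combined ancillary datasets:

    def select_glows_l1b_pipeline(descriptor: str) -> str:
        # Histogram products take the ancillary-combining glows_l1b path;
        # anything else is treated as direct events via glows_l1b_de.
        if "hist" in descriptor:
            return "glows_l1b"
        return "glows_l1b_de"

    assert select_glows_l1b_pipeline("hist") == "glows_l1b"
    assert select_glows_l1b_pipeline("de") == "glows_l1b_de"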
@@ -804,13 +809,26 @@ class Hi(ProcessInstrument):
  datasets = hi_l1c.hi_l1c(load_cdf(science_paths[0]), anc_paths[0])
  elif self.data_level == "l2":
  science_paths = dependencies.get_file_paths(source="hi", data_type="l1c")
- # TODO get ancillary paths
- geometric_factors_path = ""
- esa_energies_path = ""
+ anc_dependencies = dependencies.get_processing_inputs(data_type="ancillary")
+ if len(anc_dependencies) != 3:
+ raise ValueError(
+ f"Expected three ancillary dependencies for L2 processing including"
+ f"cal-prod, esa-energies, and esa-eta-fit-factors."
+ f"Got {[anc_dep.descriptor for anc_dep in anc_dependencies]}"
+ "."
+ )
+ # Get individual L2 ancillary dependencies
+ # Strip the "45sensor" or "90sensor" off the ancillary descriptor and
+ # create a mapping from descriptor to path
+ l2_ancillary_path_dict = {
+ "-".join(dep.descriptor.split("-")[1:]): dep.imap_file_paths[
+ 0
+ ].construct_path()
+ for dep in anc_dependencies
+ }
  datasets = hi_l2.hi_l2(
  science_paths,
- geometric_factors_path,
- esa_energies_path,
+ l2_ancillary_path_dict,
  self.descriptor,
  )
  else:
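The new ancillary handling keys each input by its descriptor minus the sensor prefix. A toy illustration of that key derivation, using hypothetical descriptor strings (the real values come from dependencies.get_processing_inputs and the paths from construct_path()):

    # Hypothetical Hi L2 ancillary descriptors; only the string handling
    # mirrors the dict comprehension in the hunk above.
    descriptors = [
        "45sensor-cal-prod",
        "45sensor-esa-energies",
        "45sensor-esa-eta-fit-factors",
    ]
    l2_ancillary_keys = {"-".join(d.split("-")[1:]): d for d in descriptors}
    print(l2_ancillary_keys["cal-prod"])  # "45sensor-cal-prod"
    print(sorted(l2_ancillary_keys))
    # ['cal-prod', 'esa-energies', 'esa-eta-fit-factors']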
@@ -1119,9 +1137,9 @@ class Mag(ProcessInstrument):
  input_data = [load_cdf(dep) for dep in science_files]
  # Input datasets can be in any order, and are validated within mag_l1c
  if len(input_data) == 1:
- datasets = [mag_l1c(input_data[0])]
+ datasets = [mag_l1c(input_data[0], current_day)]
  elif len(input_data) == 2:
- datasets = [mag_l1c(input_data[0], input_data[1])]
+ datasets = [mag_l1c(input_data[0], current_day, input_data[1])]
  else:
  raise ValueError(
  f"Invalid dependencies found for MAG L1C:"
imap_processing/codice/codice_l1a.py CHANGED
@@ -89,7 +89,7 @@ class CoDICEL1aPipeline:
  self.plan_step = plan_step
  self.view_id = view_id

- def apply_despinning(self) -> None:
+ def apply_despinning(self) -> None:  # noqa: PLR0912 (too many branches)
  """
  Apply the despinning algorithm to lo- angular and priority products.

@@ -108,10 +108,10 @@ class CoDICEL1aPipeline:
  # The dimensions are dependent on the specific data product
  if "angular" in self.config["dataset_name"]:
  despun_dims: tuple[int, ...] = (
+ num_counters,
  num_energies,
  num_positions,
  num_spins,
- num_counters,
  )
  elif "priority" in self.config["dataset_name"]:
  despun_dims = (num_energies, num_spins, num_counters)
@@ -130,23 +130,33 @@ class CoDICEL1aPipeline:
  for energy_index in range(num_energies):
  pixel_orientation = constants.PIXEL_ORIENTATIONS[energy_index]
  for spin_sector_index in range(num_spin_sectors):
- for azimuth_index in range(num_spins):
- if pixel_orientation == "A" and azimuth_index < 12:
+ for azimuth_index in range(num_positions):
+ if "-sw-" in self.config["dataset_name"]:
+ # do something
+ position_index = constants.SW_INDEX_TO_POSITION[
+ azimuth_index
+ ]
+ elif "-nsw-" in self.config["dataset_name"]:
+ position_index = constants.NSW_INDEX_TO_POSITION[
+ azimuth_index
+ ]
+
+ if pixel_orientation == "A" and position_index < 12:
  despun_spin_sector = spin_sector_index
- elif pixel_orientation == "A" and azimuth_index >= 12:
+ elif pixel_orientation == "A" and position_index >= 12:
  despun_spin_sector = spin_sector_index + 12
- elif pixel_orientation == "B" and azimuth_index < 12:
+ elif pixel_orientation == "B" and position_index < 12:
  despun_spin_sector = spin_sector_index + 12
- elif pixel_orientation == "B" and azimuth_index >= 12:
+ elif pixel_orientation == "B" and position_index >= 12:
  despun_spin_sector = spin_sector_index

  if "angular" in self.config["dataset_name"]:
  spin_data = epoch_data[
- energy_index, :, spin_sector_index, :
- ] # (5, 4)
- despun_data[i][energy_index, :, despun_spin_sector, :] = (
- spin_data
- )
+ :, energy_index, azimuth_index, spin_sector_index
+ ]
+ despun_data[i][
+ :, energy_index, azimuth_index, despun_spin_sector
+ ] = spin_data
  elif "priority" in self.config["dataset_name"]:
  spin_data = epoch_data[energy_index, spin_sector_index, :]
  despun_data[i][energy_index, despun_spin_sector, :] = (
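The despun sector assignment above reduces to a small pure function. A sketch, assuming 24 spin sectors split into two half-spins of 12, as the A/B branches imply:

    def despun_spin_sector(
        pixel_orientation: str, position_index: int, spin_sector_index: int
    ) -> int:
        # Orientation "A": positions 0-11 keep their sector; positions 12+
        # shift by a half spin. Orientation "B" is the mirror image.
        if pixel_orientation == "A":
            return (
                spin_sector_index
                if position_index < 12
                else spin_sector_index + 12
            )
        return (
            spin_sector_index + 12
            if position_index < 12
            else spin_sector_index
        )

    assert despun_spin_sector("A", 3, 5) == 5
    assert despun_spin_sector("B", 3, 5) == 17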
@@ -325,11 +335,25 @@ class CoDICEL1aPipeline:
  # different depending on the data product). In any case, iterate over
  # the num_counters dimension to isolate the data for each counter so
  # each counter's data can be placed in a separate CDF data variable.
+ # For Lo SW species, all_data has shape (9, 16, 128, 1) -> (epochs,
+ # num_counters, num_energy_steps, num_spin_sectors)
+ if self._is_different_dimension():
+ # For Lo species datasets, counters are the second dimension (index 1)
+ num_counters = all_data.shape[1]
+ else:
+ # For all other datasets, counters are the last dimension
+ num_counters = all_data.shape[-1]
+
  for counter, variable_name in zip(
- range(all_data.shape[-1]), self.config["variable_names"], strict=False
+ range(num_counters), self.config["variable_names"], strict=False
  ):
  # Extract the counter data
- counter_data = all_data[..., counter]
+ if self._is_different_dimension():
+ counter_data = all_data[:, counter, :, :]
+ elif "sectored" in self.config["dataset_name"]:
+ counter_data = all_data[:, counter, :, :, :]
+ else:
+ counter_data = all_data[..., counter]

  # Get the CDF attributes
  descriptor = self.config["dataset_name"].split("imap_codice_l1a_")[-1]
@@ -708,10 +732,25 @@ class CoDICEL1aPipeline:

  # Reshape the data based on how it is written to the data array of
  # the packet data. The number of counters is the last dimension / axis.
- reshape_dims = (
- *self.config["dims"].values(),
- self.config["num_counters"],
- )
+ if self._is_different_dimension():
+ # For Lo species datasets, counters are the first dimension
+ reshape_dims = (
+ self.config["num_counters"],
+ *self.config["dims"].values(),
+ )
+ elif "sectored" in self.config["dataset_name"]:
+ # For sectored datasets, counters are the second dimension
+ reshape_dims = (
+ self.config["num_counters"],
+ *self.config["dims"].values(),
+ )
+ else:
+ # For all other datasets, counters are the last dimension
+ reshape_dims = (
+ *self.config["dims"].values(),
+ self.config["num_counters"],
+ )
+
  for packet_data in self.raw_data:
  reshaped_packet_data = np.array(packet_data, dtype=np.uint32).reshape(
  reshape_dims
@@ -725,6 +764,26 @@ class CoDICEL1aPipeline:
  # No longer need to keep the raw data around
  del self.raw_data

+ def _is_different_dimension(self) -> bool:
+ """
+ Check if the current dataset is a Lo species dataset.
+
+ Lo species datasets have a different data structure where counters are the
+ second dimension (index 1) instead of the last dimension.
+
+ Returns
+ -------
+ bool
+ True if the dataset is a Lo species dataset
+ (lo-sw-species or lo-nsw-species), False otherwise.
+ """
+ return self.config["dataset_name"] in [
+ "imap_codice_l1a_lo-sw-species",
+ "imap_codice_l1a_lo-nsw-species",
+ "imap_codice_l1a_lo-sw-angular",
+ "imap_codice_l1a_lo-nsw-angular",
+ ]
+
  def set_data_product_config(self, apid: int, dataset: xr.Dataset) -> None:
  """
  Set the various settings for defining the data products.
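A toy illustration of why _is_different_dimension matters: the counter axis moves, so the reshape and the per-counter slicing must agree on its position. Shapes here are made up for illustration:

    import numpy as np

    num_counters, other_dims = 2, (4, 3)
    flat = np.arange(num_counters * 4 * 3, dtype=np.uint32)

    counters_first = flat.reshape((num_counters, *other_dims))  # Lo species/angular
    counters_last = flat.reshape((*other_dims, num_counters))   # everything else

    # Selecting one counter's data then indexes a different axis:
    assert counters_first[0].shape == other_dims
    assert counters_last[..., 0].shape == other_dims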
@@ -1615,23 +1674,23 @@ def process_codice_l1a(file_path: Path) -> list[xr.Dataset]:
  # Housekeeping data
  if apid == CODICEAPID.COD_NHK:
  processed_dataset = create_hskp_dataset(dataset)
- logger.info(f"\nFinal data product:\n{processed_dataset}\n")
+ logger.info(f"\nProcessed {CODICEAPID(apid).name} packet\n")

  # Event data
  elif apid in [CODICEAPID.COD_LO_PHA, CODICEAPID.COD_HI_PHA]:
  processed_dataset = create_direct_event_dataset(apid, dataset)
- logger.info(f"\nFinal data product:\n{processed_dataset}\n")
+ logger.info(f"\nProcessed {CODICEAPID(apid).name} packet\n")

  # I-ALiRT data
  elif apid in [CODICEAPID.COD_LO_IAL, CODICEAPID.COD_HI_IAL]:
  processed_dataset = create_ialirt_dataset(apid, dataset)
- logger.info(f"\nFinal data product:\n{processed_dataset}\n")
+ logger.info(f"\nProcessed {CODICEAPID(apid).name} packet\n")

  # hi-omni data
  elif apid == CODICEAPID.COD_HI_OMNI_SPECIES_COUNTS:
  science_values = [packet.data for packet in dataset.data]
  processed_dataset = create_binned_dataset(apid, dataset, science_values)
- logger.info(f"\nFinal data product:\n{processed_dataset}\n")
+ logger.info(f"\nProcessed {CODICEAPID(apid).name} packet\n")

  # Everything else
  elif apid in constants.APIDS_FOR_SCIENCE_PROCESSING:
@@ -1649,7 +1708,7 @@ def process_codice_l1a(file_path: Path) -> list[xr.Dataset]:
  pipeline.define_coordinates()
  processed_dataset = pipeline.define_data_variables()

- logger.info(f"\nFinal data product:\n{processed_dataset}\n")
+ logger.info(f"\nProcessed {CODICEAPID(apid).name} packet\n")

  # For APIDs that don't require processing
  else:
imap_processing/codice/codice_l1b.py CHANGED
@@ -56,8 +56,6 @@ def convert_to_rates(
  "lo-sw-angular",
  "lo-nsw-priority",
  "lo-sw-priority",
- "lo-nsw-species",
- "lo-sw-species",
  "lo-ialirt",
  ]:
  # Applying rate calculation described in section 10.2 of the algorithm
@@ -66,14 +64,35 @@
  # time data array to match the data variable shape
  dims = [1] * dataset[variable_name].data.ndim
  dims[1] = 128
- acq_times = dataset.acquisition_time_per_step.data.reshape(dims)
-
+ acq_times = dataset.acquisition_time_per_step.data.reshape(dims) # (128)
  # Now perform the calculation
  rates_data = dataset[variable_name].data / (
  acq_times
- * 1e-6 # Converting from microseconds to seconds
+ * 1e-3 # Converting from milliseconds to seconds
  * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
  )
+ elif descriptor in [
+ "lo-nsw-species",
+ "lo-sw-species",
+ ]:
+ # Applying rate calculation described in section 10.2 of the algorithm
+ # document
+ # In order to divide by acquisition times, we must reshape the acq
+ # time data array to match the data variable shape (epoch, esa_step, sector)
+ dims = [1] * dataset[variable_name].data.ndim
+ dims[1] = 128
+ acq_times = dataset.acquisition_time_per_step.data.reshape(dims) # (128)
+ # acquisition time have an array of shape (128,). We match n_sector to that.
+ # Per CoDICE, fill first 127 with default value of 12. Then fill last with 11.
+ n_sector = np.full(128, 12, dtype=int)
+ n_sector[-1] = 11
+
+ # Now perform the calculation
+ rates_data = dataset[variable_name].data / (
+ acq_times
+ * 1e-3 # Converting from milliseconds to seconds
+ * n_sector[:, np.newaxis] # Spin sectors
+ )
  elif descriptor in [
  "hi-counters-aggregated",
  "hi-counters-singles",
@@ -164,13 +183,23 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
  l1b_dataset[variable_name].data = convert_to_rates(
  l1b_dataset, descriptor, variable_name
  )
-
  # Set the variable attributes
  cdf_attrs_key = f"{descriptor}-{variable_name}"
  l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
  cdf_attrs_key, check_schema=False
  )

+ if descriptor in ["lo-sw-species", "lo-nsw-species"]:
+ # Do not carry these variable attributes from L1a to L1b
+ drop_variables = [
+ "k_factor",
+ "nso_half_spin",
+ "sw_bias_gain_mode",
+ "st_bias_gain_mode",
+ "spin_period",
+ ]
+ l1b_dataset = l1b_dataset.drop_vars(drop_variables)
+
  logger.info(f"\nFinal data product:\n{l1b_dataset}\n")

  return l1b_dataset
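Finally, a minimal example of the drop_vars pattern used above to keep L1a-only support variables out of the L1b product (the dropped names come from the diff; "counts" and the data values are placeholders):

    import numpy as np
    import xarray as xr

    l1b_dataset = xr.Dataset(
        {
            "counts": ("epoch", np.ones(3)),
            "k_factor": ("epoch", np.zeros(3)),
            "spin_period": ("epoch", np.full(3, 15.0)),
        }
    )
    # Drop the L1a support variables rather than carrying them into the L1b CDF.
    l1b_dataset = l1b_dataset.drop_vars(["k_factor", "spin_period"])
    print(list(l1b_dataset.data_vars))  # ['counts']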