imap-processing 0.7.0-py3-none-any.whl → 0.9.0-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

This version of imap-processing might be problematic.
Files changed (172)
  1. imap_processing/__init__.py +1 -1
  2. imap_processing/_version.py +2 -2
  3. imap_processing/ccsds/excel_to_xtce.py +36 -2
  4. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +1 -1
  5. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +145 -30
  6. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +36 -36
  7. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +136 -9
  8. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +14 -0
  9. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +63 -1
  10. imap_processing/cdf/config/imap_hit_l1b_variable_attrs.yaml +9 -0
  11. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +14 -7
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +577 -235
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +326 -0
  14. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +33 -23
  15. imap_processing/cdf/config/imap_mag_l1_variable_attrs.yaml +24 -28
  16. imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +1 -0
  17. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +137 -79
  18. imap_processing/cdf/config/imap_variable_schema.yaml +13 -0
  19. imap_processing/cdf/imap_cdf_manager.py +31 -27
  20. imap_processing/cdf/utils.py +3 -5
  21. imap_processing/cli.py +25 -14
  22. imap_processing/codice/codice_l1a.py +153 -63
  23. imap_processing/codice/constants.py +10 -10
  24. imap_processing/codice/decompress.py +10 -11
  25. imap_processing/codice/utils.py +1 -0
  26. imap_processing/glows/l1a/glows_l1a.py +1 -2
  27. imap_processing/glows/l1b/glows_l1b.py +3 -3
  28. imap_processing/glows/l1b/glows_l1b_data.py +59 -37
  29. imap_processing/glows/l2/glows_l2_data.py +123 -0
  30. imap_processing/hi/l1a/hi_l1a.py +4 -4
  31. imap_processing/hi/l1a/histogram.py +107 -109
  32. imap_processing/hi/l1a/science_direct_event.py +92 -225
  33. imap_processing/hi/l1b/hi_l1b.py +85 -11
  34. imap_processing/hi/l1c/hi_l1c.py +23 -1
  35. imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +3994 -0
  36. imap_processing/hi/utils.py +1 -1
  37. imap_processing/hit/hit_utils.py +221 -0
  38. imap_processing/hit/l0/constants.py +118 -0
  39. imap_processing/hit/l0/decom_hit.py +100 -156
  40. imap_processing/hit/l1a/hit_l1a.py +170 -184
  41. imap_processing/hit/l1b/hit_l1b.py +33 -153
  42. imap_processing/ialirt/l0/process_codicelo.py +153 -0
  43. imap_processing/ialirt/l0/process_hit.py +5 -5
  44. imap_processing/ialirt/packet_definitions/ialirt_codicelo.xml +281 -0
  45. imap_processing/ialirt/process_ephemeris.py +212 -0
  46. imap_processing/idex/idex_l1a.py +65 -84
  47. imap_processing/idex/idex_l1b.py +192 -0
  48. imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +33 -0
  49. imap_processing/idex/packet_definitions/idex_packet_definition.xml +97 -595
  50. imap_processing/lo/l0/decompression_tables/decompression_tables.py +17 -1
  51. imap_processing/lo/l0/lo_science.py +45 -13
  52. imap_processing/lo/l1a/lo_l1a.py +76 -8
  53. imap_processing/lo/packet_definitions/lo_xtce.xml +8344 -1849
  54. imap_processing/mag/l0/decom_mag.py +4 -3
  55. imap_processing/mag/l1a/mag_l1a.py +12 -13
  56. imap_processing/mag/l1a/mag_l1a_data.py +1 -2
  57. imap_processing/mag/l1b/mag_l1b.py +90 -7
  58. imap_processing/spice/geometry.py +156 -16
  59. imap_processing/spice/time.py +144 -2
  60. imap_processing/swapi/l1/swapi_l1.py +4 -4
  61. imap_processing/swapi/l2/swapi_l2.py +1 -1
  62. imap_processing/swapi/packet_definitions/swapi_packet_definition.xml +1535 -446
  63. imap_processing/swe/l1b/swe_l1b_science.py +8 -8
  64. imap_processing/swe/l2/swe_l2.py +134 -17
  65. imap_processing/tests/ccsds/test_data/expected_output.xml +2 -1
  66. imap_processing/tests/ccsds/test_excel_to_xtce.py +4 -4
  67. imap_processing/tests/cdf/test_imap_cdf_manager.py +0 -10
  68. imap_processing/tests/codice/conftest.py +1 -17
  69. imap_processing/tests/codice/data/imap_codice_l0_raw_20241110_v001.pkts +0 -0
  70. imap_processing/tests/codice/test_codice_l0.py +8 -2
  71. imap_processing/tests/codice/test_codice_l1a.py +127 -107
  72. imap_processing/tests/codice/test_codice_l1b.py +1 -0
  73. imap_processing/tests/codice/test_decompress.py +7 -7
  74. imap_processing/tests/conftest.py +100 -58
  75. imap_processing/tests/glows/conftest.py +6 -0
  76. imap_processing/tests/glows/test_glows_l1b.py +9 -9
  77. imap_processing/tests/glows/test_glows_l1b_data.py +9 -9
  78. imap_processing/tests/hi/test_data/l0/H90_NHK_20241104.bin +0 -0
  79. imap_processing/tests/hi/test_data/l0/H90_sci_cnt_20241104.bin +0 -0
  80. imap_processing/tests/hi/test_data/l0/H90_sci_de_20241104.bin +0 -0
  81. imap_processing/tests/hi/test_data/l1a/imap_hi_l1a_45sensor-de_20250415_v000.cdf +0 -0
  82. imap_processing/tests/hi/test_hi_l1b.py +73 -3
  83. imap_processing/tests/hi/test_hi_l1c.py +10 -2
  84. imap_processing/tests/hi/test_l1a.py +31 -58
  85. imap_processing/tests/hi/test_science_direct_event.py +58 -0
  86. imap_processing/tests/hi/test_utils.py +4 -3
  87. imap_processing/tests/hit/test_data/sci_sample1.ccsds +0 -0
  88. imap_processing/tests/hit/{test_hit_decom.py → test_decom_hit.py} +95 -36
  89. imap_processing/tests/hit/test_hit_l1a.py +299 -179
  90. imap_processing/tests/hit/test_hit_l1b.py +231 -24
  91. imap_processing/tests/hit/test_hit_utils.py +218 -0
  92. imap_processing/tests/hit/validation_data/hskp_sample_eu.csv +89 -0
  93. imap_processing/tests/hit/validation_data/sci_sample_raw1.csv +29 -0
  94. imap_processing/tests/ialirt/test_data/l0/apid01152.tlm +0 -0
  95. imap_processing/tests/ialirt/test_data/l0/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
  96. imap_processing/tests/ialirt/unit/test_process_codicelo.py +106 -0
  97. imap_processing/tests/ialirt/unit/test_process_ephemeris.py +109 -0
  98. imap_processing/tests/ialirt/unit/test_process_hit.py +9 -6
  99. imap_processing/tests/idex/conftest.py +2 -2
  100. imap_processing/tests/idex/imap_idex_l0_raw_20231214_v001.pkts +0 -0
  101. imap_processing/tests/idex/impact_14_tof_high_data.txt +4444 -4444
  102. imap_processing/tests/idex/test_idex_l0.py +4 -4
  103. imap_processing/tests/idex/test_idex_l1a.py +8 -2
  104. imap_processing/tests/idex/test_idex_l1b.py +126 -0
  105. imap_processing/tests/lo/test_lo_l1a.py +7 -16
  106. imap_processing/tests/lo/test_lo_science.py +69 -5
  107. imap_processing/tests/lo/test_pkts/imap_lo_l0_raw_20240803_v002.pkts +0 -0
  108. imap_processing/tests/lo/validation_data/Instrument_FM1_T104_R129_20240803_ILO_SCI_DE_dec_DN_with_fills.csv +1999 -0
  109. imap_processing/tests/mag/imap_mag_l1a_norm-magi_20251017_v001.cdf +0 -0
  110. imap_processing/tests/mag/test_mag_l1b.py +97 -7
  111. imap_processing/tests/spice/test_data/imap_ena_sim_metakernel.template +3 -1
  112. imap_processing/tests/spice/test_geometry.py +115 -9
  113. imap_processing/tests/spice/test_time.py +135 -6
  114. imap_processing/tests/swapi/test_swapi_decom.py +75 -69
  115. imap_processing/tests/swapi/test_swapi_l1.py +4 -4
  116. imap_processing/tests/swe/conftest.py +33 -0
  117. imap_processing/tests/swe/l1_validation/swe_l0_unpacked-data_20240510_v001_VALIDATION_L1B_v3.dat +4332 -0
  118. imap_processing/tests/swe/test_swe_l1b.py +29 -8
  119. imap_processing/tests/swe/test_swe_l2.py +64 -8
  120. imap_processing/tests/test_utils.py +2 -2
  121. imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3314 -3314
  122. imap_processing/tests/ultra/test_data/l1/dps_exposure_helio_45_E12.cdf +0 -0
  123. imap_processing/tests/ultra/test_data/l1/dps_exposure_helio_45_E24.cdf +0 -0
  124. imap_processing/tests/ultra/unit/test_de.py +113 -0
  125. imap_processing/tests/ultra/unit/test_spatial_utils.py +125 -0
  126. imap_processing/tests/ultra/unit/test_ultra_l1b.py +27 -3
  127. imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +31 -10
  128. imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +55 -35
  129. imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +10 -68
  130. imap_processing/ultra/constants.py +12 -3
  131. imap_processing/ultra/l1b/de.py +168 -30
  132. imap_processing/ultra/l1b/ultra_l1b_annotated.py +24 -10
  133. imap_processing/ultra/l1b/ultra_l1b_extended.py +46 -80
  134. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +60 -144
  135. imap_processing/ultra/utils/spatial_utils.py +221 -0
  136. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/METADATA +15 -14
  137. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/RECORD +142 -139
  138. imap_processing/cdf/cdf_attribute_manager.py +0 -322
  139. imap_processing/cdf/config/shared/default_global_cdf_attrs_schema.yaml +0 -246
  140. imap_processing/cdf/config/shared/default_variable_cdf_attrs_schema.yaml +0 -466
  141. imap_processing/hi/l0/decom_hi.py +0 -24
  142. imap_processing/hi/packet_definitions/hi_packet_definition.xml +0 -482
  143. imap_processing/hit/l0/data_classes/housekeeping.py +0 -240
  144. imap_processing/hit/l0/data_classes/science_packet.py +0 -259
  145. imap_processing/hit/l0/utils/hit_base.py +0 -57
  146. imap_processing/tests/cdf/shared/default_global_cdf_attrs_schema.yaml +0 -246
  147. imap_processing/tests/cdf/shared/default_variable_cdf_attrs_schema.yaml +0 -466
  148. imap_processing/tests/cdf/test_cdf_attribute_manager.py +0 -353
  149. imap_processing/tests/codice/data/imap_codice_l0_hi-counters-aggregated_20240429_v001.pkts +0 -0
  150. imap_processing/tests/codice/data/imap_codice_l0_hi-counters-singles_20240429_v001.pkts +0 -0
  151. imap_processing/tests/codice/data/imap_codice_l0_hi-omni_20240429_v001.pkts +0 -0
  152. imap_processing/tests/codice/data/imap_codice_l0_hi-pha_20240429_v001.pkts +0 -0
  153. imap_processing/tests/codice/data/imap_codice_l0_hi-sectored_20240429_v001.pkts +0 -0
  154. imap_processing/tests/codice/data/imap_codice_l0_hskp_20100101_v001.pkts +0 -0
  155. imap_processing/tests/codice/data/imap_codice_l0_lo-counters-aggregated_20240429_v001.pkts +0 -0
  156. imap_processing/tests/codice/data/imap_codice_l0_lo-counters-singles_20240429_v001.pkts +0 -0
  157. imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-angular_20240429_v001.pkts +0 -0
  158. imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-priority_20240429_v001.pkts +0 -0
  159. imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-species_20240429_v001.pkts +0 -0
  160. imap_processing/tests/codice/data/imap_codice_l0_lo-pha_20240429_v001.pkts +0 -0
  161. imap_processing/tests/codice/data/imap_codice_l0_lo-sw-angular_20240429_v001.pkts +0 -0
  162. imap_processing/tests/codice/data/imap_codice_l0_lo-sw-priority_20240429_v001.pkts +0 -0
  163. imap_processing/tests/codice/data/imap_codice_l0_lo-sw-species_20240429_v001.pkts +0 -0
  164. imap_processing/tests/hi/test_decom.py +0 -55
  165. imap_processing/tests/hi/test_l1a_sci_de.py +0 -72
  166. imap_processing/tests/idex/imap_idex_l0_raw_20230725_v001.pkts +0 -0
  167. imap_processing/tests/mag/imap_mag_l1a_burst-magi_20231025_v001.cdf +0 -0
  168. /imap_processing/{hi/l0/__init__.py → tests/glows/test_glows_l2_data.py} +0 -0
  169. /imap_processing/tests/hit/test_data/{imap_hit_l0_hk_20100105_v001.pkts → imap_hit_l0_raw_20100105_v001.pkts} +0 -0
  170. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/LICENSE +0 -0
  171. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/WHEEL +0 -0
  172. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/entry_points.txt +0 -0
imap_processing/hit/l1a/hit_l1a.py

@@ -1,39 +1,25 @@
 """Decommutate HIT CCSDS data and create L1a data products."""
 
 import logging
-from enum import IntEnum
 
 import numpy as np
 import xarray as xr
 
-from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.utils import packet_file_to_datasets
+from imap_processing.hit.hit_utils import (
+    HitAPID,
+    get_attribute_manager,
+    get_datasets_by_apid,
+    process_housekeeping_data,
+)
+from imap_processing.hit.l0.constants import MOD_10_MAPPING
+from imap_processing.hit.l0.decom_hit import decom_hit
 
 logger = logging.getLogger(__name__)
 
 # TODO review logging levels to use (debug vs. info)
 
 
-class HitAPID(IntEnum):
-    """
-    HIT APID Mappings.
-
-    Attributes
-    ----------
-    HIT_HSKP: int
-        Housekeeping
-    HIT_SCIENCE : int
-        Science
-    HIT_IALRT : int
-        I-ALiRT
-    """
-
-    HIT_HSKP = 1251
-    HIT_SCIENCE = 1252
-    HIT_IALRT = 1253
-
-
 def hit_l1a(packet_file: str, data_version: str) -> list[xr.Dataset]:
     """
     Will process HIT L0 data into L1A data products.
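The hunk above removes the HitAPID enum from hit_l1a.py, while the new imports pull HitAPID from imap_processing/hit/hit_utils.py, a module added in this release (+221 lines). A minimal sketch of how the relocated enum presumably looks in hit_utils, reconstructed from the removed block (the module's other contents are not shown in this diff):

    # Sketch only: name confirmed by the new import, values taken from the
    # removed block above; the rest of hit_utils.py is not visible here.
    from enum import IntEnum

    class HitAPID(IntEnum):
        """HIT APID Mappings."""

        HIT_HSKP = 1251  # Housekeeping
        HIT_SCIENCE = 1252  # Science
        HIT_IALRT = 1253  # I-ALiRT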
@@ -50,80 +36,137 @@ def hit_l1a(packet_file: str, data_version: str) -> list[xr.Dataset]:
     processed_data : list[xarray.Dataset]
         List of Datasets of L1A processed data.
     """
-    # TODO add logging
-
-    # Unpack ccsds file
-    packet_definition = (
-        imap_module_directory / "hit/packet_definitions/hit_packet_definitions.xml"
-    )
-    datasets_by_apid = packet_file_to_datasets(
-        packet_file=packet_file,
-        xtce_packet_definition=packet_definition,
-        use_derived_value=False,
-    )
+    # Unpack ccsds file to xarray datasets
+    datasets_by_apid = get_datasets_by_apid(packet_file)
 
     # Create the attribute manager for this data level
-    attr_mgr = ImapCdfAttributes()
-    attr_mgr.add_instrument_global_attrs(instrument="hit")
-    attr_mgr.add_instrument_variable_attrs(instrument="hit", level="l1a")
-    attr_mgr.add_global_attribute("Data_version", data_version)
+    attr_mgr = get_attribute_manager(data_version, "l1a")
 
-    # Process science to l1a.
+    l1a_datasets = []
+
+    # Process l1a data products
     if HitAPID.HIT_HSKP in datasets_by_apid:
-        datasets_by_apid[HitAPID.HIT_HSKP] = process_housekeeping(
-            datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr
+        logger.info("Creating HIT L1A housekeeping dataset")
+        l1a_datasets.append(
+            process_housekeeping_data(
+                datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1a_hk"
+            )
         )
     if HitAPID.HIT_SCIENCE in datasets_by_apid:
-        # TODO complete science data processing
-        print("Skipping science data for now")
-        datasets_by_apid[HitAPID.HIT_SCIENCE] = process_science(
-            datasets_by_apid[HitAPID.HIT_SCIENCE], attr_mgr
+        l1a_datasets.extend(
+            process_science(datasets_by_apid[HitAPID.HIT_SCIENCE], attr_mgr)
        )
-
-    return list(datasets_by_apid.values())
+    return l1a_datasets
 
 
-def concatenate_leak_variables(
-    dataset: xr.Dataset, adc_channels: xr.DataArray
-) -> xr.Dataset:
+def subcom_sectorates(sci_dataset: xr.Dataset) -> None:
     """
-    Concatenate leak variables in the dataset.
-
-    Updates the housekeeping dataset to replace the individual
-    leak_i_00, leak_i_01, ..., leak_i_63 variables with a single
-    leak_i variable as a 2D array. "i" here represents current
-    in the leakage current [Voltage] data.
+    Subcommutate sectorates data.
+
+    Sector rates data contains rates for 5 species and 10
+    energy ranges. This function subcommutates the sector
+    rates data by organizing the rates by species. Which
+    species and energy range the data belongs to is determined
+    by taking the mod 10 value of the corresponding header
+    minute count value in the dataset. A mapping of mod 10
+    values to species and energy ranges is provided in constants.py.
+
+    MOD_10_MAPPING = {
+        0: {"species": "H", "energy_min": 1.8, "energy_max": 3.6},
+        1: {"species": "H", "energy_min": 4, "energy_max": 6},
+        2: {"species": "H", "energy_min": 6, "energy_max": 10},
+        3: {"species": "4He", "energy_min": 4, "energy_max": 6},
+        ...
+        9: {"species": "Fe", "energy_min": 4, "energy_max": 12}}
+
+    The data is added to the dataset as new data fields named
+    according to their species. They have 4 dimensions: epoch
+    energy index, declination, and azimuth. The energy index
+    dimension is used to distinguish between the different energy
+    ranges the data belongs to. The energy min and max values for
+    each species are also added to the dataset as new data fields.
 
     Parameters
     ----------
-    dataset : xarray.Dataset
-        Dataset containing 64 leak variables.
-    adc_channels : xarray.DataArray
-        DataArray to be used as a dimension for the concatenated leak variables.
-
-    Returns
-    -------
-    dataset : xarray.Dataset
-        Updated dataset with concatenated leak variables.
+    sci_dataset : xarray.Dataset
+        Xarray dataset containing parsed HIT science data.
     """
-    # Stack 64 leak variables (leak_00, leak_01, ..., leak_63)
-    leak_vars = [dataset[f"leak_i_{i:02d}"] for i in range(64)]
-
-    # Concatenate along 'adc_channels' and reorder dimensions
-    stacked_leaks = xr.concat(leak_vars, dim=adc_channels).transpose(
-        "epoch", "adc_channels"
-    )
-    dataset["leak_i"] = stacked_leaks
-
-    # Drop the individual leak variables
-    updated_dataset = dataset.drop_vars([f"leak_i_{i:02d}" for i in range(64)])
-
-    return updated_dataset
+    # TODO:
+    # - Update to use fill values defined in attribute manager which
+    #   isn't defined for L1A science data yet
+    # - fix issues with fe_counts_sectored. The array has shape
+    #   (epoch: 28, fe_energy_index: 1, declination: 8, azimuth: 15),
+    #   but cdflib drops second dimension of size 1 and recognizes
+    #   only 3 total dimensions. Are dimensions of 1 ignored?
+
+    # Calculate mod 10 values
+    hdr_min_count_mod_10 = sci_dataset.hdr_minute_cnt.values % 10
+
+    # Reference mod 10 mapping to initialize data structure for species and
+    # energy ranges and add 8x15 arrays with fill values for each science frame.
+    num_frames = len(hdr_min_count_mod_10)
+    # TODO: add more specific dtype for rates (ex. int16) once this is defined by HIT
+    data_by_species_and_energy_range = {
+        key: {**value, "rates": np.full((num_frames, 8, 15), fill_value=-1, dtype=int)}
+        for key, value in MOD_10_MAPPING.items()
+    }
+
+    # Update rates for science frames where data is available
+    for i, mod_10 in enumerate(hdr_min_count_mod_10):
+        data_by_species_and_energy_range[mod_10]["rates"][i] = sci_dataset[
+            "sectorates"
+        ].values[i]
+
+    # H has 3 energy ranges, 4He, CNO, NeMgSi have 2, and Fe has 1.
+    # Aggregate sector rates and energy min/max values for each species.
+    # First, initialize dictionaries to store rates and min/max energy values by species
+    data_by_species: dict = {
+        value["species"]: {"rates": [], "energy_min": [], "energy_max": []}
+        for value in data_by_species_and_energy_range.values()
+    }
+
+    for value in data_by_species_and_energy_range.values():
+        species = value["species"]
+        data_by_species[species]["rates"].append(value["rates"])
+        data_by_species[species]["energy_min"].append(value["energy_min"])
+        data_by_species[species]["energy_max"].append(value["energy_max"])
+
+    # Add sector rates by species to the dataset
+    for species_type, data in data_by_species.items():
+        # Rates data has shape: energy_index, epoch, declination, azimuth
+        # Convert rates to numpy array and transpose axes to get
+        # shape: epoch, energy_index, declination, azimuth
+        rates_data = np.transpose(np.array(data["rates"]), axes=(1, 0, 2, 3))
+
+        species = species_type.lower()
+        sci_dataset[f"{species}_counts_sectored"] = xr.DataArray(
+            data=rates_data,
+            dims=["epoch", f"{species}_energy_index", "declination", "azimuth"],
+            name=f"{species}_counts_sectored",
+        )
+        sci_dataset[f"{species}_energy_min"] = xr.DataArray(
+            data=np.array(data["energy_min"], dtype=np.int8),
+            dims=[f"{species}_energy_index"],
+            name=f"{species}_energy_min",
+        )
+        sci_dataset[f"{species}_energy_max"] = xr.DataArray(
+            data=np.array(data["energy_max"], dtype=np.int8),
+            dims=[f"{species}_energy_index"],
+            name=f"{species}_energy_max",
+        )
+        # add energy index coordinate to the dataset
+        sci_dataset.coords[f"{species}_energy_index"] = xr.DataArray(
+            np.arange(sci_dataset.sizes[f"{species}_energy_index"], dtype=np.int8),
+            dims=[f"{species}_energy_index"],
+            name=f"{species}_energy_index",
+        )
 
 
-def process_science(dataset: xr.Dataset, attr_mgr: ImapCdfAttributes) -> xr.Dataset:
+def process_science(
+    dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
+) -> list[xr.Dataset]:
     """
-    Will process science dataset for CDF product.
+    Will process science datasets for CDF products.
 
     Process binary science data for CDF creation. The data is
     grouped into science frames, decommutated and decompressed,
@@ -134,121 +177,64 @@ def process_science(dataset: xr.Dataset, attr_mgr: ImapCdfAttributes) -> xr.Data
     Parameters
     ----------
     dataset : xarray.Dataset
-        Dataset containing HIT science data.
+        A dataset containing HIT science data.
 
     attr_mgr : ImapCdfAttributes
         Attribute manager used to get the data product field's attributes.
 
     Returns
     -------
-    dataset : xarray.Dataset
-        An updated dataset ready for CDF conversion.
+    dataset : list
+        A list of science datasets ready for CDF conversion.
     """
     logger.info("Creating HIT L1A science datasets")
 
-    # Logical sources for the two products.
-    # logical_sources = ["imap_hit_l1a_sci-counts", "imap_hit_l1a_pulse-height-event"]
-
-    # TODO: Complete this function
-    # - call decom_hit.py to decommutate the science data
-    # - split the science data into count rates and event datasets
-    # - update dimensions and add attributes to the dataset and data arrays
-    # - return list of two datasets (count rates and events)?
+    # Decommutate and decompress the science data
+    sci_dataset = decom_hit(dataset)
 
-    # logger.info("HIT L1A event dataset created")
-    # logger.info("HIT L1A count rates dataset created")
+    # Organize sector rates by species type
+    subcom_sectorates(sci_dataset)
 
-    return dataset
-
-
-def process_housekeeping(
-    dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
-) -> xr.Dataset:
-    """
-    Will process housekeeping dataset for CDF product.
-
-    Updates the housekeeping dataset to replace with a single
-    leak_i variable as a 2D array. Also updates the dataset
-    attributes and coordinates and data variable dimensions
-    according to specifications in a cdf yaml file.
-
-    Parameters
-    ----------
-    dataset : xarray.Dataset
-        Dataset containing HIT housekeeping data.
-
-    attr_mgr : ImapCdfAttributes
-        Attribute manager used to get the data product field's attributes.
-
-    Returns
-    -------
-    dataset : xarray.Dataset
-        An updated dataset ready for CDF conversion.
-    """
-    logger.info("Creating HIT L1A housekeeping dataset")
-
-    logical_source = "imap_hit_l1a_hk"
-
-    # Drop keys that are not CDF data variables
-    drop_keys = [
-        "pkt_apid",
-        "sc_tick",
-        "version",
-        "type",
-        "sec_hdr_flg",
-        "seq_flgs",
-        "src_seq_ctr",
-        "pkt_len",
-        "hskp_spare1",
-        "hskp_spare2",
-        "hskp_spare3",
-        "hskp_spare4",
-        "hskp_spare5",
-    ]
-
-    # Drop variables not needed for CDF
-    dataset = dataset.drop_vars(drop_keys)
-
-    # Create data arrays for dependencies
-    adc_channels = xr.DataArray(
-        np.arange(64, dtype=np.uint8),
-        name="adc_channels",
-        dims=["adc_channels"],
-        attrs=attr_mgr.get_variable_attributes("adc_channels"),
-    )
-
-    # NOTE: LABL_PTR_1 should be CDF_CHAR.
-    adc_channels_label = xr.DataArray(
-        adc_channels.values.astype(str),
-        name="adc_channels_label",
-        dims=["adc_channels_label"],
-        attrs=attr_mgr.get_variable_attributes("adc_channels_label"),
+    # Split the science data into count rates and event datasets
+    pha_raw_dataset = xr.Dataset(
+        {"pha_raw": sci_dataset["pha_raw"]}, coords={"epoch": sci_dataset["epoch"]}
     )
+    count_rates_dataset = sci_dataset.drop_vars("pha_raw")
 
-    # Update dataset coordinates and attributes
-    dataset = dataset.assign_coords(
-        {
-            "adc_channels": adc_channels,
-            "adc_channels_label": adc_channels_label,
-        }
-    )
-    dataset.attrs = attr_mgr.get_global_attributes(logical_source)
-
-    # Stack 64 leak variables (leak_00, leak_01, ..., leak_63)
-    dataset = concatenate_leak_variables(dataset, adc_channels)
-
-    # Assign attributes and dimensions to each data array in the Dataset
-    for field in dataset.data_vars.keys():
-        # Create a dict of dimensions using the DEPEND_I keys in the
-        # attributes
-        dims = {
-            key: value
-            for key, value in attr_mgr.get_variable_attributes(field).items()
-            if "DEPEND" in key
-        }
-        dataset[field].attrs = attr_mgr.get_variable_attributes(field)
-        dataset[field].assign_coords(dims)
-
-    dataset.epoch.attrs = attr_mgr.get_variable_attributes("epoch")
-
-    return dataset
+    # Logical sources for the two products.
+    logical_sources = ["imap_hit_l1a_count-rates", "imap_hit_l1a_pulse-height-events"]
+
+    datasets = []
+    # Update attributes and dimensions
+    for dataset, logical_source in zip(
+        [count_rates_dataset, pha_raw_dataset], logical_sources
+    ):
+        dataset.attrs = attr_mgr.get_global_attributes(logical_source)
+
+        # TODO: Add CDF attributes to yaml once they're defined for L1A science data
+        # Assign attributes and dimensions to each data array in the Dataset
+        for field in dataset.data_vars.keys():
+            try:
+                # Create a dict of dimensions using the DEPEND_I keys in the
+                # attributes
+                dims = {
+                    key: value
+                    for key, value in attr_mgr.get_variable_attributes(field).items()
+                    if "DEPEND" in key
+                }
+                dataset[field].attrs = attr_mgr.get_variable_attributes(field)
+                dataset[field].assign_coords(dims)
+            except KeyError:
+                print(f"Field {field} not found in attribute manager.")
+                logger.warning(f"Field {field} not found in attribute manager.")
+
+        dataset.epoch.attrs = attr_mgr.get_variable_attributes("epoch")
+        # Remove DEPEND_0 attribute from epoch variable added by attr_mgr.
+        # Not required for epoch
+        del dataset["epoch"].attrs["DEPEND_0"]
+
+        datasets.append(dataset)
+
+        logger.info(f"HIT L1A dataset created for {logical_source}")
+
+    return datasets
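As context for subcom_sectorates above, the sketch below walks through the mod-10 routing step on its own. Everything in it is illustrative: the two-entry mapping stands in for the full ten-entry MOD_10_MAPPING in imap_processing/hit/l0/constants.py, and the header minute counts and sector rates are made-up arrays shaped like the 8x15 (declination x azimuth) frames the function expects.

    import numpy as np

    # Stand-in for MOD_10_MAPPING; only the first two entries from the
    # docstring excerpt above are reproduced here.
    MOD_10_MAPPING = {
        0: {"species": "H", "energy_min": 1.8, "energy_max": 3.6},
        1: {"species": "H", "energy_min": 4, "energy_max": 6},
    }

    # Hypothetical header minute counts for three science frames.
    hdr_minute_cnt = np.array([20, 31, 40])

    # Hypothetical 8x15 (declination x azimuth) sector rates per frame.
    sectorates = np.arange(3 * 8 * 15).reshape(3, 8, 15)

    # mod 10 of the minute count selects each frame's species/energy range.
    for i, mod_10 in enumerate(hdr_minute_cnt % 10):
        info = MOD_10_MAPPING[mod_10]
        print(f"frame {i}: species {info['species']}, "
              f"energy {info['energy_min']}-{info['energy_max']}, "
              f"rates shape {sectorates[i].shape}")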
imap_processing/hit/l1b/hit_l1b.py

@@ -1,179 +1,59 @@
 """IMAP-HIT L1B data processing."""
 
 import logging
-from dataclasses import fields
 
-import numpy as np
 import xarray as xr
 
-from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.hit.l0.data_classes.housekeeping import Housekeeping
-from imap_processing.spice.time import met_to_j2000ns
+from imap_processing.hit.hit_utils import (
+    HitAPID,
+    get_attribute_manager,
+    get_datasets_by_apid,
+    process_housekeeping_data,
+)
 
 logger = logging.getLogger(__name__)
 
 # TODO review logging levels to use (debug vs. info)
 
 
-def hit_l1b(l1a_dataset: xr.Dataset, data_version: str) -> list[xr.Dataset]:
+def hit_l1b(dependencies: dict, data_version: str) -> list[xr.Dataset]:
     """
     Will process HIT data to L1B.
 
+    Processes dependencies needed to create L1B data products.
+
     Parameters
     ----------
-    l1a_dataset : xarray.Dataset
-        L1A data.
+    dependencies : dict
+        Dictionary of dependencies that are L1A xarray datasets
+        for science data and a file path string to a CCSDS file
+        for housekeeping data.
     data_version : str
         Version of the data product being created.
 
     Returns
     -------
-    cdf_filepaths : xarray.Dataset
-        L1B processed data.
+    processed_data : list[xarray.Dataset]
+        List of L1B datasets.
     """
-    # create the attribute manager for this data level
-    attr_mgr = ImapCdfAttributes()
-    attr_mgr.add_instrument_global_attrs(instrument="hit")
-    attr_mgr.add_instrument_variable_attrs(instrument="hit", level="l1b")
-    attr_mgr.add_global_attribute("Data_version", data_version)
-
-    # TODO: Check for type of L1A dataset and determine what L1B products to make
-    # Need more info from instrument teams. Work with housekeeping data for now
-    logical_source = "imap_hit_l1b_hk"
-
-    # Create datasets
-    datasets = []
-    if "_hk" in logical_source:
-        dataset = create_hk_dataset(attr_mgr)
-        datasets.append(dataset)
-    elif "_sci" in logical_source:
-        # process science data. placeholder for future code
+    # Create the attribute manager for this data level
+    attr_mgr = get_attribute_manager(data_version, "l1b")
+
+    # Create L1B datasets
+    datasets: list = []
+    if "imap_hit_l0_raw" in dependencies:
+        # Unpack ccsds file to xarray datasets
+        packet_file = dependencies["imap_hit_l0_raw"]
+        datasets_by_apid = get_datasets_by_apid(packet_file, derived=True)
+        # Process housekeeping to l1b.
+        datasets.append(
+            process_housekeeping_data(
+                datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1b_hk"
+            )
+        )
+        logger.info("HIT L1B housekeeping dataset created")
+    if "imap_hit_l1a_countrates" in dependencies:
+        # TODO: process science data. placeholder for future code
         pass
 
     return datasets
-
-
-# TODO: This is going to work differently when we have sample data
-def create_hk_dataset(attr_mgr: ImapCdfAttributes) -> xr.Dataset:
-    """
-    Create a housekeeping dataset.
-
-    Parameters
-    ----------
-    attr_mgr : ImapCdfAttributes
-        Attribute manager used to get the data product field's attributes.
-
-    Returns
-    -------
-    hk_dataset : xarray.dataset
-        Dataset with all data product fields in xarray.DataArray.
-    """
-    logger.info("Creating datasets for HIT L1B data")
-
-    # TODO: TEMPORARY. Need to update to use the L1B data class once that exists.
-    # Using l1a housekeeping data class for now since l1b housekeeping has the
-    # same data fields
-    data_fields = fields(Housekeeping)
-
-    # TODO define keys to skip. This will change later.
-    skip_keys = [
-        "shcoarse",
-        "ground_sw_version",
-        "packet_file_name",
-        "ccsds_header",
-        "leak_i_raw",
-    ]
-
-    logical_source = "imap_hit_l1b_hk"
-
-    # Create fake data for now
-
-    # Convert integers into datetime64[s]
-    epoch_converted_time = met_to_j2000ns([0, 1, 2])
-
-    # Shape for dims
-    n_epoch = 3
-    n_channels = 64
-
-    # Create xarray data arrays for dependencies
-    epoch_time = xr.DataArray(
-        data=epoch_converted_time,
-        name="epoch",
-        dims=["epoch"],
-        attrs=attr_mgr.get_variable_attributes("epoch"),
-    )
-
-    adc_channels = xr.DataArray(
-        np.arange(n_channels, dtype=np.uint16),
-        name="adc_channels",
-        dims=["adc_channels"],
-        attrs=attr_mgr.get_variable_attributes("adc_channels"),
-    )
-
-    # Create xarray dataset
-    hk_dataset = xr.Dataset(
-        coords={"epoch": epoch_time, "adc_channels": adc_channels},
-        attrs=attr_mgr.get_global_attributes(logical_source),
-    )
-
-    # Create xarray data array for each data field
-    for data_field in data_fields:
-        field = data_field.name.lower()
-        if field not in skip_keys:
-            # Create a list of all the dimensions using the DEPEND_I keys in the
-            # attributes
-            dims = [
-                value
-                for key, value in attr_mgr.get_variable_attributes(field).items()
-                if "DEPEND" in key
-            ]
-
-            # TODO: This is temporary.
-            # The data will be set in the data class when that's created
-            if field == "leak_i":
-                # 2D array - needs two dims
-                hk_dataset[field] = xr.DataArray(
-                    np.ones((n_epoch, n_channels), dtype=np.uint16),
-                    dims=dims,
-                    attrs=attr_mgr.get_variable_attributes(field),
-                )
-            elif field in [
-                "preamp_l234a",
-                "preamp_l1a",
-                "preamp_l1b",
-                "preamp_l234b",
-                "temp0",
-                "temp1",
-                "temp2",
-                "temp3",
-                "analog_temp",
-                "hvps_temp",
-                "idpu_temp",
-                "lvps_temp",
-                "ebox_3d4vd",
-                "ebox_5d1vd",
-                "ebox_p12va",
-                "ebox_m12va",
-                "ebox_p5d7va",
-                "ebox_m5d7va",
-                "ref_p5v",
-                "l1ab_bias",
-                "l2ab_bias",
-                "l34a_bias",
-                "l34b_bias",
-                "ebox_p2d0vd",
-            ]:
-                hk_dataset[field] = xr.DataArray(
-                    np.ones(3, dtype=np.float16),
-                    dims=dims,
-                    attrs=attr_mgr.get_variable_attributes(field),
-                )
-            else:
-                hk_dataset[field] = xr.DataArray(
-                    [1, 1, 1],
-                    dims=dims,
-                    attrs=attr_mgr.get_variable_attributes(field),
-                )
-
-    logger.info("HIT L1B datasets created")
-    return hk_dataset
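Net effect of the two file diffs: hit_l1a keeps its packet-file interface, while hit_l1b now accepts a dependencies dictionary keyed by what look like logical-source strings instead of a single L1A dataset. A hedged usage sketch based only on the signatures and keys visible above; the packet file name is borrowed from this release's test data and the version string is a placeholder:

    # Sketch only: call shapes inferred from the diffed signatures.
    from imap_processing.hit.l1a.hit_l1a import hit_l1a
    from imap_processing.hit.l1b.hit_l1b import hit_l1b

    packet_file = "imap_hit_l0_raw_20100105_v001.pkts"  # placeholder test file

    # L1A: housekeeping plus count-rates and pulse-height-event datasets.
    l1a_datasets = hit_l1a(packet_file, data_version="001")

    # L1B: housekeeping is unpacked again from the raw packet file; the
    # "imap_hit_l1a_countrates" science branch is still a TODO in 0.9.0.
    l1b_datasets = hit_l1b({"imap_hit_l0_raw": packet_file}, data_version="001")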