imap-processing 0.7.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of imap-processing might be problematic.

Files changed (172)
  1. imap_processing/__init__.py +1 -1
  2. imap_processing/_version.py +2 -2
  3. imap_processing/ccsds/excel_to_xtce.py +36 -2
  4. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +1 -1
  5. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +145 -30
  6. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +36 -36
  7. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +136 -9
  8. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +14 -0
  9. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +63 -1
  10. imap_processing/cdf/config/imap_hit_l1b_variable_attrs.yaml +9 -0
  11. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +14 -7
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +577 -235
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +326 -0
  14. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +33 -23
  15. imap_processing/cdf/config/imap_mag_l1_variable_attrs.yaml +24 -28
  16. imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +1 -0
  17. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +137 -79
  18. imap_processing/cdf/config/imap_variable_schema.yaml +13 -0
  19. imap_processing/cdf/imap_cdf_manager.py +31 -27
  20. imap_processing/cdf/utils.py +3 -5
  21. imap_processing/cli.py +25 -14
  22. imap_processing/codice/codice_l1a.py +153 -63
  23. imap_processing/codice/constants.py +10 -10
  24. imap_processing/codice/decompress.py +10 -11
  25. imap_processing/codice/utils.py +1 -0
  26. imap_processing/glows/l1a/glows_l1a.py +1 -2
  27. imap_processing/glows/l1b/glows_l1b.py +3 -3
  28. imap_processing/glows/l1b/glows_l1b_data.py +59 -37
  29. imap_processing/glows/l2/glows_l2_data.py +123 -0
  30. imap_processing/hi/l1a/hi_l1a.py +4 -4
  31. imap_processing/hi/l1a/histogram.py +107 -109
  32. imap_processing/hi/l1a/science_direct_event.py +92 -225
  33. imap_processing/hi/l1b/hi_l1b.py +85 -11
  34. imap_processing/hi/l1c/hi_l1c.py +23 -1
  35. imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +3994 -0
  36. imap_processing/hi/utils.py +1 -1
  37. imap_processing/hit/hit_utils.py +221 -0
  38. imap_processing/hit/l0/constants.py +118 -0
  39. imap_processing/hit/l0/decom_hit.py +100 -156
  40. imap_processing/hit/l1a/hit_l1a.py +170 -184
  41. imap_processing/hit/l1b/hit_l1b.py +33 -153
  42. imap_processing/ialirt/l0/process_codicelo.py +153 -0
  43. imap_processing/ialirt/l0/process_hit.py +5 -5
  44. imap_processing/ialirt/packet_definitions/ialirt_codicelo.xml +281 -0
  45. imap_processing/ialirt/process_ephemeris.py +212 -0
  46. imap_processing/idex/idex_l1a.py +65 -84
  47. imap_processing/idex/idex_l1b.py +192 -0
  48. imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +33 -0
  49. imap_processing/idex/packet_definitions/idex_packet_definition.xml +97 -595
  50. imap_processing/lo/l0/decompression_tables/decompression_tables.py +17 -1
  51. imap_processing/lo/l0/lo_science.py +45 -13
  52. imap_processing/lo/l1a/lo_l1a.py +76 -8
  53. imap_processing/lo/packet_definitions/lo_xtce.xml +8344 -1849
  54. imap_processing/mag/l0/decom_mag.py +4 -3
  55. imap_processing/mag/l1a/mag_l1a.py +12 -13
  56. imap_processing/mag/l1a/mag_l1a_data.py +1 -2
  57. imap_processing/mag/l1b/mag_l1b.py +90 -7
  58. imap_processing/spice/geometry.py +156 -16
  59. imap_processing/spice/time.py +144 -2
  60. imap_processing/swapi/l1/swapi_l1.py +4 -4
  61. imap_processing/swapi/l2/swapi_l2.py +1 -1
  62. imap_processing/swapi/packet_definitions/swapi_packet_definition.xml +1535 -446
  63. imap_processing/swe/l1b/swe_l1b_science.py +8 -8
  64. imap_processing/swe/l2/swe_l2.py +134 -17
  65. imap_processing/tests/ccsds/test_data/expected_output.xml +2 -1
  66. imap_processing/tests/ccsds/test_excel_to_xtce.py +4 -4
  67. imap_processing/tests/cdf/test_imap_cdf_manager.py +0 -10
  68. imap_processing/tests/codice/conftest.py +1 -17
  69. imap_processing/tests/codice/data/imap_codice_l0_raw_20241110_v001.pkts +0 -0
  70. imap_processing/tests/codice/test_codice_l0.py +8 -2
  71. imap_processing/tests/codice/test_codice_l1a.py +127 -107
  72. imap_processing/tests/codice/test_codice_l1b.py +1 -0
  73. imap_processing/tests/codice/test_decompress.py +7 -7
  74. imap_processing/tests/conftest.py +100 -58
  75. imap_processing/tests/glows/conftest.py +6 -0
  76. imap_processing/tests/glows/test_glows_l1b.py +9 -9
  77. imap_processing/tests/glows/test_glows_l1b_data.py +9 -9
  78. imap_processing/tests/hi/test_data/l0/H90_NHK_20241104.bin +0 -0
  79. imap_processing/tests/hi/test_data/l0/H90_sci_cnt_20241104.bin +0 -0
  80. imap_processing/tests/hi/test_data/l0/H90_sci_de_20241104.bin +0 -0
  81. imap_processing/tests/hi/test_data/l1a/imap_hi_l1a_45sensor-de_20250415_v000.cdf +0 -0
  82. imap_processing/tests/hi/test_hi_l1b.py +73 -3
  83. imap_processing/tests/hi/test_hi_l1c.py +10 -2
  84. imap_processing/tests/hi/test_l1a.py +31 -58
  85. imap_processing/tests/hi/test_science_direct_event.py +58 -0
  86. imap_processing/tests/hi/test_utils.py +4 -3
  87. imap_processing/tests/hit/test_data/sci_sample1.ccsds +0 -0
  88. imap_processing/tests/hit/{test_hit_decom.py → test_decom_hit.py} +95 -36
  89. imap_processing/tests/hit/test_hit_l1a.py +299 -179
  90. imap_processing/tests/hit/test_hit_l1b.py +231 -24
  91. imap_processing/tests/hit/test_hit_utils.py +218 -0
  92. imap_processing/tests/hit/validation_data/hskp_sample_eu.csv +89 -0
  93. imap_processing/tests/hit/validation_data/sci_sample_raw1.csv +29 -0
  94. imap_processing/tests/ialirt/test_data/l0/apid01152.tlm +0 -0
  95. imap_processing/tests/ialirt/test_data/l0/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
  96. imap_processing/tests/ialirt/unit/test_process_codicelo.py +106 -0
  97. imap_processing/tests/ialirt/unit/test_process_ephemeris.py +109 -0
  98. imap_processing/tests/ialirt/unit/test_process_hit.py +9 -6
  99. imap_processing/tests/idex/conftest.py +2 -2
  100. imap_processing/tests/idex/imap_idex_l0_raw_20231214_v001.pkts +0 -0
  101. imap_processing/tests/idex/impact_14_tof_high_data.txt +4444 -4444
  102. imap_processing/tests/idex/test_idex_l0.py +4 -4
  103. imap_processing/tests/idex/test_idex_l1a.py +8 -2
  104. imap_processing/tests/idex/test_idex_l1b.py +126 -0
  105. imap_processing/tests/lo/test_lo_l1a.py +7 -16
  106. imap_processing/tests/lo/test_lo_science.py +69 -5
  107. imap_processing/tests/lo/test_pkts/imap_lo_l0_raw_20240803_v002.pkts +0 -0
  108. imap_processing/tests/lo/validation_data/Instrument_FM1_T104_R129_20240803_ILO_SCI_DE_dec_DN_with_fills.csv +1999 -0
  109. imap_processing/tests/mag/imap_mag_l1a_norm-magi_20251017_v001.cdf +0 -0
  110. imap_processing/tests/mag/test_mag_l1b.py +97 -7
  111. imap_processing/tests/spice/test_data/imap_ena_sim_metakernel.template +3 -1
  112. imap_processing/tests/spice/test_geometry.py +115 -9
  113. imap_processing/tests/spice/test_time.py +135 -6
  114. imap_processing/tests/swapi/test_swapi_decom.py +75 -69
  115. imap_processing/tests/swapi/test_swapi_l1.py +4 -4
  116. imap_processing/tests/swe/conftest.py +33 -0
  117. imap_processing/tests/swe/l1_validation/swe_l0_unpacked-data_20240510_v001_VALIDATION_L1B_v3.dat +4332 -0
  118. imap_processing/tests/swe/test_swe_l1b.py +29 -8
  119. imap_processing/tests/swe/test_swe_l2.py +64 -8
  120. imap_processing/tests/test_utils.py +2 -2
  121. imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3314 -3314
  122. imap_processing/tests/ultra/test_data/l1/dps_exposure_helio_45_E12.cdf +0 -0
  123. imap_processing/tests/ultra/test_data/l1/dps_exposure_helio_45_E24.cdf +0 -0
  124. imap_processing/tests/ultra/unit/test_de.py +113 -0
  125. imap_processing/tests/ultra/unit/test_spatial_utils.py +125 -0
  126. imap_processing/tests/ultra/unit/test_ultra_l1b.py +27 -3
  127. imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +31 -10
  128. imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +55 -35
  129. imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +10 -68
  130. imap_processing/ultra/constants.py +12 -3
  131. imap_processing/ultra/l1b/de.py +168 -30
  132. imap_processing/ultra/l1b/ultra_l1b_annotated.py +24 -10
  133. imap_processing/ultra/l1b/ultra_l1b_extended.py +46 -80
  134. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +60 -144
  135. imap_processing/ultra/utils/spatial_utils.py +221 -0
  136. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/METADATA +15 -14
  137. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/RECORD +142 -139
  138. imap_processing/cdf/cdf_attribute_manager.py +0 -322
  139. imap_processing/cdf/config/shared/default_global_cdf_attrs_schema.yaml +0 -246
  140. imap_processing/cdf/config/shared/default_variable_cdf_attrs_schema.yaml +0 -466
  141. imap_processing/hi/l0/decom_hi.py +0 -24
  142. imap_processing/hi/packet_definitions/hi_packet_definition.xml +0 -482
  143. imap_processing/hit/l0/data_classes/housekeeping.py +0 -240
  144. imap_processing/hit/l0/data_classes/science_packet.py +0 -259
  145. imap_processing/hit/l0/utils/hit_base.py +0 -57
  146. imap_processing/tests/cdf/shared/default_global_cdf_attrs_schema.yaml +0 -246
  147. imap_processing/tests/cdf/shared/default_variable_cdf_attrs_schema.yaml +0 -466
  148. imap_processing/tests/cdf/test_cdf_attribute_manager.py +0 -353
  149. imap_processing/tests/codice/data/imap_codice_l0_hi-counters-aggregated_20240429_v001.pkts +0 -0
  150. imap_processing/tests/codice/data/imap_codice_l0_hi-counters-singles_20240429_v001.pkts +0 -0
  151. imap_processing/tests/codice/data/imap_codice_l0_hi-omni_20240429_v001.pkts +0 -0
  152. imap_processing/tests/codice/data/imap_codice_l0_hi-pha_20240429_v001.pkts +0 -0
  153. imap_processing/tests/codice/data/imap_codice_l0_hi-sectored_20240429_v001.pkts +0 -0
  154. imap_processing/tests/codice/data/imap_codice_l0_hskp_20100101_v001.pkts +0 -0
  155. imap_processing/tests/codice/data/imap_codice_l0_lo-counters-aggregated_20240429_v001.pkts +0 -0
  156. imap_processing/tests/codice/data/imap_codice_l0_lo-counters-singles_20240429_v001.pkts +0 -0
  157. imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-angular_20240429_v001.pkts +0 -0
  158. imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-priority_20240429_v001.pkts +0 -0
  159. imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-species_20240429_v001.pkts +0 -0
  160. imap_processing/tests/codice/data/imap_codice_l0_lo-pha_20240429_v001.pkts +0 -0
  161. imap_processing/tests/codice/data/imap_codice_l0_lo-sw-angular_20240429_v001.pkts +0 -0
  162. imap_processing/tests/codice/data/imap_codice_l0_lo-sw-priority_20240429_v001.pkts +0 -0
  163. imap_processing/tests/codice/data/imap_codice_l0_lo-sw-species_20240429_v001.pkts +0 -0
  164. imap_processing/tests/hi/test_decom.py +0 -55
  165. imap_processing/tests/hi/test_l1a_sci_de.py +0 -72
  166. imap_processing/tests/idex/imap_idex_l0_raw_20230725_v001.pkts +0 -0
  167. imap_processing/tests/mag/imap_mag_l1a_burst-magi_20231025_v001.cdf +0 -0
  168. /imap_processing/{hi/l0/__init__.py → tests/glows/test_glows_l2_data.py} +0 -0
  169. /imap_processing/tests/hit/test_data/{imap_hit_l0_hk_20100105_v001.pkts → imap_hit_l0_raw_20100105_v001.pkts} +0 -0
  170. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/LICENSE +0 -0
  171. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/WHEEL +0 -0
  172. {imap_processing-0.7.0.dist-info → imap_processing-0.9.0.dist-info}/entry_points.txt +0 -0
@@ -1,112 +1,71 @@
  import numpy as np
+ import pandas as pd
  import pytest
  import xarray as xr

  from imap_processing import imap_module_directory
- from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
- from imap_processing.hit.l1a.hit_l1a import (
+ from imap_processing.hit.hit_utils import (
      HitAPID,
-     concatenate_leak_variables,
-     hit_l1a,
-     process_housekeeping,
+     get_datasets_by_apid,
  )
- from imap_processing.utils import packet_file_to_datasets
+ from imap_processing.hit.l1a.hit_l1a import decom_hit, hit_l1a, subcom_sectorates
+
+ # TODO: Packet files are per apid at the moment so the tests currently
+ # reflect this. Eventually, HIT will provide a packet file with all apids
+ # and the tests will need to be updated.


  @pytest.fixture(scope="module")
- def packet_filepath():
+ def hk_packet_filepath():
      """Set path to test data file"""
      return (
-         imap_module_directory / "tests/hit/test_data/imap_hit_l0_hk_20100105_v001.pkts"
-     )
-
-
- @pytest.fixture(scope="module")
- def datasets(packet_filepath):
-     """Create datasets from packet file"""
-     packet_definition = (
-         imap_module_directory / "hit/packet_definitions/" "hit_packet_definitions.xml"
-     )
-     datasets_by_apid = packet_file_to_datasets(
-         packet_file=packet_filepath,
-         xtce_packet_definition=packet_definition,
+         imap_module_directory / "tests/hit/test_data/imap_hit_l0_raw_20100105_v001.pkts"
      )
-     return datasets_by_apid


  @pytest.fixture(scope="module")
- def attribute_manager():
-     """Create the attribute manager"""
-     attr_mgr = ImapCdfAttributes()
-     attr_mgr.add_instrument_global_attrs(instrument="hit")
-     attr_mgr.add_instrument_variable_attrs(instrument="hit", level="l1a")
-     attr_mgr.add_global_attribute("Data_version", "001")
-     return attr_mgr
-
+ def sci_packet_filepath():
+     """Set path to test data file"""
+     return imap_module_directory / "tests/hit/test_data/sci_sample1.ccsds"

- @pytest.fixture(scope="module")
- def housekeeping_dataset(datasets):
-     """Get the housekeeping dataset"""
-     return datasets[HitAPID.HIT_HSKP]

+ def test_validate_l1a_housekeeping_data(hk_packet_filepath):
+     """Validate the housekeeping dataset created by the L1A processing.

- def test_hit_l1a(packet_filepath):
-     """Create L1A datasets from a packet file.
+     Compares the processed housekeeping data with expected values from
+     a validation csv file.

      Parameters
      ----------
-     packet_filepath : str
-         Path to ccsds file
+     hk_packet_filepath : str
+         File path to housekeeping ccsds file
      """
-     processed_datasets = hit_l1a(packet_filepath, "001")
-     # TODO: update assertions after science data processing is completed
-     assert isinstance(processed_datasets, list)
-     assert len(processed_datasets) == 1
-     assert isinstance(processed_datasets[0], xr.Dataset)
-     assert processed_datasets[0].attrs["Logical_source"] == "imap_hit_l1a_hk"
-
-
- def test_concatenate_leak_variables(housekeeping_dataset):
-     """Test concatenation of leak_i variables"""
-
-     # Create data array for leak_i dependency
-     adc_channels = xr.DataArray(
-         np.arange(64, dtype=np.uint8),
-         name="adc_channels",
-         dims=["adc_channels"],
-     )
-
-     updated_dataset = concatenate_leak_variables(housekeeping_dataset, adc_channels)
-
-     # Assertions
-     # ----------------
-     assert "leak_i" in updated_dataset
-     assert updated_dataset["leak_i"].shape == (88, 64)
-     for i in range(64):
-         # Check if the values in the `leak_i` variable match the values in
-         # the original `leak_i_XX` variable.
-         # - First access the `leak_i` variable in the `updated_dataset`.
-         # The [:, i] selects all rows (`:`) and the `i`-th column of the `leak_i`
-         # variable.
-         # - Then access the `leak_i_XX` variable in the `housekeeping_dataset`.
-         # The `f"leak_i_{i:02d}"` selects the variable with the name `leak_i_XX`
-         # where `XX` is the `i`-th value.
-         # - Compare values
-         np.testing.assert_array_equal(
-             updated_dataset["leak_i"][:, i], housekeeping_dataset[f"leak_i_{i:02d}"]
-         )
+     datasets = hit_l1a(hk_packet_filepath, "001")
+     hk_dataset = None
+     for dataset in datasets:
+         if dataset.attrs["Logical_source"] == "imap_hit_l1a_hk":
+             hk_dataset = dataset

+     # Load the validation data
+     validation_file = (
+         imap_module_directory / "tests/hit/validation_data/hskp_sample_raw.csv"
+     )
+     validation_data = pd.read_csv(validation_file)
+     validation_data.columns = validation_data.columns.str.lower()

- def test_process_housekeeping(housekeeping_dataset, attribute_manager):
-     """Test processing of housekeeping dataset"""
-
-     # Call the function
-     processed_hskp_dataset = process_housekeeping(
-         housekeeping_dataset, attribute_manager
+     # Get a list of leak columns in ascending order
+     # (LEAK_I_00, LEAK_I_01, ..., LEAK_I_63)
+     # and group values into a single column
+     leak_columns = [col for col in validation_data.columns if col.startswith("leak")][
+         ::-1
+     ]
+     validation_data["leak_i"] = validation_data[leak_columns].apply(
+         lambda row: row.values, axis=1
      )
+     validation_data.drop(columns=leak_columns, inplace=True)

-     # Define the keys that should have dropped from the dataset
-     dropped_keys = {
+     # Define the keys that should have dropped from the housekeeping dataset
+     dropped_fields = {
          "pkt_apid",
          "sc_tick",
          "version",
@@ -121,104 +80,265 @@ def test_process_housekeeping(housekeeping_dataset, attribute_manager):
          "hskp_spare4",
          "hskp_spare5",
      }
-     # Define the keys that should be present
-     valid_keys = {
-         "heater_on",
-         "fsw_version_b",
-         "ebox_m12va",
-         "phasic_stat",
-         "ebox_3d4vd",
-         "ebox_p2d0vd",
-         "temp1",
-         "last_bad_seq_num",
-         "ebox_m5d7va",
-         "ebox_p12va",
-         "table_status",
-         "enable_50khz",
-         "mram_disabled",
-         "temp3",
-         "preamp_l1a",
-         "l2ab_bias",
-         "l34b_bias",
-         "fsw_version_c",
-         "num_evnt_last_hk",
-         "dac1_enable",
-         "preamp_l234b",
-         "analog_temp",
-         "fee_running",
-         "fsw_version_a",
-         "num_errors",
-         "test_pulser_on",
-         "dac0_enable",
-         "preamp_l1b",
-         "l1ab_bias",
-         "l34a_bias",
-         "leak_i",
-         "last_good_cmd",
-         "lvps_temp",
-         "idpu_temp",
-         "temp2",
-         "preamp_l234a",
-         "last_good_seq_num",
-         "num_good_cmds",
-         "heater_control",
-         "hvps_temp",
-         "ebox_p5d7va",
-         "spin_period_long",
-         "enable_hvps",
-         "temp0",
-         "spin_period_short",
-         "dyn_thresh_lvl",
-         "num_bad_cmds",
-         "adc_mode",
-         "ebox_5d1vd",
-         "active_heater",
-         "last_error_num",
-         "last_bad_cmd",
-         "ref_p5v",
-         "code_checksum",
-         "mode",
+
+     # Define the keys that should be ignored in the validation
+     # like ccsds headers
+     ignore_validation_fields = {
+         "ccsds_version",
+         "ccsds_type",
+         "ccsds_sec_hdr_flag",
+         "ccsds_appid",
+         "ccsds_grp_flag",
+         "ccsds_seq_cnt",
+         "ccsds_length",
+         "shcoarse",
      }

-     # Define the dataset attributes
-     dataset_attrs = {
-         "Data_level": "1A",
-         "Data_type": "L1A_HK>Level-1A Housekeeping",
-         "Data_version": "001",
-         "Descriptor": "HIT>IMAP High-energy Ion Telescope",
-         "Discipline": "Solar Physics>Heliospheric Physics",
-         "File_naming_convention": "source_descriptor_datatype_yyyyMMdd_vNNN",
-         "HTTP_LINK": "https://imap.princeton.edu/",
-         "Instrument_type": "Particles (space)",
-         "LINK_TITLE": "IMAP The Interstellar Mapping and Acceleration Probe",
-         "Logical_file_id": None,
-         "Logical_source": "imap_hit_l1a_hk",
-         "Logical_source_description": "IMAP Mission HIT Instrument Level-1A "
-         "Housekeeping Data.",
-         "Mission_group": "IMAP",
-         "PI_affiliation": "Princeton University",
-         "PI_name": "Prof. David J. McComas",
-         "Project": "STP>Solar Terrestrial Probes",
-         "Source_name": "IMAP>Interstellar Mapping and Acceleration Probe",
-         "TEXT": "The High-energy Ion Telescope (HIT) measures the elemental "
-         "composition, energy spectra, angle distributions, and arrival "
-         "times of high-energy ions. HIT delivers full-sky coverage from "
-         "a wide instrument field-of-view (FOV) to enable a high resolution "
-         "of ion measurements, such as observing shock-accelerated ions, "
-         "determining the origin of the solar energetic particles (SEPs) "
-         "spectra, and resolving particle transport in the heliosphere. "
-         "See https://imap.princeton.edu/instruments/hit for more details.\n",
+     # Check that dropped variables are not in the dataset
+     assert set(dropped_fields).isdisjoint(set(hk_dataset.data_vars.keys()))
+
+     # Compare the housekeeping dataset with the expected validation data
+     for field in validation_data.columns:
+         if field not in ignore_validation_fields:
+             assert field in hk_dataset.data_vars.keys()
+             for pkt in range(validation_data.shape[0]):
+                 assert np.array_equal(
+                     hk_dataset[field][pkt].data, validation_data[field][pkt]
+                 )
+
+
+ def test_subcom_sectorates(sci_packet_filepath):
+     """Test the subcom_sectorates function.
+
+     This function organizes the sector rates data
+     by species and adds the data as new variables
+     to the dataset.
+     """
+
+     # Unpack and decompress ccsds file to xarray datasets
+     sci_dataset = get_datasets_by_apid(sci_packet_filepath)[HitAPID.HIT_SCIENCE]
+     sci_dataset = decom_hit(sci_dataset)
+
+     # Call the function to be tested
+     subcom_sectorates(sci_dataset)
+
+     # Number of science frames in the dataset
+     frames = sci_dataset["epoch"].shape[0]
+
+     # Check if the dataset has the expected new variables
+     for species in ["h", "he4", "cno", "nemgsi", "fe"]:
+         assert f"{species}_counts_sectored" in sci_dataset
+         assert f"{species}_energy_min" in sci_dataset
+         assert f"{species}_energy_max" in sci_dataset
+
+         # Check the shape of the new data variables
+         if species == "h":
+             assert sci_dataset[f"{species}_counts_sectored"].shape == (frames, 3, 8, 15)
+             assert sci_dataset[f"{species}_energy_min"].shape == (3,)
+         elif species in ("4he", "cno", "nemgsi"):
+             assert sci_dataset[f"{species}_counts_sectored"].shape == (frames, 2, 8, 15)
+             assert sci_dataset[f"{species}_energy_min"].shape == (2,)
+         elif species == "fe":
+             assert sci_dataset[f"{species}_counts_sectored"].shape == (frames, 1, 8, 15)
+             assert sci_dataset[f"{species}_energy_min"].shape == (1,)
+         assert (
+             sci_dataset[f"{species}_energy_max"].shape
+             == sci_dataset[f"{species}_energy_min"].shape
+         )
+
+
+ def test_validate_l1a_counts_data(sci_packet_filepath):
+     """Compare the output of the L1A processing to the validation data.
+
+     This test compares the counts data product with the validation data.
+     The PHA data product is not validated since it's not being decommutated.
+
+     Parameters
+     ----------
+     sci_packet_filepath : str
+         Path to ccsds file for science data
+     """
+     # Process the sample data
+     processed_datasets = hit_l1a(sci_packet_filepath, "001")
+     l1a_counts_data = processed_datasets[0]
+
+     # Read in the validation data
+     validation_data = pd.read_csv(
+         imap_module_directory / "tests/hit/validation_data/sci_sample_raw1.csv"
+     )
+
+     # Helper functions for this test
+     def consolidate_rate_columns(data, rate_columns):
+         # The validation data isn't organized by arrays.
+         # Each value is in a separate column.
+         # Aggregate related data into arrays.
+         for new_col, prefix in rate_columns.items():
+             columns = [col for col in data.columns if prefix in col]
+             data[new_col] = data[columns].apply(lambda row: row.values, axis=1)
+             if new_col == "sectorates":
+                 # Differentiate between the sectorate columns with three and
+                 # five digits in the name. Those with three digits contain the
+                 # sectorate value for the science frame and those with five digits
+                 # are the sectorate values with the mod value appended to the end.
+                 # The mod value determines the species and energy range for that
+                 # science frame
+                 sectorates_three_digits = data.filter(
+                     regex=r"^SECTORATES_\d{3}$"
+                 ).columns
+                 sectorates_five_digits = data.filter(
+                     regex=r"^SECTORATES_\d{3}_\d{1}$"
+                 ).columns
+                 data["sectorates"] = data[sectorates_three_digits].apply(
+                     lambda row: row.values.reshape(8, 15), axis=1
+                 )
+                 data["sectorates_by_mod_val"] = data[sectorates_five_digits].apply(
+                     lambda row: row.values, axis=1
+                 )
+             data.drop(columns=columns, inplace=True)
+         return data
+
+     def process_single_rates(data):
+         # Combine the single rates for high and low gain into a 2D array
+         data["sngrates"] = data.apply(
+             lambda row: np.array([row["sngrates_hg"], row["sngrates_lg"]]), axis=1
+         )
+         data.drop(columns=["sngrates_hg", "sngrates_lg"], inplace=True)
+         return data
+
+     def process_sectorates(data):
+         # Add species and energy index to the data frame for each science frame
+         # First find the mod value for each science frame which equals the first index
+         # in the sectorates_by_mod_val array that has a value instead of a blank space
+         data["mod_10"] = data["sectorates_by_mod_val"].apply(
+             lambda row: next((i for i, value in enumerate(row) if value != " "), None)
+         )
+         # Mapping of mod value to species and energy index
+         species_energy = {
+             0: {"species": "H", "energy_idx": 0},
+             1: {"species": "H", "energy_idx": 1},
+             2: {"species": "H", "energy_idx": 2},
+             3: {"species": "He4", "energy_idx": 0},
+             4: {"species": "He4", "energy_idx": 1},
+             5: {"species": "CNO", "energy_idx": 0},
+             6: {"species": "CNO", "energy_idx": 1},
+             7: {"species": "NeMgSi", "energy_idx": 0},
+             8: {"species": "NeMgSi", "energy_idx": 1},
+             9: {"species": "Fe", "energy_idx": 0},
+         }
+         # Use the mod 10 value to determine the species and energy index
+         # for each science frame and add this information to the data frame
+         data["species"] = data["mod_10"].apply(
+             lambda row: species_energy[row]["species"].lower()
+             if row is not None
+             else None
+         )
+         data["energy_idx"] = data["mod_10"].apply(
+             lambda row: species_energy[row]["energy_idx"] if row is not None else None
+         )
+         data.drop(columns=["sectorates_by_mod_val", "mod_10"], inplace=True)
+         return data
+
+     def compare_data(expected_data, actual_data, skip):
+         # Compare the processed data to the validation data
+         for field in expected_data.columns:
+             if field not in [
+                 "sc_tick",
+                 "hdr_status_bits",
+                 "species",
+                 "energy_idx",
+             ]:
+                 assert field in l1a_counts_data.data_vars.keys()
+             if field not in ignore:
+                 for frame in range(expected_data.shape[0]):
+                     if field == "species":
+                         species = expected_data[field][frame]
+                         energy_idx = expected_data["energy_idx"][frame]
+                         assert np.array_equal(
+                             actual_data[f"{species}_counts_sectored"][frame][
+                                 energy_idx
+                             ].data,
+                             expected_data["sectorates"][frame],
+                         )
+                     else:
+                         assert np.array_equal(
+                             actual_data[field][frame].data, expected_data[field][frame]
+                         )
+
+     rate_columns = {
+         "coinrates": "COINRATES_",
+         "bufrates": "BUFRATES_",
+         "l2fgrates": "L2FGRATES_",
+         "l2bgrates": "L2BGRATES_",
+         "l3fgrates": "L3FGRATES_",
+         "l3bgrates": "L3BGRATES_",
+         "penfgrates": "PENFGRATES_",
+         "penbgrates": "PENBGRATES_",
+         "sectorates": "SECTORATES_",
+         "l4fgrates": "L4FGRATES_",
+         "l4bgrates": "L4BGRATES_",
+         "ialirtrates": "IALIRTRATES_",
+         "sngrates_hg": "SNGRATES_HG_",
+         "sngrates_lg": "SNGRATES_LG_",
      }

-     # Define the coordinates and dimensions. Both have equivalent values
-     dataset_coords_dims = {"epoch", "adc_channels", "adc_channels_label"}
-
-     # Assertions
-     # ----------------
-     # Check that the dataset has the correct variables
-     assert valid_keys == set(processed_hskp_dataset.data_vars.keys())
-     assert set(dropped_keys).isdisjoint(set(processed_hskp_dataset.data_vars.keys()))
-     # Check that the dataset has the correct attributes, coordinates, and dimensions
-     assert processed_hskp_dataset.attrs == dataset_attrs
-     assert processed_hskp_dataset.coords.keys() == dataset_coords_dims
-     assert processed_hskp_dataset.sizes.keys() == dataset_coords_dims
+     # Prepare validation data for comparison with processed data
+     validation_data.columns = validation_data.columns.str.strip()
+     validation_data = consolidate_rate_columns(validation_data, rate_columns)
+     validation_data = process_single_rates(validation_data)
+     validation_data = process_sectorates(validation_data)
+
+     # Fields to skip in comparison. CCSDS headers plus a few others that are not
+     # relevant to the comparison.
+     # The CCSDS header fields contain data per packet in the dataset, but the
+     # validation data has a value per science frame so skipping comparison for now
+     ignore = [
+         "version",
+         "type",
+         "sec_hdr_flg",
+         "pkt_apid",
+         "seq_flgs",
+         "src_seq_ctr",
+         "pkt_len",
+         "sc_tick",
+         "hdr_status_bits",
+         "energy_idx",
+     ]
+
+     # Compare processed data to validation data
+     validation_data.columns = validation_data.columns.str.lower()
+     compare_data(validation_data, l1a_counts_data, ignore)
+
+     # TODO: add validation for hdr_status_bits once validation data has been updated
+     # to include this field broken out into its subfields
+
+     # TODO: add validation for CCSDS fields? currently validation data only has
+     # one value per frame and the processed data has one value per packet.
+
+
+ def test_hit_l1a(hk_packet_filepath, sci_packet_filepath):
+     """Create L1A datasets from packet files.
+
+     Parameters
+     ----------
+     hk_packet_filepath : str
+         Path to ccsds file for housekeeping data
+     sci_packet_filepath : str
+         Path to ccsds file for science data
+     """
+     for packet_filepath in [hk_packet_filepath, sci_packet_filepath]:
+         processed_datasets = hit_l1a(packet_filepath, "001")
+         assert isinstance(processed_datasets, list)
+         assert all(isinstance(ds, xr.Dataset) for ds in processed_datasets)
+         if packet_filepath == hk_packet_filepath:
+             assert len(processed_datasets) == 1
+             assert processed_datasets[0].attrs["Logical_source"] == "imap_hit_l1a_hk"
+         else:
+             assert len(processed_datasets) == 2
+             assert (
+                 processed_datasets[0].attrs["Logical_source"]
+                 == "imap_hit_l1a_count-rates"
+             )
+             assert (
+                 processed_datasets[1].attrs["Logical_source"]
+                 == "imap_hit_l1a_pulse-height-events"
+             )