imap-processing 0.12.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing has been flagged as potentially problematic; consult the package registry's advisory page for details.

Files changed (272)
  1. imap_processing/__init__.py +1 -0
  2. imap_processing/_version.py +2 -2
  3. imap_processing/ccsds/ccsds_data.py +1 -2
  4. imap_processing/ccsds/excel_to_xtce.py +1 -2
  5. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +18 -12
  6. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +569 -0
  7. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1846 -128
  8. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +5 -5
  9. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +20 -1
  10. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +6 -4
  11. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +3 -3
  12. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -0
  13. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +22 -0
  14. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +16 -0
  15. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +178 -5
  16. imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +5045 -41
  17. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -19
  18. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +8 -48
  19. imap_processing/cdf/utils.py +41 -33
  20. imap_processing/cli.py +463 -234
  21. imap_processing/codice/codice_l1a.py +260 -47
  22. imap_processing/codice/codice_l1b.py +51 -152
  23. imap_processing/codice/constants.py +38 -1
  24. imap_processing/ena_maps/ena_maps.py +658 -65
  25. imap_processing/ena_maps/utils/coordinates.py +1 -1
  26. imap_processing/ena_maps/utils/spatial_utils.py +10 -5
  27. imap_processing/glows/l1a/glows_l1a.py +28 -99
  28. imap_processing/glows/l1a/glows_l1a_data.py +2 -2
  29. imap_processing/glows/l1b/glows_l1b.py +1 -4
  30. imap_processing/glows/l1b/glows_l1b_data.py +1 -3
  31. imap_processing/glows/l2/glows_l2.py +2 -5
  32. imap_processing/hi/l1a/hi_l1a.py +31 -12
  33. imap_processing/hi/l1b/hi_l1b.py +80 -43
  34. imap_processing/hi/l1c/hi_l1c.py +12 -16
  35. imap_processing/hit/ancillary/imap_hit_l1b-to-l2-sector-dt0-factors_20250219_v002.csv +81 -0
  36. imap_processing/hit/hit_utils.py +93 -35
  37. imap_processing/hit/l0/decom_hit.py +3 -1
  38. imap_processing/hit/l1a/hit_l1a.py +30 -25
  39. imap_processing/hit/l1b/constants.py +6 -2
  40. imap_processing/hit/l1b/hit_l1b.py +279 -318
  41. imap_processing/hit/l2/constants.py +37 -0
  42. imap_processing/hit/l2/hit_l2.py +373 -264
  43. imap_processing/ialirt/l0/parse_mag.py +138 -10
  44. imap_processing/ialirt/l0/process_swapi.py +69 -0
  45. imap_processing/ialirt/l0/process_swe.py +318 -22
  46. imap_processing/ialirt/packet_definitions/ialirt.xml +216 -212
  47. imap_processing/ialirt/packet_definitions/ialirt_codicehi.xml +1 -1
  48. imap_processing/ialirt/packet_definitions/ialirt_codicelo.xml +1 -1
  49. imap_processing/ialirt/packet_definitions/ialirt_swapi.xml +14 -14
  50. imap_processing/ialirt/utils/grouping.py +1 -1
  51. imap_processing/idex/idex_constants.py +9 -1
  52. imap_processing/idex/idex_l0.py +22 -8
  53. imap_processing/idex/idex_l1a.py +75 -44
  54. imap_processing/idex/idex_l1b.py +9 -8
  55. imap_processing/idex/idex_l2a.py +79 -45
  56. imap_processing/idex/idex_l2b.py +120 -0
  57. imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +33 -39
  58. imap_processing/idex/packet_definitions/idex_housekeeping_packet_definition.xml +9130 -0
  59. imap_processing/lo/l0/lo_science.py +1 -2
  60. imap_processing/lo/l1a/lo_l1a.py +1 -4
  61. imap_processing/lo/l1b/lo_l1b.py +527 -6
  62. imap_processing/lo/l1b/tof_conversions.py +11 -0
  63. imap_processing/lo/l1c/lo_l1c.py +1 -4
  64. imap_processing/mag/constants.py +43 -0
  65. imap_processing/mag/imap_mag_sdc_configuration_v001.py +8 -0
  66. imap_processing/mag/l1a/mag_l1a.py +2 -9
  67. imap_processing/mag/l1a/mag_l1a_data.py +10 -10
  68. imap_processing/mag/l1b/mag_l1b.py +84 -17
  69. imap_processing/mag/l1c/interpolation_methods.py +180 -3
  70. imap_processing/mag/l1c/mag_l1c.py +236 -70
  71. imap_processing/mag/l2/mag_l2.py +140 -0
  72. imap_processing/mag/l2/mag_l2_data.py +288 -0
  73. imap_processing/spacecraft/quaternions.py +1 -3
  74. imap_processing/spice/geometry.py +3 -3
  75. imap_processing/spice/kernels.py +0 -276
  76. imap_processing/spice/pointing_frame.py +257 -0
  77. imap_processing/spice/repoint.py +48 -19
  78. imap_processing/spice/spin.py +38 -33
  79. imap_processing/spice/time.py +24 -0
  80. imap_processing/swapi/l1/swapi_l1.py +16 -12
  81. imap_processing/swapi/l2/swapi_l2.py +116 -4
  82. imap_processing/swapi/swapi_utils.py +32 -0
  83. imap_processing/swe/l1a/swe_l1a.py +2 -9
  84. imap_processing/swe/l1a/swe_science.py +8 -11
  85. imap_processing/swe/l1b/swe_l1b.py +898 -23
  86. imap_processing/swe/l2/swe_l2.py +21 -77
  87. imap_processing/swe/utils/swe_constants.py +1 -0
  88. imap_processing/tests/ccsds/test_excel_to_xtce.py +1 -1
  89. imap_processing/tests/cdf/test_utils.py +14 -16
  90. imap_processing/tests/codice/conftest.py +44 -33
  91. imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-pha_20241110193700_v0.0.0.cdf +0 -0
  92. imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-pha_20241110193700_v0.0.0.cdf +0 -0
  93. imap_processing/tests/codice/test_codice_l1a.py +20 -11
  94. imap_processing/tests/codice/test_codice_l1b.py +6 -7
  95. imap_processing/tests/conftest.py +78 -22
  96. imap_processing/tests/ena_maps/test_ena_maps.py +462 -33
  97. imap_processing/tests/ena_maps/test_spatial_utils.py +1 -1
  98. imap_processing/tests/glows/conftest.py +10 -14
  99. imap_processing/tests/glows/test_glows_decom.py +4 -4
  100. imap_processing/tests/glows/test_glows_l1a_cdf.py +6 -27
  101. imap_processing/tests/glows/test_glows_l1a_data.py +6 -8
  102. imap_processing/tests/glows/test_glows_l1b.py +11 -11
  103. imap_processing/tests/glows/test_glows_l1b_data.py +5 -5
  104. imap_processing/tests/glows/test_glows_l2.py +2 -8
  105. imap_processing/tests/hi/conftest.py +1 -1
  106. imap_processing/tests/hi/test_hi_l1b.py +10 -12
  107. imap_processing/tests/hi/test_hi_l1c.py +27 -24
  108. imap_processing/tests/hi/test_l1a.py +7 -9
  109. imap_processing/tests/hi/test_science_direct_event.py +2 -2
  110. imap_processing/tests/hit/helpers/l1_validation.py +44 -43
  111. imap_processing/tests/hit/test_decom_hit.py +1 -1
  112. imap_processing/tests/hit/test_hit_l1a.py +9 -9
  113. imap_processing/tests/hit/test_hit_l1b.py +172 -217
  114. imap_processing/tests/hit/test_hit_l2.py +380 -118
  115. imap_processing/tests/hit/test_hit_utils.py +122 -55
  116. imap_processing/tests/hit/validation_data/hit_l1b_standard_sample2_nsrl_v4_3decimals.csv +62 -62
  117. imap_processing/tests/hit/validation_data/sci_sample_raw.csv +1 -1
  118. imap_processing/tests/ialirt/unit/test_decom_ialirt.py +16 -81
  119. imap_processing/tests/ialirt/unit/test_grouping.py +2 -2
  120. imap_processing/tests/ialirt/unit/test_parse_mag.py +71 -16
  121. imap_processing/tests/ialirt/unit/test_process_codicehi.py +3 -3
  122. imap_processing/tests/ialirt/unit/test_process_codicelo.py +3 -10
  123. imap_processing/tests/ialirt/unit/test_process_ephemeris.py +4 -4
  124. imap_processing/tests/ialirt/unit/test_process_hit.py +3 -3
  125. imap_processing/tests/ialirt/unit/test_process_swapi.py +24 -16
  126. imap_processing/tests/ialirt/unit/test_process_swe.py +115 -7
  127. imap_processing/tests/idex/conftest.py +72 -7
  128. imap_processing/tests/idex/test_data/imap_idex_l0_raw_20241206_v001.pkts +0 -0
  129. imap_processing/tests/idex/test_data/imap_idex_l0_raw_20250108_v001.pkts +0 -0
  130. imap_processing/tests/idex/test_idex_l0.py +33 -11
  131. imap_processing/tests/idex/test_idex_l1a.py +50 -23
  132. imap_processing/tests/idex/test_idex_l1b.py +104 -25
  133. imap_processing/tests/idex/test_idex_l2a.py +48 -32
  134. imap_processing/tests/idex/test_idex_l2b.py +93 -0
  135. imap_processing/tests/lo/test_lo_l1a.py +3 -3
  136. imap_processing/tests/lo/test_lo_l1b.py +371 -6
  137. imap_processing/tests/lo/test_lo_l1c.py +1 -1
  138. imap_processing/tests/lo/test_lo_science.py +6 -7
  139. imap_processing/tests/lo/test_star_sensor.py +1 -1
  140. imap_processing/tests/mag/conftest.py +58 -9
  141. imap_processing/tests/mag/test_mag_decom.py +4 -3
  142. imap_processing/tests/mag/test_mag_l1a.py +13 -7
  143. imap_processing/tests/mag/test_mag_l1b.py +9 -9
  144. imap_processing/tests/mag/test_mag_l1c.py +151 -47
  145. imap_processing/tests/mag/test_mag_l2.py +130 -0
  146. imap_processing/tests/mag/test_mag_validation.py +144 -7
  147. imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-magi-normal-in.csv +1217 -0
  148. imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-magi-normal-out.csv +1857 -0
  149. imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-mago-normal-in.csv +1217 -0
  150. imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-mago-normal-out.csv +1857 -0
  151. imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-magi-normal-in.csv +1217 -0
  152. imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-magi-normal-out.csv +1793 -0
  153. imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-mago-normal-in.csv +1217 -0
  154. imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-mago-normal-out.csv +1793 -0
  155. imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-magi-burst-in.csv +2561 -0
  156. imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-magi-normal-in.csv +961 -0
  157. imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-magi-normal-out.csv +1539 -0
  158. imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-mago-normal-in.csv +1921 -0
  159. imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-mago-normal-out.csv +2499 -0
  160. imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-magi-normal-in.csv +865 -0
  161. imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-magi-normal-out.csv +1196 -0
  162. imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-mago-normal-in.csv +1729 -0
  163. imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-mago-normal-out.csv +3053 -0
  164. imap_processing/tests/mag/validation/L2/imap_mag_l1b_norm-mago_20251017_v002.cdf +0 -0
  165. imap_processing/tests/mag/validation/calibration/imap_mag_l2-calibration-matrices_20251017_v004.cdf +0 -0
  166. imap_processing/tests/mag/validation/calibration/imap_mag_l2-offsets-norm_20251017_20251017_v001.cdf +0 -0
  167. imap_processing/tests/spacecraft/test_quaternions.py +1 -1
  168. imap_processing/tests/spice/test_data/fake_repoint_data.csv +4 -4
  169. imap_processing/tests/spice/test_data/fake_spin_data.csv +11 -11
  170. imap_processing/tests/spice/test_geometry.py +3 -3
  171. imap_processing/tests/spice/test_kernels.py +1 -200
  172. imap_processing/tests/spice/test_pointing_frame.py +185 -0
  173. imap_processing/tests/spice/test_repoint.py +20 -10
  174. imap_processing/tests/spice/test_spin.py +50 -9
  175. imap_processing/tests/spice/test_time.py +14 -0
  176. imap_processing/tests/swapi/lut/imap_swapi_esa-unit-conversion_20250211_v000.csv +73 -0
  177. imap_processing/tests/swapi/lut/imap_swapi_lut-notes_20250211_v000.csv +1025 -0
  178. imap_processing/tests/swapi/test_swapi_l1.py +7 -9
  179. imap_processing/tests/swapi/test_swapi_l2.py +180 -8
  180. imap_processing/tests/swe/lut/checker-board-indices.csv +24 -0
  181. imap_processing/tests/swe/lut/imap_swe_esa-lut_20250301_v000.csv +385 -0
  182. imap_processing/tests/swe/lut/imap_swe_l1b-in-flight-cal_20240510_20260716_v000.csv +3 -0
  183. imap_processing/tests/swe/test_swe_l1a.py +6 -6
  184. imap_processing/tests/swe/test_swe_l1a_science.py +3 -3
  185. imap_processing/tests/swe/test_swe_l1b.py +162 -24
  186. imap_processing/tests/swe/test_swe_l2.py +82 -102
  187. imap_processing/tests/test_cli.py +171 -88
  188. imap_processing/tests/test_utils.py +2 -1
  189. imap_processing/tests/ultra/data/mock_data.py +49 -21
  190. imap_processing/tests/ultra/unit/conftest.py +53 -70
  191. imap_processing/tests/ultra/unit/test_badtimes.py +2 -4
  192. imap_processing/tests/ultra/unit/test_cullingmask.py +4 -6
  193. imap_processing/tests/ultra/unit/test_de.py +3 -10
  194. imap_processing/tests/ultra/unit/test_decom_apid_880.py +27 -76
  195. imap_processing/tests/ultra/unit/test_decom_apid_881.py +15 -16
  196. imap_processing/tests/ultra/unit/test_decom_apid_883.py +12 -10
  197. imap_processing/tests/ultra/unit/test_decom_apid_896.py +202 -55
  198. imap_processing/tests/ultra/unit/test_lookup_utils.py +23 -1
  199. imap_processing/tests/ultra/unit/test_spacecraft_pset.py +3 -4
  200. imap_processing/tests/ultra/unit/test_ultra_l1a.py +84 -307
  201. imap_processing/tests/ultra/unit/test_ultra_l1b.py +30 -12
  202. imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +2 -2
  203. imap_processing/tests/ultra/unit/test_ultra_l1b_culling.py +4 -1
  204. imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +163 -29
  205. imap_processing/tests/ultra/unit/test_ultra_l1c.py +5 -5
  206. imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +32 -43
  207. imap_processing/tests/ultra/unit/test_ultra_l2.py +230 -0
  208. imap_processing/ultra/constants.py +1 -1
  209. imap_processing/ultra/l0/decom_tools.py +21 -34
  210. imap_processing/ultra/l0/decom_ultra.py +168 -204
  211. imap_processing/ultra/l0/ultra_utils.py +152 -136
  212. imap_processing/ultra/l1a/ultra_l1a.py +55 -243
  213. imap_processing/ultra/l1b/badtimes.py +1 -4
  214. imap_processing/ultra/l1b/cullingmask.py +2 -6
  215. imap_processing/ultra/l1b/de.py +62 -47
  216. imap_processing/ultra/l1b/extendedspin.py +8 -4
  217. imap_processing/ultra/l1b/lookup_utils.py +72 -9
  218. imap_processing/ultra/l1b/ultra_l1b.py +3 -8
  219. imap_processing/ultra/l1b/ultra_l1b_culling.py +4 -4
  220. imap_processing/ultra/l1b/ultra_l1b_extended.py +236 -78
  221. imap_processing/ultra/l1c/histogram.py +2 -6
  222. imap_processing/ultra/l1c/spacecraft_pset.py +2 -4
  223. imap_processing/ultra/l1c/ultra_l1c.py +1 -5
  224. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +107 -60
  225. imap_processing/ultra/l2/ultra_l2.py +299 -0
  226. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +526 -0
  227. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +526 -0
  228. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +526 -0
  229. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +526 -0
  230. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +2 -2
  231. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +2 -0
  232. imap_processing/ultra/packet_definitions/README.md +38 -0
  233. imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +15302 -482
  234. imap_processing/ultra/utils/ultra_l1_utils.py +13 -12
  235. imap_processing/utils.py +1 -1
  236. {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/METADATA +3 -2
  237. {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/RECORD +264 -225
  238. imap_processing/hi/l1b/hi_eng_unit_convert_table.csv +0 -154
  239. imap_processing/mag/imap_mag_sdc-configuration_v001.yaml +0 -6
  240. imap_processing/mag/l1b/__init__.py +0 -0
  241. imap_processing/swe/l1b/swe_esa_lookup_table.csv +0 -1441
  242. imap_processing/swe/l1b/swe_l1b_science.py +0 -699
  243. imap_processing/tests/swe/test_swe_l1b_science.py +0 -103
  244. imap_processing/ultra/lookup_tables/dps_sensitivity45.cdf +0 -0
  245. imap_processing/ultra/lookup_tables/ultra_90_dps_exposure_compressed.cdf +0 -0
  246. /imap_processing/idex/packet_definitions/{idex_packet_definition.xml → idex_science_packet_definition.xml} +0 -0
  247. /imap_processing/tests/ialirt/{test_data → data}/l0/20240827095047_SWE_IALIRT_packet.bin +0 -0
  248. /imap_processing/tests/ialirt/{test_data → data}/l0/461971383-404.bin +0 -0
  249. /imap_processing/tests/ialirt/{test_data → data}/l0/461971384-405.bin +0 -0
  250. /imap_processing/tests/ialirt/{test_data → data}/l0/461971385-406.bin +0 -0
  251. /imap_processing/tests/ialirt/{test_data → data}/l0/461971386-407.bin +0 -0
  252. /imap_processing/tests/ialirt/{test_data → data}/l0/461971387-408.bin +0 -0
  253. /imap_processing/tests/ialirt/{test_data → data}/l0/461971388-409.bin +0 -0
  254. /imap_processing/tests/ialirt/{test_data → data}/l0/461971389-410.bin +0 -0
  255. /imap_processing/tests/ialirt/{test_data → data}/l0/461971390-411.bin +0 -0
  256. /imap_processing/tests/ialirt/{test_data → data}/l0/461971391-412.bin +0 -0
  257. /imap_processing/tests/ialirt/{test_data → data}/l0/BinLog CCSDS_FRAG_TLM_20240826_152323Z_IALIRT_data_for_SDC.bin +0 -0
  258. /imap_processing/tests/ialirt/{test_data → data}/l0/IALiRT Raw Packet Telemetry.txt +0 -0
  259. /imap_processing/tests/ialirt/{test_data → data}/l0/apid01152.tlm +0 -0
  260. /imap_processing/tests/ialirt/{test_data → data}/l0/eu_SWP_IAL_20240826_152033.csv +0 -0
  261. /imap_processing/tests/ialirt/{test_data → data}/l0/hi_fsw_view_1_ccsds.bin +0 -0
  262. /imap_processing/tests/ialirt/{test_data → data}/l0/hit_ialirt_sample.ccsds +0 -0
  263. /imap_processing/tests/ialirt/{test_data → data}/l0/hit_ialirt_sample.csv +0 -0
  264. /imap_processing/tests/ialirt/{test_data → data}/l0/idle_export_eu.SWE_IALIRT_20240827_093852.csv +0 -0
  265. /imap_processing/tests/ialirt/{test_data → data}/l0/imap_codice_l1a_hi-ialirt_20240523200000_v0.0.0.cdf +0 -0
  266. /imap_processing/tests/ialirt/{test_data → data}/l0/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
  267. /imap_processing/tests/ialirt/{test_data → data}/l0/sample_decoded_i-alirt_data.csv +0 -0
  268. /imap_processing/tests/mag/validation/{imap_calibration_mag_20240229_v01.cdf → calibration/imap_mag_l1b-calibration_20240229_v001.cdf} +0 -0
  269. /imap_processing/{swe/l1b/engineering_unit_convert_table.csv → tests/swe/lut/imap_swe_eu-conversion_20240510_v000.csv} +0 -0
  270. {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/LICENSE +0 -0
  271. {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/WHEEL +0 -0
  272. {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/entry_points.txt +0 -0
imap_processing/cli.py CHANGED
@@ -1,5 +1,4 @@
1
1
  #!/usr/bin/env python3
2
- # ruff: noqa: PLR0913
3
2
  """
4
3
  Run the processing for a specific instrument & data level.
5
4
 
@@ -11,17 +10,21 @@ Examples
11
10
  imap_cli --instrument <instrument> --level <data_level>
12
11
  """
13
12
 
13
+ from __future__ import annotations
14
+
14
15
  import argparse
15
16
  import logging
17
+ import re
16
18
  import sys
17
19
  from abc import ABC, abstractmethod
18
- from json import loads
19
20
  from pathlib import Path
20
21
  from typing import final
21
- from urllib.error import HTTPError
22
22
 
23
23
  import imap_data_access
24
24
  import xarray as xr
25
+ from imap_data_access.processing_input import (
26
+ ProcessingInputCollection,
27
+ )
25
28
 
26
29
  import imap_processing
27
30
  from imap_processing._version import __version__, __version_tuple__ # noqa: F401
@@ -53,8 +56,11 @@ from imap_processing.lo.l1c import lo_l1c
53
56
  from imap_processing.mag.l1a.mag_l1a import mag_l1a
54
57
  from imap_processing.mag.l1b.mag_l1b import mag_l1b
55
58
  from imap_processing.mag.l1c.mag_l1c import mag_l1c
59
+ from imap_processing.mag.l2.mag_l2 import mag_l2
60
+ from imap_processing.spacecraft import quaternions
56
61
  from imap_processing.swapi.l1.swapi_l1 import swapi_l1
57
62
  from imap_processing.swapi.l2.swapi_l2 import swapi_l2
63
+ from imap_processing.swapi.swapi_utils import read_swapi_lut_table
58
64
  from imap_processing.swe.l1a.swe_l1a import swe_l1a
59
65
  from imap_processing.swe.l1b.swe_l1b import swe_l1b
60
66
  from imap_processing.ultra.l1a import ultra_l1a
@@ -74,14 +80,22 @@ def _parse_args() -> argparse.Namespace:
74
80
  --descriptor "all"
75
81
  --start-date "20231212"
76
82
  --version "v001"
77
- --dependency "[
78
- {
79
- 'instrument': 'mag',
80
- 'data_level': 'l0',
81
- 'descriptor': 'sci',
82
- 'version': 'v001',
83
- 'start_date': '20231212'
84
- }]"
83
+ --dependency '[
84
+ {
85
+ "type": "ancillary",
86
+ "files": [
87
+ "imap_mag_l1b-cal_20250101_v001.cdf",
88
+ "imap_mag_l1b-cal_20250103_20250104_v002.cdf"
89
+ ]
90
+ },
91
+ {
92
+ "type": "science",
93
+ "files": [
94
+ "imap_idex_l2_sci_20240312_v000.cdf",
95
+ "imap_idex_l2_sci_20240312_v001.cdf"
96
+ ]
97
+ }
98
+ ]'
85
99
  --upload-to-sdc
86
100
 
87
101
  Returns
@@ -93,17 +107,28 @@ def _parse_args() -> argparse.Namespace:
93
107
  "This command line program invokes the processing pipeline "
94
108
  "for a specific instrument and data level. Example usage: "
95
109
  '"imap_cli --instrument "mag" '
96
- '--data-level "l1a"'
97
- '--descriptor "all"'
98
- ' --start-date "20231212"'
99
- '--version "v001"'
110
+ '--data-level "l1a" '
111
+ '--descriptor "all" '
112
+ ' --start-date "20231212" '
113
+ '--repointing "repoint12345" '
114
+ '--version "v001" '
100
115
  '--dependency "['
101
- ' {"instrument": "mag",'
102
- ' "data_level": "l0",'
103
- ' "descriptor": "sci",'
104
- ' "version": "v001",'
105
- ' "start_date": "20231212"'
106
- '}]" --upload-to-sdc"'
116
+ " {"
117
+ ' "type": "ancillary",'
118
+ ' "files": ['
119
+ ' "imap_mag_l1b-cal_20250101_v001.cdf",'
120
+ ' "imap_mag_l1b-cal_20250103_20250104_v002.cdf"'
121
+ " ]"
122
+ " },"
123
+ " {"
124
+ ' "type": "science",'
125
+ ' "files": ['
126
+ ' "imap_idex_l2_sci_20240312_v000.cdf",'
127
+ ' "imap_idex_l2_sci_20240312_v001.cdf"'
128
+ " ]"
129
+ " }"
130
+ "]"
131
+ ' --upload-to-sdc"'
107
132
  )
108
133
  instrument_help = (
109
134
  "The instrument to process. Acceptable values are: "
@@ -119,11 +144,23 @@ def _parse_args() -> argparse.Namespace:
119
144
  )
120
145
  dependency_help = (
121
146
  "Dependency information in str format."
122
- "Example: '[{'instrument': 'mag',"
123
- "'data_level': 'l0',"
124
- "'descriptor': 'sci',"
125
- "'version': 'v001',"
126
- "'start_date': '20231212'}]"
147
+ "Example:"
148
+ "'["
149
+ " {"
150
+ ' "type": "ancillary",'
151
+ ' "files": ['
152
+ ' "imap_mag_l1b-cal_20250101_v001.cdf",'
153
+ ' "imap_mag_l1b-cal_20250103_20250104_v002.cdf"'
154
+ " ]"
155
+ " },"
156
+ " {"
157
+ ' "type": "science",'
158
+ ' "files": ['
159
+ ' "imap_idex_l2_sci_20240312_v000.cdf",'
160
+ ' "imap_idex_l2_sci_20240312_v001.cdf"'
161
+ " ]"
162
+ " }"
163
+ "]'"
127
164
  )
128
165
 
129
166
  parser = argparse.ArgumentParser(prog="imap_cli", description=description)
@@ -161,7 +198,7 @@ def _parse_args() -> argparse.Namespace:
161
198
  parser.add_argument(
162
199
  "--start-date",
163
200
  type=str,
164
- required=True,
201
+ required=False,
165
202
  help="Start time for the output data. Format: YYYYMMDD",
166
203
  )
167
204
 
@@ -169,10 +206,17 @@ def _parse_args() -> argparse.Namespace:
169
206
  "--end-date",
170
207
  type=str,
171
208
  required=False,
172
- help="End time for the output data. If not provided, start_time will be used "
209
+ help="DEPRECATED: Do not use this."
210
+ "End time for the output data. If not provided, start_time will be used "
173
211
  "for end_time. Format: YYYYMMDD",
174
212
  )
175
- # TODO: Will need to add some way of including pointing numbers
213
+ parser.add_argument(
214
+ "--repointing",
215
+ type=str,
216
+ required=False,
217
+ help="Repointing time for output data. Replaces start_time if both are "
218
+ "provided. Format: repoint#####",
219
+ )
176
220
 
177
221
  parser.add_argument(
178
222
  "--version",
@@ -218,6 +262,30 @@ def _validate_args(args: argparse.Namespace) -> None:
218
262
  " instrument, valid levels are: "
219
263
  f"{imap_processing.PROCESSING_LEVELS[args.instrument]}"
220
264
  )
265
+ if args.start_date is None and args.repointing is None:
266
+ raise ValueError(
267
+ "Either start_date or repointing must be provided. "
268
+ "Run 'imap_cli -h' for more information."
269
+ )
270
+
271
+ if (
272
+ args.start_date is not None
273
+ and not imap_data_access.ScienceFilePath.is_valid_date(args.start_date)
274
+ ):
275
+ raise ValueError(f"{args.start_date} is not a valid date, use format YYYYMMDD.")
276
+
277
+ if (
278
+ args.repointing is not None
279
+ and not imap_data_access.ScienceFilePath.is_valid_repointing(args.repointing)
280
+ ):
281
+ raise ValueError(
282
+ f"{args.repointing} is not a valid repointing, use format repoint#####."
283
+ )
284
+
285
+ if getattr(args, "end_date", None) is not None:
286
+ logger.warning(
287
+ "The end_date argument is deprecated and will be ignored. Do not use."
288
+ )
221
289
 
222
290
 
223
291
  class ProcessInstrument(ABC):
@@ -232,17 +300,41 @@ class ProcessInstrument(ABC):
232
300
  The descriptor of the data to process (e.g. ``sci``).
233
301
  dependency_str : str
234
302
  A string representation of the dependencies for the instrument in the
235
- format: "[{
236
- 'instrument': 'mag',
237
- 'data_level': 'l0',
238
- 'descriptor': 'sci',
239
- 'version': 'v00-01',
240
- 'start_date': '20231212'
241
- }]".
303
+ format:
304
+ '[
305
+ {
306
+ "type": "ancillary",
307
+ "files": [
308
+ "imap_mag_l1b-cal_20250101_v001.cdf",
309
+ "imap_mag_l1b-cal_20250103_20250104_v002.cdf"
310
+ ]
311
+ },
312
+ {
313
+ "type": "ancillary",
314
+ "files": [
315
+ "imap_mag_l1b-lut_20250101_v001.cdf",
316
+ ]
317
+ },
318
+ {
319
+ "type": "science",
320
+ "files": [
321
+ "imap_mag_l1a_norm-magi_20240312_v000.cdf",
322
+ "imap_mag_l1a_norm-magi_20240312_v001.cdf"
323
+ ]
324
+ },
325
+ {
326
+ "type": "science",
327
+ "files": [
328
+ "imap_idex_l2_sci_20240312_v000.cdf",
329
+ "imap_idex_l2_sci_20240312_v001.cdf"
330
+ ]
331
+ }
332
+ ]'
333
+ This is what ProcessingInputCollection.serialize() outputs.
242
334
  start_date : str
243
335
  The start date for the output data in YYYYMMDD format.
244
- end_date : str
245
- The end date for the output data in YYYYMMDD format.
336
+ repointing : str
337
+ The repointing for the output data in the format 'repoint#####'.
246
338
  version : str
247
339
  The version of the data in vXXX format.
248
340
  upload_to_sdc : bool
@@ -254,66 +346,22 @@ class ProcessInstrument(ABC):
254
346
  data_level: str,
255
347
  data_descriptor: str,
256
348
  dependency_str: str,
257
- start_date: str,
258
- end_date: str,
349
+ start_date: str | None,
350
+ repointing: str | None,
259
351
  version: str,
260
352
  upload_to_sdc: bool,
261
353
  ) -> None:
262
354
  self.data_level = data_level
263
355
  self.descriptor = data_descriptor
264
356
 
265
- # Convert string into a dictionary
266
- self.dependencies = loads(dependency_str.replace("'", '"'))
267
- self._dependency_list: list = []
357
+ self.dependency_str = dependency_str
268
358
 
269
359
  self.start_date = start_date
270
- self.end_date = end_date
271
- if not end_date:
272
- self.end_date = start_date
360
+ self.repointing = repointing
273
361
 
274
362
  self.version = version
275
363
  self.upload_to_sdc = upload_to_sdc
276
364
 
277
- def download_dependencies(self) -> list[Path]:
278
- """
279
- Download the dependencies for the instrument.
280
-
281
- Returns
282
- -------
283
- file_list : list[Path]
284
- A list of file paths to the downloaded dependencies.
285
- """
286
- file_list = []
287
- for dependency in self.dependencies:
288
- try:
289
- # TODO: Validate dep dict
290
- # TODO: determine what dependency information is optional
291
- return_query = imap_data_access.query(
292
- start_date=dependency["start_date"],
293
- end_date=dependency.get("end_date", None),
294
- instrument=dependency["instrument"],
295
- data_level=dependency["data_level"],
296
- version=dependency["version"],
297
- descriptor=dependency["descriptor"],
298
- )
299
- except HTTPError as e:
300
- raise ValueError(f"Unable to download files from {dependency}") from e
301
-
302
- if not return_query:
303
- raise FileNotFoundError(
304
- f"File not found for required dependency "
305
- f"{dependency} while attempting to create file."
306
- f"This should never occur "
307
- f"in normal processing."
308
- )
309
- file_list.extend(
310
- [
311
- imap_data_access.download(query_return["file_path"])
312
- for query_return in return_query
313
- ]
314
- )
315
- return file_list
316
-
317
365
  def upload_products(self, products: list[Path]) -> None:
318
366
  """
319
367
  Upload data products to the IMAP SDC.
@@ -348,10 +396,10 @@ class ProcessInstrument(ABC):
348
396
  logger.info("Beginning actual processing")
349
397
  products = self.do_processing(dependencies)
350
398
  logger.info("Beginning postprocessing (uploading data products)")
351
- self.post_processing(products)
399
+ self.post_processing(products, dependencies)
352
400
  logger.info("Processing complete")
353
401
 
354
- def pre_processing(self) -> list[Path]:
402
+ def pre_processing(self) -> ProcessingInputCollection:
355
403
  """
356
404
  Complete pre-processing.
357
405
 
@@ -361,24 +409,29 @@ class ProcessInstrument(ABC):
361
409
 
362
410
  Returns
363
411
  -------
364
- list[Path]
365
- List of dependencies downloaded from the IMAP SDC.
412
+ dependencies : ProcessingInputCollection
413
+ Object containing dependencies to process.
366
414
  """
367
- self._dependency_list = self.download_dependencies()
368
- return self._dependency_list
415
+ dependencies = ProcessingInputCollection()
416
+ dependencies.deserialize(self.dependency_str)
417
+ dependencies.download_all_files()
418
+ return dependencies
369
419
 
370
420
  @abstractmethod
371
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
421
+ def do_processing(
422
+ self, dependencies: ProcessingInputCollection
423
+ ) -> list[xr.Dataset]:
372
424
  """
373
425
  Abstract method that processes the IMAP processing steps.
374
426
 
375
- All child classes must implement this method. Input and outputs are
376
- typically lists of file paths but are free to any list.
427
+ All child classes must implement this method. Input is
428
+ object containing dependencies and output is
429
+ list of xr.Dataset containing processed data(s).
377
430
 
378
431
  Parameters
379
432
  ----------
380
- dependencies : list
381
- List of dependencies to process.
433
+ dependencies : ProcessingInputCollection
434
+ Object containing dependencies to process.
382
435
 
383
436
  Returns
384
437
  -------
@@ -387,7 +440,9 @@ class ProcessInstrument(ABC):
387
440
  """
388
441
  raise NotImplementedError
389
442
 
390
- def post_processing(self, datasets: list[xr.Dataset]) -> None:
443
+ def post_processing(
444
+ self, datasets: list[xr.Dataset], dependencies: ProcessingInputCollection
445
+ ) -> None:
391
446
  """
392
447
  Complete post-processing.
393
448
 
@@ -396,36 +451,64 @@ class ProcessInstrument(ABC):
396
451
  Child classes can override this method to customize the
397
452
  post-processing actions.
398
453
 
454
+ The values from start_date and/or repointing are used to generate the output
455
+ file name if supplied. All other filename fields are derived from the
456
+ dataset attributes.
457
+
399
458
  Parameters
400
459
  ----------
401
460
  datasets : list[xarray.Dataset]
402
461
  A list of datasets (products) produced by do_processing method.
462
+ dependencies : ProcessingInputCollection
463
+ Object containing dependencies to process.
403
464
  """
404
465
  if len(datasets) == 0:
405
466
  logger.info("No products to write to CDF file.")
406
467
  return
407
468
 
408
469
  logger.info("Writing products to local storage")
409
- logger.info("Parent files: %s", self._dependency_list)
410
470
 
411
- products = [
412
- write_cdf(dataset, parent_files=self._dependency_list)
413
- for dataset in datasets
414
- ]
471
+ logger.info("Dataset version: %s", self.version)
472
+ # Parent files used to create these datasets
473
+ # https://spdf.gsfc.nasa.gov/istp_guide/gattributes.html.
474
+ parent_files = [p.name for p in dependencies.get_file_paths()]
475
+ logger.info("Parent files: %s", parent_files)
476
+
477
+ # Format version to vXXX if not already in that format. Eg.
478
+ # If version is passed in as 1 or 001, it will be converted to v001.
479
+ r = re.compile(r"v\d{3}")
480
+ if not isinstance(self.version, str) or r.match(self.version) is None:
481
+ self.version = f"v{int(self.version):03d}" # vXXX
482
+
483
+ # Start date is either the start date or the repointing.
484
+ # if it is the repointing, default to using the first epoch in the file as
485
+ # start_date.
486
+ # If it is start_date, skip repointing in the output filename.
487
+
488
+ products = []
489
+ for ds in datasets:
490
+ ds.attrs["Data_version"] = self.version
491
+ ds.attrs["Repointing"] = self.repointing
492
+ ds.attrs["Start_date"] = self.start_date
493
+ ds.attrs["Parents"] = parent_files
494
+ products.append(write_cdf(ds))
495
+
415
496
  self.upload_products(products)
416
497
 
417
498
 
418
499
  class Codice(ProcessInstrument):
419
500
  """Process CoDICE."""
420
501
 
421
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
502
+ def do_processing(
503
+ self, dependencies: ProcessingInputCollection
504
+ ) -> list[xr.Dataset]:
422
505
  """
423
506
  Perform CoDICE specific processing.
424
507
 
425
508
  Parameters
426
509
  ----------
427
- dependencies : list
428
- List of dependencies to process.
510
+ dependencies : ProcessingInputCollection
511
+ Object containing dependencies to process.
429
512
 
430
513
  Returns
431
514
  -------
@@ -435,24 +518,27 @@ class Codice(ProcessInstrument):
435
518
  print(f"Processing CoDICE {self.data_level}")
436
519
  datasets: list[xr.Dataset] = []
437
520
 
521
+ dependency_list = dependencies.processing_input
438
522
  if self.data_level == "l1a":
439
- if len(dependencies) > 1:
523
+ if len(dependency_list) > 1:
440
524
  raise ValueError(
441
525
  f"Unexpected dependencies found for CoDICE L1a:"
442
- f"{dependencies}. Expected only one dependency."
526
+ f"{dependency_list}. Expected only one dependency."
443
527
  )
444
528
  # process data
445
- datasets = [codice_l1a.process_codice_l1a(dependencies[0], self.version)]
529
+ science_files = dependencies.get_file_paths(source="codice")
530
+ datasets = codice_l1a.process_codice_l1a(science_files[0])
446
531
 
447
532
  if self.data_level == "l1b":
448
- if len(dependencies) > 1:
533
+ if len(dependency_list) > 1:
449
534
  raise ValueError(
450
535
  f"Unexpected dependencies found for CoDICE L1b:"
451
- f"{dependencies}. Expected only one dependency."
536
+ f"{dependency_list}. Expected only one dependency."
452
537
  )
453
538
  # process data
454
- dependency = load_cdf(dependencies[0])
455
- datasets = [codice_l1b.process_codice_l1b(dependency, self.version)]
539
+ science_files = dependencies.get_file_paths(source="codice")
540
+ dependency = load_cdf(science_files[0])
541
+ datasets = [codice_l1b.process_codice_l1b(dependency)]
456
542
 
457
543
  return datasets
458
544
 
@@ -460,14 +546,16 @@ class Codice(ProcessInstrument):
460
546
  class Glows(ProcessInstrument):
461
547
  """Process GLOWS."""
462
548
 
463
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
549
+ def do_processing(
550
+ self, dependencies: ProcessingInputCollection
551
+ ) -> list[xr.Dataset]:
464
552
  """
465
553
  Perform GLOWS specific processing.
466
554
 
467
555
  Parameters
468
556
  ----------
469
- dependencies : list
470
- List of dependencies to process.
557
+ dependencies : ProcessingInputCollection
558
+ Object containing dependencies to process.
471
559
 
472
560
  Returns
473
561
  -------
@@ -477,31 +565,35 @@ class Glows(ProcessInstrument):
477
565
  print(f"Processing GLOWS {self.data_level}")
478
566
  datasets: list[xr.Dataset] = []
479
567
 
568
+ dependency_list = dependencies.processing_input
480
569
  if self.data_level == "l1a":
481
- if len(dependencies) > 1:
570
+ if len(dependency_list) > 1:
482
571
  raise ValueError(
483
572
  f"Unexpected dependencies found for GLOWS L1A:"
484
- f"{dependencies}. Expected only one input dependency."
573
+ f"{dependency_list}. Expected only one input dependency."
485
574
  )
486
- datasets = glows_l1a(dependencies[0], self.version)
575
+ science_files = dependencies.get_file_paths(source="glows")
576
+ datasets = glows_l1a(science_files[0])
487
577
 
488
578
  if self.data_level == "l1b":
489
- if len(dependencies) > 1:
579
+ if len(dependency_list) > 1:
490
580
  raise ValueError(
491
581
  f"Unexpected dependencies found for GLOWS L1B:"
492
- f"{dependencies}. Expected at least one input dependency."
582
+ f"{dependency_list}. Expected at least one input dependency."
493
583
  )
494
- input_dataset = load_cdf(dependencies[0])
495
- datasets = [glows_l1b(input_dataset, self.version)]
584
+ science_files = dependencies.get_file_paths(source="glows")
585
+ input_dataset = load_cdf(science_files[0])
586
+ datasets = [glows_l1b(input_dataset)]
496
587
 
497
588
  if self.data_level == "l2":
498
- if len(dependencies) > 1:
589
+ if len(dependency_list) > 1:
499
590
  raise ValueError(
500
591
  f"Unexpected dependencies found for GLOWS L2:"
501
- f"{dependencies}. Expected only one input dependency."
592
+ f"{dependency_list}. Expected only one input dependency."
502
593
  )
503
- input_dataset = load_cdf(dependencies[0])
504
- datasets = glows_l2(input_dataset, self.version)
594
+ science_files = dependencies.get_file_paths(source="glows")
595
+ input_dataset = load_cdf(science_files[0])
596
+ datasets = glows_l2(input_dataset)
505
597
 
506
598
  return datasets
507
599
 
@@ -509,14 +601,16 @@ class Glows(ProcessInstrument):
509
601
  class Hi(ProcessInstrument):
510
602
  """Process IMAP-Hi."""
511
603
 
512
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
604
+ def do_processing(
605
+ self, dependencies: ProcessingInputCollection
606
+ ) -> list[xr.Dataset]:
513
607
  """
514
608
  Perform IMAP-Hi specific processing.
515
609
 
516
610
  Parameters
517
611
  ----------
518
- dependencies : list
519
- List of dependencies to process.
612
+ dependencies : ProcessingInputCollection
613
+ Object containing dependencies to process.
520
614
 
521
615
  Returns
522
616
  -------
@@ -526,27 +620,34 @@ class Hi(ProcessInstrument):
526
620
  print(f"Processing IMAP-Hi {self.data_level}")
527
621
  datasets: list[xr.Dataset] = []
528
622
 
623
+ dependency_list = dependencies.processing_input
529
624
  if self.data_level == "l1a":
530
625
  # File path is expected output file path
531
- if len(dependencies) > 1:
626
+ if len(dependency_list) > 1:
532
627
  raise ValueError(
533
628
  f"Unexpected dependencies found for Hi L1A:"
534
- f"{dependencies}. Expected only one dependency."
629
+ f"{dependency_list}. Expected only one dependency."
535
630
  )
536
- datasets = hi_l1a.hi_l1a(dependencies[0], self.version)
631
+ science_files = dependencies.get_file_paths(source="hi")
632
+ datasets = hi_l1a.hi_l1a(science_files[0])
537
633
  elif self.data_level == "l1b":
538
- dependencies = [load_cdf(dependency) for dependency in dependencies]
539
- datasets = [hi_l1b.hi_l1b(dependencies[0], self.version)]
634
+ l0_files = dependencies.get_file_paths(source="hi", descriptor="raw")
635
+ if l0_files:
636
+ datasets = hi_l1b.hi_l1b(l0_files[0])
637
+ else:
638
+ l1a_files = dependencies.get_file_paths(source="hi")
639
+ datasets = hi_l1b.hi_l1b(load_cdf(l1a_files[0]))
540
640
  elif self.data_level == "l1c":
541
641
  # TODO: Add PSET calibration product config file dependency and remove
542
642
  # below injected dependency
543
- dependencies.append(
643
+ hi_dependencies = dependencies.get_file_paths(source="hi")
644
+ hi_dependencies.append(
544
645
  Path(__file__).parent
545
646
  / "tests/hi/test_data/l1"
546
647
  / "imap_his_pset-calibration-prod-config_20240101_v001.csv"
547
648
  )
548
- dependencies[0] = load_cdf(dependencies[0])
549
- datasets = [hi_l1c.hi_l1c(dependencies, self.version)]
649
+ hi_dependencies[0] = load_cdf(hi_dependencies[0])
650
+ datasets = hi_l1c.hi_l1c(hi_dependencies)
550
651
  else:
551
652
  raise NotImplementedError(
552
653
  f"Hi processing not implemented for level {self.data_level}"
@@ -557,14 +658,16 @@ class Hi(ProcessInstrument):
557
658
  class Hit(ProcessInstrument):
558
659
  """Process HIT."""
559
660
 
560
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
661
+ def do_processing(
662
+ self, dependencies: ProcessingInputCollection
663
+ ) -> list[xr.Dataset]:
561
664
  """
562
665
  Perform HIT specific processing.
563
666
 
564
667
  Parameters
565
668
  ----------
566
- dependencies : list
567
- List of dependencies to process.
669
+ dependencies : ProcessingInputCollection
670
+ Object containing dependencies to process.
568
671
 
569
672
  Returns
570
673
  -------
@@ -574,41 +677,47 @@ class Hit(ProcessInstrument):
574
677
  print(f"Processing HIT {self.data_level}")
575
678
  datasets: list[xr.Dataset] = []
576
679
 
680
+ dependency_list = dependencies.processing_input
577
681
  if self.data_level == "l1a":
578
- if len(dependencies) > 1:
682
+ if len(dependency_list) > 1:
579
683
  raise ValueError(
580
684
  f"Unexpected dependencies found for HIT L1A:"
581
- f"{dependencies}. Expected only one dependency."
685
+ f"{dependency_list}. Expected only one dependency."
582
686
  )
583
687
  # process data to L1A products
584
- datasets = hit_l1a(dependencies[0], self.version)
688
+ science_files = dependencies.get_file_paths(source="hit")
689
+ datasets = hit_l1a(science_files[0])
585
690
 
586
691
  elif self.data_level == "l1b":
587
- if len(dependencies) > 1:
692
+ if len(dependency_list) > 1:
588
693
  raise ValueError(
589
694
  f"Unexpected dependencies found for HIT L1B:"
590
- f"{dependencies}. Expected only one dependency."
695
+ f"{dependency_list}. Expected only one dependency."
591
696
  )
592
697
  data_dict = {}
593
- if self.dependencies[0]["data_level"] == "l0":
698
+ # TODO: Check this and update with new features as needed.
699
+ l0_files = dependencies.get_file_paths(source="hit", descriptor="raw")
700
+ l1a_files = dependencies.get_file_paths(source="hit")
701
+ if len(l0_files) > 0:
594
702
  # Add path to CCSDS file to process housekeeping
595
- data_dict["imap_hit_l0_raw"] = dependencies[0]
703
+ data_dict["imap_hit_l0_raw"] = l0_files[0]
596
704
  else:
597
705
  # Add L1A dataset to process science data
598
- l1a_dataset = load_cdf(dependencies[0])
706
+ l1a_dataset = load_cdf(l1a_files[0])
599
707
  data_dict[l1a_dataset.attrs["Logical_source"]] = l1a_dataset
600
708
  # process data to L1B products
601
- datasets = hit_l1b(data_dict, self.version)
709
+ datasets = hit_l1b(data_dict)
602
710
  elif self.data_level == "l2":
603
- if len(dependencies) > 1:
711
+ if len(dependency_list) > 1:
604
712
  raise ValueError(
605
713
  f"Unexpected dependencies found for HIT L2:"
606
- f"{dependencies}. Expected only one dependency."
714
+ f"{dependency_list}. Expected only one dependency."
607
715
  )
608
716
  # Add L1B dataset to process science data
609
- l1b_dataset = load_cdf(dependencies[0])
717
+ science_files = dependencies.get_file_paths(source="hit")
718
+ l1b_dataset = load_cdf(science_files[0])
610
719
  # process data to L2 products
611
- datasets = hit_l2(l1b_dataset, self.version)
720
+ datasets = hit_l2(l1b_dataset)
612
721
 
613
722
  return datasets
614
723
 
@@ -616,14 +725,16 @@ class Hit(ProcessInstrument):
616
725
  class Idex(ProcessInstrument):
617
726
  """Process IDEX."""
618
727
 
619
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
728
+ def do_processing(
729
+ self, dependencies: ProcessingInputCollection
730
+ ) -> list[xr.Dataset]:
620
731
  """
621
732
  Perform IDEX specific processing.
622
733
 
623
734
  Parameters
624
735
  ----------
625
- dependencies : list
626
- List of dependencies to process.
736
+ dependencies : ProcessingInputCollection
737
+ Object containing dependencies to process.
627
738
 
628
739
  Returns
629
740
  -------
@@ -633,37 +744,43 @@ class Idex(ProcessInstrument):
633
744
  print(f"Processing IDEX {self.data_level}")
634
745
  datasets: list[xr.Dataset] = []
635
746
 
747
+ dependency_list = dependencies.processing_input
636
748
  if self.data_level == "l1a":
637
- if len(dependencies) > 1:
749
+ if len(dependency_list) > 1:
638
750
  raise ValueError(
639
- f"Unexpected dependencies found for IDEX L1a:"
640
- f"{dependencies}. Expected only one dependency."
751
+ f"Unexpected dependencies found for IDEX L1A:"
752
+ f"{dependency_list}. Expected only one dependency."
641
753
  )
642
- # read CDF file
643
- datasets = [PacketParser(dependencies[0], self.version).data]
754
+ # get l0 file
755
+ science_files = dependencies.get_file_paths(source="idex")
756
+ datasets = PacketParser(science_files[0]).data
644
757
  elif self.data_level == "l1b":
645
- if len(dependencies) > 1:
758
+ if len(dependency_list) > 1:
646
759
  raise ValueError(
647
- f"Unexpected dependencies found for IDEX L1b:"
648
- f"{dependencies}. Expected only one dependency."
760
+ f"Unexpected dependencies found for IDEX L1B:"
761
+ f"{dependency_list}. Expected only one science dependency."
649
762
  )
763
+ # get CDF file
764
+ science_files = dependencies.get_file_paths(source="idex")
650
765
  # process data
651
- dependency = load_cdf(dependencies[0])
652
- datasets = [idex_l1b(dependency, self.version)]
766
+ dependency = load_cdf(science_files[0])
767
+ datasets = [idex_l1b(dependency)]
653
768
  return datasets
654
769
 
655
770
 
656
771
  class Lo(ProcessInstrument):
657
772
  """Process IMAP-Lo."""
658
773
 
659
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
774
+ def do_processing(
775
+ self, dependencies: ProcessingInputCollection
776
+ ) -> list[xr.Dataset]:
660
777
  """
661
778
  Perform IMAP-Lo specific processing.
662
779
 
663
780
  Parameters
664
781
  ----------
665
- dependencies : list
666
- List of dependencies to process.
782
+ dependencies : ProcessingInputCollection
783
+ Object containing dependencies to process.
667
784
 
668
785
  Returns
669
786
  -------
@@ -672,30 +789,39 @@ class Lo(ProcessInstrument):
672
789
  """
673
790
  print(f"Processing IMAP-Lo {self.data_level}")
674
791
  datasets: list[xr.Dataset] = []
792
+ dependency_list = dependencies.processing_input
675
793
  if self.data_level == "l1a":
676
794
  # L1A packet / products are 1 to 1. Should only have
677
795
  # one dependency file
678
- if len(dependencies) > 1:
796
+ if len(dependency_list) > 1:
679
797
  raise ValueError(
680
798
  f"Unexpected dependencies found for IMAP-Lo L1A:"
681
- f"{dependencies}. Expected only one dependency."
799
+ f"{dependency_list}. Expected only one dependency."
682
800
  )
683
- datasets = lo_l1a.lo_l1a(dependencies[0], self.version)
801
+ science_files = dependencies.get_file_paths(source="lo")
802
+ datasets = lo_l1a.lo_l1a(science_files[0])
684
803
 
685
804
  elif self.data_level == "l1b":
686
805
  data_dict = {}
687
- for dependency in dependencies:
688
- dataset = load_cdf(dependency)
806
+ # TODO: Check this and update with new features as needed.
807
+ for input_type in dependencies.processing_input:
808
+ science_files = dependencies.get_file_paths(
809
+ source="lo", descriptor=input_type.descriptor
810
+ )
811
+ dataset = load_cdf(science_files[0])
689
812
  data_dict[dataset.attrs["Logical_source"]] = dataset
690
- datasets = lo_l1b.lo_l1b(data_dict, self.version)
813
+ datasets = lo_l1b.lo_l1b(data_dict)
691
814
 
692
815
  elif self.data_level == "l1c":
693
816
  data_dict = {}
694
- for dependency in dependencies:
695
- dataset = load_cdf(dependency)
817
+ for input_type in dependencies.processing_input:
818
+ science_files = dependencies.get_file_paths(
819
+ source="lo", descriptor=input_type.descriptor
820
+ )
821
+ dataset = load_cdf(science_files[0])
696
822
  data_dict[dataset.attrs["Logical_source"]] = dataset
697
823
  # TODO: This is returning the wrong type
698
- datasets = lo_l1c.lo_l1c(data_dict, self.version)
824
+ datasets = lo_l1c.lo_l1c(data_dict)
699
825
 
700
826
  return datasets
701
827
 
@@ -703,14 +829,16 @@ class Lo(ProcessInstrument):
703
829
  class Mag(ProcessInstrument):
704
830
  """Process MAG."""
705
831
 
706
- def do_processing(self, dependencies: list[Path]) -> list[xr.Dataset]:
832
+ def do_processing(
833
+ self, dependencies: ProcessingInputCollection
834
+ ) -> list[xr.Dataset]:
707
835
  """
708
836
  Perform MAG specific processing.
709
837
 
710
838
  Parameters
711
839
  ----------
712
- dependencies : list[Path]
713
- List of dependencies to process.
840
+ dependencies : ProcessingInputCollection
841
+ Object containing dependencies to process.
714
842
 
715
843
  Returns
716
844
  -------
@@ -720,52 +848,119 @@ class Mag(ProcessInstrument):
720
848
  print(f"Processing MAG {self.data_level}")
721
849
  datasets: list[xr.Dataset] = []
722
850
 
851
+ dependency_list = dependencies.processing_input
852
+ science_files = dependencies.get_file_paths(source="mag")
723
853
  if self.data_level == "l1a":
724
854
  # File path is expected output file path
725
- if len(dependencies) > 1:
855
+ if len(dependency_list) > 1:
726
856
  raise ValueError(
727
857
  f"Unexpected dependencies found for MAG L1A:"
728
- f"{dependencies}. Expected only one dependency."
858
+ f"{dependency_list}. Expected only one dependency."
729
859
  )
730
860
  # TODO: Update this type
731
- datasets = mag_l1a(dependencies[0], data_version=self.version)
861
+
862
+ datasets = mag_l1a(science_files[0])
732
863
 
733
864
  if self.data_level == "l1b":
734
- if len(dependencies) > 1:
865
+ if len(dependency_list) > 1:
735
866
  raise ValueError(
736
867
  f"Unexpected dependencies found for MAG L1B:"
737
- f"{dependencies}. Expected only one dependency."
868
+ f"{dependency_list}. Expected only one dependency."
738
869
  )
739
- input_data = load_cdf(dependencies[0])
740
- datasets = [mag_l1b(input_data, self.version)]
870
+ input_data = load_cdf(science_files[0])
871
+ datasets = [mag_l1b(input_data)]
741
872
 
742
873
  if self.data_level == "l1c":
743
- # L1C depends on matching norm/burst files: eg burst-magi and norm-magi or
744
- # burst-mago and norm-mago
745
- if len(dependencies) != 2:
874
+ input_data = [load_cdf(dep) for dep in science_files]
875
+ # Input datasets can be in any order, and are validated within mag_l1c
876
+ if len(input_data) == 1:
877
+ datasets = [mag_l1c(input_data[0])]
878
+ elif len(input_data) == 2:
879
+ datasets = [mag_l1c(input_data[0], input_data[1])]
880
+ else:
746
881
  raise ValueError(
747
882
  f"Invalid dependencies found for MAG L1C:"
748
- f"{dependencies}. Expected two dependencies."
883
+ f"{dependencies}. Expected one or two dependencies."
749
884
  )
750
885
 
751
- input_data = [load_cdf(dep) for dep in dependencies]
752
- # Input datasets can be in any order
753
- datasets = [mag_l1c(input_data[0], input_data[1], self.version)]
886
+ if self.data_level == "l2":
887
+ # TODO: Overwrite dependencies with versions from offsets file
888
+ # TODO: Ensure that parent_files attribute works with that
889
+ input_data = load_cdf(science_files[0])
890
+ # TODO: use ancillary from input
891
+ calibration_dataset = load_cdf(
892
+ Path(__file__).parent
893
+ / "tests"
894
+ / "mag"
895
+ / "validation"
896
+ / "calibration"
897
+ / "imap_mag_l2-calibration-matrices_20251017_v004.cdf"
898
+ )
899
+
900
+ offset_dataset = load_cdf(
901
+ Path(__file__).parent
902
+ / "tests"
903
+ / "mag"
904
+ / "validation"
905
+ / "calibration"
906
+ / "imap_mag_l2-offsets-norm_20251017_20251017_v001.cdf"
907
+ )
908
+ # TODO: Test data missing
909
+ datasets = mag_l2(calibration_dataset, offset_dataset, input_data)
754
910
 
755
911
  return datasets
756
912
 
757
913
 
914
+ class Spacecraft(ProcessInstrument):
915
+ """Process Spacecraft data."""
916
+
917
+ def do_processing(
918
+ self, dependencies: ProcessingInputCollection
919
+ ) -> list[xr.Dataset]:
920
+ """
921
+ Perform Spacecraft specific processing.
922
+
923
+ Parameters
924
+ ----------
925
+ dependencies : ProcessingInputCollection
926
+ Object containing dependencies to process.
927
+
928
+ Returns
929
+ -------
930
+ datasets : xr.Dataset
931
+ Xr.Dataset of products.
932
+ """
933
+ print(f"Processing Spacecraft {self.data_level}")
934
+
935
+ if self.data_level != "l1a":
936
+ raise NotImplementedError(
937
+ f"Spacecraft processing not implemented for level {self.data_level}"
938
+ )
939
+
940
+ # File path is expected output file path
941
+ input_files = dependencies.get_file_paths(source="spacecraft")
942
+ if len(input_files) > 1:
943
+ raise ValueError(
944
+ f"Unexpected dependencies found for Spacecraft L1A: "
945
+ f"{input_files}. Expected only one dependency."
946
+ )
947
+ datasets = list(quaternions.process_quaternions(input_files[0]))
948
+ return datasets
949
+
950
+
758
951
  class Swapi(ProcessInstrument):
759
952
  """Process SWAPI."""
760
953
 
761
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
954
+ def do_processing(
955
+ self, dependencies: ProcessingInputCollection
956
+ ) -> list[xr.Dataset]:
762
957
  """
763
958
  Perform SWAPI specific processing.
764
959
 
765
960
  Parameters
766
961
  ----------
767
- dependencies : list
768
- List of dependencies to process.
962
+ dependencies : ProcessingInputCollection
963
+ Object containing dependencies to process.
769
964
 
770
965
  Returns
771
966
  -------
@@ -775,30 +970,53 @@ class Swapi(ProcessInstrument):
775
970
  print(f"Processing SWAPI {self.data_level}")
776
971
  datasets: list[xr.Dataset] = []
777
972
 
973
+ dependency_list = dependencies.processing_input
778
974
  if self.data_level == "l1":
779
975
  # For science, we expect l0 raw file and L1 housekeeping file
780
- if self.descriptor == "sci" and len(dependencies) != 2:
976
+ if self.descriptor == "sci" and len(dependency_list) != 2:
781
977
  raise ValueError(
782
978
  f"Unexpected dependencies found for SWAPI L1 science:"
783
- f"{dependencies}. Expected only two dependencies."
979
+ f"{dependency_list}. Expected only two dependencies."
784
980
  )
785
981
  # For housekeeping, we expect only L0 raw file
786
- if self.descriptor == "hk" and len(dependencies) != 1:
982
+ if self.descriptor == "hk" and len(dependency_list) != 1:
787
983
  raise ValueError(
788
984
  f"Unexpected dependencies found for SWAPI L1 housekeeping:"
789
- f"{dependencies}. Expected only one dependency."
985
+ f"{dependency_list}. Expected only one dependency."
790
986
  )
987
+
988
+ dependent_files = []
989
+ l0_files = dependencies.get_file_paths(descriptor="raw")
990
+ # TODO: handle multiples files as needed in the future
991
+ dependent_files.append(l0_files[0])
992
+
993
+ if self.descriptor == "sci":
994
+ # TODO: handle multiples files as needed in the future
995
+ hk_files = dependencies.get_file_paths(descriptor="hk")
996
+ dependent_files.append(hk_files[0])
997
+
791
998
  # process science or housekeeping data
792
- datasets = swapi_l1(dependencies, self.version)
999
+ datasets = swapi_l1(dependent_files)
793
1000
  elif self.data_level == "l2":
794
- if len(dependencies) > 1:
1001
+ if len(dependency_list) != 3:
795
1002
  raise ValueError(
796
1003
  f"Unexpected dependencies found for SWAPI L2:"
797
- f"{dependencies}. Expected only one dependency."
1004
+ f"{dependency_list}. Expected 3 dependencies."
798
1005
  )
799
1006
  # process data
800
- l1_dataset = load_cdf(dependencies[0])
801
- datasets = [swapi_l2(l1_dataset, self.version)]
1007
+ science_files = dependencies.get_file_paths(
1008
+ source="swapi", descriptor="sci"
1009
+ )
1010
+ esa_table_files = dependencies.get_file_paths(
1011
+ source="swapi", descriptor="esa-unit-conversion"
1012
+ )
1013
+ lut_notes_files = dependencies.get_file_paths(
1014
+ source="swapi", descriptor="lut-notes"
1015
+ )
1016
+ esa_table_df = read_swapi_lut_table(esa_table_files[0])
1017
+ lut_notes_df = read_swapi_lut_table(lut_notes_files[0])
1018
+ l1_dataset = load_cdf(science_files[0])
1019
+ datasets = [swapi_l2(l1_dataset, esa_table_df, lut_notes_df)]
802
1020
 
803
1021
  return datasets
804
1022
 
@@ -806,14 +1024,16 @@ class Swapi(ProcessInstrument):
806
1024
  class Swe(ProcessInstrument):
807
1025
  """Process SWE."""
808
1026
 
809
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
1027
+ def do_processing(
1028
+ self, dependencies: ProcessingInputCollection
1029
+ ) -> list[xr.Dataset]:
810
1030
  """
811
1031
  Perform SWE specific processing.
812
1032
 
813
1033
  Parameters
814
1034
  ----------
815
- dependencies : list
816
- List of dependencies to process.
1035
+ dependencies : ProcessingInputCollection
1036
+ Object containing dependencies to process.
817
1037
 
818
1038
  Returns
819
1039
  -------
@@ -823,26 +1043,32 @@ class Swe(ProcessInstrument):
823
1043
  print(f"Processing SWE {self.data_level}")
824
1044
  datasets: list[xr.Dataset] = []
825
1045
 
1046
+ dependency_list = dependencies.processing_input
826
1047
  if self.data_level == "l1a":
827
- if len(dependencies) > 1:
1048
+ if len(dependency_list) > 1:
828
1049
  raise ValueError(
829
1050
  f"Unexpected dependencies found for SWE L1A:"
830
- f"{dependencies}. Expected only one dependency."
1051
+ f"{dependency_list}. Expected only one dependency."
831
1052
  )
832
- datasets = swe_l1a(str(dependencies[0]), data_version=self.version)
1053
+ science_files = dependencies.get_file_paths(source="swe")
1054
+ datasets = swe_l1a(str(science_files[0]))
833
1055
  # Right now, we only process science data. Therefore,
834
1056
  # we expect only one dataset to be returned.
835
1057
 
836
1058
  elif self.data_level == "l1b":
837
- if len(dependencies) > 1:
1059
+ if len(dependency_list) != 4:
838
1060
  raise ValueError(
839
1061
  f"Unexpected dependencies found for SWE L1B:"
840
- f"{dependencies}. Expected only one dependency."
1062
+ f"{dependency_list}. Expected exactly four dependencies."
841
1063
  )
842
- # read CDF file
843
- l1a_dataset = load_cdf(dependencies[0])
844
- # TODO: read lookup table and in-flight calibration data here.
845
- datasets = swe_l1b(l1a_dataset, data_version=self.version)
1064
+
1065
+ science_files = dependencies.get_file_paths("swe", "sci")
1066
+ if len(science_files) > 1:
1067
+ raise ValueError(
1068
+ "Multiple science files processing is not supported for SWE L1B."
1069
+ )
1070
+
1071
+ datasets = swe_l1b(dependencies)
846
1072
  else:
847
1073
  print("Did not recognize data level. No processing done.")
848
1074
 
@@ -852,14 +1078,16 @@ class Swe(ProcessInstrument):
852
1078
  class Ultra(ProcessInstrument):
853
1079
  """Process IMAP-Ultra."""
854
1080
 
855
- def do_processing(self, dependencies: list) -> list[xr.Dataset]:
1081
+ def do_processing(
1082
+ self, dependencies: ProcessingInputCollection
1083
+ ) -> list[xr.Dataset]:
856
1084
  """
857
1085
  Perform IMAP-Ultra specific processing.
858
1086
 
859
1087
  Parameters
860
1088
  ----------
861
- dependencies : list
862
- List of dependencies to process.
1089
+ dependencies : ProcessingInputCollection
1090
+ Object containing dependencies to process.
863
1091
 
864
1092
  Returns
865
1093
  -------
@@ -869,29 +1097,30 @@ class Ultra(ProcessInstrument):
869
1097
  print(f"Processing IMAP-Ultra {self.data_level}")
870
1098
  datasets: list[xr.Dataset] = []
871
1099
 
1100
+ dependency_list = dependencies.processing_input
872
1101
  if self.data_level == "l1a":
873
1102
  # File path is expected output file path
874
- if len(dependencies) > 1:
1103
+ if len(dependency_list) > 1:
875
1104
  raise ValueError(
876
1105
  f"Unexpected dependencies found for ULTRA L1A:"
877
- f"{dependencies}. Expected only one dependency."
1106
+ f"{dependency_list}. Expected only one dependency."
878
1107
  )
879
-
880
- datasets = ultra_l1a.ultra_l1a(dependencies[0], self.version)
1108
+ science_files = dependencies.get_file_paths(source="ultra")
1109
+ datasets = ultra_l1a.ultra_l1a(science_files[0])
881
1110
 
882
1111
  elif self.data_level == "l1b":
883
1112
  data_dict = {}
884
- for dependency in dependencies:
885
- dataset = load_cdf(dependency)
1113
+ for dep in dependency_list:
1114
+ dataset = load_cdf(dep.imap_file_paths[0])
886
1115
  data_dict[dataset.attrs["Logical_source"]] = dataset
887
- datasets = ultra_l1b.ultra_l1b(data_dict, self.version)
1116
+ datasets = ultra_l1b.ultra_l1b(data_dict)
888
1117
 
889
1118
  elif self.data_level == "l1c":
890
1119
  data_dict = {}
891
- for dependency in dependencies:
892
- dataset = load_cdf(dependency)
1120
+ for dep in dependency_list:
1121
+ dataset = load_cdf(dep.imap_file_paths[0])
893
1122
  data_dict[dataset.attrs["Logical_source"]] = dataset
894
- datasets = ultra_l1c.ultra_l1c(data_dict, self.version)
1123
+ datasets = ultra_l1c.ultra_l1c(data_dict)
895
1124
 
896
1125
  return datasets
897
1126
 
@@ -914,7 +1143,7 @@ def main() -> None:
914
1143
  args.descriptor,
915
1144
  args.dependency,
916
1145
  args.start_date,
917
- args.end_date,
1146
+ args.repointing,
918
1147
  args.version,
919
1148
  args.upload_to_sdc,
920
1149
  )