imap-processing 0.14.0__py3-none-any.whl → 0.16.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (81)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +60 -35
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +765 -287
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1577 -288
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1004 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +18 -0
  9. imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +39 -3
  10. imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml +18 -0
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +353 -0
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +7 -0
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +11 -0
  14. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +4 -0
  15. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +7 -3
  16. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +6 -0
  17. imap_processing/cdf/config/imap_mag_l2_variable_attrs.yaml +114 -0
  18. imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +11 -5
  19. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +23 -1
  20. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +4 -0
  21. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +2 -2
  22. imap_processing/cli.py +145 -80
  23. imap_processing/codice/codice_l1a.py +140 -84
  24. imap_processing/codice/codice_l1b.py +91 -18
  25. imap_processing/codice/codice_l2.py +81 -0
  26. imap_processing/codice/constants.py +68 -0
  27. imap_processing/ena_maps/ena_maps.py +43 -1
  28. imap_processing/glows/l2/glows_l2_data.py +3 -6
  29. imap_processing/hi/hi_l1a.py +447 -0
  30. imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
  31. imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
  32. imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
  33. imap_processing/hi/utils.py +6 -6
  34. imap_processing/hit/l1b/hit_l1b.py +30 -11
  35. imap_processing/ialirt/constants.py +38 -0
  36. imap_processing/ialirt/l0/parse_mag.py +1 -1
  37. imap_processing/ialirt/l0/process_codice.py +91 -0
  38. imap_processing/ialirt/l0/process_hit.py +12 -21
  39. imap_processing/ialirt/l0/process_swapi.py +172 -23
  40. imap_processing/ialirt/l0/process_swe.py +3 -10
  41. imap_processing/ialirt/utils/constants.py +62 -0
  42. imap_processing/ialirt/utils/create_xarray.py +135 -0
  43. imap_processing/idex/idex_l2c.py +9 -9
  44. imap_processing/lo/l1b/lo_l1b.py +6 -1
  45. imap_processing/lo/l1c/lo_l1c.py +22 -13
  46. imap_processing/lo/l2/lo_l2.py +213 -0
  47. imap_processing/mag/l1c/mag_l1c.py +8 -1
  48. imap_processing/mag/l2/mag_l2.py +6 -2
  49. imap_processing/mag/l2/mag_l2_data.py +7 -5
  50. imap_processing/swe/l1a/swe_l1a.py +6 -6
  51. imap_processing/swe/l1b/swe_l1b.py +70 -11
  52. imap_processing/ultra/l0/decom_ultra.py +1 -1
  53. imap_processing/ultra/l0/ultra_utils.py +0 -4
  54. imap_processing/ultra/l1b/badtimes.py +7 -3
  55. imap_processing/ultra/l1b/cullingmask.py +7 -2
  56. imap_processing/ultra/l1b/de.py +26 -12
  57. imap_processing/ultra/l1b/lookup_utils.py +8 -7
  58. imap_processing/ultra/l1b/ultra_l1b.py +59 -48
  59. imap_processing/ultra/l1b/ultra_l1b_culling.py +50 -18
  60. imap_processing/ultra/l1b/ultra_l1b_extended.py +4 -4
  61. imap_processing/ultra/l1c/helio_pset.py +53 -0
  62. imap_processing/ultra/l1c/spacecraft_pset.py +20 -12
  63. imap_processing/ultra/l1c/ultra_l1c.py +49 -26
  64. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +40 -2
  65. imap_processing/ultra/l2/ultra_l2.py +47 -2
  66. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +524 -526
  67. imap_processing/ultra/utils/ultra_l1_utils.py +51 -10
  68. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/METADATA +2 -2
  69. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/RECORD +72 -69
  70. imap_processing/hi/l1a/__init__.py +0 -0
  71. imap_processing/hi/l1a/hi_l1a.py +0 -98
  72. imap_processing/hi/l1a/histogram.py +0 -152
  73. imap_processing/hi/l1a/science_direct_event.py +0 -214
  74. imap_processing/hi/l1b/__init__.py +0 -0
  75. imap_processing/hi/l1c/__init__.py +0 -0
  76. imap_processing/hi/l2/__init__.py +0 -0
  77. imap_processing/ialirt/l0/process_codicehi.py +0 -156
  78. imap_processing/ialirt/l0/process_codicelo.py +0 -41
  79. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/LICENSE +0 -0
  80. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/WHEEL +0 -0
  81. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/entry_points.txt +0 -0
@@ -12,28 +12,34 @@ imap_swe_l1a_sci:
12
12
  <<: *instrument_base
13
13
  Data_type: L1A_SCI>Level-1A Science data
14
14
  Logical_source: imap_swe_l1a_sci
15
- Logical_source_description: SWE Instrument Level-1A Science Data
15
+ Logical_source_description: IMAP SWE Instrument Level-1A Science Data
16
16
 
17
17
  imap_swe_l1a_hk:
18
18
  <<: *instrument_base
19
19
  Data_type: L1A_HK>Level-1A Housekeeping data
20
20
  Logical_source: imap_swe_l1a_hk
21
- Logical_source_description: SWE Instrument Level-1A Housekeeping Data
21
+ Logical_source_description: IMAP SWE Instrument Level-1A Housekeeping Data
22
22
 
23
23
  imap_swe_l1a_cem-raw:
24
24
  <<: *instrument_base
25
25
  Data_type: L1A_CEM-RAW>Level-1A CEM Raw data
26
26
  Logical_source: imap_swe_l1a_cem-raw
27
- Logical_source_description: SWE Instrument Level-1A CEM Raw Data
27
+ Logical_source_description: IMAP SWE Instrument Level-1A CEM Raw Data
28
28
 
29
29
  imap_swe_l1b_sci:
30
30
  <<: *instrument_base
31
31
  Data_type: L1B_SCI>Level-1B Science data
32
32
  Logical_source: imap_swe_l1b_sci
33
- Logical_source_description: SWE Instrument Level-1B Science Data
33
+ Logical_source_description: IMAP SWE Instrument Level-1B Science Data
34
+
35
+ imap_swe_l1b_hk:
36
+ <<: *instrument_base
37
+ Data_type: L1B_HK>Level-1B Housekeeping data
38
+ Logical_source: imap_swe_l1b_hk
39
+ Logical_source_description: IMAP SWE Instrument Level-1B Housekeeping Data
34
40
 
35
41
  imap_swe_l2_sci:
36
42
  <<: *instrument_base
37
43
  Data_type: L2_SCI>Level-2 Science data
38
44
  Logical_source: imap_swe_l2_sci
39
- Logical_source_description: SWE Instrument Level-2 Science Data
45
+ Logical_source_description: IMAP SWE Instrument Level-2 Science Data
@@ -299,4 +299,26 @@ cksum:
299
299
  CATDESC: Checksum
300
300
  FIELDNAM: Checksum
301
301
  FORMAT: I5
302
- VALIDMAX: 65535
302
+ VALIDMAX: 65535
303
+
304
+ # <=== HK Variables ===>
305
+ # L1B HK data attrs for data with string values
306
+ l1b_hk_string_attrs:
307
+ CATDESC: Housekeeping derived data
308
+ FIELDNAM: Housekeeing Human Readable State
309
+ FORMAT: A80
310
+ VAR_TYPE: metadata
311
+ DEPEND_0: epoch
312
+
313
+ l1b_hk_attrs:
314
+ CATDESC: SWE HK data
315
+ FIELDNAM: SWE Housekeeping Data
316
+ LABLAXIS: Values
317
+ DEPEND_0: epoch
318
+ FILLVAL: -9223372036854775808
319
+ FORMAT: I19
320
+ UNITS: ' '
321
+ VALIDMIN: 0
322
+ VALIDMAX: 9223372036854769664
323
+ VAR_TYPE: support_data
324
+ DISPLAY_TYPE: time_series
@@ -334,6 +334,7 @@ spin_start_time:
334
334
  LABLAXIS: spin start time
335
335
  # TODO: come back to format
336
336
  UNITS: s
337
+ DEPEND_0: spin_number
337
338
 
338
339
  spin_period:
339
340
  <<: *default
@@ -341,6 +342,7 @@ spin_period:
341
342
  FIELDNAM: spin_period
342
343
  LABLAXIS: spin_period
343
344
  UNITS: s
345
+ DEPEND_0: spin_number
344
346
 
345
347
  spin_rate:
346
348
  <<: *default
@@ -348,6 +350,7 @@ spin_rate:
348
350
  FIELDNAM: spin_rate
349
351
  LABLAXIS: spin_rate
350
352
  UNITS: rpm
353
+ DEPEND_0: spin_number
351
354
 
352
355
  rate_start_pulses:
353
356
  <<: *default
@@ -424,6 +427,7 @@ quality_attitude:
424
427
  LABLAXIS: quality attitude
425
428
  # TODO: come back to format
426
429
  UNITS: " "
430
+ DEPEND_0: spin_number
427
431
 
428
432
  quality_instruments:
429
433
  <<: *default_uint16
@@ -48,7 +48,7 @@ exposure_factor:
48
48
  UNITS: seconds
49
49
 
50
50
  sensitivity:
51
- <<: *default
51
+ <<: *default_float32
52
52
  CATDESC: Calibration/sensitivity factor.
53
53
  FIELDNAM: sensitivity
54
54
  LABLAXIS: sensitivity
@@ -90,7 +90,7 @@ shcoarse:
90
90
  energy_bin_delta:
91
91
  <<: *default_float32
92
92
  CATDESC: Difference between the energy bin edges.
93
- DEPEND_0: energy_bin_geometric_mean
93
+ DEPEND_1: energy_bin_geometric_mean
94
94
  FIELDNAM: energy_bin_delta
95
95
  LABLAXIS: energy bin delta
96
96
  UNITS: keV
imap_processing/cli.py CHANGED
@@ -47,14 +47,11 @@ from imap_processing.cdf.utils import load_cdf, write_cdf
47
47
  # from imap_processing import cdf
48
48
  # In code:
49
49
  # call cdf.utils.write_cdf
50
- from imap_processing.codice import codice_l1a, codice_l1b
50
+ from imap_processing.codice import codice_l1a, codice_l1b, codice_l2
51
51
  from imap_processing.glows.l1a.glows_l1a import glows_l1a
52
52
  from imap_processing.glows.l1b.glows_l1b import glows_l1b
53
53
  from imap_processing.glows.l2.glows_l2 import glows_l2
54
- from imap_processing.hi.l1a import hi_l1a
55
- from imap_processing.hi.l1b import hi_l1b
56
- from imap_processing.hi.l1c import hi_l1c
57
- from imap_processing.hi.l2 import hi_l2
54
+ from imap_processing.hi import hi_l1a, hi_l1b, hi_l1c, hi_l2
58
55
  from imap_processing.hit.l1a.hit_l1a import hit_l1a
59
56
  from imap_processing.hit.l1b.hit_l1b import hit_l1b
60
57
  from imap_processing.hit.l2.hit_l2 import hit_l2
@@ -66,6 +63,8 @@ from imap_processing.idex.idex_l2c import idex_l2c
66
63
  from imap_processing.lo.l1a import lo_l1a
67
64
  from imap_processing.lo.l1b import lo_l1b
68
65
  from imap_processing.lo.l1c import lo_l1c
66
+ from imap_processing.lo.l2 import lo_l2
67
+ from imap_processing.mag.constants import DataMode
69
68
  from imap_processing.mag.l1a.mag_l1a import mag_l1a
70
69
  from imap_processing.mag.l1b.mag_l1b import mag_l1b
71
70
  from imap_processing.mag.l1c.mag_l1c import mag_l1c
@@ -77,6 +76,7 @@ from imap_processing.swapi.l2.swapi_l2 import swapi_l2
77
76
  from imap_processing.swapi.swapi_utils import read_swapi_lut_table
78
77
  from imap_processing.swe.l1a.swe_l1a import swe_l1a
79
78
  from imap_processing.swe.l1b.swe_l1b import swe_l1b
79
+ from imap_processing.swe.l2.swe_l2 import swe_l2
80
80
  from imap_processing.ultra.l1a import ultra_l1a
81
81
  from imap_processing.ultra.l1b import ultra_l1b
82
82
  from imap_processing.ultra.l1c import ultra_l1c
@@ -416,6 +416,7 @@ class ProcessInstrument(ABC):
416
416
  repointing=file_path.repointing,
417
417
  version=file_path.version,
418
418
  extension="cdf",
419
+ table="science",
419
420
  )
420
421
  if existing_file:
421
422
  raise ProcessInstrument.ImapFileExistsError(
@@ -610,27 +611,34 @@ class Codice(ProcessInstrument):
610
611
  print(f"Processing CoDICE {self.data_level}")
611
612
  datasets: list[xr.Dataset] = []
612
613
 
613
- dependency_list = dependencies.processing_input
614
614
  if self.data_level == "l1a":
615
- if len(dependency_list) > 1:
615
+ science_files = dependencies.get_file_paths(source="codice")
616
+ if len(science_files) != 1:
616
617
  raise ValueError(
617
- f"Unexpected dependencies found for CoDICE L1a:"
618
- f"{dependency_list}. Expected only one dependency."
618
+ f"CoDICE L1A requires exactly one input science file, received: "
619
+ f"{science_files}."
619
620
  )
620
621
  # process data
621
- science_files = dependencies.get_file_paths(source="codice")
622
622
  datasets = codice_l1a.process_codice_l1a(science_files[0])
623
623
 
624
624
  if self.data_level == "l1b":
625
- if len(dependency_list) > 1:
625
+ science_files = dependencies.get_file_paths(source="codice")
626
+ if len(science_files) != 1:
626
627
  raise ValueError(
627
- f"Unexpected dependencies found for CoDICE L1b:"
628
- f"{dependency_list}. Expected only one dependency."
628
+ f"CoDICE L1B requires exactly one input science file, received: "
629
+ f"{science_files}."
629
630
  )
630
631
  # process data
632
+ datasets = [codice_l1b.process_codice_l1b(science_files[0])]
633
+
634
+ if self.data_level == "l2":
631
635
  science_files = dependencies.get_file_paths(source="codice")
632
- dependency = load_cdf(science_files[0])
633
- datasets = [codice_l1b.process_codice_l1b(dependency)]
636
+ if len(science_files) != 1:
637
+ raise ValueError(
638
+ f"CoDICE L2 requires exactly one input science file, received: "
639
+ f"{science_files}."
640
+ )
641
+ datasets = [codice_l2.process_codice_l2(science_files[0])]
634
642
 
635
643
  return datasets
636
644
 
@@ -657,33 +665,32 @@ class Glows(ProcessInstrument):
657
665
  print(f"Processing GLOWS {self.data_level}")
658
666
  datasets: list[xr.Dataset] = []
659
667
 
660
- dependency_list = dependencies.processing_input
661
668
  if self.data_level == "l1a":
662
- if len(dependency_list) > 1:
669
+ science_files = dependencies.get_file_paths(source="glows")
670
+ if len(science_files) != 1:
663
671
  raise ValueError(
664
- f"Unexpected dependencies found for GLOWS L1A:"
665
- f"{dependency_list}. Expected only one input dependency."
672
+ f"GLOWS L1A requires exactly one input science file, received: "
673
+ f"{science_files}."
666
674
  )
667
- science_files = dependencies.get_file_paths(source="glows")
668
675
  datasets = glows_l1a(science_files[0])
669
676
 
670
677
  if self.data_level == "l1b":
671
- if len(dependency_list) > 1:
678
+ science_files = dependencies.get_file_paths(source="glows")
679
+ if len(science_files) != 1:
672
680
  raise ValueError(
673
- f"Unexpected dependencies found for GLOWS L1B:"
674
- f"{dependency_list}. Expected at least one input dependency."
681
+ f"GLOWS L1A requires exactly one input science file, received: "
682
+ f"{science_files}."
675
683
  )
676
- science_files = dependencies.get_file_paths(source="glows")
677
684
  input_dataset = load_cdf(science_files[0])
678
685
  datasets = [glows_l1b(input_dataset)]
679
686
 
680
687
  if self.data_level == "l2":
681
- if len(dependency_list) > 1:
688
+ science_files = dependencies.get_file_paths(source="glows")
689
+ if len(science_files) != 1:
682
690
  raise ValueError(
683
- f"Unexpected dependencies found for GLOWS L2:"
684
- f"{dependency_list}. Expected only one input dependency."
691
+ f"GLOWS L1A requires exactly one input science file, received: "
692
+ f"{science_files}."
685
693
  )
686
- science_files = dependencies.get_file_paths(source="glows")
687
694
  input_dataset = load_cdf(science_files[0])
688
695
  datasets = glows_l2(input_dataset)
689
696
 
@@ -781,25 +788,23 @@ class Hit(ProcessInstrument):
781
788
 
782
789
  dependency_list = dependencies.processing_input
783
790
  if self.data_level == "l1a":
784
- if len(dependency_list) > 1:
791
+ # 1 science files and 2 spice files
792
+ if len(dependency_list) > 3:
785
793
  raise ValueError(
786
794
  f"Unexpected dependencies found for HIT L1A:"
787
795
  f"{dependency_list}. Expected only one dependency."
788
796
  )
789
797
  # process data to L1A products
790
- science_files = dependencies.get_file_paths(source="hit")
798
+ science_files = dependencies.get_file_paths(source="hit", descriptor="raw")
791
799
  datasets = hit_l1a(science_files[0])
792
800
 
793
801
  elif self.data_level == "l1b":
794
- if len(dependency_list) > 1:
795
- raise ValueError(
796
- f"Unexpected dependencies found for HIT L1B:"
797
- f"{dependency_list}. Expected only one dependency."
798
- )
799
802
  data_dict = {}
800
- # TODO: Check this and update with new features as needed.
803
+ # TODO: Sean removed the file number error handling to work with the
804
+ # new SPICE dependencies for SIT-4. Need to review and make changes
805
+ # if needed.
801
806
  l0_files = dependencies.get_file_paths(source="hit", descriptor="raw")
802
- l1a_files = dependencies.get_file_paths(source="hit")
807
+ l1a_files = dependencies.get_file_paths(source="hit", data_type="l1a")
803
808
  if len(l0_files) > 0:
804
809
  # Add path to CCSDS file to process housekeeping
805
810
  data_dict["imap_hit_l0_raw"] = l0_files[0]
@@ -938,42 +943,50 @@ class Lo(ProcessInstrument):
938
943
  """
939
944
  print(f"Processing IMAP-Lo {self.data_level}")
940
945
  datasets: list[xr.Dataset] = []
941
- dependency_list = dependencies.processing_input
942
946
  if self.data_level == "l1a":
943
947
  # L1A packet / products are 1 to 1. Should only have
944
948
  # one dependency file
945
- if len(dependency_list) > 1:
949
+ science_files = dependencies.get_file_paths(source="lo", data_type="l0")
950
+ if len(science_files) > 1:
946
951
  raise ValueError(
947
952
  f"Unexpected dependencies found for IMAP-Lo L1A:"
948
- f"{dependency_list}. Expected only one dependency."
953
+ f"{science_files}. Expected only one dependency."
949
954
  )
950
- science_files = dependencies.get_file_paths(source="lo")
951
955
  datasets = lo_l1a.lo_l1a(science_files[0])
952
956
 
953
957
  elif self.data_level == "l1b":
954
958
  data_dict = {}
955
- # TODO: Check this and update with new features as needed.
956
- for input_type in dependencies.processing_input:
957
- science_files = dependencies.get_file_paths(
958
- source="lo", descriptor=input_type.descriptor
959
- )
960
- dataset = load_cdf(science_files[0])
959
+ science_files = dependencies.get_file_paths(source="lo", data_type="l1a")
960
+ logger.info(f"Science files for L1B: {science_files}")
961
+ for file in science_files:
962
+ dataset = load_cdf(file)
961
963
  data_dict[dataset.attrs["Logical_source"]] = dataset
962
964
  datasets = lo_l1b.lo_l1b(data_dict)
963
965
 
964
966
  elif self.data_level == "l1c":
965
967
  data_dict = {}
966
- for input_type in dependencies.processing_input:
967
- science_files = dependencies.get_file_paths(
968
- source="lo", descriptor=input_type.descriptor
969
- )
970
- dataset = load_cdf(science_files[0])
968
+ anc_dependencies: list = dependencies.get_file_paths(
969
+ source="lo", descriptor="goodtimes"
970
+ )
971
+ science_files = dependencies.get_file_paths(source="lo", descriptor="de")
972
+ for file in science_files:
973
+ dataset = load_cdf(file)
971
974
  data_dict[dataset.attrs["Logical_source"]] = dataset
972
- # TODO: add dependencies to S3 and dependency tree
973
- # setting to empty for now
974
- anc_depedencies: list = []
975
- datasets = lo_l1c.lo_l1c(data_dict, anc_depedencies)
975
+ datasets = lo_l1c.lo_l1c(data_dict, anc_dependencies)
976
976
 
977
+ elif self.data_level == "l2":
978
+ data_dict = {}
979
+ # TODO: Add ancillary descriptors when maps using them are
980
+ # implemented.
981
+ anc_dependencies = dependencies.get_file_paths(
982
+ source="lo",
983
+ )
984
+ science_files = dependencies.get_file_paths(source="lo", descriptor="pset")
985
+ psets = []
986
+ for file in science_files:
987
+ psets.append(load_cdf(file))
988
+ data_dict[psets[0].attrs["Logical_source"]] = psets
989
+ datasets = lo_l2.lo_l2(data_dict, anc_dependencies)
977
990
  return datasets
978
991
 
979
992
 
@@ -1071,12 +1084,14 @@ class Mag(ProcessInstrument):
1071
1084
  # TODO: Ensure that parent_files attribute works with that
1072
1085
  input_data = load_cdf(science_files[0])
1073
1086
 
1087
+ descriptor_no_frame = str.split(self.descriptor, "-")[0]
1088
+
1074
1089
  # We expect either a norm or a burst input descriptor.
1075
- offsets_desc = f"l2-offsets-{self.descriptor}"
1090
+ offsets_desc = f"l2-{descriptor_no_frame}-offsets"
1076
1091
  offsets = dependencies.get_processing_inputs(descriptor=offsets_desc)
1077
1092
 
1078
1093
  calibration = dependencies.get_processing_inputs(
1079
- descriptor="l2-calibration-matrices"
1094
+ descriptor="l2-calibration"
1080
1095
  )
1081
1096
 
1082
1097
  if (
@@ -1104,6 +1119,7 @@ class Mag(ProcessInstrument):
1104
1119
  offset_dataset,
1105
1120
  input_data,
1106
1121
  current_day,
1122
+ mode=DataMode(descriptor_no_frame.upper()),
1107
1123
  )
1108
1124
 
1109
1125
  return datasets
@@ -1254,17 +1270,20 @@ class Swe(ProcessInstrument):
1254
1270
  raise ValueError(
1255
1271
  f"Unexpected dependencies found for SWE L1A:"
1256
1272
  f"{dependency_list}. Expected only two dependencies."
1273
+ "L0 data and time kernels."
1257
1274
  )
1258
1275
  science_files = dependencies.get_file_paths(source="swe")
1259
1276
  datasets = swe_l1a(str(science_files[0]))
1260
1277
  # Right now, we only process science data. Therefore,
1261
1278
  # we expect only one dataset to be returned.
1262
1279
 
1263
- elif self.data_level == "l1b":
1264
- if len(dependency_list) != 4:
1280
+ elif self.data_level == "l1b" and self.descriptor == "sci":
1281
+ if len(dependency_list) != 5:
1265
1282
  raise ValueError(
1266
1283
  f"Unexpected dependencies found for SWE L1B:"
1267
- f"{dependency_list}. Expected exactly four dependencies."
1284
+ f"{dependency_list}. Expected exactly five dependencies."
1285
+ "L1A science, in-fligth cal, esa LUT, EU conversion and "
1286
+ "time kernels."
1268
1287
  )
1269
1288
 
1270
1289
  science_files = dependencies.get_file_paths("swe", "sci")
@@ -1274,6 +1293,30 @@ class Swe(ProcessInstrument):
1274
1293
  )
1275
1294
 
1276
1295
  datasets = swe_l1b(dependencies)
1296
+ elif self.data_level == "l1b" and self.descriptor == "hk":
1297
+ if len(dependency_list) != 2:
1298
+ raise ValueError(
1299
+ f"Unexpected dependencies found for SWE L1B HK:"
1300
+ f"{dependency_list}. Expected exactly two dependencies."
1301
+ "L0 data and time kernels."
1302
+ )
1303
+ # process data
1304
+ datasets = swe_l1b(dependencies)
1305
+ elif self.data_level == "l2":
1306
+ if len(dependency_list) != 2:
1307
+ raise ValueError(
1308
+ f"Unexpected dependencies found for SWE L2:"
1309
+ f"{dependency_list}. Expected exactly two dependencies."
1310
+ "L1B science and spin data."
1311
+ )
1312
+ # process data
1313
+ science_files = dependencies.get_file_paths(source="swe", descriptor="sci")
1314
+ if len(science_files) > 1:
1315
+ raise ValueError(
1316
+ "Multiple science files processing is not supported for SWE L2."
1317
+ )
1318
+ l1b_datasets = load_cdf(science_files[0])
1319
+ datasets = [swe_l2(l1b_datasets)]
1277
1320
  else:
1278
1321
  print("Did not recognize data level. No processing done.")
1279
1322
 
@@ -1302,31 +1345,53 @@ class Ultra(ProcessInstrument):
1302
1345
  print(f"Processing IMAP-Ultra {self.data_level}")
1303
1346
  datasets: list[xr.Dataset] = []
1304
1347
 
1305
- dependency_list = dependencies.processing_input
1306
1348
  if self.data_level == "l1a":
1307
- # File path is expected output file path
1308
- if len(dependency_list) > 1:
1349
+ science_files = dependencies.get_file_paths(source="ultra")
1350
+ if len(science_files) != 1:
1309
1351
  raise ValueError(
1310
- f"Unexpected dependencies found for ULTRA L1A:"
1311
- f"{dependency_list}. Expected only one dependency."
1352
+ f"Unexpected science_files found for ULTRA L1A:"
1353
+ f"{science_files}. Expected only one dependency."
1312
1354
  )
1313
- science_files = dependencies.get_file_paths(source="ultra")
1314
1355
  datasets = ultra_l1a.ultra_l1a(science_files[0])
1315
-
1316
1356
  elif self.data_level == "l1b":
1317
- data_dict = {}
1318
- for dep in dependency_list:
1319
- dataset = load_cdf(dep.imap_file_paths[0])
1320
- data_dict[dataset.attrs["Logical_source"]] = dataset
1321
- datasets = ultra_l1b.ultra_l1b(data_dict)
1322
-
1357
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1a")
1358
+ l1a_dict = {
1359
+ dataset.attrs["Logical_source"]: dataset
1360
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1361
+ }
1362
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1b")
1363
+ l1b_dict = {
1364
+ dataset.attrs["Logical_source"]: dataset
1365
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1366
+ }
1367
+ combined = {**l1a_dict, **l1b_dict}
1368
+ anc_paths = dependencies.get_file_paths(data_type="ancillary")
1369
+ ancillary_files = {}
1370
+ for path in anc_paths:
1371
+ ancillary_files[path.stem.split("_")[2]] = path
1372
+ datasets = ultra_l1b.ultra_l1b(combined, ancillary_files)
1323
1373
  elif self.data_level == "l1c":
1324
- data_dict = {}
1325
- for dep in dependency_list:
1326
- dataset = load_cdf(dep.imap_file_paths[0])
1327
- data_dict[dataset.attrs["Logical_source"]] = dataset
1328
- datasets = ultra_l1c.ultra_l1c(data_dict)
1329
-
1374
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1a")
1375
+ l1a_dict = {
1376
+ dataset.attrs["Logical_source"]: dataset
1377
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1378
+ }
1379
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1b")
1380
+ l1b_dict = {
1381
+ dataset.attrs["Logical_source"]: dataset
1382
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1383
+ }
1384
+ combined = {**l1a_dict, **l1b_dict}
1385
+ anc_paths = dependencies.get_file_paths(data_type="ancillary")
1386
+ ancillary_files = {}
1387
+ for path in anc_paths:
1388
+ ancillary_files[path.stem.split("_")[2]] = path
1389
+ spice_paths = dependencies.get_file_paths(data_type="spice")
1390
+ if spice_paths:
1391
+ has_spice = True
1392
+ else:
1393
+ has_spice = False
1394
+ datasets = ultra_l1c.ultra_l1c(combined, ancillary_files, has_spice)
1330
1395
  elif self.data_level == "l2":
1331
1396
  all_pset_filepaths = dependencies.get_file_paths(
1332
1397
  source="ultra", descriptor="pset"