imap-processing 0.14.0__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (61) hide show
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +60 -35
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +765 -287
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1577 -288
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1004 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +18 -0
  9. imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +39 -3
  10. imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml +18 -0
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +370 -0
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +7 -0
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +11 -0
  14. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +4 -0
  15. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +7 -3
  16. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +6 -0
  17. imap_processing/cdf/config/imap_mag_l2_variable_attrs.yaml +114 -0
  18. imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +11 -5
  19. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +23 -1
  20. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +4 -0
  21. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +2 -2
  22. imap_processing/cli.py +144 -76
  23. imap_processing/codice/codice_l1a.py +53 -22
  24. imap_processing/codice/codice_l1b.py +91 -18
  25. imap_processing/codice/codice_l2.py +89 -0
  26. imap_processing/codice/constants.py +62 -5
  27. imap_processing/ena_maps/ena_maps.py +43 -1
  28. imap_processing/glows/l2/glows_l2_data.py +3 -6
  29. imap_processing/ialirt/l0/process_swe.py +2 -2
  30. imap_processing/ialirt/utils/constants.py +48 -0
  31. imap_processing/ialirt/utils/create_xarray.py +87 -0
  32. imap_processing/idex/idex_l2c.py +9 -9
  33. imap_processing/lo/l1b/lo_l1b.py +6 -1
  34. imap_processing/lo/l1c/lo_l1c.py +22 -13
  35. imap_processing/lo/l2/lo_l2.py +213 -0
  36. imap_processing/mag/l1c/mag_l1c.py +8 -1
  37. imap_processing/mag/l2/mag_l2.py +6 -2
  38. imap_processing/mag/l2/mag_l2_data.py +7 -5
  39. imap_processing/swe/l1a/swe_l1a.py +6 -6
  40. imap_processing/swe/l1b/swe_l1b.py +70 -11
  41. imap_processing/ultra/l0/decom_ultra.py +1 -1
  42. imap_processing/ultra/l0/ultra_utils.py +0 -4
  43. imap_processing/ultra/l1b/badtimes.py +7 -3
  44. imap_processing/ultra/l1b/cullingmask.py +7 -2
  45. imap_processing/ultra/l1b/de.py +26 -12
  46. imap_processing/ultra/l1b/lookup_utils.py +8 -7
  47. imap_processing/ultra/l1b/ultra_l1b.py +59 -48
  48. imap_processing/ultra/l1b/ultra_l1b_culling.py +50 -18
  49. imap_processing/ultra/l1b/ultra_l1b_extended.py +4 -4
  50. imap_processing/ultra/l1c/helio_pset.py +53 -0
  51. imap_processing/ultra/l1c/spacecraft_pset.py +20 -12
  52. imap_processing/ultra/l1c/ultra_l1c.py +49 -26
  53. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +40 -2
  54. imap_processing/ultra/l2/ultra_l2.py +47 -2
  55. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +524 -526
  56. imap_processing/ultra/utils/ultra_l1_utils.py +47 -8
  57. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/METADATA +2 -2
  58. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/RECORD +61 -52
  59. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/LICENSE +0 -0
  60. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/WHEEL +0 -0
  61. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/entry_points.txt +0 -0
@@ -12,28 +12,34 @@ imap_swe_l1a_sci:
12
12
  <<: *instrument_base
13
13
  Data_type: L1A_SCI>Level-1A Science data
14
14
  Logical_source: imap_swe_l1a_sci
15
- Logical_source_description: SWE Instrument Level-1A Science Data
15
+ Logical_source_description: IMAP SWE Instrument Level-1A Science Data
16
16
 
17
17
  imap_swe_l1a_hk:
18
18
  <<: *instrument_base
19
19
  Data_type: L1A_HK>Level-1A Housekeeping data
20
20
  Logical_source: imap_swe_l1a_hk
21
- Logical_source_description: SWE Instrument Level-1A Housekeeping Data
21
+ Logical_source_description: IMAP SWE Instrument Level-1A Housekeeping Data
22
22
 
23
23
  imap_swe_l1a_cem-raw:
24
24
  <<: *instrument_base
25
25
  Data_type: L1A_CEM-RAW>Level-1A CEM Raw data
26
26
  Logical_source: imap_swe_l1a_cem-raw
27
- Logical_source_description: SWE Instrument Level-1A CEM Raw Data
27
+ Logical_source_description: IMAP SWE Instrument Level-1A CEM Raw Data
28
28
 
29
29
  imap_swe_l1b_sci:
30
30
  <<: *instrument_base
31
31
  Data_type: L1B_SCI>Level-1B Science data
32
32
  Logical_source: imap_swe_l1b_sci
33
- Logical_source_description: SWE Instrument Level-1B Science Data
33
+ Logical_source_description: IMAP SWE Instrument Level-1B Science Data
34
+
35
+ imap_swe_l1b_hk:
36
+ <<: *instrument_base
37
+ Data_type: L1B_HK>Level-1B Housekeeping data
38
+ Logical_source: imap_swe_l1b_hk
39
+ Logical_source_description: IMAP SWE Instrument Level-1B Housekeeping Data
34
40
 
35
41
  imap_swe_l2_sci:
36
42
  <<: *instrument_base
37
43
  Data_type: L2_SCI>Level-2 Science data
38
44
  Logical_source: imap_swe_l2_sci
39
- Logical_source_description: SWE Instrument Level-2 Science Data
45
+ Logical_source_description: IMAP SWE Instrument Level-2 Science Data
@@ -299,4 +299,26 @@ cksum:
299
299
  CATDESC: Checksum
300
300
  FIELDNAM: Checksum
301
301
  FORMAT: I5
302
- VALIDMAX: 65535
302
+ VALIDMAX: 65535
303
+
304
+ # <=== HK Variables ===>
305
+ # L1B HK data attrs for data with string values
306
+ l1b_hk_string_attrs:
307
+ CATDESC: Housekeeping derived data
308
+ FIELDNAM: Housekeeping Human Readable State
309
+ FORMAT: A80
310
+ VAR_TYPE: metadata
311
+ DEPEND_0: epoch
312
+
313
+ l1b_hk_attrs:
314
+ CATDESC: SWE HK data
315
+ FIELDNAM: SWE Housekeeping Data
316
+ LABLAXIS: Values
317
+ DEPEND_0: epoch
318
+ FILLVAL: -9223372036854775808
319
+ FORMAT: I19
320
+ UNITS: ' '
321
+ VALIDMIN: 0
322
+ VALIDMAX: 9223372036854769664
323
+ VAR_TYPE: support_data
324
+ DISPLAY_TYPE: time_series
@@ -334,6 +334,7 @@ spin_start_time:
334
334
  LABLAXIS: spin start time
335
335
  # TODO: come back to format
336
336
  UNITS: s
337
+ DEPEND_0: spin_number
337
338
 
338
339
  spin_period:
339
340
  <<: *default
@@ -341,6 +342,7 @@ spin_period:
341
342
  FIELDNAM: spin_period
342
343
  LABLAXIS: spin_period
343
344
  UNITS: s
345
+ DEPEND_0: spin_number
344
346
 
345
347
  spin_rate:
346
348
  <<: *default
@@ -348,6 +350,7 @@ spin_rate:
348
350
  FIELDNAM: spin_rate
349
351
  LABLAXIS: spin_rate
350
352
  UNITS: rpm
353
+ DEPEND_0: spin_number
351
354
 
352
355
  rate_start_pulses:
353
356
  <<: *default
@@ -424,6 +427,7 @@ quality_attitude:
424
427
  LABLAXIS: quality attitude
425
428
  # TODO: come back to format
426
429
  UNITS: " "
430
+ DEPEND_0: spin_number
427
431
 
428
432
  quality_instruments:
429
433
  <<: *default_uint16
@@ -48,7 +48,7 @@ exposure_factor:
48
48
  UNITS: seconds
49
49
 
50
50
  sensitivity:
51
- <<: *default
51
+ <<: *default_float32
52
52
  CATDESC: Calibration/sensitivity factor.
53
53
  FIELDNAM: sensitivity
54
54
  LABLAXIS: sensitivity
@@ -90,7 +90,7 @@ shcoarse:
90
90
  energy_bin_delta:
91
91
  <<: *default_float32
92
92
  CATDESC: Difference between the energy bin edges.
93
- DEPEND_0: energy_bin_geometric_mean
93
+ DEPEND_1: energy_bin_geometric_mean
94
94
  FIELDNAM: energy_bin_delta
95
95
  LABLAXIS: energy bin delta
96
96
  UNITS: keV
imap_processing/cli.py CHANGED
@@ -47,7 +47,7 @@ from imap_processing.cdf.utils import load_cdf, write_cdf
47
47
  # from imap_processing import cdf
48
48
  # In code:
49
49
  # call cdf.utils.write_cdf
50
- from imap_processing.codice import codice_l1a, codice_l1b
50
+ from imap_processing.codice import codice_l1a, codice_l1b, codice_l2
51
51
  from imap_processing.glows.l1a.glows_l1a import glows_l1a
52
52
  from imap_processing.glows.l1b.glows_l1b import glows_l1b
53
53
  from imap_processing.glows.l2.glows_l2 import glows_l2
@@ -66,6 +66,8 @@ from imap_processing.idex.idex_l2c import idex_l2c
66
66
  from imap_processing.lo.l1a import lo_l1a
67
67
  from imap_processing.lo.l1b import lo_l1b
68
68
  from imap_processing.lo.l1c import lo_l1c
69
+ from imap_processing.lo.l2 import lo_l2
70
+ from imap_processing.mag.constants import DataMode
69
71
  from imap_processing.mag.l1a.mag_l1a import mag_l1a
70
72
  from imap_processing.mag.l1b.mag_l1b import mag_l1b
71
73
  from imap_processing.mag.l1c.mag_l1c import mag_l1c
@@ -77,6 +79,7 @@ from imap_processing.swapi.l2.swapi_l2 import swapi_l2
77
79
  from imap_processing.swapi.swapi_utils import read_swapi_lut_table
78
80
  from imap_processing.swe.l1a.swe_l1a import swe_l1a
79
81
  from imap_processing.swe.l1b.swe_l1b import swe_l1b
82
+ from imap_processing.swe.l2.swe_l2 import swe_l2
80
83
  from imap_processing.ultra.l1a import ultra_l1a
81
84
  from imap_processing.ultra.l1b import ultra_l1b
82
85
  from imap_processing.ultra.l1c import ultra_l1c
@@ -416,6 +419,7 @@ class ProcessInstrument(ABC):
416
419
  repointing=file_path.repointing,
417
420
  version=file_path.version,
418
421
  extension="cdf",
422
+ table="science",
419
423
  )
420
424
  if existing_file:
421
425
  raise ProcessInstrument.ImapFileExistsError(
@@ -610,27 +614,34 @@ class Codice(ProcessInstrument):
610
614
  print(f"Processing CoDICE {self.data_level}")
611
615
  datasets: list[xr.Dataset] = []
612
616
 
613
- dependency_list = dependencies.processing_input
614
617
  if self.data_level == "l1a":
615
- if len(dependency_list) > 1:
618
+ science_files = dependencies.get_file_paths(source="codice")
619
+ if len(science_files) != 1:
616
620
  raise ValueError(
617
- f"Unexpected dependencies found for CoDICE L1a:"
618
- f"{dependency_list}. Expected only one dependency."
621
+ f"CoDICE L1A requires exactly one input science file, received: "
622
+ f"{science_files}."
619
623
  )
620
624
  # process data
621
- science_files = dependencies.get_file_paths(source="codice")
622
625
  datasets = codice_l1a.process_codice_l1a(science_files[0])
623
626
 
624
627
  if self.data_level == "l1b":
625
- if len(dependency_list) > 1:
628
+ science_files = dependencies.get_file_paths(source="codice")
629
+ if len(science_files) != 1:
626
630
  raise ValueError(
627
- f"Unexpected dependencies found for CoDICE L1b:"
628
- f"{dependency_list}. Expected only one dependency."
631
+ f"CoDICE L1B requires exactly one input science file, received: "
632
+ f"{science_files}."
629
633
  )
630
634
  # process data
635
+ datasets = [codice_l1b.process_codice_l1b(science_files[0])]
636
+
637
+ if self.data_level == "l2":
631
638
  science_files = dependencies.get_file_paths(source="codice")
632
- dependency = load_cdf(science_files[0])
633
- datasets = [codice_l1b.process_codice_l1b(dependency)]
639
+ if len(science_files) != 1:
640
+ raise ValueError(
641
+ f"CoDICE L2 requires exactly one input science file, received: "
642
+ f"{science_files}."
643
+ )
644
+ datasets = [codice_l2.process_codice_l2(science_files[0])]
634
645
 
635
646
  return datasets
636
647
 
@@ -657,33 +668,32 @@ class Glows(ProcessInstrument):
657
668
  print(f"Processing GLOWS {self.data_level}")
658
669
  datasets: list[xr.Dataset] = []
659
670
 
660
- dependency_list = dependencies.processing_input
661
671
  if self.data_level == "l1a":
662
- if len(dependency_list) > 1:
672
+ science_files = dependencies.get_file_paths(source="glows")
673
+ if len(science_files) != 1:
663
674
  raise ValueError(
664
- f"Unexpected dependencies found for GLOWS L1A:"
665
- f"{dependency_list}. Expected only one input dependency."
675
+ f"GLOWS L1A requires exactly one input science file, received: "
676
+ f"{science_files}."
666
677
  )
667
- science_files = dependencies.get_file_paths(source="glows")
668
678
  datasets = glows_l1a(science_files[0])
669
679
 
670
680
  if self.data_level == "l1b":
671
- if len(dependency_list) > 1:
681
+ science_files = dependencies.get_file_paths(source="glows")
682
+ if len(science_files) != 1:
672
683
  raise ValueError(
673
- f"Unexpected dependencies found for GLOWS L1B:"
674
- f"{dependency_list}. Expected at least one input dependency."
684
+ f"GLOWS L1B requires exactly one input science file, received: "
685
+ f"{science_files}."
675
686
  )
676
- science_files = dependencies.get_file_paths(source="glows")
677
687
  input_dataset = load_cdf(science_files[0])
678
688
  datasets = [glows_l1b(input_dataset)]
679
689
 
680
690
  if self.data_level == "l2":
681
- if len(dependency_list) > 1:
691
+ science_files = dependencies.get_file_paths(source="glows")
692
+ if len(science_files) != 1:
682
693
  raise ValueError(
683
- f"Unexpected dependencies found for GLOWS L2:"
684
- f"{dependency_list}. Expected only one input dependency."
694
+ f"GLOWS L2 requires exactly one input science file, received: "
695
+ f"{science_files}."
685
696
  )
686
- science_files = dependencies.get_file_paths(source="glows")
687
697
  input_dataset = load_cdf(science_files[0])
688
698
  datasets = glows_l2(input_dataset)
689
699
 
@@ -781,25 +791,23 @@ class Hit(ProcessInstrument):
781
791
 
782
792
  dependency_list = dependencies.processing_input
783
793
  if self.data_level == "l1a":
784
- if len(dependency_list) > 1:
794
+ # 1 science file and 2 spice files
795
+ if len(dependency_list) > 3:
785
796
  raise ValueError(
786
797
  f"Unexpected dependencies found for HIT L1A:"
787
798
  f"{dependency_list}. Expected only one dependency."
788
799
  )
789
800
  # process data to L1A products
790
- science_files = dependencies.get_file_paths(source="hit")
801
+ science_files = dependencies.get_file_paths(source="hit", descriptor="raw")
791
802
  datasets = hit_l1a(science_files[0])
792
803
 
793
804
  elif self.data_level == "l1b":
794
- if len(dependency_list) > 1:
795
- raise ValueError(
796
- f"Unexpected dependencies found for HIT L1B:"
797
- f"{dependency_list}. Expected only one dependency."
798
- )
799
805
  data_dict = {}
800
- # TODO: Check this and update with new features as needed.
806
+ # TODO: Sean removed the file number error handling to work with the
807
+ # new SPICE dependencies for SIT-4. Need to review and make changes
808
+ # if needed.
801
809
  l0_files = dependencies.get_file_paths(source="hit", descriptor="raw")
802
- l1a_files = dependencies.get_file_paths(source="hit")
810
+ l1a_files = dependencies.get_file_paths(source="hit", data_type="l1a")
803
811
  if len(l0_files) > 0:
804
812
  # Add path to CCSDS file to process housekeeping
805
813
  data_dict["imap_hit_l0_raw"] = l0_files[0]
@@ -938,42 +946,50 @@ class Lo(ProcessInstrument):
938
946
  """
939
947
  print(f"Processing IMAP-Lo {self.data_level}")
940
948
  datasets: list[xr.Dataset] = []
941
- dependency_list = dependencies.processing_input
942
949
  if self.data_level == "l1a":
943
950
  # L1A packet / products are 1 to 1. Should only have
944
951
  # one dependency file
945
- if len(dependency_list) > 1:
952
+ science_files = dependencies.get_file_paths(source="lo", data_type="l0")
953
+ if len(science_files) > 1:
946
954
  raise ValueError(
947
955
  f"Unexpected dependencies found for IMAP-Lo L1A:"
948
- f"{dependency_list}. Expected only one dependency."
956
+ f"{science_files}. Expected only one dependency."
949
957
  )
950
- science_files = dependencies.get_file_paths(source="lo")
951
958
  datasets = lo_l1a.lo_l1a(science_files[0])
952
959
 
953
960
  elif self.data_level == "l1b":
954
961
  data_dict = {}
955
- # TODO: Check this and update with new features as needed.
956
- for input_type in dependencies.processing_input:
957
- science_files = dependencies.get_file_paths(
958
- source="lo", descriptor=input_type.descriptor
959
- )
960
- dataset = load_cdf(science_files[0])
962
+ science_files = dependencies.get_file_paths(source="lo", data_type="l1a")
963
+ logger.info(f"Science files for L1B: {science_files}")
964
+ for file in science_files:
965
+ dataset = load_cdf(file)
961
966
  data_dict[dataset.attrs["Logical_source"]] = dataset
962
967
  datasets = lo_l1b.lo_l1b(data_dict)
963
968
 
964
969
  elif self.data_level == "l1c":
965
970
  data_dict = {}
966
- for input_type in dependencies.processing_input:
967
- science_files = dependencies.get_file_paths(
968
- source="lo", descriptor=input_type.descriptor
969
- )
970
- dataset = load_cdf(science_files[0])
971
+ anc_dependencies: list = dependencies.get_file_paths(
972
+ source="lo", descriptor="goodtimes"
973
+ )
974
+ science_files = dependencies.get_file_paths(source="lo", descriptor="de")
975
+ for file in science_files:
976
+ dataset = load_cdf(file)
971
977
  data_dict[dataset.attrs["Logical_source"]] = dataset
972
- # TODO: add dependencies to S3 and dependency tree
973
- # setting to empty for now
974
- anc_depedencies: list = []
975
- datasets = lo_l1c.lo_l1c(data_dict, anc_depedencies)
978
+ datasets = lo_l1c.lo_l1c(data_dict, anc_dependencies)
976
979
 
980
+ elif self.data_level == "l2":
981
+ data_dict = {}
982
+ # TODO: Add ancillary descriptors when maps using them are
983
+ # implemented.
984
+ anc_dependencies = dependencies.get_file_paths(
985
+ source="lo",
986
+ )
987
+ science_files = dependencies.get_file_paths(source="lo", descriptor="pset")
988
+ psets = []
989
+ for file in science_files:
990
+ psets.append(load_cdf(file))
991
+ data_dict[psets[0].attrs["Logical_source"]] = psets
992
+ datasets = lo_l2.lo_l2(data_dict, anc_dependencies)
977
993
  return datasets
978
994
 
979
995
 
@@ -1071,12 +1087,14 @@ class Mag(ProcessInstrument):
1071
1087
  # TODO: Ensure that parent_files attribute works with that
1072
1088
  input_data = load_cdf(science_files[0])
1073
1089
 
1090
+ descriptor_no_frame = str.split(self.descriptor, "-")[0]
1091
+
1074
1092
  # We expect either a norm or a burst input descriptor.
1075
- offsets_desc = f"l2-offsets-{self.descriptor}"
1093
+ offsets_desc = f"l2-{descriptor_no_frame}-offsets"
1076
1094
  offsets = dependencies.get_processing_inputs(descriptor=offsets_desc)
1077
1095
 
1078
1096
  calibration = dependencies.get_processing_inputs(
1079
- descriptor="l2-calibration-matrices"
1097
+ descriptor="l2-calibration"
1080
1098
  )
1081
1099
 
1082
1100
  if (
@@ -1104,6 +1122,7 @@ class Mag(ProcessInstrument):
1104
1122
  offset_dataset,
1105
1123
  input_data,
1106
1124
  current_day,
1125
+ mode=DataMode(descriptor_no_frame.upper()),
1107
1126
  )
1108
1127
 
1109
1128
  return datasets
@@ -1254,17 +1273,20 @@ class Swe(ProcessInstrument):
1254
1273
  raise ValueError(
1255
1274
  f"Unexpected dependencies found for SWE L1A:"
1256
1275
  f"{dependency_list}. Expected only two dependencies."
1276
+ "L0 data and time kernels."
1257
1277
  )
1258
1278
  science_files = dependencies.get_file_paths(source="swe")
1259
1279
  datasets = swe_l1a(str(science_files[0]))
1260
1280
  # Right now, we only process science data. Therefore,
1261
1281
  # we expect only one dataset to be returned.
1262
1282
 
1263
- elif self.data_level == "l1b":
1264
- if len(dependency_list) != 4:
1283
+ elif self.data_level == "l1b" and self.descriptor == "sci":
1284
+ if len(dependency_list) != 5:
1265
1285
  raise ValueError(
1266
1286
  f"Unexpected dependencies found for SWE L1B:"
1267
- f"{dependency_list}. Expected exactly four dependencies."
1287
+ f"{dependency_list}. Expected exactly five dependencies."
1288
+ "L1A science, in-flight cal, esa LUT, EU conversion and "
1289
+ "time kernels."
1268
1290
  )
1269
1291
 
1270
1292
  science_files = dependencies.get_file_paths("swe", "sci")
@@ -1274,6 +1296,30 @@ class Swe(ProcessInstrument):
1274
1296
  )
1275
1297
 
1276
1298
  datasets = swe_l1b(dependencies)
1299
+ elif self.data_level == "l1b" and self.descriptor == "hk":
1300
+ if len(dependency_list) != 2:
1301
+ raise ValueError(
1302
+ f"Unexpected dependencies found for SWE L1B HK:"
1303
+ f"{dependency_list}. Expected exactly two dependencies."
1304
+ "L0 data and time kernels."
1305
+ )
1306
+ # process data
1307
+ datasets = swe_l1b(dependencies)
1308
+ elif self.data_level == "l2":
1309
+ if len(dependency_list) != 2:
1310
+ raise ValueError(
1311
+ f"Unexpected dependencies found for SWE L2:"
1312
+ f"{dependency_list}. Expected exactly two dependencies."
1313
+ "L1B science and spin data."
1314
+ )
1315
+ # process data
1316
+ science_files = dependencies.get_file_paths(source="swe", descriptor="sci")
1317
+ if len(science_files) > 1:
1318
+ raise ValueError(
1319
+ "Multiple science files processing is not supported for SWE L2."
1320
+ )
1321
+ l1b_datasets = load_cdf(science_files[0])
1322
+ datasets = [swe_l2(l1b_datasets)]
1277
1323
  else:
1278
1324
  print("Did not recognize data level. No processing done.")
1279
1325
 
@@ -1302,31 +1348,53 @@ class Ultra(ProcessInstrument):
1302
1348
  print(f"Processing IMAP-Ultra {self.data_level}")
1303
1349
  datasets: list[xr.Dataset] = []
1304
1350
 
1305
- dependency_list = dependencies.processing_input
1306
1351
  if self.data_level == "l1a":
1307
- # File path is expected output file path
1308
- if len(dependency_list) > 1:
1352
+ science_files = dependencies.get_file_paths(source="ultra")
1353
+ if len(science_files) != 1:
1309
1354
  raise ValueError(
1310
- f"Unexpected dependencies found for ULTRA L1A:"
1311
- f"{dependency_list}. Expected only one dependency."
1355
+ f"Unexpected science_files found for ULTRA L1A:"
1356
+ f"{science_files}. Expected only one dependency."
1312
1357
  )
1313
- science_files = dependencies.get_file_paths(source="ultra")
1314
1358
  datasets = ultra_l1a.ultra_l1a(science_files[0])
1315
-
1316
1359
  elif self.data_level == "l1b":
1317
- data_dict = {}
1318
- for dep in dependency_list:
1319
- dataset = load_cdf(dep.imap_file_paths[0])
1320
- data_dict[dataset.attrs["Logical_source"]] = dataset
1321
- datasets = ultra_l1b.ultra_l1b(data_dict)
1322
-
1360
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1a")
1361
+ l1a_dict = {
1362
+ dataset.attrs["Logical_source"]: dataset
1363
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1364
+ }
1365
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1b")
1366
+ l1b_dict = {
1367
+ dataset.attrs["Logical_source"]: dataset
1368
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1369
+ }
1370
+ combined = {**l1a_dict, **l1b_dict}
1371
+ anc_paths = dependencies.get_file_paths(data_type="ancillary")
1372
+ ancillary_files = {}
1373
+ for path in anc_paths:
1374
+ ancillary_files[path.stem.split("_")[2]] = path
1375
+ datasets = ultra_l1b.ultra_l1b(combined, ancillary_files)
1323
1376
  elif self.data_level == "l1c":
1324
- data_dict = {}
1325
- for dep in dependency_list:
1326
- dataset = load_cdf(dep.imap_file_paths[0])
1327
- data_dict[dataset.attrs["Logical_source"]] = dataset
1328
- datasets = ultra_l1c.ultra_l1c(data_dict)
1329
-
1377
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1a")
1378
+ l1a_dict = {
1379
+ dataset.attrs["Logical_source"]: dataset
1380
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1381
+ }
1382
+ science_files = dependencies.get_file_paths(source="ultra", data_type="l1b")
1383
+ l1b_dict = {
1384
+ dataset.attrs["Logical_source"]: dataset
1385
+ for dataset in [load_cdf(sci_file) for sci_file in science_files]
1386
+ }
1387
+ combined = {**l1a_dict, **l1b_dict}
1388
+ anc_paths = dependencies.get_file_paths(data_type="ancillary")
1389
+ ancillary_files = {}
1390
+ for path in anc_paths:
1391
+ ancillary_files[path.stem.split("_")[2]] = path
1392
+ spice_paths = dependencies.get_file_paths(data_type="spice")
1393
+ if spice_paths:
1394
+ has_spice = True
1395
+ else:
1396
+ has_spice = False
1397
+ datasets = ultra_l1c.ultra_l1c(combined, ancillary_files, has_spice)
1330
1398
  elif self.data_level == "l2":
1331
1399
  all_pset_filepaths = dependencies.get_file_paths(
1332
1400
  source="ultra", descriptor="pset"