imap-processing 0.18.0-py3-none-any.whl → 0.19.2-py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +221 -1057
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +307 -283
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
- imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +15 -1
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +45 -35
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +110 -7
- imap_processing/cli.py +138 -93
- imap_processing/codice/codice_l0.py +2 -1
- imap_processing/codice/codice_l1a.py +167 -69
- imap_processing/codice/codice_l1b.py +42 -32
- imap_processing/codice/codice_l2.py +215 -9
- imap_processing/codice/constants.py +790 -603
- imap_processing/codice/data/lo_stepping_values.csv +1 -1
- imap_processing/decom.py +1 -4
- imap_processing/ena_maps/ena_maps.py +71 -43
- imap_processing/ena_maps/utils/corrections.py +291 -0
- imap_processing/ena_maps/utils/map_utils.py +20 -4
- imap_processing/ena_maps/utils/naming.py +8 -2
- imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
- imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
- imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
- imap_processing/glows/ancillary/imap_glows_pipeline-settings_20250923_v002.json +54 -0
- imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
- imap_processing/glows/l1b/glows_l1b.py +123 -18
- imap_processing/glows/l1b/glows_l1b_data.py +358 -47
- imap_processing/glows/l2/glows_l2.py +11 -0
- imap_processing/hi/hi_l1a.py +124 -3
- imap_processing/hi/hi_l1b.py +154 -71
- imap_processing/hi/hi_l1c.py +4 -109
- imap_processing/hi/hi_l2.py +104 -60
- imap_processing/hi/utils.py +262 -8
- imap_processing/hit/l0/constants.py +3 -0
- imap_processing/hit/l0/decom_hit.py +3 -6
- imap_processing/hit/l1a/hit_l1a.py +311 -21
- imap_processing/hit/l1b/hit_l1b.py +54 -126
- imap_processing/hit/l2/hit_l2.py +6 -6
- imap_processing/ialirt/calculate_ingest.py +219 -0
- imap_processing/ialirt/constants.py +12 -2
- imap_processing/ialirt/generate_coverage.py +15 -2
- imap_processing/ialirt/l0/ialirt_spice.py +6 -2
- imap_processing/ialirt/l0/parse_mag.py +293 -42
- imap_processing/ialirt/l0/process_hit.py +5 -3
- imap_processing/ialirt/l0/process_swapi.py +41 -25
- imap_processing/ialirt/process_ephemeris.py +70 -14
- imap_processing/ialirt/utils/create_xarray.py +1 -1
- imap_processing/idex/idex_l0.py +2 -2
- imap_processing/idex/idex_l1a.py +2 -3
- imap_processing/idex/idex_l1b.py +2 -3
- imap_processing/idex/idex_l2a.py +130 -4
- imap_processing/idex/idex_l2b.py +158 -143
- imap_processing/idex/idex_utils.py +1 -3
- imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
- imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
- imap_processing/lo/l0/lo_science.py +25 -24
- imap_processing/lo/l1b/lo_l1b.py +93 -19
- imap_processing/lo/l1c/lo_l1c.py +273 -93
- imap_processing/lo/l2/lo_l2.py +949 -135
- imap_processing/lo/lo_ancillary.py +55 -0
- imap_processing/mag/l1a/mag_l1a.py +1 -0
- imap_processing/mag/l1a/mag_l1a_data.py +26 -0
- imap_processing/mag/l1b/mag_l1b.py +3 -2
- imap_processing/mag/l1c/interpolation_methods.py +14 -15
- imap_processing/mag/l1c/mag_l1c.py +23 -6
- imap_processing/mag/l1d/mag_l1d.py +57 -14
- imap_processing/mag/l1d/mag_l1d_data.py +202 -32
- imap_processing/mag/l2/mag_l2.py +2 -0
- imap_processing/mag/l2/mag_l2_data.py +14 -5
- imap_processing/quality_flags.py +23 -1
- imap_processing/spice/geometry.py +89 -39
- imap_processing/spice/pointing_frame.py +4 -8
- imap_processing/spice/repoint.py +78 -2
- imap_processing/spice/spin.py +28 -8
- imap_processing/spice/time.py +12 -22
- imap_processing/swapi/l1/swapi_l1.py +10 -4
- imap_processing/swapi/l2/swapi_l2.py +15 -17
- imap_processing/swe/l1b/swe_l1b.py +1 -2
- imap_processing/ultra/constants.py +30 -24
- imap_processing/ultra/l0/ultra_utils.py +9 -11
- imap_processing/ultra/l1a/ultra_l1a.py +1 -2
- imap_processing/ultra/l1b/badtimes.py +35 -11
- imap_processing/ultra/l1b/de.py +95 -31
- imap_processing/ultra/l1b/extendedspin.py +31 -16
- imap_processing/ultra/l1b/goodtimes.py +112 -0
- imap_processing/ultra/l1b/lookup_utils.py +281 -28
- imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
- imap_processing/ultra/l1b/ultra_l1b.py +7 -7
- imap_processing/ultra/l1b/ultra_l1b_culling.py +169 -7
- imap_processing/ultra/l1b/ultra_l1b_extended.py +311 -69
- imap_processing/ultra/l1c/helio_pset.py +139 -37
- imap_processing/ultra/l1c/l1c_lookup_utils.py +289 -0
- imap_processing/ultra/l1c/spacecraft_pset.py +140 -29
- imap_processing/ultra/l1c/ultra_l1c.py +33 -24
- imap_processing/ultra/l1c/ultra_l1c_culling.py +92 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +400 -292
- imap_processing/ultra/l2/ultra_l2.py +54 -11
- imap_processing/ultra/utils/ultra_l1_utils.py +37 -7
- imap_processing/utils.py +3 -4
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +2 -2
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +118 -109
- imap_processing/idex/idex_l2c.py +0 -84
- imap_processing/spice/kernels.py +0 -187
- imap_processing/ultra/l1b/cullingmask.py +0 -87
- imap_processing/ultra/l1c/histogram.py +0 -36
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0
imap_processing/cli.py CHANGED
@@ -24,8 +24,7 @@ import imap_data_access
 import numpy as np
 import spiceypy
 import xarray as xr
-from imap_data_access import
-from imap_data_access.io import download
+from imap_data_access.io import IMAPDataAccessError, download
 from imap_data_access.processing_input import (
     ProcessingInputCollection,
     ProcessingInputType,

@@ -36,7 +35,10 @@ from imap_data_access.processing_input import (

 import imap_processing
 from imap_processing._version import __version__, __version_tuple__  # noqa: F401
-from imap_processing.ancillary.ancillary_dataset_combiner import
+from imap_processing.ancillary.ancillary_dataset_combiner import (
+    GlowsAncillaryCombiner,
+    MagAncillaryCombiner,
+)
 from imap_processing.cdf.utils import load_cdf, write_cdf

 # TODO: change how we import things and also folder

@@ -59,7 +61,6 @@ from imap_processing.idex.idex_l1a import PacketParser
 from imap_processing.idex.idex_l1b import idex_l1b
 from imap_processing.idex.idex_l2a import idex_l2a
 from imap_processing.idex.idex_l2b import idex_l2b
-from imap_processing.idex.idex_l2c import idex_l2c
 from imap_processing.lo.l1a import lo_l1a
 from imap_processing.lo.l1b import lo_l1b
 from imap_processing.lo.l1c import lo_l1c

@@ -379,7 +380,7 @@ class ProcessInstrument(ABC):
         data_level: str,
         data_descriptor: str,
         dependency_str: str,
-        start_date: str
+        start_date: str,
         repointing: str | None,
         version: str,
         upload_to_sdc: bool,

@@ -405,32 +406,23 @@ class ProcessInstrument(ABC):
         A list of file paths to upload to the SDC.
         """
         if self.upload_to_sdc:
-
-            for filename in products:
-                file_path = ScienceFilePath(filename)
-                existing_file = imap_data_access.query(
-                    instrument=file_path.instrument,
-                    data_level=file_path.data_level,
-                    descriptor=file_path.descriptor,
-                    start_date=file_path.start_date,
-                    end_date=file_path.start_date,
-                    repointing=file_path.repointing,
-                    version=file_path.version,
-                    extension="cdf",
-                    table="science",
-                )
-                if existing_file:
-                    raise ProcessInstrument.ImapFileExistsError(
-                        f"File {filename} already exists in the IMAP SDC. "
-                        "No files were uploaded."
-                        f"Generated files: {products}."
-                    )
-
-            if len(products) == 0:
+            if not products:
                 logger.info("No files to upload.")
+                return
+
             for filename in products:
-
-
+                try:
+                    logger.info(f"Uploading file: {filename}")
+                    imap_data_access.upload(filename)
+                except IMAPDataAccessError as e:
+                    msg = str(e)
+                    if "FileAlreadyExists" in msg and "409" in msg:
+                        logger.warning("Skipping upload of existing file, %s", filename)
+                        continue
+                    else:
+                        logger.error(f"Upload failed with error: {msg}")
+                except Exception as e:
+                    logger.error(f"Upload failed unknown error: {e}")

     @final
     def process(self) -> None:
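
The upload path above no longer pre-queries the SDC with `imap_data_access.query` and raises `ImapFileExistsError`; it now attempts every upload and inspects the error text, so one already-archived file no longer aborts the whole batch. A minimal runnable sketch of that skip-on-conflict pattern follows; the stub `upload` and the product file name are hypothetical stand-ins, and only `IMAPDataAccessError` plus the 409/FileAlreadyExists matching come from the diff.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("upload_sketch")


class IMAPDataAccessError(Exception):
    """Stand-in for imap_data_access.io.IMAPDataAccessError."""


def upload(filename: str) -> None:
    # Stub: pretend the SDC already holds this file and answers 409.
    raise IMAPDataAccessError(f"409 Conflict: FileAlreadyExists: {filename}")


products = ["imap_hit_l1a_hk_20250101_v001.cdf"]  # hypothetical product name
for filename in products:
    try:
        logger.info(f"Uploading file: {filename}")
        upload(filename)
    except IMAPDataAccessError as e:
        msg = str(e)
        if "FileAlreadyExists" in msg and "409" in msg:
            logger.warning("Skipping upload of existing file, %s", filename)
            continue
        logger.error(f"Upload failed with error: {msg}")

Note that errors are logged rather than re-raised, so the remaining products still get their upload attempts.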

@@ -674,7 +666,7 @@ class Glows(ProcessInstrument):
         datasets: list[xr.Dataset] = []

         if self.data_level == "l1a":
-            science_files = dependencies.get_file_paths(source="glows")
+            science_files = dependencies.get_file_paths(source="glows", data_type="l0")
             if len(science_files) != 1:
                 raise ValueError(
                     f"GLOWS L1A requires exactly one input science file, received: "

@@ -683,14 +675,62 @@
             datasets = glows_l1a(science_files[0])

         if self.data_level == "l1b":
-            science_files = dependencies.get_file_paths(source="glows")
+            science_files = dependencies.get_file_paths(source="glows", data_type="l1a")
             if len(science_files) != 1:
                 raise ValueError(
-                    f"GLOWS
+                    f"GLOWS L1B requires exactly one input science file, received: "
                     f"{science_files}."
                 )
             input_dataset = load_cdf(science_files[0])
-
+
+            # Create file lists for each ancillary type
+            excluded_regions_files = dependencies.get_processing_inputs(
+                descriptor="map-of-excluded-regions"
+            )[0]
+            uv_sources_files = dependencies.get_processing_inputs(
+                descriptor="map-of-uv-sources"
+            )[0]
+            suspected_transients_files = dependencies.get_processing_inputs(
+                descriptor="suspected-transients"
+            )[0]
+            exclusions_by_instr_team_files = dependencies.get_processing_inputs(
+                descriptor="exclusions-by-instr-team"
+            )[0]
+            pipeline_settings = dependencies.get_processing_inputs(
+                descriptor="pipeline-settings"
+            )[0]
+
+            # Use end date buffer for ancillary data
+            current_day = np.datetime64(
+                f"{self.start_date[:4]}-{self.start_date[4:6]}-{self.start_date[6:]}"
+            )
+            day_buffer = current_day + np.timedelta64(3, "D")
+
+            # Create combiners for each ancillary dataset
+            excluded_regions_combiner = GlowsAncillaryCombiner(
+                excluded_regions_files, day_buffer
+            )
+            uv_sources_combiner = GlowsAncillaryCombiner(uv_sources_files, day_buffer)
+            suspected_transients_combiner = GlowsAncillaryCombiner(
+                suspected_transients_files, day_buffer
+            )
+            exclusions_by_instr_team_combiner = GlowsAncillaryCombiner(
+                exclusions_by_instr_team_files, day_buffer
+            )
+            pipeline_settings_combiner = GlowsAncillaryCombiner(
+                pipeline_settings, day_buffer
+            )
+
+            datasets = [
+                glows_l1b(
+                    input_dataset,
+                    excluded_regions_combiner.combined_dataset,
+                    uv_sources_combiner.combined_dataset,
+                    suspected_transients_combiner.combined_dataset,
+                    exclusions_by_instr_team_combiner.combined_dataset,
+                    pipeline_settings_combiner.combined_dataset,
+                )
+            ]

         if self.data_level == "l2":
             science_files = dependencies.get_file_paths(source="glows")
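
The GLOWS L1B branch above converts the CLI's YYYYMMDD start date into a `numpy.datetime64` day and pads it by three days before building each `GlowsAncillaryCombiner`, so ancillary files whose validity begins shortly after the processing day are still considered. The date arithmetic in isolation, with a value chosen to match the v002 ancillary files shipped in this release:

import numpy as np

start_date = "20250923"  # CLI-style YYYYMMDD string
current_day = np.datetime64(f"{start_date[:4]}-{start_date[4:6]}-{start_date[6:]}")
day_buffer = current_day + np.timedelta64(3, "D")
print(current_day, day_buffer)  # 2025-09-23 2025-09-26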

@@ -738,10 +778,18 @@ class Hi(ProcessInstrument):
         elif self.data_level == "l1b":
             l0_files = dependencies.get_file_paths(source="hi", descriptor="raw")
             if l0_files:
-                datasets = hi_l1b.
+                datasets = hi_l1b.housekeeping(l0_files[0])
             else:
-
-
+                l1a_de_file = dependencies.get_file_paths(
+                    source="hi", data_type="l1a", descriptor="de"
+                )[0]
+                l1b_hk_file = dependencies.get_file_paths(
+                    source="hi", data_type="l1b", descriptor="hk"
+                )[0]
+                esa_energies_csv = dependencies.get_file_paths(data_type="ancillary")[0]
+                datasets = hi_l1b.annotate_direct_events(
+                    load_cdf(l1a_de_file), load_cdf(l1b_hk_file), esa_energies_csv
+                )
         elif self.data_level == "l1c":
             science_paths = dependencies.get_file_paths(source="hi", data_type="l1b")
             if len(science_paths) != 1:

@@ -796,37 +844,41 @@ class Hit(ProcessInstrument):

         dependency_list = dependencies.processing_input
         if self.data_level == "l1a":
-            #
-            if len(dependency_list) >
+            # Two inputs - L0 and SPICE
+            if len(dependency_list) > 2:
                 raise ValueError(
                     f"Unexpected dependencies found for HIT L1A:"
-                    f"{dependency_list}. Expected only
+                    f"{dependency_list}. Expected only 2 dependencies, "
+                    f"L0 and time kernels."
                 )
             # process data to L1A products
             science_files = dependencies.get_file_paths(source="hit", descriptor="raw")
-            datasets = hit_l1a(science_files[0])
+            datasets = hit_l1a(science_files[0], self.start_date)

         elif self.data_level == "l1b":
-            data_dict = {}
-            # TODO: Sean removed the file number error handling to work with the
-            # new SPICE dependencies for SIT-4. Need to review and make changes
-            # if needed.
             l0_files = dependencies.get_file_paths(source="hit", descriptor="raw")
             l1a_files = dependencies.get_file_paths(source="hit", data_type="l1a")
-            if len(l0_files)
-                #
-
+            if len(l0_files) == 1:
+                # Path to CCSDS file to process housekeeping
+                dependency = l0_files[0]
             else:
+                # 1 science file
+                if len(l1a_files) > 1:
+                    raise ValueError(
+                        f"Unexpected dependencies found for HIT L1B:"
+                        f"{l1a_files}. Expected only one dependency."
+                    )
                 # Add L1A dataset to process science data
-
-                data_dict[l1a_dataset.attrs["Logical_source"]] = l1a_dataset
+                dependency = load_cdf(l1a_files[0])
             # process data to L1B products
-            datasets = hit_l1b(
+            datasets = [hit_l1b(dependency, self.descriptor)]
+
         elif self.data_level == "l2":
+            # 1 science files and 4 ancillary files
             if len(dependency_list) != 5:
                 raise ValueError(
                     f"Unexpected dependencies found for HIT L2:"
-                    f"{dependency_list}. Expected only
+                    f"{dependency_list}. Expected only five dependencies."
                 )
             # Add L1B dataset to process science data
             science_files = dependencies.get_file_paths(

@@ -846,7 +898,7 @@ class Hit(ProcessInstrument):
             )
             l1b_dataset = load_cdf(science_files[0])
             # process data to L2 products
-            datasets = hit_l2(l1b_dataset, ancillary_files)
+            datasets = [hit_l2(l1b_dataset, ancillary_files)]

         return datasets


@@ -895,14 +947,18 @@ class Idex(ProcessInstrument):
             dependency = load_cdf(science_files[0])
             datasets = [idex_l1b(dependency)]
         elif self.data_level == "l2a":
-            if len(dependency_list) !=
+            if len(dependency_list) != 3:
                 raise ValueError(
                     f"Unexpected dependencies found for IDEX L2A:"
-                    f"{dependency_list}. Expected
+                    f"{dependency_list}. Expected three dependencies."
                 )
             science_files = dependencies.get_file_paths(source="idex")
             dependency = load_cdf(science_files[0])
-
+            anc_paths = dependencies.get_file_paths(data_type="ancillary")
+            ancillary_files = {}
+            for path in anc_paths:
+                ancillary_files[path.stem.split("_")[2]] = path
+            datasets = [idex_l2a(dependency, ancillary_files)]
         elif self.data_level == "l2b":
             if len(dependency_list) < 3 or len(dependency_list) > 4:
                 raise ValueError(
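
The IDEX L2A branch keys its ancillary dictionary on the third underscore-separated token of each file stem, which under the `imap_<instrument>_<descriptor>_..._<version>` naming convention is the descriptor; the Ultra L1C branch further down uses the same trick. Checked against an ancillary file that ships in this release:

from pathlib import Path

path = Path("imap_lo_hydrogen-geometric-factor_v001.csv")
key = path.stem.split("_")[2]
print(key)  # hydrogen-geometric-factor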

@@ -916,16 +972,7 @@
             hk_files = dependencies.get_file_paths(source="idex", descriptor="evt")
             # Remove duplicate housekeeping files
             hk_dependencies = [load_cdf(dep) for dep in list(set(hk_files))]
-            datasets =
-        elif self.data_level == "l2c":
-            if len(dependency_list) != 1:
-                raise ValueError(
-                    f"Unexpected dependencies found for IDEX L2C:"
-                    f"{dependency_list}. Expected only one dependency."
-                )
-            sci_files = dependencies.get_file_paths(source="idex", descriptor="sci-1mo")
-            dependencies = [load_cdf(f) for f in sci_files]
-            datasets = [idex_l2c(dependencies)]
+            datasets = idex_l2b(sci_dependencies, hk_dependencies)
         return datasets


@@ -964,16 +1011,19 @@ class Lo(ProcessInstrument):
         elif self.data_level == "l1b":
             data_dict = {}
             science_files = dependencies.get_file_paths(source="lo", data_type="l1a")
+            ancillary_files = dependencies.get_file_paths(
+                source="lo", data_type="ancillary"
+            )
             logger.info(f"Science files for L1B: {science_files}")
             for file in science_files:
                 dataset = load_cdf(file)
                 data_dict[dataset.attrs["Logical_source"]] = dataset
-            datasets = lo_l1b.lo_l1b(data_dict)
+            datasets = lo_l1b.lo_l1b(data_dict, ancillary_files)

         elif self.data_level == "l1c":
             data_dict = {}
             anc_dependencies: list = dependencies.get_file_paths(
-                source="lo",
+                source="lo", data_type="ancillary"
             )
             science_files = dependencies.get_file_paths(source="lo", descriptor="de")
             for file in science_files:
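
Throughout the Lo branches, loaded CDFs are routed into `data_dict` keyed by their `Logical_source` global attribute rather than by file name. A minimal stand-in showing the shape of that mapping; the attribute value here is hypothetical:

import xarray as xr

dataset = xr.Dataset(attrs={"Logical_source": "imap_lo_l1a_de"})
data_dict = {dataset.attrs["Logical_source"]: dataset}
print(list(data_dict))  # ['imap_lo_l1a_de']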

@@ -983,17 +1033,13 @@

         elif self.data_level == "l2":
             data_dict = {}
-            # TODO: Add ancillary descriptors when maps using them are
-            # implemented.
-            anc_dependencies = dependencies.get_file_paths(
-                source="lo",
-            )
             science_files = dependencies.get_file_paths(source="lo", descriptor="pset")
-
-
-
+            anc_dependencies = dependencies.get_file_paths(data_type="ancillary")
+
+            # Load all pset files into datasets
+            psets = [load_cdf(file) for file in science_files]
             data_dict[psets[0].attrs["Logical_source"]] = psets
-            datasets = lo_l2.lo_l2(data_dict, anc_dependencies)
+            datasets = lo_l2.lo_l2(data_dict, anc_dependencies, self.descriptor)
         return datasets


@@ -1174,8 +1220,8 @@ class Spacecraft(ProcessInstrument):
         The list of processed products.
         """
         print(f"Processing Spacecraft {self.data_level}")
-
-        if self.
+        processed_dataset = []
+        if self.descriptor == "quaternions":
             # File path is expected output file path
             input_files = dependencies.get_file_paths(source="spacecraft")
             if len(input_files) > 1:

@@ -1184,26 +1230,21 @@
                     f"{input_files}. Expected only one dependency."
                 )
             datasets = list(quaternions.process_quaternions(input_files[0]))
-
-        elif self.
+            processed_dataset.extend(datasets)
+        elif self.descriptor == "pointing-attitude":
             spice_inputs = dependencies.get_file_paths(
                 data_type=SPICESource.SPICE.value
             )
             ah_paths = [path for path in spice_inputs if ".ah" in path.suffixes]
-            if len(ah_paths) != 1:
-                raise ValueError(
-                    f"Unexpected spice dependencies found for Spacecraft "
-                    f"pointing_kernel: {ah_paths}. Expected exactly one "
-                    f"attitude history file."
-                )
             pointing_kernel_paths = pointing_frame.generate_pointing_attitude_kernel(
-                ah_paths[
+                ah_paths[-1]
             )
-
+            processed_dataset.extend(pointing_kernel_paths)
         else:
             raise NotImplementedError(
                 f"Spacecraft processing not implemented for level {self.data_level}"
             )
+        return processed_dataset


 class Swapi(ProcessInstrument):
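
The pointing-attitude branch no longer insists on exactly one attitude-history kernel; it filters the SPICE inputs on an `.ah` suffix component and hands the last match to `generate_pointing_attitude_kernel`. A sketch of the selection, with invented kernel file names:

from pathlib import Path

spice_inputs = [  # hypothetical SPICE file names
    Path("imap_2025_265_01.ah.bc"),
    Path("imap_2025_266_01.ah.bc"),
    Path("imap_sclk_0042.tsc"),
]
ah_paths = [path for path in spice_inputs if ".ah" in path.suffixes]
print(ah_paths[-1])  # imap_2025_266_01.ah.bc

Whether the last list entry is actually the newest kernel depends on the ordering get_file_paths returns, which this hunk does not show.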

@@ -1407,7 +1448,10 @@ class Ultra(ProcessInstrument):
             }
             science_files = dependencies.get_file_paths(source="ultra", data_type="l1b")
             l1b_dict = {
-
+                # TODO remove
+                dataset.attrs["Logical_source"].replace(
+                    "cullingmask", "goodtimes"
+                ): dataset
                 for dataset in [load_cdf(sci_file) for sci_file in science_files]
             }
             combined = {**l1a_dict, **l1b_dict}

@@ -1416,11 +1460,12 @@
             for path in anc_paths:
                 ancillary_files[path.stem.split("_")[2]] = path
             spice_paths = dependencies.get_file_paths(data_type="spice")
-
-
+            # Only the helio pset needs IMAP frames
+            if any("imap_frames" in path.as_posix() for path in spice_paths):
+                imap_frames = True
             else:
-
-                datasets = ultra_l1c.ultra_l1c(combined, ancillary_files,
+                imap_frames = False
+            datasets = ultra_l1c.ultra_l1c(combined, ancillary_files, imap_frames)
         elif self.data_level == "l2":
             all_pset_filepaths = dependencies.get_file_paths(
                 source="ultra", descriptor="pset"
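
Per the new comment, only the helio pset needs IMAP frame kernels, and their presence is inferred purely from the SPICE path strings. The added if/else reduces to a single `any(...)` expression, shown here with a hypothetical kernel path:

from pathlib import Path

spice_paths = [Path("spice/imap_frames_0001.tf")]  # hypothetical frames kernel
imap_frames = any("imap_frames" in path.as_posix() for path in spice_paths)
print(imap_frames)  # True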

imap_processing/codice/codice_l0.py CHANGED
@@ -39,7 +39,8 @@ def decom_packets(packet_file: Path) -> dict[int, xr.Dataset]:
     # TODO: Currently need to use the 'old' packet definition for housekeeping
     # because the simulated housekeeping data being used has various
     # mis-matches from the telemetry definition. This may be updated
-    # once new simulated housekeeping data are acquired.
+    # once new simulated housekeeping data are acquired. See GitHub issue
+    # #2135.
     if "hskp" in str(packet_file):
         xtce_filename = "P_COD_NHK.xml"
     else:
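
For context on the TODO above, `decom_packets` picks its XTCE telemetry definition from the packet file name: housekeeping files (name containing "hskp") decode against the older `P_COD_NHK.xml`. A sketch with a hypothetical packet file name; the science-definition branch lies outside this hunk:

from pathlib import Path

packet_file = Path("imap_codice_l0_hskp_20241110_v001.pkts")  # hypothetical name
if "hskp" in str(packet_file):
    xtce_filename = "P_COD_NHK.xml"  # 'old' housekeeping definition (see TODO)
else:
    xtce_filename = ...  # science packet definition, not shown in this hunk
print(xtce_filename)  # P_COD_NHK.xml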