imap-processing 0.18.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +301 -274
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +28 -28
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
- imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +12 -0
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +12 -4
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +50 -7
- imap_processing/cli.py +95 -41
- imap_processing/codice/codice_l1a.py +131 -31
- imap_processing/codice/codice_l2.py +118 -10
- imap_processing/codice/constants.py +740 -595
- imap_processing/decom.py +1 -4
- imap_processing/ena_maps/ena_maps.py +32 -25
- imap_processing/ena_maps/utils/naming.py +8 -2
- imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
- imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
- imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
- imap_processing/glows/ancillary/imap_glows_pipeline_settings_20250923_v002.json +54 -0
- imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
- imap_processing/glows/l1b/glows_l1b.py +99 -9
- imap_processing/glows/l1b/glows_l1b_data.py +350 -38
- imap_processing/glows/l2/glows_l2.py +11 -0
- imap_processing/hi/hi_l1a.py +124 -3
- imap_processing/hi/hi_l1b.py +154 -71
- imap_processing/hi/hi_l2.py +84 -51
- imap_processing/hi/utils.py +153 -8
- imap_processing/hit/l0/constants.py +3 -0
- imap_processing/hit/l0/decom_hit.py +3 -6
- imap_processing/hit/l1a/hit_l1a.py +311 -21
- imap_processing/hit/l1b/hit_l1b.py +54 -126
- imap_processing/hit/l2/hit_l2.py +6 -6
- imap_processing/ialirt/calculate_ingest.py +219 -0
- imap_processing/ialirt/constants.py +12 -2
- imap_processing/ialirt/generate_coverage.py +15 -2
- imap_processing/ialirt/l0/ialirt_spice.py +5 -2
- imap_processing/ialirt/l0/parse_mag.py +293 -42
- imap_processing/ialirt/l0/process_hit.py +5 -3
- imap_processing/ialirt/l0/process_swapi.py +41 -25
- imap_processing/ialirt/process_ephemeris.py +70 -14
- imap_processing/idex/idex_l0.py +2 -2
- imap_processing/idex/idex_l1a.py +2 -3
- imap_processing/idex/idex_l1b.py +2 -3
- imap_processing/idex/idex_l2a.py +130 -4
- imap_processing/idex/idex_l2b.py +158 -143
- imap_processing/idex/idex_utils.py +1 -3
- imap_processing/lo/l0/lo_science.py +25 -24
- imap_processing/lo/l1b/lo_l1b.py +3 -3
- imap_processing/lo/l1c/lo_l1c.py +116 -50
- imap_processing/lo/l2/lo_l2.py +29 -29
- imap_processing/lo/lo_ancillary.py +55 -0
- imap_processing/mag/l1a/mag_l1a.py +1 -0
- imap_processing/mag/l1a/mag_l1a_data.py +26 -0
- imap_processing/mag/l1b/mag_l1b.py +3 -2
- imap_processing/mag/l1c/interpolation_methods.py +14 -15
- imap_processing/mag/l1c/mag_l1c.py +23 -6
- imap_processing/mag/l1d/mag_l1d.py +57 -14
- imap_processing/mag/l1d/mag_l1d_data.py +167 -30
- imap_processing/mag/l2/mag_l2_data.py +10 -2
- imap_processing/quality_flags.py +9 -1
- imap_processing/spice/geometry.py +76 -33
- imap_processing/spice/pointing_frame.py +0 -6
- imap_processing/spice/repoint.py +29 -2
- imap_processing/spice/spin.py +28 -8
- imap_processing/spice/time.py +12 -22
- imap_processing/swapi/l1/swapi_l1.py +10 -4
- imap_processing/swapi/l2/swapi_l2.py +15 -17
- imap_processing/swe/l1b/swe_l1b.py +1 -2
- imap_processing/ultra/constants.py +1 -24
- imap_processing/ultra/l0/ultra_utils.py +9 -11
- imap_processing/ultra/l1a/ultra_l1a.py +1 -2
- imap_processing/ultra/l1b/cullingmask.py +6 -3
- imap_processing/ultra/l1b/de.py +81 -23
- imap_processing/ultra/l1b/extendedspin.py +13 -10
- imap_processing/ultra/l1b/lookup_utils.py +281 -28
- imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +161 -3
- imap_processing/ultra/l1b/ultra_l1b_extended.py +253 -47
- imap_processing/ultra/l1c/helio_pset.py +97 -24
- imap_processing/ultra/l1c/l1c_lookup_utils.py +256 -0
- imap_processing/ultra/l1c/spacecraft_pset.py +83 -16
- imap_processing/ultra/l1c/ultra_l1c.py +6 -2
- imap_processing/ultra/l1c/ultra_l1c_culling.py +85 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +385 -277
- imap_processing/ultra/l2/ultra_l2.py +0 -1
- imap_processing/ultra/utils/ultra_l1_utils.py +28 -3
- imap_processing/utils.py +3 -4
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/METADATA +2 -2
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/RECORD +102 -95
- imap_processing/idex/idex_l2c.py +0 -84
- imap_processing/spice/kernels.py +0 -187
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/entry_points.txt +0 -0
imap_processing/cli.py
CHANGED

@@ -36,7 +36,10 @@ from imap_data_access.processing_input import (
 
 import imap_processing
 from imap_processing._version import __version__, __version_tuple__  # noqa: F401
-from imap_processing.ancillary.ancillary_dataset_combiner import
+from imap_processing.ancillary.ancillary_dataset_combiner import (
+    GlowsAncillaryCombiner,
+    MagAncillaryCombiner,
+)
 from imap_processing.cdf.utils import load_cdf, write_cdf
 
 # TODO: change how we import things and also folder
@@ -59,7 +62,6 @@ from imap_processing.idex.idex_l1a import PacketParser
 from imap_processing.idex.idex_l1b import idex_l1b
 from imap_processing.idex.idex_l2a import idex_l2a
 from imap_processing.idex.idex_l2b import idex_l2b
-from imap_processing.idex.idex_l2c import idex_l2c
 from imap_processing.lo.l1a import lo_l1a
 from imap_processing.lo.l1b import lo_l1b
 from imap_processing.lo.l1c import lo_l1c
@@ -379,7 +381,7 @@ class ProcessInstrument(ABC):
         data_level: str,
         data_descriptor: str,
         dependency_str: str,
-        start_date: str
+        start_date: str,
         repointing: str | None,
         version: str,
         upload_to_sdc: bool,
@@ -686,11 +688,58 @@ class Glows(ProcessInstrument):
             science_files = dependencies.get_file_paths(source="glows")
             if len(science_files) != 1:
                 raise ValueError(
-                    f"GLOWS
+                    f"GLOWS L1B requires exactly one input science file, received: "
                     f"{science_files}."
                 )
             input_dataset = load_cdf(science_files[0])
-
+            # TODO: Replace this by reading from AWS/ProcessingInputs
+
+            glows_ancillary_dir = Path(__file__).parent / "glows" / "ancillary"
+
+            # Create file lists for each ancillary type
+            excluded_regions_files = [
+                glows_ancillary_dir
+                / "imap_glows_map-of-excluded-regions_20250923_v002.dat"
+            ]
+            uv_sources_files = [
+                glows_ancillary_dir / "imap_glows_map-of-uv-sources_20250923_v002.dat"
+            ]
+            suspected_transients_files = [
+                glows_ancillary_dir
+                / "imap_glows_suspected-transients_20250923_v002.dat"
+            ]
+            exclusions_by_instr_team_files = [
+                glows_ancillary_dir
+                / "imap_glows_exclusions-by-instr-team_20250923_v002.dat"
+            ]
+
+            # Use end date buffer for ancillary data
+            current_day = np.datetime64(
+                f"{self.start_date[:4]}-{self.start_date[4:6]}-{self.start_date[6:]}"
+            )
+            day_buffer = current_day + np.timedelta64(3, "D")
+
+            # Create combiners for each ancillary dataset
+            excluded_regions_combiner = GlowsAncillaryCombiner(
+                excluded_regions_files, day_buffer
+            )
+            uv_sources_combiner = GlowsAncillaryCombiner(uv_sources_files, day_buffer)
+            suspected_transients_combiner = GlowsAncillaryCombiner(
+                suspected_transients_files, day_buffer
+            )
+            exclusions_by_instr_team_combiner = GlowsAncillaryCombiner(
+                exclusions_by_instr_team_files, day_buffer
+            )
+
+            datasets = [
+                glows_l1b(
+                    input_dataset,
+                    excluded_regions_combiner.combined_dataset,
+                    uv_sources_combiner.combined_dataset,
+                    suspected_transients_combiner.combined_dataset,
+                    exclusions_by_instr_team_combiner.combined_dataset,
+                )
+            ]
 
         if self.data_level == "l2":
             science_files = dependencies.get_file_paths(source="glows")
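Note: the day-buffer logic above slices the CLI's YYYYMMDD start_date string into an ISO date and pads it by three days. A minimal standalone sketch (the date value below is made up for illustration):

    import numpy as np

    start_date = "20250923"  # CLI-style YYYYMMDD string
    current_day = np.datetime64(f"{start_date[:4]}-{start_date[4:6]}-{start_date[6:]}")
    day_buffer = current_day + np.timedelta64(3, "D")  # numpy.datetime64('2025-09-26')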
@@ -738,10 +787,18 @@ class Hi(ProcessInstrument):
         elif self.data_level == "l1b":
             l0_files = dependencies.get_file_paths(source="hi", descriptor="raw")
             if l0_files:
-                datasets = hi_l1b.
+                datasets = hi_l1b.housekeeping(l0_files[0])
             else:
-
-
+                l1a_de_file = dependencies.get_file_paths(
+                    source="hi", data_type="l1a", descriptor="de"
+                )[0]
+                l1b_hk_file = dependencies.get_file_paths(
+                    source="hi", data_type="l1b", descriptor="hk"
+                )[0]
+                esa_energies_csv = dependencies.get_file_paths(data_type="ancillary")[0]
+                datasets = hi_l1b.annotate_direct_events(
+                    load_cdf(l1a_de_file), load_cdf(l1b_hk_file), esa_energies_csv
+                )
         elif self.data_level == "l1c":
             science_paths = dependencies.get_file_paths(source="hi", data_type="l1b")
             if len(science_paths) != 1:
@@ -796,37 +853,41 @@ class Hit(ProcessInstrument):
 
         dependency_list = dependencies.processing_input
         if self.data_level == "l1a":
-            #
-            if len(dependency_list) >
+            # Two inputs - L0 and SPICE
+            if len(dependency_list) > 2:
                 raise ValueError(
                     f"Unexpected dependencies found for HIT L1A:"
-                    f"{dependency_list}. Expected only
+                    f"{dependency_list}. Expected only 2 dependencies, "
+                    f"L0 and time kernels."
                 )
             # process data to L1A products
             science_files = dependencies.get_file_paths(source="hit", descriptor="raw")
-            datasets = hit_l1a(science_files[0])
+            datasets = hit_l1a(science_files[0], self.start_date)
 
         elif self.data_level == "l1b":
-            data_dict = {}
-            # TODO: Sean removed the file number error handling to work with the
-            # new SPICE dependencies for SIT-4. Need to review and make changes
-            # if needed.
             l0_files = dependencies.get_file_paths(source="hit", descriptor="raw")
             l1a_files = dependencies.get_file_paths(source="hit", data_type="l1a")
-            if len(l0_files)
-            #
-
+            if len(l0_files) == 1:
+                # Path to CCSDS file to process housekeeping
+                dependency = l0_files[0]
             else:
+                # 1 science file
+                if len(l1a_files) > 1:
+                    raise ValueError(
+                        f"Unexpected dependencies found for HIT L1B:"
+                        f"{l1a_files}. Expected only one dependency."
+                    )
                 # Add L1A dataset to process science data
-
-                data_dict[l1a_dataset.attrs["Logical_source"]] = l1a_dataset
+                dependency = load_cdf(l1a_files[0])
             # process data to L1B products
-            datasets = hit_l1b(
+            datasets = [hit_l1b(dependency, self.descriptor)]
+
         elif self.data_level == "l2":
+            # 1 science files and 4 ancillary files
             if len(dependency_list) != 5:
                 raise ValueError(
                     f"Unexpected dependencies found for HIT L2:"
-                    f"{dependency_list}. Expected only
+                    f"{dependency_list}. Expected only five dependencies."
                 )
             # Add L1B dataset to process science data
             science_files = dependencies.get_file_paths(
@@ -846,7 +907,7 @@ class Hit(ProcessInstrument):
             )
             l1b_dataset = load_cdf(science_files[0])
             # process data to L2 products
-            datasets = hit_l2(l1b_dataset, ancillary_files)
+            datasets = [hit_l2(l1b_dataset, ancillary_files)]
 
         return datasets
 
@@ -895,14 +956,18 @@ class Idex(ProcessInstrument):
             dependency = load_cdf(science_files[0])
             datasets = [idex_l1b(dependency)]
         elif self.data_level == "l2a":
-            if len(dependency_list) !=
+            if len(dependency_list) != 3:
                 raise ValueError(
                     f"Unexpected dependencies found for IDEX L2A:"
-                    f"{dependency_list}. Expected
+                    f"{dependency_list}. Expected three dependencies."
                 )
             science_files = dependencies.get_file_paths(source="idex")
             dependency = load_cdf(science_files[0])
-
+            anc_paths = dependencies.get_file_paths(data_type="ancillary")
+            ancillary_files = {}
+            for path in anc_paths:
+                ancillary_files[path.stem.split("_")[2]] = path
+            datasets = [idex_l2a(dependency, ancillary_files)]
         elif self.data_level == "l2b":
             if len(dependency_list) < 3 or len(dependency_list) > 4:
                 raise ValueError(
@@ -916,16 +981,7 @@ class Idex(ProcessInstrument):
             hk_files = dependencies.get_file_paths(source="idex", descriptor="evt")
             # Remove duplicate housekeeping files
             hk_dependencies = [load_cdf(dep) for dep in list(set(hk_files))]
-            datasets =
-        elif self.data_level == "l2c":
-            if len(dependency_list) != 1:
-                raise ValueError(
-                    f"Unexpected dependencies found for IDEX L2C:"
-                    f"{dependency_list}. Expected only one dependency."
-                )
-            sci_files = dependencies.get_file_paths(source="idex", descriptor="sci-1mo")
-            dependencies = [load_cdf(f) for f in sci_files]
-            datasets = [idex_l2c(dependencies)]
+            datasets = idex_l2b(sci_dependencies, hk_dependencies)
         return datasets
 
 
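Note: the L2A branch above keys each ancillary file by the third underscore-separated token of its stem, which is the descriptor under IMAP's imap_<instrument>_<descriptor>_<date>_v<version> ancillary naming convention. A minimal sketch with a hypothetical filename:

    from pathlib import Path

    # Hypothetical ancillary file following the IMAP naming pattern
    path = Path("imap_idex_tof-calibration_20250901_v001.csv")
    key = path.stem.split("_")[2]  # "tof-calibration"
    ancillary_files = {key: path}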
@@ -985,15 +1041,13 @@ class Lo(ProcessInstrument):
             data_dict = {}
             # TODO: Add ancillary descriptors when maps using them are
             # implemented.
-            anc_dependencies =
-                source="lo",
-            )
+            anc_dependencies = []
             science_files = dependencies.get_file_paths(source="lo", descriptor="pset")
             psets = []
             for file in science_files:
                 psets.append(load_cdf(file))
             data_dict[psets[0].attrs["Logical_source"]] = psets
-            datasets = lo_l2.lo_l2(data_dict, anc_dependencies)
+            datasets = lo_l2.lo_l2(data_dict, anc_dependencies, self.descriptor)
         return datasets
 
 
imap_processing/codice/codice_l1a.py
CHANGED

@@ -54,6 +54,8 @@ class CoDICEL1aPipeline:
 
     Methods
     -------
+    apply_despinning()
+        Apply the despinning algorithm to lo- angular and priority products.
     decompress_data(science_values)
         Perform decompression on the data.
     define_coordinates()
@@ -87,6 +89,73 @@ class CoDICEL1aPipeline:
         self.plan_step = plan_step
         self.view_id = view_id
 
+    def apply_despinning(self) -> None:
+        """
+        Apply the despinning algorithm to lo- angular and priority products.
+
+        This only applies to CoDICE-Lo angular and priority data products. See
+        sections 9.3.4 and 9.3.5 of the algorithm document for more details.
+        """
+        # Determine the appropriate dimensions for the despun data
+        num_energies = self.config["dims"]["esa_step"]
+        num_spin_sectors = self.config["dims"]["spin_sector"]
+        num_spins = num_spin_sectors * 2
+        num_counters = self.config["num_counters"]
+        num_positions = self.config["dims"].get(
+            "inst_az"
+        )  # Defaults to None if not present
+
+        # The dimensions are dependent on the specific data product
+        if "angular" in self.config["dataset_name"]:
+            despun_dims: tuple[int, ...] = (
+                num_energies,
+                num_positions,
+                num_spins,
+                num_counters,
+            )
+        elif "priority" in self.config["dataset_name"]:
+            despun_dims = (num_energies, num_spins, num_counters)
+
+        # Placeholder for finalized despun data
+        self.data: list[np.ndarray]  # Needed to appease mypy
+        despun_data = [np.zeros(despun_dims) for _ in range(len(self.data))]
+
+        # Iterate over the energy and spin sector indices, and determine the
+        # appropriate pixel orientation. The combination of the pixel
+        # orientation and the azimuth determine which spin sector the data
+        # gets stored in.
+        # TODO: All these nested for-loops are bad. Try to find a better
+        # solution.
+        for i, epoch_data in enumerate(self.data):
+            for energy_index in range(num_energies):
+                pixel_orientation = constants.PIXEL_ORIENTATIONS[energy_index]
+                for spin_sector_index in range(num_spin_sectors):
+                    for azimuth_index in range(num_spins):
+                        if pixel_orientation == "A" and azimuth_index < 12:
+                            despun_spin_sector = spin_sector_index
+                        elif pixel_orientation == "A" and azimuth_index >= 12:
+                            despun_spin_sector = spin_sector_index + 12
+                        elif pixel_orientation == "B" and azimuth_index < 12:
+                            despun_spin_sector = spin_sector_index + 12
+                        elif pixel_orientation == "B" and azimuth_index >= 12:
+                            despun_spin_sector = spin_sector_index
+
+                        if "angular" in self.config["dataset_name"]:
+                            spin_data = epoch_data[
+                                energy_index, :, spin_sector_index, :
+                            ]  # (5, 4)
+                            despun_data[i][energy_index, :, despun_spin_sector, :] = (
+                                spin_data
+                            )
+                        elif "priority" in self.config["dataset_name"]:
+                            spin_data = epoch_data[energy_index, spin_sector_index, :]
+                            despun_data[i][energy_index, despun_spin_sector, :] = (
+                                spin_data
+                            )
+
+        # Replace original data
+        self.data = despun_data
+
     def decompress_data(self, science_values: list[NDArray[str]] | list[str]) -> None:
         """
         Perform decompression on the data.
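Note: the despun spin-sector index in apply_despinning depends only on the pixel orientation and on which half of the spin the azimuth index falls in. A minimal standalone sketch of that mapping rule (the function name is illustrative, not from the package):

    def despun_spin_sector(
        orientation: str, azimuth_index: int, spin_sector_index: int
    ) -> int:
        # Orientation "A" keeps the first half-spin (azimuth 0-11) in place and
        # shifts the second half by 12 sectors; orientation "B" does the opposite.
        first_half = azimuth_index < 12
        if (orientation == "A") == first_half:
            return spin_sector_index
        return spin_sector_index + 12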
@@ -122,7 +191,7 @@
 
         else:
             for packet_data, byte_count in zip(
-                science_values, self.dataset.byte_count.data
+                science_values, self.dataset.byte_count.data, strict=False
             ):
                 # Convert from numpy array to byte object
                 values = packet_data[()]
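Note: strict=False (Python 3.10+) makes zip's silent-truncation behavior explicit, which is what linters such as ruff's B905 check flag. For comparison:

    pairs = list(zip([1, 2, 3], ["a", "b"], strict=False))  # [(1, 'a'), (2, 'b')]
    # list(zip([1, 2, 3], ["a", "b"], strict=True))  # would raise ValueError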
@@ -134,11 +203,14 @@
                 decompressed_values = decompress(values, compression_algorithm)
                 self.raw_data.append(decompressed_values)
 
-    def define_coordinates(self) -> None:
+    def define_coordinates(self) -> None:  # noqa: PLR0912 (too many branches)
         """
         Create ``xr.DataArrays`` for the coords needed in the final dataset.
 
         The coordinates for the dataset depend on the data product being made.
+
+        # TODO: Split this function up or simplify it to avoid too many branches
+        # error.
         """
         self.coords = {}
 
@@ -169,13 +241,18 @@
             if name in [
                 "esa_step",
                 "inst_az",
-                "spin_sector",
                 "spin_sector_pairs",
                 "spin_sector_index",
                 "ssd_index",
             ]:
                 values = np.arange(self.config["dims"][name])
                 dims = [name]
+            elif name == "spin_sector":
+                if self.config["dataset_name"] in constants.REQUIRES_DESPINNING:
+                    values = np.arange(24)
+                else:
+                    values = np.arange(self.config["dims"][name])
+                dims = [name]
             elif name == "spin_sector_pairs_label":
                 values = np.array(
                     [
@@ -197,7 +274,6 @@
                 values = np.arange(self.config["dims"]["inst_az"]).astype(str)
                 dims = ["inst_az"]
             elif name in [
-                "spin_sector_label",
                 "esa_step_label",
                 "spin_sector_index_label",
                 "ssd_index_label",
@@ -205,6 +281,13 @@
                 key = name.removesuffix("_label")
                 values = np.arange(self.config["dims"][key]).astype(str)
                 dims = [key]
+            elif name == "spin_sector_label":
+                key = name.removesuffix("_label")
+                dims = [key]
+                if self.config["dataset_name"] in constants.REQUIRES_DESPINNING:
+                    values = np.arange(24).astype(str)
+                else:
+                    values = np.arange(self.config["dims"][key]).astype(str)
 
             coord = xr.DataArray(
                 values,
@@ -243,7 +326,7 @@
         # the num_counters dimension to isolate the data for each counter so
         # each counter's data can be placed in a separate CDF data variable.
         for counter, variable_name in zip(
-            range(all_data.shape[-1]), self.config["variable_names"]
+            range(all_data.shape[-1]), self.config["variable_names"], strict=False
         ):
             # Extract the counter data
             counter_data = all_data[..., counter]
@@ -313,7 +396,7 @@
         ``xarray`` dataset for the data product, with added energy variables.
         """
         energy_bin_name = f"energy_{species}"
-        centers,
+        centers, deltas_minus, deltas_plus = self.get_hi_energy_table_data(
             energy_bin_name.split("energy_")[-1]
         )
 
@@ -326,11 +409,19 @@
                 check_schema=False,
             ),
         )
-        dataset[f"{energy_bin_name}
-
-            dims=[f"{energy_bin_name}
+        dataset[f"{energy_bin_name}_minus"] = xr.DataArray(
+            deltas_minus,
+            dims=[f"{energy_bin_name}_minus"],
+            attrs=self.cdf_attrs.get_variable_attributes(
+                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}_minus",
+                check_schema=False,
+            ),
+        )
+        dataset[f"{energy_bin_name}_plus"] = xr.DataArray(
+            deltas_plus,
+            dims=[f"{energy_bin_name}_plus"],
             attrs=self.cdf_attrs.get_variable_attributes(
-                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}
+                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}_plus",
                 check_schema=False,
             ),
         )
@@ -488,7 +579,7 @@
 
     def get_hi_energy_table_data(
         self, species: str
-    ) -> tuple[NDArray[float], NDArray[float]]:
+    ) -> tuple[NDArray[float], NDArray[float], NDArray[float]]:
         """
         Retrieve energy table data for CoDICE-Hi products.
 
@@ -506,22 +597,25 @@
         -------
         centers : NDArray[float]
             An array whose values represent the centers of the energy bins.
-
-            An array whose values represent the deltas of the energy bins.
+        deltas_minus : NDArray[float]
+            An array whose values represent the minus deltas of the energy bins.
+        deltas_plus : NDArray[float]
+            An array whose values represent the plus deltas of the energy bins.
         """
         data_product = self.config["dataset_name"].split("-")[-1].upper()
-        energy_table =
-
-        # Find the centers and deltas of the energy bins
-        centers = np.array(
-            [
-                (energy_table[i] + energy_table[i + 1]) / 2
-                for i in range(len(energy_table) - 1)
-            ]
+        energy_table = np.array(
+            getattr(constants, f"{data_product}_ENERGY_TABLE")[species]
         )
-        deltas = energy_table[1:] - centers
 
-
+        # Find the geometric centers and deltas of the energy bins
+        # The delta minus is the difference between the center of the bin
+        # and the 'left edge' of the bin. The delta plus is the difference
+        # between the 'right edge' of the bin and the center of the bin
+        centers = np.sqrt(energy_table[:-1] * energy_table[1:])
+        deltas_minus = centers - energy_table[:-1]
+        deltas_plus = energy_table[1:] - centers
+
+        return centers, deltas_minus, deltas_plus
 
     def reshape_binned_data(self, dataset: xr.Dataset) -> dict[str, list]:
         """
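Note: the switch from arithmetic midpoints to geometric means gives bin centers that are symmetric in log space, which suits logarithmically spaced energy tables, and makes the deltas asymmetric. A worked example with made-up bin edges:

    import numpy as np

    energy_table = np.array([10.0, 20.0, 40.0])              # hypothetical bin edges
    centers = np.sqrt(energy_table[:-1] * energy_table[1:])  # [14.142..., 28.284...]
    deltas_minus = centers - energy_table[:-1]               # [4.142..., 8.284...]
    deltas_plus = energy_table[1:] - centers                 # [5.857..., 11.715...]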
@@ -624,6 +718,10 @@
             )
             self.data.append(reshaped_packet_data)
 
+        # Apply despinning if necessary
+        if self.config["dataset_name"] in constants.REQUIRES_DESPINNING:
+            self.apply_despinning()
+
         # No longer need to keep the raw data around
         del self.raw_data
 
@@ -955,9 +1053,9 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     # Create the CDF data variables for each Priority and Field
     for i in range(constants.DE_DATA_PRODUCT_CONFIGURATIONS[apid]["num_priorities"]):
        for field in constants.DE_DATA_PRODUCT_CONFIGURATIONS[apid]["cdf_fields"]:
-            variable_name = f"
+            variable_name = f"p{i}_{field}"
             attrs = cdf_attrs.get_variable_attributes(variable_name)
-            if field in ["
+            if field in ["num_events", "data_quality"]:
                 dims = ["epoch"]
             else:
                 dims = ["epoch", "event_num"]
@@ -1415,13 +1513,15 @@ def reshape_de_data(
     for priority_num in range(num_priorities):
         for field in bit_structure:
             if field not in ["Priority", "Spare"]:
-                data[f"
+                data[f"p{priority_num}_{field}"] = np.full(
                     (num_epochs, 10000),
                     bit_structure[field]["fillval"],
                     dtype=bit_structure[field]["dtype"],
                 )
-        data[f"
-
+        data[f"p{priority_num}_num_events"] = np.full(
+            num_epochs, 65535, dtype=np.uint16
+        )
+        data[f"p{priority_num}_data_quality"] = np.full(num_epochs, 255, dtype=np.uint8)
 
     # decompressed_data is one large list of values of length
     # (<number of epochs> * <number of priorities>)
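Note: the fill values 65535 and 255 are the maxima of the chosen unsigned dtypes, a common fill-value convention for CDF variables; they could equivalently be derived rather than hard-coded:

    import numpy as np

    np.iinfo(np.uint16).max  # 65535, used for p*_num_events
    np.iinfo(np.uint8).max   # 255, used for p*_data_quality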
@@ -1445,8 +1545,8 @@
 
             # Number of events and data quality can be determined at this stage
             num_events = num_events_arr[epoch_start:epoch_end][i]
-            data[f"
-            data[f"
+            data[f"p{priority_num}_num_events"][epoch_index] = num_events
+            data[f"p{priority_num}_data_quality"][epoch_index] = data_quality[i]
 
             # Iterate over each event
             for event_index in range(num_events):
@@ -1477,7 +1577,7 @@
                     )
 
                     # Set the value into the data array
-                    data[f"
+                    data[f"p{priority_num}_{field_name}"][epoch_index, event_index] = (
                         value
                     )
                     bit_position += field_components["bit_length"]
imap_processing/codice/codice_l2.py
CHANGED

@@ -52,6 +52,105 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
 
     # Get the L2 CDF attributes
     cdf_attrs = ImapCdfAttributes()
+    l2_dataset = add_dataset_attributes(l2_dataset, dataset_name, cdf_attrs)
+
+    if dataset_name in [
+        "imap_codice_l2_hi-counters-singles",
+        "imap_codice_l2_hi-counters-aggregated",
+        "imap_codice_l2_lo-counters-singles",
+        "imap_codice_l2_lo-counters-aggregated",
+        "imap_codice_l2_lo-sw-priority",
+        "imap_codice_l2_lo-nsw-priority",
+    ]:
+        # No changes needed. Just save to an L2 CDF file.
+        pass
+
+    elif dataset_name == "imap_codice_l2_hi-direct-events":
+        # Convert the following data variables to physical units using
+        # calibration data:
+        # - ssd_energy
+        # - tof
+        # - elevation_angle
+        # - spin_angle
+        # These converted variables are *in addition* to the existing L1 variables
+        # The other data variables require no changes
+        # See section 11.1.2 of algorithm document
+        pass
+
+    elif dataset_name == "imap_codice_l2_hi-sectored":
+        # Convert the sectored count rates using equation described in section
+        # 11.1.3 of algorithm document.
+        pass
+
+    elif dataset_name == "imap_codice_l2_hi-omni":
+        # Calculate the omni-directional intensity for each species using
+        # equation described in section 11.1.4 of algorithm document
+        # hopefully this can also apply to hi-ialirt
+        pass
+
+    elif dataset_name == "imap_codice_l2_lo-direct-events":
+        # Convert the following data variables to physical units using
+        # calibration data:
+        # - apd_energy
+        # - elevation_angle
+        # - tof
+        # - spin_sector
+        # - esa_step
+        # These converted variables are *in addition* to the existing L1 variables
+        # The other data variables require no changes
+        # See section 11.1.2 of algorithm document
+        pass
+
+    elif dataset_name == "imap_codice_l2_lo-sw-angular":
+        # Calculate the sunward angular intensities using equation described in
+        # section 11.2.3 of algorithm document.
+        pass
+
+    elif dataset_name == "imap_codice_l2_lo-nsw-angular":
+        # Calculate the non-sunward angular intensities using equation described
+        # in section 11.2.3 of algorithm document.
+        pass
+
+    elif dataset_name == "imap_codice_l2_lo-sw-species":
+        # Calculate the sunward solar wind species intensities using equation
+        # described in section 11.2.4 of algorithm document.
+        # Calculate the pickup ion sunward solar wind intensities using equation
+        # described in section 11.2.4 of algorithm document.
+        # Hopefully this can also apply to lo-ialirt
+        pass
+
+    elif dataset_name == "imap_codice_l2_lo-nsw-species":
+        # Calculate the non-sunward solar wind species intensities using
+        # equation described in section 11.2.4 of algorithm document.
+        # Calculate the pickup ion non-sunward solar wind intensities using
+        # equation described in section 11.2.4 of algorithm document.
+        pass
+
+    logger.info(f"\nFinal data product:\n{l2_dataset}\n")
+
+    return l2_dataset
+
+
+def add_dataset_attributes(
+    l2_dataset: xr.Dataset, dataset_name: str, cdf_attrs: ImapCdfAttributes
+) -> xr.Dataset:
+    """
+    Add the global and variable attributes to the dataset.
+
+    Parameters
+    ----------
+    l2_dataset : xarray.Dataset
+        The dataset to update.
+    dataset_name : str
+        The name of the dataset.
+    cdf_attrs : ImapCdfAttributes
+        The attribute manager for CDF attributes.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated dataset.
+    """
     cdf_attrs.add_instrument_global_attrs("codice")
     cdf_attrs.add_instrument_variable_attrs("codice", "l2")
 
@@ -59,14 +158,23 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
     l2_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
 
     # Set the variable attributes
-    for variable_name in l2_dataset:
-
-        variable_name
-
-
-
-
-
-
+    for variable_name in l2_dataset.data_vars.keys():
+        try:
+            l2_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
+                variable_name, check_schema=False
+            )
+        except KeyError:
+            # Some variables may have a product descriptor prefix in the
+            # cdf attributes key if they are common to multiple products.
+            descriptor = dataset_name.split("imap_codice_l2_")[-1]
+            cdf_attrs_key = f"{descriptor}-{variable_name}"
+            try:
+                l2_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
+                    f"{cdf_attrs_key}", check_schema=False
+                )
+            except KeyError:
+                logger.error(
+                    f"Field '{variable_name}' and '{cdf_attrs_key}' not found in "
+                    f"attribute manager."
+                )
     return l2_dataset
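Note: the fallback lookup above prefixes the variable name with the product descriptor when the plain key is missing. A minimal illustration of the key construction (the variable name is hypothetical):

    dataset_name = "imap_codice_l2_lo-sw-species"
    variable_name = "energy_h"  # hypothetical variable shared across products
    descriptor = dataset_name.split("imap_codice_l2_")[-1]  # "lo-sw-species"
    cdf_attrs_key = f"{descriptor}-{variable_name}"         # "lo-sw-species-energy_h"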