imap-processing 0.11.0__py3-none-any.whl → 0.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/__init__.py +10 -11
- imap_processing/_version.py +2 -2
- imap_processing/ccsds/excel_to_xtce.py +65 -16
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -28
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +365 -42
- imap_processing/cdf/config/imap_glows_global_cdf_attrs.yaml +0 -5
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +10 -11
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +17 -19
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +26 -13
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +106 -116
- imap_processing/cdf/config/imap_hit_l1b_variable_attrs.yaml +120 -145
- imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +14 -0
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +6 -9
- imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +0 -12
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +9 -21
- imap_processing/cdf/config/imap_mag_l1a_variable_attrs.yaml +361 -0
- imap_processing/cdf/config/imap_mag_l1b_variable_attrs.yaml +160 -0
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +160 -0
- imap_processing/cdf/config/imap_spacecraft_global_cdf_attrs.yaml +18 -0
- imap_processing/cdf/config/imap_spacecraft_variable_attrs.yaml +40 -0
- imap_processing/cdf/config/imap_swapi_global_cdf_attrs.yaml +1 -5
- imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +12 -4
- imap_processing/cdf/config/imap_swe_l1a_variable_attrs.yaml +16 -2
- imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +48 -52
- imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml +71 -47
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +2 -14
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +51 -2
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +29 -14
- imap_processing/cdf/utils.py +13 -7
- imap_processing/cli.py +23 -8
- imap_processing/codice/codice_l1a.py +207 -85
- imap_processing/codice/constants.py +1322 -568
- imap_processing/codice/decompress.py +2 -6
- imap_processing/ena_maps/ena_maps.py +480 -116
- imap_processing/ena_maps/utils/coordinates.py +19 -0
- imap_processing/ena_maps/utils/map_utils.py +14 -17
- imap_processing/ena_maps/utils/spatial_utils.py +45 -47
- imap_processing/hi/l1a/hi_l1a.py +24 -18
- imap_processing/hi/l1a/histogram.py +0 -1
- imap_processing/hi/l1a/science_direct_event.py +6 -8
- imap_processing/hi/l1b/hi_l1b.py +31 -39
- imap_processing/hi/l1c/hi_l1c.py +405 -17
- imap_processing/hi/utils.py +58 -12
- imap_processing/hit/ancillary/imap_hit_l1b-to-l2-standard-dt0-factors_20250219_v002.csv +205 -0
- imap_processing/hit/ancillary/imap_hit_l1b-to-l2-standard-dt1-factors_20250219_v002.csv +205 -0
- imap_processing/hit/ancillary/imap_hit_l1b-to-l2-standard-dt2-factors_20250219_v002.csv +205 -0
- imap_processing/hit/ancillary/imap_hit_l1b-to-l2-standard-dt3-factors_20250219_v002.csv +205 -0
- imap_processing/hit/ancillary/imap_hit_l1b-to-l2-summed-dt0-factors_20250219_v002.csv +68 -0
- imap_processing/hit/hit_utils.py +173 -1
- imap_processing/hit/l0/constants.py +20 -11
- imap_processing/hit/l0/decom_hit.py +18 -4
- imap_processing/hit/l1a/hit_l1a.py +45 -54
- imap_processing/hit/l1b/constants.py +317 -0
- imap_processing/hit/l1b/hit_l1b.py +367 -18
- imap_processing/hit/l2/constants.py +281 -0
- imap_processing/hit/l2/hit_l2.py +614 -0
- imap_processing/hit/packet_definitions/hit_packet_definitions.xml +1323 -71
- imap_processing/ialirt/l0/mag_l0_ialirt_data.py +155 -0
- imap_processing/ialirt/l0/parse_mag.py +246 -0
- imap_processing/ialirt/l0/process_swe.py +252 -0
- imap_processing/ialirt/packet_definitions/ialirt.xml +7 -3
- imap_processing/ialirt/packet_definitions/ialirt_mag.xml +115 -0
- imap_processing/ialirt/utils/grouping.py +114 -0
- imap_processing/ialirt/utils/time.py +29 -0
- imap_processing/idex/atomic_masses.csv +22 -0
- imap_processing/idex/decode.py +2 -2
- imap_processing/idex/idex_constants.py +25 -0
- imap_processing/idex/idex_l1a.py +6 -7
- imap_processing/idex/idex_l1b.py +4 -31
- imap_processing/idex/idex_l2a.py +789 -0
- imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +39 -33
- imap_processing/lo/l0/lo_science.py +6 -0
- imap_processing/lo/l1a/lo_l1a.py +0 -1
- imap_processing/lo/l1b/lo_l1b.py +177 -25
- imap_processing/mag/constants.py +8 -0
- imap_processing/mag/imap_mag_sdc-configuration_v001.yaml +6 -0
- imap_processing/mag/l0/decom_mag.py +10 -3
- imap_processing/mag/l1a/mag_l1a.py +22 -11
- imap_processing/mag/l1a/mag_l1a_data.py +28 -3
- imap_processing/mag/l1b/mag_l1b.py +190 -48
- imap_processing/mag/l1c/interpolation_methods.py +211 -0
- imap_processing/mag/l1c/mag_l1c.py +447 -9
- imap_processing/quality_flags.py +1 -0
- imap_processing/spacecraft/packet_definitions/scid_x252.xml +538 -0
- imap_processing/spacecraft/quaternions.py +123 -0
- imap_processing/spice/geometry.py +16 -19
- imap_processing/spice/repoint.py +120 -0
- imap_processing/swapi/l1/swapi_l1.py +4 -0
- imap_processing/swapi/l2/swapi_l2.py +0 -1
- imap_processing/swe/l1a/swe_l1a.py +47 -8
- imap_processing/swe/l1a/swe_science.py +5 -2
- imap_processing/swe/l1b/swe_l1b_science.py +103 -56
- imap_processing/swe/l2/swe_l2.py +60 -65
- imap_processing/swe/packet_definitions/swe_packet_definition.xml +1121 -1
- imap_processing/swe/utils/swe_constants.py +63 -0
- imap_processing/swe/utils/swe_utils.py +85 -28
- imap_processing/tests/ccsds/test_data/expected_output.xml +40 -1
- imap_processing/tests/ccsds/test_excel_to_xtce.py +23 -20
- imap_processing/tests/cdf/test_data/imap_instrument2_global_cdf_attrs.yaml +0 -2
- imap_processing/tests/codice/conftest.py +1 -1
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-counters-aggregated_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-counters-singles_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-ialirt_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-omni_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-pha_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-priorities_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-sectored_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-counters-aggregated_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-counters-singles_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-nsw-angular_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-nsw-priority_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-nsw-species_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-pha_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-sw-angular_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-sw-priority_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-sw-species_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/test_codice_l1a.py +110 -46
- imap_processing/tests/codice/test_decompress.py +4 -4
- imap_processing/tests/conftest.py +166 -10
- imap_processing/tests/ena_maps/conftest.py +51 -0
- imap_processing/tests/ena_maps/test_ena_maps.py +638 -109
- imap_processing/tests/ena_maps/test_map_utils.py +66 -43
- imap_processing/tests/ena_maps/test_spatial_utils.py +16 -20
- imap_processing/tests/hi/data/l0/H45_diag_fee_20250208.bin +0 -0
- imap_processing/tests/hi/data/l0/H45_diag_fee_20250208_verify.csv +205 -0
- imap_processing/tests/hi/test_hi_l1b.py +12 -15
- imap_processing/tests/hi/test_hi_l1c.py +234 -6
- imap_processing/tests/hi/test_l1a.py +30 -0
- imap_processing/tests/hi/test_science_direct_event.py +1 -1
- imap_processing/tests/hi/test_utils.py +24 -2
- imap_processing/tests/hit/helpers/l1_validation.py +39 -39
- imap_processing/tests/hit/test_data/hskp_sample.ccsds +0 -0
- imap_processing/tests/hit/test_data/imap_hit_l0_raw_20100105_v001.pkts +0 -0
- imap_processing/tests/hit/test_decom_hit.py +4 -0
- imap_processing/tests/hit/test_hit_l1a.py +24 -28
- imap_processing/tests/hit/test_hit_l1b.py +304 -40
- imap_processing/tests/hit/test_hit_l2.py +454 -0
- imap_processing/tests/hit/test_hit_utils.py +112 -2
- imap_processing/tests/hit/validation_data/hskp_sample_eu_3_6_2025.csv +89 -0
- imap_processing/tests/hit/validation_data/hskp_sample_raw.csv +89 -88
- imap_processing/tests/ialirt/test_data/l0/461971383-404.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971384-405.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971385-406.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971386-407.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971387-408.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971388-409.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971389-410.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971390-411.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/461971391-412.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/sample_decoded_i-alirt_data.csv +383 -0
- imap_processing/tests/ialirt/unit/test_grouping.py +81 -0
- imap_processing/tests/ialirt/unit/test_parse_mag.py +168 -0
- imap_processing/tests/ialirt/unit/test_process_swe.py +208 -3
- imap_processing/tests/ialirt/unit/test_time.py +16 -0
- imap_processing/tests/idex/conftest.py +62 -6
- imap_processing/tests/idex/test_data/imap_idex_l0_raw_20231218_v001.pkts +0 -0
- imap_processing/tests/idex/test_data/impact_14_tof_high_data.txt +4508 -4508
- imap_processing/tests/idex/test_idex_l1a.py +48 -4
- imap_processing/tests/idex/test_idex_l1b.py +3 -3
- imap_processing/tests/idex/test_idex_l2a.py +383 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1a_de_20241022_v002.cdf +0 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1a_spin_20241022_v002.cdf +0 -0
- imap_processing/tests/lo/test_lo_l1b.py +148 -4
- imap_processing/tests/lo/test_lo_science.py +1 -0
- imap_processing/tests/mag/conftest.py +69 -0
- imap_processing/tests/mag/test_mag_decom.py +1 -1
- imap_processing/tests/mag/test_mag_l1a.py +38 -0
- imap_processing/tests/mag/test_mag_l1b.py +34 -53
- imap_processing/tests/mag/test_mag_l1c.py +251 -20
- imap_processing/tests/mag/test_mag_validation.py +109 -25
- imap_processing/tests/mag/validation/L1b/T009/MAGScience-normal-(2,2)-8s-20250204-16h39.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T009/mag-l1a-l1b-t009-magi-out.csv +16 -16
- imap_processing/tests/mag/validation/L1b/T009/mag-l1a-l1b-t009-mago-out.csv +16 -16
- imap_processing/tests/mag/validation/L1b/T010/MAGScience-normal-(2,2)-8s-20250206-12h05.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T011/MAGScience-normal-(2,2)-8s-20250204-16h08.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T011/mag-l1a-l1b-t011-magi-out.csv +16 -16
- imap_processing/tests/mag/validation/L1b/T011/mag-l1a-l1b-t011-mago-out.csv +16 -16
- imap_processing/tests/mag/validation/L1b/T012/MAGScience-normal-(2,2)-8s-20250204-16h08.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T012/data.bin +0 -0
- imap_processing/tests/mag/validation/L1b/T012/field_like_all_ranges.txt +19200 -0
- imap_processing/tests/mag/validation/L1b/T012/mag-l1a-l1b-t012-cal.cdf +0 -0
- imap_processing/tests/mag/validation/L1b/T012/mag-l1a-l1b-t012-in.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T012/mag-l1a-l1b-t012-magi-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T012/mag-l1a-l1b-t012-mago-out.csv +17 -0
- imap_processing/tests/mag/validation/imap_calibration_mag_20240229_v01.cdf +0 -0
- imap_processing/tests/spacecraft/__init__.py +0 -0
- imap_processing/tests/spacecraft/data/SSR_2024_190_20_08_12_0483851794_2_DA_apid0594_1packet.pkts +0 -0
- imap_processing/tests/spacecraft/test_quaternions.py +71 -0
- imap_processing/tests/spice/test_data/fake_repoint_data.csv +5 -0
- imap_processing/tests/spice/test_geometry.py +6 -9
- imap_processing/tests/spice/test_repoint.py +111 -0
- imap_processing/tests/swapi/test_swapi_l1.py +7 -3
- imap_processing/tests/swe/l0_data/2024051010_SWE_HK_packet.bin +0 -0
- imap_processing/tests/swe/l0_data/2024051011_SWE_CEM_RAW_packet.bin +0 -0
- imap_processing/tests/swe/l0_validation_data/idle_export_eu.SWE_APP_HK_20240510_092742.csv +49 -0
- imap_processing/tests/swe/l0_validation_data/idle_export_eu.SWE_CEM_RAW_20240510_092742.csv +593 -0
- imap_processing/tests/swe/test_swe_l1a.py +18 -0
- imap_processing/tests/swe/test_swe_l1a_cem_raw.py +52 -0
- imap_processing/tests/swe/test_swe_l1a_hk.py +68 -0
- imap_processing/tests/swe/test_swe_l1b_science.py +23 -4
- imap_processing/tests/swe/test_swe_l2.py +112 -30
- imap_processing/tests/test_cli.py +2 -2
- imap_processing/tests/test_utils.py +138 -16
- imap_processing/tests/ultra/data/l0/FM45_UltraFM45_Functional_2024-01-22T0105_20240122T010548.CCSDS +0 -0
- imap_processing/tests/ultra/data/l0/ultra45_raw_sc_ultraimgrates_20220530_00.csv +164 -0
- imap_processing/tests/ultra/{test_data → data}/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3243 -3243
- imap_processing/tests/ultra/data/mock_data.py +341 -0
- imap_processing/tests/ultra/unit/conftest.py +69 -26
- imap_processing/tests/ultra/unit/test_badtimes.py +2 -0
- imap_processing/tests/ultra/unit/test_cullingmask.py +4 -0
- imap_processing/tests/ultra/unit/test_de.py +12 -4
- imap_processing/tests/ultra/unit/test_decom_apid_881.py +44 -0
- imap_processing/tests/ultra/unit/test_spacecraft_pset.py +78 -0
- imap_processing/tests/ultra/unit/test_ultra_l1a.py +28 -12
- imap_processing/tests/ultra/unit/test_ultra_l1b.py +34 -6
- imap_processing/tests/ultra/unit/test_ultra_l1b_culling.py +22 -26
- imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +86 -51
- imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +94 -52
- imap_processing/ultra/l0/decom_tools.py +6 -5
- imap_processing/ultra/l1a/ultra_l1a.py +28 -56
- imap_processing/ultra/l1b/de.py +72 -28
- imap_processing/ultra/l1b/extendedspin.py +12 -14
- imap_processing/ultra/l1b/ultra_l1b.py +34 -9
- imap_processing/ultra/l1b/ultra_l1b_culling.py +65 -29
- imap_processing/ultra/l1b/ultra_l1b_extended.py +64 -19
- imap_processing/ultra/l1c/spacecraft_pset.py +86 -0
- imap_processing/ultra/l1c/ultra_l1c.py +7 -4
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +112 -61
- imap_processing/ultra/lookup_tables/ultra_90_dps_exposure_compressed.cdf +0 -0
- imap_processing/ultra/utils/ultra_l1_utils.py +20 -2
- imap_processing/utils.py +68 -28
- {imap_processing-0.11.0.dist-info → imap_processing-0.12.0.dist-info}/METADATA +8 -5
- {imap_processing-0.11.0.dist-info → imap_processing-0.12.0.dist-info}/RECORD +250 -199
- imap_processing/cdf/config/imap_mag_l1_variable_attrs.yaml +0 -237
- imap_processing/hi/l1a/housekeeping.py +0 -27
- imap_processing/tests/codice/data/imap_codice_l1a_hi-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-omni_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-sectored_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hskp_20100101_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-nsw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-nsw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-nsw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-sw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-sw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-sw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-omni_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-sectored_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hskp_20100101_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-nsw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-nsw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-nsw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-sw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-sw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-sw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/hi/data/l1/imap_hi_l1b_45sensor-de_20250415_v999.cdf +0 -0
- imap_processing/tests/hit/PREFLIGHT_raw_record_2023_256_15_59_04_apid1251.pkts +0 -0
- imap_processing/tests/hit/PREFLIGHT_raw_record_2023_256_15_59_04_apid1252.pkts +0 -0
- imap_processing/tests/hit/validation_data/hskp_sample_eu.csv +0 -89
- imap_processing/tests/hit/validation_data/sci_sample_raw1.csv +0 -29
- imap_processing/tests/idex/test_data/imap_idex_l0_raw_20231214_v001.pkts +0 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1a_de_20100101_v001.cdf +0 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1a_spin_20100101_v001.cdf +0 -0
- imap_processing/tests/ultra/test_data/mock_data.py +0 -161
- imap_processing/ultra/l1c/pset.py +0 -40
- /imap_processing/tests/ultra/{test_data → data}/l0/FM45_40P_Phi28p5_BeamCal_LinearScan_phi28.50_theta-0.00_20240207T102740.CCSDS +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/FM45_7P_Phi0.0_BeamCal_LinearScan_phi0.04_theta-0.01_20230821T121304.CCSDS +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/FM45_TV_Cycle6_Hot_Ops_Front212_20240124T063837.CCSDS +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/Ultra45_EM_SwRI_Cal_Run7_ThetaScan_20220530T225054.CCSDS +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/ultra45_raw_sc_auxdata_Ultra45_EM_SwRI_Cal_Run7_ThetaScan_20220530T225054.csv +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/ultra45_raw_sc_enaphxtofhangimg_FM45_TV_Cycle6_Hot_Ops_Front212_20240124T063837.csv +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/ultra45_raw_sc_ultraimgrates_Ultra45_EM_SwRI_Cal_Run7_ThetaScan_20220530T225054.csv +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l0/ultra45_raw_sc_ultrarawimgevent_FM45_7P_Phi00_BeamCal_LinearScan_phi004_theta-001_20230821T121304.csv +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l1/dps_exposure_helio_45_E1.cdf +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l1/dps_exposure_helio_45_E12.cdf +0 -0
- /imap_processing/tests/ultra/{test_data → data}/l1/dps_exposure_helio_45_E24.cdf +0 -0
- {imap_processing-0.11.0.dist-info → imap_processing-0.12.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.11.0.dist-info → imap_processing-0.12.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.11.0.dist-info → imap_processing-0.12.0.dist-info}/entry_points.txt +0 -0
imap_processing/hi/l1c/hi_l1c.py
CHANGED
@@ -4,21 +4,41 @@ from __future__ import annotations
 
 import logging
 from pathlib import Path
+from typing import NamedTuple
 
 import numpy as np
 import pandas as pd
 import xarray as xr
+from numpy import typing as npt
+from numpy._typing import NDArray
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import parse_filename_like
-from imap_processing.hi.
+from imap_processing.hi.l1a.science_direct_event import (
+    DE_CLOCK_TICK_S,
+    HALF_CLOCK_TICK_S,
+)
+from imap_processing.hi.utils import (
+    CoincidenceBitmap,
+    create_dataset_variables,
+    full_dataarray,
+    parse_sensor_number,
+)
 from imap_processing.spice.geometry import (
     SpiceFrame,
     frame_transform,
     frame_transform_az_el,
 )
+from imap_processing.spice.spin import (
+    get_instrument_spin_phase,
+    get_spin_data,
+)
 from imap_processing.spice.time import ttj2000ns_to_et
 
+N_SPIN_BINS = 3600
+SPIN_PHASE_BIN_EDGES = np.linspace(0, 1, N_SPIN_BINS + 1)
+SPIN_PHASE_BIN_CENTERS = (SPIN_PHASE_BIN_EDGES[:-1] + SPIN_PHASE_BIN_EDGES[1:]) / 2
+
 logger = logging.getLogger(__name__)
 
 
@@ -100,6 +120,10 @@ def generate_pset_dataset(
     # Calculate and add despun_z, hae_latitude, and hae_longitude variables to
     # the pset_dataset
     pset_dataset.update(pset_geometry(pset_et, logical_source_parts["sensor"]))
+    # Bin the counts into the spin-bins
+    pset_dataset.update(pset_counts(pset_dataset.coords, config_df, de_dataset))
+    # Calculate and add the exposure time to the pset_dataset
+    pset_dataset.update(pset_exposure(pset_dataset.coords, de_dataset))
 
     # TODO: The following section will go away as PSET algorithms to populate
     # these variables are written.
@@ -107,8 +131,6 @@
     attr_mgr.add_instrument_global_attrs("hi")
     attr_mgr.add_instrument_variable_attrs(instrument="hi", level=None)
     for var_name in [
-        "counts",
-        "exposure_times",
         "background_rates",
         "background_rates_uncertainty",
     ]:
@@ -188,7 +210,7 @@ def empty_pset_dataset(
     ).copy()
     dtype = attrs.pop("dtype")
     coords["spin_angle_bin"] = xr.DataArray(
-        np.arange(
+        np.arange(N_SPIN_BINS, dtype=dtype),
         name="spin_angle_bin",
         dims=["spin_angle_bin"],
         attrs=attrs,
@@ -271,8 +293,8 @@ def pset_geometry(pset_et: float, sensor_str: str) -> dict[str, xr.DataArray]:
     el = 0 if "90" in sensor_str else -45
     dps_az_el = np.array(
         [
-
-            np.full(
+            SPIN_PHASE_BIN_CENTERS * 360,
+            np.full(N_SPIN_BINS, el),
         ]
     ).T
     hae_az_el = frame_transform_az_el(
@@ -282,7 +304,7 @@ def pset_geometry(pset_et: float, sensor_str: str) -> dict[str, xr.DataArray]:
     geometry_vars.update(
         create_dataset_variables(
             ["hae_latitude", "hae_longitude"],
-            (1,
+            (1, N_SPIN_BINS),
             att_manager_lookup_str="hi_pset_{0}",
         )
     )
@@ -295,6 +317,358 @@ def pset_geometry(pset_et: float, sensor_str: str) -> dict[str, xr.DataArray]:
     return geometry_vars
 
 
+def pset_counts(
+    pset_coords: dict[str, xr.DataArray],
+    config_df: pd.DataFrame,
+    l1b_de_dataset: xr.Dataset,
+) -> dict[str, xr.DataArray]:
+    """
+    Bin direct events into PSET spin-bins.
+
+    Parameters
+    ----------
+    pset_coords : dict[str, xr.DataArray]
+        The PSET coordinates from the xr.Dataset.
+    config_df : pd.DataFrame
+        The calibration product configuration dataframe.
+    l1b_de_dataset : xr.Dataset
+        The L1B dataset for the pointing being processed.
+
+    Returns
+    -------
+    dict[str, xr.DataArray]
+        Dictionary containing new exposure_times DataArray to be added to the PSET
+        dataset.
+    """
+    # Generate exposure time variable filled with zeros
+    counts_var = create_dataset_variables(
+        ["counts"],
+        coords=pset_coords,
+        att_manager_lookup_str="hi_pset_{0}",
+        fill_value=0,
+    )
+
+    # Convert list of DEs to pandas dataframe for ease indexing/filtering
+    de_df = l1b_de_dataset.drop_dims("epoch").to_pandas()
+
+    # Remove DEs not in Goodtimes/angles
+    good_mask = good_time_and_phase_mask(
+        l1b_de_dataset.event_met.values, l1b_de_dataset.spin_phase.values
+    )
+    de_df = de_df[good_mask]
+
+    # The calibration product configuration potentially has different coincidence
+    # types for each ESA and different TOF windows for each calibration product,
+    # esa energy step combination. Because of this we need to filter DEs that
+    # belong to each combo individually.
+    # Loop over the esa_energy_step values first
+    for esa_energy, esa_df in config_df.groupby(level="esa_energy_step"):
+        # Create a mask for all DEs at the current esa_energy_step.
+        # esa_energy_step is recorded for each packet rather than for each DE,
+        # so we use ccsds_index to get the esa_energy_step for each DE
+        esa_mask = (
+            l1b_de_dataset["esa_energy_step"].data[de_df["ccsds_index"].to_numpy()]
+            == esa_energy
+        )
+        # Now loop over the calibration products for the current ESA energy
+        for config_row in esa_df.itertuples():
+            # Remove DEs that are not at the current ESA energy and in the list
+            # of coincidence types for the current calibration product
+            type_mask = de_df["coincidence_type"].isin(
+                config_row.coincidence_type_values
+            )
+            filtered_de_df = de_df[(esa_mask & type_mask)]
+
+            # Use the TOF window mask to remove DEs with TOFs outside the allowed range
+            tof_fill_vals = {
+                f"tof_{detector_pair}": l1b_de_dataset[f"tof_{detector_pair}"].attrs[
+                    "FILLVAL"
+                ]
+                for detector_pair in CalibrationProductConfig.tof_detector_pairs
+            }
+            tof_in_window_mask = get_tof_window_mask(
+                filtered_de_df, config_row, tof_fill_vals
+            )
+            filtered_de_df = filtered_de_df[tof_in_window_mask]
+
+            # Bin remaining DEs into spin-bins
+            i_esa = np.flatnonzero(pset_coords["esa_energy_step"].data == esa_energy)[0]
+            # spin_phase is in the range [0, 1). Multiplying by N_SPIN_BINS and
+            # truncating to an integer gives the correct bin index
+            spin_bin_indices = (
+                filtered_de_df["spin_phase"].to_numpy() * N_SPIN_BINS
+            ).astype(int)
+            # When iterating over rows of a dataframe, the names of the multi-index
+            # are not preserved. Below, `config_row.Index[0]` gets the cal_prod_num
+            # value from the namedtuple representing the dataframe row.
+            np.add.at(
+                counts_var["counts"].data[0, i_esa, config_row.Index[0]],
+                spin_bin_indices,
+                1,
+            )
+    return counts_var
+
+
+def get_tof_window_mask(
+    de_df: pd.DataFrame, prod_config_row: NamedTuple, fill_vals: dict
+) -> NDArray[bool]:
+    """
+    Generate a mask indicating which DEs to keep based on TOF windows.
+
+    Parameters
+    ----------
+    de_df : pd.DataFrame
+        The Direct Event dataframe for the DEs to filter based on the TOF
+        windows.
+    prod_config_row : namedtuple
+        A single row of the prod config dataframe represented as a named tuple.
+    fill_vals : dict
+        A dictionary containing the fill values used in the input DE TOF
+        dataframe values. This value should be derived from the L1B DE CDF
+        TOF variable attributes.
+
+    Returns
+    -------
+    window_mask : np.ndarray
+        A mask with one entry per DE in the input `de_df` indicating which DEs
+        contain TOF values within the windows specified by `prod_config_row`.
+        The mask is intended to directly filter the DE dataframe.
+    """
+    detector_pairs = CalibrationProductConfig.tof_detector_pairs
+    tof_in_window_mask = np.empty((len(detector_pairs), len(de_df)), dtype=bool)
+    for i_pair, detector_pair in enumerate(detector_pairs):
+        low_limit = getattr(prod_config_row, f"tof_{detector_pair}_low")
+        high_limit = getattr(prod_config_row, f"tof_{detector_pair}_high")
+        tof_array = de_df[f"tof_{detector_pair}"].to_numpy()
+        # The TOF in window mask contains True wherever the TOF is within
+        # the configuration low/high bounds OR the FILLVAL is present. The
+        # FILLVAL indicates that the detector pair was not hit. DEs with
+        # the incorrect coincidence_type are already filtered out and this
+        # implementation simplifies combining the tof_in_window_masks in
+        # the next step.
+        tof_in_window_mask[i_pair] = np.logical_or(
+            np.logical_and(low_limit <= tof_array, tof_array <= high_limit),
+            tof_array == fill_vals[f"tof_{detector_pair}"],
+        )
+    return np.all(tof_in_window_mask, axis=0)
+
+
+def pset_exposure(
+    pset_coords: dict[str, xr.DataArray], l1b_de_dataset: xr.Dataset
+) -> dict[str, xr.DataArray]:
+    """
+    Calculate PSET exposure time.
+
+    Parameters
+    ----------
+    pset_coords : dict[str, xr.DataArray]
+        The PSET coordinates from the xr.Dataset.
+    l1b_de_dataset : xr.Dataset
+        The L1B dataset for the pointing being processed.
+
+    Returns
+    -------
+    dict[str, xr.DataArray]
+        Dictionary containing new exposure_times DataArray to be added to the PSET
+        dataset.
+    """
+    # Extract the sensor number (45 or 90) for computing spin phase
+    sensor_number = parse_sensor_number(l1b_de_dataset.attrs["Logical_source"])
+
+    # Generate exposure time variable filled with zeros
+    exposure_var = create_dataset_variables(
+        ["exposure_times"],
+        coords=pset_coords,
+        att_manager_lookup_str="hi_pset_{0}",
+        fill_value=0,
+    )
+
+    # Get a subset of the l1b_de_dataset that contains only the second
+    # of each pair of packets at an ESA step.
+    data_subset = find_second_de_packet_data(l1b_de_dataset)
+
+    # Get the pandas dataframe with spin data
+    spin_df = get_spin_data()
+
+    # Loop over each of the CCSDS data rows that have been identified as the second
+    # packet at an ESA step.
+    # When implementing this, the memory needed to avoid this for loop was computed
+    # and determined to be so large that the for loop is warranted.
+    for _, packet_row in data_subset.groupby("epoch"):
+        clock_tick_mets, clock_tick_weights = get_de_clock_ticks_for_esa_step(
+            packet_row["ccsds_met"].values, spin_df
+        )
+
+        # Clock tick MET times are accumulation "edges". To get the mean spin-phase
+        # for a given clock tick, add 1/2 clock tick and compute spin-phase.
+        spin_phases = np.atleast_1d(
+            get_instrument_spin_phase(
+                clock_tick_mets + HALF_CLOCK_TICK_S,
+                SpiceFrame[f"IMAP_HI_{sensor_number}"],
+            )
+        )
+
+        # Remove ticks not in good times/angles
+        good_mask = good_time_and_phase_mask(clock_tick_mets, spin_phases)
+        spin_phases = spin_phases[good_mask]
+        clock_tick_weights = clock_tick_weights[good_mask]
+
+        # TODO: Account for flyback time. See alg doc section 2.3.5
+
+        # Bin exposure times into spin-phase bins
+        new_exposure_times, _ = np.histogram(
+            spin_phases, bins=SPIN_PHASE_BIN_EDGES, weights=clock_tick_weights
+        )
+        # Accumulate the new exposure times for current esa_step
+        i_esa = np.flatnonzero(
+            pset_coords["esa_energy_step"].values
+            == packet_row["esa_energy_step"].values
+        )[0]
+        exposure_var["exposure_times"].values[:, i_esa] += new_exposure_times
+
+    return exposure_var
+
+
+def find_second_de_packet_data(l1b_dataset: xr.Dataset) -> xr.Dataset:
+    """
+    Find the telemetry entries for the second packet at an ESA step.
+
+    Parameters
+    ----------
+    l1b_dataset : xr.Dataset
+        The L1B Direct Event Dataset for the current pointing.
+
+    Returns
+    -------
+    reduced_dataset : xr.Dataset
+        A dataset containing only the entries for the second packet at an ESA step.
+    """
+    epoch_dataset = l1b_dataset.drop_dims("event_met")
+    # We should get two CCSDS packets per 8-spin ESA step.
+    # Get the indices of the packet before each ESA change.
+    esa_step = epoch_dataset["esa_step"].values
+    second_esa_packet_idx = np.append(
+        np.flatnonzero(np.diff(esa_step) != 0), len(esa_step) - 1
+    )
+    # Remove esa steps at 0 - these are calibrations
+    second_esa_packet_idx = second_esa_packet_idx[esa_step[second_esa_packet_idx] != 0]
+    # Remove indices where we don't have two consecutive packets at the same ESA
+    if second_esa_packet_idx[0] == 0:
+        logger.warning(
+            f"Removing packet 0 with ESA step: {esa_step[0]} from"
+            f"calculation of exposure time due to missing matched pair."
+        )
+        second_esa_packet_idx = second_esa_packet_idx[1:]
+    missing_esa_pair_mask = (
+        esa_step[second_esa_packet_idx - 1] != esa_step[second_esa_packet_idx]
+    )
+    if missing_esa_pair_mask.any():
+        logger.warning(
+            f"Removing {missing_esa_pair_mask.sum()} packets from exposure "
+            f"time calculation due to missing ESA step DE packet pairs."
+        )
+        second_esa_packet_idx = second_esa_packet_idx[~missing_esa_pair_mask]
+    # Reduce the dataset to just the second packet entries
+    data_subset = epoch_dataset.isel(epoch=second_esa_packet_idx)
+    return data_subset
+
+
+def get_de_clock_ticks_for_esa_step(
+    ccsds_met: float, spin_df: pd.DataFrame
+) -> tuple[np.ndarray, np.ndarray]:
+    """
+    Generate an array of clock tick MET times for an 8-spin ESA step.
+
+    Find the closest spin start time in the input spin dataframe to the packet
+    creation time (`ccsds_met`) and generate an array of clock tick MET times
+    for the period covered by the previous 8-spin group and an array of weights
+    that represent the fraction of each clock tick that occurred in the 8-spin
+    group.
+
+    Parameters
+    ----------
+    ccsds_met : float
+        The CCSDS MET of the second packet in a DE packet pair.
+    spin_df : pd.DataFrame
+        Universal spin table dataframe.
+
+    Returns
+    -------
+    clock_tick_mets : np.ndarray
+        Array of MET times that a clock tick occurred in an 8-spin group of spins
+        during which the ESA step was constant.
+    clock_tick_weights : np.ndarray
+        Array of weights to use when binning the clock tick MET times into spin-bins.
+    """
+    # Find the last spin_table entry with the start less than the CCSDS MET.
+    # The CCSDS packet gets created just AFTER the final spin in the 8-spin
+    # ESA step group so this match is the end time. The start time is
+    # 8-spins earlier.
+    spin_start_mets = spin_df.spin_start_time.to_numpy()
+    # CCSDS MET has one second resolution, add one to it to make sure it is
+    # greater than the spin start time it ended on.
+    end_time_ind = np.flatnonzero(ccsds_met + 1 >= spin_start_mets).max()
+
+    # If the minimum absolute difference is greater than 1/2 the spin-phase
+    # we have a problem.
+    if (
+        ccsds_met - spin_start_mets[end_time_ind]
+        > spin_df.iloc[end_time_ind].spin_period_sec / 2
+    ):
+        raise ValueError(
+            "The difference between ccsds_met and spin_start_met, "
+            f"{ccsds_met - spin_start_mets[end_time_ind]} seconds, "
+            f"is too large. Check the spin table loaded for this pointing."
+        )
+    # If the end time index less than 8, we don't have enough spins in the
+    # spin table to get a start time, so raise an error.
+    if end_time_ind < 8:
+        raise ValueError(
+            "Error determining start/end time for exposure time. "
+            f"The CCSDS MET time {ccsds_met} "
+            "is less than 8 spins from the loaded spin table data."
+        )
+    clock_tick_mets = np.arange(
+        spin_start_mets[end_time_ind - 8],
+        spin_start_mets[end_time_ind],
+        DE_CLOCK_TICK_S,
+        dtype=float,
+    )
+    # The final clock-tick bin has less exposure time because the next spin
+    # will trigger FSW to change ESA steps part way through that time. To
+    # account for this in exposure time calculation, assign an array of
+    # weights to use when binnig the clock-ticks to spin-bins. Weights are
+    # fractional clock ticks. All weights are 1 except for the last one in
+    # the array.
+    clock_tick_weights = np.ones_like(clock_tick_mets, dtype=float)
+    clock_tick_weights[-1] = (
+        spin_start_mets[end_time_ind] - clock_tick_mets[-1]
+    ) / DE_CLOCK_TICK_S
+    return clock_tick_mets, clock_tick_weights
+
+
+def good_time_and_phase_mask(
+    tick_mets: np.ndarray, spin_phases: np.ndarray
+) -> npt.NDArray:
+    """
+    Filter out the clock tick times that are not in good times and angles.
+
+    Parameters
+    ----------
+    tick_mets : np.ndarray
+        Clock-tick MET times.
+    spin_phases : np.ndarray
+        Spin phases for each clock tick.
+
+    Returns
+    -------
+    keep_mask : np.ndarray
+        Boolean mask indicating which clock ticks are in good times/phases.
+    """
+    # TODO: Implement this once we have Goodtimes data product defined.
+    return np.full_like(tick_mets, True, dtype=bool)
+
+
 @pd.api.extensions.register_dataframe_accessor("cal_prod_config")
 class CalibrationProductConfig:
     """
@@ -310,21 +684,20 @@ class CalibrationProductConfig:
         "cal_prod_num",
         "esa_energy_step",
     )
+    tof_detector_pairs = ("ab", "ac1", "bc1", "c1c2")
     required_columns = (
         "coincidence_type_list",
-        "tof_ab_low",
-        "tof_ab_high",
-        "tof_ac1_low",
-        "tof_ac1_high",
-        "tof_bc1_low",
-        "tof_bc1_high",
-        "tof_c1c2_low",
-        "tof_c1c2_high",
+        *[
+            f"tof_{det_pair}_{limit}"
+            for det_pair in tof_detector_pairs
+            for limit in ["low", "high"]
+        ],
    )
 
     def __init__(self, pandas_obj: pd.DataFrame) -> None:
         self._validate(pandas_obj)
         self._obj = pandas_obj
+        self._add_coincidence_values_column()
 
     def _validate(self, df: pd.DataFrame) -> None:
         """
@@ -351,6 +724,18 @@ class CalibrationProductConfig:
         # TODO: Verify that the same ESA energy steps exist in all unique calibration
         # product numbers
 
+    def _add_coincidence_values_column(self) -> None:
+        """Generate and add the coincidence_type_values column to the dataframe."""
+        # Add a column that consists of the coincidence type strings converted
+        # to integer values
+        self._obj["coincidence_type_values"] = self._obj.apply(
+            lambda row: tuple(
+                CoincidenceBitmap.detector_hit_str_to_int(entry)
+                for entry in row["coincidence_type_list"]
+            ),
+            axis=1,
+        )
+
     @classmethod
     def from_csv(cls, path: Path) -> pd.DataFrame:
         """
@@ -366,12 +751,15 @@ class CalibrationProductConfig:
         dataframe : pandas.DataFrame
             Validated calibration product configuration data frame.
         """
-        return pd.read_csv(
+        df = pd.read_csv(
            path,
            index_col=cls.index_columns,
-            converters={"coincidence_type_list": lambda s: s.split("|")},
+            converters={"coincidence_type_list": lambda s: tuple(s.split("|"))},
            comment="#",
        )
+        # Force the _init_ method to run by using the namespace
+        _ = df.cal_prod_config.number_of_products
+        return df
 
     @property
     def number_of_products(self) -> int:
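
Note: the new `pset_counts` and `pset_exposure` functions both reduce data onto the same 3600-bin spin-phase grid, with counts accumulated via `np.add.at` and exposure time accumulated via a weighted `np.histogram`. The sketch below illustrates only those two binning patterns in isolation; the spin-phase values, tick weights, and printed results are made-up sample data, not IMAP-Hi telemetry.

import numpy as np

N_SPIN_BINS = 3600
SPIN_PHASE_BIN_EDGES = np.linspace(0, 1, N_SPIN_BINS + 1)

# Counts: spin_phase is in [0, 1), so multiplying by N_SPIN_BINS and
# truncating gives the bin index; np.add.at handles repeated indices correctly.
spin_phases = np.array([0.0001, 0.0002, 0.5, 0.99999])  # hypothetical direct events
counts = np.zeros(N_SPIN_BINS, dtype=int)
spin_bin_indices = (spin_phases * N_SPIN_BINS).astype(int)
np.add.at(counts, spin_bin_indices, 1)
print(counts[0], counts[1800], counts[3599])  # -> 2 1 1

# Exposure: clock-tick spin phases are histogrammed with per-tick weights;
# the final tick of an 8-spin group carries a fractional weight.
tick_phases = np.array([0.1234, 0.3711, 0.6005, 0.8766])  # hypothetical ticks
tick_weights = np.array([1.0, 1.0, 1.0, 0.4])             # final tick is partial
exposure, _ = np.histogram(tick_phases, bins=SPIN_PHASE_BIN_EDGES, weights=tick_weights)
print(np.flatnonzero(exposure), exposure[exposure > 0])   # four occupied bins

In the actual functions these spin-bin arrays are additionally indexed by ESA energy step and calibration product number before accumulation.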
imap_processing/hi/utils.py
CHANGED
@@ -4,7 +4,6 @@ import re
 from collections.abc import Sequence
 from dataclasses import dataclass
 from enum import IntEnum
-from numbers import Number
 from typing import Optional, Union
 
 import numpy as np
@@ -19,10 +18,12 @@ class HIAPID(IntEnum):
     H45_APP_NHK = 754
     H45_SCI_CNT = 769
     H45_SCI_DE = 770
+    H45_DIAG_FEE = 772
 
     H90_APP_NHK = 818
     H90_SCI_CNT = 833
     H90_SCI_DE = 834
+    H90_DIAG_FEE = 836
 
     @property
     def sensor(self) -> str:
@@ -100,7 +101,7 @@ def full_dataarray(
     attrs: dict,
     coords: Optional[dict[str, xr.DataArray]] = None,
     shape: Optional[Union[int, Sequence[int]]] = None,
-    fill_value: Optional[
+    fill_value: Optional[float] = None,
 ) -> xr.DataArray:
     """
     Generate an empty xarray.DataArray with appropriate attributes.
@@ -115,10 +116,14 @@ def full_dataarray(
         Variable name.
     attrs : dict
         CDF variable attributes. Usually retrieved from ImapCdfAttributes.
-    coords : dict
-        Coordinate variables for the Dataset.
-
-
+    coords : dict, optional
+        Coordinate variables for the Dataset. This function will extract the
+        sizes of each dimension defined by the attributes dictionary to determine
+        the size of the DataArray to be created.
+    shape : int or tuple, optional
+        Shape of ndarray data array to instantiate in the xarray.DataArray. If
+        shape is provided, the DataArray created will have this shape regardless
+        of whether coordinates are provided or not.
     fill_value : optional, float
         Override the fill value that the DataArray will be filled with. If not
         supplied, the "FILLVAL" value from `attrs` will be used.
@@ -152,9 +157,9 @@
 
 def create_dataset_variables(
     variable_names: list[str],
-    variable_shape: Union[int, Sequence[int]],
+    variable_shape: Optional[Union[int, Sequence[int]]] = None,
     coords: Optional[dict[str, xr.DataArray]] = None,
-    fill_value: Optional[
+    fill_value: Optional[float] = None,
     att_manager_lookup_str: str = "{0}",
 ) -> dict[str, xr.DataArray]:
     """
@@ -166,10 +171,14 @@
     ----------
     variable_names : list[str]
         List of variable names to create.
-    variable_shape : int or sequence of int
-        Shape of the new variables data ndarray.
-
-
+    variable_shape : int or sequence of int, optional
+        Shape of the new variables data ndarray. If not provided the shape will
+        attempt to be derived from the coords dictionary.
+    coords : dict, optional
+        Coordinate variables for the Dataset. If `variable_shape` is not provided
+        the dataset variables created will use this dictionary along with variable
+        attributes from the CdfAttributeManager to determine the shapes of the
+        dataset variables created.
     fill_value : optional, number
         Value to fill the new variables data arrays with. If not supplied,
         the fill value is pulled from the CDF variable attributes "FILLVAL"
@@ -200,3 +209,40 @@
             var, attrs, shape=variable_shape, coords=coords, fill_value=fill_value
         )
     return new_variables
+
+
+class CoincidenceBitmap(IntEnum):
+    """IntEnum class for coincidence type bitmap values."""
+
+    A = 2**3
+    B = 2**2
+    C1 = 2**1
+    C2 = 2**0
+
+    @staticmethod
+    def detector_hit_str_to_int(detector_hit_str: str) -> int:
+        """
+        Convert a detector hit string to a coincidence type integer value.
+
+        A detector hit string is a string containing all detectors that were hit
+        for a direct event. Possible detectors include: [A, B, C1, C2]. Converting
+        the detector hit string to a coincidence type integer value involves
+        summing the coincidence bitmap value for each detector hit. e.g. "AC1C2"
+        results in 2**3 + 2**1 + 2**0 = 11.
+
+        Parameters
+        ----------
+        detector_hit_str : str
+            The string containing the set of detectors hit.
+            e.g. "AC1C2".
+
+        Returns
+        -------
+        coincidence_type : int
+            The integer value of the coincidence type.
+        """
+        # Join all detector names with a pipe for use with regex
+        pattern = r"|".join(c.name for c in CoincidenceBitmap)
+        matches = re.findall(pattern, detector_hit_str)
+        # Sum the integer value assigned to the detector name for each match
+        return sum(CoincidenceBitmap[m] for m in matches)
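
Note: the new `CoincidenceBitmap` enum assigns one bit per detector (A=8, B=4, C1=2, C2=1), and `detector_hit_str_to_int` sums the bits named in a detector-hit string. A minimal standalone sketch follows, re-declaring the enum locally so it runs without imap_processing installed.

import re
from enum import IntEnum


class CoincidenceBitmap(IntEnum):
    # Same bit assignments as the enum added in imap_processing/hi/utils.py
    A = 2**3
    B = 2**2
    C1 = 2**1
    C2 = 2**0


def detector_hit_str_to_int(detector_hit_str: str) -> int:
    # Build the alternation "A|B|C1|C2", pull out each detector name,
    # then sum the corresponding bit values.
    pattern = r"|".join(c.name for c in CoincidenceBitmap)
    matches = re.findall(pattern, detector_hit_str)
    return sum(CoincidenceBitmap[m] for m in matches)


print(detector_hit_str_to_int("AC1C2"))   # 2**3 + 2**1 + 2**0 = 11
print(detector_hit_str_to_int("ABC1C2"))  # 15, all four detectors hit

The CalibrationProductConfig accessor in hi_l1c.py relies on this conversion: from_csv splits each coincidence_type_list entry on "|" and _add_coincidence_values_column maps every detector-hit string (for example a hypothetical "ABC1C2") to the integer tuple that direct events are filtered against.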