imap-processing 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/__init__.py +34 -0
- imap_processing/_version.py +3 -0
- imap_processing/ccsds/__init__.py +0 -0
- imap_processing/ccsds/ccsds_data.py +55 -0
- imap_processing/ccsds/excel_to_xtce.py +477 -0
- imap_processing/cdf/__init__.py +0 -0
- imap_processing/cdf/cdf_attribute_manager.py +322 -0
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +212 -0
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +1358 -0
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +391 -0
- imap_processing/cdf/config/imap_constant_attrs.yaml +33 -0
- imap_processing/cdf/config/imap_default_global_cdf_attrs.yaml +17 -0
- imap_processing/cdf/config/imap_glows_global_cdf_attrs.yaml +41 -0
- imap_processing/cdf/config/imap_glows_l1a_variable_attrs.yaml +499 -0
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +334 -0
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +51 -0
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +435 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +27 -0
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +493 -0
- imap_processing/cdf/config/imap_hit_l1b_variable_attrs.yaml +564 -0
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +24 -0
- imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +426 -0
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +90 -0
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +487 -0
- imap_processing/cdf/config/imap_lo_l1b_variable_attrs.yaml +121 -0
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +179 -0
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +97 -0
- imap_processing/cdf/config/imap_mag_l1_variable_attrs.yaml +201 -0
- imap_processing/cdf/config/imap_swapi_global_cdf_attrs.yaml +33 -0
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +137 -0
- imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +24 -0
- imap_processing/cdf/config/imap_swe_l1a_variable_attrs.yaml +234 -0
- imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +273 -0
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +100 -0
- imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +52 -0
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +297 -0
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +121 -0
- imap_processing/cdf/config/shared/default_global_cdf_attrs_schema.yaml +246 -0
- imap_processing/cdf/config/shared/default_variable_cdf_attrs_schema.yaml +466 -0
- imap_processing/cdf/imap_cdf_manager.py +64 -0
- imap_processing/cdf/utils.py +147 -0
- imap_processing/cli.py +863 -0
- imap_processing/codice/__init__.py +1 -0
- imap_processing/codice/codice_l0.py +54 -0
- imap_processing/codice/codice_l1a.py +558 -0
- imap_processing/codice/codice_l1b.py +194 -0
- imap_processing/codice/constants.py +986 -0
- imap_processing/codice/data/esa_sweep_values.csv +257 -0
- imap_processing/codice/data/lo_stepping_values.csv +129 -0
- imap_processing/codice/decompress.py +142 -0
- imap_processing/codice/packet_definitions/P_COD_NHK.xml +618 -0
- imap_processing/codice/packet_definitions/codice_packet_definition.xml +5073 -0
- imap_processing/codice/utils.py +95 -0
- imap_processing/decom.py +40 -0
- imap_processing/glows/__init__.py +1 -0
- imap_processing/glows/ancillary/l1b_conversion_table_v001.json +42 -0
- imap_processing/glows/l0/__init__.py +0 -0
- imap_processing/glows/l0/decom_glows.py +91 -0
- imap_processing/glows/l0/glows_l0_data.py +194 -0
- imap_processing/glows/l1a/glows_l1a.py +424 -0
- imap_processing/glows/l1a/glows_l1a_data.py +555 -0
- imap_processing/glows/l1b/glows_l1b.py +270 -0
- imap_processing/glows/l1b/glows_l1b_data.py +583 -0
- imap_processing/glows/packet_definitions/GLX_COMBINED.xml +254 -0
- imap_processing/glows/packet_definitions/P_GLX_TMSCDE.xml +97 -0
- imap_processing/glows/packet_definitions/P_GLX_TMSCHIST.xml +215 -0
- imap_processing/glows/utils/__init__.py +0 -0
- imap_processing/glows/utils/constants.py +105 -0
- imap_processing/hi/__init__.py +1 -0
- imap_processing/hi/l0/__init__.py +0 -0
- imap_processing/hi/l0/decom_hi.py +24 -0
- imap_processing/hi/l1a/__init__.py +0 -0
- imap_processing/hi/l1a/hi_l1a.py +73 -0
- imap_processing/hi/l1a/histogram.py +142 -0
- imap_processing/hi/l1a/housekeeping.py +27 -0
- imap_processing/hi/l1a/science_direct_event.py +341 -0
- imap_processing/hi/l1b/__init__.py +0 -0
- imap_processing/hi/l1b/hi_eng_unit_convert_table.csv +154 -0
- imap_processing/hi/l1b/hi_l1b.py +127 -0
- imap_processing/hi/l1c/__init__.py +0 -0
- imap_processing/hi/l1c/hi_l1c.py +228 -0
- imap_processing/hi/packet_definitions/__init__.py +0 -0
- imap_processing/hi/packet_definitions/hi_packet_definition.xml +482 -0
- imap_processing/hi/utils.py +27 -0
- imap_processing/hit/__init__.py +1 -0
- imap_processing/hit/l0/__init__.py +0 -0
- imap_processing/hit/l0/data_classes/housekeeping.py +240 -0
- imap_processing/hit/l0/data_classes/science_packet.py +259 -0
- imap_processing/hit/l0/decom_hit.py +467 -0
- imap_processing/hit/l0/utils/hit_base.py +57 -0
- imap_processing/hit/l1a/__init__.py +0 -0
- imap_processing/hit/l1a/hit_l1a.py +254 -0
- imap_processing/hit/l1b/hit_l1b.py +179 -0
- imap_processing/hit/packet_definitions/hit_packet_definitions.xml +1276 -0
- imap_processing/ialirt/__init__.py +0 -0
- imap_processing/ialirt/l0/__init__.py +0 -0
- imap_processing/ialirt/l0/process_hit.py +220 -0
- imap_processing/ialirt/packet_definitions/__init__.py +0 -0
- imap_processing/ialirt/packet_definitions/ialirt.xml +778 -0
- imap_processing/ialirt/packet_definitions/ialirt_hit.xml +186 -0
- imap_processing/idex/__init__.py +2 -0
- imap_processing/idex/idex_constants.py +27 -0
- imap_processing/idex/idex_l0.py +31 -0
- imap_processing/idex/idex_l1a.py +631 -0
- imap_processing/idex/packet_definitions/idex_packet_definition.xml +3162 -0
- imap_processing/lo/__init__.py +1 -0
- imap_processing/lo/l0/__init__.py +0 -0
- imap_processing/lo/l0/data_classes/science_direct_events.py +215 -0
- imap_processing/lo/l0/data_classes/star_sensor.py +98 -0
- imap_processing/lo/l0/decompression_tables/12_to_16_bit.csv +4097 -0
- imap_processing/lo/l0/decompression_tables/8_to_12_bit.csv +257 -0
- imap_processing/lo/l0/decompression_tables/8_to_16_bit.csv +257 -0
- imap_processing/lo/l0/decompression_tables/decompression_tables.py +75 -0
- imap_processing/lo/l0/lo_apid.py +15 -0
- imap_processing/lo/l0/lo_science.py +150 -0
- imap_processing/lo/l0/utils/binary_string.py +59 -0
- imap_processing/lo/l0/utils/bit_decompression.py +62 -0
- imap_processing/lo/l0/utils/lo_base.py +57 -0
- imap_processing/lo/l1a/__init__.py +0 -0
- imap_processing/lo/l1a/lo_l1a.py +157 -0
- imap_processing/lo/l1b/lo_l1b.py +160 -0
- imap_processing/lo/l1c/lo_l1c.py +180 -0
- imap_processing/lo/packet_definitions/lo_xtce.xml +3541 -0
- imap_processing/mag/__init__.py +2 -0
- imap_processing/mag/constants.py +108 -0
- imap_processing/mag/l0/decom_mag.py +170 -0
- imap_processing/mag/l0/mag_l0_data.py +118 -0
- imap_processing/mag/l1a/mag_l1a.py +317 -0
- imap_processing/mag/l1a/mag_l1a_data.py +1007 -0
- imap_processing/mag/l1b/__init__.py +0 -0
- imap_processing/mag/l1b/imap_calibration_mag_20240229_v01.cdf +0 -0
- imap_processing/mag/l1b/mag_l1b.py +125 -0
- imap_processing/mag/l1c/mag_l1c.py +57 -0
- imap_processing/mag/packet_definitions/MAG_SCI_COMBINED.xml +235 -0
- imap_processing/quality_flags.py +91 -0
- imap_processing/spice/__init__.py +1 -0
- imap_processing/spice/geometry.py +322 -0
- imap_processing/spice/kernels.py +459 -0
- imap_processing/spice/time.py +72 -0
- imap_processing/swapi/__init__.py +1 -0
- imap_processing/swapi/l1/__init__.py +0 -0
- imap_processing/swapi/l1/swapi_l1.py +685 -0
- imap_processing/swapi/l2/__init__.py +0 -0
- imap_processing/swapi/l2/swapi_l2.py +107 -0
- imap_processing/swapi/packet_definitions/__init__.py +0 -0
- imap_processing/swapi/packet_definitions/swapi_packet_definition.xml +708 -0
- imap_processing/swapi/swapi_utils.py +25 -0
- imap_processing/swe/__init__.py +1 -0
- imap_processing/swe/l1a/__init__.py +0 -0
- imap_processing/swe/l1a/swe_l1a.py +48 -0
- imap_processing/swe/l1a/swe_science.py +223 -0
- imap_processing/swe/l1b/engineering_unit_convert_table.csv +65 -0
- imap_processing/swe/l1b/swe_esa_lookup_table.csv +1441 -0
- imap_processing/swe/l1b/swe_l1b.py +49 -0
- imap_processing/swe/l1b/swe_l1b_science.py +557 -0
- imap_processing/swe/packet_definitions/__init__.py +0 -0
- imap_processing/swe/packet_definitions/swe_packet_definition.xml +303 -0
- imap_processing/swe/utils/__init__.py +0 -0
- imap_processing/swe/utils/swe_utils.py +9 -0
- imap_processing/tests/__init__.py +0 -0
- imap_processing/tests/ccsds/test_data/expected_output.xml +171 -0
- imap_processing/tests/ccsds/test_excel_to_xtce.py +285 -0
- imap_processing/tests/cdf/__init__.py +0 -0
- imap_processing/tests/cdf/imap_default_global_cdf_attrs.yaml +8 -0
- imap_processing/tests/cdf/shared/default_global_cdf_attrs_schema.yaml +246 -0
- imap_processing/tests/cdf/shared/default_variable_cdf_attrs_schema.yaml +466 -0
- imap_processing/tests/cdf/test_cdf_attribute_manager.py +353 -0
- imap_processing/tests/cdf/test_data/imap_default_global_test_cdf_attrs.yaml +7 -0
- imap_processing/tests/cdf/test_data/imap_instrument1_global_cdf_attrs.yaml +14 -0
- imap_processing/tests/cdf/test_data/imap_instrument1_level1_variable_attrs.yaml +23 -0
- imap_processing/tests/cdf/test_data/imap_instrument2_global_cdf_attrs.yaml +23 -0
- imap_processing/tests/cdf/test_data/imap_instrument2_level2_variable_attrs.yaml +30 -0
- imap_processing/tests/cdf/test_data/imap_test_global.yaml +26 -0
- imap_processing/tests/cdf/test_data/imap_test_variable.yaml +41 -0
- imap_processing/tests/cdf/test_imap_cdf_manager.py +62 -0
- imap_processing/tests/cdf/test_utils.py +109 -0
- imap_processing/tests/codice/__init__.py +0 -0
- imap_processing/tests/codice/conftest.py +56 -0
- imap_processing/tests/codice/data/eu_unit_lookup_table.csv +101 -0
- imap_processing/tests/codice/data/idle_export_eu.COD_NHK_20230822_122700 2.csv +100 -0
- imap_processing/tests/codice/data/idle_export_raw.COD_NHK_20230822_122700.csv +100 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-counters-aggregated_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-counters-singles_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-omni_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-pha_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hi-sectored_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_hskp_20100101_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-counters-aggregated_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-counters-singles_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-angular_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-priority_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-nsw-species_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-pha_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-sw-angular_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-sw-priority_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l0_lo-sw-species_20240429_v001.pkts +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-omni_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hi-sectored_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_hskp_20100101_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-nsw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-nsw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-nsw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-sw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-sw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1a_lo-sw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-omni_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hi-sectored_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_hskp_20100101_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-counters-aggregated_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-counters-singles_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-nsw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-nsw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-nsw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-sw-angular_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-sw-priority_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/data/imap_codice_l1b_lo-sw-species_20240429_v001.cdf +0 -0
- imap_processing/tests/codice/test_codice_l0.py +144 -0
- imap_processing/tests/codice/test_codice_l1a.py +187 -0
- imap_processing/tests/codice/test_codice_l1b.py +60 -0
- imap_processing/tests/codice/test_decompress.py +50 -0
- imap_processing/tests/conftest.py +372 -0
- imap_processing/tests/glows/direct_events_validation_data_l1a.csv +5704 -0
- imap_processing/tests/glows/glows_test_packet_20110921_v01.pkts +0 -0
- imap_processing/tests/glows/test_glows_decom.py +133 -0
- imap_processing/tests/glows/test_glows_l1a_cdf.py +85 -0
- imap_processing/tests/glows/test_glows_l1a_data.py +510 -0
- imap_processing/tests/glows/test_glows_l1b.py +348 -0
- imap_processing/tests/glows/test_glows_l1b_data.py +70 -0
- imap_processing/tests/hi/__init__.py +0 -0
- imap_processing/tests/hi/conftest.py +133 -0
- imap_processing/tests/hi/test_data/l0/20231030_H45_APP_NHK.bin +0 -0
- imap_processing/tests/hi/test_data/l0/20231030_H45_APP_NHK.csv +201 -0
- imap_processing/tests/hi/test_data/l0/20231030_H45_SCI_CNT.bin +0 -0
- imap_processing/tests/hi/test_data/l0/20231030_H45_SCI_DE.bin +0 -0
- imap_processing/tests/hi/test_data/l0/README.txt +54 -0
- imap_processing/tests/hi/test_decom.py +55 -0
- imap_processing/tests/hi/test_hi_l1b.py +31 -0
- imap_processing/tests/hi/test_hi_l1c.py +69 -0
- imap_processing/tests/hi/test_l1a.py +96 -0
- imap_processing/tests/hi/test_l1a_sci_de.py +72 -0
- imap_processing/tests/hi/test_utils.py +15 -0
- imap_processing/tests/hit/PREFLIGHT_raw_record_2023_256_15_59_04_apid1251.pkts +0 -0
- imap_processing/tests/hit/PREFLIGHT_raw_record_2023_256_15_59_04_apid1252.pkts +0 -0
- imap_processing/tests/hit/__init__.py +0 -0
- imap_processing/tests/hit/test_data/imap_hit_l0_hk_20100105_v001.pkts +0 -0
- imap_processing/tests/hit/test_data/sci_sample.ccsds +0 -0
- imap_processing/tests/hit/test_hit_decom.py +230 -0
- imap_processing/tests/hit/test_hit_l1a.py +224 -0
- imap_processing/tests/hit/test_hit_l1b.py +52 -0
- imap_processing/tests/hit/validation_data/hskp_sample_raw.csv +88 -0
- imap_processing/tests/ialirt/__init__.py +0 -0
- imap_processing/tests/ialirt/test_data/l0/IALiRT Raw Packet Telemetry.txt +33 -0
- imap_processing/tests/ialirt/test_data/l0/hit_ialirt_sample.ccsds +0 -0
- imap_processing/tests/ialirt/test_data/l0/hit_ialirt_sample.csv +1001 -0
- imap_processing/tests/ialirt/unit/__init__.py +0 -0
- imap_processing/tests/ialirt/unit/test_decom_ialirt.py +94 -0
- imap_processing/tests/ialirt/unit/test_process_hit.py +226 -0
- imap_processing/tests/idex/__init__.py +0 -0
- imap_processing/tests/idex/conftest.py +22 -0
- imap_processing/tests/idex/imap_idex_l0_raw_20230725_v001.pkts +0 -0
- imap_processing/tests/idex/impact_14_tof_high_data.txt +8189 -0
- imap_processing/tests/idex/test_idex_l0.py +45 -0
- imap_processing/tests/idex/test_idex_l1a.py +91 -0
- imap_processing/tests/lo/__init__.py +0 -0
- imap_processing/tests/lo/test_binary_string.py +21 -0
- imap_processing/tests/lo/test_bit_decompression.py +39 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l0_raw_20240627_v001.pkts +0 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1a_de_20100101_v001.cdf +0 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1a_spin_20100101_v001.cdf +0 -0
- imap_processing/tests/lo/test_cdfs/imap_lo_l1b_de_20100101_v001.cdf +0 -0
- imap_processing/tests/lo/test_lo_l1a.py +66 -0
- imap_processing/tests/lo/test_lo_l1b.py +74 -0
- imap_processing/tests/lo/test_lo_l1c.py +66 -0
- imap_processing/tests/lo/test_science_counts.py +41 -0
- imap_processing/tests/lo/test_science_direct_events.py +209 -0
- imap_processing/tests/lo/test_star_sensor.py +35 -0
- imap_processing/tests/mag/imap_mag_l1a_burst-magi_20231025_v001.cdf +0 -0
- imap_processing/tests/mag/mag_l0_test_data.pkts +0 -0
- imap_processing/tests/mag/mag_l0_test_output.csv +37 -0
- imap_processing/tests/mag/mag_l1_test_data.pkts +0 -0
- imap_processing/tests/mag/mag_l1a_test_output.csv +97 -0
- imap_processing/tests/mag/test_mag_decom.py +117 -0
- imap_processing/tests/mag/test_mag_l1a.py +856 -0
- imap_processing/tests/mag/test_mag_l1b.py +77 -0
- imap_processing/tests/mag/test_mag_l1c.py +40 -0
- imap_processing/tests/spice/__init__.py +0 -0
- imap_processing/tests/spice/test_data/imap_ena_sim_metakernel.template +4 -0
- imap_processing/tests/spice/test_data/imap_science_0001.tf +171 -0
- imap_processing/tests/spice/test_data/imap_sclk_0000.tsc +156 -0
- imap_processing/tests/spice/test_data/imap_sim_ck_2hr_2secsampling_with_nutation.bc +0 -0
- imap_processing/tests/spice/test_data/imap_simple_metakernel.template +3 -0
- imap_processing/tests/spice/test_data/imap_spk_demo.bsp +0 -0
- imap_processing/tests/spice/test_data/imap_wkcp.tf +1806 -0
- imap_processing/tests/spice/test_data/naif0012.tls +150 -0
- imap_processing/tests/spice/test_data/sim_1yr_imap_attitude.bc +0 -0
- imap_processing/tests/spice/test_data/sim_1yr_imap_pointing_frame.bc +0 -0
- imap_processing/tests/spice/test_geometry.py +214 -0
- imap_processing/tests/spice/test_kernels.py +272 -0
- imap_processing/tests/spice/test_time.py +35 -0
- imap_processing/tests/swapi/__init__.py +0 -0
- imap_processing/tests/swapi/conftest.py +16 -0
- imap_processing/tests/swapi/l0_data/__init__.py +0 -0
- imap_processing/tests/swapi/l0_data/imap_swapi_l0_raw_20231012_v001.pkts +0 -0
- imap_processing/tests/swapi/l0_validation_data/__init__.py +0 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_eu.SWP_AUT_20231012_125245.csv +124 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_eu.SWP_HK_20231012_125245.csv +98 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_eu.SWP_MG_20231012_125245.csv +9 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_eu.SWP_SCI_20231012_125245.csv +72 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_raw.SWP_AUT_20231012_125245.csv +124 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_raw.SWP_HK_20231012_125245.csv +98 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_raw.SWP_MG_20231012_125245.csv +9 -0
- imap_processing/tests/swapi/l0_validation_data/idle_export_raw.SWP_SCI_20231012_125245.csv +72 -0
- imap_processing/tests/swapi/test_swapi_decom.py +135 -0
- imap_processing/tests/swapi/test_swapi_l1.py +354 -0
- imap_processing/tests/swapi/test_swapi_l2.py +21 -0
- imap_processing/tests/swe/__init__.py +0 -0
- imap_processing/tests/swe/conftest.py +35 -0
- imap_processing/tests/swe/decompressed/20230927173238_4th_quarter_decompressed.csv +181 -0
- imap_processing/tests/swe/decompressed/20230927173253_1st_quarter_decompressed.csv +181 -0
- imap_processing/tests/swe/decompressed/20230927173308_2nd_quarter_decompressed.csv +181 -0
- imap_processing/tests/swe/decompressed/20230927173323_3rd_quarter_decompressed.csv +181 -0
- imap_processing/tests/swe/l0_data/2024051010_SWE_SCIENCE_packet.bin +0 -0
- imap_processing/tests/swe/l0_validation_data/idle_export_eu.SWE_SCIENCE_20240510_092742.csv +544 -0
- imap_processing/tests/swe/l0_validation_data/idle_export_raw.SWE_SCIENCE_20240510_092742.csv +363 -0
- imap_processing/tests/swe/test_swe_l1a.py +12 -0
- imap_processing/tests/swe/test_swe_l1a_science.py +129 -0
- imap_processing/tests/swe/test_swe_l1b.py +61 -0
- imap_processing/tests/swe/test_swe_l1b_science.py +65 -0
- imap_processing/tests/test_cli.py +229 -0
- imap_processing/tests/test_decom.py +66 -0
- imap_processing/tests/test_quality_flags.py +71 -0
- imap_processing/tests/test_utils.py +107 -0
- imap_processing/tests/ultra/__init__.py +0 -0
- imap_processing/tests/ultra/test_data/l0/FM45_40P_Phi28p5_BeamCal_LinearScan_phi28.50_theta-0.00_20240207T102740.CCSDS +0 -0
- imap_processing/tests/ultra/test_data/l0/FM45_7P_Phi0.0_BeamCal_LinearScan_phi0.04_theta-0.01_20230821T121304.CCSDS +0 -0
- imap_processing/tests/ultra/test_data/l0/FM45_TV_Cycle6_Hot_Ops_Front212_20240124T063837.CCSDS +0 -0
- imap_processing/tests/ultra/test_data/l0/Ultra45_EM_SwRI_Cal_Run7_ThetaScan_20220530T225054.CCSDS +0 -0
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_auxdata_Ultra45_EM_SwRI_Cal_Run7_ThetaScan_20220530T225054.csv +24 -0
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_enaphxtofhangimg_FM45_TV_Cycle6_Hot_Ops_Front212_20240124T063837.csv +105 -0
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultraimgrates_Ultra45_EM_SwRI_Cal_Run7_ThetaScan_20220530T225054.csv +24 -0
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3314 -0
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimgevent_FM45_7P_Phi00_BeamCal_LinearScan_phi004_theta-001_20230821T121304.csv +702 -0
- imap_processing/tests/ultra/unit/__init__.py +0 -0
- imap_processing/tests/ultra/unit/conftest.py +210 -0
- imap_processing/tests/ultra/unit/test_decom_apid_880.py +98 -0
- imap_processing/tests/ultra/unit/test_decom_apid_881.py +50 -0
- imap_processing/tests/ultra/unit/test_decom_apid_883.py +44 -0
- imap_processing/tests/ultra/unit/test_decom_apid_896.py +104 -0
- imap_processing/tests/ultra/unit/test_lookup_utils.py +68 -0
- imap_processing/tests/ultra/unit/test_ultra_l1a.py +338 -0
- imap_processing/tests/ultra/unit/test_ultra_l1b.py +122 -0
- imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +57 -0
- imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +342 -0
- imap_processing/tests/ultra/unit/test_ultra_l1c.py +104 -0
- imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +35 -0
- imap_processing/ultra/__init__.py +1 -0
- imap_processing/ultra/constants.py +60 -0
- imap_processing/ultra/l0/__init__.py +0 -0
- imap_processing/ultra/l0/decom_tools.py +281 -0
- imap_processing/ultra/l0/decom_ultra.py +278 -0
- imap_processing/ultra/l0/ultra_utils.py +326 -0
- imap_processing/ultra/l1a/__init__.py +0 -0
- imap_processing/ultra/l1a/ultra_l1a.py +319 -0
- imap_processing/ultra/l1b/badtimes.py +26 -0
- imap_processing/ultra/l1b/cullingmask.py +26 -0
- imap_processing/ultra/l1b/de.py +59 -0
- imap_processing/ultra/l1b/extendedspin.py +45 -0
- imap_processing/ultra/l1b/lookup_utils.py +165 -0
- imap_processing/ultra/l1b/ultra_l1b.py +65 -0
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +54 -0
- imap_processing/ultra/l1b/ultra_l1b_extended.py +764 -0
- imap_processing/ultra/l1c/histogram.py +36 -0
- imap_processing/ultra/l1c/pset.py +36 -0
- imap_processing/ultra/l1c/ultra_l1c.py +52 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +54 -0
- imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +32769 -0
- imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +2 -0
- imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +4097 -0
- imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +2050 -0
- imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +4097 -0
- imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +2050 -0
- imap_processing/ultra/lookup_tables/yadjust.csv +257 -0
- imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +547 -0
- imap_processing/ultra/packet_definitions/__init__.py +0 -0
- imap_processing/ultra/utils/__init__.py +0 -0
- imap_processing/ultra/utils/ultra_l1_utils.py +50 -0
- imap_processing/utils.py +413 -0
- imap_processing-0.6.0.dist-info/LICENSE +21 -0
- imap_processing-0.6.0.dist-info/METADATA +107 -0
- imap_processing-0.6.0.dist-info/RECORD +398 -0
- imap_processing-0.6.0.dist-info/WHEEL +4 -0
- imap_processing-0.6.0.dist-info/entry_points.txt +4 -0
|
@@ -0,0 +1,1007 @@
|
|
|
1
|
+
"""Data classes for storing and processing MAG Level 1A data."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import math
|
|
6
|
+
from dataclasses import InitVar, dataclass, field
|
|
7
|
+
from math import floor
|
|
8
|
+
|
|
9
|
+
import numpy as np
|
|
10
|
+
import numpy.typing as npt
|
|
11
|
+
|
|
12
|
+
from imap_processing.cdf.utils import J2000_EPOCH
|
|
13
|
+
from imap_processing.mag.constants import (
|
|
14
|
+
AXIS_COUNT,
|
|
15
|
+
FIBONACCI_SEQUENCE,
|
|
16
|
+
MAX_COMPRESSED_VECTOR_BITS,
|
|
17
|
+
MAX_FINE_TIME,
|
|
18
|
+
RANGE_BIT_WIDTH,
|
|
19
|
+
)
|
|
20
|
+
from imap_processing.spice.time import met_to_j2000ns
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class TimeTuple:
    """
    Coarse/fine time pair for MAG data.

    Coarse time is the mission SCLK in whole seconds. Fine time is a 16-bit
    unsigned sub-second counter (each tick is 1/65535 of a second).

    Attributes
    ----------
    coarse_time : int
        Coarse time in seconds.
    fine_time : int
        Subsecond.

    Methods
    -------
    to_seconds()
    """

    coarse_time: int
    fine_time: int

    def __add__(self, seconds: float) -> TimeTuple:
        """
        Add a number of seconds to the time tuple.

        Parameters
        ----------
        seconds : float
            Number of seconds to add.

        Returns
        -------
        time : TimeTuple
            New time tuple with the current time tuple + seconds.
        """
        whole_seconds = floor(seconds)
        # Map the fractional part onto the 16-bit fine-time counter
        # (fine time is 1/65535th of a second).
        subsecond_ticks = round((seconds % 1) * MAX_FINE_TIME)

        new_coarse = self.coarse_time + whole_seconds
        new_fine = self.fine_time + subsecond_ticks

        # Carry any fine-counter overflow into the coarse seconds.
        if new_fine > MAX_FINE_TIME:
            carry, new_fine = divmod(new_fine, MAX_FINE_TIME)
            new_coarse += carry

        return TimeTuple(new_coarse, new_fine)

    def to_seconds(self) -> float:
        """
        Convert time tuple into seconds (float).

        Returns
        -------
        seconds : float
            Time in seconds.
        """
        fractional = self.fine_time / MAX_FINE_TIME
        return float(self.coarse_time + fractional)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
@dataclass
class MagL1aPacketProperties:
    """
    Data class with Mag L1A per-packet data.

    This holds the per-packet variations in L1A data that are passed into CDF
    files. Since each L1A file is assembled from multiple packets, the values
    stored here vary along the time axis of the output CDF file.

    ``seconds_per_packet`` and ``total_vectors`` are derived quantities: they
    are computed in ``__post_init__`` from ``pus_ssubtype`` and the
    vectors-per-second rate. ``pus_ssubtype`` is an ``InitVar``, so it is
    consumed at construction time and cannot be read from an instance.

    To use the class, pass in pus_ssubtype and either PRI_VECSEC or
    SEC_VECSEC, then access seconds_per_packet and total_vectors.

    Attributes
    ----------
    shcoarse : int
        Mission elapsed time for the packet.
    start_time : TimeTuple
        Start time of the packet.
    vectors_per_second : int
        Number of vectors per second.
    pus_ssubtype : int
        PUS Service Subtype - used to calculate seconds_per_packet. This is an
        InitVar, meaning it is only used when creating the class and cannot be
        accessed from an instance of the class - instead seconds_per_packet
        should be used.
    src_seq_ctr : int
        Sequence counter from the ccsds header.
    compression : int
        Science Data Compression Flag from level 0.
    mago_is_primary : int
        1 if mago is designated the primary sensor, otherwise 0.
    seconds_per_packet : int
        Number of seconds of data in this packet - calculated as
        pus_ssubtype + 1.
    total_vectors : int
        Total number of vectors in this packet - calculated as
        seconds_per_packet * vecsec.
    """

    shcoarse: int
    start_time: TimeTuple
    vectors_per_second: int
    pus_ssubtype: InitVar[int]
    src_seq_ctr: int  # From ccsds header
    compression: int
    mago_is_primary: int
    seconds_per_packet: int = field(init=False)
    total_vectors: int = field(init=False)

    def __post_init__(self, pus_ssubtype: int) -> None:
        """
        Calculate seconds_per_packet and total_vectors.

        Parameters
        ----------
        pus_ssubtype : int
            PUS Service Subtype, used to determine the seconds of data in the
            packet.
        """
        # The subtype encodes one fewer than the seconds of data carried.
        self.seconds_per_packet = pus_ssubtype + 1

        # vectors_per_second (VECSEC) was already decoded in mag_l0.
        self.total_vectors = self.vectors_per_second * self.seconds_per_packet
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
@dataclass
|
|
152
|
+
class MagL1a:
|
|
153
|
+
"""
|
|
154
|
+
Data class for MAG Level 1A data.
|
|
155
|
+
|
|
156
|
+
One MAG L1A object corresponds to part of one MAG L0 packet, which corresponds to
|
|
157
|
+
one packet of data from the MAG instrument. Each L0 packet consists of data from
|
|
158
|
+
two sensors, MAGO (outboard) and MAGI (inboard). One of these sensors is designated
|
|
159
|
+
as the primary sensor (first part of data stream), and one as the secondary.
|
|
160
|
+
|
|
161
|
+
We expect the primary sensor to be MAGO, and the secondary to be MAGI, but this is
|
|
162
|
+
not guaranteed. Each MagL1A object contains data from one sensor. The
|
|
163
|
+
primary/secondary construct is only used to sort the vectors into MAGo and MAGi
|
|
164
|
+
data, and therefore is not used at higher levels.
|
|
165
|
+
|
|
166
|
+
Attributes
|
|
167
|
+
----------
|
|
168
|
+
is_mago : bool
|
|
169
|
+
True if the data is from MagO, False if data is from MagI
|
|
170
|
+
is_active : int
|
|
171
|
+
1 if the sensor is active, 0 if not
|
|
172
|
+
shcoarse : int
|
|
173
|
+
Mission elapsed time for the first packet, the start time for the whole day
|
|
174
|
+
vectors : numpy.ndarray
|
|
175
|
+
List of magnetic vector samples, starting at start_time. [x, y, z, range, time],
|
|
176
|
+
where time is numpy.datetime64[ns]
|
|
177
|
+
starting_packet : InitVar[MagL1aPacketProperties]
|
|
178
|
+
The packet properties for the first packet in the day. As an InitVar, this
|
|
179
|
+
cannot be accessed from an instance of the class. Instead, packet_definitions
|
|
180
|
+
should be used.
|
|
181
|
+
packet_definitions : dict[numpy.datetime64, MagL1aPacketProperties]
|
|
182
|
+
Dictionary of packet properties for each packet in the day. The key is the start
|
|
183
|
+
time of the packet, and the value is a dataclass of packet properties.
|
|
184
|
+
most_recent_sequence : int
|
|
185
|
+
Sequence number of the most recent packet added to the object
|
|
186
|
+
missing_sequences : list[int]
|
|
187
|
+
List of missing sequence numbers in the day
|
|
188
|
+
start_time : numpy.datetime64
|
|
189
|
+
Start time of the day, in ns since J2000 epoch
|
|
190
|
+
|
|
191
|
+
Methods
|
|
192
|
+
-------
|
|
193
|
+
append_vectors()
|
|
194
|
+
calculate_vector_time()
|
|
195
|
+
convert_diffs_to_vectors()
|
|
196
|
+
process_vector_data()
|
|
197
|
+
process_uncompressed_vectors()
|
|
198
|
+
process_compressed_vectors()
|
|
199
|
+
process_range_data_section()
|
|
200
|
+
_process_vector_section()
|
|
201
|
+
unpack_one_vector()
|
|
202
|
+
decode_fib_zig_zag()
|
|
203
|
+
twos_complement()
|
|
204
|
+
"""
|
|
205
|
+
|
|
206
|
+
# True if the data is from MagO, False if data is from MagI.
is_mago: bool
# 1 if the sensor is active, 0 if not.
is_active: int
# Mission elapsed time of the first packet; start time for the whole day.
shcoarse: int
# Vector samples as [x, y, z, range, time] rows.
vectors: np.ndarray
# Init-only: properties of the first packet; recorded in packet_definitions.
starting_packet: InitVar[MagL1aPacketProperties]
# Packet properties keyed by packet start time (set in __post_init__).
packet_definitions: dict[np.datetime64, MagL1aPacketProperties] = field(init=False)
# Sequence number of the most recent packet added to the object.
most_recent_sequence: int = field(init=False)
# Source sequence counters missing from the day (gap tracking).
missing_sequences: list[int] = field(default_factory=list)
# Start of the day, derived from shcoarse in __post_init__.
start_time: np.datetime64 = field(init=False)
|
|
215
|
+
|
|
216
|
+
def __post_init__(self, starting_packet: MagL1aPacketProperties) -> None:
    """
    Set up packet_definitions, start_time, and most_recent_sequence.

    Parameters
    ----------
    starting_packet : MagL1aPacketProperties
        The packet properties for the first packet in the day, including start time.
    """
    # TODO should this be from starting_packet
    # Truncate the first packet's timestamp down to the day boundary.
    day_start = (J2000_EPOCH + met_to_j2000ns(self.shcoarse)).astype(
        "datetime64[D]"
    )
    self.start_time = day_start
    # Record the properties of the packet that created this object.
    self.packet_definitions = {day_start: starting_packet}
    # Seed gap tracking with the sequence number of the initializing packet.
    self.most_recent_sequence = starting_packet.src_seq_ctr
|
|
233
|
+
|
|
234
|
+
def append_vectors(
    self, additional_vectors: np.ndarray, packet_properties: MagL1aPacketProperties
) -> None:
    """
    Append additional vectors to the current vectors array.

    Also records the packet's properties and tracks any gap in the CCSDS
    source sequence counter between this packet and the previous one.

    Parameters
    ----------
    additional_vectors : numpy.ndarray
        New vectors to append.
    packet_properties : MagL1aPacketProperties
        Additional vector definition to add to the l0_packets dictionary.
    """
    vector_sequence = packet_properties.src_seq_ctr

    self.vectors = np.concatenate([self.vectors, additional_vectors])
    # NOTE(review): this keys the new packet's properties by self.start_time,
    # which is constant for the day - so each append overwrites the previous
    # entry. The class docstring says packet_definitions should be keyed by
    # each packet's own start time; confirm whether a per-packet timestamp
    # should be used here instead.
    self.packet_definitions[self.start_time] = packet_properties

    # Every additional packet should be the next one in the sequence, if not, add
    # the missing sequence(s) to the gap data
    if not self.most_recent_sequence + 1 == vector_sequence:
        self.missing_sequences += list(
            range(self.most_recent_sequence + 1, vector_sequence)
        )
    self.most_recent_sequence = vector_sequence
|
|
259
|
+
|
|
260
|
+
@staticmethod
def calculate_vector_time(
    vectors: np.ndarray, vectors_per_sec: int, start_time: TimeTuple
) -> npt.NDArray:
    """
    Attach timestamps to the vector list, going from shape (n, 4) to (n, 5).

    The first vector is stamped with start_time; every following vector is
    1 / vectors_per_sec seconds later than the one before it.

    Parameters
    ----------
    vectors : numpy.array
        Magnetic vector samples starting at start_time, shape (n, 4).
    vectors_per_sec : int
        Number of vectors per second.
    start_time : TimeTuple
        Start time of the vectors, the timestamp of the first vector.

    Returns
    -------
    vector_objects : numpy.ndarray
        Vectors with a timestamp column appended, in nanoseconds computed via
        cdf.utils.met_to_j2000ns.
    """
    # Spacing between samples in nanoseconds. NOTE(review): int() truncates
    # when 1e9 is not divisible by vectors_per_sec - confirm drift is acceptable.
    step = np.timedelta64(int(1 / vectors_per_sec * 1e9), "ns")
    # TODO: validate that start_time from SHCOARSE is precise enough
    first_time_ns = met_to_j2000ns(start_time.to_seconds())

    sample_count = vectors.shape[0]
    # One evenly spaced timestamp per vector, shaped as an (n, 1) int64 column.
    time_column = np.arange(
        first_time_ns,
        first_time_ns + step * sample_count,
        step,
        dtype=np.int64,
        like=vectors,
    ).reshape(sample_count, -1)
    return np.concatenate([vectors, time_column], axis=1, dtype=np.int64)
|
|
302
|
+
|
|
303
|
+
@staticmethod
def process_vector_data(
    vector_data: np.ndarray,
    primary_count: int,
    secondary_count: int,
    compression: int,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Transform raw vector data into Vectors.

    Vectors are grouped into primary sensor and secondary sensor, and returned
    as a tuple (primary sensor vectors, secondary sensor vectors).

    Parameters
    ----------
    vector_data : numpy.ndarray
        Raw vector data, in bytes, holding both primary and secondary vector
        data; may be compressed or uncompressed.
    primary_count : int
        Count of the number of primary vectors.
    secondary_count : int
        Count of the number of secondary vectors.
    compression : int
        Flag indicating if the data is compressed (1) or uncompressed (0).

    Returns
    -------
    (primary, secondary): (numpy.ndarray, numpy.ndarray)
        Two arrays, each containing tuples of (x, y, z, sample_range) for each
        vector sample.
    """
    if not compression:
        # Uncompressed decoding bit-shifts the values, so widen to int32 up
        # front; either way the return type is int32.
        words = vector_data.astype(np.int32)
        return MagL1a.process_uncompressed_vectors(
            words, primary_count, secondary_count
        )
    # Compressed data is decoded bit by bit, so it must be uint8 before
    # converting to bits.
    raw_bytes = vector_data.astype(np.uint8)
    return MagL1a.process_compressed_vectors(
        raw_bytes, primary_count, secondary_count
    )
|
|
346
|
+
|
|
347
|
+
@staticmethod
|
|
348
|
+
def process_uncompressed_vectors(
|
|
349
|
+
vector_data: np.ndarray, primary_count: int, secondary_count: int
|
|
350
|
+
) -> tuple[np.ndarray, np.ndarray]:
|
|
351
|
+
"""
|
|
352
|
+
Given raw uncompressed packet data, process into Vectors.
|
|
353
|
+
|
|
354
|
+
Vectors are grouped into primary sensor and secondary sensor, and returned as a
|
|
355
|
+
tuple (primary sensor vectors, secondary sensor vectors).
|
|
356
|
+
|
|
357
|
+
Written by MAG instrument team.
|
|
358
|
+
|
|
359
|
+
Parameters
|
|
360
|
+
----------
|
|
361
|
+
vector_data : numpy.ndarray
|
|
362
|
+
Raw vector data, in bytes. Contains both primary and secondary vector data
|
|
363
|
+
(first primary, then secondary).
|
|
364
|
+
primary_count : int
|
|
365
|
+
Count of the number of primary vectors.
|
|
366
|
+
secondary_count : int
|
|
367
|
+
Count of the number of secondary vectors.
|
|
368
|
+
|
|
369
|
+
Returns
|
|
370
|
+
-------
|
|
371
|
+
(primary, secondary): (numpy.ndarray, numpy.ndarray)
|
|
372
|
+
Two arrays, each containing tuples of (x, y, z, sample_range) for each
|
|
373
|
+
vector sample.
|
|
374
|
+
"""
|
|
375
|
+
|
|
376
|
+
def to_signed16(n: int) -> int:
|
|
377
|
+
"""
|
|
378
|
+
Convert an integer to a signed 16-bit integer.
|
|
379
|
+
|
|
380
|
+
Parameters
|
|
381
|
+
----------
|
|
382
|
+
n : int
|
|
383
|
+
The integer to be converted.
|
|
384
|
+
|
|
385
|
+
Returns
|
|
386
|
+
-------
|
|
387
|
+
int
|
|
388
|
+
Converted integer.
|
|
389
|
+
"""
|
|
390
|
+
n = n & 0xFFFF
|
|
391
|
+
return n | (-(n & 0x8000))
|
|
392
|
+
|
|
393
|
+
pos = 0
|
|
394
|
+
primary_vectors = []
|
|
395
|
+
secondary_vectors = []
|
|
396
|
+
|
|
397
|
+
# Since the vectors are stored as 50 bit chunks but accessed via hex (4 bit
|
|
398
|
+
# chunks) there is some shifting required for processing the bytes.
|
|
399
|
+
# However, from a bit processing perspective, the first 48 bits of each 50 bit
|
|
400
|
+
# chunk corresponds to 3 16 bit signed integers. The last 2 bits are the sensor
|
|
401
|
+
# range.
|
|
402
|
+
|
|
403
|
+
for i in range(primary_count + secondary_count): # 0..63 say
|
|
404
|
+
x, y, z, rng = 0, 0, 0, 0
|
|
405
|
+
if i % 4 == 0: # start at bit 0, take 8 bits + 8bits
|
|
406
|
+
# pos = 0, 25, 50...
|
|
407
|
+
x = (
|
|
408
|
+
((vector_data[pos + 0] & 0xFF) << 8)
|
|
409
|
+
| ((vector_data[pos + 1] & 0xFF) << 0)
|
|
410
|
+
) & 0xFFFF
|
|
411
|
+
y = (
|
|
412
|
+
((vector_data[pos + 2] & 0xFF) << 8)
|
|
413
|
+
| ((vector_data[pos + 3] & 0xFF) << 0)
|
|
414
|
+
) & 0xFFFF
|
|
415
|
+
z = (
|
|
416
|
+
((vector_data[pos + 4] & 0xFF) << 8)
|
|
417
|
+
| ((vector_data[pos + 5] & 0xFF) << 0)
|
|
418
|
+
) & 0xFFFF
|
|
419
|
+
rng = (vector_data[pos + 6] >> 6) & 0x3
|
|
420
|
+
pos += 6
|
|
421
|
+
elif i % 4 == 1: # start at bit 2, take 6 bits, 8 bit, 2 bits per vector
|
|
422
|
+
# pos = 6, 31...
|
|
423
|
+
x = (
|
|
424
|
+
((vector_data[pos + 0] & 0x3F) << 10)
|
|
425
|
+
| ((vector_data[pos + 1] & 0xFF) << 2)
|
|
426
|
+
| ((vector_data[pos + 2] >> 6) & 0x03)
|
|
427
|
+
) & 0xFFFF
|
|
428
|
+
y = (
|
|
429
|
+
((vector_data[pos + 2] & 0x3F) << 10)
|
|
430
|
+
| ((vector_data[pos + 3] & 0xFF) << 2)
|
|
431
|
+
| ((vector_data[pos + 4] >> 6) & 0x03)
|
|
432
|
+
) & 0xFFFF
|
|
433
|
+
z = (
|
|
434
|
+
((vector_data[pos + 4] & 0x3F) << 10)
|
|
435
|
+
| ((vector_data[pos + 5] & 0xFF) << 2)
|
|
436
|
+
| ((vector_data[pos + 6] >> 6) & 0x03)
|
|
437
|
+
) & 0xFFFF
|
|
438
|
+
rng = (vector_data[pos + 6] >> 4) & 0x3
|
|
439
|
+
pos += 6
|
|
440
|
+
elif i % 4 == 2: # start at bit 4, take 4 bits, 8 bits, 4 bits per vector
|
|
441
|
+
# pos = 12, 37...
|
|
442
|
+
x = (
|
|
443
|
+
((vector_data[pos + 0] & 0x0F) << 12)
|
|
444
|
+
| ((vector_data[pos + 1] & 0xFF) << 4)
|
|
445
|
+
| ((vector_data[pos + 2] >> 4) & 0x0F)
|
|
446
|
+
) & 0xFFFF
|
|
447
|
+
y = (
|
|
448
|
+
((vector_data[pos + 2] & 0x0F) << 12)
|
|
449
|
+
| ((vector_data[pos + 3] & 0xFF) << 4)
|
|
450
|
+
| ((vector_data[pos + 4] >> 4) & 0x0F)
|
|
451
|
+
) & 0xFFFF
|
|
452
|
+
z = (
|
|
453
|
+
((vector_data[pos + 4] & 0x0F) << 12)
|
|
454
|
+
| ((vector_data[pos + 5] & 0xFF) << 4)
|
|
455
|
+
| ((vector_data[pos + 6] >> 4) & 0x0F)
|
|
456
|
+
) & 0xFFFF
|
|
457
|
+
rng = (vector_data[pos + 6] >> 2) & 0x3
|
|
458
|
+
pos += 6
|
|
459
|
+
elif i % 4 == 3: # start at bit 6, take 2 bits, 8 bits, 6 bits per vector
|
|
460
|
+
# pos = 18, 43...
|
|
461
|
+
x = (
|
|
462
|
+
((vector_data[pos + 0] & 0x03) << 14)
|
|
463
|
+
| ((vector_data[pos + 1] & 0xFF) << 6)
|
|
464
|
+
| ((vector_data[pos + 2] >> 2) & 0x3F)
|
|
465
|
+
) & 0xFFFF
|
|
466
|
+
y = (
|
|
467
|
+
((vector_data[pos + 2] & 0x03) << 14)
|
|
468
|
+
| ((vector_data[pos + 3] & 0xFF) << 6)
|
|
469
|
+
| ((vector_data[pos + 4] >> 2) & 0x3F)
|
|
470
|
+
) & 0xFFFF
|
|
471
|
+
z = (
|
|
472
|
+
((vector_data[pos + 4] & 0x03) << 14)
|
|
473
|
+
| ((vector_data[pos + 5] & 0xFF) << 6)
|
|
474
|
+
| ((vector_data[pos + 6] >> 2) & 0x3F)
|
|
475
|
+
) & 0xFFFF
|
|
476
|
+
rng = (vector_data[pos + 6] >> 0) & 0x3
|
|
477
|
+
pos += 7
|
|
478
|
+
|
|
479
|
+
vector = (to_signed16(x), to_signed16(y), to_signed16(z), rng)
|
|
480
|
+
if i < primary_count:
|
|
481
|
+
primary_vectors.append(vector)
|
|
482
|
+
else:
|
|
483
|
+
secondary_vectors.append(vector)
|
|
484
|
+
|
|
485
|
+
return (
|
|
486
|
+
np.array(primary_vectors, dtype=np.int32),
|
|
487
|
+
np.array(secondary_vectors, dtype=np.int32),
|
|
488
|
+
)
|
|
489
|
+
|
|
490
|
+
@staticmethod
def process_compressed_vectors(
    vector_data: np.ndarray, primary_count: int, secondary_count: int
) -> tuple[np.ndarray, np.ndarray]:
    """
    Given raw compressed packet data, process into Vectors.

    To do this, we need to decode the compressed data. The compressed data starts
    with an 8 bit header that defines the width of the uncompressed vectors and
    if there is a range data section. Then, the vector data follows, then the range
    data section if it exists.

    To decode, we start by decoding the first compression_width bits. This is an
    uncompressed primary vector with range. Then, we proceed through the compressed
    data, where each value is fibonacci and zig-zag encoded. This means each value
    ends in 2 sequential ones (11). We split the data along these numbers until
    we reach primary_count vectors.

    The secondary vectors are decoded the same way, starting directly after the last
    primary vector with an uncompressed secondary starting vector and then
    secondary_count compressed vectors.

    The compressed values are differences from the previous vector, so after
    decoding we accumulate the values starting from the first known vector. The
    range data is copied from the starting vector if range_data_section is not
    included.

    Then, if a range data section is included, we decode it and assign it to each
    vector. There are 2 * (primary_count + secondary_count) bits assigned for the
    range data section.

    If any compressed vectors are > 60 bits long (MAX_COMPRESSED_VECTOR_BITS), then
    we switch to uncompressed vectors for the rest of the processing.

    Parameters
    ----------
    vector_data : numpy.ndarray
        Raw vector data, in bytes. Contains both primary and secondary vector data.
    primary_count : int
        Count of the number of primary vectors.
    secondary_count : int
        Count of the number of secondary vectors.

    Returns
    -------
    (primary, secondary): (numpy.ndarray, numpy.ndarray)
        Two arrays, each containing tuples of (x, y, z, sample_range) for each
        vector sample.
    """
    bit_array = np.unpackbits(vector_data)
    # The first 8 bits are a header - 6 bits to indicate the compression width,
    # 1 bit to indicate if there is a range data section, and 1 bit spare.
    compression_width = int("".join([str(i) for i in bit_array[:6]]), 2)
    has_range_data_section = bit_array[6] == 1

    # The full vector includes 3 values of compression_width bits, and excludes
    # range.
    uncompressed_vector_size = compression_width * AXIS_COUNT
    # plus 8 to get past the compression width and range data section
    first_vector_width = uncompressed_vector_size + 8 + RANGE_BIT_WIDTH
    first_vector = MagL1a.unpack_one_vector(
        bit_array[8:first_vector_width], compression_width, True
    )

    # The range data length has 2 bits per vector, minus 2 for the uncompressed
    # first vectors in the primary and secondary sensors.
    # Then, the range data length is padded to the nearest 8 bits.
    expected_range_data_length = (primary_count + secondary_count - 2) * 2
    # NOTE(review): this expression equals -(expected_range_data_length % 8),
    # i.e. zero or negative - confirm it matches the "pad to nearest 8 bits"
    # intent described above.
    end_padding = expected_range_data_length // 8 * 8 - expected_range_data_length

    # Index of the end of the vector data; the range data section (if present)
    # occupies the tail of the bit array.
    end_vector = len(bit_array) - (
        (expected_range_data_length - end_padding) * has_range_data_section
    )

    # Cut off the first vector width and the end range data section if it exists.
    vector_bits = bit_array[first_vector_width - 1 : end_vector]

    # Shift the bit array over one to the left, then sum them up. This is used to
    # find all the places where two 1s occur next to each other, because the sum
    # will be 2 for those indices.
    # For example: [0 0 1 0 1 1] + [1 0 0 1 0 1] = [1 0 1 1 1 2], so the last index
    # has 2 ones in a row.
    # The first bit is invalid, so we remove it at the end.
    sequential_ones = np.where(
        np.add(
            vector_bits,
            np.roll(vector_bits, 1),
        )[1:]
        == 2
    )[0]
    # The first bit is only needed for the np.roll step, so now we remove it.
    # we are left with compressed primary vectors, and all the secondary vectors.
    vector_bits = vector_bits[1:]

    # that unneeded first bit might give us a false first sequential ones value,
    # so if the first index has 2 ones, skip it.
    primary_boundaries = [
        (
            sequential_ones[0] + 1
            if sequential_ones[0] != 0
            else sequential_ones[1] + 1
        )
    ]
    secondary_boundaries = []
    vector_count = 1
    end_primary_vector = 0
    for seq_val in sequential_ones:
        if vector_count > primary_count + secondary_count:
            break
        # Add the end indices of each primary vector to primary_boundaries
        # If we have 3 ones in a row, we should skip that index

        if vector_count < primary_count and (seq_val - primary_boundaries[-1] > 0):
            primary_boundaries.append(seq_val + 1)

            # 3 boundaries equal one vector
            if len(primary_boundaries) % AXIS_COUNT == 0:
                vector_count += 1
                # If the vector length is >60 bits, we switch to uncompressed.
                # So we skip past all the remaining seq_ones.
                if (
                    (len(primary_boundaries) > 4)
                    and (
                        primary_boundaries[-1] - primary_boundaries[-4]
                        > MAX_COMPRESSED_VECTOR_BITS
                    )
                    or (
                        vector_count == 2
                        and primary_boundaries[-1] > MAX_COMPRESSED_VECTOR_BITS
                    )
                ):
                    # Since we know how long each uncompressed vector is,
                    # we can determine the end of the primary vectors.
                    end_primary_vector = (
                        primary_boundaries[-1]
                        + (primary_count - vector_count) * uncompressed_vector_size
                    )
                    vector_count = primary_count

        # If the vector count is equal to the primary count, we are in the first
        # uncompressed secondary vector.
        if vector_count == primary_count:
            # We won't have assigned end_primary_vector unless we hit uncompressed
            # vectors in the primary path. If there are no uncompressed values,
            # we can use the end of primary_boundaries.
            end_primary_vector = (
                primary_boundaries[-1]
                if end_primary_vector == 0
                else end_primary_vector
            )
            if seq_val > end_primary_vector + uncompressed_vector_size + 2:
                # Split just after the uncompressed secondary vector
                secondary_boundaries = [
                    end_primary_vector + uncompressed_vector_size + 2
                ]
                # We have found the first secondary vector
                secondary_boundaries += [seq_val + 1]
                vector_count += 1

        # If we're greater than primary_count, we are in the secondary vectors.
        # Like before, we skip indices with 3 ones.
        if vector_count > primary_count and seq_val - secondary_boundaries[-1] > 0:
            secondary_boundaries.append(seq_val + 1)
            # We have the start of the secondary vectors in
            # secondary_boundaries, so we need to subtract one to determine
            # the vector count. (in primary_boundaries we know we start at 0.)
            if (len(secondary_boundaries) - 1) % AXIS_COUNT == 0:
                vector_count += 1
                if (
                    secondary_boundaries[-1] - secondary_boundaries[-4]
                    > MAX_COMPRESSED_VECTOR_BITS
                ):
                    # The rest of the secondary values are uncompressed.
                    vector_count = primary_count + secondary_count + 1

    # Split along the boundaries of the primary vectors. This gives us a list of
    # bit arrays, each corresponding to a primary value (1/3 of a vector).
    primary_split_bits = np.split(
        vector_bits,
        primary_boundaries,
    )[:-1]
    primary_vectors = MagL1a._process_vector_section(
        vector_bits,
        primary_split_bits,
        primary_boundaries[-1],
        first_vector,
        primary_count,
        uncompressed_vector_size,
        compression_width,
    )

    # Secondary vector processing
    first_secondary_vector = MagL1a.unpack_one_vector(
        vector_bits[
            end_primary_vector : end_primary_vector + uncompressed_vector_size + 2
        ],
        compression_width,
        True,
    )

    # Split up the bit array, skipping past the primary vector and uncompressed
    # starting vector
    secondary_split_bits = np.split(
        vector_bits[: secondary_boundaries[-1]], secondary_boundaries[:-1]
    )[1:]

    secondary_vectors = MagL1a._process_vector_section(
        vector_bits,
        secondary_split_bits,
        secondary_boundaries[-1],
        first_secondary_vector,
        secondary_count,
        uncompressed_vector_size,
        compression_width,
    )

    # If there is a range data section, it describes all the data, compressed or
    # uncompressed.
    if has_range_data_section:
        primary_vectors = MagL1a.process_range_data_section(
            bit_array[end_vector : end_vector + (primary_count - 1) * 2],
            primary_vectors,
        )
        secondary_vectors = MagL1a.process_range_data_section(
            bit_array[
                end_vector + (primary_count - 1) * 2 : end_vector
                + (primary_count + secondary_count - 2) * 2
            ],
            secondary_vectors,
        )
    return primary_vectors, secondary_vectors
|
|
721
|
+
|
|
722
|
+
@staticmethod
def _process_vector_section(  # noqa: PLR0913
    vector_bits: np.ndarray,
    split_bits: list,
    last_index: int,
    first_vector: np.ndarray,
    vector_count: int,
    uncompressed_vector_size: int,
    compression_width: int,
) -> np.ndarray:
    """
    Generate a section of vector data, primary or secondary.

    Should only be used by process_compressed_vectors.

    Parameters
    ----------
    vector_bits : numpy.ndarray
        Numpy array of bits, representing the vector data. Does not include the
        first primary vector.
    split_bits : list
        An array of where to split vector bits, by passing in a list of indices.
    last_index : int
        The index of the last vector in the section (primary or secondary).
    first_vector : numpy.ndarray
        The first vector in the section, (x, y, z, range).
    vector_count : int
        The number of vectors in the section (primary or secondary).
    uncompressed_vector_size : int
        The size of an uncompressed vector in bits.
    compression_width : int
        The width of the uncompressed values - uncompressed_vector_size/3.

    Returns
    -------
    numpy.ndarray
        An array of processed vectors.
    """
    # Decode each fibonacci/zig-zag encoded chunk into a signed difference.
    vector_diffs = list(map(MagL1a.decode_fib_zig_zag, split_bits))
    # Accumulate the differences starting from the known first vector.
    vectors = MagL1a.convert_diffs_to_vectors(
        first_vector, vector_diffs, vector_count
    )
    # If we are missing any vectors from primary_split_bits, we know we have
    # uncompressed vectors to process. split_bits holds one entry per axis
    # value, so AXIS_COUNT entries per vector, plus 1 for the starting vector.
    compressed_count = math.ceil(len(split_bits) / AXIS_COUNT) + 1
    uncompressed_count = vector_count - compressed_count

    if uncompressed_count:
        end = last_index + uncompressed_vector_size * uncompressed_count
        uncompressed_vectors = vector_bits[last_index : end + 1]

        for i in range(uncompressed_count):
            # Each uncompressed vector is a fixed-width slice with no range bits.
            decoded_vector = MagL1a.unpack_one_vector(
                uncompressed_vectors[
                    i * uncompressed_vector_size : (i + 1)
                    * uncompressed_vector_size
                ],
                compression_width,
                False,
            )
            vectors[i + compressed_count] = decoded_vector
            # Range is absent from the uncompressed data; copy the first
            # vector's range, matching the compressed path.
            vectors[i + compressed_count][3] = vectors[0][3]

    return vectors
|
|
786
|
+
|
|
787
|
+
@staticmethod
|
|
788
|
+
def process_range_data_section(
|
|
789
|
+
range_data: np.ndarray, vectors: np.ndarray
|
|
790
|
+
) -> np.ndarray:
|
|
791
|
+
"""
|
|
792
|
+
Given a range data section and vectors, return an updated vector array.
|
|
793
|
+
|
|
794
|
+
Each range value has 2 bits. range_data will have a length of n*2, where n is
|
|
795
|
+
the number of vectors in vectors.
|
|
796
|
+
|
|
797
|
+
Parameters
|
|
798
|
+
----------
|
|
799
|
+
range_data : numpy.ndarray
|
|
800
|
+
Array of range values, where each value is one bit. The range values have
|
|
801
|
+
2 bits per vector, so range data should be 2 * len(vectors) - 1 in length.
|
|
802
|
+
vectors : numpy.ndarray
|
|
803
|
+
Array of vectors, where each vector is a tuple of (x, y, z, range).
|
|
804
|
+
The range value will be overwritten by range_data, and x, y, z will remain
|
|
805
|
+
the same.
|
|
806
|
+
|
|
807
|
+
Returns
|
|
808
|
+
-------
|
|
809
|
+
numpy.ndarray
|
|
810
|
+
Updated array of vectors, identical to vectors with the range values
|
|
811
|
+
updated from range_data.
|
|
812
|
+
"""
|
|
813
|
+
if len(range_data) != (len(vectors) - 1) * 2:
|
|
814
|
+
raise ValueError(
|
|
815
|
+
"Incorrect length for range_data, there should be two bits per vector, "
|
|
816
|
+
"excluding the first."
|
|
817
|
+
)
|
|
818
|
+
|
|
819
|
+
updated_vectors: np.ndarray = np.copy(vectors)
|
|
820
|
+
range_str = "".join([str(i) for i in range_data])
|
|
821
|
+
for i in range(len(vectors) - 1):
|
|
822
|
+
range_int = int(range_str[i * 2 : i * 2 + 2], 2)
|
|
823
|
+
updated_vectors[i + 1][3] = range_int
|
|
824
|
+
return updated_vectors
|
|
825
|
+
|
|
826
|
+
@staticmethod
def convert_diffs_to_vectors(
    first_vector: np.ndarray,
    vector_differences: list[int],
    vector_count: int,
) -> np.ndarray:
    """
    Given a list of differences and the first vector, return calculated vectors.

    This is calculated as follows:
    vector[i][0] = vector[i-1][0] + vector_differences[i][0]
    vector[i][1] = vector[i-1][1] + vector_differences[i][1]
    vector[i][2] = vector[i-1][2] + vector_differences[i][2]
    vector[i][3] = first_vector[3]

    The fourth element of each vector is the range value, which we assume is
    the same as the first vector.

    Parameters
    ----------
    first_vector : numpy.ndarray
        A numpy array of 3 signed integers and a range value, representing the
        start vector.
    vector_differences : numpy.ndarray
        A flat sequence of signed integer differences, AXIS_COUNT values per
        vector, representing the differences between consecutive vectors.
    vector_count : int
        The expected number of vectors in the output.

    Returns
    -------
    numpy.ndarray
        A numpy array of shape (vector_count, 4) of signed integers,
        representing the calculated vectors.

    Raises
    ------
    ValueError
        If the number of differences is not divisible by AXIS_COUNT.
    """
    vectors: np.ndarray = np.empty((vector_count, 4), dtype=np.int32)
    vectors[0] = first_vector
    if len(vector_differences) % AXIS_COUNT != 0:
        # BUGFIX: the lengths were previously passed as extra ValueError
        # arguments (logging style) and never interpolated into the message;
        # format them into the text instead.
        raise ValueError(
            "Error! Computed compressed vector differences are not "
            "divisible by 3 - meaning some data is missing. "
            f"Expected length: {vector_count * AXIS_COUNT}, actual length: "
            f"{len(vector_differences)}"
        )
    # Walk the flat diff stream: each group of AXIS_COUNT values completes one
    # vector, whose range is copied from the first vector.
    index = 0
    vector_index = 1
    for diff in vector_differences:
        vectors[vector_index][index] = vectors[vector_index - 1][index] + diff
        index += 1
        if index == 3:
            # Update range section to match that of the first vector
            vectors[vector_index][3] = vectors[0][3]
            index = 0
            vector_index += 1
    return vectors
|
|
883
|
+
|
|
884
|
+
@staticmethod
def unpack_one_vector(
    vector_data: np.ndarray, width: int, has_range: int
) -> np.ndarray:
    """
    Unpack a single vector from the vector data.

    Input should be a numpy array of bits, eg [0, 0, 0, 1], of the length width*3,
    or width*3 + 2 if has_range is True.

    Parameters
    ----------
    vector_data : numpy.ndarray
        Vector data for the vector to unpack. This is uncompressed data as a numpy
        array of bits (the output of np.unpackbits).
    width : int
        The width of each vector component in bits. This needs to be a multiple of
        8 (including only whole bytes).
    has_range : int
        1 if the vector data includes range data, 0 if not. The first vector always
        has range data.

    Returns
    -------
    numpy.ndarray
        Unpacked vector data as a numpy array of 3 signed ints plus a range (0 if
        has_range is False).

    Raises
    ------
    ValueError
        If the input is not an array of bits, or its length does not match
        ``width`` and ``has_range``.
    """
    if np.any(vector_data > 1):
        # BUG FIX: message previously used implicit string concatenation
        # ('"...bits as" "input."') and rendered as "asinput.".
        raise ValueError(
            "unpack_one_vector method is expecting an array of bits as input."
        )

    if len(vector_data) != width * AXIS_COUNT + RANGE_BIT_WIDTH * has_range:
        raise ValueError(
            f"Invalid length {len(vector_data)} for vector data. Expected "
            f"{width * AXIS_COUNT} or {width * AXIS_COUNT + RANGE_BIT_WIDTH} if "
            "has_range."
        )

    # Left-pad each component slice up to a whole byte so np.packbits yields
    # the value as big-endian bytes.
    padding = np.zeros(8 - (width % 8), dtype=np.uint8)

    # take slices of the input data and pack from an array of bits to an array of
    # uint8 bytes
    x = np.packbits(np.concatenate((padding, vector_data[:width])))
    y = np.packbits(np.concatenate((padding, vector_data[width : 2 * width])))
    z = np.packbits(np.concatenate((padding, vector_data[2 * width : 3 * width])))

    # The trailing RANGE_BIT_WIDTH (2) bits hold the range value; only read
    # them when the vector actually carries range data.
    rng = (
        int("".join(str(i) for i in vector_data[-2:]), 2) if has_range else 0
    )

    # Convert each component to a signed integer using twos complement
    signed_vals: np.ndarray = np.array(
        [
            MagL1a.twos_complement(x, width),
            MagL1a.twos_complement(y, width),
            MagL1a.twos_complement(z, width),
            rng,
        ],
        dtype=np.int32,
    )
    return signed_vals
|
|
947
|
+
@staticmethod
|
|
948
|
+
def twos_complement(value: np.ndarray, bits: int) -> np.int32:
|
|
949
|
+
"""
|
|
950
|
+
Compute the two's complement of an integer.
|
|
951
|
+
|
|
952
|
+
This function will return the two's complement of a given bytearray value.
|
|
953
|
+
The input value should be a bytearray or a numpy array of uint8 values.
|
|
954
|
+
|
|
955
|
+
If the integer with respect to the number of bits does not have a sign bit
|
|
956
|
+
set (first bit is 0), then the input value is returned without modification.
|
|
957
|
+
|
|
958
|
+
Parameters
|
|
959
|
+
----------
|
|
960
|
+
value : numpy.ndarray
|
|
961
|
+
An array of bytes representing an integer. In numpy, this should be an
|
|
962
|
+
array of uint8 values.
|
|
963
|
+
bits : int
|
|
964
|
+
Number of bits to use for the 2's complement.
|
|
965
|
+
|
|
966
|
+
Returns
|
|
967
|
+
-------
|
|
968
|
+
numpy.int32
|
|
969
|
+
Two's complement of the input value, as a signed int.
|
|
970
|
+
"""
|
|
971
|
+
integer_value = int.from_bytes(value, "big")
|
|
972
|
+
if (integer_value & (1 << (bits - 1))) != 0:
|
|
973
|
+
output_value = integer_value - (1 << bits)
|
|
974
|
+
else:
|
|
975
|
+
output_value = integer_value
|
|
976
|
+
return np.int32(output_value)
|
|
977
|
+
|
|
978
|
+
@staticmethod
def decode_fib_zig_zag(code: np.ndarray) -> int:
    """
    Decode a fibonacci and zig-zag encoded value.

    Parameters
    ----------
    code : numpy.ndarray
        The code to decode, in the form of an array of bits (eg [0, 1, 0, 1, 1]).
        This should always end in 2 ones (which indicates the end of a fibonacci
        encoding).

    Returns
    -------
    value: int
        Signed integer value, with fibonacci and zig-zag encoding removed.

    Raises
    ------
    ValueError
        If the code does not terminate in two sequential ones.
    """
    # A valid Fibonacci codeword always terminates with two consecutive ones.
    if len(code) < 2 or not (code[-1] == 1 and code[-2] == 1):
        raise ValueError(
            f"Error when decoding {code} - fibonacci encoded values "
            f"should end in 2 sequential ones."
        )

    # Drop the terminator bit, then sum the Fibonacci numbers selected by the
    # remaining set bits (Fibonacci coding is offset by one, hence the -1).
    data_bits = code[:-1]
    value: int = sum(FIBONACCI_SEQUENCE[: len(data_bits)] * data_bits) - 1
    # Zig-zag decode maps the non-negative code back onto signed integers.
    return int((value >> 1) ^ (-(value & 1)))
|