imap-processing 0.12.0-py3-none-any.whl → 0.13.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/__init__.py +1 -0
- imap_processing/_version.py +2 -2
- imap_processing/ccsds/ccsds_data.py +1 -2
- imap_processing/ccsds/excel_to_xtce.py +1 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +18 -12
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +569 -0
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1846 -128
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +5 -5
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +20 -1
- imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +6 -4
- imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +3 -3
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -0
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +22 -0
- imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +16 -0
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +178 -5
- imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +5045 -41
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +33 -19
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +8 -48
- imap_processing/cdf/utils.py +41 -33
- imap_processing/cli.py +463 -234
- imap_processing/codice/codice_l1a.py +260 -47
- imap_processing/codice/codice_l1b.py +51 -152
- imap_processing/codice/constants.py +38 -1
- imap_processing/ena_maps/ena_maps.py +658 -65
- imap_processing/ena_maps/utils/coordinates.py +1 -1
- imap_processing/ena_maps/utils/spatial_utils.py +10 -5
- imap_processing/glows/l1a/glows_l1a.py +28 -99
- imap_processing/glows/l1a/glows_l1a_data.py +2 -2
- imap_processing/glows/l1b/glows_l1b.py +1 -4
- imap_processing/glows/l1b/glows_l1b_data.py +1 -3
- imap_processing/glows/l2/glows_l2.py +2 -5
- imap_processing/hi/l1a/hi_l1a.py +31 -12
- imap_processing/hi/l1b/hi_l1b.py +80 -43
- imap_processing/hi/l1c/hi_l1c.py +12 -16
- imap_processing/hit/ancillary/imap_hit_l1b-to-l2-sector-dt0-factors_20250219_v002.csv +81 -0
- imap_processing/hit/hit_utils.py +93 -35
- imap_processing/hit/l0/decom_hit.py +3 -1
- imap_processing/hit/l1a/hit_l1a.py +30 -25
- imap_processing/hit/l1b/constants.py +6 -2
- imap_processing/hit/l1b/hit_l1b.py +279 -318
- imap_processing/hit/l2/constants.py +37 -0
- imap_processing/hit/l2/hit_l2.py +373 -264
- imap_processing/ialirt/l0/parse_mag.py +138 -10
- imap_processing/ialirt/l0/process_swapi.py +69 -0
- imap_processing/ialirt/l0/process_swe.py +318 -22
- imap_processing/ialirt/packet_definitions/ialirt.xml +216 -212
- imap_processing/ialirt/packet_definitions/ialirt_codicehi.xml +1 -1
- imap_processing/ialirt/packet_definitions/ialirt_codicelo.xml +1 -1
- imap_processing/ialirt/packet_definitions/ialirt_swapi.xml +14 -14
- imap_processing/ialirt/utils/grouping.py +1 -1
- imap_processing/idex/idex_constants.py +9 -1
- imap_processing/idex/idex_l0.py +22 -8
- imap_processing/idex/idex_l1a.py +75 -44
- imap_processing/idex/idex_l1b.py +9 -8
- imap_processing/idex/idex_l2a.py +79 -45
- imap_processing/idex/idex_l2b.py +120 -0
- imap_processing/idex/idex_variable_unpacking_and_eu_conversion.csv +33 -39
- imap_processing/idex/packet_definitions/idex_housekeeping_packet_definition.xml +9130 -0
- imap_processing/lo/l0/lo_science.py +1 -2
- imap_processing/lo/l1a/lo_l1a.py +1 -4
- imap_processing/lo/l1b/lo_l1b.py +527 -6
- imap_processing/lo/l1b/tof_conversions.py +11 -0
- imap_processing/lo/l1c/lo_l1c.py +1 -4
- imap_processing/mag/constants.py +43 -0
- imap_processing/mag/imap_mag_sdc_configuration_v001.py +8 -0
- imap_processing/mag/l1a/mag_l1a.py +2 -9
- imap_processing/mag/l1a/mag_l1a_data.py +10 -10
- imap_processing/mag/l1b/mag_l1b.py +84 -17
- imap_processing/mag/l1c/interpolation_methods.py +180 -3
- imap_processing/mag/l1c/mag_l1c.py +236 -70
- imap_processing/mag/l2/mag_l2.py +140 -0
- imap_processing/mag/l2/mag_l2_data.py +288 -0
- imap_processing/spacecraft/quaternions.py +1 -3
- imap_processing/spice/geometry.py +3 -3
- imap_processing/spice/kernels.py +0 -276
- imap_processing/spice/pointing_frame.py +257 -0
- imap_processing/spice/repoint.py +48 -19
- imap_processing/spice/spin.py +38 -33
- imap_processing/spice/time.py +24 -0
- imap_processing/swapi/l1/swapi_l1.py +16 -12
- imap_processing/swapi/l2/swapi_l2.py +116 -4
- imap_processing/swapi/swapi_utils.py +32 -0
- imap_processing/swe/l1a/swe_l1a.py +2 -9
- imap_processing/swe/l1a/swe_science.py +8 -11
- imap_processing/swe/l1b/swe_l1b.py +898 -23
- imap_processing/swe/l2/swe_l2.py +21 -77
- imap_processing/swe/utils/swe_constants.py +1 -0
- imap_processing/tests/ccsds/test_excel_to_xtce.py +1 -1
- imap_processing/tests/cdf/test_utils.py +14 -16
- imap_processing/tests/codice/conftest.py +44 -33
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hi-pha_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-pha_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/test_codice_l1a.py +20 -11
- imap_processing/tests/codice/test_codice_l1b.py +6 -7
- imap_processing/tests/conftest.py +78 -22
- imap_processing/tests/ena_maps/test_ena_maps.py +462 -33
- imap_processing/tests/ena_maps/test_spatial_utils.py +1 -1
- imap_processing/tests/glows/conftest.py +10 -14
- imap_processing/tests/glows/test_glows_decom.py +4 -4
- imap_processing/tests/glows/test_glows_l1a_cdf.py +6 -27
- imap_processing/tests/glows/test_glows_l1a_data.py +6 -8
- imap_processing/tests/glows/test_glows_l1b.py +11 -11
- imap_processing/tests/glows/test_glows_l1b_data.py +5 -5
- imap_processing/tests/glows/test_glows_l2.py +2 -8
- imap_processing/tests/hi/conftest.py +1 -1
- imap_processing/tests/hi/test_hi_l1b.py +10 -12
- imap_processing/tests/hi/test_hi_l1c.py +27 -24
- imap_processing/tests/hi/test_l1a.py +7 -9
- imap_processing/tests/hi/test_science_direct_event.py +2 -2
- imap_processing/tests/hit/helpers/l1_validation.py +44 -43
- imap_processing/tests/hit/test_decom_hit.py +1 -1
- imap_processing/tests/hit/test_hit_l1a.py +9 -9
- imap_processing/tests/hit/test_hit_l1b.py +172 -217
- imap_processing/tests/hit/test_hit_l2.py +380 -118
- imap_processing/tests/hit/test_hit_utils.py +122 -55
- imap_processing/tests/hit/validation_data/hit_l1b_standard_sample2_nsrl_v4_3decimals.csv +62 -62
- imap_processing/tests/hit/validation_data/sci_sample_raw.csv +1 -1
- imap_processing/tests/ialirt/unit/test_decom_ialirt.py +16 -81
- imap_processing/tests/ialirt/unit/test_grouping.py +2 -2
- imap_processing/tests/ialirt/unit/test_parse_mag.py +71 -16
- imap_processing/tests/ialirt/unit/test_process_codicehi.py +3 -3
- imap_processing/tests/ialirt/unit/test_process_codicelo.py +3 -10
- imap_processing/tests/ialirt/unit/test_process_ephemeris.py +4 -4
- imap_processing/tests/ialirt/unit/test_process_hit.py +3 -3
- imap_processing/tests/ialirt/unit/test_process_swapi.py +24 -16
- imap_processing/tests/ialirt/unit/test_process_swe.py +115 -7
- imap_processing/tests/idex/conftest.py +72 -7
- imap_processing/tests/idex/test_data/imap_idex_l0_raw_20241206_v001.pkts +0 -0
- imap_processing/tests/idex/test_data/imap_idex_l0_raw_20250108_v001.pkts +0 -0
- imap_processing/tests/idex/test_idex_l0.py +33 -11
- imap_processing/tests/idex/test_idex_l1a.py +50 -23
- imap_processing/tests/idex/test_idex_l1b.py +104 -25
- imap_processing/tests/idex/test_idex_l2a.py +48 -32
- imap_processing/tests/idex/test_idex_l2b.py +93 -0
- imap_processing/tests/lo/test_lo_l1a.py +3 -3
- imap_processing/tests/lo/test_lo_l1b.py +371 -6
- imap_processing/tests/lo/test_lo_l1c.py +1 -1
- imap_processing/tests/lo/test_lo_science.py +6 -7
- imap_processing/tests/lo/test_star_sensor.py +1 -1
- imap_processing/tests/mag/conftest.py +58 -9
- imap_processing/tests/mag/test_mag_decom.py +4 -3
- imap_processing/tests/mag/test_mag_l1a.py +13 -7
- imap_processing/tests/mag/test_mag_l1b.py +9 -9
- imap_processing/tests/mag/test_mag_l1c.py +151 -47
- imap_processing/tests/mag/test_mag_l2.py +130 -0
- imap_processing/tests/mag/test_mag_validation.py +144 -7
- imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-magi-normal-in.csv +1217 -0
- imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-magi-normal-out.csv +1857 -0
- imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-mago-normal-in.csv +1217 -0
- imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-mago-normal-out.csv +1857 -0
- imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-magi-normal-in.csv +1217 -0
- imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-magi-normal-out.csv +1793 -0
- imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-mago-normal-in.csv +1217 -0
- imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-mago-normal-out.csv +1793 -0
- imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-magi-burst-in.csv +2561 -0
- imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-magi-normal-in.csv +961 -0
- imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-magi-normal-out.csv +1539 -0
- imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-mago-normal-in.csv +1921 -0
- imap_processing/tests/mag/validation/L1c/T015/mag-l1b-l1c-t015-mago-normal-out.csv +2499 -0
- imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-magi-normal-in.csv +865 -0
- imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-magi-normal-out.csv +1196 -0
- imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-mago-normal-in.csv +1729 -0
- imap_processing/tests/mag/validation/L1c/T016/mag-l1b-l1c-t016-mago-normal-out.csv +3053 -0
- imap_processing/tests/mag/validation/L2/imap_mag_l1b_norm-mago_20251017_v002.cdf +0 -0
- imap_processing/tests/mag/validation/calibration/imap_mag_l2-calibration-matrices_20251017_v004.cdf +0 -0
- imap_processing/tests/mag/validation/calibration/imap_mag_l2-offsets-norm_20251017_20251017_v001.cdf +0 -0
- imap_processing/tests/spacecraft/test_quaternions.py +1 -1
- imap_processing/tests/spice/test_data/fake_repoint_data.csv +4 -4
- imap_processing/tests/spice/test_data/fake_spin_data.csv +11 -11
- imap_processing/tests/spice/test_geometry.py +3 -3
- imap_processing/tests/spice/test_kernels.py +1 -200
- imap_processing/tests/spice/test_pointing_frame.py +185 -0
- imap_processing/tests/spice/test_repoint.py +20 -10
- imap_processing/tests/spice/test_spin.py +50 -9
- imap_processing/tests/spice/test_time.py +14 -0
- imap_processing/tests/swapi/lut/imap_swapi_esa-unit-conversion_20250211_v000.csv +73 -0
- imap_processing/tests/swapi/lut/imap_swapi_lut-notes_20250211_v000.csv +1025 -0
- imap_processing/tests/swapi/test_swapi_l1.py +7 -9
- imap_processing/tests/swapi/test_swapi_l2.py +180 -8
- imap_processing/tests/swe/lut/checker-board-indices.csv +24 -0
- imap_processing/tests/swe/lut/imap_swe_esa-lut_20250301_v000.csv +385 -0
- imap_processing/tests/swe/lut/imap_swe_l1b-in-flight-cal_20240510_20260716_v000.csv +3 -0
- imap_processing/tests/swe/test_swe_l1a.py +6 -6
- imap_processing/tests/swe/test_swe_l1a_science.py +3 -3
- imap_processing/tests/swe/test_swe_l1b.py +162 -24
- imap_processing/tests/swe/test_swe_l2.py +82 -102
- imap_processing/tests/test_cli.py +171 -88
- imap_processing/tests/test_utils.py +2 -1
- imap_processing/tests/ultra/data/mock_data.py +49 -21
- imap_processing/tests/ultra/unit/conftest.py +53 -70
- imap_processing/tests/ultra/unit/test_badtimes.py +2 -4
- imap_processing/tests/ultra/unit/test_cullingmask.py +4 -6
- imap_processing/tests/ultra/unit/test_de.py +3 -10
- imap_processing/tests/ultra/unit/test_decom_apid_880.py +27 -76
- imap_processing/tests/ultra/unit/test_decom_apid_881.py +15 -16
- imap_processing/tests/ultra/unit/test_decom_apid_883.py +12 -10
- imap_processing/tests/ultra/unit/test_decom_apid_896.py +202 -55
- imap_processing/tests/ultra/unit/test_lookup_utils.py +23 -1
- imap_processing/tests/ultra/unit/test_spacecraft_pset.py +3 -4
- imap_processing/tests/ultra/unit/test_ultra_l1a.py +84 -307
- imap_processing/tests/ultra/unit/test_ultra_l1b.py +30 -12
- imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +2 -2
- imap_processing/tests/ultra/unit/test_ultra_l1b_culling.py +4 -1
- imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +163 -29
- imap_processing/tests/ultra/unit/test_ultra_l1c.py +5 -5
- imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +32 -43
- imap_processing/tests/ultra/unit/test_ultra_l2.py +230 -0
- imap_processing/ultra/constants.py +1 -1
- imap_processing/ultra/l0/decom_tools.py +21 -34
- imap_processing/ultra/l0/decom_ultra.py +168 -204
- imap_processing/ultra/l0/ultra_utils.py +152 -136
- imap_processing/ultra/l1a/ultra_l1a.py +55 -243
- imap_processing/ultra/l1b/badtimes.py +1 -4
- imap_processing/ultra/l1b/cullingmask.py +2 -6
- imap_processing/ultra/l1b/de.py +62 -47
- imap_processing/ultra/l1b/extendedspin.py +8 -4
- imap_processing/ultra/l1b/lookup_utils.py +72 -9
- imap_processing/ultra/l1b/ultra_l1b.py +3 -8
- imap_processing/ultra/l1b/ultra_l1b_culling.py +4 -4
- imap_processing/ultra/l1b/ultra_l1b_extended.py +236 -78
- imap_processing/ultra/l1c/histogram.py +2 -6
- imap_processing/ultra/l1c/spacecraft_pset.py +2 -4
- imap_processing/ultra/l1c/ultra_l1c.py +1 -5
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +107 -60
- imap_processing/ultra/l2/ultra_l2.py +299 -0
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +526 -0
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +526 -0
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +526 -0
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +526 -0
- imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +2 -2
- imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +2 -0
- imap_processing/ultra/packet_definitions/README.md +38 -0
- imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +15302 -482
- imap_processing/ultra/utils/ultra_l1_utils.py +13 -12
- imap_processing/utils.py +1 -1
- {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/METADATA +3 -2
- {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/RECORD +264 -225
- imap_processing/hi/l1b/hi_eng_unit_convert_table.csv +0 -154
- imap_processing/mag/imap_mag_sdc-configuration_v001.yaml +0 -6
- imap_processing/mag/l1b/__init__.py +0 -0
- imap_processing/swe/l1b/swe_esa_lookup_table.csv +0 -1441
- imap_processing/swe/l1b/swe_l1b_science.py +0 -699
- imap_processing/tests/swe/test_swe_l1b_science.py +0 -103
- imap_processing/ultra/lookup_tables/dps_sensitivity45.cdf +0 -0
- imap_processing/ultra/lookup_tables/ultra_90_dps_exposure_compressed.cdf +0 -0
- /imap_processing/idex/packet_definitions/{idex_packet_definition.xml → idex_science_packet_definition.xml} +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/20240827095047_SWE_IALIRT_packet.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971383-404.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971384-405.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971385-406.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971386-407.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971387-408.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971388-409.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971389-410.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971390-411.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/461971391-412.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/BinLog CCSDS_FRAG_TLM_20240826_152323Z_IALIRT_data_for_SDC.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/IALiRT Raw Packet Telemetry.txt +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/apid01152.tlm +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/eu_SWP_IAL_20240826_152033.csv +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/hi_fsw_view_1_ccsds.bin +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/hit_ialirt_sample.ccsds +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/hit_ialirt_sample.csv +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/idle_export_eu.SWE_IALIRT_20240827_093852.csv +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/imap_codice_l1a_hi-ialirt_20240523200000_v0.0.0.cdf +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/imap_codice_l1a_lo-ialirt_20241110193700_v0.0.0.cdf +0 -0
- /imap_processing/tests/ialirt/{test_data → data}/l0/sample_decoded_i-alirt_data.csv +0 -0
- /imap_processing/tests/mag/validation/{imap_calibration_mag_20240229_v01.cdf → calibration/imap_mag_l1b-calibration_20240229_v001.cdf} +0 -0
- /imap_processing/{swe/l1b/engineering_unit_convert_table.csv → tests/swe/lut/imap_swe_eu-conversion_20240510_v000.csv} +0 -0
- {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.12.0.dist-info → imap_processing-0.13.0.dist-info}/entry_points.txt +0 -0
imap_processing/mag/l1c/mag_l1c.py

```diff
@@ -1,22 +1,22 @@
 """MAG L1C processing module."""
 
 import logging
-from pathlib import Path
 from typing import Optional
 
 import numpy as np
 import xarray as xr
-import yaml
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.mag
+from imap_processing.mag import imap_mag_sdc_configuration_v001 as configuration
+from imap_processing.mag.constants import ModeFlags, VecSec
 from imap_processing.mag.l1c.interpolation_methods import InterpolationFunction
 
 logger = logging.getLogger(__name__)
 
 
 def mag_l1c(
-    first_input_dataset: xr.Dataset,
+    first_input_dataset: xr.Dataset,
+    second_input_dataset: xr.Dataset = None,
 ) -> xr.Dataset:
     """
     Will process MAG L1C data from L1A data.
@@ -28,12 +28,10 @@ def mag_l1c(
     first_input_dataset : xr.Dataset
         The first input dataset to process. This can be either burst or norm data, for
         mago or magi.
-    second_input_dataset : xr.Dataset
+    second_input_dataset : xr.Dataset, optional
         The second input dataset to process. This should be burst if first_input_dataset
         was norm, or norm if first_input_dataset was burst. It should match the
         instrument - both inputs should be mago or magi.
-    version : str
-        The version of the output data.
 
     Returns
     -------
@@ -42,43 +40,41 @@ def mag_l1c(
     """
     # TODO:
    # find missing sequences and output them
-    #
+    # Fix gaps at the beginning of the day by going to previous day's file
+    # Fix gaps at the end of the day
+    # Allow for one input to be missing
+    # Missing burst file - just pass through norm file
+    # Missing norm file - go back to previous L1C file to find timestamps, then
+    # interpolate the entire day from burst
 
     input_logical_source_1 = first_input_dataset.attrs["Logical_source"]
     if isinstance(first_input_dataset.attrs["Logical_source"], list):
         input_logical_source_1 = first_input_dataset.attrs["Logical_source"][0]
 
-
-
-    input_logical_source_2 = second_input_dataset.attrs["Logical_source"][0]
+    sensor = input_logical_source_1[-1:]
+    output_logical_source = f"imap_mag_l1c_norm-mag{sensor}"
 
-
-
-
-        output_logical_source = input_logical_source_1.replace("l1b", "l1c")
-    elif "norm" in input_logical_source_2 and "burst" in input_logical_source_1:
-        normal_mode_dataset = second_input_dataset
-        burst_mode_dataset = first_input_dataset
-        output_logical_source = input_logical_source_2.replace("l1b", "l1c")
+    normal_mode_dataset, burst_mode_dataset = select_datasets(
+        first_input_dataset, second_input_dataset
+    )
 
-
-
-
+    interp_function = InterpolationFunction[configuration.L1C_INTERPOLATION_METHOD]
+    if normal_mode_dataset and burst_mode_dataset:
+        full_interpolated_timeline = process_mag_l1c(
+            normal_mode_dataset, burst_mode_dataset, interp_function
        )
+    elif normal_mode_dataset is not None:
+        full_interpolated_timeline = fill_normal_data(
+            normal_mode_dataset, normal_mode_dataset["epoch"].data
+        )
+    else:
+        # TODO: With only burst data, downsample by retrieving the timeline
+        raise NotImplementedError
 
-
-        Path(__file__).parent.parent / "imap_mag_sdc-configuration_v001.yaml"
-    ) as f:
-        configuration = yaml.safe_load(f)
-
-    interp_function = InterpolationFunction[configuration["L1C_interpolation_method"]]
-    completed_timeline = process_mag_l1c(
-        normal_mode_dataset, burst_mode_dataset, interp_function
-    )
+    completed_timeline = remove_missing_data(full_interpolated_timeline)
 
     attribute_manager = ImapCdfAttributes()
     attribute_manager.add_instrument_global_attrs("mag")
-    attribute_manager.add_global_attribute("Data_version", version)
     attribute_manager.add_instrument_variable_attrs("mag", "l1c")
     compression = xr.DataArray(
         np.arange(2),
@@ -186,6 +182,66 @@ def mag_l1c(
     return output_dataset
 
 
+def select_datasets(
+    first_input_dataset: xr.Dataset, second_input_dataset: Optional[xr.Dataset] = None
+) -> tuple[xr.Dataset, xr.Dataset]:
+    """
+    Given one or two datasets, assign one to norm and one to burst.
+
+    If only one dataset is provided, the other will be marked as None. If two are
+    provided, they will be validated to ensure one is norm and one is burst.
+
+    Parameters
+    ----------
+    first_input_dataset : xr.Dataset
+        The first input dataset.
+    second_input_dataset : xr.Dataset, optional
+        The second input dataset.
+
+    Returns
+    -------
+    tuple
+        Tuple containing norm_mode_dataset, burst_mode_dataset.
+    """
+    normal_mode_dataset = None
+    burst_mode_dataset = None
+
+    input_logical_source_1 = first_input_dataset.attrs["Logical_source"]
+
+    if isinstance(first_input_dataset.attrs["Logical_source"], list):
+        input_logical_source_1 = first_input_dataset.attrs["Logical_source"][0]
+
+    if "norm" in input_logical_source_1:
+        normal_mode_dataset = first_input_dataset
+
+    if "burst" in input_logical_source_1:
+        burst_mode_dataset = first_input_dataset
+
+    if second_input_dataset is None:
+        logger.info(
+            f"Only one input dataset provided with logical source "
+            f"{input_logical_source_1}"
+        )
+    else:
+        input_logical_source_2 = second_input_dataset.attrs["Logical_source"]
+        if isinstance(second_input_dataset.attrs["Logical_source"], list):
+            input_logical_source_2 = second_input_dataset.attrs["Logical_source"][0]
+
+        if "burst" in input_logical_source_2:
+            burst_mode_dataset = second_input_dataset
+
+        elif "norm" in input_logical_source_2:
+            normal_mode_dataset = second_input_dataset
+
+        # If there are two inputs, one should be norm and one should be burst
+        if normal_mode_dataset is None or burst_mode_dataset is None:
+            raise RuntimeError(
+                "L1C requires one normal mode and one burst mode input file."
+            )
+
+    return normal_mode_dataset, burst_mode_dataset
+
+
 def process_mag_l1c(
     normal_mode_dataset: xr.Dataset,
     burst_mode_dataset: xr.Dataset,
```
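The new select_datasets helper replaces the inline norm/burst branching deleted above. A minimal usage sketch (assuming imap-processing 0.13.0 is installed; the toy datasets below carry only the Logical_source attribute the helper inspects):

```python
import xarray as xr

from imap_processing.mag.l1c.mag_l1c import select_datasets

norm = xr.Dataset(attrs={"Logical_source": "imap_mag_l1b_norm-mago"})
burst = xr.Dataset(attrs={"Logical_source": "imap_mag_l1b_burst-mago"})

# Argument order does not matter; the helper inspects the logical sources.
n, b = select_datasets(burst, norm)
assert n is norm and b is burst

# A single norm input is allowed; the burst slot comes back as None.
n_only, b_none = select_datasets(norm)
assert b_none is None
```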
```diff
@@ -222,14 +278,19 @@ def process_mag_l1c(
         An (n, 8) shaped array containing the completed timeline.
     """
     norm_epoch = normal_mode_dataset["epoch"].data
-
+    if "vectors_per_second" in normal_mode_dataset.attrs:
+        normal_vecsec_dict = vectors_per_second_from_string(
+            normal_mode_dataset.attrs["vectors_per_second"]
+        )
+    else:
+        normal_vecsec_dict = None
 
     output_dataset = normal_mode_dataset.copy(deep=True)
     output_dataset["sample_interpolated"] = xr.DataArray(
         np.zeros(len(normal_mode_dataset))
     )
 
-    gaps = find_all_gaps(norm_epoch,
+    gaps = find_all_gaps(norm_epoch, normal_vecsec_dict)
 
     new_timeline = generate_timeline(norm_epoch, gaps)
     norm_filled = fill_normal_data(normal_mode_dataset, new_timeline)
@@ -313,14 +374,52 @@ def interpolate_gaps(
     burst_epochs = burst_dataset["epoch"].data
     # Exclude range values
     burst_vectors = burst_dataset["vectors"].data
+    # Default to two vectors per second
+    burst_vecsec_dict = {0: VecSec.TWO_VECS_PER_S.value}
+    if "vectors_per_second" in burst_dataset.attrs:
+        burst_vecsec_dict = vectors_per_second_from_string(
+            burst_dataset.attrs["vectors_per_second"]
+        )
 
     for gap in gaps:
         # TODO: we might need a few inputs before or after start/end
-
-
+        burst_gap_start = (np.abs(burst_epochs - gap[0])).argmin()
+        burst_gap_end = (np.abs(burst_epochs - gap[1])).argmin()
+
+        # for the CIC filter, we need 2x normal mode cadence seconds
+
+        norm_rate = VecSec(int(gap[2]))
+
+        # Input rate
+        # Find where burst_start is after the start of the timeline
+        burst_vecsec_index = (
+            np.searchsorted(
+                list(burst_vecsec_dict.keys()),
+                burst_epochs[burst_gap_start],
+                side="right",
+            )
+            - 1
+        )
+        burst_rate = VecSec(list(burst_vecsec_dict.values())[burst_vecsec_index])
+
+        required_seconds = (1 / norm_rate.value) * 2
+        burst_buffer = int(required_seconds * burst_rate.value)
+
+        burst_start = max(0, burst_gap_start - burst_buffer)
+        burst_end = min(len(burst_epochs) - 1, burst_gap_end + burst_buffer)
+
         gap_timeline = filled_norm_timeline[
-
-
+            (filled_norm_timeline > gap[0]) & (filled_norm_timeline < gap[1])
+        ]
+        logger.info(
+            f"difference between gap start and burst start: "
+            f"{gap_timeline[0] - burst_epochs[burst_start]}"
+        )
+        # Limit timestamps to only include the areas with burst data
+        gap_timeline = gap_timeline[
+            (
+                (gap_timeline >= burst_epochs[burst_start])
+                & (gap_timeline <= burst_epochs[burst_gap_end])
             )
         ]
         # do not include range
```
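The rate lookup and buffer sizing above are straightforward to check in isolation. A standalone sketch with illustrative numbers (the epochs and rates are made up, not flight values):

```python
import numpy as np

# Rate changes: vectors per second keyed by start epoch in nanoseconds.
burst_vecsec_dict = {0: 8, 5_000_000_000: 64}
gap_start_epoch = 6_200_000_000  # falls inside the 64 vec/s segment

# searchsorted(side="right") - 1 picks the last rate change at or before the gap.
keys = list(burst_vecsec_dict.keys())
idx = np.searchsorted(keys, gap_start_epoch, side="right") - 1
burst_rate = list(burst_vecsec_dict.values())[idx]  # -> 64

# The CIC filter wants two normal-mode sample periods of burst context on each
# side of the gap: at a 2 vec/s normal rate that is 1 s, i.e. 64 burst samples.
norm_rate = 2
required_seconds = (1 / norm_rate) * 2
burst_buffer = int(required_seconds * burst_rate)
print(burst_buffer)  # 64
```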
```diff
@@ -328,20 +427,25 @@ def interpolate_gaps(
             burst_vectors[burst_start:burst_end, :3],
             burst_epochs[burst_start:burst_end],
             gap_timeline,
+            input_rate=burst_rate,
+            output_rate=norm_rate,
         )
 
         # gaps should not have data in timeline, still check it
         for index, timestamp in enumerate(gap_timeline):
             timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
-            if sum(
+            if sum(
+                filled_norm_timeline[timeline_index, 1:4]
+            ) == 0 and burst_gap_start + index < len(burst_vectors):
                 filled_norm_timeline[timeline_index, 1:4] = gap_fill[index]
+
                 filled_norm_timeline[timeline_index, 4] = burst_vectors[
-
+                    burst_gap_start + index, 3
                 ]
                 filled_norm_timeline[timeline_index, 5] = ModeFlags.BURST.value
                 filled_norm_timeline[timeline_index, 6:8] = burst_dataset[
                     "compression_flags"
-                ].data[
+                ].data[burst_gap_start + index]
 
     return filled_norm_timeline
 
```
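The loop above patches rows of the (n, 8) timeline array in place. A standalone sketch of one such patch, using the column layout documented later in this diff (0: epoch, 1-3: vector x/y/z, 4: range, 5: generated flag, 6-7: compression flags); all values are illustrative:

```python
import numpy as np

filled_norm_timeline = np.zeros((3, 8))
filled_norm_timeline[:, 0] = [0, 5e8, 1e9]  # epochs in nanoseconds
filled_norm_timeline[:, 5] = -1             # mark every row as missing

# Patch the middle timestamp the way the loop does for one burst-filled sample.
timeline_index = np.searchsorted(filled_norm_timeline[:, 0], 5e8)
filled_norm_timeline[timeline_index, 1:4] = [0.1, 0.2, 0.3]  # interpolated vector
filled_norm_timeline[timeline_index, 4] = 3  # range copied from the burst vector
filled_norm_timeline[timeline_index, 5] = 1  # generated flag (e.g. ModeFlags.BURST)

# remove_missing_data, added later in this diff, keeps only rows with flag != -1.
print(filled_norm_timeline[filled_norm_timeline[:, 5] != -1])
```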
```diff
@@ -392,7 +496,7 @@ def generate_timeline(epoch_data: np.ndarray, gaps: np.ndarray) -> np.ndarray:
 
 
 def find_all_gaps(
-    epoch_data: np.ndarray,
+    epoch_data: np.ndarray, vecsec_dict: Optional[dict] = None
 ) -> np.ndarray:
     """
     Find all the gaps in the epoch data.
@@ -405,32 +509,37 @@ def find_all_gaps(
     ----------
     epoch_data : numpy.ndarray
         The epoch data to find gaps in.
-
-        A
-
-        find the gaps. If not provided, a 1/2 second gap is assumed.
+    vecsec_dict : dict, optional
+        A dictionary of the form {start: vecsec, start: vecsec} where start is the time
+        in nanoseconds and vecsec is the number of vectors per second. This will be
+        used to find the gaps. If not provided, a 1/2 second gap is assumed.
 
     Returns
     -------
     numpy.ndarray
-        An array of gaps with shape (n,
-        specified as (start, end) where start and end both exist in the
+        An array of gaps with shape (n, 3) where n is the number of gaps. The gaps are
+        specified as (start, end, vector_rate) where start and end both exist in the
+        timeline.
     """
-    gaps: np.ndarray = np.zeros((0,
-    if
-
-
-
-
-
-
+    gaps: np.ndarray = np.zeros((0, 3))
+    if vecsec_dict is None:
+        # TODO: when we go back to the previous file, also retrieve expected
+        # vectors per second
+        # If no vecsec is provided, assume 2 vectors per second
+        vecsec_dict = {0: VecSec.TWO_VECS_PER_S.value}
+
+    end_index = epoch_data.shape[0]
+    for start_time in reversed(sorted(vecsec_dict.keys())):
+        start_index = np.where(start_time == epoch_data)[0][0]
+        gaps = np.concatenate(
+            (
+                find_gaps(
+                    epoch_data[start_index : end_index + 1], vecsec_dict[start_time]
+                ),
+                gaps,
            )
-
-
-    # TODO: How to handle this case
-    gaps = find_gaps(epoch_data, 2)  # Assume half second gaps
-    # alternatively, I could try and find the average time between vectors
+        )
+        end_index = start_index
 
     return gaps
 
@@ -439,8 +548,8 @@ def find_gaps(timeline_data: np.ndarray, vectors_per_second: int) -> np.ndarray:
     """
     Find gaps in timeline_data that are larger than 1/vectors_per_second.
 
-    Returns timestamps (start_gap, end_gap) where startgap and
-    exist in timeline data.
+    Returns timestamps (start_gap, end_gap, vectors_per_second) where startgap and
+    endgap both exist in timeline data.
 
     Parameters
     ----------
@@ -452,18 +561,27 @@ def find_gaps(timeline_data: np.ndarray, vectors_per_second: int) -> np.ndarray:
     Returns
     -------
     numpy.ndarray
-        Array of timestamps of shape (n,
-        end_gap. Start_gap and end_gap both correspond
+        Array of timestamps of shape (n, 3) containing n gaps with start_gap and
+        end_gap, as well as vectors_per_second. Start_gap and end_gap both correspond
+        to points in timeline_data.
     """
     # Expected difference between timestamps in nanoseconds.
     expected_gap = 1 / vectors_per_second * 1e9
 
-
-
-
+    # TODO: timestamps can vary by a few ms. Per Alastair, this can be around 7.5% of
+    # cadence without counting as a "gap".
+    diffs = abs(np.diff(timeline_data))
+    # 3.5e7 == 7.5% of 0.5s in nanoseconds, a common gap. In the future, this number
+    # will be calculated from the expected gap.
+    gap_index = np.asarray(diffs - expected_gap > 3.5e7).nonzero()[0]
+    output: np.ndarray = np.zeros((len(gap_index), 3))
 
     for index, gap in enumerate(gap_index):
-        output[index, :] = [
+        output[index, :] = [
+            timeline_data[gap],
+            timeline_data[gap + 1],
+            vectors_per_second,
+        ]
 
     # TODO: How should I handle/find gaps at the end?
     return output
```
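The tolerance arithmetic above is easy to sanity-check: at 2 vectors per second the expected spacing is 5e8 ns, and a step only counts as a gap once it exceeds that by 3.5e7 ns (7.5% of the 0.5 s cadence). A standalone sketch with made-up epochs:

```python
import numpy as np

vectors_per_second = 2
expected_gap = 1 / vectors_per_second * 1e9  # 5e8 ns between samples

# Four samples at 0.5 s cadence, then a 2 s hole before the final sample.
timeline = np.array([0.0, 5e8, 1e9, 1.5e9, 3.5e9])
diffs = np.abs(np.diff(timeline))

# A step is a gap only if it exceeds the cadence by more than 3.5e7 ns (7.5%).
gap_index = np.asarray(diffs - expected_gap > 3.5e7).nonzero()[0]
print(gap_index)  # [3]
print(timeline[gap_index[0]], timeline[gap_index[0] + 1])  # 1.5e9 3.5e9 (gap edges)
```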
```diff
@@ -493,3 +611,51 @@ def generate_missing_timestamps(gap: np.ndarray) -> np.ndarray:
 
     output: np.ndarray = np.arange(gap[0], gap[1], difference_ns)
     return output
+
+
+def vectors_per_second_from_string(vecsec_string: str) -> dict:
+    """
+    Extract the vectors per second from a string into a dictionary.
+
+    Dictionary format: {start_time: vecsec, start_time: vecsec}.
+
+    Parameters
+    ----------
+    vecsec_string : str
+        A string of the form "start:vecsec,start:vecsec" where start is the time in
+        nanoseconds and vecsec is the number of vectors per second.
+
+    Returns
+    -------
+    dict
+        A dictionary of the form {start_time: vecsec, start_time: vecsec}.
+    """
+    vecsec_dict = {}
+    vecsec_segments = vecsec_string.split(",")
+    for vecsec_segment in vecsec_segments:
+        start_time, vecsec = vecsec_segment.split(":")
+        vecsec_dict[int(start_time)] = int(vecsec)
+
+    return vecsec_dict
+
+
+def remove_missing_data(filled_timeline: np.ndarray) -> np.ndarray:
+    """
+    Remove timestamps with no data from the filled timeline.
+
+    Anywhere that the generated flag is equal to -1, the data will be removed.
+
+    Parameters
+    ----------
+    filled_timeline : numpy.ndarray
+        An (n, 8) shaped array containing the filled timeline.
+        Indices: 0 - epoch, 1-4 - vector x, y, z, and range, 5 - generated flag,
+        6-7 - compression flags.
+
+    Returns
+    -------
+    cleaned_array : numpy.ndarray
+        The filled timeline with missing data removed.
+    """
+    cleaned_array: np.ndarray = filled_timeline[filled_timeline[:, 5] != -1]
+    return cleaned_array
```
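A quick round-trip of the new "vectors_per_second" attribute format (assuming imap-processing 0.13.0 is installed; the attribute value below is illustrative):

```python
from imap_processing.mag.l1c.mag_l1c import vectors_per_second_from_string

# Each comma-separated segment is "<start epoch in ns>:<vectors per second>".
print(vectors_per_second_from_string("0:2,5000000000:4"))
# -> {0: 2, 5000000000: 4}
```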
imap_processing/mag/l2/mag_l2.py (new file)

```diff
@@ -0,0 +1,140 @@
+"""Module to run MAG L2 processing."""
+
+import numpy as np
+import xarray as xr
+
+from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
+from imap_processing.mag import imap_mag_sdc_configuration_v001 as configuration
+from imap_processing.mag.constants import DataMode
+from imap_processing.mag.l1b.mag_l1b import calibrate_vector
+from imap_processing.mag.l2.mag_l2_data import MagL2
+
+
+def mag_l2(
+    calibration_datasets: list[xr.Dataset],
+    offsets_dataset: xr.Dataset,
+    input_data: xr.Dataset,
+) -> list[xr.Dataset]:
+    """
+    Complete MAG L2 processing.
+
+    Processing uses 4 data input sources:
+    1. Calibration dataset
+        These calibration files are ancillary files and may require multiple files to
+        cover the entire timespan. They are not expected to change often. They are used
+        to provide rotation matrices to correct the frame of the vectors. The same
+        file(s) are used for both burst and norm calculations.
+    2. Offsets dataset
+        This is one, hand-created file which must correspond exactly to an L1B
+        (for burst) or L1C (for norm) data file. For each vector, this file includes
+        offsets, timedelta, and quality flags. The offsets are added to the vectors,
+        the timedelta is used to correct the epoch time, and the quality flags are
+        directly passed into the output file.
+    3. Input data
+        This is the L1B or L1C data file. It is used to provide the vectors and epoch
+        time. It should always be MAGo in the nominal case, but it is possible that we
+        will switch permanently to using MAGi (in the case of sensor failure, for
+        example.) The offsets dataset and the input
+        data are tightly related, so the input data filename is actually retrieved from
+        the offset dataset to ensure they always match.
+    4. sdc-configuration
+        This is a local configuration file for changes we never expect to make in
+        flight. This is in the IMAP local repo because changes to these settings will
+        require other code updates to validate the changes. In L2, the only setting used
+        is "always_output_mago", which indicates whether we should always output MAGo.
+        Note that if this ever is set to False, we will need to update the dependency
+        system to set MAGi files as an upstream dependency.
+
+    Input data can be burst or normal mode, but MUST match the file in offset_dataset.
+    TODO: retrieve the file from offset_dataset in cli.py.
+    Calibration dataset is the same for all runs.
+
+    MAGi data is not used unless we indicate it.
+
+    Parameters
+    ----------
+    calibration_datasets : list[xr.Dataset]
+        Calibration ancillary file inputs.
+    offsets_dataset : xr.Dataset
+        Offset ancillary file input.
+    input_data : xr.Dataset
+        Input data from MAG L1C or L1B.
+
+    Returns
+    -------
+    list[xr.Dataset]
+        List of xarray datasets ready to write to CDF file. Expected to be four outputs
+        for different frames.
+    """
+    # TODO we may need to combine multiple calibration datasets into one timeline.
+
+    always_output_mago = configuration.ALWAYS_OUTPUT_MAGO
+
+    # TODO Check that the input file matches the offsets file
+    if not np.array_equal(input_data["epoch"].data, offsets_dataset["epoch"].data):
+        raise ValueError("Input file and offsets file must have the same timestamps.")
+
+    calibration_matrix = retrieve_matrix_from_l2_calibration(
+        calibration_datasets, always_output_mago
+    )
+
+    vectors = np.apply_along_axis(
+        func1d=calibrate_vector,
+        axis=1,
+        arr=input_data["vectors"].data,
+        calibration_matrix=calibration_matrix,
+    )
+
+    basic_test_data = MagL2(
+        vectors[:, :3],  # level 2 vectors don't include range
+        input_data["epoch"].data,
+        input_data["vectors"].data[:, 3],
+        {},
+        np.zeros(len(input_data["epoch"].data)),
+        np.zeros(len(input_data["epoch"].data)),
+        DataMode.NORM,
+        offsets=offsets_dataset["offsets"].data,
+        timedelta=offsets_dataset["timedeltas"].data,
+    )
+    attributes = ImapCdfAttributes()
+    attributes.add_instrument_global_attrs("mag")
+    # temporarily point to l1c
+    attributes.add_instrument_variable_attrs("mag", "l1c")
+    return [basic_test_data.generate_dataset(attributes)]
+
+
+def retrieve_matrix_from_l2_calibration(
+    calibration_datasets: list[xr.Dataset], use_mago: bool = True
+) -> xr.DataArray:
+    """
+    Get the calibration matrix for the file.
+
+    Parameters
+    ----------
+    calibration_datasets : list[xr.Dataset]
+        Ancillary file inputs for calibration.
+    use_mago : bool
+        Use the MAGo calibration matrix. Default is True.
+
+    Returns
+    -------
+    np.ndarray
+        Calibration matrix in the shape (3, 3, 4) to rotate vectors.
+    """
+    # TODO: allow for multiple inputs
+    if isinstance(calibration_datasets, list):
+        calibration_dataset = calibration_datasets[0]
+        if len(calibration_datasets) > 1:
+            raise NotImplementedError
+    else:
+        calibration_dataset = calibration_datasets
+
+    if use_mago:
+        calibration_data = calibration_dataset["URFTOORFO"]
+    else:
+        calibration_data = calibration_dataset["URFTOORFI"]
+
+    # TODO will need to combine multiple files here
+    # TODO: Check validity of the calibration file?
+
+    return calibration_data
```
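calibrate_vector itself is imported from mag_l1b and is not shown in this diff, so the stand-in below only sketches the shape contract implied here: a (3, 3, 4) calibration array holding one 3x3 rotation per range value 0-3, applied row by row via np.apply_along_axis. The stand-in's internals are an assumption, not the package's implementation:

```python
import numpy as np


def calibrate_vector(vector: np.ndarray, calibration_matrix: np.ndarray) -> np.ndarray:
    """Hypothetical stand-in: rotate vector[:3] with the matrix for range vector[3]."""
    rotated = calibration_matrix[:, :, int(vector[3])] @ vector[:3]
    return np.concatenate([rotated, vector[3:]])


# Identity matrices for all four ranges, so the result is easy to verify.
calibration_matrix = np.stack([np.eye(3)] * 4, axis=-1)  # shape (3, 3, 4)
vectors = np.array([[1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 3.0]])  # x, y, z, range

calibrated = np.apply_along_axis(
    func1d=calibrate_vector, axis=1, arr=vectors, calibration_matrix=calibration_matrix
)
print(calibrated)  # unchanged under identity calibration
```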