imap-processing 0.9.0__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +749 -442
- imap_processing/cdf/config/imap_glows_global_cdf_attrs.yaml +7 -0
- imap_processing/cdf/config/imap_glows_l1a_variable_attrs.yaml +8 -2
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +0 -1
- imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +358 -0
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +59 -25
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +22 -0
- imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +32 -8
- imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +94 -5
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +65 -37
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +16 -1
- imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +7 -0
- imap_processing/cdf/config/imap_swe_l1a_variable_attrs.yaml +14 -14
- imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +25 -24
- imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml +238 -0
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +100 -92
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +45 -9
- imap_processing/codice/codice_l1a.py +104 -58
- imap_processing/codice/constants.py +111 -155
- imap_processing/codice/data/esa_sweep_values.csv +256 -256
- imap_processing/codice/data/lo_stepping_values.csv +128 -128
- imap_processing/ena_maps/ena_maps.py +519 -0
- imap_processing/ena_maps/utils/map_utils.py +145 -0
- imap_processing/ena_maps/utils/spatial_utils.py +226 -0
- imap_processing/glows/__init__.py +3 -0
- imap_processing/glows/ancillary/imap_glows_pipeline_settings_v001.json +52 -0
- imap_processing/glows/l1a/glows_l1a.py +72 -14
- imap_processing/glows/l1b/glows_l1b.py +2 -1
- imap_processing/glows/l1b/glows_l1b_data.py +25 -1
- imap_processing/glows/l2/glows_l2.py +324 -0
- imap_processing/glows/l2/glows_l2_data.py +156 -51
- imap_processing/hi/l1a/science_direct_event.py +57 -51
- imap_processing/hi/l1b/hi_l1b.py +43 -28
- imap_processing/hi/l1c/hi_l1c.py +225 -42
- imap_processing/hi/utils.py +20 -3
- imap_processing/hit/l0/constants.py +2 -2
- imap_processing/hit/l0/decom_hit.py +1 -1
- imap_processing/hit/l1a/hit_l1a.py +94 -13
- imap_processing/hit/l1b/hit_l1b.py +158 -9
- imap_processing/ialirt/l0/process_codicehi.py +156 -0
- imap_processing/ialirt/l0/process_codicelo.py +5 -2
- imap_processing/ialirt/packet_definitions/ialirt.xml +28 -20
- imap_processing/ialirt/packet_definitions/ialirt_codicehi.xml +241 -0
- imap_processing/ialirt/packet_definitions/ialirt_swapi.xml +170 -0
- imap_processing/ialirt/packet_definitions/ialirt_swe.xml +258 -0
- imap_processing/ialirt/process_ephemeris.py +72 -40
- imap_processing/idex/decode.py +241 -0
- imap_processing/idex/idex_l1a.py +143 -81
- imap_processing/idex/idex_l1b.py +244 -10
- imap_processing/lo/l0/lo_science.py +61 -0
- imap_processing/lo/l1a/lo_l1a.py +98 -10
- imap_processing/lo/l1b/lo_l1b.py +2 -2
- imap_processing/lo/l1c/lo_l1c.py +2 -2
- imap_processing/lo/packet_definitions/lo_xtce.xml +1082 -9178
- imap_processing/mag/l0/decom_mag.py +2 -2
- imap_processing/mag/l1a/mag_l1a.py +7 -7
- imap_processing/mag/l1a/mag_l1a_data.py +62 -30
- imap_processing/mag/l1b/mag_l1b.py +11 -6
- imap_processing/quality_flags.py +18 -3
- imap_processing/spice/geometry.py +149 -177
- imap_processing/spice/kernels.py +26 -26
- imap_processing/spice/spin.py +233 -0
- imap_processing/spice/time.py +96 -31
- imap_processing/swapi/l1/swapi_l1.py +60 -31
- imap_processing/swapi/packet_definitions/swapi_packet_definition.xml +363 -384
- imap_processing/swe/l1a/swe_l1a.py +8 -3
- imap_processing/swe/l1a/swe_science.py +24 -24
- imap_processing/swe/l1b/swe_l1b.py +2 -1
- imap_processing/swe/l1b/swe_l1b_science.py +181 -122
- imap_processing/swe/l2/swe_l2.py +337 -70
- imap_processing/swe/utils/swe_utils.py +28 -0
- imap_processing/tests/cdf/test_utils.py +2 -2
- imap_processing/tests/codice/conftest.py +20 -17
- imap_processing/tests/codice/data/validation/imap_codice_l1a_hskp_20241110193622_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-counters-aggregated_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-counters-singles_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-nsw-angular_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-nsw-priority_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-nsw-species_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-sw-angular_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-sw-priority_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/data/validation/imap_codice_l1a_lo-sw-species_20241110193700_v0.0.0.cdf +0 -0
- imap_processing/tests/codice/test_codice_l0.py +55 -121
- imap_processing/tests/codice/test_codice_l1a.py +147 -59
- imap_processing/tests/conftest.py +81 -22
- imap_processing/tests/ena_maps/test_ena_maps.py +309 -0
- imap_processing/tests/ena_maps/test_map_utils.py +286 -0
- imap_processing/tests/ena_maps/test_spatial_utils.py +161 -0
- imap_processing/tests/glows/conftest.py +7 -1
- imap_processing/tests/glows/test_glows_l1a_cdf.py +3 -7
- imap_processing/tests/glows/test_glows_l1a_data.py +34 -6
- imap_processing/tests/glows/test_glows_l1b_data.py +29 -17
- imap_processing/tests/glows/test_glows_l2.py +101 -0
- imap_processing/tests/hi/conftest.py +3 -3
- imap_processing/tests/hi/data/l1/imap_hi_l1b_45sensor-de_20250415_v999.cdf +0 -0
- imap_processing/tests/hi/data/l1/imap_his_pset-calibration-prod-config_20240101_v001.csv +31 -0
- imap_processing/tests/hi/test_hi_l1b.py +14 -9
- imap_processing/tests/hi/test_hi_l1c.py +136 -36
- imap_processing/tests/hi/test_l1a.py +0 -2
- imap_processing/tests/hi/test_science_direct_event.py +18 -14
- imap_processing/tests/hi/test_utils.py +16 -11
- imap_processing/tests/hit/helpers/__init__.py +0 -0
- imap_processing/tests/hit/helpers/l1_validation.py +405 -0
- imap_processing/tests/hit/test_data/sci_sample.ccsds +0 -0
- imap_processing/tests/hit/test_decom_hit.py +8 -10
- imap_processing/tests/hit/test_hit_l1a.py +117 -180
- imap_processing/tests/hit/test_hit_l1b.py +149 -55
- imap_processing/tests/hit/validation_data/hit_l1b_standard_sample2_nsrl_v4_3decimals.csv +62 -0
- imap_processing/tests/hit/validation_data/sci_sample_raw.csv +62 -0
- imap_processing/tests/ialirt/test_data/l0/20240827095047_SWE_IALIRT_packet.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/BinLog CCSDS_FRAG_TLM_20240826_152323Z_IALIRT_data_for_SDC.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/eu_SWP_IAL_20240826_152033.csv +644 -0
- imap_processing/tests/ialirt/test_data/l0/hi_fsw_view_1_ccsds.bin +0 -0
- imap_processing/tests/ialirt/test_data/l0/idle_export_eu.SWE_IALIRT_20240827_093852.csv +914 -0
- imap_processing/tests/ialirt/test_data/l0/imap_codice_l1a_hi-ialirt_20240523200000_v0.0.0.cdf +0 -0
- imap_processing/tests/ialirt/unit/test_process_codicehi.py +106 -0
- imap_processing/tests/ialirt/unit/test_process_ephemeris.py +33 -5
- imap_processing/tests/ialirt/unit/test_process_swapi.py +85 -0
- imap_processing/tests/ialirt/unit/test_process_swe.py +106 -0
- imap_processing/tests/idex/conftest.py +29 -1
- imap_processing/tests/idex/test_data/compressed_2023_102_14_24_55.pkts +0 -0
- imap_processing/tests/idex/test_data/non_compressed_2023_102_14_22_26.pkts +0 -0
- imap_processing/tests/idex/test_idex_l0.py +6 -3
- imap_processing/tests/idex/test_idex_l1a.py +151 -1
- imap_processing/tests/idex/test_idex_l1b.py +124 -2
- imap_processing/tests/lo/test_lo_l1a.py +62 -2
- imap_processing/tests/lo/test_lo_science.py +85 -0
- imap_processing/tests/lo/validation_data/Instrument_FM1_T104_R129_20240803_ILO_SPIN_EU.csv +2 -0
- imap_processing/tests/mag/conftest.py +16 -0
- imap_processing/tests/mag/test_mag_decom.py +6 -4
- imap_processing/tests/mag/test_mag_l1a.py +36 -7
- imap_processing/tests/mag/test_mag_l1b.py +55 -4
- imap_processing/tests/mag/test_mag_validation.py +148 -0
- imap_processing/tests/mag/validation/L1a/T001/all_p_ones.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T001/mag-l0-l1a-t001-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T001/mag-l0-l1a-t001-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T002/all_n_ones.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T002/mag-l0-l1a-t002-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T002/mag-l0-l1a-t002-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T003/field_like.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T003/mag-l0-l1a-t003-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T003/mag-l0-l1a-t003-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T004/field_like.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T004/mag-l0-l1a-t004-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T004/mag-l0-l1a-t004-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T005/field_like_range_change.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T005/mag-l0-l1a-t005-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T005/mag-l0-l1a-t005-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T006/hdr_field.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T006/mag-l0-l1a-t006-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T006/mag-l0-l1a-t006-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T007/hdr_field_and_range_change.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T007/mag-l0-l1a-t007-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T007/mag-l0-l1a-t007-out.csv +17 -0
- imap_processing/tests/mag/validation/L1a/T008/field_like_range_change.txt +19200 -0
- imap_processing/tests/mag/validation/L1a/T008/mag-l0-l1a-t008-in.bin +0 -0
- imap_processing/tests/mag/validation/L1a/T008/mag-l0-l1a-t008-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T009/data.bin +0 -0
- imap_processing/tests/mag/validation/L1b/T009/field_like_all_ranges.txt +19200 -0
- imap_processing/tests/mag/validation/L1b/T009/mag-l1a-l1b-t009-in.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T009/mag-l1a-l1b-t009-magi-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T009/mag-l1a-l1b-t009-mago-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T010/data.bin +0 -0
- imap_processing/tests/mag/validation/L1b/T010/field_like_all_ranges.txt +19200 -0
- imap_processing/tests/mag/validation/L1b/T010/mag-l1a-l1b-t010-in.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T010/mag-l1a-l1b-t010-magi-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T010/mag-l1a-l1b-t010-mago-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T011/data.bin +0 -0
- imap_processing/tests/mag/validation/L1b/T011/field_like_all_ranges.txt +19200 -0
- imap_processing/tests/mag/validation/L1b/T011/mag-l1a-l1b-t011-in.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T011/mag-l1a-l1b-t011-magi-out.csv +17 -0
- imap_processing/tests/mag/validation/L1b/T011/mag-l1a-l1b-t011-mago-out.csv +17 -0
- imap_processing/tests/spice/test_geometry.py +128 -133
- imap_processing/tests/spice/test_kernels.py +37 -37
- imap_processing/tests/spice/test_spin.py +184 -0
- imap_processing/tests/spice/test_time.py +43 -20
- imap_processing/tests/swapi/test_swapi_l1.py +11 -10
- imap_processing/tests/swapi/test_swapi_l2.py +13 -3
- imap_processing/tests/swe/test_swe_l1a.py +1 -1
- imap_processing/tests/swe/test_swe_l1b.py +20 -3
- imap_processing/tests/swe/test_swe_l1b_science.py +54 -35
- imap_processing/tests/swe/test_swe_l2.py +148 -5
- imap_processing/tests/test_cli.py +39 -7
- imap_processing/tests/test_quality_flags.py +19 -19
- imap_processing/tests/test_utils.py +3 -2
- imap_processing/tests/ultra/test_data/l0/ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv +3314 -3314
- imap_processing/tests/ultra/test_data/mock_data.py +161 -0
- imap_processing/tests/ultra/unit/conftest.py +73 -0
- imap_processing/tests/ultra/unit/test_badtimes.py +58 -0
- imap_processing/tests/ultra/unit/test_cullingmask.py +87 -0
- imap_processing/tests/ultra/unit/test_de.py +61 -60
- imap_processing/tests/ultra/unit/test_ultra_l1a.py +3 -3
- imap_processing/tests/ultra/unit/test_ultra_l1b.py +51 -77
- imap_processing/tests/ultra/unit/test_ultra_l1b_annotated.py +5 -5
- imap_processing/tests/ultra/unit/test_ultra_l1b_culling.py +114 -0
- imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py +86 -26
- imap_processing/tests/ultra/unit/test_ultra_l1c.py +1 -1
- imap_processing/tests/ultra/unit/test_ultra_l1c_pset_bins.py +3 -3
- imap_processing/ultra/constants.py +11 -1
- imap_processing/ultra/l1a/ultra_l1a.py +2 -2
- imap_processing/ultra/l1b/badtimes.py +22 -5
- imap_processing/ultra/l1b/cullingmask.py +31 -5
- imap_processing/ultra/l1b/de.py +32 -37
- imap_processing/ultra/l1b/extendedspin.py +44 -20
- imap_processing/ultra/l1b/ultra_l1b.py +21 -22
- imap_processing/ultra/l1b/ultra_l1b_culling.py +190 -0
- imap_processing/ultra/l1b/ultra_l1b_extended.py +81 -30
- imap_processing/ultra/l1c/histogram.py +6 -2
- imap_processing/ultra/l1c/pset.py +6 -2
- imap_processing/ultra/l1c/ultra_l1c.py +2 -3
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +4 -3
- imap_processing/ultra/utils/ultra_l1_utils.py +70 -14
- imap_processing/utils.py +2 -2
- {imap_processing-0.9.0.dist-info → imap_processing-0.11.0.dist-info}/METADATA +7 -2
- {imap_processing-0.9.0.dist-info → imap_processing-0.11.0.dist-info}/RECORD +235 -152
- imap_processing/tests/codice/data/eu_unit_lookup_table.csv +0 -101
- imap_processing/tests/codice/data/idle_export_eu.COD_NHK_20230822_122700 2.csv +0 -100
- imap_processing/tests/codice/data/idle_export_raw.COD_NHK_20230822_122700.csv +0 -100
- imap_processing/tests/codice/data/imap_codice_l0_raw_20241110_v001.pkts +0 -0
- imap_processing/tests/hi/test_data/l1a/imap_hi_l1a_45sensor-de_20250415_v000.cdf +0 -0
- imap_processing/tests/hit/test_data/sci_sample1.ccsds +0 -0
- imap_processing/tests/ultra/unit/test_spatial_utils.py +0 -125
- imap_processing/ultra/utils/spatial_utils.py +0 -221
- /imap_processing/tests/hi/{test_data → data}/l0/20231030_H45_APP_NHK.bin +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/20231030_H45_APP_NHK.csv +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/20231030_H45_SCI_CNT.bin +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/20231030_H45_SCI_DE.bin +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/H90_NHK_20241104.bin +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/H90_sci_cnt_20241104.bin +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/H90_sci_de_20241104.bin +0 -0
- /imap_processing/tests/hi/{test_data → data}/l0/README.txt +0 -0
- /imap_processing/tests/idex/{imap_idex_l0_raw_20231214_v001.pkts → test_data/imap_idex_l0_raw_20231214_v001.pkts} +0 -0
- /imap_processing/tests/idex/{impact_14_tof_high_data.txt → test_data/impact_14_tof_high_data.txt} +0 -0
- /imap_processing/tests/mag/{imap_mag_l1a_norm-magi_20251017_v001.cdf → validation/imap_mag_l1a_norm-magi_20251017_v001.cdf} +0 -0
- /imap_processing/tests/mag/{mag_l0_test_data.pkts → validation/mag_l0_test_data.pkts} +0 -0
- /imap_processing/tests/mag/{mag_l0_test_output.csv → validation/mag_l0_test_output.csv} +0 -0
- /imap_processing/tests/mag/{mag_l1_test_data.pkts → validation/mag_l1_test_data.pkts} +0 -0
- /imap_processing/tests/mag/{mag_l1a_test_output.csv → validation/mag_l1a_test_output.csv} +0 -0
- {imap_processing-0.9.0.dist-info → imap_processing-0.11.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.9.0.dist-info → imap_processing-0.11.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.9.0.dist-info → imap_processing-0.11.0.dist-info}/entry_points.txt +0 -0
imap_processing/hi/utils.py
CHANGED
@@ -4,6 +4,7 @@ import re
 from collections.abc import Sequence
 from dataclasses import dataclass
 from enum import IntEnum
+from numbers import Number
 from typing import Optional, Union

 import numpy as np
@@ -99,6 +100,7 @@ def full_dataarray(
     attrs: dict,
     coords: Optional[dict[str, xr.DataArray]] = None,
     shape: Optional[Union[int, Sequence[int]]] = None,
+    fill_value: Optional[Number] = None,
 ) -> xr.DataArray:
     """
     Generate an empty xarray.DataArray with appropriate attributes.
@@ -117,6 +119,9 @@ def full_dataarray(
         Coordinate variables for the Dataset.
     shape : int or tuple
         Shape of ndarray data array to instantiate in the xarray.DataArray.
+    fill_value : optional, float
+        Override the fill value that the DataArray will be filled with. If not
+        supplied, the "FILLVAL" value from `attrs` will be used.

     Returns
     -------
@@ -133,9 +138,11 @@ def full_dataarray(
         shape = [coords[k].data.size for k in dims]  # type: ignore
     if hasattr(shape, "__len__") and len(shape) > len(dims):
         dims.append("")
+    if fill_value is None:
+        fill_value = _attrs["FILLVAL"]

     data_array = xr.DataArray(
-        np.full(shape,
+        np.full(shape, fill_value, dtype=dtype),
         name=name,
         dims=dims,
         attrs=_attrs,
@@ -146,6 +153,8 @@
 def create_dataset_variables(
     variable_names: list[str],
     variable_shape: Union[int, Sequence[int]],
+    coords: Optional[dict[str, xr.DataArray]] = None,
+    fill_value: Optional[Number] = None,
     att_manager_lookup_str: str = "{0}",
 ) -> dict[str, xr.DataArray]:
     """
@@ -157,8 +166,14 @@
     ----------
     variable_names : list[str]
         List of variable names to create.
-    variable_shape :
+    variable_shape : int or sequence of int
         Shape of the new variables data ndarray.
+    coords : dict
+        Coordinate variables for the Dataset.
+    fill_value : optional, number
+        Value to fill the new variables data arrays with. If not supplied,
+        the fill value is pulled from the CDF variable attributes "FILLVAL"
+        attribute.
     att_manager_lookup_str : str
         String defining how to build the string passed to the
         CdfAttributeManager in order to retrieve the CdfAttributes for each
@@ -181,5 +196,7 @@
         attrs = attr_mgr.get_variable_attributes(
             att_manager_lookup_str.format(var), check_schema=False
         )
-        new_variables[var] = full_dataarray(
+        new_variables[var] = full_dataarray(
+            var, attrs, shape=variable_shape, coords=coords, fill_value=fill_value
+        )
     return new_variables
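
Note: the new `fill_value` argument simply overrides the CDF "FILLVAL" attribute when given. A minimal sketch of that fallback, with a hypothetical attrs dict standing in for what the CdfAttributeManager would return:

import numpy as np

# Hypothetical CDF-style variable attributes (not pulled from a real manager).
attrs = {"FILLVAL": -9223372036854775808}

def resolve_fill(fill_value=None):
    # Mirrors the new fallback in full_dataarray: an explicit fill_value
    # wins; otherwise the "FILLVAL" attribute is used.
    return attrs["FILLVAL"] if fill_value is None else fill_value

print(np.full((3,), resolve_fill()))     # filled with FILLVAL
print(np.full((3,), resolve_fill(0.0)))  # filled with the caller's override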

imap_processing/hit/l0/constants.py
CHANGED
@@ -47,7 +47,7 @@ COUNTS_DATA_STRUCTURE = {
     "spare": HITPacking(24, 24, (1,)),
     # ------------------------------------------
     # erates - contains livetime counters
-    "
+    "livetime_counter": HITPacking(16, 16, (1,)),  # livetime counter
     "num_trig": HITPacking(16, 16, (1,)),  # number of triggers
     "num_reject": HITPacking(16, 16, (1,)),  # number of rejected events
     "num_acc_w_pha": HITPacking(
@@ -92,7 +92,7 @@ COUNTS_DATA_STRUCTURE = {
     # -------------------------------------------
     # other count rates
     "coinrates": HITPacking(16, 416, (26,)),  # coincidence rates
-    "
+    "pbufrates": HITPacking(16, 512, (32,)),  # priority buffer rates
     "l2fgrates": HITPacking(16, 2112, (132,)),  # range 2 foreground rates
     "l2bgrates": HITPacking(16, 192, (12,)),  # range 2 background rates
     "l3fgrates": HITPacking(16, 2672, (167,)),  # range 3 foreground rates
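
Note: these entries are internally consistent if one reads the HITPacking arguments as (bits per element, total bits, shape) — an interpretation assumed from the values in this diff, not stated by the source. A quick arithmetic check:

# (bits_per_element, total_bits, shape) triples copied from the diff above.
entries = {
    "livetime_counter": (16, 16, (1,)),
    "coinrates": (16, 416, (26,)),
    "pbufrates": (16, 512, (32,)),
    "l2fgrates": (16, 2112, (132,)),
}

for name, (bit_size, total_bits, shape) in entries.items():
    n_elements = 1
    for dim in shape:
        n_elements *= dim
    # e.g. 16 * 26 == 416, 16 * 32 == 512, 16 * 132 == 2112
    assert bit_size * n_elements == total_bits, name
print("bit widths are self-consistent")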

imap_processing/hit/l0/decom_hit.py
CHANGED
@@ -129,7 +129,7 @@ def is_sequential(counters: np.ndarray) -> np.bool_:
     bool
         True if the sequence counters are sequential, False otherwise.
     """
-    return np.all(np.diff(counters) == 1)
+    return np.all((np.diff(counters) % 16384) == 1)


 def get_valid_starting_indices(flags: np.ndarray, counters: np.ndarray) -> np.ndarray:
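
Note: the `% 16384` makes `is_sequential` tolerate rollover of the CCSDS source sequence counter, a 14-bit field that wraps from 16383 back to 0. A minimal before/after sketch:

import numpy as np

counters = np.array([16382, 16383, 0, 1])  # wraps at the 14-bit limit

old = np.all(np.diff(counters) == 1)            # False: diff is -16383 at the wrap
new = np.all((np.diff(counters) % 16384) == 1)  # True: -16383 % 16384 == 1
print(old, new)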

imap_processing/hit/l1a/hit_l1a.py
CHANGED
@@ -19,6 +19,9 @@ logger = logging.getLogger(__name__)

 # TODO review logging levels to use (debug vs. info)

+# Fill value for missing data
+fillval = -9223372036854775808
+

 def hit_l1a(packet_file: str, data_version: str) -> list[xr.Dataset]:
     """
@@ -91,14 +94,6 @@ def subcom_sectorates(sci_dataset: xr.Dataset) -> None:
     sci_dataset : xarray.Dataset
         Xarray dataset containing parsed HIT science data.
     """
-    # TODO:
-    #  - Update to use fill values defined in attribute manager which
-    #    isn't defined for L1A science data yet
-    #  - fix issues with fe_counts_sectored. The array has shape
-    #    (epoch: 28, fe_energy_index: 1, declination: 8, azimuth: 15),
-    #    but cdflib drops second dimension of size 1 and recognizes
-    #    only 3 total dimensions. Are dimensions of 1 ignored?
-
     # Calculate mod 10 values
     hdr_min_count_mod_10 = sci_dataset.hdr_minute_cnt.values % 10

@@ -107,7 +102,10 @@ def subcom_sectorates(sci_dataset: xr.Dataset) -> None:
     num_frames = len(hdr_min_count_mod_10)
     # TODO: add more specific dtype for rates (ex. int16) once this is defined by HIT
     data_by_species_and_energy_range = {
-        key: {
+        key: {
+            **value,
+            "rates": np.full((num_frames, 8, 15), fill_value=fillval, dtype=int),
+        }
         for key, value in MOD_10_MAPPING.items()
     }

@@ -162,6 +160,85 @@ def subcom_sectorates(sci_dataset: xr.Dataset) -> None:
     )


+def calculate_uncertainties(dataset: xr.Dataset) -> xr.Dataset:
+    """
+    Calculate uncertainties for each counts data variable in the dataset.
+
+    Calculate the upper and lower uncertainties. The uncertainty for
+    the raw Lev1A HIT data will be calculated as asymmetric Poisson
+    uncertainty as prescribed in Gehrels 1986 (DOI: 10.1086/164079).
+    See section 5.5 in the algorithm document for details.
+
+    The upper uncertainty will be calculated as
+        DELTA_PLUS = sqrt(counts + 1) + 1
+
+    The lower uncertainty will be calculated as
+        DELTA_MINUS = sqrt(counts)
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The dataset containing counts data.
+
+    Returns
+    -------
+    dataset : xarray.Dataset
+        The dataset with added uncertainties for each counts data variable.
+    """
+    # Variables that aren't counts data and should be skipped in the calculation
+    ignore_vars = [
+        "version",
+        "type",
+        "sec_hdr_flg",
+        "pkt_apid",
+        "seq_flgs",
+        "src_seq_ctr",
+        "pkt_len",
+        "hdr_unit_num",
+        "hdr_frame_version",
+        "hdr_dynamic_threshold_state",
+        "hdr_leak_conv",
+        "hdr_heater_duty_cycle",
+        "hdr_code_ok",
+        "hdr_minute_cnt",
+        "livetime_counter",
+        "h_energy_min",
+        "h_energy_max",
+        "he4_energy_min",
+        "he4_energy_max",
+        "cno_energy_min",
+        "cno_energy_max",
+        "nemgsi_energy_min",
+        "nemgsi_energy_max",
+        "fe_energy_min",
+        "fe_energy_max",
+    ]
+
+    # Counts data that need uncertainties calculated
+    count_vars = set(dataset.data_vars) - set(ignore_vars)
+
+    # Calculate uncertainties for counts data variables.
+    # Arrays with fill values (i.e. missing data) are skipped in this calculation
+    # but are kept in the new data arrays to retain shape and dimensions.
+    for var in count_vars:
+        mask = dataset[var] != fillval  # Mask for valid values
+        # Ensure that the values are positive before taking the square root
+        safe_values_plus = np.maximum(dataset[var] + 1, 0).astype(np.float32)
+        safe_values_minus = np.maximum(dataset[var], 0).astype(np.float32)
+
+        dataset[f"{var}_delta_plus"] = xr.DataArray(
+            np.where(
+                mask, np.sqrt(safe_values_plus) + 1, dataset[var].astype(np.float32)
+            ),
+            dims=dataset[var].dims,
+        )
+        dataset[f"{var}_delta_minus"] = xr.DataArray(
+            np.where(mask, np.sqrt(safe_values_minus), dataset[var].astype(np.float32)),
+            dims=dataset[var].dims,
+        )
+    return dataset
+
+
 def process_science(
     dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
 ) -> list[xr.Dataset]:
@@ -201,6 +278,9 @@ def process_science(
     )
     count_rates_dataset = sci_dataset.drop_vars("pha_raw")

+    # Calculate uncertainties for count rates
+    count_rates_dataset = calculate_uncertainties(count_rates_dataset)
+
     # Logical sources for the two products.
     logical_sources = ["imap_hit_l1a_count-rates", "imap_hit_l1a_pulse-height-events"]

@@ -228,10 +308,11 @@
                 print(f"Field {field} not found in attribute manager.")
                 logger.warning(f"Field {field} not found in attribute manager.")

-
-        #
-
-
+        # Skip schema check for epoch to prevent attr_mgr from adding the
+        # DEPEND_0 attribute which isn't required for epoch
+        dataset.epoch.attrs = attr_mgr.get_variable_attributes(
+            "epoch", check_schema=False
+        )

         datasets.append(dataset)

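
Note: the Gehrels-style bounds added above reduce to two closed-form expressions (upper = sqrt(N + 1) + 1, lower = sqrt(N)), so the behavior is easy to spot-check outside of xarray. A sketch with illustrative counts, mirroring the diff's fill-value masking:

import numpy as np

fillval = -9223372036854775808
counts = np.array([0, 4, 100, fillval], dtype=np.int64)

mask = counts != fillval
safe = np.maximum(counts, 0).astype(np.float32)

# Asymmetric Poisson uncertainties per Gehrels (1986); fill values pass through.
delta_plus = np.where(mask, np.sqrt(safe + 1) + 1, counts.astype(np.float32))
delta_minus = np.where(mask, np.sqrt(safe), counts.astype(np.float32))

print(delta_plus)   # approx [2.0, 3.236, 11.05, <fill>]
print(delta_minus)  # approx [0.0, 2.0, 10.0, <fill>]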

imap_processing/hit/l1b/hit_l1b.py
CHANGED
@@ -4,6 +4,7 @@ import logging

 import xarray as xr

+from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.hit.hit_utils import (
     HitAPID,
     get_attribute_manager,
@@ -26,7 +27,7 @@ def hit_l1b(dependencies: dict, data_version: str) -> list[xr.Dataset]:
     ----------
     dependencies : dict
         Dictionary of dependencies that are L1A xarray datasets
-        for science data and a file path string to
+        for science data and a file path string to an L0 file
         for housekeeping data.
     data_version : str
         Version of the data product being created.
@@ -34,26 +35,174 @@
     Returns
     -------
     processed_data : list[xarray.Dataset]
-        List of L1B datasets.
+        List of four L1B datasets.
     """
     # Create the attribute manager for this data level
     attr_mgr = get_attribute_manager(data_version, "l1b")

     # Create L1B datasets
-
+    l1b_datasets: list = []
     if "imap_hit_l0_raw" in dependencies:
         # Unpack ccsds file to xarray datasets
         packet_file = dependencies["imap_hit_l0_raw"]
         datasets_by_apid = get_datasets_by_apid(packet_file, derived=True)
-        # Process housekeeping to
-
+        # Process housekeeping to L1B.
+        l1b_datasets.append(
             process_housekeeping_data(
                 datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1b_hk"
             )
         )
         logger.info("HIT L1B housekeeping dataset created")
-    if "
-        #
-
+    if "imap_hit_l1a_count-rates" in dependencies:
+        # Process science data to L1B datasets
+        l1a_counts_dataset = dependencies["imap_hit_l1a_count-rates"]
+        l1b_datasets.extend(process_science_data(l1a_counts_dataset, attr_mgr))
+        logger.info("HIT L1B science datasets created")

-    return
+    return l1b_datasets
+
+
+def process_science_data(
+    raw_counts_dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
+) -> list[xr.Dataset]:
+    """
+    Will create L1B science datasets for CDF products.
+
+    Process L1A raw counts data to create L1B science data for
+    CDF creation. This function will create three L1B science
+    datasets: standard rates, summed rates, and sectored rates.
+    It will also update dataset attributes, coordinates and
+    data variable dimensions according to specifications in
+    a CDF yaml file.
+
+    Parameters
+    ----------
+    raw_counts_dataset : xr.Dataset
+        The L1A counts dataset.
+    attr_mgr : AttributeManager
+        The attribute manager for the L1B data level.
+
+    Returns
+    -------
+    dataset : list
+        The processed L1B science datasets as xarray datasets.
+    """
+    logger.info("Creating HIT L1B science datasets")
+
+    # Logical sources for the three L1B science products.
+    # TODO: add logical sources for other l1b products once processing functions
+    # are written. "imap_hit_l1b_summed-rates", "imap_hit_l1b_sectored-rates"
+    logical_sources = ["imap_hit_l1b_standard-rates"]
+
+    # TODO: Write functions to create the following datasets
+    # Process summed rates dataset
+    # Process sectored rates dataset
+
+    # Create a standard rates dataset
+    standard_rates_dataset = process_standard_rates_data(raw_counts_dataset)
+
+    l1b_science_datasets = []
+    # Update attributes and dimensions
+    for dataset, logical_source in zip([standard_rates_dataset], logical_sources):
+        dataset.attrs = attr_mgr.get_global_attributes(logical_source)
+
+        # TODO: Add CDF attributes to yaml once they're defined for L1B science data
+        # Assign attributes and dimensions to each data array in the Dataset
+        for field in dataset.data_vars.keys():
+            try:
+                # Create a dict of dimensions using the DEPEND_I keys in the
+                # attributes
+                dims = {
+                    key: value
+                    for key, value in attr_mgr.get_variable_attributes(field).items()
+                    if "DEPEND" in key
+                }
+                dataset[field].attrs = attr_mgr.get_variable_attributes(field)
+                dataset[field].assign_coords(dims)
+            except KeyError:
+                print(f"Field {field} not found in attribute manager.")
+                logger.warning(f"Field {field} not found in attribute manager.")
+
+        # Skip schema check for epoch to prevent attr_mgr from adding the
+        # DEPEND_0 attribute which isn't required for epoch
+        dataset.epoch.attrs = attr_mgr.get_variable_attributes(
+            "epoch", check_schema=False
+        )
+
+        l1b_science_datasets.append(dataset)
+
+        logger.info(f"HIT L1B dataset created for {logical_source}")
+
+    return l1b_science_datasets
+
+
+def process_standard_rates_data(raw_counts_dataset: xr.Dataset) -> xr.Dataset:
+    """
+    Will process L1B standard rates data from raw L1A counts data.
+
+    Parameters
+    ----------
+    raw_counts_dataset : xr.Dataset
+        The L1A counts dataset.
+
+    Returns
+    -------
+    xr.Dataset
+        The processed L1B standard rates dataset.
+    """
+    # Create a new dataset to store the L1B standard rates
+    l1b_standard_rates_dataset = xr.Dataset()
+
+    # Add required coordinates from the raw_counts_dataset
+    coords = [
+        "epoch",
+        "gain",
+        "sngrates_index",
+        "coinrates_index",
+        "pbufrates_index",
+        "l2fgrates_index",
+        "l2bgrates_index",
+        "l3fgrates_index",
+        "l3bgrates_index",
+        "penfgrates_index",
+        "penbgrates_index",
+        "ialirtrates_index",
+    ]
+    l1b_standard_rates_dataset = l1b_standard_rates_dataset.assign_coords(
+        {coord: raw_counts_dataset.coords[coord] for coord in coords}
+    )
+
+    # Add dynamic threshold field
+    l1b_standard_rates_dataset["dynamic_threshold_state"] = raw_counts_dataset[
+        "hdr_dynamic_threshold_state"
+    ]
+
+    # Define fields from the raw_counts_dataset to calculate standard rates from
+    standard_rate_fields = [
+        "sngrates",
+        "coinrates",
+        "pbufrates",
+        "l2fgrates",
+        "l2bgrates",
+        "l3fgrates",
+        "l3bgrates",
+        "penfgrates",
+        "penbgrates",
+        "ialirtrates",
+        "l4fgrates",
+        "l4bgrates",
+    ]
+
+    # Calculate livetime from the livetime counter
+    livetime = raw_counts_dataset["livetime_counter"] / 270
+
+    # Calculate standard rates by dividing the raw counts by livetime for
+    # data variables with names that contain a substring from a defined
+    # list of field names.
+    for var in raw_counts_dataset.data_vars:
+        if var != "livetime_counter" and any(
+            base_var in var for base_var in standard_rate_fields
+        ):
+            l1b_standard_rates_dataset[var] = raw_counts_dataset[var] / livetime
+
+    return l1b_standard_rates_dataset
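
Note: the standard-rates conversion above is just counts divided by livetime, with livetime derived from the livetime counter scaled by 270 (the scaling constant is taken from the diff; its units are not stated there). A standalone sketch of the arithmetic with illustrative values:

import numpy as np

livetime_counter = np.array([270, 540, 135])  # illustrative counter values
counts = np.array([100, 100, 100])

livetime = livetime_counter / 270  # per the diff's scaling
rates = counts / livetime          # counts per unit livetime

print(rates)  # [100.  50. 200.]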

imap_processing/ialirt/l0/process_codicehi.py
ADDED
@@ -0,0 +1,156 @@
+"""Functions to support I-ALiRT CoDICE Hi processing."""
+
+import logging
+from typing import Any
+
+import numpy as np
+import xarray as xr
+
+logger = logging.getLogger(__name__)
+
+
+def find_groups(data: xr.Dataset) -> xr.Dataset:
+    """
+    Find all occurrences of the sequential set of 234 values 0-233.
+
+    If a value is missing, or we are starting/ending
+    in the middle of a sequence we do not count that as a valid group.
+
+    Parameters
+    ----------
+    data : xr.Dataset
+        CoDICE Hi Dataset.
+
+    Returns
+    -------
+    grouped_data : xr.Dataset
+        Grouped data.
+    """
+    subcom_range = (0, 233)
+
+    data = data.sortby("cod_hi_acq", ascending=True)
+
+    # Use cod_hi_counter == 0 to define the beginning of the group.
+    # Find cod_hi_acq at this index and use it as the beginning time for the group.
+    start_sc_ticks = data["cod_hi_acq"][(data["cod_hi_counter"] == subcom_range[0])]
+    start_sc_tick = start_sc_ticks.min()
+    # Use cod_hi_counter == 233 to define the end of the group.
+    last_sc_ticks = data["cod_hi_acq"][
+        ([data["cod_hi_counter"] == subcom_range[-1]][-1])
+    ]
+    last_sc_tick = last_sc_ticks.max()
+
+    # Filter out data before the first cod_hi_counter=0 and
+    # after the last cod_hi_counter=233 and cod_hi_counter values != 0-233.
+    grouped_data = data.where(
+        (data["cod_hi_acq"] >= start_sc_tick)
+        & (data["cod_hi_acq"] <= last_sc_tick)
+        & (data["cod_hi_counter"] >= subcom_range[0])
+        & (data["cod_hi_counter"] <= subcom_range[-1]),
+        drop=True,
+    )
+
+    # Assign labels based on the cod_hi_acq times.
+    group_labels = np.searchsorted(
+        start_sc_ticks, grouped_data["cod_hi_acq"], side="right"
+    )
+    # Example:
+    # grouped_data.coords
+    # Coordinates:
+    #   * epoch    (epoch) int64 7kB 315922822184000000 ... 315923721184000000
+    #   * group    (group) int64 7kB 1 1 1 1 1 1 1 1 1 ... 15 15 15 15 15 15 15 15 15
+    grouped_data["group"] = ("group", group_labels)
+
+    return grouped_data
+
+
+def append_cod_hi_data(dataset: xr.Dataset) -> xr.Dataset:
+    """
+    Append the cod_hi_## data values and create an xarray.
+
+    Parameters
+    ----------
+    dataset : xr.Dataset
+        Original dataset of group.
+
+    Returns
+    -------
+    appended_dataset : xr.Dataset
+        Dataset with cod_hi_## stacked.
+    """
+    # Number of codice hi data rows
+    num_cod_hi_rows = 5
+    cod_hi_data = np.stack(
+        [dataset[f"cod_hi_data_{i:02}"].values for i in range(num_cod_hi_rows)], axis=1
+    )
+
+    repeated_data = {
+        var: np.repeat(dataset[var].values, num_cod_hi_rows)
+        for var in dataset.data_vars
+        if not var.startswith("cod_hi_data_")
+    }
+
+    repeated_data["cod_hi_appended"] = cod_hi_data.flatten()
+    repeated_epoch = np.repeat(dataset["epoch"].values, num_cod_hi_rows)
+
+    appended_dataset = xr.Dataset(
+        data_vars={name: ("epoch", values) for name, values in repeated_data.items()},
+        coords={"epoch": repeated_epoch},
+    )
+
+    return appended_dataset
+
+
+def process_codicehi(xarray_data: xr.Dataset) -> list[dict]:
+    """
+    Create final data products.
+
+    Parameters
+    ----------
+    xarray_data : xr.Dataset
+        Parsed data.
+
+    Returns
+    -------
+    codicehi_data : list[dict]
+        Dictionary of final data product.
+
+    Notes
+    -----
+    This function is incomplete and will need to be updated to include the
+    necessary calculations and data products.
+    - Calculate species counts (pg 27 of Algorithm Document)
+    - Calculate rates (assume 4 minutes per group)
+    - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
+    - Calculate the public data products
+    """
+    grouped_data = find_groups(xarray_data)
+    unique_groups = np.unique(grouped_data["group"])
+    codicehi_data: list[dict[str, Any]] = [{}]
+
+    for group in unique_groups:
+        # cod_hi_counter values for the group should be 0-233 with no duplicates.
+        subcom_values = grouped_data["cod_hi_counter"][
+            (grouped_data["group"] == group).values
+        ]
+
+        # Ensure no duplicates and all values from 0 to 233 are present
+        if not np.array_equal(subcom_values, np.arange(234)):
+            logger.warning(
+                f"Group {group} does not contain all values from 0 to "
+                f"233 without duplicates."
+            )
+            continue
+
+        mask = grouped_data["group"] == group
+        filtered_indices = np.where(mask)[0]
+        group_data = grouped_data.isel(epoch=filtered_indices)
+
+        append_cod_hi_data(group_data)
+
+        # TODO: calculate species counts
+        # TODO: calculate rates
+        # TODO: calculate L2 CoDICE pseudodensities
+        # TODO: calculate the public data products
+
+    return codicehi_data
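
Note: the group labeling in find_groups hinges on np.searchsorted with side="right": each acquisition tick is assigned the count of group starts at or before it, i.e. the index of the most recent group start. A small sketch with made-up tick values:

import numpy as np

# Acquisition ticks at which cod_hi_counter == 0 (group starts), sorted.
start_sc_ticks = np.array([100, 400, 700])

# Acquisition ticks of individual packets after filtering.
cod_hi_acq = np.array([100, 150, 399, 400, 650, 700, 900])

# side="right" counts starts <= each tick, so the first group gets label 1.
group_labels = np.searchsorted(start_sc_ticks, cod_hi_acq, side="right")
print(group_labels)  # [1 1 1 2 2 3 3]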

imap_processing/ialirt/l0/process_codicelo.py
CHANGED
@@ -41,9 +41,12 @@ def find_groups(data: xr.Dataset) -> xr.Dataset:
     last_sc_tick = last_sc_ticks.max()

     # Filter out data before the first cod_lo_counter=0 and
-    # after the last cod_lo_counter=232.
+    # after the last cod_lo_counter=232 and cod_lo_counter values != 0-232.
     grouped_data = data.where(
-        (data["cod_lo_acq"] >= start_sc_tick)
+        (data["cod_lo_acq"] >= start_sc_tick)
+        & (data["cod_lo_acq"] <= last_sc_tick)
+        & (data["cod_lo_counter"] >= subcom_range[0])
+        & (data["cod_lo_counter"] <= subcom_range[-1]),
         drop=True,
     )
