imap-processing 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
- imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
- imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +29 -22
- imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
- imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
- imap_processing/cdf/utils.py +2 -2
- imap_processing/cli.py +10 -27
- imap_processing/codice/codice_l1a_lo_angular.py +362 -0
- imap_processing/codice/codice_l1a_lo_species.py +282 -0
- imap_processing/codice/codice_l1b.py +62 -97
- imap_processing/codice/codice_l2.py +801 -174
- imap_processing/codice/codice_new_l1a.py +64 -0
- imap_processing/codice/constants.py +96 -0
- imap_processing/codice/utils.py +270 -0
- imap_processing/ena_maps/ena_maps.py +157 -95
- imap_processing/ena_maps/utils/coordinates.py +5 -0
- imap_processing/ena_maps/utils/corrections.py +450 -0
- imap_processing/ena_maps/utils/map_utils.py +143 -42
- imap_processing/ena_maps/utils/naming.py +3 -1
- imap_processing/hi/hi_l1c.py +34 -12
- imap_processing/hi/hi_l2.py +82 -44
- imap_processing/ialirt/constants.py +7 -1
- imap_processing/ialirt/generate_coverage.py +3 -1
- imap_processing/ialirt/l0/parse_mag.py +1 -0
- imap_processing/ialirt/l0/process_codice.py +66 -0
- imap_processing/ialirt/l0/process_hit.py +1 -0
- imap_processing/ialirt/l0/process_swapi.py +1 -0
- imap_processing/ialirt/l0/process_swe.py +2 -0
- imap_processing/ialirt/process_ephemeris.py +6 -2
- imap_processing/ialirt/utils/create_xarray.py +4 -2
- imap_processing/idex/idex_l2a.py +2 -2
- imap_processing/idex/idex_l2b.py +1 -1
- imap_processing/lo/l1c/lo_l1c.py +62 -4
- imap_processing/lo/l2/lo_l2.py +85 -15
- imap_processing/mag/l1a/mag_l1a.py +2 -2
- imap_processing/mag/l1a/mag_l1a_data.py +71 -13
- imap_processing/mag/l1c/interpolation_methods.py +34 -13
- imap_processing/mag/l1c/mag_l1c.py +117 -67
- imap_processing/mag/l1d/mag_l1d_data.py +3 -1
- imap_processing/quality_flags.py +1 -0
- imap_processing/spice/geometry.py +11 -9
- imap_processing/spice/pointing_frame.py +77 -50
- imap_processing/swapi/constants.py +4 -0
- imap_processing/swapi/l1/swapi_l1.py +59 -24
- imap_processing/swapi/l2/swapi_l2.py +17 -3
- imap_processing/swe/utils/swe_constants.py +7 -7
- imap_processing/ultra/l1a/ultra_l1a.py +121 -72
- imap_processing/ultra/l1b/de.py +57 -1
- imap_processing/ultra/l1b/extendedspin.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
- imap_processing/ultra/l1b/ultra_l1b_extended.py +25 -12
- imap_processing/ultra/l1c/helio_pset.py +29 -6
- imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
- imap_processing/ultra/l1c/spacecraft_pset.py +10 -6
- imap_processing/ultra/l1c/ultra_l1c.py +6 -6
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
- imap_processing/ultra/l2/ultra_l2.py +2 -2
- imap_processing-1.0.2.dist-info/METADATA +121 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/RECORD +67 -61
- imap_processing-1.0.0.dist-info/METADATA +0 -120
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_new_l1a.py
ADDED
@@ -0,0 +1,64 @@
+"""CoDICE L1A processing functions."""
+
+import logging
+
+import xarray as xr
+from imap_data_access import ProcessingInputCollection
+
+from imap_processing import imap_module_directory
+from imap_processing.codice.codice_l1a_lo_angular import l1a_lo_angular
+from imap_processing.codice.codice_l1a_lo_species import l1a_lo_species
+from imap_processing.codice.utils import (
+    CODICEAPID,
+)
+from imap_processing.utils import packet_file_to_datasets
+
+logger = logging.getLogger(__name__)
+
+
+def process_l1a(dependency: ProcessingInputCollection) -> list[xr.Dataset]:
+    """
+    Process L1A data based on descriptor and dependencies.
+
+    Parameters
+    ----------
+    dependency : ProcessingInputCollection
+        Collection of processing inputs required for L1A processing.
+
+    Returns
+    -------
+    list[xarray.Dataset]
+        List of processed L1A datasets generated from available APIDs.
+    """
+    # Get science data which is L0 packet file
+    science_file = dependency.get_file_paths(data_type="l0")[0]
+    # Get LUT file
+    lut_file = dependency.get_file_paths(descriptor="l1a-sci-lut")[0]
+
+    logger.info(f"Processing L1A for {science_file.name} with {lut_file.name}")
+
+    xtce_file = (
+        imap_module_directory / "codice/packet_definitions/codice_packet_definition.xml"
+    )
+    # Decom packet
+    datasets_by_apid = packet_file_to_datasets(
+        science_file,
+        xtce_file,
+    )
+
+    datasets = []
+    for apid in datasets_by_apid:
+        if apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS:
+            logger.info("Processing Lo SW Species Counts")
+            datasets.append(l1a_lo_species(datasets_by_apid[apid], lut_file))
+        elif apid == CODICEAPID.COD_LO_NSW_SPECIES_COUNTS:
+            logger.info("Processing Lo NSW Species Counts")
+            datasets.append(l1a_lo_species(datasets_by_apid[apid], lut_file))
+        elif apid == CODICEAPID.COD_LO_SW_ANGULAR_COUNTS:
+            logger.info("Processing Lo SW Angular Counts")
+            datasets.append(l1a_lo_angular(datasets_by_apid[apid], lut_file))
+        elif apid == CODICEAPID.COD_LO_NSW_ANGULAR_COUNTS:
+            logger.info("Processing Lo NSW Angular Counts")
+            datasets.append(l1a_lo_angular(datasets_by_apid[apid], lut_file))
+
+    return datasets
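The dependency object is only used through its get_file_paths lookups. The sketch below is illustrative only: it uses a hypothetical stand-in class and hypothetical file names, not the real imap_data_access API.

# Illustrative only: a stand-in for the dependency collection, showing the two
# get_file_paths lookups that process_l1a performs above. Names are hypothetical.
from pathlib import Path


class StubInputCollection:
    def __init__(self, files: dict[str, list[Path]]):
        self._files = files

    def get_file_paths(self, data_type=None, descriptor=None) -> list[Path]:
        # Select by whichever keyword was provided, mirroring the calls above.
        return self._files.get(data_type or descriptor, [])


stub = StubInputCollection(
    {
        "l0": [Path("imap_codice_l0_raw_20260101_v001.pkts")],
        "l1a-sci-lut": [Path("imap_codice_l1a-sci-lut_20260101_v001.json")],
    }
)
print(stub.get_file_paths(data_type="l0")[0].name)
print(stub.get_file_paths(descriptor="l1a-sci-lut")[0].name)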
imap_processing/codice/constants.py
CHANGED
@@ -61,6 +61,8 @@ CODICEAPID_MAPPING = {
 SPIN_PERIOD_CONVERSION = 0.00032
 K_FACTOR = 5.76  # This is used to convert voltages to energies in L2
 HI_ACQUISITION_TIME = 0.59916
+NUM_ESA_STEPS = 128
+LO_DESPIN_SPIN_SECTORS = 24

 # CDF variable names used for lo data products
 LO_COUNTERS_SINGLES_VARIABLE_NAMES = ["apd_singles"]
@@ -92,6 +94,26 @@ LO_SW_SPECIES_VARIABLE_NAMES = [
     "heplus",
     "cnoplus",
 ]
+LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES = [
+    "hplus",
+    "heplusplus",
+    "cplus4",
+    "cplus5",
+    "cplus6",
+    "oplus5",
+    "oplus6",
+    "oplus7",
+    "oplus8",
+    "ne",
+    "mg",
+    "si",
+    "fe_loq",
+    "fe_hiq",
+]
+LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES = [
+    "heplus",
+    "cnoplus",
+]
 LO_NSW_SPECIES_VARIABLE_NAMES = [
     "hplus",
     "heplusplus",
@@ -2259,3 +2281,77 @@ HALF_SPIN_LUT = {
     30: [116, 117, 118, 119, 120, 121],
     31: [122, 123, 124, 125, 126, 127],
 }
+
+NSW_POSITIONS = [x for x in range(3, 22)]
+SW_POSITIONS = [0, 1, 2, 22, 23]
+SOLAR_WIND_POSITIONS = [0]
+PUI_POSITIONS = SW_POSITIONS
+L2_GEOMETRIC_FACTOR = 0.013
+L2_HI_NUMBER_OF_SSD = 12.0
+
+L2_HI_SECTORED_ANGLE = np.array(
+    [
+        285.00,
+        244.11,
+        228.69,
+        225.00,
+        228.69,
+        244.11,
+        285.00,
+        325.89,
+        341.31,
+        345.00,
+        341.31,
+        325.89,
+    ]
+)
+
+HI_L2_ELEVATION_ANGLE = np.array(
+    [
+        150.0,
+        138.6,
+        115.7,
+        90.0,
+        64.3,
+        41.4,
+        30.0,
+        41.4,
+        64.3,
+        90.0,
+        115.7,
+        138.6,
+    ],
+    dtype=float,
+)
+
+
+LO_POSITION_TO_ELEVATION_ANGLE = {
+    "sw": {
+        1: 0,
+        2: 15,
+        24: 15,
+        3: 30,
+        23: 30,
+    },
+    "nsw": {
+        4: 45,
+        22: 45,
+        5: 60,
+        21: 60,
+        6: 75,
+        20: 75,
+        7: 90,
+        19: 90,
+        8: 105,
+        18: 105,
+        9: 120,
+        17: 120,
+        10: 135,
+        16: 135,
+        11: 150,
+        15: 150,
+        12: 165,
+        14: 165,
+        13: 180,
+    },
+}
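As a quick illustrative check (not part of the diff), the two new position lists are disjoint and together cover indices 0-23:

# Illustrative check only.
NSW_POSITIONS = [x for x in range(3, 22)]   # positions 3-21 (19 entries)
SW_POSITIONS = [0, 1, 2, 22, 23]            # 5 entries
assert sorted(NSW_POSITIONS + SW_POSITIONS) == list(range(24))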
imap_processing/codice/utils.py
CHANGED
@@ -5,7 +5,40 @@ This module contains utility classes and functions that are used by various
 other CoDICE processing modules.
 """

+import json
+from dataclasses import dataclass
 from enum import IntEnum
+from pathlib import Path
+
+import numpy as np
+
+from imap_processing.spice.time import met_to_ttj2000ns
+
+
+@dataclass
+class ViewTabInfo:
+    """
+    Class to hold view table information.
+
+    Attributes
+    ----------
+    apid : int
+        The APID for the packet.
+    collapse_table : int
+        Collapse table id used to determine the collapse pattern.
+    sensor : int
+        Sensor id (0 for LO, 1 for HI).
+    three_d_collapsed : int
+        The 3D collapsed value from the LUT.
+    view_id : int
+        The view identifier from the packet.
+    """
+
+    apid: int
+    collapse_table: int
+    sensor: int
+    three_d_collapsed: int
+    view_id: int


 class CODICEAPID(IntEnum):
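The new ViewTabInfo dataclass is constructed with keyword fields; a minimal sketch with placeholder values (these are not real LUT values):

# Placeholder values only; real values come from the SCI-LUT view_tab entry.
from imap_processing.codice.utils import ViewTabInfo

info = ViewTabInfo(
    apid=0x460,            # hypothetical APID
    collapse_table=3,      # hypothetical collapse table id
    sensor=0,              # 0 = Lo, 1 = Hi
    three_d_collapsed=16,  # hypothetical 3D collapse value
    view_id=5,             # hypothetical view id
)
print(info.sensor, info.three_d_collapsed)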
@@ -57,3 +90,240 @@ class CoDICECompression(IntEnum):
     LOSSY_A_LOSSLESS = 4
     LOSSY_B_LOSSLESS = 5
     PACK_24_BIT = 6
+
+
+def read_sci_lut(file_path: Path, table_id: str) -> dict:
+    """
+    Read the SCI-LUT JSON file for a specific table ID.
+
+    Parameters
+    ----------
+    file_path : pathlib.Path
+        Path to the SCI-LUT JSON file.
+    table_id : str
+        Table identifier to extract from the JSON.
+
+    Returns
+    -------
+    dict
+        The SCI-LUT data for the specified table id.
+    """
+    sci_lut_data = json.loads(file_path.read_text()).get(f"{table_id}")
+    if sci_lut_data is None:
+        raise ValueError(f"SCI-LUT file does not have data for table ID {table_id}.")
+    return sci_lut_data
+
+
+def get_view_tab_info(json_data: dict, view_id: int, apid: int) -> dict:
+    """
+    Get the view table information for a specific view and APID.
+
+    Parameters
+    ----------
+    json_data : dict
+        The JSON data loaded from the SCI-LUT file.
+    view_id : int
+        The view ID from the packet.
+    apid : int
+        The APID from the packet.
+
+    Returns
+    -------
+    dict
+        The view table information containing details like sensor,
+        collapse_table, data_product, etc.
+    """
+    apid_hex = f"0x{apid:X}"
+    # This is how we get view information that will be used to get
+    # collapse pattern:
+    # table_id -> view_tab -> (view_id, apid) -> sensor -> collapse_table
+    view_tab = json_data.get("view_tab").get(f"({view_id}, {apid_hex})")
+    return view_tab
+
+
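The view_tab lookup key built by get_view_tab_info above pairs the view id with the APID rendered as hex; a toy example (all values hypothetical):

# Illustrative only: hypothetical view id, APID, and view_tab entry.
view_id, apid = 5, 0x460
key = f"({view_id}, 0x{apid:X})"            # "(5, 0x460)"
json_data = {"view_tab": {key: {"sensor": 0, "collapse_table": 3}}}
print(json_data["view_tab"][f"({view_id}, 0x{apid:X})"])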
+def get_collapse_pattern_shape(
+    json_data: dict, sensor_id: int, collapse_table_id: int
+) -> tuple[int, ...]:
+    """
+    Get the collapse pattern for a specific sensor id and collapse table id.
+
+    Parameters
+    ----------
+    json_data : dict
+        The JSON data loaded from the SCI-LUT file.
+    sensor_id : int
+        Sensor identifier (0 for LO, 1 for HI).
+    collapse_table_id : int
+        Collapse table id to look up in the SCI-LUT.
+
+    Returns
+    -------
+    tuple[int, ...]
+        The reduced shape describing the collapsed pattern. Examples:
+        ``(1,)`` for a fully collapsed 1-D pattern or ``(N, M)`` for a
+        reduced 2-D pattern.
+    """
+    sensor = "lo" if sensor_id == 0 else "hi"
+    collapse_matrix = np.array(
+        json_data[f"collapse_{sensor}"][f"{collapse_table_id}"]["matrix"]
+    )
+
+    # Analyze the collapse pattern matrix to determine its reduced shape.
+    # Steps:
+    # - Extract non-zero elements from the matrix.
+    # - Reshape to group unique non-zero rows and columns.
+    # - If all non-zero values are identical, return (1,) for a fully collapsed pattern.
+    # - Otherwise, compute the number of unique rows and columns to describe the
+    #   reduced shape.
+    non_zero_data = np.where(collapse_matrix != 0)
+    non_zero_reformatted = collapse_matrix[non_zero_data].reshape(
+        np.unique(non_zero_data[0]).size, np.unique(non_zero_data[1]).size
+    )
+
+    if np.unique(non_zero_reformatted).size == 1:
+        # all non-zero values are identical means -> fully collapsed
+        return (1,)
+
+    # If not fully collapsed, find repeated patterns in rows and columns
+    # to reduce shape further.
+    unique_rows = np.unique(non_zero_reformatted, axis=0)
+    unique_columns = np.unique(non_zero_reformatted, axis=1)
+    # Unique spin sectors and instrument azimuths to unpack data
+    unique_spin_sectors = unique_columns.shape[1]
+    unique_inst_azs = unique_rows.shape[0]
+    return (unique_spin_sectors, unique_inst_azs)
+
+
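A toy SCI-LUT entry (hypothetical table id and matrix, not from a real LUT) run through get_collapse_pattern_shape above illustrates the reduced-shape result: the 4x4 matrix has two unique non-zero rows and two unique columns, so the function returns (2, 2).

# Illustrative only.
from imap_processing.codice.utils import get_collapse_pattern_shape

toy_lut = {
    "collapse_lo": {
        "3": {"matrix": [[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]}
    }
}
print(get_collapse_pattern_shape(toy_lut, sensor_id=0, collapse_table_id=3))  # (2, 2)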
+def index_to_position(
+    json_data: dict, sensor_id: int, collapse_table_id: int
+) -> np.ndarray:
+    """
+    Get the indices of non-zero unique rows in the collapse pattern matrix.
+
+    Parameters
+    ----------
+    json_data : dict
+        The JSON data loaded from the SCI-LUT file.
+    sensor_id : int
+        Sensor identifier (0 for LO, 1 for HI).
+    collapse_table_id : int
+        Collapse table id to look up in the SCI-LUT.
+
+    Returns
+    -------
+    np.ndarray
+        Array of indices corresponding to non-zero unique rows.
+    """
+    sensor = "lo" if sensor_id == 0 else "hi"
+    collapse_matrix = np.array(
+        json_data[f"collapse_{sensor}"][f"{collapse_table_id}"]["matrix"]
+    )
+
+    # Find unique non-zero rows and their original indices
+    non_zero_row_mask = np.any(collapse_matrix != 0, axis=1)
+    non_zero_rows = collapse_matrix[non_zero_row_mask]
+    _, unique_indices = np.unique(non_zero_rows, axis=0, return_index=True)
+    non_zero_row_indices = np.flatnonzero(non_zero_row_mask)[unique_indices]
+    return non_zero_row_indices
+
+
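With the same hypothetical toy matrix, index_to_position above returns the first row index of each unique non-zero row:

# Illustrative only: rows 0 and 2 are the first occurrences of the two unique rows.
from imap_processing.codice.utils import index_to_position

toy_lut = {
    "collapse_lo": {
        "3": {"matrix": [[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]}
    }
}
print(index_to_position(toy_lut, sensor_id=0, collapse_table_id=3))  # [0 2]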
+def get_codice_epoch_time(
+    acq_start_seconds: np.ndarray,
+    acq_start_subseconds: np.ndarray,
+    spin_period: np.ndarray,
+    view_tab_obj: ViewTabInfo,
+) -> tuple[np.ndarray, np.ndarray]:
+    """
+    Calculate center time and delta.
+
+    Parameters
+    ----------
+    acq_start_seconds : np.ndarray
+        Array of acquisition start seconds.
+    acq_start_subseconds : np.ndarray
+        Array of acquisition start subseconds.
+    spin_period : np.ndarray
+        Array of spin periods.
+    view_tab_obj : ViewTabInfo
+        The view table information object. It contains information such as sensor ID
+        and three_d_collapsed value and others.
+
+    Returns
+    -------
+    tuple[np.ndarray, np.ndarray]
+        (center_times, delta_times).
+    """
+    # If Lo sensor
+    if view_tab_obj.sensor == 0:
+        # Lo sensor, we need to set spins to be constant.
+        # 32 half spins makes full 16 spins for all non direct event products.
+        # But Lo direct event's spins is also 16 spins. Because of that, we can use
+        # the same calculation for all Lo products.
+        num_spins = 16.0
+    # If Hi sensor and Direct Event product
+    elif view_tab_obj.sensor == 1 and view_tab_obj.apid == CODICEAPID.COD_HI_PHA:
+        # Use constant 16 spins for Hi PHA
+        num_spins = 16.0
+    # If Non-Direct Event Hi product
+    else:
+        # Use 3d_collapsed value from LUT for other Hi products
+        num_spins = view_tab_obj.three_d_collapsed
+
+    # Units of 'spin ticks', where one 'spin tick' equals 320 microseconds.
+    # It takes multiple spins to collect data for a view.
+    spin_period_ns = spin_period.astype(np.float64) * 320 * 1e3  # Convert to ns
+    delta_times = (num_spins * spin_period_ns) / 2
+    # subseconds need to converted to seconds using this formula per CoDICE team:
+    # subseconds / 65536 gives seconds
+    center_times_seconds = (
+        acq_start_seconds + acq_start_subseconds / 65536 + (delta_times / 1e9)
+    )
+
+    return met_to_ttj2000ns(center_times_seconds), delta_times
+
+
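A worked example of the timing arithmetic in get_codice_epoch_time above, with hypothetical telemetry values: a Lo product uses 16 spins, and a 15 s spin period corresponds to 46875 ticks of 320 microseconds.

# Worked example; all numbers are hypothetical.
ticks = 46875
spin_period_ns = ticks * 320 * 1e3           # 1.5e10 ns per spin
delta_ns = (16.0 * spin_period_ns) / 2       # 1.2e11 ns = 120 s half-width
center_met = 100000.0 + 32768 / 65536 + delta_ns / 1e9
print(delta_ns, center_met)                  # 120000000000.0 100120.5
# met_to_ttj2000ns then converts the MET center time to TT J2000 nanoseconds.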
+def calculate_acq_time_per_step(low_stepping_tab: dict) -> np.ndarray:
+    """
+    Calculate acquisition time per step from low stepping table.
+
+    Parameters
+    ----------
+    low_stepping_tab : dict
+        The low stepping table from the SCI-LUT JSON.
+
+    Returns
+    -------
+    np.ndarray
+        Array of acquisition times per step of shape (num_esa_steps,).
+    """
+    # These tunable values are used to calculate acquisition time per step
+    tunable_values = low_stepping_tab["tunable_values"]
+
+    # pre-calculate values
+    sector_time = tunable_values["spin_time_ms"] / tunable_values["num_sectors_ms"]
+    sector_margin_ms = tunable_values["sector_margin_ms"]
+    dwell_fraction = tunable_values["dwell_fraction_percentage"]
+    min_hv_settle_ms = tunable_values["min_hv_settle_ms"]
+    max_hv_settle_ms = tunable_values["max_hv_settle_ms"]
+    num_steps_data = np.array(
+        low_stepping_tab["num_steps"].get("data"), dtype=np.float64
+    )
+    # Total non-acquisition time is in column (BD) of science LUT
+    dwell_fraction_percentage = float(sector_time) * (100.0 - dwell_fraction) / 100.0
+
+    # Calculate HV settle time per step not adjusted for Min/Max.
+    # It's in column (BF) of science LUT.
+    non_adjusted_hv_settle_per_step = (
+        dwell_fraction_percentage - sector_margin_ms
+    ) / num_steps_data
+    hv_settle_per_step = np.minimum(
+        np.maximum(non_adjusted_hv_settle_per_step, min_hv_settle_ms), max_hv_settle_ms
+    )
+
+    # acquisition time per step in milliseconds
+    # sector_time - sector_margin_ms / num_steps - hv_settle_per_step
+    acq_time_per_step = (
+        (sector_time - sector_margin_ms) / num_steps_data
+    ) - hv_settle_per_step
+    # Convert to seconds
+    return acq_time_per_step / 1e3
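A worked example of the acquisition-time formula above with hypothetical tunable values: a 15 s spin split into 12 sectors, 90% dwell fraction, 2 ms sector margin, HV settle clamped to 0.5-5 ms, and 128 ESA steps (cf. NUM_ESA_STEPS = 128 added in constants.py).

# Worked example; all numbers are hypothetical.
num_steps = 128
sector_time = 15000 / 12                                    # 1250.0 ms per sector
non_dwell = sector_time * (100.0 - 90) / 100.0              # 125.0 ms
hv_settle = min(max((non_dwell - 2) / num_steps, 0.5), 5)   # 0.9609375 ms
acq_ms = (sector_time - 2) / num_steps - hv_settle          # 8.7890625 ms per step
print(acq_ms / 1e3)                                         # ~0.00879 s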