imap-processing 0.18.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- imap_processing/_version.py +2 -2
- imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +301 -274
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +28 -28
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
- imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +12 -0
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +12 -4
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +50 -7
- imap_processing/cli.py +95 -41
- imap_processing/codice/codice_l1a.py +131 -31
- imap_processing/codice/codice_l2.py +118 -10
- imap_processing/codice/constants.py +740 -595
- imap_processing/decom.py +1 -4
- imap_processing/ena_maps/ena_maps.py +32 -25
- imap_processing/ena_maps/utils/naming.py +8 -2
- imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
- imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
- imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
- imap_processing/glows/ancillary/imap_glows_pipeline_settings_20250923_v002.json +54 -0
- imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
- imap_processing/glows/l1b/glows_l1b.py +99 -9
- imap_processing/glows/l1b/glows_l1b_data.py +350 -38
- imap_processing/glows/l2/glows_l2.py +11 -0
- imap_processing/hi/hi_l1a.py +124 -3
- imap_processing/hi/hi_l1b.py +154 -71
- imap_processing/hi/hi_l2.py +84 -51
- imap_processing/hi/utils.py +153 -8
- imap_processing/hit/l0/constants.py +3 -0
- imap_processing/hit/l0/decom_hit.py +3 -6
- imap_processing/hit/l1a/hit_l1a.py +311 -21
- imap_processing/hit/l1b/hit_l1b.py +54 -126
- imap_processing/hit/l2/hit_l2.py +6 -6
- imap_processing/ialirt/calculate_ingest.py +219 -0
- imap_processing/ialirt/constants.py +12 -2
- imap_processing/ialirt/generate_coverage.py +15 -2
- imap_processing/ialirt/l0/ialirt_spice.py +5 -2
- imap_processing/ialirt/l0/parse_mag.py +293 -42
- imap_processing/ialirt/l0/process_hit.py +5 -3
- imap_processing/ialirt/l0/process_swapi.py +41 -25
- imap_processing/ialirt/process_ephemeris.py +70 -14
- imap_processing/idex/idex_l0.py +2 -2
- imap_processing/idex/idex_l1a.py +2 -3
- imap_processing/idex/idex_l1b.py +2 -3
- imap_processing/idex/idex_l2a.py +130 -4
- imap_processing/idex/idex_l2b.py +158 -143
- imap_processing/idex/idex_utils.py +1 -3
- imap_processing/lo/l0/lo_science.py +25 -24
- imap_processing/lo/l1b/lo_l1b.py +3 -3
- imap_processing/lo/l1c/lo_l1c.py +116 -50
- imap_processing/lo/l2/lo_l2.py +29 -29
- imap_processing/lo/lo_ancillary.py +55 -0
- imap_processing/mag/l1a/mag_l1a.py +1 -0
- imap_processing/mag/l1a/mag_l1a_data.py +26 -0
- imap_processing/mag/l1b/mag_l1b.py +3 -2
- imap_processing/mag/l1c/interpolation_methods.py +14 -15
- imap_processing/mag/l1c/mag_l1c.py +23 -6
- imap_processing/mag/l1d/mag_l1d.py +57 -14
- imap_processing/mag/l1d/mag_l1d_data.py +167 -30
- imap_processing/mag/l2/mag_l2_data.py +10 -2
- imap_processing/quality_flags.py +9 -1
- imap_processing/spice/geometry.py +76 -33
- imap_processing/spice/pointing_frame.py +0 -6
- imap_processing/spice/repoint.py +29 -2
- imap_processing/spice/spin.py +28 -8
- imap_processing/spice/time.py +12 -22
- imap_processing/swapi/l1/swapi_l1.py +10 -4
- imap_processing/swapi/l2/swapi_l2.py +15 -17
- imap_processing/swe/l1b/swe_l1b.py +1 -2
- imap_processing/ultra/constants.py +1 -24
- imap_processing/ultra/l0/ultra_utils.py +9 -11
- imap_processing/ultra/l1a/ultra_l1a.py +1 -2
- imap_processing/ultra/l1b/cullingmask.py +6 -3
- imap_processing/ultra/l1b/de.py +81 -23
- imap_processing/ultra/l1b/extendedspin.py +13 -10
- imap_processing/ultra/l1b/lookup_utils.py +281 -28
- imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +161 -3
- imap_processing/ultra/l1b/ultra_l1b_extended.py +253 -47
- imap_processing/ultra/l1c/helio_pset.py +97 -24
- imap_processing/ultra/l1c/l1c_lookup_utils.py +256 -0
- imap_processing/ultra/l1c/spacecraft_pset.py +83 -16
- imap_processing/ultra/l1c/ultra_l1c.py +6 -2
- imap_processing/ultra/l1c/ultra_l1c_culling.py +85 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +385 -277
- imap_processing/ultra/l2/ultra_l2.py +0 -1
- imap_processing/ultra/utils/ultra_l1_utils.py +28 -3
- imap_processing/utils.py +3 -4
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/METADATA +2 -2
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/RECORD +102 -95
- imap_processing/idex/idex_l2c.py +0 -84
- imap_processing/spice/kernels.py +0 -187
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/entry_points.txt +0 -0
@@ -8,22 +8,20 @@ Reference: https://spiceypy.readthedocs.io/en/main/documentation.html.
 
 import logging
 import typing
-from
+from datetime import datetime, timedelta
 
 import numpy as np
 import spiceypy
 from numpy import ndarray
 
+from imap_processing.ialirt.constants import STATIONS
 from imap_processing.spice.geometry import SpiceBody, SpiceFrame, imap_state
-from imap_processing.spice.kernels import ensure_spice
 from imap_processing.spice.time import et_to_utc, str_to_et
 
 # Logger setup
 logger = logging.getLogger(__name__)
 
 
-@typing.no_type_check
-@ensure_spice
 def latitude_longitude_to_ecef(
     longitude: float, latitude: float, altitude: float
 ) -> ndarray:

@@ -68,12 +66,11 @@ def latitude_longitude_to_ecef(
 
 
 @typing.no_type_check
-@ensure_spice
 def calculate_azimuth_and_elevation(
     longitude: float,
     latitude: float,
     altitude: float,
-    observation_time:
+    observation_time: float | np.ndarray,
     target: str = SpiceBody.IMAP.name,
 ) -> tuple:
     """

@@ -137,8 +134,8 @@ def calculate_doppler(
     longitude: float,
     latitude: float,
     altitude: float,
-    observation_time:
-) ->
+    observation_time: float | np.ndarray,
+) -> float | ndarray[float]:
     """
     Calculate the doppler velocity.
 

@@ -194,7 +191,7 @@ def build_output(
     latitude: float,
     altitude: float,
     time_endpoints: tuple[str, str],
-    time_step: float,
+    time_step: float = 60,
 ) -> dict[str, np.ndarray]:
     """
     Build the output dictionary containing time, azimuth, elevation, and doppler.

@@ -210,7 +207,7 @@ def build_output(
     time_endpoints : tuple[str, str]
         Start and stop times in UTC.
     time_step : float
-        Seconds between data points.
+        Seconds between data points. Default is 60.
 
     Returns
     -------

@@ -230,10 +227,10 @@ def build_output(
     )
 
     output_dict["time"] = et_to_utc(time_range, format_str="ISOC")
-    output_dict["azimuth"] = azimuth
-    output_dict["elevation"] = elevation
-    output_dict["doppler"] =
-        longitude, latitude, altitude, time_range
+    output_dict["azimuth"] = np.round(azimuth, 6)
+    output_dict["elevation"] = np.round(elevation, 6)
+    output_dict["doppler"] = np.round(
+        calculate_doppler(longitude, latitude, altitude, time_range), 6
     )
 
     logger.info(

@@ -242,3 +239,62 @@ def build_output(
     )
 
     return output_dict
+
+
+def generate_text_files(station: str, day: str) -> list[str]:
+    """
+    Generate a pointing schedule text file and return it as a list of strings.
+
+    Parameters
+    ----------
+    station : str
+        Station name.
+    day : str
+        The day for which to generate a pointing schedule, in ISO format.
+        Ex: "2025-08-11".
+
+    Returns
+    -------
+    lines : list[str]
+        A list of strings that makeup the lines of a pointing schedule file.
+    """
+    station_properties = STATIONS[station]
+
+    day_as_datetime = datetime.fromisoformat(day)
+    time_endpoints = (
+        datetime.strftime(day_as_datetime, "%Y-%m-%d %H:%M:%S"),
+        datetime.strftime(day_as_datetime + timedelta(days=1), "%Y-%m-%d %H:%M:%S"),
+    )
+    output_dict = build_output(
+        station_properties[0],
+        station_properties[1],
+        station_properties[2],
+        time_endpoints,
+    )
+
+    lines = [
+        f"Station: {station}\n",
+        "Target: IMAP\n",
+        f"Creation date (UTC): {datetime.utcnow()}\n",
+        f"Start time: {time_endpoints[0]}\n",
+        f"End time: {time_endpoints[1]}\n",
+        "Cadence (sec): 60\n\n",
+        "Date/Time"
+        + "Azimuth".rjust(29)
+        + "Elevation".rjust(17)
+        + "Doppler".rjust(15)
+        + "\n",
+        "(UTC)" + "(deg.)".rjust(33) + "(deg.)".rjust(16) + "(km/s)".rjust(16) + "\n",
+    ]
+
+    length = len(output_dict["time"])
+    for i in range(length):
+        lines.append(
+            f"{output_dict['time'][i]}"
+            + f"{output_dict['azimuth'][i]}".rjust(16)
+            + f"{output_dict['elevation'][i]}".rjust(16)
+            + f"{output_dict['doppler'][i]}".rjust(15)
+            + "\n"
+        )
+
+    return lines
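The added generate_text_files helper builds one fixed-width schedule row per time step by right-justifying each column with str.rjust. Below is a minimal, standalone sketch of that row formatting, using made-up sample values in place of the real SPICE-derived azimuth, elevation, and doppler outputs of build_output (so it runs without any kernels):

# Sketch only: synthetic values stand in for the SPICE-derived results of build_output.
sample_output = {
    "time": ["2025-08-11T00:00:00.000", "2025-08-11T00:01:00.000"],
    "azimuth": [123.456789, 123.459876],  # degrees, rounded to 6 places upstream
    "elevation": [45.123456, 45.125678],  # degrees
    "doppler": [0.012345, 0.012346],  # km/s
}

header = (
    "Date/Time"
    + "Azimuth".rjust(29)
    + "Elevation".rjust(17)
    + "Doppler".rjust(15)
    + "\n"
)
rows = [
    f"{sample_output['time'][i]}"
    + f"{sample_output['azimuth'][i]}".rjust(16)
    + f"{sample_output['elevation'][i]}".rjust(16)
    + f"{sample_output['doppler'][i]}".rjust(15)
    + "\n"
    for i in range(len(sample_output["time"]))
]
print(header + "".join(rows))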
imap_processing/idex/idex_l0.py
CHANGED
@@ -2,7 +2,7 @@
 
 import logging
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 from xarray import Dataset
 

@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
 
 
 def decom_packets(
-    packet_file:
+    packet_file: str | Path,
 ) -> tuple[list[Any], dict[int, Dataset], dict[int, Dataset]]:
     """
     Decom IDEX data packets using IDEX packet definition.
imap_processing/idex/idex_l1a.py
CHANGED
@@ -17,7 +17,6 @@ Examples
 import logging
 from enum import IntEnum
 from pathlib import Path
-from typing import Union
 
 import numpy as np
 import numpy.typing as npt

@@ -61,7 +60,7 @@ class PacketParser:
        The path and filename to the L0 file to read.
    """
 
-    def __init__(self, packet_file:
+    def __init__(self, packet_file: str | Path) -> None:
        """
        Read a L0 pkts file and perform all of the decom work.
 

@@ -250,7 +249,7 @@ def _read_waveform_bits(waveform_raw: str, high_sample: bool = True) -> list[int
 
 
 def calculate_idex_epoch_time(
-    shcoarse_time:
+    shcoarse_time: float | np.ndarray, shfine_time: float | np.ndarray
 ) -> npt.NDArray[np.int64]:
     """
     Calculate the epoch time from the FPGA header time variables.
imap_processing/idex/idex_l1b.py
CHANGED
@@ -16,7 +16,6 @@ Examples
 
 import logging
 from enum import Enum
-from typing import Union
 
 import pandas as pd
 import xarray as xr

@@ -226,7 +225,7 @@ def convert_waveforms(
 
 def get_trigger_mode_and_level(
     l1a_dataset: xr.Dataset,
-) ->
+) -> dict[str, xr.DataArray] | dict:
     """
     Determine the trigger mode and threshold level for each event.
 

@@ -249,7 +248,7 @@ def get_trigger_mode_and_level(
 
 def compute_trigger_values(
     trigger_mode: int, trigger_controls: int, gain_channel: str
-) ->
+) -> tuple[str, int | float] | tuple[None, None]:
     """
     Compute the trigger mode label and threshold level.
 
imap_processing/idex/idex_l2a.py
CHANGED
@@ -24,7 +24,7 @@ import pandas as pd
 import xarray as xr
 from numpy.typing import NDArray
 from scipy.integrate import quad
-from scipy.optimize import curve_fit
+from scipy.optimize import curve_fit, root_scalar
 from scipy.signal import butter, detrend, filtfilt, find_peaks
 from scipy.stats import exponnorm
 

@@ -52,7 +52,33 @@ class BaselineNoiseTime(IntEnum):
     STOP = -5
 
 
-def
+def load_calibration_files(ancillary_files: dict) -> tuple[NDArray, NDArray]:
+    """
+    Load calibration files for IDEX L2A processing.
+
+    Parameters
+    ----------
+    ancillary_files : dict
+        Dictionary containing paths to calibration files.
+
+    Returns
+    -------
+    numpy.ndarray
+        Calibration parameters for the rise time function.
+    numpy.ndarray
+        Calibration parameters for the charge yield function.
+    """
+    # Load calibration coefficients from ancillary files
+    t_rise_params = pd.read_csv(
+        ancillary_files["l2a-calibration-curve-yield-params"], skiprows=1, header=None
+    ).values.flatten()[:8]
+    yield_params = pd.read_csv(
+        ancillary_files["l2a-calibration-curve-t-rise"], skiprows=1, header=None
+    ).values.flatten()[:8]
+    return t_rise_params, yield_params
+
+
+def idex_l2a(l1b_dataset: xr.Dataset, ancillary_files: dict) -> xr.Dataset:
     """
     Will process IDEX l1b data to create l2a data products.
 

@@ -68,6 +94,9 @@ def idex_l2a(l1b_dataset: xr.Dataset) -> xr.Dataset:
    ----------
    l1b_dataset : xarray.Dataset
        IDEX L1a dataset to process.
+    ancillary_files : dict
+        Ancillary files containing calibration coefficients needed to estimate
+        velocity and mass of the dust particles.
 
    Returns
    -------

@@ -79,6 +108,7 @@ def idex_l2a(l1b_dataset: xr.Dataset) -> xr.Dataset:
     logger.info(
         f"Running IDEX L2A processing on dataset: {l1b_dataset.attrs['Logical_source']}"
     )
+    t_rise_params, yield_params = load_calibration_files(ancillary_files)
 
     tof_high = l1b_dataset["TOF_High"]
     hs_time = l1b_dataset["time_high_sample_rate"]

@@ -176,11 +206,24 @@ def idex_l2a(l1b_dataset: xr.Dataset) -> xr.Dataset:
             output_dtypes=[np.float64] * 6,
             keep_attrs=True,
         )
+        # Calculate mass and velocity estimates
+        velocity_mass_results = xr.apply_ufunc(
+            calculate_velocity_and_mass,
+            fit_results[1],  # signal amplitude
+            fit_results[0].data[:, 3],  # fit params
+            output_core_dims=[[], []],
+            vectorize=True,
+            output_dtypes=[np.float64, np.float64],
+            keep_attrs=True,
+            kwargs={"t_rise_params": t_rise_params, "yield_params": yield_params},
+        )
+
         waveform_name = waveform.lower()
         output_vars = {
             f"{waveform_name}_fit_parameters": fit_results[0],
             f"{waveform_name}_impact_charge": fit_results[1],
-            f"{waveform_name}
+            f"{waveform_name}_velocity_estimate": velocity_mass_results[0],
+            f"{waveform_name}_dust_mass_estimate": velocity_mass_results[1],
             # Same as impact_charge for now
             f"{waveform_name}_chi_squared": fit_results[2],
             f"{waveform_name}_reduced_chi_squared": fit_results[3],
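The mass/velocity step above broadcasts a scalar function over every dust event with xr.apply_ufunc(..., vectorize=True); the two empty entries in output_core_dims mean two per-event arrays come back. A self-contained sketch of that pattern, with a hypothetical toy_velocity_and_mass standing in for calculate_velocity_and_mass:

import numpy as np
import xarray as xr


def toy_velocity_and_mass(sig_amp, t_rise, scale=2.0):
    # Hypothetical stand-in: returns two scalars (velocity, mass) per event.
    velocity = t_rise * scale
    return velocity, sig_amp / velocity


events = xr.Dataset(
    {
        "sig_amp": ("epoch", np.array([1.0, 2.0, 3.0])),
        "t_rise": ("epoch", np.array([0.1, 0.2, 0.3])),
    }
)

# vectorize=True loops the scalar function over the epoch dimension; the two
# entries in output_core_dims produce two per-event DataArrays.
velocity, mass = xr.apply_ufunc(
    toy_velocity_and_mass,
    events["sig_amp"],
    events["t_rise"],
    output_core_dims=[[], []],
    vectorize=True,
    output_dtypes=[np.float64, np.float64],
    kwargs={"scale": 2.0},
)
print(velocity.values, mass.values)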
@@ -261,6 +304,89 @@ def idex_l2a(l1b_dataset: xr.Dataset) -> xr.Dataset:
     return l2a_dataset
 
 
+def calculate_velocity_and_mass(
+    sig_amp: float, t_rise: float, t_rise_params: np.ndarray, yield_params: np.ndarray
+) -> tuple[float, float]:
+    """
+    Calculate velocity and mass estimates.
+
+    The fitted target signals are used to generate IDEX’s specific charge yield as a
+    function of the impact speed. The calibration curve is fitted with a
+    segmented power law distribution. The charge yield curve enables the mass of
+    the dust particle to be estimated from the total charge it generates on the target.
+
+    Parameters
+    ----------
+    sig_amp : float
+        Signal amplitude.
+    t_rise : float
+        T_rise fit parameter from the target fit.
+    t_rise_params : np.ndarray
+        Calibration parameters for rise time.
+    yield_params : np.ndarray
+        Calibration parameters for yield.
+
+    Returns
+    -------
+    v_est : float
+        Estimated velocity.
+    mass_est : float
+        Estimated mass.
+    """
+    log_a_t: float = np.log10(t_rise_params[0])
+    try:
+        root = root_scalar(
+            lambda lv: log_smooth_powerlaw(lv, log_a_t, t_rise_params[1:])
+            - np.log10(t_rise),
+            bracket=[-1, 2],
+        )
+        v_est = 10**root.root
+    except Exception:
+        logger.error(
+            "Unable to calculate velocity and mass estimate. "
+            "The root finding failed for power law function. "
+            "Returning nans for the estimate."
+        )
+        return np.nan, np.nan
+
+    log_a_y: float = np.log10(yield_params[0])
+    yield_val = 10 ** log_smooth_powerlaw(np.log10(v_est), log_a_y, yield_params[1:])
+    mass_est = sig_amp / yield_val
+
+    return v_est, mass_est
+
+
+def log_smooth_powerlaw(log_v: float, log_a: float, params: np.ndarray) -> float:
+    """
+    Define a smoothly transitioning power law to fit the calibration curve to.
+
+    Parameters
+    ----------
+    log_v : float
+        Velocity.
+    log_a : float
+        Scale factor.
+    params : np.ndarray
+        Calibration parameters for the power law.
+
+    Returns
+    -------
+    float
+        The value of the power law at the given velocity.
+    """
+    # Unpack the rest of the calibration parameters
+    # a1, a2, and a3 are the power law exponents for the low, medium, and high-velocity
+    # segments.
+    # vb and vc are the characteristic speeds where the slope transition happens, and k
+    # setting the sharpness of the transitions.
+    a1, a2, a3, vb, vc, k, m = params
+    v = 10**log_v
+    base = log_a + a1 * log_v
+    transition1 = (1 + (v / vb) ** m) ** ((a2 - a1) / m)
+    transition2 = (1 + (v / vc) ** m) ** ((a3 - a2) / m)
+    return base + np.log10(transition1 * transition2)
+
+
 def time_to_mass(
     tof_high: np.ndarray, high_sampling_time: np.ndarray, masses: np.ndarray
 ) -> tuple[NDArray, NDArray, NDArray]:

@@ -399,7 +525,7 @@ def calculate_kappa(mass_scales: np.ndarray, peaks_2d: list) -> NDArray:
     kappas = np.asarray(
         [
             np.mean(mass_scale[peaks] - np.round(mass_scale[peaks]))
-            for mass_scale, peaks in zip(mass_scales, peaks_2d)
+            for mass_scale, peaks in zip(mass_scales, peaks_2d, strict=False)
         ]
     )
     return kappas