imap-processing 0.19.4__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +44 -44
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +127 -126
- imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
- imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
- imap_processing/cdf/config/imap_constant_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +61 -55
- imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +3 -2
- imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +17 -5
- imap_processing/cli.py +6 -11
- imap_processing/codice/codice_l1a.py +42 -21
- imap_processing/codice/codice_l2.py +640 -127
- imap_processing/codice/constants.py +224 -129
- imap_processing/ena_maps/ena_maps.py +124 -70
- imap_processing/ena_maps/utils/coordinates.py +5 -0
- imap_processing/ena_maps/utils/corrections.py +268 -0
- imap_processing/ena_maps/utils/map_utils.py +143 -42
- imap_processing/hi/hi_l2.py +10 -15
- imap_processing/ialirt/constants.py +7 -1
- imap_processing/ialirt/generate_coverage.py +1 -1
- imap_processing/ialirt/l0/ialirt_spice.py +1 -1
- imap_processing/ialirt/l0/parse_mag.py +33 -0
- imap_processing/ialirt/l0/process_codice.py +66 -0
- imap_processing/ialirt/utils/create_xarray.py +2 -0
- imap_processing/idex/idex_l2a.py +2 -2
- imap_processing/idex/idex_l2b.py +1 -1
- imap_processing/lo/l1c/lo_l1c.py +61 -3
- imap_processing/lo/l2/lo_l2.py +79 -11
- imap_processing/mag/l1a/mag_l1a.py +2 -2
- imap_processing/mag/l1a/mag_l1a_data.py +71 -13
- imap_processing/mag/l1c/interpolation_methods.py +34 -13
- imap_processing/mag/l1c/mag_l1c.py +117 -67
- imap_processing/mag/l1d/mag_l1d_data.py +3 -1
- imap_processing/spice/geometry.py +39 -28
- imap_processing/spice/pointing_frame.py +77 -50
- imap_processing/swapi/l1/swapi_l1.py +12 -4
- imap_processing/swe/utils/swe_constants.py +7 -7
- imap_processing/ultra/l1b/extendedspin.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
- imap_processing/ultra/l1b/ultra_l1b_extended.py +1 -1
- imap_processing/ultra/l1c/helio_pset.py +1 -1
- imap_processing/ultra/l1c/spacecraft_pset.py +2 -2
- imap_processing/ultra/l2/ultra_l2.py +3 -3
- imap_processing-1.0.1.dist-info/METADATA +121 -0
- {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/RECORD +49 -47
- imap_processing-0.19.4.dist-info/METADATA +0 -120
- {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/LICENSE +0 -0
- {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/WHEEL +0 -0
- {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/entry_points.txt +0 -0
imap_processing/ena_maps/utils/map_utils.py
CHANGED

@@ -10,6 +10,89 @@ from numpy.typing import NDArray
 logger = logging.getLogger(__name__)
 
 
+def vectorized_bincount(
+    indices: NDArray, weights: NDArray | None = None, minlength: int = 0
+) -> NDArray:
+    """
+    Vectorized version of np.bincount for multi-dimensional arrays.
+
+    This function applies np.bincount across multi-dimensional input arrays by
+    adding offsets to the indices and flattening, then reshaping the result.
+    This approach allows broadcasting between indices and weights.
+
+    Parameters
+    ----------
+    indices : NDArray
+        Array of non-negative integers to be binned. Can be multi-dimensional.
+        If multi-dimensional, bincount is applied independently along each
+        leading dimension.
+    weights : NDArray, optional
+        Array of weights that is broadcastable with indices. If provided, each
+        weight is accumulated into its corresponding bin. If None (default),
+        each index contributes a count of 1.
+    minlength : int, optional
+        Minimum number of bins in the output array. Applied to each independent
+        bincount operation. Default is 0.
+
+    Returns
+    -------
+    NDArray
+        Array of binned values with the same leading dimensions as the input
+        arrays, and a final dimension of size minlength (or the maximum index + 1,
+        whichever is larger).
+
+    See Also
+    --------
+    numpy.bincount : The underlying function being vectorized.
+
+    Examples
+    --------
+    >>> indices = np.array([[0, 1, 1], [2, 2, 3]])
+    >>> vectorized_bincount(indices, minlength=4)
+    array([[1., 2., 0., 0.],
+           [0., 0., 2., 1.]])
+    """
+    # Handle 1D case directly
+    if indices.ndim == 1 and (weights is None or weights.ndim == 1):
+        return np.bincount(indices, weights=weights, minlength=minlength)
+
+    # For multi-dimensional arrays, broadcast indices and weights
+    if weights is not None:
+        indices_bc, weights_bc = np.broadcast_arrays(indices, weights)
+        weights_flat = weights_bc.ravel()
+    else:
+        indices_bc = indices
+        weights_flat = None
+
+    # Get the shape for reshaping output
+    non_spatial_shape = indices_bc.shape[:-1]
+    n_binsets = np.prod(non_spatial_shape)
+
+    # Determine actual minlength if not specified
+    if minlength == 0:
+        minlength = int(np.max(indices_bc)) + 1
+
+    # We want to flatten the multi-dimensional bincount problem into a 1D problem.
+    # This can be done by offsetting the indices for each element of each additional
+    # dimension by an integer multiple of the number of bins. Doing so gives
+    # each element in the additional dimensions its own set of 1D bins: index 0
+    # uses bins [0, minlength), index 1 uses bins [minlength, 2*minlength), etc.
+    offsets = np.arange(n_binsets).reshape(*non_spatial_shape, 1) * minlength
+    indices_flat = (indices_bc + offsets).ravel()
+
+    # Single bincount call with flattened data
+    binned_flat = np.bincount(
+        indices_flat, weights=weights_flat, minlength=n_binsets * minlength
+    )
+
+    # Reshape to separate each sample's bins
+    binned_values = binned_flat.reshape(n_binsets, -1)[:, :minlength].reshape(
+        *non_spatial_shape, minlength
+    )
+
+    return binned_values
+
+
 def bin_single_array_at_indices(
     value_array: NDArray,
     projection_grid_shape: tuple[int, ...],
@@ -25,7 +108,7 @@ def bin_single_array_at_indices(
     Parameters
     ----------
     value_array : NDArray
-        Array of values to bin. The final axis
+        Array of values to bin. The final axis is the one and only spatial axis.
         If other axes are present, they will be binned independently
         along the spatial axis.
     projection_grid_shape : tuple[int, ...]
@@ -34,71 +117,89 @@ def bin_single_array_at_indices(
         or just (number of bins,) if the grid is 1D.
     projection_indices : NDArray
         Ordered indices for projection grid, corresponding to indices in input grid.
-        1 dimensional.
+        Can be 1-dimensional or multi-dimensional. If multi-dimensional, must be
+        broadcastable with value_array. May contain non-unique indices, depending
+        on the projection method.
     input_indices : NDArray
         Ordered indices for input grid, corresponding to indices in projection grid.
         1 dimensional. May be non-unique, depending on the projection method.
-        If None (default), an arange of the same length as the
-
+        If None (default), an numpy.arange of the same length as the final axis of
+        value_array is used.
     input_valid_mask : NDArray, optional
         Boolean mask array for valid values in input grid.
         If None, all pixels are considered valid. Default is None.
+        Must be broadcastable with value_array and projection_indices.
 
     Returns
     -------
     NDArray
-        Binned values on the projection grid.
+        Binned values on the projection grid. The output shape depends on the
+        input shapes after broadcasting:
+        - If value_array is 1D: returns 1D array of shape (num_projection_indices,)
+        - If value_array is multi-dimensional: returns array with shape
+          (*value_array.shape[:-1], num_projection_indices), where the leading
+          dimensions match value_array's non-spatial dimensions and the final
+          dimension contains the binned values for each projection grid position.
+        - If projection_indices is multi-dimensional and broadcasts with value_array,
+          the output shape will be (broadcasted_shape[:-1], num_projection_indices).
 
     Raises
    ------
     ValueError
-        If
-
-    NotImplementedError
-        If the input value_array has dimensionality less than 1.
+        If input_indices is not a 1D array, or if the arrays cannot be
+        broadcast together.
     """
+    # Set and check input_indices
     if input_indices is None:
         input_indices = np.arange(value_array.shape[-1])
-
-
-
-    # Both sets of indices must be 1D with the same number of elements
-    if input_indices.ndim != 1 or projection_indices.ndim != 1:
+    # input_indices must be 1D
+    if input_indices.ndim != 1:
         raise ValueError(
-            "
+            "input_indices must be a 1D array. "
            "If using a rectangular grid, the indices must be unwrapped."
         )
-
-
-
-
-
+
+    # Verify projection_indices is broadcastable with value_array
+    try:
+        broadcasted_shape = np.broadcast_shapes(
+            projection_indices.shape, value_array.shape
         )
+    except ValueError as e:
+        raise ValueError(
+            f"projection_indices shape {projection_indices.shape} must be "
+            f"broadcastable with value_array shape {value_array.shape}"
+        ) from e
 
-
-
+    # Set and check input_valid_mask
+    if input_valid_mask is None:
+        input_valid_mask = np.ones(value_array.shape[-1], dtype=bool)
+    else:
+        input_valid_mask = np.asarray(input_valid_mask, dtype=bool)
+        # Verify input_valid_mask is broadcastable with value_array
+        try:
+            np.broadcast_shapes(input_valid_mask.shape, value_array.shape)
+        except ValueError as e:
+            raise ValueError(
+                f"input_valid_mask shape {input_valid_mask.shape} must be "
+                f"broadcastable with value_array shape {value_array.shape}"
+            ) from e
 
-
+    # Broadcast input_valid_mask to match value_array shape if needed
+    input_valid_mask_bc = np.broadcast_to(input_valid_mask, broadcasted_shape)
+
+    # Select values at input_indices positions along the spatial axis
+    values = value_array[..., input_indices]
+
+    # Apply mask: set invalid values to 0
+    values_masked = np.where(input_valid_mask_bc, values, 0)
+
+    num_projection_indices = int(np.prod(projection_grid_shape))
+
+    # Use vectorized_bincount to handle arbitrary dimensions
+    binned_values = vectorized_bincount(
+        projection_indices, weights=values_masked, minlength=num_projection_indices
+    )
 
-    # Only valid values are summed into bins.
-    if value_array.ndim == 1:
-        values = value_array[input_indices]
-        binned_values = np.bincount(
-            projection_indices[mask_idx],
-            weights=values[mask_idx],
-            minlength=num_projection_indices,
-        )
-    elif value_array.ndim >= 2:
-        # Apply bincount to each row independently
-        binned_values = np.apply_along_axis(
-            lambda x: np.bincount(
-                projection_indices[mask_idx],
-                weights=x[..., input_indices][mask_idx],
-                minlength=num_projection_indices,
-            ),
-            axis=-1,
-            arr=value_array,
-        )
     return binned_values
 
 
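The offset-and-flatten trick above is easy to check in isolation. A minimal sketch in plain NumPy (independent of the package code), reproducing the docstring example:

import numpy as np

indices = np.array([[0, 1, 1],
                    [2, 2, 3]])
minlength = 4

# Shift row r by r * minlength so each row lands in a disjoint bin range.
offsets = np.arange(indices.shape[0])[:, None] * minlength
flat = (indices + offsets).ravel()  # [0, 1, 1, 6, 6, 7]

# One bincount over the flattened indices, then split back per row.
counts = np.bincount(flat, minlength=indices.shape[0] * minlength)
print(counts.reshape(indices.shape[0], minlength))
# [[1 2 0 0]
#  [0 0 2 1]]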
imap_processing/hi/hi_l2.py
CHANGED
@@ -229,7 +229,7 @@ def calculate_ena_intensity(
     Returns
     -------
     map_ds : xarray.Dataset
-        Map dataset with new variables: ena_intensity,
+        Map dataset with new variables: ena_intensity, ena_intensity_stat_uncert,
         ena_intensity_sys_err.
     """
     # read calibration product configuration file
@@ -248,7 +248,7 @@ def calculate_ena_intensity(
     # Convert ENA Signal Rate to Flux
     flux_conversion_divisor = geometric_factor * esa_energy
     map_ds["ena_intensity"] = map_ds["ena_signal_rates"] / flux_conversion_divisor
-    map_ds["
+    map_ds["ena_intensity_stat_uncert"] = (
         map_ds["ena_signal_rate_stat_unc"] / flux_conversion_divisor
     )
     map_ds["ena_intensity_sys_err"] = map_ds["bg_rates_unc"] / flux_conversion_divisor
@@ -268,12 +268,12 @@ def calculate_ena_intensity(
     # dimension by passing the zeroth element.
     corrected_intensity, corrected_stat_unc = corrector.apply_flux_correction(
         map_ds["ena_intensity"].values[0],
-        map_ds["
+        map_ds["ena_intensity_stat_uncert"].values[0],
         esa_energy.data,
     )
     # Add the size 1 epoch dimension back in to the corrected fluxes.
     map_ds["ena_intensity"].data = corrected_intensity[np.newaxis, ...]
-    map_ds["
+    map_ds["ena_intensity_stat_uncert"].data = corrected_stat_unc[np.newaxis, ...]
 
     return map_ds
 
@@ -302,7 +302,7 @@ def combine_calibration_products(
     Returns
     -------
     map_ds : xarray.Dataset
-        Map dataset with updated variables: ena_intensity,
+        Map dataset with updated variables: ena_intensity, ena_intensity_stat_uncert,
         ena_intensity_sys_err now combined across calibration products at each
         energy level.
     """
@@ -323,20 +323,15 @@ def combine_calibration_products(
     # Perform inverse-variance weighted averaging
     # Handle divide by zero and invalid values
     with np.errstate(divide="ignore", invalid="ignore"):
-        # Calculate weights for statistical variance combination using only
-        # statistical variance
-        stat_weights = 1.0 / improved_stat_variance
-
-        # Combined statistical uncertainty from inverse-variance formula
-        combined_stat_unc = np.sqrt(1.0 / stat_weights.sum(dim="calibration_prod"))
-
         # Use total variance weights for flux combination
         flux_weights = 1.0 / total_variance
         weighted_flux_sum = (ena_flux * flux_weights).sum(dim="calibration_prod")
         combined_flux = weighted_flux_sum / flux_weights.sum(dim="calibration_prod")
 
     map_ds["ena_intensity"] = combined_flux
-    map_ds["
+    map_ds["ena_intensity_stat_uncert"] = np.sqrt(
+        (map_ds["ena_intensity_stat_uncert"] ** 2).sum(dim="calibration_prod")
+    )
     # For systematic error, just do quadrature sum over the systematic error for
     # each calibration product.
     map_ds["ena_intensity_sys_err"] = np.sqrt((sys_err**2).sum(dim="calibration_prod"))
@@ -377,7 +372,7 @@ def _calculate_improved_stat_variance(
 
     if n_calib_prods <= 1:
         # No improvement possible with single calibration product
-        return map_ds["
+        return map_ds["ena_intensity_stat_uncert"] ** 2
 
     logger.debug("Computing geometric factor normalized signal rates")
 
@@ -417,7 +412,7 @@ def _calculate_improved_stat_variance(
     # Handle invalid cases by falling back to original uncertainties
     improved_variance = xr.where(
         ~np.isfinite(improved_variance) | (geometric_factors == 0),
-        map_ds["
+        map_ds["ena_intensity_stat_uncert"],
         improved_variance,
     )
 
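The flux combination above is a standard inverse-variance weighted mean: each calibration product is weighted by 1/variance, and the variance of the weighted mean is 1/sum(weights), while the new statistical uncertainty is carried as a plain quadrature sum. A minimal NumPy sketch with made-up numbers (not the package's data):

import numpy as np

# Hypothetical per-calibration-product intensities and variances
flux = np.array([1.0, 1.2, 0.9])
variance = np.array([0.04, 0.09, 0.01])

weights = 1.0 / variance
combined_flux = (flux * weights).sum() / weights.sum()
# Variance of the inverse-variance weighted mean
combined_var = 1.0 / weights.sum()

# The updated code instead combines statistical uncertainties in
# quadrature: sqrt(sum(u_i**2)), i.e. sqrt of the summed variances.
quadrature_uncert = np.sqrt(variance.sum())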
imap_processing/ialirt/constants.py
CHANGED

@@ -65,5 +65,11 @@ STATIONS = {
         latitude=54.2632,  # degrees North
         altitude=0.1,  # approx 100 meters
         min_elevation_deg=5,  # 5 degrees is the requirement
-    )
+    ),
+    "Manaus": StationProperties(
+        longitude=-59.969334,  # degrees East (negative = West)
+        latitude=-2.891257,  # degrees North (negative = South)
+        altitude=0.1,  # approx 100 meters
+        min_elevation_deg=5,  # 5 degrees is the requirement
+    ),
 }
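The coverage loop in the next hunk unpacks each station as a 4-tuple (lon, lat, alt, min_elevation), which suggests StationProperties is a NamedTuple-like record. A sketch under that assumption (the field layout shown here is hypothetical):

from typing import NamedTuple

class StationProperties(NamedTuple):  # hypothetical layout, for illustration only
    longitude: float         # degrees East
    latitude: float          # degrees North
    altitude: float          # km, so 0.1 ~= 100 m
    min_elevation_deg: float

STATIONS = {
    "Manaus": StationProperties(
        longitude=-59.969334,
        latitude=-2.891257,
        altitude=0.1,
        min_elevation_deg=5,
    ),
}

# NamedTuple instances unpack positionally, which is what the coverage loop uses:
for name, (lon, lat, alt, min_el) in STATIONS.items():
    print(name, lon, lat, alt, min_el)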
imap_processing/ialirt/generate_coverage.py
CHANGED

@@ -77,7 +77,7 @@ def generate_coverage(
         dsn_outage_mask |= (time_range >= start_et) & (time_range <= end_et)
 
     for station_name, (lon, lat, alt, min_elevation) in stations.items():
-
+        _azimuth, elevation = calculate_azimuth_and_elevation(lon, lat, alt, time_range)
         visible = elevation > min_elevation
 
         outage_mask = np.zeros(time_range.shape, dtype=bool)
imap_processing/ialirt/l0/ialirt_spice.py
CHANGED

@@ -177,7 +177,7 @@ def transform_instrument_vectors_to_inertial(
     )
 
     # Get static mount matrix
-    mount_matrix = spice.pxform(instrument_frame.name, spacecraft_frame.name, 0.0)
+    mount_matrix = spice.pxform(instrument_frame.name, spacecraft_frame.name, 0.0).T
 
     # Compute total rotations
     total_rotations = compute_total_rotation(
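The appended .T works because pxform returns an orthonormal rotation matrix, and for such matrices the transpose equals the inverse, so transposing reverses the direction of the frame transform. A minimal NumPy illustration (not SPICE-specific):

import numpy as np

theta = np.deg2rad(30.0)
# Rotation taking vectors from frame A into frame B
r_a_to_b = np.array(
    [
        [np.cos(theta), -np.sin(theta), 0.0],
        [np.sin(theta), np.cos(theta), 0.0],
        [0.0, 0.0, 1.0],
    ]
)

v_a = np.array([1.0, 0.0, 0.0])
v_b = r_a_to_b @ v_a
# The transpose maps frame B back to frame A: R.T == inv(R) for rotations.
assert np.allclose(r_a_to_b.T @ v_b, v_a)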
imap_processing/ialirt/l0/parse_mag.py
CHANGED

@@ -719,6 +719,39 @@ def process_packet(
             "mag_theta_B_GSM": Decimal(str(theta_gsm[i])),
             "mag_phi_B_GSE": Decimal(str(phi_gse[i])),
             "mag_theta_B_GSE": Decimal(str(theta_gse[i])),
+            "mag_hk_status": {
+                "hk1v5_warn": bool(status_data["hk1v5_warn"]),
+                "hk1v5_danger": bool(status_data["hk1v5_danger"]),
+                "hk1v5c_warn": bool(status_data["hk1v5c_warn"]),
+                "hk1v5c_danger": bool(status_data["hk1v5c_danger"]),
+                "hk1v8_warn": bool(status_data["hk1v8_warn"]),
+                "hk1v8_danger": bool(status_data["hk1v8_danger"]),
+                "hk1v8c_warn": bool(status_data["hk1v8c_warn"]),
+                "hk1v8c_danger": bool(status_data["hk1v8c_danger"]),
+                "fob_saturated": bool(status_data["fob_saturated"]),
+                "fib_saturated": bool(status_data["fib_saturated"]),
+                "mode": int(status_data["mode"]),
+                "icu_temp": int(status_data["icu_temp"]),
+                "hk2v5_warn": bool(status_data["hk2v5_warn"]),
+                "hk2v5_danger": bool(status_data["hk2v5_danger"]),
+                "hk2v5c_warn": bool(status_data["hk2v5c_warn"]),
+                "hk2v5c_danger": bool(status_data["hk2v5c_danger"]),
+                "hk3v3": int(status_data["hk3v3"]),
+                "hk3v3_current": int(status_data["hk3v3_current"]),
+                "pri_isvalid": bool(status_data["pri_isvalid"]),
+                "hkp8v5_warn": bool(status_data["hkp8v5_warn"]),
+                "hkp8v5_danger": bool(status_data["hkp8v5_danger"]),
+                "hkp8v5c_warn": bool(status_data["hkp8v5c_warn"]),
+                "hkp8v5c_danger": bool(status_data["hkp8v5c_danger"]),
+                "hkn8v5": int(status_data["hkn8v5"]),
+                "hkn8v5_current": int(status_data["hkn8v5_current"]),
+                "fob_temp": int(status_data["fob_temp"]),
+                "fib_temp": int(status_data["fib_temp"]),
+                "fob_range": int(status_data["fob_range"]),
+                "fib_range": int(status_data["fib_range"]),
+                "multbit_errs": bool(status_data["multbit_errs"]),
+                "sec_isvalid": bool(status_data["sec_isvalid"]),
+            },
         }
     )
 
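The bool()/int() casts in the new mag_hk_status block, like the Decimal(str(...)) pattern around them, convert NumPy scalar types into native Python types before the record is stored; stdlib json (and boto3's DynamoDB serializer, if that is the sink) rejects NumPy scalars. A small sketch of the idea, with hypothetical field values:

import json
import numpy as np

status_data = {"fob_saturated": np.uint8(1), "fob_range": np.uint8(3)}

record = {
    "fob_saturated": bool(status_data["fob_saturated"]),  # np.uint8 -> bool
    "fob_range": int(status_data["fob_range"]),           # np.uint8 -> int
}
print(json.dumps(record))  # works; the raw np.uint8 values would raise TypeError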
imap_processing/ialirt/l0/process_codice.py
CHANGED

@@ -4,11 +4,56 @@ import logging
 from decimal import Decimal
 from typing import Any
 
+import numpy as np
 import xarray as xr
 
+from imap_processing.codice import decompress
+from imap_processing.ialirt.utils.grouping import find_groups
+
 logger = logging.getLogger(__name__)
 
+FILLVAL_UINT8 = 255
 FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
+COD_LO_COUNTER = 232
+COD_HI_COUNTER = 197
+COD_LO_RANGE = range(0, 15)
+COD_HI_RANGE = range(0, 5)
+
+
+def concatenate_bytes(grouped_data: xr.Dataset, group: int, sensor: str) -> bytearray:
+    """
+    Concatenate all data fields for a specific group into a single bytearray.
+
+    Parameters
+    ----------
+    grouped_data : xr.Dataset
+        The grouped CoDICE dataset containing cod_{sensor}_data_XX variables.
+    group : int
+        The group number to extract.
+    sensor : str
+        The sensor type, either 'lo' or 'hi'.
+
+    Returns
+    -------
+    current_data_stream: bytearray
+        The concatenated data stream for the selected group.
+    """
+    current_data_stream = bytearray()
+    group_mask = (grouped_data["group"] == group).values
+
+    cod_ranges = {
+        "lo": COD_LO_RANGE,
+        "hi": COD_HI_RANGE,
+    }
+
+    # Loop through all data fields.
+    for field in cod_ranges[sensor]:
+        data_array = grouped_data[f"cod_{sensor}_data_{field:02}"].values[group_mask]
+
+        # Convert each value to uint8 and extend the byte stream
+        current_data_stream.extend(np.uint8(data_array).tobytes())
+
+    return current_data_stream
 
 
 def process_codice(
@@ -35,6 +80,27 @@ def process_codice(
     - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
     - Calculate the public data products
     """
+    grouped_cod_lo_data = find_groups(
+        dataset, (0, COD_LO_COUNTER), "cod_lo_counter", "cod_lo_acq"
+    )
+    grouped_cod_hi_data = find_groups(
+        dataset, (0, COD_HI_COUNTER), "cod_hi_counter", "cod_hi_acq"
+    )
+    unique_cod_lo_groups = np.unique(grouped_cod_lo_data["group"])
+    unique_cod_hi_groups = np.unique(grouped_cod_hi_data["group"])
+
+    for group in unique_cod_lo_groups:
+        cod_lo_data_stream = concatenate_bytes(grouped_cod_lo_data, group, "lo")
+
+        # Decompress binary stream
+        decompressed_data = decompress._apply_pack_24_bit(bytes(cod_lo_data_stream))
+
+    for group in unique_cod_hi_groups:
+        cod_hi_data_stream = concatenate_bytes(grouped_cod_hi_data, group, "lo")
+
+        # Decompress binary stream
+        decompressed_data = decompress._apply_lossy_a(bytes(cod_hi_data_stream))  # noqa
+
     # For I-ALiRT SIT, the test data being used has all zeros and thus no
     # groups can be found, thus there is no data to process
     # TODO: Once I-ALiRT test data is acquired that actually has data in it,
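concatenate_bytes builds one contiguous byte stream by casting each field's values to uint8 and appending the raw buffers. The same pattern in isolation, with toy data:

import numpy as np

fields = [np.array([1, 2, 3]), np.array([4, 5])]

stream = bytearray()
for data_array in fields:
    # One byte per value: cast to uint8, then append the raw buffer.
    stream.extend(np.uint8(data_array).tobytes())

print(bytes(stream))  # b'\x01\x02\x03\x04\x05'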
imap_processing/ialirt/utils/create_xarray.py
CHANGED

@@ -154,6 +154,8 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
             "sc_position_GSE",
             "sc_velocity_GSM",
             "sc_velocity_GSE",
+            "mag_hk_status",
+            "spice_kernels",
         ]:
             continue
         elif key in ["mag_B_GSE", "mag_B_GSM", "mag_B_RTN"]:
imap_processing/idex/idex_l2a.py
CHANGED
@@ -118,7 +118,7 @@ def idex_l2a(l1b_dataset: xr.Dataset, ancillary_files: dict) -> xr.Dataset:
     atomic_masses_path = f"{imap_module_directory}/idex/atomic_masses.csv"
     atomic_masses = pd.read_csv(atomic_masses_path)
     masses = atomic_masses["Mass"]
-
+    _stretches, _shifts, mass_scales = time_to_mass(tof_high.data, hs_time.data, masses)
 
     # TODO use correct fillval
     mass_scales_da = xr.DataArray(
@@ -379,7 +379,7 @@ def log_smooth_powerlaw(log_v: float, log_a: float, params: np.ndarray) -> float
     # segments.
     # vb and vc are the characteristic speeds where the slope transition happens, and k
     # setting the sharpness of the transitions.
-    a1, a2, a3, vb, vc,
+    a1, a2, a3, vb, vc, _k, m = params
     v = 10**log_v
     base = log_a + a1 * log_v
     transition1 = (1 + (v / vb) ** m) ** ((a2 - a1) / m)
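From the unpacked parameters and the transition1 expression, log_smooth_powerlaw appears to implement a smoothly broken power law. Assuming the second transition mirrors the first (only transition1 is visible in this hunk) and the pieces are summed in log space, the model would be:

\[
\log_{10} F(v) = \log_{10} a + a_1 \log_{10} v
+ \frac{a_2 - a_1}{m}\,\log_{10}\!\left[1 + \left(\frac{v}{v_b}\right)^{m}\right]
+ \frac{a_3 - a_2}{m}\,\log_{10}\!\left[1 + \left(\frac{v}{v_c}\right)^{m}\right]
\]

so F scales as v^{a_1} well below v_b, as v^{a_2} between v_b and v_c, and as v^{a_3} above v_c, with m setting the sharpness of both breaks (the unpacked k is unused in the new code, hence _k).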
imap_processing/idex/idex_l2b.py
CHANGED
@@ -645,7 +645,7 @@ def get_science_acquisition_on_percentage(evt_dataset: xr.Dataset) -> dict:
     of year.
     """
     # Get science acquisition start and stop times
-
+    _evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
     if len(evt_time) == 0:
         logger.warning(
             "No science acquisition events found in event dataset. Returning empty "
imap_processing/lo/l1c/lo_l1c.py
CHANGED
@@ -11,9 +11,14 @@ from scipy.stats import binned_statistic_dd
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.lo import lo_ancillary
 from imap_processing.lo.l1b.lo_l1b import set_bad_or_goodtimes
+from imap_processing.spice.geometry import SpiceFrame, frame_transform_az_el
 from imap_processing.spice.repoint import get_pointing_times
 from imap_processing.spice.spin import get_spin_number
-from imap_processing.spice.time import
+from imap_processing.spice.time import (
+    met_to_ttj2000ns,
+    ttj2000ns_to_et,
+    ttj2000ns_to_met,
+)
 
 N_ESA_ENERGY_STEPS = 7
 N_SPIN_ANGLE_BINS = 3600
@@ -164,6 +169,10 @@ def lo_l1c(sci_dependencies: dict, anc_dependencies: list) -> list[xr.Dataset]:
         attr_mgr,
     )
 
+    pset["hae_longitude"], pset["hae_latitude"] = set_pointing_directions(
+        pset["epoch"].item()
+    )
+
     pset.attrs = attr_mgr.get_global_attributes(logical_source)
 
     pset = pset.assign_coords(
@@ -295,7 +304,7 @@ def create_pset_counts(
     lat_edges = np.arange(41)
     energy_edges = np.arange(8)
 
-    hist,
+    hist, _edges = np.histogramdd(
         data,
         bins=[energy_edges, lon_edges, lat_edges],
     )
@@ -572,7 +581,7 @@ def set_background_rates(
         if row["type"] == "rate":
             bg_rates[esa_step, bin_start:bin_end, :] = value
         elif row["type"] == "sigma":
-
+            bg_sys_err[esa_step, bin_start:bin_end, :] = value
         else:
             raise ValueError("Unknown background type in ancillary file.")
     # set the background rates, uncertainties, and systematic errors
@@ -597,3 +606,52 @@ def set_background_rates(
     )
 
     return bg_rates_data, bg_stat_uncert_data, bg_sys_err_data
+
+
+def set_pointing_directions(epoch: float) -> tuple[xr.DataArray, xr.DataArray]:
+    """
+    Set the pointing directions for the given epoch.
+
+    The pointing directions are calculated by transforming Spin and off angles
+    to HAE longitude and latitude using SPICE. This returns the HAE longitude and
+    latitude as (3600, 40) arrays for each the latitude and longitude.
+
+    Parameters
+    ----------
+    epoch : float
+        The epoch time in TTJ2000ns.
+
+    Returns
+    -------
+    hae_longitude : xr.DataArray
+        The HAE longitude for each spin and off angle bin.
+    hae_latitude : xr.DataArray
+        The HAE latitude for each spin and off angle bin.
+    """
+    et = ttj2000ns_to_et(epoch)
+    # create a meshgrid of spin and off angles using the bin centers
+    spin, off = np.meshgrid(
+        SPIN_ANGLE_BIN_CENTERS, OFF_ANGLE_BIN_CENTERS, indexing="ij"
+    )
+    dps_az_el = np.stack([spin, off], axis=-1)
+
+    # Transform from DPS Az/El to HAE lon/lat
+    hae_az_el = frame_transform_az_el(
+        et, dps_az_el, SpiceFrame.IMAP_DPS, SpiceFrame.IMAP_HAE, degrees=True
+    )
+
+    return xr.DataArray(
+        data=hae_az_el[:, :, 0].astype(np.float64),
+        dims=["spin_angle", "off_angle"],
+        # TODO: Add hae_longitude to yaml
+        # attrs=attr_mgr.get_variable_attributes(
+        #     "hae_longitude"
+        # )
+    ), xr.DataArray(
+        data=hae_az_el[:, :, 1].astype(np.float64),
+        dims=["spin_angle", "off_angle"],
+        # TODO: Add hae_longitude to yaml
+        # attrs=attr_mgr.get_variable_attributes(
+        #     "hae_latitude"
+        # )
+    )
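The meshgrid-and-stack pattern in set_pointing_directions produces the (3600, 40, 2) az/el array that frame_transform_az_el consumes. A standalone sketch of the array construction, with hypothetical bin centers (the actual SPIN_ANGLE_BIN_CENTERS and OFF_ANGLE_BIN_CENTERS values are not shown in this diff):

import numpy as np

# Hypothetical bin centers: 3600 spin-angle bins, 40 off-angle bins
spin_angle_bin_centers = np.arange(0.05, 360, 0.1)
off_angle_bin_centers = np.arange(-9.75, 10, 0.5)

spin, off = np.meshgrid(spin_angle_bin_centers, off_angle_bin_centers, indexing="ij")
# Pair the two angles on a trailing axis: one (az, el) pair per grid cell.
dps_az_el = np.stack([spin, off], axis=-1)
print(dps_az_el.shape)  # (3600, 40, 2)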