imap-processing 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (43)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
  3. imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
  4. imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -21
  6. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
  8. imap_processing/cli.py +6 -11
  9. imap_processing/codice/codice_l2.py +640 -127
  10. imap_processing/codice/constants.py +61 -0
  11. imap_processing/ena_maps/ena_maps.py +111 -60
  12. imap_processing/ena_maps/utils/coordinates.py +5 -0
  13. imap_processing/ena_maps/utils/corrections.py +268 -0
  14. imap_processing/ena_maps/utils/map_utils.py +143 -42
  15. imap_processing/hi/hi_l2.py +3 -8
  16. imap_processing/ialirt/constants.py +7 -1
  17. imap_processing/ialirt/generate_coverage.py +1 -1
  18. imap_processing/ialirt/l0/process_codice.py +66 -0
  19. imap_processing/ialirt/utils/create_xarray.py +1 -0
  20. imap_processing/idex/idex_l2a.py +2 -2
  21. imap_processing/idex/idex_l2b.py +1 -1
  22. imap_processing/lo/l1c/lo_l1c.py +61 -3
  23. imap_processing/lo/l2/lo_l2.py +79 -11
  24. imap_processing/mag/l1a/mag_l1a.py +2 -2
  25. imap_processing/mag/l1a/mag_l1a_data.py +71 -13
  26. imap_processing/mag/l1c/interpolation_methods.py +34 -13
  27. imap_processing/mag/l1c/mag_l1c.py +117 -67
  28. imap_processing/mag/l1d/mag_l1d_data.py +3 -1
  29. imap_processing/spice/geometry.py +11 -9
  30. imap_processing/spice/pointing_frame.py +77 -50
  31. imap_processing/swapi/l1/swapi_l1.py +12 -4
  32. imap_processing/swe/utils/swe_constants.py +7 -7
  33. imap_processing/ultra/l1b/extendedspin.py +1 -1
  34. imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
  35. imap_processing/ultra/l1b/ultra_l1b_extended.py +1 -1
  36. imap_processing/ultra/l1c/helio_pset.py +1 -1
  37. imap_processing/ultra/l1c/spacecraft_pset.py +2 -2
  38. imap_processing-1.0.1.dist-info/METADATA +121 -0
  39. {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/RECORD +42 -40
  40. imap_processing-1.0.0.dist-info/METADATA +0 -120
  41. {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/LICENSE +0 -0
  42. {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/WHEEL +0 -0
  43. {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/entry_points.txt +0 -0
@@ -4,7 +4,23 @@ from pathlib import Path
 
 import numpy as np
 import pandas as pd
+import xarray as xr
 from numpy.polynomial import Polynomial
+from scipy.constants import electron_volt, erg, proton_mass
+
+from imap_processing.ena_maps.ena_maps import LoHiBasePointingSet
+from imap_processing.ena_maps.utils.coordinates import CoordNames
+from imap_processing.spice import geometry
+from imap_processing.spice.time import ttj2000ns_to_et
+
+# Physical constants for Compton-Getting correction
+# Units: electron_volt = [J / eV]
+#        erg = [J / erg]
+# To get [erg / eV]: electron_volt [J / eV] / erg [J / erg] = erg_per_ev [erg / eV]
+ERG_PER_EV = electron_volt / erg  # erg per eV - unit conversion factor
+# Units: proton_mass = [kg]
+# Here, we convert proton_mass to grams
+PROTON_MASS_GRAMS = proton_mass * 1e3  # proton mass in grams
 
 
 class PowerLawFluxCorrector:
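
For a quick sanity check of the new unit-conversion constants, this standalone sketch (not part of the release; the 30 km/s speed is an arbitrary example value) computes the kinetic energy of a hydrogen atom at a typical spacecraft speed:

from scipy.constants import electron_volt, erg, proton_mass

ERG_PER_EV = electron_volt / erg  # ~1.602e-12 erg per eV
PROTON_MASS_GRAMS = proton_mass * 1e3  # ~1.673e-24 g

# E = (1/2) m v^2 with v converted from km/s to cm/s (CGS units)
v_cm_per_s = 30.0 * 1e5  # 30 km/s
energy_ev = 0.5 * PROTON_MASS_GRAMS * v_cm_per_s**2 / ERG_PER_EV
print(f"{energy_ev:.2f} eV")  # roughly 4.7 eV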
@@ -289,3 +305,255 @@ class PowerLawFluxCorrector:
         )
 
         return corrected_flux, corrected_flux_stat_unc
+
+
+def _add_spacecraft_velocity_to_pset(pset: LoHiBasePointingSet) -> None:
+    """
+    Calculate and add spacecraft velocity data to pointing set.
+
+    Parameters
+    ----------
+    pset : LoHiBasePointingSet
+        Pointing set object to be updated.
+
+    Notes
+    -----
+    Adds the following DataArrays to pset.data:
+    - "sc_velocity": Spacecraft velocity vector (km/s) with dims ["x_y_z"]
+    - "sc_direction_vector": Spacecraft velocity unit vector with dims ["x_y_z"]
+    """
+    # Compute ephemeris time (J2000 seconds) of PSET midpoint time
+    # TODO: Use the Pointing midpoint time. Epoch should be the start time,
+    # but use it until we can make Lo and Hi PSETs have a consistent
+    # variable to hold the midpoint time.
+    et = ttj2000ns_to_et(pset.data["epoch"].values[0])
+    # Get spacecraft state in HAE frame
+    sc_state = geometry.imap_state(et, ref_frame=geometry.SpiceFrame.IMAP_HAE)
+    sc_velocity_vector = sc_state[3:6]
+
+    # Store spacecraft velocity as DataArray
+    pset.data["sc_velocity"] = xr.DataArray(
+        sc_velocity_vector, dims=[CoordNames.CARTESIAN_VECTOR.value]
+    )
+
+    # Calculate spacecraft speed and direction
+    sc_velocity_km_per_sec = np.linalg.norm(
+        pset.data["sc_velocity"], axis=-1, keepdims=True
+    )
+    pset.data["sc_direction_vector"] = pset.data["sc_velocity"] / sc_velocity_km_per_sec
+
+
+def _add_cartesian_look_direction(pset: LoHiBasePointingSet) -> None:
+    """
+    Calculate and add look direction vectors to pointing set.
+
+    Parameters
+    ----------
+    pset : LoHiBasePointingSet
+        Pointing set object to be updated.
+
+    Notes
+    -----
+    Adds the following DataArray to pset.data:
+    - "look_direction": Cartesian unit vectors with dims [...spatial_dims, "x_y_z"]
+    """
+    longitudes = pset.data["hae_longitude"]
+    latitudes = pset.data["hae_latitude"]
+
+    # Stack spherical coordinates (r=1 for unit vectors, azimuth, elevation)
+    spherical_coords = np.stack(
+        [
+            np.ones_like(longitudes),  # r = 1 for unit vectors
+            longitudes,  # azimuth = longitude
+            latitudes,  # elevation = latitude
+        ],
+        axis=-1,
+    )
+
+    # Convert to Cartesian coordinates and store as DataArray
+    pset.data["look_direction"] = xr.DataArray(
+        geometry.spherical_to_cartesian(spherical_coords),
+        dims=[*longitudes.dims, CoordNames.CARTESIAN_VECTOR.value],
+    )
+
+
+def _calculate_compton_getting_transform(
+    pset: LoHiBasePointingSet,
+    energy_hf: xr.DataArray,
+) -> None:
+    """
+    Apply Compton-Getting transformation to compute ENA source directions.
+
+    This implements the Compton-Getting velocity transformation to correct
+    for the motion of the spacecraft through the heliosphere. The transformation
+    accounts for the Doppler shift of ENA energies and the aberration of
+    arrival directions.
+
+    All calculations are performed using xarray DataArrays to preserve
+    dimension information throughout the computation.
+
+    Parameters
+    ----------
+    pset : LoHiBasePointingSet
+        Pointing set object with sc_velocity, sc_direction_vector, and
+        look_direction already added.
+    energy_hf : xr.DataArray
+        ENA energies in the heliosphere frame in eV.
+
+    Notes
+    -----
+    The algorithm is based on the "Appendix A. The IMAP-Lo Mapping Algorithms"
+    document.
+    Adds the following DataArrays to pset.data:
+    - "energy_sc": ENA energies in spacecraft frame (eV)
+    - "ena_source_hae_longitude": ENA source longitudes in heliosphere frame (degrees)
+    - "ena_source_hae_latitude": ENA source latitudes in heliosphere frame (degrees)
+    """
+    # Store heliosphere frame energies
+    pset.data["energy_hf"] = energy_hf
+
+    # Calculate spacecraft speed
+    sc_velocity_km_per_sec = np.linalg.norm(
+        pset.data["sc_velocity"], axis=-1, keepdims=True
+    )
+
+    # Calculate dot product between look directions and spacecraft direction vector
+    # Use Einstein summation for efficient vectorized dot product
+    dot_product = xr.DataArray(
+        np.einsum(
+            "...i,...i->...",
+            pset.data["look_direction"],
+            pset.data["sc_direction_vector"],
+        ),
+        dims=pset.data["look_direction"].dims[:-1],
+    )
+
+    # Calculate the kinetic energy of a hydrogen ENA traveling at spacecraft velocity
+    # E_u = (1/2) * m * U_sc^2 (convert km/s to cm/s with 1.0e5 factor)
+    energy_u = (
+        0.5 * PROTON_MASS_GRAMS * (sc_velocity_km_per_sec * 1e5) ** 2 / ERG_PER_EV
+    )
+
+    # Note: Tim thinks that this approach seems backwards. Here, we are assuming
+    # that ENAs are observed in the heliosphere frame at the ESA energy levels.
+    # We then calculate the velocity that said ENAs would have in the spacecraft
+    # frame, as well as the CG-corrected energy level in the spacecraft frame.
+    # We then use this velocity and the velocity of the spacecraft
+    # to do the vector math which determines the ENA source direction in the
+    # heliosphere frame.
+    # The ENAs are in fact observed in the spacecraft frame at a known energy
+    # level in the spacecraft frame. Why don't we use that energy level to
+    # calculate the source direction in the spacecraft frame and then do the
+    # vector math to find the source direction in the heliosphere frame? We
+    # would also need to calculate the CG-corrected ENA energy in the heliosphere
+    # frame and keep track of that when binning.
+
+    # Calculate y values for each energy level (Equation 61)
+    # y_k = sqrt(E^h_k / E^u)
+    y = np.sqrt(pset.data["energy_hf"] / energy_u)
+
+    # Velocity magnitude factor calculation (Equation 62)
+    # x_k = (êₛ · û_sc) + sqrt(y² + (êₛ · û_sc)² - 1)
+    x = dot_product + np.sqrt(y**2 + dot_product**2 - 1)
+
+    # Calculate ENA speed in the spacecraft frame
+    # |v⃗_sc| = x_k * U_sc
+    velocity_sc = x * sc_velocity_km_per_sec
+
+    # Calculate the kinetic energy in the spacecraft frame
+    # E_sc = (1/2) * M_p * v_sc² (convert km/s to cm/s with 1.0e5 factor)
+    pset.data["energy_sc"] = (
+        0.5 * PROTON_MASS_GRAMS * (velocity_sc * 1e5) ** 2 / ERG_PER_EV
+    )
+
+    # Calculate the velocity vector in the spacecraft frame
+    # v⃗_sc = |v_sc| * êₛ (velocity direction follows look direction)
+    velocity_vector_sc = velocity_sc * pset.data["look_direction"]
+
+    # Calculate the ENA velocity vector in the heliosphere frame
+    # v⃗_helio = v⃗_sc - U⃗_sc (simple velocity addition)
+    velocity_vector_helio = velocity_vector_sc - pset.data["sc_velocity"]
+
+    # Convert to spherical coordinates to get ENA source directions
+    ena_source_direction_helio = geometry.cartesian_to_spherical(
+        velocity_vector_helio.data
+    )
+
+    # Update the PSET hae_longitude and hae_latitude variables with the new
+    # energy-dependent values.
+    pset.data["hae_longitude"] = (
+        pset.data["energy_sc"].dims,
+        ena_source_direction_helio[..., 1],
+    )
+    pset.data["hae_latitude"] = (
+        pset.data["energy_sc"].dims,
+        ena_source_direction_helio[..., 2],
+    )
+
+    # For ram/anti-ram filtering we can use the sign of the scalar projection
+    # of the ENA source direction onto the spacecraft velocity vector.
+    # ram_mask = (v⃗_helio · û_sc) >= 0
+    ram_mask = (
+        np.einsum(
+            "...i,...i->...", velocity_vector_helio, pset.data["sc_direction_vector"]
+        )
+        >= 0
+    )
+    pset.data["ram_mask"] = xr.DataArray(
+        ram_mask,
+        dims=velocity_vector_helio.dims[:-1],
+    )
+
+
+def apply_compton_getting_correction(
+    pset: LoHiBasePointingSet,
+    energy_hf: xr.DataArray,
+) -> None:
+    """
+    Apply Compton-Getting correction to a pointing set and update coordinates.
+
+    This function performs the Compton-Getting velocity transformation to correct
+    ENA observations for the motion of the spacecraft through the heliosphere.
+    The corrected coordinates represent the true source directions of the ENAs
+    in the heliosphere frame.
+
+    The pointing set is modified in place: new variables are added to the dataset
+    for the corrected coordinates and energies, and the az_el_points attribute
+    is updated to use the corrected coordinates for binning.
+
+    All calculations are performed using xarray DataArrays to preserve dimension
+    information throughout the computation.
+
+    Parameters
+    ----------
+    pset : LoHiBasePointingSet
+        Pointing set object containing HAE longitude/latitude coordinates.
+    energy_hf : xr.DataArray
+        ENA energies in the heliosphere frame in eV. Must be 1D with an
+        energy dimension.
+
+    Notes
+    -----
+    This function adds the following variables to the pointing set dataset:
+    - "sc_velocity": Spacecraft velocity vector (km/s)
+    - "sc_direction_vector": Spacecraft velocity unit vector
+    - "look_direction": Cartesian unit vectors of observation directions
+    - "energy_hf": ENA energies in heliosphere frame (eV)
+    - "energy_sc": ENA energies in spacecraft frame (eV)
+    - "ena_source_hae_longitude": ENA source longitudes in heliosphere frame (degrees)
+    - "ena_source_hae_latitude": ENA source latitudes in heliosphere frame (degrees)
+
+    The az_el_points attribute is updated to use the corrected coordinates,
+    which will be used for subsequent binning operations.
+    """
+    # Step 1: Add spacecraft velocity and direction to pset
+    _add_spacecraft_velocity_to_pset(pset)
+
+    # Step 2: Calculate and add look direction vectors to pset
+    _add_cartesian_look_direction(pset)
+
+    # Step 3: Apply Compton-Getting transformation
+    _calculate_compton_getting_transform(pset, energy_hf)
+
+    # Step 4: Update az_el_points to use the corrected coordinates
+    pset.update_az_el_points()
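
The heart of the transform is Equations 61 and 62 above, which reduce to scalar arithmetic per energy step and look direction. A minimal standalone sketch of just that step (function name and input values are illustrative, not from the release):

import numpy as np

def cg_speed_factor(energy_hf_ev, energy_u_ev, cos_theta):
    # Equation 61: y = sqrt(E_hf / E_u)
    y = np.sqrt(energy_hf_ev / energy_u_ev)
    # Equation 62: x = (ê_s · û_sc) + sqrt(y² + (ê_s · û_sc)² - 1)
    return cos_theta + np.sqrt(y**2 + cos_theta**2 - 1)

# A 100 eV ENA seen 60 degrees off the ram direction, with ~4.7 eV of
# spacecraft-motion energy (about 30 km/s):
x = cg_speed_factor(100.0, 4.7, np.cos(np.radians(60.0)))
print(round(float(x), 2))  # ~5.03: the ENA moves at ~5 times the spacecraft speed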
@@ -10,6 +10,89 @@ from numpy.typing import NDArray
 logger = logging.getLogger(__name__)
 
 
+def vectorized_bincount(
+    indices: NDArray, weights: NDArray | None = None, minlength: int = 0
+) -> NDArray:
+    """
+    Vectorized version of np.bincount for multi-dimensional arrays.
+
+    This function applies np.bincount across multi-dimensional input arrays by
+    adding offsets to the indices and flattening, then reshaping the result.
+    This approach allows broadcasting between indices and weights.
+
+    Parameters
+    ----------
+    indices : NDArray
+        Array of non-negative integers to be binned. Can be multi-dimensional.
+        If multi-dimensional, bincount is applied independently along each
+        leading dimension.
+    weights : NDArray, optional
+        Array of weights that is broadcastable with indices. If provided, each
+        weight is accumulated into its corresponding bin. If None (default),
+        each index contributes a count of 1.
+    minlength : int, optional
+        Minimum number of bins in the output array. Applied to each independent
+        bincount operation. Default is 0.
+
+    Returns
+    -------
+    NDArray
+        Array of binned values with the same leading dimensions as the input
+        arrays, and a final dimension of size minlength (or the maximum index + 1,
+        whichever is larger).
+
+    See Also
+    --------
+    numpy.bincount : The underlying function being vectorized.
+
+    Examples
+    --------
+    >>> indices = np.array([[0, 1, 1], [2, 2, 3]])
+    >>> vectorized_bincount(indices, minlength=4)
+    array([[1., 2., 0., 0.],
+           [0., 0., 2., 1.]])
+    """
+    # Handle 1D case directly
+    if indices.ndim == 1 and (weights is None or weights.ndim == 1):
+        return np.bincount(indices, weights=weights, minlength=minlength)
+
+    # For multi-dimensional arrays, broadcast indices and weights
+    if weights is not None:
+        indices_bc, weights_bc = np.broadcast_arrays(indices, weights)
+        weights_flat = weights_bc.ravel()
+    else:
+        indices_bc = indices
+        weights_flat = None
+
+    # Get the shape for reshaping output
+    non_spatial_shape = indices_bc.shape[:-1]
+    n_binsets = np.prod(non_spatial_shape)
+
+    # Determine actual minlength if not specified
+    if minlength == 0:
+        minlength = int(np.max(indices_bc)) + 1
+
+    # We want to flatten the multi-dimensional bincount problem into a 1D problem.
+    # This can be done by offsetting the indices for each element of each additional
+    # dimension by an integer multiple of the number of bins. Doing so gives
+    # each element in the additional dimensions its own set of 1D bins: index 0
+    # uses bins [0, minlength), index 1 uses bins [minlength, 2*minlength), etc.
+    offsets = np.arange(n_binsets).reshape(*non_spatial_shape, 1) * minlength
+    indices_flat = (indices_bc + offsets).ravel()
+
+    # Single bincount call with flattened data
+    binned_flat = np.bincount(
+        indices_flat, weights=weights_flat, minlength=n_binsets * minlength
+    )
+
+    # Reshape to separate each sample's bins
+    binned_values = binned_flat.reshape(n_binsets, -1)[:, :minlength].reshape(
+        *non_spatial_shape, minlength
+    )
+
+    return binned_values
+
+
 def bin_single_array_at_indices(
     value_array: NDArray,
     projection_grid_shape: tuple[int, ...],
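
The offset trick in vectorized_bincount is worth spelling out: each leading-dimension element gets its own disjoint range of bin ids, so one flat np.bincount call reproduces a per-row bincount. A standalone check of the equivalence (toy data, not release code):

import numpy as np

indices = np.array([[0, 1, 1], [2, 2, 3]])
minlength = 4

# Row 0 gets bins 0-3, row 1 gets bins 4-7.
offsets = np.arange(indices.shape[0]).reshape(-1, 1) * minlength
flat = np.bincount((indices + offsets).ravel(), minlength=indices.shape[0] * minlength)
per_row = flat.reshape(indices.shape[0], minlength)

# Identical to looping np.bincount over the rows:
expected = np.stack([np.bincount(row, minlength=minlength) for row in indices])
assert np.array_equal(per_row, expected)  # [[1, 2, 0, 0], [0, 0, 2, 1]]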
@@ -25,7 +108,7 @@ def bin_single_array_at_indices(
     Parameters
     ----------
     value_array : NDArray
-        Array of values to bin. The final axis be the one and only spatial axis.
+        Array of values to bin. The final axis is the one and only spatial axis.
         If other axes are present, they will be binned independently
         along the spatial axis.
     projection_grid_shape : tuple[int, ...]
@@ -34,71 +117,89 @@ def bin_single_array_at_indices(
         or just (number of bins,) if the grid is 1D.
     projection_indices : NDArray
         Ordered indices for projection grid, corresponding to indices in input grid.
-        1 dimensional. May be non-unique, depending on the projection method.
+        Can be 1-dimensional or multi-dimensional. If multi-dimensional, must be
+        broadcastable with value_array. May contain non-unique indices, depending
+        on the projection method.
     input_indices : NDArray
         Ordered indices for input grid, corresponding to indices in projection grid.
         1 dimensional. May be non-unique, depending on the projection method.
-        If None (default), an arange of the same length as the
-        final axis of value_array is used.
+        If None (default), a numpy.arange of the same length as the final axis of
+        value_array is used.
     input_valid_mask : NDArray, optional
         Boolean mask array for valid values in input grid.
         If None, all pixels are considered valid. Default is None.
+        Must be broadcastable with value_array and projection_indices.
 
     Returns
     -------
     NDArray
-        Binned values on the projection grid.
+        Binned values on the projection grid. The output shape depends on the
+        input shapes after broadcasting:
+        - If value_array is 1D: returns 1D array of shape (num_projection_indices,)
+        - If value_array is multi-dimensional: returns array with shape
+          (*value_array.shape[:-1], num_projection_indices), where the leading
+          dimensions match value_array's non-spatial dimensions and the final
+          dimension contains the binned values for each projection grid position.
+        - If projection_indices is multi-dimensional and broadcasts with value_array,
+          the output shape will be (broadcasted_shape[:-1], num_projection_indices).
 
     Raises
     ------
     ValueError
-        If the input and projection indices are not 1D arrays
-        with the same number of elements.
-    NotImplementedError
-        If the input value_array has dimensionality less than 1.
+        If input_indices is not a 1D array, or if the arrays cannot be
+        broadcast together.
     """
+    # Set and check input_indices
     if input_indices is None:
         input_indices = np.arange(value_array.shape[-1])
-    if input_valid_mask is None:
-        input_valid_mask = np.ones(value_array.shape[-1], dtype=bool)
-
-    # Both sets of indices must be 1D with the same number of elements
-    if input_indices.ndim != 1 or projection_indices.ndim != 1:
+    # input_indices must be 1D
+    if input_indices.ndim != 1:
         raise ValueError(
-            "Indices must be 1D arrays. "
+            "input_indices must be a 1D array. "
             "If using a rectangular grid, the indices must be unwrapped."
         )
-    if input_indices.size != projection_indices.size:
-        raise ValueError(
-            "The number of input and projection indices must be the same. \n"
-            f"Received {input_indices.size} input indices and {projection_indices.size}"
-            " projection indices."
+
+    # Verify projection_indices is broadcastable with value_array
+    try:
+        broadcasted_shape = np.broadcast_shapes(
+            projection_indices.shape, value_array.shape
         )
+    except ValueError as e:
+        raise ValueError(
+            f"projection_indices shape {projection_indices.shape} must be "
+            f"broadcastable with value_array shape {value_array.shape}"
        ) from e
 
-    input_valid_mask = np.asarray(input_valid_mask, dtype=bool)
-    mask_idx = input_valid_mask[input_indices]
+    # Set and check input_valid_mask
+    if input_valid_mask is None:
+        input_valid_mask = np.ones(value_array.shape[-1], dtype=bool)
+    else:
+        input_valid_mask = np.asarray(input_valid_mask, dtype=bool)
+        # Verify input_valid_mask is broadcastable with value_array
+        try:
+            np.broadcast_shapes(input_valid_mask.shape, value_array.shape)
+        except ValueError as e:
+            raise ValueError(
+                f"input_valid_mask shape {input_valid_mask.shape} must be "
+                f"broadcastable with value_array shape {value_array.shape}"
+            ) from e
 
-    num_projection_indices = np.prod(projection_grid_shape)
+    # Broadcast input_valid_mask to match value_array shape if needed
+    input_valid_mask_bc = np.broadcast_to(input_valid_mask, broadcasted_shape)
+
+    # Select values at input_indices positions along the spatial axis
+    values = value_array[..., input_indices]
+
+    # Apply mask: set invalid values to 0
+    values_masked = np.where(input_valid_mask_bc, values, 0)
+
+    num_projection_indices = int(np.prod(projection_grid_shape))
+
+    # Use vectorized_bincount to handle arbitrary dimensions
+    binned_values = vectorized_bincount(
+        projection_indices, weights=values_masked, minlength=num_projection_indices
+    )
 
-    # Only valid values are summed into bins.
-    if value_array.ndim == 1:
-        values = value_array[input_indices]
-        binned_values = np.bincount(
-            projection_indices[mask_idx],
-            weights=values[mask_idx],
-            minlength=num_projection_indices,
-        )
-    elif value_array.ndim >= 2:
-        # Apply bincount to each row independently
-        binned_values = np.apply_along_axis(
-            lambda x: np.bincount(
-                projection_indices[mask_idx],
-                weights=x[..., input_indices][mask_idx],
-                minlength=num_projection_indices,
-            ),
-            axis=-1,
-            arr=value_array,
-        )
     return binned_values
 
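The rewrite also changes how masking is applied: instead of filtering the index arrays, invalid samples are zeroed with np.where and then binned, which yields the same sums because a zero weight contributes nothing. A standalone illustration of that equivalence (toy data):

import numpy as np

values = np.array([5.0, 7.0, 11.0, 13.0])
projection_indices = np.array([0, 1, 1, 2])
valid = np.array([True, False, True, True])

# 1.0.0 style: drop invalid samples before binning.
old = np.bincount(projection_indices[valid], weights=values[valid], minlength=3)

# 1.0.1 style: zero out invalid samples, then bin everything.
new = np.bincount(projection_indices, weights=np.where(valid, values, 0.0), minlength=3)

assert np.array_equal(old, new)  # both give [5., 11., 13.]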
@@ -323,20 +323,15 @@ def combine_calibration_products(
     # Perform inverse-variance weighted averaging
     # Handle divide by zero and invalid values
     with np.errstate(divide="ignore", invalid="ignore"):
-        # Calculate weights for statistical variance combination using only
-        # statistical variance
-        stat_weights = 1.0 / improved_stat_variance
-
-        # Combined statistical uncertainty from inverse-variance formula
-        combined_stat_unc = np.sqrt(1.0 / stat_weights.sum(dim="calibration_prod"))
-
         # Use total variance weights for flux combination
         flux_weights = 1.0 / total_variance
         weighted_flux_sum = (ena_flux * flux_weights).sum(dim="calibration_prod")
         combined_flux = weighted_flux_sum / flux_weights.sum(dim="calibration_prod")
 
     map_ds["ena_intensity"] = combined_flux
-    map_ds["ena_intensity_stat_uncert"] = combined_stat_unc
+    map_ds["ena_intensity_stat_uncert"] = np.sqrt(
+        (map_ds["ena_intensity_stat_uncert"] ** 2).sum(dim="calibration_prod")
+    )
     # For systematic error, just do quadrature sum over the systematic error for
     # each calibration product.
     map_ds["ena_intensity_sys_err"] = np.sqrt((sys_err**2).sum(dim="calibration_prod"))
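
This hunk swaps the inverse-variance combination of the statistical uncertainty for a plain quadrature sum over calibration products, i.e. σ_combined = sqrt(Σ σ_i²). A side-by-side sketch of the two formulas with illustrative values:

import numpy as np

stat_unc = np.array([0.3, 0.4])  # per-product statistical uncertainties

# 1.0.0: inverse-variance combination; the result is smaller than the
# smallest input uncertainty (~0.24 here).
inverse_variance = np.sqrt(1.0 / np.sum(1.0 / stat_unc**2))

# 1.0.1: quadrature sum; the result grows as products are added (0.5 here).
quadrature = np.sqrt(np.sum(stat_unc**2))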
@@ -65,5 +65,11 @@ STATIONS = {
         latitude=54.2632,  # degrees North
         altitude=0.1,  # approx 100 meters
         min_elevation_deg=5,  # 5 degrees is the requirement
-    )
+    ),
+    "Manaus": StationProperties(
+        longitude=-59.969334,  # degrees East (negative = West)
+        latitude=-2.891257,  # degrees North (negative = South)
+        altitude=0.1,  # approx 100 meters
+        min_elevation_deg=5,  # 5 degrees is the requirement
+    ),
 }
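
The new Manaus entry unpacks positionally in generate_coverage (next hunk), which implies StationProperties behaves like a NamedTuple. A hypothetical minimal equivalent, for orientation only (the real class may differ):

from typing import NamedTuple

class StationProperties(NamedTuple):
    longitude: float  # degrees East
    latitude: float  # degrees North
    altitude: float  # km above the reference surface
    min_elevation_deg: float

manaus = StationProperties(-59.969334, -2.891257, 0.1, 5)
lon, lat, alt, min_elevation = manaus  # positional unpacking, as in the coverage loop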
@@ -77,7 +77,7 @@ def generate_coverage(
        dsn_outage_mask |= (time_range >= start_et) & (time_range <= end_et)
 
    for station_name, (lon, lat, alt, min_elevation) in stations.items():
-        azimuth, elevation = calculate_azimuth_and_elevation(lon, lat, alt, time_range)
+        _azimuth, elevation = calculate_azimuth_and_elevation(lon, lat, alt, time_range)
        visible = elevation > min_elevation
 
        outage_mask = np.zeros(time_range.shape, dtype=bool)
@@ -4,11 +4,56 @@ import logging
 from decimal import Decimal
 from typing import Any
 
+import numpy as np
 import xarray as xr
 
+from imap_processing.codice import decompress
+from imap_processing.ialirt.utils.grouping import find_groups
+
 logger = logging.getLogger(__name__)
 
+FILLVAL_UINT8 = 255
 FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
+COD_LO_COUNTER = 232
+COD_HI_COUNTER = 197
+COD_LO_RANGE = range(0, 15)
+COD_HI_RANGE = range(0, 5)
+
+
+def concatenate_bytes(grouped_data: xr.Dataset, group: int, sensor: str) -> bytearray:
+    """
+    Concatenate all data fields for a specific group into a single bytearray.
+
+    Parameters
+    ----------
+    grouped_data : xr.Dataset
+        The grouped CoDICE dataset containing cod_{sensor}_data_XX variables.
+    group : int
+        The group number to extract.
+    sensor : str
+        The sensor type, either 'lo' or 'hi'.
+
+    Returns
+    -------
+    current_data_stream : bytearray
+        The concatenated data stream for the selected group.
+    """
+    current_data_stream = bytearray()
+    group_mask = (grouped_data["group"] == group).values
+
+    cod_ranges = {
+        "lo": COD_LO_RANGE,
+        "hi": COD_HI_RANGE,
+    }
+
+    # Loop through all data fields.
+    for field in cod_ranges[sensor]:
+        data_array = grouped_data[f"cod_{sensor}_data_{field:02}"].values[group_mask]
+
+        # Convert each value to uint8 and extend the byte stream
+        current_data_stream.extend(np.uint8(data_array).tobytes())
+
+    return current_data_stream
 
 
 def process_codice(
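
The packing step in concatenate_bytes relies on np.uint8(...).tobytes() emitting one byte per element in array order. A standalone sketch of that behavior with toy field values (the fields are assumed to already fit in a byte; the cast does not range-check):

import numpy as np

stream = bytearray()
for field_values in ([1, 2, 3], [250, 251]):
    # Cast to uint8 and append the raw bytes, mirroring concatenate_bytes
    stream.extend(np.uint8(field_values).tobytes())

assert bytes(stream) == b"\x01\x02\x03\xfa\xfb"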
@@ -35,6 +80,27 @@ def process_codice(
     - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
     - Calculate the public data products
     """
+    grouped_cod_lo_data = find_groups(
+        dataset, (0, COD_LO_COUNTER), "cod_lo_counter", "cod_lo_acq"
+    )
+    grouped_cod_hi_data = find_groups(
+        dataset, (0, COD_HI_COUNTER), "cod_hi_counter", "cod_hi_acq"
+    )
+    unique_cod_lo_groups = np.unique(grouped_cod_lo_data["group"])
+    unique_cod_hi_groups = np.unique(grouped_cod_hi_data["group"])
+
+    for group in unique_cod_lo_groups:
+        cod_lo_data_stream = concatenate_bytes(grouped_cod_lo_data, group, "lo")
+
+        # Decompress binary stream
+        decompressed_data = decompress._apply_pack_24_bit(bytes(cod_lo_data_stream))
+
+    for group in unique_cod_hi_groups:
+        cod_hi_data_stream = concatenate_bytes(grouped_cod_hi_data, group, "hi")
+
+        # Decompress binary stream
+        decompressed_data = decompress._apply_lossy_a(bytes(cod_hi_data_stream))  # noqa
+
     # For I-ALiRT SIT, the test data being used has all zeros and thus no
     # groups can be found, thus there is no data to process
     # TODO: Once I-ALiRT test data is acquired that actually has data in it,
@@ -155,6 +155,7 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0
             "sc_velocity_GSM",
             "sc_velocity_GSE",
             "mag_hk_status",
+            "spice_kernels",
         ]:
             continue
         elif key in ["mag_B_GSE", "mag_B_GSM", "mag_B_RTN"]:
@@ -118,7 +118,7 @@ def idex_l2a(l1b_dataset: xr.Dataset, ancillary_files: dict) -> xr.Dataset:
     atomic_masses_path = f"{imap_module_directory}/idex/atomic_masses.csv"
     atomic_masses = pd.read_csv(atomic_masses_path)
     masses = atomic_masses["Mass"]
-    stretches, shifts, mass_scales = time_to_mass(tof_high.data, hs_time.data, masses)
+    _stretches, _shifts, mass_scales = time_to_mass(tof_high.data, hs_time.data, masses)
 
     # TODO use correct fillval
     mass_scales_da = xr.DataArray(
@@ -379,7 +379,7 @@ def log_smooth_powerlaw(log_v: float, log_a: float, params: np.ndarray) -> float
     # segments.
     # vb and vc are the characteristic speeds where the slope transition happens, and k
     # setting the sharpness of the transitions.
-    a1, a2, a3, vb, vc, k, m = params
+    a1, a2, a3, vb, vc, _k, m = params
     v = 10**log_v
     base = log_a + a1 * log_v
     transition1 = (1 + (v / vb) ** m) ** ((a2 - a1) / m)
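
For context, the surrounding code implements a smoothly broken power law in log space: a base slope a1 that bends toward a2 and a3 near the characteristic speeds vb and vc, with m setting the sharpness of each bend. A self-contained reconstruction of that functional form (a sketch from the visible lines; the release may combine the terms differently):

import numpy as np

def smooth_broken_powerlaw(log_v, log_a, a1, a2, a3, vb, vc, m):
    v = 10.0**log_v
    # Base power law: log10(f) = log_a + a1 * log10(v)
    base = log_a + a1 * log_v
    # Each factor bends the slope by (a_next - a_prev) around its speed
    transition1 = (1 + (v / vb) ** m) ** ((a2 - a1) / m)
    transition2 = (1 + (v / vc) ** m) ** ((a3 - a2) / m)
    return base + np.log10(transition1) + np.log10(transition2)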
@@ -645,7 +645,7 @@ def get_science_acquisition_on_percentage(evt_dataset: xr.Dataset) -> dict:
     of year.
     """
     # Get science acquisition start and stop times
-    evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
+    _evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
     if len(evt_time) == 0:
         logger.warning(
             "No science acquisition events found in event dataset. Returning empty "