imap-processing 0.19.2__py3-none-any.whl → 0.19.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of imap-processing has been flagged as potentially problematic.
Files changed (45)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +90 -91
  3. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +45 -6
  5. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +29 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +32 -0
  7. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +1 -2
  8. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +1 -0
  9. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +8 -8
  10. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +8 -6
  11. imap_processing/cdf/utils.py +5 -0
  12. imap_processing/cli.py +72 -54
  13. imap_processing/codice/codice_l1a.py +44 -6
  14. imap_processing/codice/codice_l1b.py +35 -6
  15. imap_processing/codice/constants.py +10 -6
  16. imap_processing/ena_maps/ena_maps.py +2 -7
  17. imap_processing/glows/l1b/glows_l1b.py +29 -21
  18. imap_processing/hi/hi_l1a.py +49 -29
  19. imap_processing/hi/hi_l1b.py +34 -0
  20. imap_processing/hi/hi_l1c.py +23 -17
  21. imap_processing/hi/hi_l2.py +225 -81
  22. imap_processing/ialirt/l0/ialirt_spice.py +1 -2
  23. imap_processing/ialirt/l0/parse_mag.py +18 -4
  24. imap_processing/ialirt/l0/process_hit.py +9 -4
  25. imap_processing/ialirt/l0/process_swapi.py +9 -4
  26. imap_processing/ialirt/l0/process_swe.py +9 -4
  27. imap_processing/ialirt/utils/create_xarray.py +11 -1
  28. imap_processing/lo/l1b/lo_l1b.py +111 -77
  29. imap_processing/lo/l1c/lo_l1c.py +10 -11
  30. imap_processing/lo/l2/lo_l2.py +43 -22
  31. imap_processing/mag/l1c/interpolation_methods.py +9 -1
  32. imap_processing/mag/l1c/mag_l1c.py +99 -45
  33. imap_processing/spice/geometry.py +0 -2
  34. imap_processing/ultra/l0/decom_tools.py +58 -46
  35. imap_processing/ultra/l0/decom_ultra.py +21 -9
  36. imap_processing/ultra/l0/ultra_utils.py +4 -4
  37. imap_processing/ultra/l1c/helio_pset.py +2 -2
  38. imap_processing/ultra/l1c/spacecraft_pset.py +7 -4
  39. imap_processing/ultra/l2/ultra_l2.py +63 -23
  40. imap_processing/ultra/utils/ultra_l1_utils.py +4 -4
  41. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/METADATA +2 -2
  42. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/RECORD +45 -45
  43. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/LICENSE +0 -0
  44. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/WHEEL +0 -0
  45. {imap_processing-0.19.2.dist-info → imap_processing-0.19.4.dist-info}/entry_points.txt +0 -0
imap_processing/mag/l1c/mag_l1c.py
@@ -9,12 +9,14 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.mag import imap_mag_sdc_configuration_v001 as configuration
 from imap_processing.mag.constants import ModeFlags, VecSec
 from imap_processing.mag.l1c.interpolation_methods import InterpolationFunction
+from imap_processing.spice.time import et_to_ttj2000ns, str_to_et

 logger = logging.getLogger(__name__)


 def mag_l1c(
     first_input_dataset: xr.Dataset,
+    day_to_process: np.datetime64,
     second_input_dataset: xr.Dataset = None,
 ) -> xr.Dataset:
     """
@@ -27,6 +29,9 @@ def mag_l1c(
     first_input_dataset : xr.Dataset
         The first input dataset to process. This can be either burst or norm data, for
         mago or magi.
+    day_to_process : np.datetime64
+        The day to process, in np.datetime64[D] format. This is used to fill gaps at
+        the beginning or end of the day if needed.
     second_input_dataset : xr.Dataset, optional
         The second input dataset to process. This should be burst if first_input_dataset
         was norm, or norm if first_input_dataset was burst. It should match the
@@ -263,13 +268,15 @@ def process_mag_l1c(
     normal_mode_dataset: xr.Dataset,
     burst_mode_dataset: xr.Dataset,
     interpolation_function: InterpolationFunction,
+    day_to_process: np.datetime64 | None = None,
 ) -> np.ndarray:
     """
     Create MAG L1C data from L1B datasets.

     This function starts from the normal mode dataset and completes the following steps:
     1. find all the gaps in the dataset
-    2. generate a new timeline with the gaps filled
+    2. generate a new timeline with the gaps filled, including new timestamps to fill
+       out the rest of the day to +/- 15 minutes on either side
     3. fill the timeline with normal mode data (so, all the non-gap timestamps)
     4. interpolate the gaps using the burst mode data and the method specified in
        interpolation_function.
@@ -288,6 +295,10 @@ def process_mag_l1c(
         The burst mode dataset, which is used to fill in the gaps in the normal mode.
     interpolation_function : InterpolationFunction
         The interpolation function to use to fill in the gaps.
+    day_to_process : np.datetime64, optional
+        The day to process, in np.datetime64[D] format. This is used to fill
+        gaps at the beginning or end of the day if needed. If not included, these
+        gaps will not be filled.

     Returns
     -------
@@ -306,8 +317,23 @@ def process_mag_l1c(
     output_dataset["sample_interpolated"] = xr.DataArray(
         np.zeros(len(normal_mode_dataset))
     )
+    day_start_ns = None
+    day_end_ns = None

-    gaps = find_all_gaps(norm_epoch, normal_vecsec_dict)
+    if day_to_process is not None:
+        day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(15, "m")
+
+        # get the end of the day plus 15 minutes
+        day_end = (
+            day_to_process.astype("datetime64[s]")
+            + np.timedelta64(1, "D")
+            + np.timedelta64(15, "m")
+        )
+
+        day_start_ns = et_to_ttj2000ns(str_to_et(str(day_start)))
+        day_end_ns = et_to_ttj2000ns(str_to_et(str(day_end)))
+
+    gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)

     new_timeline = generate_timeline(norm_epoch, gaps)
     norm_filled = fill_normal_data(normal_mode_dataset, new_timeline)
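A minimal sketch of the new day-boundary padding above, using plain numpy datetime64 arithmetic. The example day is hypothetical, and the conversion of the padded boundaries to TTJ2000 nanoseconds (str_to_et / et_to_ttj2000ns, which requires SPICE kernels) is omitted here.

```python
import numpy as np

# Hypothetical day; the real pipeline passes day_to_process into mag_l1c.
day_to_process = np.datetime64("2025-01-02", "D")

# Pad the day by 15 minutes on either side, as in process_mag_l1c.
day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(15, "m")
day_end = (
    day_to_process.astype("datetime64[s]")
    + np.timedelta64(1, "D")
    + np.timedelta64(15, "m")
)

print(day_start, day_end)  # 2025-01-01T23:45:00 2025-01-03T00:15:00
```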
@@ -319,7 +345,9 @@ def process_mag_l1c(


 def fill_normal_data(
-    normal_dataset: xr.Dataset, new_timeline: np.ndarray
+    normal_dataset: xr.Dataset,
+    new_timeline: np.ndarray,
+    day_to_process: np.datetime64 | None = None,
 ) -> np.ndarray:
     """
     Fill the new timeline with the normal mode data.
@@ -332,6 +360,10 @@ def fill_normal_data(
         The normal mode dataset.
     new_timeline : np.ndarray
         A 1D array of timestamps to fill.
+    day_to_process : np.datetime64, optional
+        The day to process, in np.datetime64[D] format. This is used to fill
+        gaps at the beginning or end of the day if needed. If not included, these
+        gaps will not be filled.

     Returns
     -------
@@ -341,12 +373,11 @@
         Indices: 0 - epoch, 1-4 - vector x, y, z, and range, 5 - generated flag,
         6-7 - compression flags.
     """
-    # TODO: fill with FILLVAL?
+    # TODO: fill with FILLVAL
     filled_timeline: np.ndarray = np.zeros((len(new_timeline), 8))
     filled_timeline[:, 0] = new_timeline
     # Flags, will also indicate any missed timestamps
     filled_timeline[:, 5] = ModeFlags.MISSING.value
-
     for index, timestamp in enumerate(normal_dataset["epoch"].data):
         timeline_index = np.searchsorted(new_timeline, timestamp)
         filled_timeline[timeline_index, 1:5] = normal_dataset["vectors"].data[index]
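A small, self-contained sketch of the placement step above: each normal-mode epoch is located in the sorted new timeline with np.searchsorted and its vector is written into that row, leaving the gap rows for later interpolation. The toy arrays are hypothetical.

```python
import numpy as np

new_timeline = np.array([0.0, 0.5e9, 1.0e9, 1.5e9, 2.0e9])  # ns, 2 vectors/s
norm_epochs = np.array([0.0, 0.5e9, 2.0e9])                 # normal-mode samples
norm_vectors = np.array([[1, 2, 3, 0], [4, 5, 6, 0], [7, 8, 9, 0]])

filled = np.zeros((len(new_timeline), 8))
filled[:, 0] = new_timeline
for index, timestamp in enumerate(norm_epochs):
    timeline_index = np.searchsorted(new_timeline, timestamp)
    filled[timeline_index, 1:5] = norm_vectors[index]

# Rows for 1.0e9 and 1.5e9 ns keep zero vectors: those are the gap samples
# that interpolate_gaps later fills from burst data.
print(filled[:, 1:5])
```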
@@ -399,9 +430,11 @@ def interpolate_gaps(
     )

     for gap in gaps:
-        # TODO: we might need a few inputs before or after start/end
+        # TODO: we need extra data at the beginning and end of the gap
         burst_gap_start = (np.abs(burst_epochs - gap[0])).argmin()
         burst_gap_end = (np.abs(burst_epochs - gap[1])).argmin()
+        # if this gap is too big, we may be missing burst data at the start or end of
+        # the day and shouldn't use it here.

         # for the CIC filter, we need 2x normal mode cadence seconds

@@ -428,10 +461,6 @@
         gap_timeline = filled_norm_timeline[
             (filled_norm_timeline > gap[0]) & (filled_norm_timeline < gap[1])
         ]
-        logger.info(
-            f"difference between gap start and burst start: "
-            f"{gap_timeline[0] - burst_epochs[burst_start]}"
-        )

         short = (gap_timeline >= burst_epochs[burst_start]) & (
             gap_timeline <= burst_epochs[burst_gap_end]
@@ -487,40 +516,46 @@ def generate_timeline(epoch_data: np.ndarray, gaps: np.ndarray) -> np.ndarray:
         The existing timeline data, in the shape (n,).
     gaps : numpy.ndarray
         An array of gaps to fill, with shape (n, 2) where n is the number of gaps.
-        The gap is specified as (start, end) where start and end both exist in the
-        timeline already.
+        The gap is specified as (start, end).

     Returns
     -------
     numpy.ndarray
         The new timeline, filled with the existing data and the generated gaps.
     """
-    full_timeline: np.ndarray = np.zeros(0)
-
-    # When we have our gaps, generate the full timeline
-    last_gap = 0
+    full_timeline: np.ndarray = np.array([])
+    last_index = 0
     for gap in gaps:
-        gap_start_index = np.where(epoch_data == gap[0])[0]
-        gap_end_index = np.where(epoch_data == gap[1])[0]
-        if gap_start_index.size != 1 or gap_end_index.size != 1:
-            raise ValueError("Gap start or end not found in input timeline")
-
+        epoch_start_index = np.searchsorted(epoch_data, gap[0], side="left")
         full_timeline = np.concatenate(
-            (
-                full_timeline,
-                epoch_data[last_gap : gap_start_index[0]],
-                generate_missing_timestamps(gap),
-            )
+            (full_timeline, epoch_data[last_index:epoch_start_index])
         )
-        last_gap = gap_end_index[0]
+        generated_timestamps = generate_missing_timestamps(gap)
+        if generated_timestamps.size == 0:
+            continue
+
+        # Remove any generated timestamps that are already in the timeline
+        # Use np.isin to check for exact matches
+        mask = ~np.isin(generated_timestamps, full_timeline)
+        generated_timestamps = generated_timestamps[mask]
+
+        if generated_timestamps.size == 0:
+            print("All generated timestamps already exist in timeline")
+            continue

-    full_timeline = np.concatenate((full_timeline, epoch_data[last_gap:]))
+        full_timeline = np.concatenate((full_timeline, generated_timestamps))
+        last_index = int(np.searchsorted(epoch_data, gap[1], side="left"))
+
+    full_timeline = np.concatenate((full_timeline, epoch_data[last_index:]))

     return full_timeline


 def find_all_gaps(
-    epoch_data: np.ndarray, vecsec_dict: dict | None = None
+    epoch_data: np.ndarray,
+    vecsec_dict: dict | None = None,
+    start_of_day_ns: float | None = None,
+    end_of_day_ns: float | None = None,
 ) -> np.ndarray:
     """
     Find all the gaps in the epoch data.
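A toy illustration of the reworked merge in generate_timeline above: segments of the existing epoch array are located with np.searchsorted, generated gap timestamps are de-duplicated against the timeline built so far with np.isin, and everything is concatenated in order. The epoch values are hypothetical; in this single-gap case the np.isin guard removes nothing, but it protects against duplicates when generated spans overlap.

```python
import numpy as np

epoch_data = np.array([0.0, 0.5e9, 1.0e9, 3.0e9, 3.5e9])  # existing timeline (ns)
gaps = [(1.0e9, 3.0e9)]                                    # detected gaps

full_timeline = np.array([])
last_index = 0
for gap in gaps:
    start = np.searchsorted(epoch_data, gap[0], side="left")
    full_timeline = np.concatenate((full_timeline, epoch_data[last_index:start]))
    generated = np.arange(gap[0], gap[1], 0.5e9)  # nominal 0.5 s cadence
    # Guard against re-adding timestamps already placed in the new timeline.
    generated = generated[~np.isin(generated, full_timeline)]
    full_timeline = np.concatenate((full_timeline, generated))
    last_index = int(np.searchsorted(epoch_data, gap[1], side="left"))
full_timeline = np.concatenate((full_timeline, epoch_data[last_index:]))

print(full_timeline)
# [0.0e+00 5.0e+08 1.0e+09 1.5e+09 2.0e+09 2.5e+09 3.0e+09 3.5e+09]
```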
@@ -529,6 +564,9 @@ find_all_gaps(
     it will assume a nominal 1/2 second gap. A gap is defined as missing data from the
     expected sequence as defined by vectors_per_second_attr.

+    If start_of_day_ns and end_of_day_ns are provided, gaps at the beginning and end of
+    the day will be added if the epoch_data does not cover the full day.
+
     Parameters
     ----------
     epoch_data : numpy.ndarray
@@ -537,6 +575,12 @@
         A dictionary of the form {start: vecsec, start: vecsec} where start is the time
         in nanoseconds and vecsec is the number of vectors per second. This will be
         used to find the gaps. If not provided, a 1/2 second gap is assumed.
+    start_of_day_ns : float, optional
+        The start of the day in nanoseconds since TTJ2000. If provided, a gap will be
+        added from this time to the first epoch if they don't match.
+    end_of_day_ns : float, optional
+        The end of the day in nanoseconds since TTJ2000. If provided, a gap will be
+        added from the last epoch to this time if they don't match.

     Returns
     -------
@@ -546,15 +590,23 @@
         timeline.
     """
     gaps: np.ndarray = np.zeros((0, 3))
-    if vecsec_dict is None:
-        # TODO: when we go back to the previous file, also retrieve expected
-        # vectors per second
-        # If no vecsec is provided, assume 2 vectors per second
-        vecsec_dict = {0: VecSec.TWO_VECS_PER_S.value}
+
+    # TODO: when we go back to the previous file, also retrieve expected
+    # vectors per second
+
+    vecsec_dict = {0: VecSec.TWO_VECS_PER_S.value} | (vecsec_dict or {})

     end_index = epoch_data.shape[0]
+
+    if start_of_day_ns is not None and epoch_data[0] > start_of_day_ns:
+        # Add a gap from the start of the day to the first timestamp
+        gaps = np.concatenate(
+            (gaps, np.array([[start_of_day_ns, epoch_data[0], vecsec_dict[0]]]))
+        )
+
     for start_time in reversed(sorted(vecsec_dict.keys())):
-        start_index = np.where(start_time == epoch_data)[0][0]
+        # Find the start index that is equal to or immediately after start_time
+        start_index = np.searchsorted(epoch_data, start_time, side="left")
        gaps = np.concatenate(
            (
                find_gaps(
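A short sketch of the reworked defaults in find_all_gaps: the nominal 2 vectors/s entry always occupies key 0 and any caller-supplied vecsec_dict entries override or extend it via the dict union operator, while day-boundary gap rows of the form (start_ns, end_ns, vectors_per_second) are added when the data does not reach the padded day edges. The values below are hypothetical stand-ins for VecSec.TWO_VECS_PER_S and real epochs.

```python
import numpy as np

TWO_VECS_PER_S = 2  # stand-in for VecSec.TWO_VECS_PER_S.value

def merge_vecsec(vecsec_dict: dict | None) -> dict:
    # Key 0 always carries the nominal rate; caller entries win on conflict.
    return {0: TWO_VECS_PER_S} | (vecsec_dict or {})

print(merge_vecsec(None))        # {0: 2}
print(merge_vecsec({0: 4}))      # {0: 4}  (override)
print(merge_vecsec({1_000: 4}))  # {0: 2, 1000: 4}

# Day-edge gap rows are (start_ns, end_ns, vectors_per_second):
epoch_data = np.array([2.0e9, 2.5e9, 3.0e9])
start_of_day_ns, end_of_day_ns = 0.0, 5.0e9
gaps = np.zeros((0, 3))
if epoch_data[0] > start_of_day_ns:
    gaps = np.concatenate((gaps, [[start_of_day_ns, epoch_data[0], 2]]))
if epoch_data[-1] < end_of_day_ns:
    gaps = np.concatenate((gaps, [[epoch_data[-1], end_of_day_ns, 2]]))
print(gaps)  # [[0.0e+00 2.0e+09 2.0e+00] [3.0e+09 5.0e+09 2.0e+00]]
```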
@@ -565,6 +617,11 @@
         )
         end_index = start_index

+    if end_of_day_ns is not None and epoch_data[-1] < end_of_day_ns:
+        gaps = np.concatenate(
+            (gaps, np.array([[epoch_data[-1], end_of_day_ns, vecsec_dict[start_time]]]))
+        )
+
     return gaps


@@ -592,11 +649,9 @@ def find_gaps(timeline_data: np.ndarray, vectors_per_second: int) -> np.ndarray:
     # Expected difference between timestamps in nanoseconds.
     expected_gap = 1 / vectors_per_second * 1e9

-    # TODO: timestamps can vary by a few ms. Per Alastair, this can be around 7.5% of
-    # cadence without counting as a "gap".
     diffs = abs(np.diff(timeline_data))
-    # 3.5e7 == 7.5% of 0.5s in nanoseconds, a common gap. In the future, this number
-    # will be calculated from the expected gap.
+
+    # Gap can be up to 7.5% larger than expected vectors per second due to clock drift
     gap_index = np.asarray(diffs - expected_gap > expected_gap * 0.075).nonzero()[0]
     output: np.ndarray = np.zeros((len(gap_index), 3))

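A runnable illustration of the tolerance change above: a sample interval only counts as a gap when it exceeds the expected cadence by more than 7.5%, and that margin is now derived from expected_gap rather than hard-coded as 3.5e7 ns. The timestamps are hypothetical.

```python
import numpy as np

vectors_per_second = 2
expected_gap = 1 / vectors_per_second * 1e9  # 5.0e8 ns

# 0.5 s cadence with ~1% jitter, plus one real 2 s dropout.
timeline = np.array([0.0, 0.505e9, 1.0e9, 3.0e9, 3.5e9])
diffs = abs(np.diff(timeline))

gap_index = np.asarray(diffs - expected_gap > expected_gap * 0.075).nonzero()[0]
print(gap_index)                                     # [2] -> only the 1.0e9 -> 3.0e9 jump
print(timeline[gap_index], timeline[gap_index + 1])  # gap start / gap end
```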
@@ -607,7 +662,6 @@ def find_gaps(timeline_data: np.ndarray, vectors_per_second: int) -> np.ndarray:
             vectors_per_second,
         ]

-    # TODO: How should I handle/find gaps at the end?
     return output


@@ -622,7 +676,8 @@ def generate_missing_timestamps(gap: np.ndarray) -> np.ndarray:
     ----------
     gap : numpy.ndarray
         Array of timestamps of shape (2,) containing n gaps with start_gap and
-        end_gap. Start_gap and end_gap both correspond to points in timeline_data.
+        end_gap. Start_gap and end_gap both correspond to points in timeline_data and
+        are included in the output timespan.

     Returns
     -------
@@ -630,9 +685,7 @@ def generate_missing_timestamps(gap: np.ndarray) -> np.ndarray:
         Completed timeline.
     """
     # Generated timestamps should always be 0.5 seconds apart
-    # TODO: is this in the configuration file?
     difference_ns = 0.5 * 1e9
-
     output: np.ndarray = np.arange(gap[0], gap[1], difference_ns)
     return output

@@ -657,8 +710,9 @@ def vectors_per_second_from_string(vecsec_string: str) -> dict:
     vecsec_dict = {}
     vecsec_segments = vecsec_string.split(",")
     for vecsec_segment in vecsec_segments:
-        start_time, vecsec = vecsec_segment.split(":")
-        vecsec_dict[int(start_time)] = int(vecsec)
+        if vecsec_segment:
+            start_time, vecsec = vecsec_segment.split(":")
+            vecsec_dict[int(start_time)] = int(vecsec)

     return vecsec_dict

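A self-contained copy of the parser above, showing what the new guard changes: empty segments (for example a trailing comma in the attribute string) are now skipped instead of raising ValueError from the split. The attribute string below is hypothetical.

```python
def vectors_per_second_from_string(vecsec_string: str) -> dict:
    """Parse 'start_ns:vectors_per_second' pairs separated by commas."""
    vecsec_dict = {}
    for vecsec_segment in vecsec_string.split(","):
        if vecsec_segment:  # skip empty segments such as a trailing comma
            start_time, vecsec = vecsec_segment.split(":")
            vecsec_dict[int(start_time)] = int(vecsec)
    return vecsec_dict

print(vectors_per_second_from_string("0:2,1000000000:4,"))
# {0: 2, 1000000000: 4}  -- the trailing comma no longer raises ValueError
```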
imap_processing/spice/geometry.py
@@ -50,8 +50,6 @@ class SpiceFrame(IntEnum):
     IMAP_HI_90 = -43160
     IMAP_ULTRA_45 = -43200
     IMAP_ULTRA_90 = -43210
-    # TODO: remove IMAP_MAG frame once all usages have been removed
-    IMAP_MAG = -43999
     IMAP_MAG_BOOM = -43250
     IMAP_MAG_I = -43251
     IMAP_MAG_O = -43252
imap_processing/ultra/l0/decom_tools.py
@@ -157,6 +157,7 @@
     pixel0: int,
     binary_data: str,
     packet_props: PacketProperties,
+    planes_per_packet: int = 1,
 ) -> NDArray:
     """
     Will decompress a binary string representing an image into a matrix of pixel values.
@@ -174,11 +175,15 @@
     packet_props : PacketProperties
         Properties of the packet, including width bit, mantissa bit length and pixel
         window dimensions.
+    planes_per_packet : int
+        Number of image planes in the packet. Default is 1.

     Returns
     -------
-    p_decom : NDArray
-        A 2D numpy array representing pixel values.
+    planes : NDArray
+        A 3D numpy array representing pixel values.
+        The last two dimensions correspond to the image dimensions, and the first
+        is the number of image planes.
         Each pixel is stored as an unsigned 16-bit integer (uint16).

     Notes
@@ -199,51 +204,58 @@
     )

     blocks_per_row = cols // pixels_per_block
-
-    # Compressed pixel matrix
-    p = np.zeros((rows, cols), dtype=np.uint16)
-    # Decompressed pixel matrix
-    p_decom = np.zeros((rows, cols), dtype=np.int16)
-
+    current_pixel0 = pixel0  # Use the parameter for first plane
+    planes = []
+    plane_num = 0
     pos = 0  # Starting position in the binary string
-
-    for i in range(rows):
-        for j in range(blocks_per_row):
-            # Read the width for the block.
-            w, pos = read_and_advance(binary_data, width_bit, pos)
-            for k in range(pixels_per_block):
-                # Handle the special case in which the width is 0
-                if w == 0:
-                    value = 0
-                else:
-                    # Find the value of each pixel in the block
-                    value, pos = read_and_advance(binary_data, w, pos)
-
-                # if the least significant bit of value is set (odd)
-                if value & 0x01:
-                    # value >> 1: shifts bits of value one place to the right
-                    # ~: bitwise NOT operator (flips bits)
-                    delta_f = ~(value >> 1)
-                else:
-                    delta_f = value >> 1
-
-                # Calculate the new pixel value and update pixel0
-                column_index = j * pixels_per_block + k
-                # 0xff is the hexadecimal representation of the number 255,
-                # Keeps only the last 8 bits of the result of pixel0 - delta_f
-                # This operation ensures that the result is within the range
-                # of an 8-bit byte (0-255)
-                # Use np.int16 for the arithmetic operation to avoid overflow
-                # Then implicitly cast back to the p's uint16 dtype for storage
-                p[i][column_index] = np.int16(pixel0) - delta_f
-                # Perform logarithmic decompression on the pixel value
-                p_decom[i][column_index] = log_decompression(
-                    p[i][column_index], mantissa_bit_length
-                )
-                pixel0 = p[i][column_index]
-        pixel0 = p[i][0]
-
-    return p_decom
+    while plane_num < planes_per_packet:
+        # Compressed pixel matrix
+        p = np.zeros((rows, cols), dtype=np.uint16)
+        # Decompressed pixel matrix
+        p_decom = np.zeros((rows, cols), dtype=np.int16)
+
+        for i in range(rows):
+            for j in range(blocks_per_row):
+                # Read the width for the block.
+                w, pos = read_and_advance(binary_data, width_bit, pos)
+                for k in range(pixels_per_block):
+                    # Handle the special case in which the width is 0
+                    if w == 0:
+                        value = 0
+                    else:
+                        # Find the value of each pixel in the block
+                        value, pos = read_and_advance(binary_data, w, pos)
+
+                    # if the least significant bit of value is set (odd)
+                    if value & 0x01:
+                        # value >> 1: shifts bits of value one place to the right
+                        # ~: bitwise NOT operator (flips bits)
+                        delta_f = ~(value >> 1)
+                    else:
+                        delta_f = value >> 1
+
+                    # Calculate the new pixel value and update pixel0
+                    column_index = j * pixels_per_block + k
+                    # 0xff is the hexadecimal representation of the number 255,
+                    # Keeps only the last 8 bits of the result of pixel0 - delta_f
+                    # This operation ensures that the result is within the range
+                    # of an 8-bit byte (0-255)
+                    # Use np.int16 for the arithmetic operation to avoid overflow
+                    # Then implicitly cast back to the p's uint16 dtype for storage
+                    p[i][column_index] = np.int16(current_pixel0) - delta_f
+                    # Perform logarithmic decompression on the pixel value
+                    p_decom[i][column_index] = log_decompression(
+                        p[i][column_index], mantissa_bit_length
+                    )
+                    current_pixel0 = p[i][column_index]
+            current_pixel0 = p[i][0]
+        planes.append(p_decom)
+        plane_num += 1
+        # Read P00 for the next plane (if not the last plane)
+        if plane_num < planes_per_packet:
+            current_pixel0, pos = read_and_advance(binary_data, 8, pos)
+
+    return np.stack(planes)


 def read_image_raw_events_binary(
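The per-pixel values in this format are delta-encoded with the sign folded into the least significant bit, as the loop above shows, and decompress_image now returns the decoded planes stacked into a 3D (plane, row, column) array. A minimal sketch of just those two pieces, with hypothetical encoded values and without the bit reader or logarithmic decompression:

```python
import numpy as np

def decode_delta(value: int) -> int:
    # Odd coded values map to one sign of delta via ~(value >> 1); even values
    # map to the other via value >> 1, mirroring the loop above.
    return ~(value >> 1) if value & 0x01 else value >> 1

encoded = [0, 1, 2, 3, 4, 5]
print([decode_delta(v) for v in encoded])  # [0, -1, 1, -2, 2, -3]

# Each decoded plane is a 2D array; the planes are stacked along a new axis.
plane_a = np.zeros((2, 3), dtype=np.int16)
plane_b = np.ones((2, 3), dtype=np.int16)
print(np.stack([plane_a, plane_b]).shape)  # (2, 2, 3) -> (plane, row, column)
```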
imap_processing/ultra/l0/decom_ultra.py
@@ -81,54 +81,66 @@ def process_ultra_tof(ds: xr.Dataset, packet_props: PacketProperties) -> xr.Data
     decom_data: defaultdict[str, list[np.ndarray]] = defaultdict(list)
     decom_data["packetdata"] = []
     valid_epoch = []
-
     for val, group in ds.groupby("epoch"):
         if set(group["sid"].values) >= set(
             np.arange(0, image_planes, planes_per_packet)
         ):
+            plane_count = 0
             valid_epoch.append(val)
             group.sortby("sid")

             for key in scalar_keys:
-                decom_data[key].append(group[key].values)
+                # Repeat the scalar values for each image plane. There may be cases
+                # where the last packet has fewer planes than the planes_per_packet, so
+                # we slice to ensure the correct length.
+                decom_data[key].append(
+                    np.tile(group[key].values, planes_per_packet)[:image_planes]
+                )

             image = []
             for i in range(num_image_packets):
                 binary = convert_to_binary_string(group["packetdata"].values[i])
+                # Determine how many planes to decompress in this packet.
+                # the last packet might have fewer planes than planes_per_packet.
+                # Take the minimum of the remaining planes or the max planes per packet
+                # value.
+                planes_in_packet = min(image_planes - plane_count, planes_per_packet)
                 decompressed = decompress_image(
                     group["p00"].values[i],
                     binary,
                     packet_props,
+                    planes_in_packet,
                 )
                 image.append(decompressed)
+                plane_count += planes_in_packet

-            decom_data["packetdata"].append(np.stack(image))
+            decom_data["packetdata"].append(np.concatenate(image, axis=0))

     for key in scalar_keys:
-        decom_data[key] = np.stack(decom_data[key])
+        decom_data[key] = np.stack(decom_data[key], axis=0)

-    decom_data["packetdata"] = np.stack(decom_data["packetdata"])
+    decom_data["packetdata"] = np.stack(decom_data["packetdata"], axis=0)

     coords = {
         "epoch": np.array(valid_epoch, dtype=np.uint64),
-        "sid": xr.DataArray(np.arange(num_image_packets), dims=["sid"], name="sid"),
+        "plane": xr.DataArray(np.arange(image_planes), dims=["plane"], name="plane"),
         "row": xr.DataArray(np.arange(rows), dims=["row"], name="row"),
         "column": xr.DataArray(np.arange(cols), dims=["column"], name="column"),
     }

     dataset = xr.Dataset(coords=coords)

-    # Add scalar keys (2D: epoch x sid)
+    # Add scalar keys (2D: epoch x packets)
     for key in scalar_keys:
         dataset[key] = xr.DataArray(
             decom_data[key],
-            dims=["epoch", "sid"],
+            dims=["epoch", "plane"],
         )

     # Add PACKETDATA (4D: epoch x sid x row x column)
     dataset["packetdata"] = xr.DataArray(
         decom_data["packetdata"],
-        dims=["epoch", "sid", "row", "column"],
+        dims=["epoch", "plane", "row", "column"],
     )

     return dataset
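A small sketch of the scalar-key handling above: per-packet scalars are tiled once per plane and sliced so the last, possibly partial, packet still lines up with the total number of planes. The counts are hypothetical, and the scalar is assumed constant across the group purely for illustration.

```python
import numpy as np

image_planes = 5        # total planes expected per epoch
planes_per_packet = 2   # planes carried by each full packet

# One scalar value per packet; assumed constant across the group here.
packet_scalars = np.array([7, 7, 7])

per_plane = np.tile(packet_scalars, planes_per_packet)[:image_planes]
print(per_plane)  # [7 7 7 7 7] -- one entry per plane, last packet partial
```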
imap_processing/ultra/l0/ultra_utils.py
@@ -137,8 +137,8 @@ ULTRA_EXTOF_HIGH_ANGULAR = PacketProperties(
 ULTRA_EXTOF_HIGH_TIME = PacketProperties(
     apid=[888, 952],
     logical_source=[
-        "imap_ultra_l1a_45sensor-histogram-ena-extof-hi-time",
-        "imap_ultra_l1a_90sensor-histogram-ena-extof-hi-time",
+        "imap_ultra_l1a_45sensor-histogram-ion-extof-hi-time",
+        "imap_ultra_l1a_90sensor-histogram-ion-extof-hi-time",
     ],
     addition_to_logical_desc="Energy By Time of Flight High Time Images",
     width=4,
@@ -153,8 +153,8 @@ ULTRA_EXTOF_HIGH_TIME = PacketProperties(
 ULTRA_EXTOF_HIGH_ENERGY = PacketProperties(
     apid=[887, 951],
     logical_source=[
-        "imap_ultra_l1a_45sensor-histogram-ena-extof-hi-nrg",
-        "imap_ultra_l1a_90sensor-histogram-ena-extof-hi-nrg",
+        "imap_ultra_l1a_45sensor-histogram-ion-extof-hi-nrg",
+        "imap_ultra_l1a_90sensor-histogram-ion-extof-hi-nrg",
     ],
     addition_to_logical_desc="Energy By Time of Flight High Energy Images",
     width=4,
imap_processing/ultra/l1c/helio_pset.py
@@ -71,7 +71,7 @@ def calculate_helio_pset(
     """
     pset_dict: dict[str, np.ndarray] = {}
     # Select only the species we are interested in.
-    indices = np.where(np.isin(de_dataset["e_bin"].values, species_id))[0]
+    indices = np.where(np.isin(de_dataset["ebin"].values, species_id))[0]
     species_dataset = de_dataset.isel(epoch=indices)

     rejected = get_de_rejection_mask(
@@ -176,7 +176,7 @@ def calculate_helio_pset(
     pset_dict["latitude"] = latitude[np.newaxis, ...]
     pset_dict["longitude"] = longitude[np.newaxis, ...]
     pset_dict["energy_bin_geometric_mean"] = energy_bin_geometric_means
-    pset_dict["helio_exposure_factor"] = exposure_time
+    pset_dict["helio_exposure_factor"] = exposure_time[np.newaxis, ...]
     pset_dict["pixel_index"] = healpix
     pset_dict["energy_bin_delta"] = np.diff(intervals, axis=1).squeeze()[
         np.newaxis, ...
imap_processing/ultra/l1c/spacecraft_pset.py
@@ -72,7 +72,7 @@ def calculate_spacecraft_pset(
     """
     pset_dict: dict[str, np.ndarray] = {}

     sensor = parse_filename_like(name)["sensor"][0:2]
-    indices = np.where(np.isin(de_dataset["e_bin"].values, species_id))[0]
+    indices = np.where(np.isin(de_dataset["ebin"].values, species_id))[0]
     species_dataset = de_dataset.isel(epoch=indices)
     # If there are no species return None.
@@ -184,7 +184,7 @@ def calculate_spacecraft_pset(
     pset_dict["longitude"] = longitude[np.newaxis, ...]
     pset_dict["energy_bin_geometric_mean"] = energy_bin_geometric_means
     pset_dict["background_rates"] = background_rates[np.newaxis, ...]
-    pset_dict["exposure_factor"] = exposure_pointing
+    pset_dict["exposure_factor"] = exposure_pointing[np.newaxis, ...]
     pset_dict["pixel_index"] = healpix
     pset_dict["energy_bin_delta"] = np.diff(intervals, axis=1).squeeze()[
         np.newaxis, ...
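Both PSET fixes above add a leading dimension with [np.newaxis, ...] so the exposure arrays match the other variables that already carry a leading axis. A minimal sketch with a hypothetical HEALPix-sized array:

```python
import numpy as np

exposure_pointing = np.zeros(12288)              # hypothetical: one value per pixel
print(exposure_pointing.shape)                   # (12288,)
print(exposure_pointing[np.newaxis, ...].shape)  # (1, 12288) -> leading axis added
```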
@@ -197,8 +197,11 @@ def calculate_spacecraft_pset(
     pset_dict["dead_time_ratio"] = deadtime_ratios
     pset_dict["spin_phase_step"] = np.arange(len(deadtime_ratios))

-    pset_dict["scatter_theta"] = scattering_theta
-    pset_dict["scatter_phi"] = scattering_phi
+    # Convert FWHM to gaussian uncertainty by dividing by 2.355
+    # See algorithm documentation (section 3.5.7, third bullet point) for more details
+    pset_dict["scatter_theta"] = scattering_theta / 2.355
+    pset_dict["scatter_phi"] = scattering_phi / 2.355
+
     pset_dict["scatter_threshold"] = scattering_thresholds

     # Add the energy delta plus/minus to the dataset
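The 2.355 divisor above is the usual FWHM-to-standard-deviation conversion for a Gaussian, FWHM = 2·sqrt(2·ln 2)·sigma ≈ 2.3548·sigma. A quick check with a hypothetical FWHM value:

```python
import math

fwhm_to_sigma = 2 * math.sqrt(2 * math.log(2))
print(fwhm_to_sigma)                   # 2.3548..., rounded to 2.355 in the code

scattering_theta_fwhm = 10.0           # hypothetical FWHM
print(scattering_theta_fwhm / 2.355)   # ~4.246 -> 1-sigma scatter uncertainty
```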