petpal 0.5.5__py3-none-any.whl → 0.5.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,22 +17,19 @@ TODO:
  * (stitch_broken_scans) Assumes non-BIDS key 'DecayFactor' instead of BIDS-required 'DecayCorrectionFactor' for
  ease-of-use with NIL data. Should be changed in the future.
  * (stitch_broken_scans) Currently writes intermediate files even if output_image_path is None.
- * (suvr) Allow list to be passed as ref_region to use multiple regions together as a reference region (i.e. left
- and right cerebellum gray matter).

  """
- import os
  import pathlib
  import datetime
- import tempfile
  import ants
  import nibabel
  import numpy as np
  from scipy.ndimage import center_of_mass

- from ..utils.useful_functions import weighted_series_sum
+ from .motion_target import determine_motion_target
+
  from ..utils import image_io, math_lib
- from ..preproc.decay_correction import undo_decay_correction, decay_correct
+ from .decay_correction import undo_decay_correction, decay_correct

  def stitch_broken_scans(input_image_path: str,
  output_image_path: str,
@@ -220,112 +217,18 @@ def rescale_image(input_image: ants.core.ANTsImage, rescale_constant: float, op:
  return input_image * rescale_constant


- def determine_motion_target(motion_target_option: str | tuple | list,
- input_image_4d_path: str = None,
- half_life: float = None) -> str:
- """
- Produce a motion target given the ``motion_target_option`` from a method
- running registrations on PET, i.e. :meth:`motion_correction` or
- :meth:`register_pet`.
-
- The motion target option can be a string or a tuple. If it is a string,
- then if this string is a file, use the file as the motion target.
-
- If it is the option ``weighted_series_sum``, then run
- :meth:`weighted_series_sum` and return the output path.
-
- If it is the option ``mean_image``, then compute the time-average of the
- 4D-PET image.
-
- If it is a tuple, run a weighted sum on the PET series on a range of
- frames. The elements of the tuple are treated as times in seconds, counted
- from the time of the first frame, i.e. (0,300) would average all frames
- from the first to the frame 300 seconds later. If the two elements are the
- same, returns the one frame closest to the entered time.
-
- Args:
- motion_target_option (str | tuple | list): Determines how the method behaves,
- according to the above description. Can be a file, a method
- ('weighted_series_sum' or 'mean_image'), or a tuple range e.g. (0,600).
- input_image_4d_path (str): Path to the PET image. This is intended to
- be supplied by the parent method employing this function. Default
- value None.
- half_life (float): Half life of the radiotracer used in the image
- located at ``input_image_4d_path``. Only used if a calculation is
- performed.
-
- Returns:
- out_image_file (str): File to use as a target to compute
- transformations on.
-
- Raises:
- ValueError: If ``motion_target_option`` does not match an acceptable option, or if
- ``half_life`` is not specified when ``motion_target_option`` is not 'mean_image'
- TypeError: If start and end time are incompatible with ``float`` type.
- """
- if motion_target_option != 'mean_image' and half_life is None:
- raise ValueError('half_life must be specified if not using "mean_image" for motion_target_option')
-
- if isinstance(motion_target_option, str):
- if os.path.exists(motion_target_option):
- return motion_target_option
-
- if motion_target_option == 'weighted_series_sum':
- out_image_file = tempfile.mkstemp(suffix='_wss.nii.gz')[1]
- weighted_series_sum(input_image_4d_path=input_image_4d_path,
- out_image_path=out_image_file,
- half_life=half_life,
- verbose=False)
- return out_image_file
-
- if motion_target_option == 'mean_image':
- out_image_file = tempfile.mkstemp(suffix='_mean.nii.gz')[1]
- input_img = ants.image_read(input_image_4d_path)
- mean_img = get_average_of_timeseries(input_image=input_img)
- ants.image_write(image=mean_img,filename=out_image_file)
- return out_image_file
-
- raise ValueError("motion_target_option did not match a file or 'weighted_series_sum'")
-
- if isinstance(motion_target_option, (list, tuple)):
-
- start_time = motion_target_option[0]
- end_time = motion_target_option[1]
-
- try:
- float(start_time)
- float(end_time)
- except Exception as exc:
- raise TypeError('Start time and end time of calculation must be '
- 'able to be cast into float! Provided values are '
- f"{start_time} and {end_time}.") from exc
-
- out_image_file = tempfile.mkstemp(suffix='_wss.nii.gz')[1]
- weighted_series_sum(input_image_4d_path=input_image_4d_path,
- out_image_path=out_image_file,
- half_life=half_life,
- verbose=False,
- start_time=float(start_time),
- end_time=float(end_time))
-
- return out_image_file
-
- raise ValueError('motion_target_option did not match str or tuple type.')
-
-
- def brain_mask(input_image_4d_path: str,
+ def brain_mask(input_image_path: str,
  out_image_path: str,
  atlas_image_path: str,
  atlas_mask_path: str,
- motion_target_option='mean_image',
- half_life: float=None):
+ motion_target_option='mean_image'):
  """
  Create a brain mask for a PET image. Create target PET image, which is then warped to a
  provided anatomical atlas. The transformation to atlas space is then applied to transform a
  provided mask in atlas space into PET space. This mask can then be used in various operations.

  Args:
- input_image_4d_path (str): Path to input 4D PET image.
+ input_image_path (str): Path to input 4D PET image.
  out_image_path (str): Path to which brain mask in PET space is written.
  atlas_image_path (str): Path to anatomical atlas image.
  atlas_mask_path (str): Path to brain mask in atlas space.
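The hunk above removes determine_motion_target from this module; per the import added at the top of the file it now comes from petpal.preproc.motion_target, and the updated call sites throughout this diff no longer pass half_life. A minimal sketch of the 0.5.7 call pattern, inferred only from those call sites (the file path and the 'mean_image' choice are placeholders, and the relocated function's full signature is not shown in this diff):

    # Hedged sketch based on the new call sites in this diff; the path is a placeholder.
    from petpal.preproc.motion_target import determine_motion_target

    target_path = determine_motion_target(motion_target_option='mean_image',
                                          input_image_path='sub-01_pet.nii.gz')
    # target_path points to a file usable as a registration target, as before.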
@@ -338,11 +241,9 @@ def brain_mask(input_image_4d_path: str,
  """
  atlas = ants.image_read(atlas_image_path)
  atlas_mask = ants.image_read(atlas_mask_path)
- pet_ref = ants.image_read(determine_motion_target(
- motion_target_option=motion_target_option,
- input_image_4d_path=input_image_4d_path,
- half_life=half_life
- ))
+ motion_target = determine_motion_target(motion_target_option=motion_target_option,
+ input_image_path=input_image_path)
+ pet_ref = ants.image_read(motion_target)
  xfm = ants.registration(
  fixed=atlas,
  moving=pet_ref,
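The two brain_mask hunks above drop half_life both from the signature and from the internal determine_motion_target call. A call against 0.5.7 would then look roughly as below. This is a hedged sketch: all paths are placeholders, and the module path assumes this file is petpal/preproc/image_operations_4d.py, as the imports elsewhere in this diff suggest.

    from petpal.preproc.image_operations_4d import brain_mask

    # All paths are placeholders.
    brain_mask(input_image_path='sub-01_pet.nii.gz',
               out_image_path='sub-01_space-pet_desc-brain_mask.nii.gz',
               atlas_image_path='atlas_T1w.nii.gz',
               atlas_mask_path='atlas_desc-brain_mask.nii.gz',
               motion_target_option='mean_image')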
@@ -370,7 +271,7 @@ def extract_mean_roi_tac_from_nifti_using_segmentation(input_image_4d_numpy: np.
  regional values. Currently, only the mean over a single region value is implemented.

  Args:
- input_image_4d_path (str): Path to a .nii or .nii.gz file containing a 4D
+ input_image_path (str): Path to a .nii or .nii.gz file containing a 4D
  PET image, registered to anatomical space.
  segmentation_image_path (str): Path to a .nii or .nii.gz file containing a 3D segmentation
  image, where integer indices label specific regions. Must have same sampling as PET
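The docstring above describes extracting a mean time-activity curve for one labeled region. The underlying array operation is just masked averaging; a self-contained numpy illustration with toy data (the array names and the label value 8 are hypothetical, and petpal's own helper takes different arguments):

    import numpy as np

    rng = np.random.default_rng(0)
    pet_4d = rng.random((4, 4, 4, 10))      # toy (x, y, z, t) PET array
    seg_3d = np.zeros((4, 4, 4), dtype=int)
    seg_3d[1:3, 1:3, 1:3] = 8               # hypothetical region label

    region_voxels = pet_4d[seg_3d == 8]     # shape (n_voxels, n_frames)
    mean_tac = region_voxels.mean(axis=0)   # one mean value per frame
    print(mean_tac.shape)                   # (10,)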
@@ -447,71 +348,6 @@ def binarize_image_with_threshold(input_image_numpy: np.ndarray,
  return bounded_image


- def get_average_of_timeseries(input_image: ants.ANTsImage):
- """
- Get average of a 4D ANTsImage and return as a 3D ANTsImage.
- """
- assert len(input_image.shape) == 4, "Input image must be 4D"
- mean_array = input_image.mean(axis=-1)
- mean_image = ants.from_numpy(data=mean_array,
- origin=input_image.origin[:-1],
- spacing=input_image.spacing[:-1],
- direction=input_image.direction[:-1,:-1])
- return mean_image
-
-
- def suvr(input_image_path: str,
- out_image_path: str | None,
- segmentation_image_path: str,
- ref_region: int,
- verbose: bool=False) -> ants.ANTsImage:
- """
- Computes an ``SUVR`` (Standard Uptake Value Ratio) by taking the average of
- an input image within a reference region, and dividing the input image by
- said average value.
-
- Args:
- input_image_path (str): Path to 3D weighted series sum or other
- parametric image on which we compute SUVR.
- out_image_path (str): Path to output image file which is written to. If None, no output is written.
- segmentation_image_path (str): Path to segmentation image, which we use
- to compute average uptake value in the reference region.
- ref_region (int): Region number mapping to the reference region in the
- segmentation image.
- verbose (bool): Set to ``True`` to output processing information. Default is False.
-
- Returns:
- ants.ANTsImage: SUVR parametric image
- """
- pet_img = ants.image_read(filename=input_image_path)
- pet_arr = pet_img.numpy()
- segmentation_img = ants.image_read(filename=segmentation_image_path,
- pixeltype='unsigned int')
- segmentation_arr = segmentation_img.numpy()
-
- if len(pet_arr.shape)!=3:
- raise ValueError("SUVR input image is not 3D. If your image is dynamic, try running 'weighted_series_sum'"
- " first.")
-
- ref_region_avg = extract_mean_roi_tac_from_nifti_using_segmentation(input_image_4d_numpy=pet_arr,
- segmentation_image_numpy=segmentation_arr,
- region=ref_region,
- verbose=verbose)
-
- suvr_arr = pet_arr / ref_region_avg[0]
-
- out_img = ants.from_numpy_like(data=suvr_arr,
- image=pet_img)
-
- if out_image_path is not None:
- ants.image_write(image=out_img,
- filename=out_image_path)
- image_io.safe_copy_meta(input_image_path=input_image_path,
- out_image_path=out_image_path)
-
- return out_img
-
-
  def gauss_blur(input_image_path: str,
  blur_size_mm: float,
  out_image_path: str,
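The hunk above removes both get_average_of_timeseries and the suvr helper from this module. For reference, the computation the removed suvr performed reduces to dividing a 3D parametric image by the mean value inside a reference-region label; a minimal standalone sketch with ants, assuming placeholder file names and a hypothetical reference label of 8:

    import ants

    # Placeholders: 'wss.nii.gz' is a 3D parametric image, 'seg.nii.gz' an
    # integer-labeled segmentation in the same space, 8 a hypothetical label.
    pet_img = ants.image_read('wss.nii.gz')
    seg_arr = ants.image_read('seg.nii.gz').numpy()
    pet_arr = pet_img.numpy()

    ref_mean = pet_arr[seg_arr == 8].mean()     # mean uptake in the reference region
    suvr_img = ants.from_numpy_like(data=pet_arr / ref_mean, image=pet_img)
    ants.image_write(image=suvr_img, filename='suvr.nii.gz')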
@@ -556,7 +392,7 @@ def gauss_blur(input_image_path: str,
  return out_image


- def roi_tac(input_image_4d_path: str,
+ def roi_tac(input_image_path: str,
  roi_image_path: str,
  region: int,
  out_tac_path: str,
@@ -573,9 +409,9 @@ def roi_tac(input_image_4d_path: str,
  raise ValueError("'time_frame_keyword' must be one of "
  "'FrameReferenceTime' or 'FrameTimesStart'")

- pet_meta = image_io.load_metadata_for_nifti_with_same_filename(input_image_4d_path)
+ pet_meta = image_io.load_metadata_for_nifti_with_same_filename(input_image_path)
  tac_extraction_func = extract_mean_roi_tac_from_nifti_using_segmentation
- pet_numpy = nibabel.load(input_image_4d_path).get_fdata()
+ pet_numpy = nibabel.load(input_image_path).get_fdata()
  seg_numpy = nibabel.load(roi_image_path).get_fdata()

@@ -7,20 +7,21 @@ registration.
  import ants
  import numpy as np

+ from petpal.utils.useful_functions import gen_nd_image_based_on_image_list

- from .image_operations_4d import determine_motion_target
+
+ from .motion_target import determine_motion_target
  from ..utils import image_io
  from ..utils.scan_timing import ScanTimingInfo, get_window_index_pairs_for_image
  from ..utils.useful_functions import weighted_series_sum_over_window_indecies
  from ..utils.image_io import get_half_life_from_nifti


- def motion_corr(input_image_4d_path: str,
+ def motion_corr(input_image_path: str,
  motion_target_option: str | tuple,
  out_image_path: str,
  verbose: bool,
  type_of_transform: str = 'DenseRigid',
- half_life: float = None,
  **kwargs) -> tuple[np.ndarray, list[str], list[float]]:
  """
  Correct PET image series for inter-frame motion. Runs rigid motion
@@ -28,7 +29,7 @@ def motion_corr(input_image_4d_path: str,
  inputs.

  Args:
- input_image_4d_path (str): Path to a .nii or .nii.gz file containing a 4D
+ input_image_path (str): Path to a .nii or .nii.gz file containing a 4D
  PET image to be motion corrected.
  motion_target_option (str | tuple): Target image for computing
  transformation. See :meth:`determine_motion_target`.
@@ -40,8 +41,6 @@ def motion_corr(input_image_4d_path: str,
  'Translation'. Any transformation type that uses >6 degrees of
  freedom is not recommended, use with caution. See
  :py:func:`ants.registration`.
- half_life (float): Half life of the PET radioisotope in seconds. Used
- for certain settings of ``motion_target_option``.
  kwargs (keyword arguments): Additional arguments passed to
  :py:func:`ants.motion_correction`.

@@ -53,10 +52,9 @@ def motion_corr(input_image_4d_path: str,
  pet_moco_fd (list[float]): List of framewise displacement measure
  corresponding to each frame transform.
  """
- pet_ants = ants.image_read(input_image_4d_path)
+ pet_ants = ants.image_read(input_image_path)
  motion_target_image_path = determine_motion_target(motion_target_option=motion_target_option,
- input_image_4d_path=input_image_4d_path,
- half_life=half_life)
+ input_image_path=input_image_path)

  motion_target_image = ants.image_read(motion_target_image_path)
  pet_moco_ants_dict = ants.motion_correction(image=pet_ants,
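The hunk above shows motion_corr's body calling the relocated determine_motion_target without half_life; with half_life also gone from the signature, a 0.5.7-style call carries only the remaining keywords. A hedged sketch using the signature shown in this diff (placeholder paths; 'mean_image' is just one of the target options determine_motion_target accepts):

    from petpal.preproc.motion_corr import motion_corr

    # Placeholder paths; motion_target_option may also be an existing image file
    # or a (start, end) tuple of times in seconds.
    moco_arr, moco_params, moco_fd = motion_corr(input_image_path='sub-01_pet.nii.gz',
                                                 motion_target_option='mean_image',
                                                 out_image_path='sub-01_desc-moco_pet.nii.gz',
                                                 verbose=True,
                                                 type_of_transform='DenseRigid')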
@@ -71,21 +69,20 @@ def motion_corr(input_image_4d_path: str,
  pet_moco_fd = pet_moco_ants_dict['FD']
  pet_moco_np = pet_moco_ants.numpy()
  ants.image_write(image=pet_moco_ants,filename=out_image_path)
- image_io.safe_copy_meta(input_image_path=input_image_4d_path, out_image_path=out_image_path)
+ image_io.safe_copy_meta(input_image_path=input_image_path, out_image_path=out_image_path)

  if verbose:
  print(f"(ImageOps4d): motion corrected image saved to {out_image_path}")
  return pet_moco_np, pet_moco_params, pet_moco_fd


- def motion_corr_frame_list(input_image_4d_path: str,
+ def motion_corr_frame_list(input_image_path: str,
  motion_target_option: str | tuple,
  out_image_path: str,
  verbose: bool,
  frames_list: list = None,
  type_of_transform: str = 'Affine',
  transform_metric: str = 'mattes',
- half_life: float = None,
  **kwargs):
  r"""
  Perform per-frame motion correction on a 4D PET image.
@@ -94,7 +91,7 @@ def motion_corr_frame_list(input_image_4d_path: str,
  motion target. Only the frames in ``frames_list`` are motion corrected, all else are kept as is.

  Args:
- input_image_4d_path (str): Path to the input 4D PET image file.
+ input_image_path (str): Path to the input 4D PET image file.
  motion_target_option (str | tuple): Option to determine the motion target. This can
  be a path to a specific image file, a tuple of frame indices to generate a target, or
  specific options recognized by :func:`determine_motion_target`.
@@ -106,8 +103,6 @@ def motion_corr_frame_list(input_image_4d_path: str,
  is 'Affine'.
  transform_metric (str, optional): Metric to use for the transformation. Default is
  'mattes'.
- half_life (float, optional): Half-life value used by `determine_motion_target` if
- applicable. Default is None.
  **kwargs: Additional arguments passed to the `ants.registration` method.

  Returns:
@@ -119,7 +114,7 @@ def motion_corr_frame_list(input_image_4d_path: str,

  from petpal.preproc.motion_corr import motion_corr_frame_list

- motion_corr_frame_list(input_image_4d_path='/path/to/image.nii.gz',
+ motion_corr_frame_list(input_image_path='/path/to/image.nii.gz',
  motion_target_option='/path/to/target_image.nii.gz',
  out_image_path='/path/to/output_motion_corrected.nii.gz',
  verbose=True)
@@ -134,11 +129,10 @@ def motion_corr_frame_list(input_image_4d_path: str,
  path.

  """
- input_image = ants.image_read(input_image_4d_path)
+ input_image = ants.image_read(input_image_path)

  motion_target_path = determine_motion_target(motion_target_option=motion_target_option,
- input_image_4d_path=input_image_4d_path,
- half_life=half_life)
+ input_image_path=input_image_path)
  motion_target = ants.image_read(motion_target_path)

  frames_to_correct = np.zeros(input_image.shape[-1], dtype=bool)
@@ -182,15 +176,14 @@ def motion_corr_frame_list(input_image_4d_path: str,
  print(f"(ImageOps4d): motion corrected image saved to {out_image_path}")


- def motion_corr_frame_list_to_t1(input_image_4d_path: str,
+ def motion_corr_frame_list_to_t1(input_image_path: str,
  t1_image_path: str,
  motion_target_option: str | tuple,
  out_image_path: str,
  verbose: bool,
  frames_list: list = None,
  type_of_transform: str = 'AffineFast',
- transform_metric: str = "mattes",
- half_life: float = None):
+ transform_metric: str = "mattes"):
  r"""
  Perform motion correction of a 4D PET image to a T1 anatomical image.

@@ -203,7 +196,7 @@ def motion_corr_frame_list_to_t1(input_image_4d_path: str,
  transformed to the motion-target in T1-space.

  Args:
- input_image_4d_path (str): Path to the 4D PET image to be corrected.
+ input_image_path (str): Path to the 4D PET image to be corrected.
  t1_image_path (str): Path to the 3D T1 anatomical image.
  motion_target_option (str | tuple): Option for selecting the motion target image.
  Can be a path to a file or a tuple range. If None, the average of the PET timeseries
@@ -215,8 +208,6 @@ def motion_corr_frame_list_to_t1(input_image_4d_path: str,
  type_of_transform (str): Type of transformation used in registration. Default is
  'AffineFast'.
  transform_metric (str): Metric for transformation optimization. Default is 'mattes'.
- half_life (float, optional): Half-life of the PET radioisotope. Used if a calculation
- is required for the motion target.

  Returns:
  None
@@ -230,7 +221,7 @@ def motion_corr_frame_list_to_t1(input_image_4d_path: str,
  .. code-block:: python


- motion_corr_frame_list_to_t1(input_image_4d_path='pet_timeseries.nii.gz',
+ motion_corr_frame_list_to_t1(input_image_path='pet_timeseries.nii.gz',
  t1_image_path='t1_image.nii.gz',
  motion_target_option='mean_image',
  out_image_path='pet_corrected.nii.gz',
@@ -238,12 +229,11 @@ def motion_corr_frame_list_to_t1(input_image_4d_path: str,

  """

- input_image = ants.image_read(input_image_4d_path)
+ input_image = ants.image_read(input_image_path)
  t1_image = ants.image_read(t1_image_path)

  motion_target_path = determine_motion_target(motion_target_option=motion_target_option,
- input_image_4d_path=input_image_4d_path,
- half_life=half_life)
+ input_image_path=input_image_path)
  motion_target = ants.image_read(motion_target_path)

  motion_target_to_mpr_reg = ants.registration(fixed=t1_image,
@@ -297,13 +287,12 @@ def motion_corr_frame_list_to_t1(input_image_4d_path: str,
  print(f"(ImageOps4d): motion corrected image saved to {out_image_path}")


- def motion_corr_frames_above_mean_value(input_image_4d_path: str,
+ def motion_corr_frames_above_mean_value(input_image_path: str,
  out_image_path: str,
  motion_target_option: str | tuple,
  verbose: bool,
  type_of_transform: str = 'Affine',
  transform_metric: str = 'mattes',
- half_life: float = None,
  scale_factor=1.0,
  **kwargs):
  r"""
@@ -314,7 +303,7 @@ def motion_corr_frames_above_mean_value(input_image_4d_path: str,
  utilizes the :func:`motion_corr_frame_list` function to perform the motion correction.

  Args:
- input_image_4d_path (str): Path to the input 4D PET image file.
+ input_image_path (str): Path to the input 4D PET image file.
  motion_target_option (str | tuple): Option to determine the motion target. This can
  be a path to a specific image file, a tuple of frame indices to generate a target, or
  specific options recognized by :func:`determine_motion_target`.
@@ -324,8 +313,6 @@ def motion_corr_frames_above_mean_value(input_image_4d_path: str,
  Default is 'Affine'.
  transform_metric (str, optional): Metric to use for the transformation. Default is
  'mattes'.
- half_life (float, optional): Half-life value used by `determine_motion_target`, if
- applicable. Default is None.
  scale_factor (float, optional): Scale factor to apply to frame mean values before
  comparison. Default is 1.0.
  **kwargs: Additional arguments passed to the `ants.registration` method.
@@ -339,7 +326,7 @@ def motion_corr_frames_above_mean_value(input_image_4d_path: str,

  from petpal.preproc.motion_corr import motion_corr_frames_above_mean_value

- motion_corr_frames_above_mean_value(input_image_4d_path='/path/to/image.nii.gz',
+ motion_corr_frames_above_mean_value(input_image_path='/path/to/image.nii.gz',
  motion_target_option='/path/to/target_image.nii.gz',
  out_image_path='/path/to/output_motion_corrected.nii.gz',
  verbose=True,
@@ -356,28 +343,26 @@ def motion_corr_frames_above_mean_value(input_image_4d_path: str,

  """

- frames_list = _get_list_of_frames_above_total_mean(image_4d_path=input_image_4d_path,
+ frames_list = _get_list_of_frames_above_total_mean(image_4d_path=input_image_path,
  scale_factor=scale_factor)

- motion_corr_frame_list(input_image_4d_path=input_image_4d_path,
+ motion_corr_frame_list(input_image_path=input_image_path,
  motion_target_option=motion_target_option,
  out_image_path=out_image_path,
  verbose=verbose,
  frames_list=frames_list,
  type_of_transform=type_of_transform,
  transform_metric=transform_metric,
- half_life=half_life,
  **kwargs)


- def motion_corr_frames_above_mean_value_to_t1(input_image_4d_path: str,
+ def motion_corr_frames_above_mean_value_to_t1(input_image_path: str,
  t1_image_path: str,
  motion_target_option: str | tuple,
  out_image_path: str,
  verbose: bool,
  type_of_transform: str = 'AffineFast',
  transform_metric: str = "mattes",
- half_life: float = None,
  scale_factor: float = 1.0):
  """
  Perform motion correction on frames with mean values above the mean of a 4D PET image to a T1
@@ -389,7 +374,7 @@ def motion_corr_frames_above_mean_value_to_t1(input_image_4d_path: str,
  function.

  Args:
- input_image_4d_path (str): Path to the input 4D PET image file.
+ input_image_path (str): Path to the input 4D PET image file.
  t1_image_path (str): Path to the 3D T1 anatomical image.
  motion_target_option (str | tuple): Option to determine the motion target. This can
  be a path to a specific image file, a tuple of frame indices to generate a target, or
@@ -399,8 +384,6 @@ def motion_corr_frames_above_mean_value_to_t1(input_image_4d_path: str,
  type_of_transform (str, optional): Type of transformation to use for registration. Default
  is 'AffineFast'.
  transform_metric (str, optional): Metric to use for the transformation. Default is 'mattes'.
- half_life (float, optional): Half-life value used by `determine_motion_target`, if
- applicable. Default is None.
  scale_factor (float, optional): Scale factor applied to the mean voxel value of the entire
  image for comparison. Must be greater than 0. Default is 1.0.

@@ -413,7 +396,7 @@ def motion_corr_frames_above_mean_value_to_t1(input_image_4d_path: str,

  from petpal.preproc.motion_corr import motion_corr_frames_above_mean_value_to_t1

- motion_corr_frames_above_mean_value_to_t1(input_image_4d_path='/path/to/image.nii.gz',
+ motion_corr_frames_above_mean_value_to_t1(input_image_path='/path/to/image.nii.gz',
  t1_image_path='/path/to/t1_image.nii.gz',
  motion_target_option='/path/to/target_image.nii.gz',
  out_image_path='/path/to/output_motion_corrected.nii.gz',
@@ -430,18 +413,17 @@ def motion_corr_frames_above_mean_value_to_t1(input_image_4d_path: str,
  - The :func:`_get_list_of_frames_above_total_mean` function identifies
  the frames to be motion corrected based on their mean voxel values.
  """
- frames_list = _get_list_of_frames_above_total_mean(image_4d_path=input_image_4d_path,
+ frames_list = _get_list_of_frames_above_total_mean(image_4d_path=input_image_path,
  scale_factor=scale_factor)

- motion_corr_frame_list_to_t1(input_image_4d_path=input_image_4d_path,
+ motion_corr_frame_list_to_t1(input_image_path=input_image_path,
  t1_image_path=t1_image_path,
  motion_target_option=motion_target_option,
  out_image_path=out_image_path,
  verbose=verbose,
  frames_list=frames_list,
  type_of_transform=type_of_transform,
- transform_metric=transform_metric,
- half_life=half_life)
+ transform_metric=transform_metric)


  def windowed_motion_corr_to_target(input_image_path: str,
@@ -500,8 +482,7 @@ def windowed_motion_corr_to_target(input_image_path: str,
  frame_timing_info = ScanTimingInfo.from_nifti(image_path=input_image_path)

  target_image = determine_motion_target(motion_target_option=motion_target_option,
- input_image_4d_path=input_image_path,
- half_life=half_life)
+ input_image_path=input_image_path)
  target_image = ants.image_read(target_image)

  reg_kwargs_default = {'aff_metric' : 'mattes',
@@ -551,68 +532,6 @@ def gen_timeseries_from_image_list(image_list: list[ants.core.ANTsImage]) -> ant
  return ants.list_to_ndimage(tmp_image, image_list)


- def gen_nd_image_based_on_image_list(image_list: list[ants.core.ants_image.ANTsImage]):
- r"""
- Generate a 4D ANTsImage based on a list of 3D ANTsImages.
-
- This function takes a list of 3D ANTsImages and constructs a new 4D ANTsImage,
- where the additional dimension represents the number of frames (3D images) in the list.
- The 4D image retains the spacing, origin, direction, and shape properties of the 3D images,
- with appropriate modifications for the additional dimension.
-
- Args:
- image_list (list[ants.core.ants_image.ANTsImage]):
- List of 3D ANTsImage objects to be combined into a 4D image.
- The list must contain at least one image, and all images must have the same
- dimensions and properties.
-
- Returns:
- ants.core.ants_image.ANTsImage:
- A 4D ANTsImage constructed from the input list of 3D images. The additional
- dimension corresponds to the number of frames (length of the image list).
-
- Raises:
- AssertionError: If the `image_list` is empty or if the images in the list are not 3D.
-
- See Also
- * :func:`petpal.preproc.motion_corr.motion_corr_frame_list_to_t1`
-
- Example:
-
- .. code-block:: python
-
-
- import ants
- image1 = ants.image_read('frame1.nii.gz')
- image2 = ants.image_read('frame2.nii.gz')
- image_list = [image1, image2]
- result = _gen_nd_image_based_on_image_list(image_list)
- print(result.dimension) # 4
- image4d = ants.list_to_ndimage(result, image_list)
-
- """
- assert len(image_list) > 0
- assert image_list[0].dimension == 3
-
- num_frames = len(image_list)
- spacing_3d = image_list[0].spacing
- origin_3d = image_list[0].origin
- shape_3d = image_list[0].shape
- direction_3d = image_list[0].direction
-
- direction_4d = np.eye(4)
- direction_4d[:3, :3] = direction_3d
- spacing_4d = (*spacing_3d, 1.0)
- origin_4d = (*origin_3d, 0.0)
- shape_4d = (*shape_3d, num_frames)
-
- tmp_image = ants.make_image(imagesize=shape_4d,
- spacing=spacing_4d,
- origin=origin_4d,
- direction=direction_4d)
- return tmp_image
-
-
  def _get_list_of_frames_above_total_mean(image_4d_path: str,
  scale_factor: float = 1.0):
  """