petpal 0.5.10__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
petpal/__init__.py CHANGED
@@ -5,6 +5,7 @@ from . import utils
5
5
  from . import visualizations
6
6
  from . import pipelines
7
7
  from . import meta
8
+ from . import io
8
9
 
9
10
  def main():
10
11
  print("PET-PAL (Positron Emission Tomography Processing and Analysis Library)")
petpal/cli/cli_preproc.py CHANGED
@@ -83,7 +83,12 @@ Examples:
83
83
  .. code-block:: bash
84
84
 
85
85
  petpal-preproc warp-pet-atlas -i /path/to/input_img.nii.gz -o petpal_reg-atlas.nii.gz --anatomical /path/to/anat.nii.gz --reference-atlas /path/to/atlas.nii.gz
86
-
86
+
87
+ * Crop segmentation to PET FOV:
88
+
89
+ .. code-block:: bash
90
+
91
+ petpal-preproc seg-crop -i /path/to/input_img.nii.gz -o petpal_cropped_seg.nii.gz --segmentation /path/to/segmentation.nii.gz
87
92
 
88
93
  See Also:
89
94
  * :mod:`~petpal.preproc.image_operations_4d` - module used for operations on 4D images.
@@ -99,7 +104,8 @@ from ..preproc import (image_operations_4d,
99
104
  motion_corr,
100
105
  register,
101
106
  regional_tac_extraction,
102
- standard_uptake_value)
107
+ standard_uptake_value,
108
+ segmentation_tools)
103
109
 
104
110
 
105
111
  _PREPROC_EXAMPLES_ = r"""
@@ -126,6 +132,10 @@ Examples:
126
132
  petpal-preproc warp-pet-atlas -i /path/to/input_img.nii.gz -o petpal_reg-atlas.nii.gz --anatomical /path/to/anat.nii.gz --reference-atlas /path/to/atlas.nii.gz
127
133
  - SUV:
128
134
  petpal-preproc suv -i /path/to/input_img.nii.gz -o petpal_suv.nii.gz --weight 75 --dose 250 --start-time 1200 --end-time 3600
135
+ - Crop segmentation image to PET FOV:
136
+ petpal-preproc seg-crop -i /path/to/input_img.nii.gz -o petpal_cropped_seg.nii.gz --segmentation /path/to/segmentation.nii.gz
137
+ - Add eroded white matter region to segmentation image:
138
+ petpal-preproc eroded-wm -i /path/to/input_segmentation.nii.gz -o petpal_seg_with_eroded_wm.nii.gz
129
139
  """
130
140
 
131
141
 
@@ -311,12 +321,11 @@ def _generate_args() -> argparse.ArgumentParser:
311
321
  help='Windowed motion correction for 4D PET'
312
322
  ' using ANTS')
313
323
  _add_common_args(parser_window_moco)
314
- parser_window_moco.add_argument('-t',
315
- '--motion-target',
316
- default='weighted_series_sum',
317
- type=str,
318
- help="Motion target option. Can be an image path , "
319
- "'weighted_series_sum' or 'mean_image'")
324
+ parser_window_moco.add_argument('--motion-target', default=None, nargs='+',
325
+ help="Motion target option. Can be an image path, "
326
+ "'weighted_series_sum' or a tuple "
327
+ "(e.g. '--motion-target 0 600' for the first ten minutes).",
328
+ required=True)
320
329
  parser_window_moco.add_argument('-w', '--window-size', default=60.0, type=float,
321
330
  help="Window size in seconds.",)
322
331
  xfm_types = ['QuickRigid', 'Rigid', 'DenseRigid', 'Affine', 'AffineFast']
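
With `nargs='+'`, the `--motion-target` values above arrive as a list of strings: a single token can be an image path or 'weighted_series_sum', while two numeric tokens can describe a time window in seconds. A minimal sketch of that interpretation, assuming a hypothetical helper `_coerce_motion_target` (the package's actual conversion code is not shown in this diff):

.. code-block:: python

    import argparse

    def _coerce_motion_target(values):
        """Hypothetical helper: one token is a path or preset name,
        two numeric tokens become a (start, end) tuple in seconds."""
        if len(values) == 1:
            return values[0]                       # e.g. 'weighted_series_sum' or a file path
        return tuple(float(v) for v in values)     # e.g. ['0', '600'] -> (0.0, 600.0)

    parser = argparse.ArgumentParser()
    parser.add_argument('--motion-target', nargs='+', required=True)
    args = parser.parse_args(['--motion-target', '0', '600'])
    print(_coerce_motion_target(args.motion_target))  # (0.0, 600.0)
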
@@ -357,6 +366,19 @@ def _generate_args() -> argparse.ArgumentParser:
357
366
  required=True,
358
367
  help='End time for SUV calculation in seconds from scan start',
359
368
  type=float)
369
+
370
+ parser_seg_crop = subparsers.add_parser('seg-crop',help='Crop segmentation image to PET FOV')
371
+ _add_common_args(parser_seg_crop)
372
+ parser_seg_crop.add_argument('-s',
373
+ '--segmentation',
374
+ required=True,
375
+ help='Path to segmentation image',
376
+ type=str)
377
+
378
+ parser_eroded_wm = subparsers.add_parser('eroded-wm',
379
+ help='Add eroded white matter region to segmentation image')
380
+ _add_common_args(parser_eroded_wm)
381
+
360
382
  return parser
361
383
 
362
384
 
@@ -437,11 +459,11 @@ def main():
437
459
  start_time=args.start_time,
438
460
  end_time=args.end_time)
439
461
  case 'windowed_motion_corr':
440
- motion_corr.windowed_motion_corr_to_target(input_image_path=args.input_img,
441
- out_image_path=args.out_img,
442
- motion_target_option=motion_target,
443
- w_size=args.window_size,
444
- type_of_transform=args.transform_type)
462
+ motion_corrector = motion_corr.MotionCorrect()
463
+ motion_corrector(input_image_path=args.input_img,
464
+ output_image_path=args.out_img,
465
+ motion_target_option=motion_target,
466
+ window_duration=args.window_size)
445
467
  case 'rescale_image':
446
468
  input_img = ants.image_read(filename=args.input_img)
447
469
  out_img = image_operations_4d.rescale_image(input_image=input_img,
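
The `windowed_motion_corr` branch above now delegates to the `MotionCorrect` class added to petpal/preproc/motion_corr.py later in this diff. A usage sketch of the equivalent direct call, with placeholder paths and the keyword names used by the CLI:

.. code-block:: python

    from petpal.preproc import motion_corr

    motion_corrector = motion_corr.MotionCorrect()
    moco_img = motion_corrector(input_image_path='/path/to/pet.nii.gz',           # placeholder path
                                output_image_path='/path/to/petpal_moco.nii.gz',  # placeholder path
                                motion_target_option='weighted_series_sum',
                                window_duration=60.0)
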
@@ -454,6 +476,15 @@ def main():
454
476
  end_time=args.end_time,
455
477
  weight=args.weight,
456
478
  dose=args.dose)
479
+ case 'seg_crop':
480
+ input_img = ants.image_read(filename=args.input_img)
481
+ seg_img = ants.image_read(filename=args.segmentation)
482
+ seg_cropped = segmentation_tools.seg_crop_to_pet_fov(pet_img=input_img,
483
+ segmentation_img=seg_img)
484
+ ants.image_write(seg_cropped,args.out_img)
485
+ case 'eroded_wm':
486
+ segmentation_tools.eroded_wm_segmentation(input_segmentation_path=args.input_img,
487
+ out_segmentation_path=args.out_img)
457
488
 
458
489
  if __name__ == "__main__":
459
490
  main()
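
The new `seg_crop` and `eroded_wm` branches above correspond to the following direct calls into petpal (function names and keywords as they appear in this diff; paths are placeholders):

.. code-block:: python

    import ants
    from petpal.preproc import segmentation_tools

    # seg-crop: crop a segmentation image to the PET field of view
    pet_img = ants.image_read('/path/to/pet.nii.gz')
    seg_img = ants.image_read('/path/to/segmentation.nii.gz')
    cropped = segmentation_tools.seg_crop_to_pet_fov(pet_img=pet_img,
                                                     segmentation_img=seg_img)
    ants.image_write(cropped, '/path/to/petpal_cropped_seg.nii.gz')

    # eroded-wm: add an eroded white matter region to a FreeSurfer segmentation
    segmentation_tools.eroded_wm_segmentation(
        input_segmentation_path='/path/to/aparc+aseg.nii.gz',
        out_segmentation_path='/path/to/petpal_seg_with_eroded_wm.nii.gz')
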
petpal/io/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ from . import table
2
+ from . import image
3
+
4
+ def main():
5
+ print("PETPAL - Load and Save Module")
6
+
7
+
8
+ if __name__ == "__main__":
9
+ main()
petpal/io/image.py ADDED
@@ -0,0 +1,39 @@
1
+ """
2
+ Module for loading and saving images
3
+ """
4
+ from typing import Optional
5
+ from collections.abc import Callable
6
+ import ants
7
+
8
+
9
+ class ImageLoader:
10
+ """Class for reading image files, with extensions such as Nifti or MGZ.
11
+
12
+ See also: :py:func:`~ants.image_read`.
13
+
14
+ Example:
15
+
16
+ .. code-block:: python
17
+
18
+ from petpal.io.image import ImageLoader
19
+
20
+ image_loader = ImageLoader()
21
+ my_img = image_loader.load('/path/to/img.nii.gz')
22
+
23
+ :ivar _loader: Function that loads an image file as an ants.ANTsImage object.
24
+ """
25
+ def __init__(self, loader: Optional[Callable[[str], ants.ANTsImage]] = None):
26
+ self._loader = loader or ants.image_read
27
+
28
+ def load(self, filename: str) -> ants.ANTsImage:
29
+ """Public read API that delegates to the configured reader.
30
+
31
+ Args:
32
+ filename (str): Path to file that will be loaded as ANTsImage.
33
+
34
+ Returns:
35
+ img (ants.ANTsImage): Image object loaded into Python."""
36
+ return self._loader(filename)
37
+
38
+ def __call__(self, filename: str) -> ants.ANTsImage:
39
+ return self.load(filename=filename)
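
Because `ImageLoader` accepts an injectable loader callable, a test can substitute a stub that never touches disk. A minimal sketch under that assumption (the stub below is illustrative, not part of petpal):

.. code-block:: python

    import numpy as np
    import ants
    from petpal.io.image import ImageLoader

    def fake_loader(filename: str) -> ants.ANTsImage:
        """Stub loader for testing: ignores the path and returns a small blank image."""
        return ants.from_numpy(np.zeros((4, 4, 4), dtype='float32'))

    loader = ImageLoader(loader=fake_loader)
    img = loader('does_not_need_to_exist.nii.gz')  # __call__ delegates to load()
    print(img.shape)  # (4, 4, 4)
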
petpal/io/table.py ADDED
@@ -0,0 +1,94 @@
1
+ """
2
+ Module for reading and writing tables as TSV and CSV files.
3
+ """
4
+ import os
5
+ import tempfile
6
+ from typing import Optional
7
+ from collections.abc import Callable
8
+ from pathlib import Path
9
+ import dataclasses
10
+ import pandas as pd
11
+
12
+
13
+ def get_tabular_separator(ext: str) -> str:
14
+ """Get the separator corresponding to a given tabular data filetype.
15
+
16
+ '.csv' will return ',' while '.tsv' and '.txt' will return '\t'. Any other input will raise a
17
+ ValueError.
18
+
19
+ Args:
20
+ ext (str): Extension to get matching separator for.
21
+
22
+ Returns:
23
+ sep (str): Separator matched from extension.
24
+
25
+ Raises:
26
+ ValueError: If extension is not .csv, .tsv, or .txt.
27
+ """
28
+ matching_separators = {'.csv': ',', '.tsv': '\t', '.txt': '\t'}
29
+ try:
30
+ return matching_separators[ext]
31
+ except KeyError as exc:
32
+ error_msg = f"Only accepted extensions are {matching_separators.keys()}. Got {ext}."
33
+ raise ValueError(error_msg) from exc
34
+
35
+
36
+ @dataclasses.dataclass
37
+ class TableSaver:
38
+ """
39
+ Class for saving pandas DataFrame objects as CSV or TSV files based on a provided path.
40
+
41
+ - Default behavior writes atomically (write temp file + os.replace) to avoid partial files.
42
+ - Accepts an injectable writer callable for testing or alternative persistence backends.
43
+
44
+ Example:
45
+
46
+ .. code-block:: python
47
+
48
+ import pandas as pd
49
+ from petpal.io.table import TableSaver
50
+
51
+ table_saver = TableSaver()
52
+ my_data = pd.DataFrame(data={'time': [0, 1, 2], 'value': [1, 4, 9]})
53
+
54
+ # when file extension is .csv, uses commas to separate values
55
+ table_saver.save(my_data, 'table.csv')
56
+
57
+ # when file extension is .tsv or .txt, uses tabs to separate values
58
+ table_saver.save(my_data, 'table.txt')
59
+
60
+ :ivar _saver: Injectable tabular data saving function that saves a dataframe to a file.
61
+ """
62
+ def __init__(self, saver: Optional[Callable[[pd.DataFrame, str], None]] = None):
63
+ self._saver = saver or self._atomic_save
64
+
65
+ def _atomic_save(self, df: pd.DataFrame, path: str):
66
+ """Saves the data from a Pandas DataFrame object as a tabular file, such as CSV or TSV.
67
+
68
+ Args:
69
+ df (pd.DataFrame): Pandas DataFrame with data to be saved.
70
+ path (str): Path to file where data is saved.
71
+ """
72
+ dirpath = os.path.dirname(os.path.abspath(path)) or "."
73
+ suffix = Path(path).suffix
74
+ sep = get_tabular_separator(ext=suffix)
75
+ fd, tmp_path = tempfile.mkstemp(prefix="tmp_petpal_", dir=dirpath, suffix=suffix)
76
+ os.close(fd)
77
+ try:
78
+ df.to_csv(tmp_path, sep=sep)
79
+ os.replace(tmp_path, path)
80
+ finally:
81
+ if os.path.exists(tmp_path):
82
+ try:
83
+ os.remove(tmp_path)
84
+ except OSError:
85
+ pass
86
+
87
+ def save(self, df: pd.DataFrame, path: str) -> None:
88
+ """API that applies the table saving function assigned to `self._saver`.
89
+
90
+ Args:
91
+ df (pd.DataFrame): Pandas DataFrame with data to be saved.
92
+ path (str): Path to file where data is saved.
93
+ """
94
+ self._saver(df, path)
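
`TableSaver` follows the same injection pattern: the default `_atomic_save` writes to a temporary file and then `os.replace`s it into place, while a caller can supply any `(DataFrame, path)` callable instead. A minimal sketch with an in-memory stub writer (illustrative only, not part of petpal):

.. code-block:: python

    import pandas as pd
    from petpal.io.table import TableSaver

    captured = {}

    def fake_writer(df: pd.DataFrame, path: str) -> None:
        """Stub writer for testing: records the call instead of touching disk."""
        captured[path] = df.copy()

    saver = TableSaver(saver=fake_writer)
    saver.save(pd.DataFrame({'time': [0, 1], 'value': [1, 4]}), 'xfms.csv')
    print(list(captured))  # ['xfms.csv']
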
petpal/pipelines/preproc_steps.py CHANGED
@@ -642,7 +642,7 @@ class ImageToImageStep(FunctionBasedStep):
642
642
  """
643
643
  defaults = dict(name=name, function=windowed_motion_corr_to_target,
644
644
  input_image_path='', output_image_path='',
645
- motion_target_option='weighted_series_sum', w_size=60.0,
645
+ motion_target_option='weighted_series_sum', window_duration=60.0,
646
646
  verbose=verbose)
647
647
  override_dict = defaults | overrides
648
648
  try:
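
The `defaults | overrides` pattern shown above (dict union, Python 3.9+) lets a caller override only the keywords it cares about, such as the renamed `window_duration`. A small standalone illustration with placeholder values:

.. code-block:: python

    # Illustration of the defaults-merge pattern; values are placeholders.
    defaults = dict(input_image_path='', output_image_path='',
                    motion_target_option='weighted_series_sum',
                    window_duration=60.0, verbose=False)
    overrides = {'window_duration': 300.0}

    merged = defaults | overrides     # right-hand side wins on key collisions
    print(merged['window_duration'])  # 300.0
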
petpal/preproc/motion_corr.py CHANGED
@@ -4,17 +4,23 @@ Provides methods to motion correct 4D PET data. Includes method
4
4
  4D input data to optimize contrast when computing motion correction or
5
5
  registration.
6
6
  """
7
+ from typing import Optional
7
8
  import ants
8
9
  import numpy as np
10
+ import pandas as pd
11
+ from scipy.spatial.transform import Rotation
9
12
 
10
13
  from petpal.utils.useful_functions import gen_nd_image_based_on_image_list
11
-
12
-
13
14
  from .motion_target import determine_motion_target
14
15
  from ..utils import image_io
15
- from ..utils.scan_timing import ScanTimingInfo, get_window_index_pairs_for_image
16
- from ..utils.useful_functions import weighted_series_sum_over_window_indecies
17
- from ..utils.image_io import get_half_life_from_nifti
16
+ from ..utils.scan_timing import (ScanTimingInfo,
17
+ get_window_index_pairs_from_durations,
18
+ get_window_index_pairs_for_image)
19
+ from ..utils.useful_functions import (weighted_series_sum_over_window_indices,
20
+ coerce_outpath_extension)
21
+ from ..utils.image_io import get_half_life_from_nifti, safe_copy_meta
22
+ from ..io.table import TableSaver
23
+ from ..io.image import ImageLoader
18
24
 
19
25
 
20
26
  def motion_corr(input_image_path: str,
@@ -425,11 +431,230 @@ def motion_corr_frames_above_mean_value_to_t1(input_image_path: str,
425
431
  type_of_transform=type_of_transform,
426
432
  transform_metric=transform_metric)
427
433
 
434
+ class MotionCorrect:
435
+ """Run windowed motion correction on an image and save the result"""
436
+ def __init__(self,
437
+ image_loader: Optional[ImageLoader] = None,
438
+ table_saver: Optional[TableSaver] = None):
439
+ self.image_loader = image_loader or ImageLoader()
440
+ self.table_saver = table_saver or TableSaver()
441
+ self.input_img = None
442
+ self.target_img = None
443
+ self.scan_timing = None
444
+ self.half_life = None
445
+ self.reg_kwargs = self.default_reg_kwargs
446
+
447
+ @property
448
+ def default_reg_kwargs(self) -> dict:
449
+ """Default registration arguments passed on to :py:func:`~ants.registration`."""
450
+ reg_kwargs_default = {'aff_metric' : 'mattes',
451
+ 'write_composite_transform': True,
452
+ 'interpolator' : 'linear',
453
+ 'type_of_transform' : 'DenseRigid'}
454
+ return reg_kwargs_default
455
+
456
+ def set_reg_kwargs(self, **reg_kwargs):
457
+ """Modify the registration arguments passed on to :py:func:`~ants.registration`."""
458
+ self.reg_kwargs.update(**reg_kwargs)
459
+
460
+ def get_input_scan_properties(self, input_image_path: str):
461
+ """Load input image and get half life and scan timing."""
462
+ self.input_img = self.image_loader.load(filename=input_image_path)
463
+ self.half_life = get_half_life_from_nifti(image_path=input_image_path)
464
+ self.scan_timing = ScanTimingInfo.from_nifti(image_path=input_image_path)
465
+
466
+ def get_target_img(self, input_image_path: str, motion_target_option: str | tuple):
467
+ """Get the motion target and load it as an image."""
468
+ motion_target_path = determine_motion_target(motion_target_option=motion_target_option,
469
+ input_image_path=input_image_path)
470
+ self.target_img = self.image_loader.load(filename=motion_target_path)
471
+
472
+ def window_index_pairs(self, window_duration: float=300):
473
+ """The pair of indices corresponding to each window in the image."""
474
+ return get_window_index_pairs_from_durations(frame_durations=self.scan_timing.duration,
475
+ window_duration=window_duration)
476
+
477
+ def window_target_img(self, start_index: int, end_index: int):
478
+ """Calculates the sum over frames in the target image within the provided time window."""
479
+ return weighted_series_sum_over_window_indices(input_image_4d=self.input_img,
480
+ output_image_path=None,
481
+ window_start_id=start_index,
482
+ window_end_id=end_index,
483
+ half_life=self.half_life,
484
+ image_frame_info=self.scan_timing)
485
+
486
+ @staticmethod
487
+ def ants_xfm_to_rigid_pars(ants_xfm: ants.ANTsTransform):
488
+ """Convert an ants transform object to six parameters (3 translation, 3 rotation) and the
489
+ center reference point."""
490
+ xfm_in = np.reshape(ants_xfm.parameters,(4,3))
491
+ rot_matrix = xfm_in[:3,:]
492
+ translate_matrix = xfm_in[3,:]
493
+
494
+ scipy_rotation = Rotation.from_matrix(rot_matrix)
495
+ rot_pars = -scipy_rotation.as_euler('xyz',degrees=True)
496
+
497
+ xfm_out = list(rot_pars)+list(translate_matrix)+list(ants_xfm.fixed_parameters)
498
+ return xfm_out
499
+
500
+ def run_motion_correct(self, window_duration: float=300):
501
+ """Run motion correction on the input image to the target image."""
502
+ moco_img_stack = []
503
+ window_xfm_stack = []
504
+ input_img_list = ants.ndimage_to_list(self.input_img)
505
+ for _, (st_id, end_id) in enumerate(zip(*self.window_index_pairs(window_duration=window_duration))):
506
+ window_target_img = self.window_target_img(start_index=st_id, end_index=end_id)
507
+ window_registration = ants.registration(fixed=self.target_img,
508
+ moving=window_target_img,
509
+ **self.reg_kwargs)
510
+ window_xfm = ants.read_transform(window_registration['fwdtransforms'])
511
+ window_xfm_stack.append(self.ants_xfm_to_rigid_pars(window_xfm))
512
+ for frm_id in range(st_id, end_id):
513
+ moco_img_stack.append(ants.apply_transforms(fixed=self.target_img,
514
+ moving=input_img_list[frm_id],
515
+ transformlist=window_registration['fwdtransforms']))
516
+ moco_img = gen_timeseries_from_image_list(moco_img_stack)
517
+ return moco_img, np.asarray(window_xfm_stack)
518
+
519
+ def save_xfm_parameters(self, window_xfms: np.ndarray, filename: str):
520
+ """Save window transform parameters as a table.
521
+
522
+ Args:
523
+ window_xfms (np.ndarray): Rigid transform parameters ordered as rotation, translation,
524
+ centerpoint, then X, Y, Z axis, totalling 9 parameters for each window.
525
+ filename (str): Path to where table will be saved, including extension.
526
+
527
+ Raises:
528
+ ValueError: If transform type does not contain 'Rigid'. Saving transform parameters is
529
+ currently only available for rigid transforms."""
530
+ if 'Rigid' not in self.reg_kwargs['type_of_transform']:
531
+ raise ValueError("Saving transform parameters is only available for rigid "
532
+ "registrations. Current transform type: "
533
+ f"{self.reg_kwargs['type_of_transform']}")
534
+ xfm_columns = ['rot_x',
535
+ 'rot_y',
536
+ 'rot_z',
537
+ 'tra_x',
538
+ 'tra_y',
539
+ 'tra_z',
540
+ 'cen_x',
541
+ 'cen_y',
542
+ 'cen_z']
543
+ xfms_df = pd.DataFrame(data=window_xfms,
544
+ columns=xfm_columns)
545
+ xfms_df.index.name = 'window'
546
+ csv_filename = coerce_outpath_extension(path=filename, ext='.csv')
547
+ self.table_saver.save(xfms_df,csv_filename)
548
+
549
+ def __call__(self, input_image_path: str,
550
+ output_image_path: str,
551
+ motion_target_option: str | tuple,
552
+ window_duration: float = 300,
553
+ copy_metadata: bool = True,
554
+ save_xfm: bool = True,
555
+ **reg_kwargs):
556
+ """Motion correct a dynamic PET image.
557
+
558
+ Divides the image into windows of `window_duration` seconds and registers each window of frames
559
+ to the target image, applying the same transformation to every frame within a window.
560
+
561
+ Args:
562
+ input_image_path (str): Path to dynamic PET image.
563
+ output_image_path (str): Path to which motion corrected image is saved.
564
+ motion_target_option (str | tuple): Path to a motion target image, a time window
565
+ such as (0, 600), or a preset option such as 'mean_image'. See
566
+ :py:func:`~petpal.preproc.motion_target.determine_motion_target`.
567
+ window_duration (float): Duration of each window in seconds. Default 300.
568
+ copy_metadata (bool): Copies metadata info from input image to output image. Default
569
+ True.
570
+ save_xfm (bool): Saves motion correction transform parameters for translation,
571
+ rotation, and rotation center point. Only compatible with rigid transforms. Default
572
+ True.
573
+ """
574
+ self.get_input_scan_properties(input_image_path=input_image_path)
575
+ self.get_target_img(input_image_path=input_image_path,
576
+ motion_target_option=motion_target_option)
577
+
578
+ self.set_reg_kwargs(**reg_kwargs)
579
+
580
+ moco_img, window_xfms = self.run_motion_correct(window_duration=window_duration)
581
+
582
+ if save_xfm:
583
+ self.save_xfm_parameters(window_xfms=window_xfms, filename=output_image_path)
584
+ ants.image_write(image=moco_img, filename=output_image_path)
585
+ if copy_metadata:
586
+ safe_copy_meta(input_image_path=input_image_path, out_image_path=output_image_path)
587
+
588
+ return moco_img
589
+
590
+ def gen_timeseries_from_image_list(image_list: list[ants.core.ANTsImage]) -> ants.core.ANTsImage:
591
+ r"""
592
+ Takes a list of ANTs ndimages, and generates a 4D ndimage. Undoes :func:`ants.ndimage_to_list`
593
+ so that we take a list of 3D images and generates a 4D image.
594
+
595
+ Args:
596
+ image_list (list[ants.core.ANTsImage]): A list of ndimages.
597
+
598
+ Returns:
599
+ ants.core.ANTsImage: 4D ndimage.
600
+ """
601
+ tmp_image = gen_nd_image_based_on_image_list(image_list)
602
+ return ants.list_to_ndimage(tmp_image, image_list)
603
+
604
+
605
+ def _get_list_of_frames_above_total_mean(image_4d_path: str,
606
+ scale_factor: float = 1.0):
607
+ """
608
+ Get the frame indices where the frame mean is higher than the total mean of a 4D image.
609
+
610
+ This function calculates the mean voxel value of each frame in a 4D image and returns the
611
+ indices of the frames whose mean voxel value is greater than or equal to the mean voxel
612
+ value of the entire image, optionally scaled by a provided factor.
613
+
614
+ Args:
615
+ image_4d_path (str): Path to the input 4D PET image file.
616
+ scale_factor (float, optional): Scale factor applied to the mean voxel value of the entire
617
+ image for comparison. Must be greater than 0. Default is 1.0.
618
+
619
+ Returns:
620
+ list: A list of frame indices where the frame mean voxel value is greater than or equal to
621
+ the scaled total mean voxel value.
622
+
623
+ Example:
624
+
625
+ .. code-block:: python
626
+
627
+ from petpal.preproc.motion_corr import _get_list_of_frames_above_total_mean
628
+
629
+ frame_ids = _get_list_of_frames_above_total_mean(image_4d_path='/path/to/image.nii.gz',
630
+ scale_factor=1.2)
631
+
632
+ print(frame_ids) # Output: [0, 3, 5, ...]
633
+
634
+ Notes:
635
+ - The :func:`ants.image_read` from ANTsPy is used to read the 4D image into memory.
636
+ - The mean voxel value of the entire image is scaled by `scale_factor` for comparison with
637
+ individual frame means.
638
+ - The function uses the :func:`ants.ndimage_to_list` method from ANTsPy to convert the 4D
639
+ image into a list of 3D frames.
640
+
641
+ """
642
+ assert scale_factor > 0
643
+ image = ants.image_read(image_4d_path)
644
+ total_mean = scale_factor * image.mean()
645
+
646
+ frames_list = []
647
+ for frame_id, a_frame in enumerate(image.ndimage_to_list()):
648
+ if a_frame.mean() >= total_mean:
649
+ frames_list.append(frame_id)
650
+
651
+ return frames_list
652
+
428
653
 
429
654
  def windowed_motion_corr_to_target(input_image_path: str,
430
655
  out_image_path: str | None,
431
656
  motion_target_option: str | tuple,
432
- w_size: float,
657
+ window_duration: float,
433
658
  type_of_transform: str = 'QuickRigid',
434
659
  interpolator: str = 'linear',
435
660
  copy_metadata: bool = True,
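
Putting the pieces of `MotionCorrect` together: extra keyword arguments passed to the call are forwarded to `ants.registration` via `set_reg_kwargs`, and when `save_xfm` is enabled with a rigid transform the per-window parameters are written as a CSV whose name is derived from the output image path. A usage sketch under those assumptions (paths are placeholders):

.. code-block:: python

    from petpal.preproc.motion_corr import MotionCorrect

    moco = MotionCorrect()
    # Any ants.registration keyword can be overridden; a 'Rigid' transform keeps save_xfm usable.
    moco_img = moco(input_image_path='/path/to/pet.nii.gz',            # placeholder path
                    output_image_path='/path/to/petpal_moco.nii.gz',   # placeholder path
                    motion_target_option=(0, 600),                     # weighted sum of the first 10 minutes
                    window_duration=300.0,
                    save_xfm=True,                                     # expected to write petpal_moco.csv alongside
                    type_of_transform='Rigid')
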
@@ -445,6 +670,10 @@ def windowed_motion_corr_to_target(input_image_path: str,
445
670
  The motion-target will determine the space of the output image. If we provide a T1 image
446
671
  as the `motion_target_option`, the output image will be in T1-space.
447
672
 
673
+ Note:
674
+ This function is deprecated. Use :py:class:`~petpal.preproc.motion_corr.MotionCorrect`
675
+ instead.
676
+
448
677
  Args:
449
678
  input_image_path (str): Path to the input 4D PET image file.
450
679
  out_image_path (str | None): Path to save the resulting motion-corrected image. If
@@ -452,7 +681,7 @@ def windowed_motion_corr_to_target(input_image_path: str,
452
681
  motion_target_option (str | tuple): Option to determine the motion target. This can
453
682
  be a path to a specific image file, a tuple of frame indices to generate a target, or
454
683
  specific options recognized by :func:`determine_motion_target`.
455
- w_size (float): Window size in seconds for dividing the image into time sections.
684
+ window_duration (float): Window size in seconds for dividing the image into time sections.
456
685
  type_of_transform (str): Type of transformation to use in registration (default: 'QuickRigid').
457
686
  interpolator (str): Interpolation method for the transformation (default: 'linear').
458
687
  **kwargs: Additional arguments passed to :func:`ants.registration`.
@@ -462,7 +691,7 @@ def windowed_motion_corr_to_target(input_image_path: str,
462
691
 
463
692
  Workflow:
464
693
  1. Reads the input 4D image and splits it into individual frames.
465
- 2. Computes index windows based on the specified window size (`w_size`).
694
+ 2. Computes index windows based on the specified window size (`window_duration`).
466
695
  3. Extracts necessary frame timing information and the tracer's half-life.
467
696
  4. For each window:
468
697
  - Calculates a weighted sum image for the window.
@@ -477,7 +706,7 @@ def windowed_motion_corr_to_target(input_image_path: str,
477
706
  """
478
707
  input_image = ants.image_read(filename=input_image_path)
479
708
  input_image_list = ants.ndimage_to_list(input_image)
480
- window_idx_pairs = get_window_index_pairs_for_image(image_path=input_image_path, w_size=w_size)
709
+ window_idx_pairs = get_window_index_pairs_for_image(image_path=input_image_path, window_duration=window_duration)
481
710
  half_life = get_half_life_from_nifti(image_path=input_image_path)
482
711
  frame_timing_info = ScanTimingInfo.from_nifti(image_path=input_image_path)
483
712
 
@@ -490,13 +719,13 @@ def windowed_motion_corr_to_target(input_image_path: str,
490
719
  reg_kwargs = {**reg_kwargs_default, **kwargs}
491
720
 
492
721
  out_image = []
493
- for win_id, (st_id, end_id) in enumerate(zip(*window_idx_pairs)):
494
- window_tgt_image = weighted_series_sum_over_window_indecies(input_image_4d=input_image,
495
- output_image_path=None,
496
- window_start_id=st_id,
497
- window_end_id=end_id,
498
- half_life=half_life,
499
- image_frame_info=frame_timing_info)
722
+ for _, (st_id, end_id) in enumerate(zip(*window_idx_pairs)):
723
+ window_tgt_image = weighted_series_sum_over_window_indices(input_image_4d=input_image,
724
+ output_image_path=None,
725
+ window_start_id=st_id,
726
+ window_end_id=end_id,
727
+ half_life=half_life,
728
+ image_frame_info=frame_timing_info)
500
729
  window_registration = ants.registration(fixed=target_image,
501
730
  moving=window_tgt_image,
502
731
  type_of_transform=type_of_transform,
@@ -516,66 +745,3 @@ def windowed_motion_corr_to_target(input_image_path: str,
516
745
  image_io.safe_copy_meta(input_image_path=input_image_path,
517
746
  out_image_path=out_image_path)
518
747
  return out_image
519
-
520
- def gen_timeseries_from_image_list(image_list: list[ants.core.ANTsImage]) -> ants.core.ANTsImage:
521
- r"""
522
- Takes a list of ANTs ndimages, and generates a 4D ndimage. Undoes :func:`ants.ndimage_to_list`
523
- so that we take a list of 3D images and generates a 4D image.
524
-
525
- Args:
526
- image_list (list[ants.core.ANTsImage]): A list of ndimages.
527
-
528
- Returns:
529
- ants.core.ANTsImage: 4D ndimage.
530
- """
531
- tmp_image = gen_nd_image_based_on_image_list(image_list)
532
- return ants.list_to_ndimage(tmp_image, image_list)
533
-
534
-
535
- def _get_list_of_frames_above_total_mean(image_4d_path: str,
536
- scale_factor: float = 1.0):
537
- """
538
- Get the frame indices where the frame mean is higher than the total mean of a 4D image.
539
-
540
- This function calculates the mean voxel value of each frame in a 4D image and returns the
541
- indices of the frames whose mean voxel value is greater than or equal to the mean voxel
542
- value of the entire image, optionally scaled by a provided factor.
543
-
544
- Args:
545
- image_4d_path (str): Path to the input 4D PET image file.
546
- scale_factor (float, optional): Scale factor applied to the mean voxel value of the entire
547
- image for comparison. Must be greater than 0. Default is 1.0.
548
-
549
- Returns:
550
- list: A list of frame indices where the frame mean voxel value is greater than or equal to
551
- the scaled total mean voxel value.
552
-
553
- Example:
554
-
555
- .. code-block:: python
556
-
557
- from petpal.preproc.motion_corr import _get_list_of_frames_above_total_mean
558
-
559
- frame_ids = _get_list_of_frames_above_total_mean(image_4d_path='/path/to/image.nii.gz',
560
- scale_factor=1.2)
561
-
562
- print(frame_ids) # Output: [0, 3, 5, ...]
563
-
564
- Notes:
565
- - The :func:`ants.image_read` from ANTsPy is used to read the 4D image into memory.
566
- - The mean voxel value of the entire image is scaled by `scale_factor` for comparison with
567
- individual frame means.
568
- - The function uses the :func:`ants.ndimage_to_list` method from ANTsPy to convert the 4D
569
- image into a list of 3D frames.
570
-
571
- """
572
- assert scale_factor > 0
573
- image = ants.image_read(image_4d_path)
574
- total_mean = scale_factor * image.mean()
575
-
576
- frames_list = []
577
- for frame_id, a_frame in enumerate(image.ndimage_to_list()):
578
- if a_frame.mean() >= total_mean:
579
- frames_list.append(frame_id)
580
-
581
- return frames_list
petpal/preproc/segmentation_tools.py CHANGED
@@ -84,7 +84,9 @@ def segmentations_merge(segmentation_primary: np.ndarray,
84
84
  regions added.
85
85
  """
86
86
  for region in regions:
87
- region_mask = (segmentation_secondary > region - 0.1) & (segmentation_secondary < region + 0.1)
87
+ condition_above = segmentation_secondary > region - 0.1
88
+ condition_below = segmentation_secondary < region + 0.1
89
+ region_mask = condition_above & condition_below
88
90
  segmentation_primary[region_mask] = region
89
91
  return segmentation_primary
90
92
 
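
The refactor above only splits the tolerance check into two named conditions; the mask still selects voxels within 0.1 of the region label. A standalone NumPy illustration of the same logic:

.. code-block:: python

    import numpy as np

    segmentation_secondary = np.array([0.0, 2.0, 2.05, 3.0])
    region = 2

    condition_above = segmentation_secondary > region - 0.1
    condition_below = segmentation_secondary < region + 0.1
    region_mask = condition_above & condition_below
    print(region_mask)  # [False  True  True False]
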
@@ -229,8 +231,9 @@ def resample_segmentation(input_image_path: str,
229
231
  seg_image = nibabel.load(segmentation_image_path)
230
232
  pet_series = pet_image.get_fdata()
231
233
  image_first_frame = pet_series[:, :, :, 0]
234
+ to_vox_map_tuple = (image_first_frame.shape, pet_image.affine)
232
235
  seg_resampled = processing.resample_from_to(from_img=seg_image,
233
- to_vox_map=(image_first_frame.shape, pet_image.affine),
236
+ to_vox_map=to_vox_map_tuple,
234
237
  order=0)
235
238
  nibabel.save(seg_resampled, out_seg_path)
236
239
  if verbose:
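
`resample_from_to` accepts either a target image or a (shape, affine) pair; the change above simply names that pair before passing it, with `order=0` keeping the nearest-neighbour interpolation appropriate for label images. A small self-contained sketch with synthetic data (not taken from petpal):

.. code-block:: python

    import numpy as np
    import nibabel
    from nibabel import processing

    # Synthetic "PET" grid and a coarser "segmentation" to resample onto it.
    pet_affine = np.eye(4)
    pet_first_frame_shape = (10, 10, 10)
    seg_img = nibabel.Nifti1Image(np.random.randint(0, 3, (5, 5, 5)).astype(np.int16),
                                  affine=np.diag([2.0, 2.0, 2.0, 1.0]))

    to_vox_map_tuple = (pet_first_frame_shape, pet_affine)
    seg_resampled = processing.resample_from_to(from_img=seg_img,
                                                to_vox_map=to_vox_map_tuple,
                                                order=0)  # nearest-neighbour keeps label values intact
    print(seg_resampled.shape)  # (10, 10, 10)
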
@@ -238,23 +241,30 @@ def resample_segmentation(input_image_path: str,
238
241
 
239
242
 
240
243
  def vat_wm_ref_region(input_segmentation_path: str,
241
- out_segmentation_path: str):
244
+ out_segmentation_path: str | None) -> ants.ANTsImage:
242
245
  """
243
246
  Generates the cortical white matter reference region described in O'Donnell
244
- JL et al. (2024) PET Quantification of [18F]VAT in Human Brain and Its
247
+ JL et al. (2024).
248
+
249
+ Reference: O'Donnell JL et al. (2024). PET Quantification of [18F]VAT in Human Brain and Its
245
250
  Test-Retest Reproducibility and Age Dependence. J Nucl Med. 2024 Jun
246
251
  3;65(6):956-961. doi: 10.2967/jnumed.123.266860. PMID: 38604762; PMCID:
247
- PMC11149597. Requires FreeSurfer segmentation with original label mappings.
252
+ PMC11149597.
253
+
254
+ Requires FreeSurfer segmentation with original label mappings.
248
255
 
249
256
  Args:
250
257
  input_segmentation_path (str): Path to segmentation on which white
251
258
  matter reference region is computed.
252
259
  out_segmentation_path (str): Path to which white matter reference
253
260
  region mask image is saved.
261
+
262
+ Returns:
263
+ wm_erode (ants.ANTsImage): Eroded white matter reference region mask image.
254
264
  """
255
265
  wm_regions = [2,41,251,252,253,254,255,77,3000,3001,3002,3003,3004,3005,
256
266
  3006,3007,3008,3009,3010,3011,3012,3013,3014,3015,3016,3017,
257
- 3018,3019,3020,3021,3022,3023,3024,3025,3026,3027,3018,3029,
267
+ 3018,3019,3020,3021,3022,3023,3024,3025,3026,3027,3028,3029,
258
268
  3030,3031,3032,3033,3034,3035,4000,4001,4002,4003,4004,4005,
259
269
  4006,4007,4008,4009,4010,4011,4012,4013,4014,4015,4016,4017,
260
270
  4018,4019,4020,4021,4022,4023,4024,4025,4026,4027,4028,4029,
@@ -277,7 +287,45 @@ def vat_wm_ref_region(input_segmentation_path: str,
277
287
  wm_csf_eroded = ants.threshold_image(image=wm_csf_blurred, low_thresh=0.95, binary=True)
278
288
  wm_erode = ants.mask_image(image=wm_merged, mask=wm_csf_eroded)
279
289
 
280
- ants.image_write(image=wm_erode, filename=out_segmentation_path)
290
+ if out_segmentation_path is not None:
291
+ ants.image_write(image=wm_erode, filename=out_segmentation_path)
292
+
293
+ return wm_erode
294
+
295
+
296
+ def eroded_wm_segmentation(input_segmentation_path: str,
297
+ out_segmentation_path: str | None,
298
+ eroded_wm_region_mapping: int = 1) -> ants.ANTsImage:
299
+ """
300
+ Generates an eroded white matter region from a segmentation image and merges it into the image,
301
+ saving the result as a new segmentation image.
302
+
303
+ Requires FreeSurfer segmentation with original label mappings.
304
+
305
+ Args:
306
+ input_segmentation_path (str): Path to input freesurfer segmentation, such as aparc+aseg or
307
+ wmparc.
308
+ out_segmentation_path (str): Path to output segmentation image with replaced values in
309
+ the eroded white matter region.
310
+ eroded_wm_region_mapping (int): Segmentation mapping for the eroded white matter region in
311
+ the output image. Default "1".
312
+
313
+ Returns:
314
+ seg_img (ants.ANTsImage): Input segmentation image with values in eroded white matter
315
+ replaced with `eroded_wm_region_mapping`.
316
+
317
+ See also:
318
+ :meth:`~petpal.preproc.segmentation_tools.vat_wm_ref_region` - function that generates the
319
+ eroded white matter region.
320
+ """
321
+ wm_erode = vat_wm_ref_region(input_segmentation_path=input_segmentation_path,
322
+ out_segmentation_path=None)
323
+ seg_img = ants.image_read(input_segmentation_path)
324
+ seg_img[wm_erode==1] = int(eroded_wm_region_mapping)
325
+ if out_segmentation_path is not None:
326
+ ants.image_write(image=seg_img, filename=out_segmentation_path)
327
+
328
+ return seg_img
281
329
 
282
330
 
283
331
  def vat_wm_region_merge(wmparc_segmentation_path: str,
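
Since `vat_wm_ref_region` now returns the eroded mask and accepts `out_segmentation_path=None`, it can be used entirely in memory, and `eroded_wm_segmentation` builds on it to relabel the segmentation. A usage sketch with placeholder paths (signatures as they appear in this diff):

.. code-block:: python

    from petpal.preproc import segmentation_tools

    # Get the eroded white matter mask without writing it to disk.
    wm_erode = segmentation_tools.vat_wm_ref_region(
        input_segmentation_path='/path/to/aparc+aseg.nii.gz',
        out_segmentation_path=None)

    # Or fold it into the segmentation under a chosen label (here 1).
    seg_with_wm = segmentation_tools.eroded_wm_segmentation(
        input_segmentation_path='/path/to/aparc+aseg.nii.gz',
        out_segmentation_path='/path/to/petpal_seg_with_eroded_wm.nii.gz',
        eroded_wm_region_mapping=1)
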
@@ -500,7 +548,8 @@ def calc_vesselness_mask_from_quantiled_vesselness(input_image: ants.core.ANTsIm
500
548
  morph_dil_radius: int = 0,
501
549
  z_crop: int = 3) -> ants.core.ANTsImage:
502
550
  """
503
- Generates a binary vesselness mask from a given vesselness image using quantile-based thresholding.
551
+ Generates a binary vesselness mask from a given vesselness image using quantile-based
552
+ thresholding.
504
553
 
505
554
  This function creates a binary mask by thresholding a vesselness image at a specified
506
555
  quantile of non-zero voxel values. Additionally, it allows for optional z-axis cropping
petpal/utils/scan_timing.py CHANGED
@@ -248,62 +248,64 @@ class ScanTimingInfo:
248
248
  decay=frame_decay)
249
249
 
250
250
 
251
- def get_window_index_pairs_from_durations(frame_durations: np.ndarray, w_size: float):
251
+ def get_window_index_pairs_from_durations(frame_durations: np.ndarray, window_duration: float):
252
252
  r"""
253
253
  Computes start and end index pairs for windows of a given size based on frame durations.
254
254
 
255
255
  Args:
256
256
  frame_durations (np.ndarray): Array of frame durations in seconds.
257
- w_size (float): Window size in seconds.
257
+ window_duration (float): Window size in seconds.
258
258
 
259
259
  Returns:
260
260
  np.ndarray: Array of shape (2, N), where the first row contains start indices,
261
261
  and the second row contains end indices for each window.
262
262
 
263
263
  Raises:
264
- ValueError: If `w_size` is less than or equal to 0.
265
- ValueError: If `w_size` is greater than the total duration of all frames.
264
+ ValueError: If `window_duration` is less than or equal to 0.
265
+ ValueError: If `window_duration` is greater than the total duration of all frames.
266
266
  """
267
- if w_size <= 0:
268
- raise ValueError("Window size has to be > 0")
269
- if w_size > np.sum(frame_durations):
270
- raise ValueError("Window size is larger than the whole scan.")
271
- _tmp_w_ids = [0]
272
- _w_dur_sum = 0
273
- for frm_id, frm_dur in enumerate(frame_durations):
274
- _w_dur_sum += frm_dur
275
- if _w_dur_sum >= w_size:
276
- _tmp_w_ids.append(frm_id + 1)
277
- _w_dur_sum = 0
278
- w_start_ids = np.asarray(_tmp_w_ids[:-1])
279
- w_end_ids = np.asarray(_tmp_w_ids[1:])
280
- id_pairs = np.vstack((w_start_ids, w_end_ids))
281
- return id_pairs
282
-
283
-
284
- def get_window_index_pairs_for_image(image_path: str, w_size: float):
267
+ if window_duration <= 0:
268
+ raise ValueError("Window duration has to be > 0")
269
+ if window_duration > np.sum(frame_durations):
270
+ raise ValueError("Window duration is longer than the whole scan.")
271
+ window_edge_indices = [0]
272
+ window_duration_rolling_sum = 0
273
+ for frame_index, frame_dur in enumerate(frame_durations):
274
+ window_duration_rolling_sum += frame_dur
275
+ if window_duration_rolling_sum >= window_duration:
276
+ window_edge_indices.append(frame_index + 1)
277
+ window_duration_rolling_sum = 0
278
+ if window_edge_indices[-1]!=len(frame_durations):
279
+ window_edge_indices.append(len(frame_durations))
280
+ window_start_indices = np.asarray(window_edge_indices[:-1])
281
+ window_end_indices = np.asarray(window_edge_indices[1:])
282
+ index_pairs = np.vstack((window_start_indices, window_end_indices))
283
+ return index_pairs
284
+
285
+
286
+ def get_window_index_pairs_for_image(image_path: str, window_duration: float):
285
287
  """
286
288
  Computes start and end index pairs for windows of a given size
287
289
  based on the frame durations of a NIfTI image.
288
290
 
289
291
  Args:
290
292
  image_path (str): Path to the NIfTI image file.
291
- w_size (float): Window size in seconds.
293
+ window_duration (float): Window size in seconds.
292
294
 
293
295
  Returns:
294
296
  np.ndarray: Array of shape (2, N), where the first row contains start indices,
295
297
  and the second row contains end indices for each window.
296
298
 
297
299
  Raises:
298
- ValueError: If `w_size` is less than or equal to 0.
299
- ValueError: If `w_size` is greater than the total duration of all frames.
300
+ ValueError: If `window_duration` is less than or equal to 0.
301
+ ValueError: If `window_duration` is greater than the total duration of all frames.
300
302
 
301
303
  See Also:
302
304
  :func:`get_window_index_pairs_from_durations`
303
305
  """
304
306
  image_frame_info = ScanTimingInfo.from_nifti(image_path=image_path)
305
307
  return get_window_index_pairs_from_durations(frame_durations=image_frame_info.duration,
306
- w_size=w_size)
308
+ window_duration=window_duration)
307
309
 
308
310
 
309
311
  def calculate_frame_reference_time(frame_duration: np.ndarray,
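
A small worked example of the rewritten windowing logic above: frames that do not fill a whole window are now kept as a final, shorter window rather than dropped. The expected output is inferred from the function as shown in this diff:

.. code-block:: python

    import numpy as np
    from petpal.utils.scan_timing import get_window_index_pairs_from_durations

    frame_durations = np.array([60, 60, 60, 60, 60, 120, 120])  # 540 s total
    pairs = get_window_index_pairs_from_durations(frame_durations=frame_durations,
                                                  window_duration=300)
    print(pairs)
    # Expected, based on the function above:
    # [[0 5]
    #  [5 7]]  -> frames 0-4 (first 300 s) and frames 5-6 (remaining 240 s)
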
petpal/utils/useful_functions.py CHANGED
@@ -2,13 +2,14 @@
2
2
  Module to handle abstracted functionalities
3
3
  """
4
4
  from collections.abc import Callable
5
+ from pathlib import Path
6
+ import re
5
7
  import os
6
8
  import nibabel
7
9
  import numpy as np
8
10
  import pandas as pd
9
11
  from scipy.interpolate import interp1d
10
12
  import ants
11
- import re
12
13
 
13
14
  from . import image_io, math_lib, scan_timing
14
15
 
@@ -208,12 +209,12 @@ def weighted_series_sum(input_image_path: str,
208
209
 
209
210
  return image_weighted_sum
210
211
 
211
- def weighted_series_sum_over_window_indecies(input_image_4d: ants.core.ANTsImage | str,
212
- output_image_path: str | None,
213
- window_start_id: int,
214
- window_end_id: int,
215
- half_life: float,
216
- image_frame_info: scan_timing.ScanTimingInfo) -> ants.core.ANTsImage | None:
212
+ def weighted_series_sum_over_window_indices(input_image_4d: ants.core.ANTsImage | str,
213
+ output_image_path: str | None,
214
+ window_start_id: int,
215
+ window_end_id: int,
216
+ half_life: float,
217
+ image_frame_info: scan_timing.ScanTimingInfo) -> ants.core.ANTsImage | None:
217
218
  r"""
218
219
  Computes a weighted series sum over a specified window of indices for a 4D PET image.
219
220
 
@@ -236,7 +237,7 @@ def weighted_series_sum_over_window_indecies(input_image_4d: ants.core.ANTsImage
236
237
  Note:
237
238
  If `output_image_path` is provided, the computed image will be saved to the specified path.
238
239
  This allows us to utilize ANTs pipelines
239
- ``weighted_series_sum_over_window_indecies(...).get_center_of_mass()`` for example.
240
+ ``weighted_series_sum_over_window_indices(...).get_center_of_mass()`` for example.
240
241
 
241
242
  """
242
243
  if isinstance(input_image_4d, str):
@@ -532,3 +533,30 @@ def gen_nd_image_based_on_image_list(image_list: list[ants.ANTsImage]) -> ants.A
532
533
  origin=origin_4d,
533
534
  direction=direction_4d)
534
535
  return tmp_image
536
+
537
+ def coerce_outpath_extension(path: str, ext: str) -> str:
538
+ """Coerce a path to the same absolute path with a provided filetype extension.
539
+
540
+ Args:
541
+ path (str): Path to a file.
542
+ ext (str): Desired output extension.
543
+
544
+ Returns:
545
+ abs_path_with_extension (str): Absolute path of the input file with the modified extension.
546
+
547
+ Example:
548
+
549
+ .. code-block:: python
550
+
551
+ from petpal.utils.useful_functions import coerce_outpath_extension
552
+
553
+ my_path = 'my_file.nii.gz'
554
+ my_csv_file = coerce_outpath_extension(my_path, '.csv')
555
+ print(my_csv_file) # prints '/current/working/directory/my_file.csv'
556
+
557
+ """
558
+ path_obj = Path(path)
559
+ while path_obj.suffix!='':
560
+ path_obj = path_obj.with_suffix('')
561
+ path_obj_with_suffix = path_obj.with_suffix(ext)
562
+ return str(path_obj_with_suffix.absolute())
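
`coerce_outpath_extension` strips every suffix before applying the new one, which matters for multi-part extensions such as '.nii.gz'; a plain `Path.with_suffix` call would replace only the final '.gz'. A short illustration of the difference:

.. code-block:: python

    from pathlib import Path
    from petpal.utils.useful_functions import coerce_outpath_extension

    p = 'sub-01_pet.nii.gz'
    print(Path(p).with_suffix('.csv'))          # sub-01_pet.nii.csv (only '.gz' is replaced)
    print(coerce_outpath_extension(p, '.csv'))  # <current working directory>/sub-01_pet.csv
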
petpal-0.5.10.dist-info/METADATA → petpal-0.6.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: petpal
3
- Version: 0.5.10
3
+ Version: 0.6.1
4
4
  Summary: PET-PAL (Positron Emission Tomography Processing and Analysis Library)
5
5
  Project-URL: Repository, https://github.com/PETPAL-WUSM/PETPAL.git
6
6
  Author-email: Noah Goldman <noahg@wustl.edu>, Bradley Judge <bjudge@wustl.edu>, Furqan Dar <dar@wustl.edu>, Kenan Oestreich <kenan.oestreich@wustl.edu>
petpal-0.5.10.dist-info/RECORD → petpal-0.6.1.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
1
- petpal/__init__.py,sha256=rYYNkBkSkHGE18JIZPeVK8YBP7Z9c4mpQfKN0cdS75k,318
1
+ petpal/__init__.py,sha256=4s4cfSb6_kjQZfP3yvxGU8DahC7aW1MO3eoMlyzbY-M,335
2
2
  petpal/cli/__init__.py,sha256=RiQTAOhSeqw5BTVvdancX3JQj4CG8F9Qe4qWZR9nKio,434
3
3
  petpal/cli/cli_graphical_analysis.py,sha256=L-YhkKkjmf6WLaAt8GDt1VmXaJXOsjMrbVvPhiTViYM,5386
4
4
  petpal/cli/cli_graphical_plots.py,sha256=_2tlGtZ0hIVyEYtGviEzGZMNhFymUPg4ZvSVyMtT_dA,3211
@@ -6,7 +6,7 @@ petpal/cli/cli_idif.py,sha256=6lh_kJHcGjlHDXZOvbiuHrNqpk5FovVV5_j7_dPHTHU,5145
6
6
  petpal/cli/cli_parametric_images.py,sha256=JBFb8QlxZoGOzqvCJPFuZ7czzGWntJP5ZcfeM5-QF4Y,7385
7
7
  petpal/cli/cli_pib_processing.py,sha256=ye_yw0ZQ4cSrMNemGR7cU9v6epD7Wbq1xaNAJwLzV_8,6889
8
8
  petpal/cli/cli_plot_tacs.py,sha256=XycaYQQl9Jp5jqDp3QXOlVT2sXHYYpYSraEArxsfJec,6479
9
- petpal/cli/cli_preproc.py,sha256=y5YvRliXC3zNY8oBJgTkzwPH9YNwWnEUWgiltUAv7AU,21850
9
+ petpal/cli/cli_preproc.py,sha256=HSIKjUweRFGVkZH-7MtkaMQUOE8iTFCdkc9Sei2KedI,23473
10
10
  petpal/cli/cli_pvc.py,sha256=DC0JZ6p1pkc5BDgQ006bi9y0Mz32ENrjUaOtSvFobP4,3967
11
11
  petpal/cli/cli_reference_tissue_models.py,sha256=18BlKN4rMehyFbdq_yr88oztqR99_gBtWKImhwf7CQY,13283
12
12
  petpal/cli/cli_stats.py,sha256=Mqbzc9yYo9SVAfxzaYbAbk7EheW472l1PY_ptlD1DOI,2050
@@ -17,6 +17,9 @@ petpal/input_function/__init__.py,sha256=mWdwuVdMSgaHE0wviNE7TGGoOI9Y3sEKfKpBqLS
17
17
  petpal/input_function/blood_input.py,sha256=TNx3hL7M8Z_ZaZoTpt8LgLQhXnNyW0BOhgVZompwgxA,9206
18
18
  petpal/input_function/idif_necktangle.py,sha256=o5kyAqyT4C6o7zELY4EjyHrkJyX1BWcxvBqYiMjNyn4,7863
19
19
  petpal/input_function/pca_guided_idif.py,sha256=MPB59K5Z5oyIunIWFqFQts61z647xawLNkv8wICrKYM,44821
20
+ petpal/io/__init__.py,sha256=Jh4Q4Zh9tBEorKAn3HqsIco1oSIv_bO8O-RZAS15u0Q,135
21
+ petpal/io/image.py,sha256=C6wUUA7LyomrUE_EJi_SzLlrYMF4vUb76PixBLcR7j0,1140
22
+ petpal/io/table.py,sha256=b3SUg9m1_q_1c7EBjvhVxKFio5TKiB7GSTlhFPmaM9E,3237
20
23
  petpal/kinetic_modeling/__init__.py,sha256=tW4yRH3TwaXPwKPqdkrbQmSk9hjrF1yRkV_C59PPboQ,382
21
24
  petpal/kinetic_modeling/fit_tac_with_rtms.py,sha256=HpK7VWVCCNoSQABY9i28vYpZsMRmvgs4vdcM_ZbdaYE,20971
22
25
  petpal/kinetic_modeling/graphical_analysis.py,sha256=e3ZXP8jA3ZgvC2T718-gukeBPlxCmOedCb2KlcGTUp8,51003
@@ -33,18 +36,18 @@ petpal/pipelines/__init__.py,sha256=Qt9VwqAvVjadHR6Lsja05XIVQONHTCs30NFxiBvf_2k,
33
36
  petpal/pipelines/kinetic_modeling_steps.py,sha256=G6FyfhR0MJ83b4pnww_FhZcAY8PwAIXSYglyoj1mPoQ,32717
34
37
  petpal/pipelines/pca_guided_idif_steps.py,sha256=tuL3stKDlUQsHBTTzlYx_WgBKwaS8eHpieUjzB90Mq4,20550
35
38
  petpal/pipelines/pipelines.py,sha256=2KgeOvrXmjNrecTQMqIKMPv4tzvaRjr1FhvntVyvHX0,38391
36
- petpal/pipelines/preproc_steps.py,sha256=VC-KoMwxnfv0BxUEGneDjDy6HUYAub6JztaRHm3gm4M,37987
39
+ petpal/pipelines/preproc_steps.py,sha256=woYBhOV18YG9ILfap-uqRPH6SLVltpEXSChsfS0cf9Y,37996
37
40
  petpal/pipelines/steps_base.py,sha256=W7GVN5-tsXe_7fvhoMRIlOk5ggY8V2wpSSmDONphLnQ,18311
38
41
  petpal/pipelines/steps_containers.py,sha256=TV4LkPK48OnAds10vLcsPI3qtWjHsmwr6XGu3hup59Q,33767
39
42
  petpal/preproc/__init__.py,sha256=HmTVnCiKMxetDT_w-qU_62UOC39_A83LHXhZm07Q81I,504
40
43
  petpal/preproc/decay_correction.py,sha256=jT8X2lmMrdGf9-lo1ooKul_yomPqm1p24gB9On_GHl0,6872
41
44
  petpal/preproc/image_operations_4d.py,sha256=UEaMw9YwnX-TOvmKxsA2qsU5GRR1Tq46l7gQz7TVzdk,31372
42
- petpal/preproc/motion_corr.py,sha256=dz10qjXBVTF_RH5RPZ68drUVX2qyj-MnZ674_Ccwz2Y,28670
45
+ petpal/preproc/motion_corr.py,sha256=ke0pjh_jxlECjINwrGv-TZEM8geU7SnCDzrH8oViWTc,37454
43
46
  petpal/preproc/motion_target.py,sha256=_OJp3NoYcyD3Ke3wl2KbfOhbJ6dp6ZduR9LLz0rIaC0,3945
44
47
  petpal/preproc/partial_volume_corrections.py,sha256=J06j_Y_lhj3b3b9M5FbB2r2EPWQvoymG3GRUffSlYdE,6799
45
48
  petpal/preproc/regional_tac_extraction.py,sha256=ZXo2u-EAUg5wZj7GGYLMEaOAfLv8OCOR-Gd0xvih6Y4,22358
46
49
  petpal/preproc/register.py,sha256=NKg8mt_XMGa5HBdxYZh3sMu_KMJ0W41VHlX4Zl8wlyE,14171
47
- petpal/preproc/segmentation_tools.py,sha256=Xi1ZnBs3sp23MHWPPOLjuXi6qp4-igwIPXFJ4B_Yzsk,27186
50
+ petpal/preproc/segmentation_tools.py,sha256=CDD0NWV23rkNB56HjZGLsO4HbV61O57KsNzQsR4d06g,29106
48
51
  petpal/preproc/standard_uptake_value.py,sha256=YJIt0fl3fwMLl0tRYHpPPprMTaN4Q5JjQ5dx_CQX1nI,7494
49
52
  petpal/preproc/symmetric_geometric_transfer_matrix.py,sha256=Sr5qMTiNC76ZRKiGG5So7fceV_Lr0ql7UybO_kJgmNo,20360
50
53
  petpal/utils/__init__.py,sha256=PlxBIKUtNvtSFnNZqz8myszOysaYzS8nSILMK4haVGg,412
@@ -55,18 +58,18 @@ petpal/utils/decorators.py,sha256=9CVT4rXnSUqryNL83jmhQHiI6IGDr2E8jBqPD-KhYGA,49
55
58
  petpal/utils/image_io.py,sha256=2Dj2U-OaGL15lwt8KVyl_RZZZdRYdvzUahQWegO_JrA,17995
56
59
  petpal/utils/math_lib.py,sha256=DYt80lB1je-wFqHGuzOwfTOM1tPugomB889xBUdr99U,4725
57
60
  petpal/utils/metadata.py,sha256=O9exRDlqAmPAEcO9v7dsqzkYcSVLgRA207owEvNXXJ8,6129
58
- petpal/utils/scan_timing.py,sha256=CYtYuFquAnOQ2QfjXdeLjWrBDPM_k4vBI9oHQdpmVZ0,13908
61
+ petpal/utils/scan_timing.py,sha256=tIx8t9Xc1bfVtVeagfHlGG6dRRjMKS2_Fzac2YfAATg,14304
59
62
  petpal/utils/stats.py,sha256=paFdwVPIjlAi0wh5xU4x5WeydjKsEHuwzMLcDG_WzPc,6449
60
63
  petpal/utils/testing_utils.py,sha256=eMt1kklxK3rl8tm74I3yVNDotKh1CnYWLINDT7rzboM,9557
61
64
  petpal/utils/time_activity_curve.py,sha256=ZjirIVy6rxG1cEZhYzFhbi9FEixlgdBpVqcZXXz6c3U,40379
62
- petpal/utils/useful_functions.py,sha256=md2kTLbs45MhrjdMhvDYcbflPTRNPspRSIHiOeIxEqY,21361
65
+ petpal/utils/useful_functions.py,sha256=veHeGP1B2E7FhiY86tGL145T_SZiPgyLzJWZ0XKdra8,22277
63
66
  petpal/visualizations/__init__.py,sha256=bd0NHDVl6Z2BDhisEcob2iIcqfxUfgKJ4DEmlrXJRP4,205
64
67
  petpal/visualizations/graphical_plots.py,sha256=ZCKUeLX2TAQscuHjA4bzlFm1bACHIyCwDuNnjCakVWU,47297
65
68
  petpal/visualizations/image_visualization.py,sha256=Ob6TD4Q0pIrxi0m9SznK1TRWbX1Ea9Pt4wNMdRrTfTs,9124
66
69
  petpal/visualizations/qc_plots.py,sha256=iaCPe-LWWyM3OZzDPZodHZhP-z5fRdpUgaH7QS9VxPM,1243
67
70
  petpal/visualizations/tac_plots.py,sha256=zSGdptL-EnqhfDViAX8LFunln5a1b-NJ5ft7ZDcxQ38,15116
68
- petpal-0.5.10.dist-info/METADATA,sha256=QadOtBFu8TbKNi-WDticmziZXkREKywO0pP4F-AxAcI,2618
69
- petpal-0.5.10.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
70
- petpal-0.5.10.dist-info/entry_points.txt,sha256=0SZmyXqBxKzQg2eerDA16n2BdUEXyixEm0_AUo2dFns,653
71
- petpal-0.5.10.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
72
- petpal-0.5.10.dist-info/RECORD,,
71
+ petpal-0.6.1.dist-info/METADATA,sha256=vUbDCSqzh7gdYg10pJonUsmvowIzs9V0YkzE8b9sOqA,2617
72
+ petpal-0.6.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
73
+ petpal-0.6.1.dist-info/entry_points.txt,sha256=0SZmyXqBxKzQg2eerDA16n2BdUEXyixEm0_AUo2dFns,653
74
+ petpal-0.6.1.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
75
+ petpal-0.6.1.dist-info/RECORD,,