petpal 0.5.5__py3-none-any.whl → 0.5.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- petpal/cli/cli_pib_processing.py +2 -3
- petpal/cli/cli_preproc.py +119 -93
- petpal/cli/cli_vat_processing.py +3 -2
- petpal/pipelines/pipelines.py +0 -3
- petpal/pipelines/preproc_steps.py +8 -7
- petpal/preproc/__init__.py +2 -0
- petpal/preproc/image_operations_4d.py +13 -177
- petpal/preproc/motion_corr.py +31 -112
- petpal/preproc/motion_target.py +90 -0
- petpal/preproc/regional_tac_extraction.py +3 -3
- petpal/preproc/register.py +4 -11
- petpal/preproc/segmentation_tools.py +5 -5
- petpal/preproc/standard_uptake_value.py +159 -0
- petpal/utils/stats.py +21 -0
- petpal/utils/useful_functions.py +106 -5
- {petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/METADATA +13 -4
- {petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/RECORD +20 -18
- {petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/entry_points.txt +0 -1
- {petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/WHEEL +0 -0
- {petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/licenses/LICENSE +0 -0

petpal/preproc/motion_target.py
ADDED

@@ -0,0 +1,90 @@
+"""Module with function to get a motion target for motion correction and registration"""
+import os
+import tempfile
+import ants
+
+from ..utils.useful_functions import get_average_of_timeseries
+from .standard_uptake_value import weighted_sum_for_suv
+
+
+def determine_motion_target(motion_target_option: str | tuple | list,
+                            input_image_path: str = None) -> str:
+    """
+    Produce a motion target given the ``motion_target_option`` from a method
+    running registrations on PET, i.e. :meth:`motion_correction` or
+    :meth:`register_pet`.
+
+    The motion target option can be a string or a tuple. If it is a string,
+    then if this string is a file, use the file as the motion target.
+
+    If it is the option ``weighted_series_sum``, then run
+    :meth:`weighted_series_sum` and return the output path.
+
+    If it is the option ``mean_image``, then compute the time-average of the
+    4D-PET image.
+
+    If it is a tuple, run a weighted sum on the PET series on a range of
+    frames. The elements of the tuple are treated as times in seconds, counted
+    from the time of the first frame, i.e. (0,300) would average all frames
+    from the first to the frame 300 seconds later. If the two elements are the
+    same, returns the one frame closest to the entered time.
+
+    Args:
+        motion_target_option (str | tuple | list): Determines how the method behaves,
+            according to the above description. Can be a file, a method
+            ('weighted_series_sum' or 'mean_image'), or a tuple range e.g. (0,600).
+        input_image_path (str): Path to the PET image. This is intended to
+            be supplied by the parent method employing this function. Default
+            value None.
+
+    Returns:
+        out_image_file (str): File to use as a target to compute
+            transformations on.
+
+    Raises:
+        ValueError: If ``motion_target_option`` is not a string, list, or tuple. If it is a string,
+            but does not match one of the preset options or path to a file, the error will also be
+            raised.
+        TypeError: If start and end time are incompatible with ``float`` type.
+    """
+    if isinstance(motion_target_option, str):
+        if os.path.exists(motion_target_option):
+            return motion_target_option
+
+        if motion_target_option == 'weighted_series_sum':
+            out_image_file = tempfile.mkstemp(suffix='_wss.nii.gz')[1]
+            weighted_sum_for_suv(input_image_path=input_image_path,
+                                 output_image_path=out_image_file)
+            return out_image_file
+
+        if motion_target_option == 'mean_image':
+            out_image_file = tempfile.mkstemp(suffix='_mean.nii.gz')[1]
+            input_img = ants.image_read(input_image_path)
+            mean_img = get_average_of_timeseries(input_image=input_img)
+            ants.image_write(image=mean_img,filename=out_image_file)
+            return out_image_file
+
+        raise ValueError("motion_target_option did not match a file or 'weighted_series_sum'")
+
+    if isinstance(motion_target_option, (list, tuple)):
+
+        start_time = motion_target_option[0]
+        end_time = motion_target_option[1]
+
+        try:
+            float(start_time)
+            float(end_time)
+        except Exception as exc:
+            raise TypeError('Start time and end time of calculation must be '
+                            'able to be cast into float! Provided values are '
+                            f"{start_time} and {end_time}.") from exc
+
+        out_image_file = tempfile.mkstemp(suffix='_wss.nii.gz')[1]
+        weighted_sum_for_suv(input_image_path=input_image_path,
+                             output_image_path=out_image_file,
+                             start_time=float(start_time),
+                             end_time=float(end_time))
+
+        return out_image_file
+
+    raise ValueError('motion_target_option did not match str or tuple type.')
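
For orientation, here is a minimal sketch of how the new helper might be called against a dynamic PET series; the file name is illustrative, and the image is assumed to have a BIDS-style .json sidecar so that half-life and frame timing can be read from metadata:

```python
from petpal.preproc.motion_target import determine_motion_target

pet_4d = "sub-01_pet.nii.gz"  # hypothetical 4D PET image with a matching .json sidecar

# Time-average of the whole series as the registration/motion-correction target.
mean_target = determine_motion_target(motion_target_option='mean_image',
                                      input_image_path=pet_4d)

# Weighted sum restricted to the first 600 seconds of the scan.
early_target = determine_motion_target(motion_target_option=(0, 600),
                                       input_image_path=pet_4d)

print(mean_target, early_target)  # paths to temporary _mean.nii.gz / _wss.nii.gz files
```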

petpal/preproc/regional_tac_extraction.py
CHANGED

@@ -192,7 +192,7 @@ def write_tacs(input_image_path: str,
    print('Finished writing TACs.')


-def roi_tac(input_image_4d_path: str,
+def roi_tac(input_image_path: str,
            roi_image_path: str,
            region: list[int] | int,
            out_tac_path: str | None = None,
@@ -223,8 +223,8 @@ def roi_tac(input_image_4d_path: str,
        raise ValueError("'time_frame_keyword' must be one of "
                         "'FrameReferenceTime' or 'FrameTimesStart'")

-    pet_meta = image_io.load_metadata_for_nifti_with_same_filename(
-    pet_numpy = ants.image_read(
+    pet_meta = image_io.load_metadata_for_nifti_with_same_filename(input_image_path)
+    pet_numpy = ants.image_read(input_image_path).numpy()
    seg_numpy = ants.image_read(roi_image_path).numpy()

    region_mask = combine_regions_as_mask(segmentation_img=seg_numpy,
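
For callers using keyword arguments, only the first keyword changes; a sketch of a 0.5.7-style call (file names, the label value, and the output path are illustrative, and parameters not visible in this hunk are omitted):

```python
from petpal.preproc.regional_tac_extraction import roi_tac

# Extract a time-activity curve for label 8 from a 4D PET image; in 0.5.5 the
# first keyword was input_image_4d_path, in 0.5.7 it is input_image_path.
roi_tac(input_image_path="sub-01_pet.nii.gz",
        roi_image_path="sub-01_seg.nii.gz",
        region=8,
        out_tac_path="sub-01_region8_tac.tsv")
```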
petpal/preproc/register.py
CHANGED

@@ -10,7 +10,7 @@ import nibabel
import numpy as np
from nibabel.processing import resample_from_to

-from .
+from .motion_target import determine_motion_target
from ..utils import image_io
from ..utils.useful_functions import check_physical_space_for_ants_image_pair

@@ -29,15 +29,10 @@ def register_pet_to_pet(input_image_path: str,
    Returns:
        ants.ANTsImage: ANTsImage containing input image registered to reference image.
    """
-
-    half_life = image_io.get_half_life_from_nifti(image_path=input_image_path)
-
    wss_input = determine_motion_target(motion_target_option='weighted_series_sum',
-
-                                        half_life=half_life)
+                                        input_image_path=input_image_path)
    wss_reference = determine_motion_target(motion_target_option='weighted_series_sum',
-
-                                            half_life=half_life)
+                                            input_image_path=reference_pet_image_path)

    wss_input_ants = ants.image_read(wss_input)
    wss_reference_ants = ants.image_read(wss_reference)
@@ -67,7 +62,6 @@ def register_pet(input_reg_image_path: str,
                 motion_target_option: Union[str, tuple],
                 verbose: bool,
                 type_of_transform: str = 'DenseRigid',
-                 half_life: float = None,
                 **kwargs):
    """
    Computes and runs rigid registration of 4D PET image series to 3D anatomical image, typically
@@ -91,8 +85,7 @@ def register_pet(input_reg_image_path: str,
        kwargs (keyword arguments): Additional arguments passed to :py:func:`ants.registration`.
    """
    motion_target = determine_motion_target(motion_target_option=motion_target_option,
-
-                                            half_life=half_life)
+                                            input_image_path=input_reg_image_path)
    motion_target_image = ants.image_read(motion_target)
    mri_image = ants.image_read(reference_image_path)
    pet_image_ants = ants.image_read(input_reg_image_path)
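
The practical effect for callers is that `half_life` no longer has to be threaded through registration helpers; `determine_motion_target` now reads it from the image's NIfTI/JSON metadata via `weighted_sum_for_suv`. A before/after sketch of such a call site, with an illustrative path:

```python
from petpal.preproc.motion_target import determine_motion_target

pet_path = "sub-01_pet.nii.gz"  # hypothetical 4D PET image with BIDS metadata

# 0.5.5: the caller looked up the half-life and passed it in, e.g.
#   half_life = image_io.get_half_life_from_nifti(image_path=pet_path)
#   target = determine_motion_target('weighted_series_sum', half_life=half_life, ...)
# 0.5.7: the half-life is resolved internally, so the call site reduces to:
target = determine_motion_target(motion_target_option='weighted_series_sum',
                                 input_image_path=pet_path)
```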

petpal/preproc/segmentation_tools.py
CHANGED

@@ -14,7 +14,7 @@ import nibabel
from nibabel import processing
import pandas as pd

-from . import
+from ..utils.useful_functions import gen_nd_image_based_on_image_list
from ..utils import math_lib


@@ -203,7 +203,7 @@ def replace_probabilistic_region(segmentation_numpy: np.ndarray,
    return segmentation_numpy


-def resample_segmentation(input_image_4d_path: str,
+def resample_segmentation(input_image_path: str,
                          segmentation_image_path: str,
                          out_seg_path: str,
                          verbose: bool):
@@ -215,7 +215,7 @@ def resample_segmentation(input_image_4d_path: str,
    PET and ROI data are registered to the same space, but have different resolutions.

    Args:
-
+        input_image_path (str): Path to a .nii or .nii.gz file containing a 4D
            PET image, registered to anatomical space, to which the segmentation file is resampled.
        segmentation_image_path (str): Path to a .nii or .nii.gz file containing a 3D segmentation
            image, where integer indices label specific regions.
@@ -223,7 +223,7 @@ def resample_segmentation(input_image_4d_path: str,
            image is written.
        verbose (bool): Set to ``True`` to output processing information.
    """
-    pet_image = nibabel.load(
+    pet_image = nibabel.load(input_image_path)
    seg_image = nibabel.load(segmentation_image_path)
    pet_series = pet_image.get_fdata()
    image_first_frame = pet_series[:, :, :, 0]
@@ -363,7 +363,7 @@ def gw_segmentation(freesurfer_path: str,
                             origin=freesurfer.origin,
                             spacing=freesurfer.spacing,
                             direction=freesurfer.direction)
-    gw_map_template =
+    gw_map_template = gen_nd_image_based_on_image_list([gm_img, wm_img])
    gw_map_4d = ants.list_to_ndimage(image=gw_map_template,image_list=[gm_img,wm_img])
    ants.image_write(gw_map_4d,output_path)

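
As with `roi_tac`, only the leading parameter name of `resample_segmentation` changes; a 0.5.7-style call might look like this (all paths illustrative):

```python
from petpal.preproc.segmentation_tools import resample_segmentation

# Resample an anatomical-space label image onto the PET grid so that regional
# values can be read off voxel-for-voxel.
resample_segmentation(input_image_path="sub-01_pet_space-anat.nii.gz",
                      segmentation_image_path="sub-01_aparc+aseg.nii.gz",
                      out_seg_path="sub-01_seg_space-pet.nii.gz",
                      verbose=True)
```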

petpal/preproc/standard_uptake_value.py
ADDED

@@ -0,0 +1,159 @@
+"""
+Module for functions calculating standard uptake value (SUV) and related measures, such as standard
+uptake value ratio (SUVR).
+"""
+import ants
+
+from ..utils.stats import mean_value_in_region
+from ..utils.math_lib import weighted_sum_computation
+from ..utils.useful_functions import gen_3d_img_from_timeseries, nearest_frame_to_timepoint
+from ..utils.image_io import (get_half_life_from_nifti,
+                              load_metadata_for_nifti_with_same_filename,
+                              safe_copy_meta)
+
+
+def weighted_sum_for_suv(input_image_path: str,
+                         output_image_path: str | None,
+                         start_time: float=0,
+                         end_time: float=-1) -> ants.ANTsImage:
+    """Function that calculates the weighted series sum for a PET image specifically for
+    calculating the standard uptake value (SUV) of the image.
+
+    Args:
+        input_image_path (str): Path to a 4D PET image which we calculate the sum on.
+        output_image_path (str): Path to which output image is saved. If None, returns
+            calculated image without saving.
+        start_time: Time in seconds from the start of the scan from which to begin sum calculation.
+            Only frames after selected time will be included in the sum. Default 0.
+        end_time: Time in seconds from the start of the scan from which to end sum calculation.
+            Only frames before selected time will be included in the sum. If -1, use all frames
+            after `start_time` in the calculation. Default -1.
+
+    Returns:
+        weighted_sum_img (ants.ANTsImage): 3D image resulting from the sum calculation.
+    """
+    half_life = get_half_life_from_nifti(image_path=input_image_path)
+    if half_life <= 0:
+        raise ValueError('(ImageOps4d): Radioisotope half life is zero or negative.')
+    pet_meta = load_metadata_for_nifti_with_same_filename(input_image_path)
+    pet_img = ants.image_read(input_image_path)
+    frame_start = pet_meta['FrameTimesStart']
+    frame_duration = pet_meta['FrameDuration']
+
+    if 'DecayCorrectionFactor' in pet_meta.keys():
+        decay_correction = pet_meta['DecayCorrectionFactor']
+    elif 'DecayFactor' in pet_meta.keys():
+        decay_correction = pet_meta['DecayFactor']
+    else:
+        raise ValueError("Neither 'DecayCorrectionFactor' nor 'DecayFactor' exist in meta-data "
+                         "file")
+
+    last_frame_time = frame_start[-1]
+    if end_time!=-1:
+        last_frame_time = end_time
+    scan_start = frame_start[0]
+    nearest_frame = nearest_frame_to_timepoint(frame_times=frame_start)
+    calc_first_frame = int(nearest_frame(start_time+scan_start))
+    calc_last_frame = int(nearest_frame(last_frame_time+scan_start))
+    if calc_first_frame==calc_last_frame:
+        calc_last_frame += 1
+    pet_series_adjusted = pet_img[:,:,:,calc_first_frame:calc_last_frame]
+    frame_start_adjusted = frame_start[calc_first_frame:calc_last_frame]
+    frame_duration_adjusted = frame_duration[calc_first_frame:calc_last_frame]
+    decay_correction_adjusted = decay_correction[calc_first_frame:calc_last_frame]
+
+    weighted_sum_arr = weighted_sum_computation(frame_duration=frame_duration_adjusted,
+                                                half_life=half_life,
+                                                pet_series=pet_series_adjusted,
+                                                frame_start=frame_start_adjusted,
+                                                decay_correction=decay_correction_adjusted)
+    weighted_sum_img = ants.from_numpy_like(weighted_sum_arr,gen_3d_img_from_timeseries(pet_img))
+
+    if output_image_path is not None:
+        ants.image_write(weighted_sum_img, output_image_path)
+        safe_copy_meta(input_image_path=input_image_path,
+                       out_image_path=output_image_path)
+
+    return weighted_sum_img
+
+
+def suv(input_image_path: str,
+        output_image_path: str | None,
+        weight: float,
+        dose: float,
+        start_time: float,
+        end_time: float) -> ants.ANTsImage:
+    """Compute standard uptake value (SUV) over a pet image. Calculate the weighted image sum
+    then divide by the dose and multiplying by the weight of the participant.
+
+    Args:
+        input_image_path: Path to input PET image.
+        output_image_path: Path to which SUV image is saved.
+        weight: Weight of the participant in kg.
+        dose: Dose injected during the scan in MBq.
+        start_time: Start time for the SUV calculation in seconds.
+        end_time: End time for the SUV calculation in seconds.
+
+    Returns:
+        suv_img (ants.ANTsImage): The standard uptake value calculated on the input image."""
+    wss_img = weighted_sum_for_suv(input_image_path=input_image_path,
+                                   output_image_path=None,
+                                   start_time=start_time,
+                                   end_time=end_time)
+    suv_img = wss_img / (dose*1000) * weight
+
+    if output_image_path is not None:
+        ants.image_write(suv_img, output_image_path)
+        safe_copy_meta(input_image_path=input_image_path,
+                       out_image_path=output_image_path)
+
+    return suv_img
+
+
+def suvr(input_image_path: str,
+         output_image_path: str | None,
+         segmentation_image_path: str,
+         ref_region: int | list[int],
+         start_time: float,
+         end_time: float) -> ants.ANTsImage:
+    """
+    Computes an ``SUVR`` (Standard Uptake Value Ratio) by taking the average of
+    an input image within a reference region, and dividing the input image by
+    said average value.
+
+    Args:
+        input_image_path (str): Path to 4D PET image.
+        output_image_path (str): Path to output image file which is written to. If None, no output
+            is written.
+        segmentation_image_path (str): Path to segmentation image, which we use
+            to compute average uptake value in the reference region.
+        ref_region (int): Region or list of region mappings over which to compute average SUV. If a
+            list is provided, combines all regions in the list as one reference region.
+        start_time: Time in seconds from the start of the scan from which to begin sum calculation.
+            Only frames after selected time will be included in the sum. Default 0.
+        end_time: Time in seconds from the start of the scan from which to end sum calculation.
+            Only frames before selected time will be included in the sum. If -1, use all frames
+            after `start_time` in the calculation. Default -1.
+
+    Returns:
+        ants.ANTsImage: SUVR parametric image
+    """
+    sum_img = weighted_sum_for_suv(input_image_path=input_image_path,
+                                   output_image_path=None,
+                                   start_time=start_time,
+                                   end_time=end_time)
+    segmentation_img = ants.image_read(filename=segmentation_image_path)
+
+    ref_region_avg = mean_value_in_region(input_img=sum_img,
+                                          seg_img=segmentation_img,
+                                          mappings=ref_region)
+
+    suvr_img = sum_img / ref_region_avg
+
+    if output_image_path is not None:
+        ants.image_write(image=suvr_img,
+                         filename=output_image_path)
+        safe_copy_meta(input_image_path=input_image_path,
+                       out_image_path=output_image_path)
+
+    return suvr_img
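
A minimal sketch of the new module in use, assuming a dynamic PET image with BIDS frame-timing metadata; the weight, dose, time window, and reference-region labels below are illustrative values, not defaults:

```python
from petpal.preproc.standard_uptake_value import suv, suvr

pet_path = "sub-01_pet.nii.gz"  # hypothetical 4D PET image with a .json sidecar

# Body-weight SUV over the 30-60 minute window.
suv_img = suv(input_image_path=pet_path,
              output_image_path="sub-01_suv.nii.gz",
              weight=70.0,        # participant weight in kg
              dose=370.0,         # injected dose in MBq
              start_time=1800.0,  # seconds from scan start
              end_time=3600.0)

# SUVR relative to a two-label reference region from a matching segmentation.
suvr_img = suvr(input_image_path=pet_path,
                output_image_path="sub-01_suvr.nii.gz",
                segmentation_image_path="sub-01_seg_space-pet.nii.gz",
                ref_region=[8, 47],
                start_time=1800.0,
                end_time=3600.0)
```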
petpal/utils/stats.py
CHANGED

@@ -6,6 +6,27 @@ import ants
from ..meta.label_maps import LabelMapLoader
from .useful_functions import check_physical_space_for_ants_image_pair

+def mean_value_in_region(input_img: ants.ANTsImage,
+                         seg_img: ants.ANTsImage,
+                         mappings: int | list[int]) -> float:
+    """Calculate the mean value in a 3D PET image over a region based on one or more integer
+    mappings corresponding to regions in a segmentation image.
+
+    Args:
+        input_img (ants.ANTsImage): 3D PET image over which to calculate the mean.
+        seg_img (ants.ANTsImage): Segmentation image in same space as `input_img`.
+        mappings (int | list[int]): One or more mappings to mask input_image over.
+
+    Returns:
+        region_mean (float): Mean PET value over voxels in the regions corresponding to
+            `mappings`."""
+    region_mask = ants.mask_image(input_img, seg_img, level=mappings)
+    region_arr = region_mask.numpy().flatten()
+    region_arr_nonzero = region_arr.nonzero()
+    voxel_arr = region_arr[region_arr_nonzero]
+    return voxel_arr.mean()
+
+
class RegionalStats:
    """Run statistics on each region in a parametric 3D PET kinetic model or other image.

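
The helper can also be used on its own; a short sketch with illustrative paths and label value, where both images are assumed to share the same grid:

```python
import ants
from petpal.utils.stats import mean_value_in_region

suv_img = ants.image_read("sub-01_suv.nii.gz")            # 3D parametric image
seg_img = ants.image_read("sub-01_seg_space-pet.nii.gz")  # integer labels on the same grid

# Mean value over all non-zero voxels of the image masked to label 8.
region_mean = mean_value_in_region(input_img=suv_img,
                                   seg_img=seg_img,
                                   mappings=8)
print(f"Mean SUV in region 8: {region_mean:.3f}")
```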
petpal/utils/useful_functions.py
CHANGED

@@ -1,6 +1,7 @@
"""
Module to handle abstracted functionalities
"""
+from collections.abc import Callable
import os
import nibabel
import numpy as np
@@ -88,7 +89,7 @@ def build_label_map(region_names: list[str]):
    return abbreviated_names


-def weighted_series_sum(input_image_4d_path: str,
+def weighted_series_sum(input_image_path: str,
                        out_image_path: str,
                        half_life: float,
                        verbose: bool=False,
@@ -124,7 +125,7 @@ def weighted_series_sum(input_image_4d_path: str,
    # TODO: Determine half_life from .json rather than passing as argument.

    Args:
-
+        input_image_path (str): Path to a .nii or .nii.gz file containing a 4D
            PET image on which the weighted sum is calculated. Assume a metadata
            file exists with the same path and file name, but with extension .json,
            and follows BIDS standard.
@@ -147,8 +148,8 @@ def weighted_series_sum(input_image_4d_path: str,
    """
    if half_life <= 0:
        raise ValueError('(ImageOps4d): Radioisotope half life is zero or negative.')
-    pet_meta = image_io.load_metadata_for_nifti_with_same_filename(
-    pet_image = nibabel.load(
+    pet_meta = image_io.load_metadata_for_nifti_with_same_filename(input_image_path)
+    pet_image = nibabel.load(input_image_path)
    pet_series = pet_image.get_fdata()
    frame_start = pet_meta['FrameTimesStart']
    frame_duration = pet_meta['FrameDuration']
@@ -202,7 +203,7 @@ def weighted_series_sum(input_image_4d_path: str,
        nibabel.save(pet_sum_image, out_image_path)
        if verbose:
            print(f"(ImageOps4d): weighted sum image saved to {out_image_path}")
-    image_io.safe_copy_meta(input_image_path=
+    image_io.safe_copy_meta(input_image_path=input_image_path,
                            out_image_path=out_image_path)

    return image_weighted_sum
@@ -431,3 +432,103 @@ def get_frame_from_timeseries(input_img: ants.ANTsImage, frame: int) -> ants.ANT
    ants.set_direction( img_3d, subdirection )

    return img_3d
+
+
+def nearest_frame_to_timepoint(frame_times: np.ndarray) -> Callable[[float],float]:
+    """Returns a step function that gets the index of the frame closest to a provided timepoint
+    based on an array of frame times, such as the frame starts or reference times.
+
+    Args:
+        frame_times (np.ndarray): The frame times on which to generate the step function.
+
+    Returns:
+        nearest_frame_func (Callable[[float],float]): A function that returns the time closest to
+            the provided timepoint.
+    """
+    nearest_frame_func = interp1d(x=frame_times,
+                                  y=range(len(frame_times)),
+                                  kind='nearest',
+                                  bounds_error=False,
+                                  fill_value='extrapolate')
+    return nearest_frame_func
+
+
+def get_average_of_timeseries(input_image: ants.ANTsImage) -> ants.ANTsImage:
+    """
+    Get average of a 4D ANTsImage and return as a 3D ANTsImage.
+
+    Args:
+        input_image (ants.ANTsImage): 4D PET image over which to compute timeseries average.
+
+    Returns:
+        mean_image (ants.ANTsImage): 3D mean over time in the PET image.
+    """
+    assert len(input_image.shape) == 4, "Input image must be 4D"
+    mean_array = input_image.mean(axis=-1)
+    mean_image = ants.from_numpy(data=mean_array,
+                                 origin=input_image.origin[:-1],
+                                 spacing=input_image.spacing[:-1],
+                                 direction=input_image.direction[:-1,:-1])
+    return mean_image
+
+
+def gen_nd_image_based_on_image_list(image_list: list[ants.ANTsImage]) -> ants.ANTsImage:
+    r"""
+    Generate a 4D ANTsImage based on a list of 3D ANTsImages.
+
+    This function takes a list of 3D ANTsImages and constructs a new 4D ANTsImage,
+    where the additional dimension represents the number of frames (3D images) in the list.
+    The 4D image retains the spacing, origin, direction, and shape properties of the 3D images,
+    with appropriate modifications for the additional dimension.
+
+    Args:
+        image_list (list[ants.core.ants_image.ANTsImage]):
+            List of 3D ANTsImage objects to be combined into a 4D image.
+            The list must contain at least one image, and all images must have the same
+            dimensions and properties.
+
+    Returns:
+        ants.ANTsImage:
+            A 4D ANTsImage constructed from the input list of 3D images. The additional
+            dimension corresponds to the number of frames (length of the image list).
+
+    Raises:
+        AssertionError: If the `image_list` is empty or if the images in the list are not 3D.
+
+    See Also
+        * :func:`petpal.preproc.motion_corr.motion_corr_frame_list_to_t1`
+
+    Example:
+
+    .. code-block:: python
+
+
+        import ants
+        image1 = ants.image_read('frame1.nii.gz')
+        image2 = ants.image_read('frame2.nii.gz')
+        image_list = [image1, image2]
+        result = _gen_nd_image_based_on_image_list(image_list)
+        print(result.dimension) # 4
+        image4d = ants.list_to_ndimage(result, image_list)
+
+    """
+    assert len(image_list) > 0
+    assert image_list[0].dimension == 3
+
+    num_frames = len(image_list)
+    spacing_3d = image_list[0].spacing
+    origin_3d = image_list[0].origin
+    shape_3d = image_list[0].shape
+    direction_3d = image_list[0].direction
+
+    direction_4d = np.eye(4)
+    direction_4d[:3, :3] = direction_3d
+    spacing_4d = (*spacing_3d, 1.0)
+    origin_4d = (*origin_3d, 0.0)
+    shape_4d = (*shape_3d, num_frames)
+
+    tmp_image = ants.make_image(imagesize=shape_4d,
+                                spacing=spacing_4d,
+                                origin=origin_4d,
+                                direction=direction_4d)
+    return tmp_image
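
For reference, a sketch of the two smaller time-series helpers added here; the image path and frame-start times are illustrative:

```python
import numpy as np
import ants
from petpal.utils.useful_functions import (get_average_of_timeseries,
                                           nearest_frame_to_timepoint)

pet_img = ants.image_read("sub-01_pet.nii.gz")        # hypothetical 4D PET series
frame_starts = np.array([0., 60., 120., 300., 600.])  # seconds, e.g. from FrameTimesStart

# Step function mapping a timepoint to the index of the nearest frame start.
nearest_frame = nearest_frame_to_timepoint(frame_times=frame_starts)
idx = int(nearest_frame(280.0))  # -> 3, since 300 s is the closest frame start

# 3D time-average of the series, as used by the 'mean_image' motion target.
mean_img = get_average_of_timeseries(input_image=pet_img)
```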

{petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/METADATA
CHANGED

@@ -1,10 +1,9 @@
Metadata-Version: 2.4
Name: petpal
-Version: 0.5.5
+Version: 0.5.7
Summary: PET-PAL (Positron Emission Tomography Processing and Analysis Library)
Project-URL: Repository, https://github.com/PETPAL-WUSM/PETPAL.git
-Author: Bradley Judge
-Author-email: Furqan Dar <dar@wustl.edu>, Noah Goldman <noahg@wustl.edu>, Kenan Oestreich <kenan.oestreich@wustl.edu>
+Author-email: Noah Goldman <noahg@wustl.edu>, Bradley Judge <bjudge@wustl.edu>, Furqan Dar <dar@wustl.edu>, Kenan Oestreich <kenan.oestreich@wustl.edu>
License-File: LICENSE
Classifier: Development Status :: 2 - Pre-Alpha
Classifier: Intended Audience :: Science/Research
@@ -45,7 +44,17 @@ Description-Content-Type: text/markdown

## Installation

-
+### Using Pip
+
+The simplest way to install PETPAL is using pip. First, ensure you are using Python version >=3.12. Then, run the following:
+
+```shell
+pip install petpal
+```
+
+### Build from source
+
+Clone the repository using your preferred method. After navigating to the top-level directory (where `pyproject.toml` exists), we run the following command in the terminal:

```shell
pip install . # Installs the package

{petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/RECORD
CHANGED

@@ -4,15 +4,15 @@ petpal/cli/cli_graphical_analysis.py,sha256=nGb0afMAigJgvbyEj5EXOCt6WNU35LNo3h3K
petpal/cli/cli_graphical_plots.py,sha256=_2tlGtZ0hIVyEYtGviEzGZMNhFymUPg4ZvSVyMtT_dA,3211
petpal/cli/cli_idif.py,sha256=6lh_kJHcGjlHDXZOvbiuHrNqpk5FovVV5_j7_dPHTHU,5145
petpal/cli/cli_parametric_images.py,sha256=JBFb8QlxZoGOzqvCJPFuZ7czzGWntJP5ZcfeM5-QF4Y,7385
-petpal/cli/cli_pib_processing.py,sha256=
+petpal/cli/cli_pib_processing.py,sha256=ye_yw0ZQ4cSrMNemGR7cU9v6epD7Wbq1xaNAJwLzV_8,6889
petpal/cli/cli_plot_tacs.py,sha256=XycaYQQl9Jp5jqDp3QXOlVT2sXHYYpYSraEArxsfJec,6479
-petpal/cli/cli_preproc.py,sha256=
+petpal/cli/cli_preproc.py,sha256=y5YvRliXC3zNY8oBJgTkzwPH9YNwWnEUWgiltUAv7AU,21850
petpal/cli/cli_pvc.py,sha256=DC0JZ6p1pkc5BDgQ006bi9y0Mz32ENrjUaOtSvFobP4,3967
petpal/cli/cli_reference_tissue_models.py,sha256=18BlKN4rMehyFbdq_yr88oztqR99_gBtWKImhwf7CQY,13283
petpal/cli/cli_stats.py,sha256=Mqbzc9yYo9SVAfxzaYbAbk7EheW472l1PY_ptlD1DOI,2050
petpal/cli/cli_tac_fitting.py,sha256=bCYwFAbxIKwnYBteAKnot5vOsk9F4Z1EJw2Xk0tZ9oo,14989
petpal/cli/cli_tac_interpolation.py,sha256=Nwf0CAfyEATLeiwuPSirS0DWDeerrIH2P-U0lpXpKWk,5734
-petpal/cli/cli_vat_processing.py,sha256=
+petpal/cli/cli_vat_processing.py,sha256=XrWcjZqgc5JAtp4fZ94ZLJ7h6fvdMhZvR30nh7GCD4w,12404
petpal/input_function/__init__.py,sha256=mWdwuVdMSgaHE0wviNE7TGGoOI9Y3sEKfKpBqLS-Ph4,151
petpal/input_function/blood_input.py,sha256=TNx3hL7M8Z_ZaZoTpt8LgLQhXnNyW0BOhgVZompwgxA,9206
petpal/input_function/idif_necktangle.py,sha256=o5kyAqyT4C6o7zELY4EjyHrkJyX1BWcxvBqYiMjNyn4,7863
@@ -32,18 +32,20 @@ petpal/meta/label_maps.py,sha256=cifp_KXQAgIbdWoinMpOQstEK4-pasSXlgML6xoEjHM,227
petpal/pipelines/__init__.py,sha256=Qt9VwqAvVjadHR6Lsja05XIVQONHTCs30NFxiBvf_2k,266
petpal/pipelines/kinetic_modeling_steps.py,sha256=G6FyfhR0MJ83b4pnww_FhZcAY8PwAIXSYglyoj1mPoQ,32717
petpal/pipelines/pca_guided_idif_steps.py,sha256=tuL3stKDlUQsHBTTzlYx_WgBKwaS8eHpieUjzB90Mq4,20550
-petpal/pipelines/pipelines.py,sha256=
-petpal/pipelines/preproc_steps.py,sha256=
+petpal/pipelines/pipelines.py,sha256=2KgeOvrXmjNrecTQMqIKMPv4tzvaRjr1FhvntVyvHX0,38391
+petpal/pipelines/preproc_steps.py,sha256=VC-KoMwxnfv0BxUEGneDjDy6HUYAub6JztaRHm3gm4M,37987
petpal/pipelines/steps_base.py,sha256=W7GVN5-tsXe_7fvhoMRIlOk5ggY8V2wpSSmDONphLnQ,18311
petpal/pipelines/steps_containers.py,sha256=TV4LkPK48OnAds10vLcsPI3qtWjHsmwr6XGu3hup59Q,33767
-petpal/preproc/__init__.py,sha256=
+petpal/preproc/__init__.py,sha256=HmTVnCiKMxetDT_w-qU_62UOC39_A83LHXhZm07Q81I,504
petpal/preproc/decay_correction.py,sha256=QCA_QcB6pqAD_wYSqtRKSvO6qWCaEzgL_qrAcSczRtw,6829
-petpal/preproc/image_operations_4d.py,sha256=
-petpal/preproc/motion_corr.py,sha256=
+petpal/preproc/image_operations_4d.py,sha256=IqzwxaWxoWC1gmK00uuHIwlhx8e_eQ44C6yVFuu73W4,31371
+petpal/preproc/motion_corr.py,sha256=dz10qjXBVTF_RH5RPZ68drUVX2qyj-MnZ674_Ccwz2Y,28670
+petpal/preproc/motion_target.py,sha256=_OJp3NoYcyD3Ke3wl2KbfOhbJ6dp6ZduR9LLz0rIaC0,3945
petpal/preproc/partial_volume_corrections.py,sha256=J06j_Y_lhj3b3b9M5FbB2r2EPWQvoymG3GRUffSlYdE,6799
-petpal/preproc/regional_tac_extraction.py,sha256=
-petpal/preproc/register.py,sha256=
-petpal/preproc/segmentation_tools.py,sha256=
+petpal/preproc/regional_tac_extraction.py,sha256=qQDD9Z9p21DVUKokh_en2chOGP7F01wnDN156_74X8Q,19704
+petpal/preproc/register.py,sha256=NKg8mt_XMGa5HBdxYZh3sMu_KMJ0W41VHlX4Zl8wlyE,14171
+petpal/preproc/segmentation_tools.py,sha256=BUy8ij45mmetenvWzODVwNIThDkYiEtY6gTAqI8sIak,25703
+petpal/preproc/standard_uptake_value.py,sha256=YJIt0fl3fwMLl0tRYHpPPprMTaN4Q5JjQ5dx_CQX1nI,7494
petpal/preproc/symmetric_geometric_transfer_matrix.py,sha256=ELkr7Mo233to1Rwml5YJ-aBvmTSk3LHNSdRhnX0WBDw,17575
petpal/utils/__init__.py,sha256=PlxBIKUtNvtSFnNZqz8myszOysaYzS8nSILMK4haVGg,412
petpal/utils/bids_utils.py,sha256=3eZAzwGpOBUQ5ShVBUJJpmUBUhr3VcOikR-KpGCsdE0,7664
@@ -54,17 +56,17 @@ petpal/utils/image_io.py,sha256=2Dj2U-OaGL15lwt8KVyl_RZZZdRYdvzUahQWegO_JrA,1799
petpal/utils/math_lib.py,sha256=DYt80lB1je-wFqHGuzOwfTOM1tPugomB889xBUdr99U,4725
petpal/utils/metadata.py,sha256=O9exRDlqAmPAEcO9v7dsqzkYcSVLgRA207owEvNXXJ8,6129
petpal/utils/scan_timing.py,sha256=j7i66Nt0Qs3fnQLUaycLHQTfVsNOqS7YS-xUJKm7nYk,11795
-petpal/utils/stats.py,sha256=
+petpal/utils/stats.py,sha256=paFdwVPIjlAi0wh5xU4x5WeydjKsEHuwzMLcDG_WzPc,6449
petpal/utils/testing_utils.py,sha256=eMt1kklxK3rl8tm74I3yVNDotKh1CnYWLINDT7rzboM,9557
petpal/utils/time_activity_curve.py,sha256=gX3PDYbeWblycvtvyiuFtnv1mBml_-93sIXKh2EmglM,39137
-petpal/utils/useful_functions.py,sha256=
+petpal/utils/useful_functions.py,sha256=md2kTLbs45MhrjdMhvDYcbflPTRNPspRSIHiOeIxEqY,21361
petpal/visualizations/__init__.py,sha256=bd0NHDVl6Z2BDhisEcob2iIcqfxUfgKJ4DEmlrXJRP4,205
petpal/visualizations/graphical_plots.py,sha256=ZCKUeLX2TAQscuHjA4bzlFm1bACHIyCwDuNnjCakVWU,47297
petpal/visualizations/image_visualization.py,sha256=Ob6TD4Q0pIrxi0m9SznK1TRWbX1Ea9Pt4wNMdRrTfTs,9124
petpal/visualizations/qc_plots.py,sha256=iaCPe-LWWyM3OZzDPZodHZhP-z5fRdpUgaH7QS9VxPM,1243
petpal/visualizations/tac_plots.py,sha256=zSGdptL-EnqhfDViAX8LFunln5a1b-NJ5ft7ZDcxQ38,15116
-petpal-0.5.
-petpal-0.5.
-petpal-0.5.
-petpal-0.5.
-petpal-0.5.
+petpal-0.5.7.dist-info/METADATA,sha256=9fNNCPhSznboKgTX9xPVDIDTJXquMiPoh66Bq-PN9lg,2617
+petpal-0.5.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+petpal-0.5.7.dist-info/entry_points.txt,sha256=0SZmyXqBxKzQg2eerDA16n2BdUEXyixEm0_AUo2dFns,653
+petpal-0.5.7.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+petpal-0.5.7.dist-info/RECORD,,

{petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/WHEEL
File without changes

{petpal-0.5.5.dist-info → petpal-0.5.7.dist-info}/licenses/LICENSE
File without changes