rapidtide 3.0.10__py3-none-any.whl → 3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rapidtide/Colortables.py +492 -27
- rapidtide/OrthoImageItem.py +1053 -47
- rapidtide/RapidtideDataset.py +1533 -86
- rapidtide/_version.py +3 -3
- rapidtide/calccoherence.py +196 -29
- rapidtide/calcnullsimfunc.py +191 -40
- rapidtide/calcsimfunc.py +245 -42
- rapidtide/correlate.py +1210 -393
- rapidtide/data/examples/src/testLD +56 -0
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testdelayvar +0 -1
- rapidtide/data/examples/src/testfmri +19 -1
- rapidtide/data/examples/src/testglmfilt +5 -5
- rapidtide/data/examples/src/testhappy +30 -1
- rapidtide/data/examples/src/testppgproc +17 -0
- rapidtide/data/examples/src/testrolloff +11 -0
- rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
- rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
- rapidtide/decorators.py +91 -0
- rapidtide/dlfilter.py +2225 -108
- rapidtide/dlfiltertorch.py +4843 -0
- rapidtide/externaltools.py +327 -12
- rapidtide/fMRIData_class.py +79 -40
- rapidtide/filter.py +1899 -810
- rapidtide/fit.py +2004 -574
- rapidtide/genericmultiproc.py +93 -18
- rapidtide/happy_supportfuncs.py +2044 -171
- rapidtide/helper_classes.py +584 -43
- rapidtide/io.py +2363 -370
- rapidtide/linfitfiltpass.py +341 -75
- rapidtide/makelaggedtcs.py +211 -20
- rapidtide/maskutil.py +423 -53
- rapidtide/miscmath.py +827 -121
- rapidtide/multiproc.py +210 -22
- rapidtide/patchmatch.py +234 -33
- rapidtide/peakeval.py +32 -30
- rapidtide/ppgproc.py +2203 -0
- rapidtide/qualitycheck.py +352 -39
- rapidtide/refinedelay.py +422 -57
- rapidtide/refineregressor.py +498 -184
- rapidtide/resample.py +671 -185
- rapidtide/scripts/applyppgproc.py +28 -0
- rapidtide/simFuncClasses.py +1052 -77
- rapidtide/simfuncfit.py +260 -46
- rapidtide/stats.py +540 -238
- rapidtide/tests/happycomp +9 -0
- rapidtide/tests/test_dlfiltertorch.py +627 -0
- rapidtide/tests/test_findmaxlag.py +24 -8
- rapidtide/tests/test_fullrunhappy_v1.py +0 -2
- rapidtide/tests/test_fullrunhappy_v2.py +0 -2
- rapidtide/tests/test_fullrunhappy_v3.py +1 -0
- rapidtide/tests/test_fullrunhappy_v4.py +2 -2
- rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
- rapidtide/tests/test_simroundtrip.py +8 -8
- rapidtide/tests/utils.py +9 -8
- rapidtide/tidepoolTemplate.py +142 -38
- rapidtide/tidepoolTemplate_alt.py +165 -44
- rapidtide/tidepoolTemplate_big.py +189 -52
- rapidtide/util.py +1217 -118
- rapidtide/voxelData.py +684 -37
- rapidtide/wiener.py +19 -12
- rapidtide/wiener2.py +113 -7
- rapidtide/wiener_doc.py +255 -0
- rapidtide/workflows/adjustoffset.py +105 -3
- rapidtide/workflows/aligntcs.py +85 -2
- rapidtide/workflows/applydlfilter.py +87 -10
- rapidtide/workflows/applyppgproc.py +522 -0
- rapidtide/workflows/atlasaverage.py +210 -47
- rapidtide/workflows/atlastool.py +100 -3
- rapidtide/workflows/calcSimFuncMap.py +294 -64
- rapidtide/workflows/calctexticc.py +201 -9
- rapidtide/workflows/ccorrica.py +97 -4
- rapidtide/workflows/cleanregressor.py +168 -29
- rapidtide/workflows/delayvar.py +163 -10
- rapidtide/workflows/diffrois.py +81 -3
- rapidtide/workflows/endtidalproc.py +144 -4
- rapidtide/workflows/fdica.py +195 -15
- rapidtide/workflows/filtnifti.py +70 -3
- rapidtide/workflows/filttc.py +74 -3
- rapidtide/workflows/fitSimFuncMap.py +206 -48
- rapidtide/workflows/fixtr.py +73 -3
- rapidtide/workflows/gmscalc.py +113 -3
- rapidtide/workflows/happy.py +813 -201
- rapidtide/workflows/happy2std.py +144 -12
- rapidtide/workflows/happy_parser.py +149 -8
- rapidtide/workflows/histnifti.py +118 -2
- rapidtide/workflows/histtc.py +84 -3
- rapidtide/workflows/linfitfilt.py +117 -4
- rapidtide/workflows/localflow.py +328 -28
- rapidtide/workflows/mergequality.py +79 -3
- rapidtide/workflows/niftidecomp.py +322 -18
- rapidtide/workflows/niftistats.py +174 -4
- rapidtide/workflows/pairproc.py +88 -2
- rapidtide/workflows/pairwisemergenifti.py +85 -2
- rapidtide/workflows/parser_funcs.py +1421 -40
- rapidtide/workflows/physiofreq.py +137 -11
- rapidtide/workflows/pixelcomp.py +208 -5
- rapidtide/workflows/plethquality.py +103 -21
- rapidtide/workflows/polyfitim.py +151 -11
- rapidtide/workflows/proj2flow.py +75 -2
- rapidtide/workflows/rankimage.py +111 -4
- rapidtide/workflows/rapidtide.py +272 -15
- rapidtide/workflows/rapidtide2std.py +98 -2
- rapidtide/workflows/rapidtide_parser.py +109 -9
- rapidtide/workflows/refineDelayMap.py +143 -33
- rapidtide/workflows/refineRegressor.py +682 -93
- rapidtide/workflows/regressfrommaps.py +152 -31
- rapidtide/workflows/resamplenifti.py +85 -3
- rapidtide/workflows/resampletc.py +91 -3
- rapidtide/workflows/retrolagtcs.py +98 -6
- rapidtide/workflows/retroregress.py +165 -9
- rapidtide/workflows/roisummarize.py +173 -5
- rapidtide/workflows/runqualitycheck.py +71 -3
- rapidtide/workflows/showarbcorr.py +147 -4
- rapidtide/workflows/showhist.py +86 -2
- rapidtide/workflows/showstxcorr.py +160 -3
- rapidtide/workflows/showtc.py +159 -3
- rapidtide/workflows/showxcorrx.py +184 -4
- rapidtide/workflows/showxy.py +185 -15
- rapidtide/workflows/simdata.py +262 -36
- rapidtide/workflows/spatialfit.py +77 -2
- rapidtide/workflows/spatialmi.py +251 -27
- rapidtide/workflows/spectrogram.py +305 -32
- rapidtide/workflows/synthASL.py +154 -3
- rapidtide/workflows/tcfrom2col.py +76 -2
- rapidtide/workflows/tcfrom3col.py +74 -2
- rapidtide/workflows/tidepool.py +2972 -133
- rapidtide/workflows/utils.py +19 -14
- rapidtide/workflows/utils_doc.py +293 -0
- rapidtide/workflows/variabilityizer.py +116 -3
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/METADATA +10 -9
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/RECORD +141 -122
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
@@ -18,17 +18,43 @@
 #
 import argparse
 import sys
+from argparse import Namespace
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union

 import numpy as np
+from numpy.typing import NDArray

 import rapidtide.fit as tide_fit
 import rapidtide.io as tide_io
 from rapidtide.workflows.parser_funcs import is_valid_file


-def _get_parser():
+def _get_parser() -> Any:
     """
-    Argument parser for linfitfilt
+    Argument parser for linfitfilt.
+
+    This function constructs and returns an `argparse.ArgumentParser` object configured
+    for the `linfitfilt` command-line tool. It defines all required and optional arguments
+    needed to specify input files, regression parameters, and output options.
+
+    Returns
+    -------
+    argparse.ArgumentParser
+        Configured argument parser for the linfitfilt tool.
+
+    Notes
+    -----
+    The parser is designed to handle:
+    - Input NIfTI files (3D or 4D)
+    - EV (event-related) files for regression
+    - Data masking options
+    - Timecourse skipping for fitting
+    - Output filtering and saving options
+
+    Examples
+    --------
+    >>> parser = _get_parser()
+    >>> args = parser.parse_args()
     """
     parser = argparse.ArgumentParser(
         prog="linfitfilt",
@@ -76,7 +102,67 @@ def _get_parser():
     return parser


-def linfitfilt(
+def linfitfilt(
+    inputfile: Any,
+    numskip: Any,
+    outputroot: Any,
+    evfilename: Any,
+    datamaskname: Any,
+    saveall: bool = True,
+) -> None:
+    """
+    Perform linear regression fitting on 4D NIfTI data using specified regressors.
+
+    This function fits a linear model to each voxel's time series using the provided
+    regressors and saves the resulting fit coefficients, R-squared values, and filtered
+    data. It supports both NIfTI and text-based regressor files, as well as FSL par files
+    for global regressors.
+
+    Parameters
+    ----------
+    inputfile : str or pathlib.Path
+        Path to the input 4D NIfTI file containing the time series data.
+    numskip : int
+        Number of initial time points to skip when fitting the model.
+    outputroot : str
+        Root name for output NIfTI files (e.g., '_mean', '_fit0', '_R2', etc.).
+    evfilename : list of str or pathlib.Path
+        List of paths to files containing regressor time series. Can be NIfTI files,
+        text files, or FSL par files.
+    datamaskname : str or pathlib.Path, optional
+        Path to a NIfTI file defining the mask for data analysis. If None, a full mask
+        is assumed.
+    saveall : bool, optional
+        If True, save all intermediate and final output files. Default is True.
+
+    Returns
+    -------
+    None
+        This function does not return a value but saves multiple NIfTI files to disk.
+
+    Notes
+    -----
+    - The function performs ordinary least squares (OLS) regression for each voxel.
+    - The regressors are applied to the data after skipping the first `numskip` time points.
+    - Output files include:
+        - `<outputroot>_mean.nii.gz`: Mean of the data.
+        - `<outputroot>_fit{j}.nii.gz`: Fitted coefficients for regressor j.
+        - `<outputroot>_R2.nii.gz`: R-squared values for the model.
+        - `<outputroot>_totaltoremove.nii.gz`: Regressor contributions to be removed.
+        - `<outputroot>_trimmed.nii.gz`: Trimmed input data.
+        - `<outputroot>_filtered.nii.gz`: Final filtered data.
+
+    Examples
+    --------
+    >>> linfitfilt(
+    ...     inputfile='data.nii.gz',
+    ...     numskip=5,
+    ...     outputroot='output',
+    ...     evfilename=['regressor1.txt', 'regressor2.nii.gz'],
+    ...     datamaskname='mask.nii.gz',
+    ...     saveall=True
+    ... )
+    """
     # initialize some variables
     evdata = []
     evisnifti = []
@@ -238,7 +324,34 @@ def linfitfilt(inputfile, numskip, outputroot, evfilename, datamaskname, saveall
     tide_io.savetonifti(filtereddata, theheader, outputroot + "_filtered")


-def main():
+def main() -> None:
+    """
+    Main function to execute the linear fit and filter pipeline.
+
+    This function parses command line arguments and executes the linear fit and
+    filter process on the input data file. It handles argument parsing exceptions
+    and provides help output when needed.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    The function relies on `_get_parser()` to create and return an argument parser
+    with the necessary command line options. The parsed arguments are then passed
+    to the `linfitfilt` function for processing.
+
+    Examples
+    --------
+    >>> main()
+    # Executes the linear fit and filter pipeline with arguments from command line
+    """
     try:
         args = vars(_get_parser().parse_args())
     except SystemExit:
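
The new `linfitfilt` docstring above describes an ordinary least squares fit of the supplied regressors to every voxel time series, with the fitted contribution removed to produce the filtered output. A minimal NumPy sketch of that idea (variable names and the `lstsq`-based solver are illustrative assumptions, not rapidtide's internal code):

```python
import numpy as np

# Illustrative sketch of the per-voxel OLS fit described in the linfitfilt
# docstring.  Shapes and names are assumptions, not rapidtide's API.
rng = np.random.default_rng(0)
nvox, ntime, nreg = 100, 200, 2

data = rng.normal(size=(nvox, ntime))           # voxel time series (already trimmed by numskip)
design = rng.normal(size=(ntime, nreg))         # regressor timecourses (the "EV" files)
design = np.column_stack([np.ones(ntime), design])  # add an intercept column

# Solve the least squares problem for all voxels at once: data.T ~ design @ betas
betas, *_ = np.linalg.lstsq(design, data.T, rcond=None)   # shape (nreg + 1, nvox)

fitted = (design @ betas).T                     # per-voxel fitted contribution ("totaltoremove")
residual = data - fitted                        # the "filtered" data
ss_res = np.sum(residual**2, axis=1)
ss_tot = np.sum((data - data.mean(axis=1, keepdims=True)) ** 2, axis=1)
r2 = 1.0 - ss_res / ss_tot                      # per-voxel R-squared map
```
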
rapidtide/workflows/localflow.py CHANGED

@@ -19,8 +19,11 @@
 import argparse
 import copy
 import time
+from argparse import Namespace
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union

 import numpy as np
+from numpy.typing import NDArray
 from tqdm import tqdm

 import rapidtide.correlate as tide_corr
@@ -41,7 +44,36 @@ DEFAULT_AMPTHRESH = 0.3
 DEFAULT_MINLAGDIFF = 0.0


-def _get_parser():
+def _get_parser() -> Any:
+    """
+    Create and configure an argument parser for the localflow command-line tool.
+
+    This function sets up an `argparse.ArgumentParser` with a set of predefined
+    command-line arguments used to control the behavior of the local flow analysis
+    pipeline. It includes options for input/output file handling, reconstruction
+    parameters, filtering, windowing, and debugging.
+
+    Returns
+    -------
+    argparse.ArgumentParser
+        Configured argument parser with all required and optional arguments.
+
+    Notes
+    -----
+    The parser includes the following argument groups:
+    - Required positional arguments: `inputfilename` and `outputroot`
+    - Optional arguments for reconstruction parameters such as `npasses`, `radius`,
+      `minlagdiff`, `ampthresh`, `gausssigma`, `oversampfac`, `dofit`, `detrendorder`,
+      and `nosphere`
+    - Miscellaneous options including `noprogressbar` and `debug`
+
+    Examples
+    --------
+    >>> parser = _get_parser()
+    >>> args = parser.parse_args()
+    >>> print(args.inputfilename)
+    'input.nii.gz'
+    """
     # get the command line parameters
     parser = argparse.ArgumentParser(
         prog="localflow",
@@ -149,17 +181,79 @@ def _get_parser():


 def preprocdata(
-    fmridata,
-    themask,
-    theprefilter,
-    oversamplefactor,
-    Fs,
-    tr,
-    detrendorder=3,
-    windowfunc="hamming",
-    padseconds=0,
-    showprogressbar=True,
-):
+    fmridata: Any,
+    themask: Any,
+    theprefilter: Any,
+    oversamplefactor: Any,
+    Fs: Any,
+    tr: Any,
+    detrendorder: int = 3,
+    windowfunc: str = "hamming",
+    padseconds: int = 0,
+    showprogressbar: bool = True,
+) -> None:
+    """
+    Preprocess fMRI data by resampling, filtering, and normalizing voxel time series.
+
+    This function applies a series of preprocessing steps to fMRI data including:
+    resampling to a higher temporal resolution, applying a filter, and detrending
+    with correlation normalization. It processes each voxel individually based on
+    a provided mask.
+
+    Parameters
+    ----------
+    fmridata : array-like
+        4D fMRI data array with shape (nx, ny, nz, nt), where nx, ny, nz are spatial
+        dimensions and nt is the number of time points.
+    themask : array-like
+        3D binary mask array with the same spatial dimensions as `fmridata`. Voxels
+        with values > 0 are processed.
+    theprefilter : object
+        A filter object with an `apply` method that applies a temporal filter to the data.
+    oversamplefactor : float
+        Factor by which to oversample the data. Must be a positive number.
+    Fs : float
+        Sampling frequency of the original fMRI data in Hz.
+    tr : float
+        Repetition time (TR) of the fMRI acquisition in seconds.
+    detrendorder : int, optional
+        Order of the polynomial used for detrending. Default is 3.
+    windowfunc : str, optional
+        Window function used for correlation normalization. Default is "hamming".
+    padseconds : int, optional
+        Number of seconds to pad the resampled signal. Default is 0.
+    showprogressbar : bool, optional
+        Whether to display a progress bar during voxel processing. Default is True.
+
+    Returns
+    -------
+    tuple
+        A tuple containing:
+        - osfmridata_byvox : ndarray
+            Resampled and filtered fMRI data for processed voxels, shape (numvoxels, ostimepoints).
+        - ostimepoints : int
+            Number of time points in the oversampled data.
+        - oversamptr : float
+            Oversampled repetition time.
+        - numvoxelsprocessed : int
+            Total number of voxels processed.
+
+    Notes
+    -----
+    This function modifies the input data in-place during processing. The output includes
+    only the voxels that are marked as active in `themask`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from some_module import preprocdata
+    >>> fmri_data = np.random.rand(64, 64, 32, 100)
+    >>> mask = np.ones((64, 64, 32))
+    >>> filter_obj = SomeFilter()
+    >>> result = preprocdata(
+    ...     fmri_data, mask, filter_obj, oversamplefactor=2.0, Fs=2.0, tr=2.0
+    ... )
+    """
     numspatiallocs = fmridata.shape[0] * fmridata.shape[1] * fmridata.shape[2]
     timepoints = fmridata.shape[3]

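
The `preprocdata` docstring above mentions resampling each voxel timecourse to a higher temporal resolution before correlation. A minimal sketch of that oversampling step for a single timecourse, assuming plain linear interpolation rather than rapidtide's own resample module:

```python
import numpy as np

# Sketch of temporally oversampling one voxel timecourse by oversamplefactor,
# as described in the preprocdata docstring (illustration only).
Fs = 0.5                      # original sampling rate in Hz (TR = 2 s)
oversamplefactor = 2
tr = 1.0 / Fs

timepoints = 100
t_orig = np.arange(timepoints) / Fs
voxel_tc = np.sin(2.0 * np.pi * 0.05 * t_orig)       # toy timecourse

oversamptr = tr / oversamplefactor                    # new, finer TR
ostimepoints = timepoints * oversamplefactor
t_os = np.arange(ostimepoints) * oversamptr

os_voxel_tc = np.interp(t_os, t_orig, voxel_tc)       # upsampled timecourse
```
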
@@ -203,19 +297,75 @@ def preprocdata(


 def getcorrloc(
-    thedata,
-    idx1,
-    idx2,
-    Fs,
-    dofit=False,
-    lagmin=-12.5,
-    lagmax=12.5,
-    widthmax=100.0,
-    negsearch=15.0,
-    possearch=15.0,
-    padding=0,
-    debug=False,
-):
+    thedata: Any,
+    idx1: Any,
+    idx2: Any,
+    Fs: Any,
+    dofit: bool = False,
+    lagmin: float = -12.5,
+    lagmax: float = 12.5,
+    widthmax: float = 100.0,
+    negsearch: float = 15.0,
+    possearch: float = 15.0,
+    padding: int = 0,
+    debug: bool = False,
+) -> None:
+    """
+    Compute the cross-correlation peak between two time series and optionally fit it.
+
+    This function computes the cross-correlation between two time series selected
+    from `thedata` using indices `idx1` and `idx2`. It returns the maximum correlation
+    value, the corresponding time lag, a mask indicating success, and a failure reason.
+
+    Parameters
+    ----------
+    thedata : array_like
+        Input data array of shape (n_channels, n_samples).
+    idx1 : int or array_like
+        Index or indices of the first time series in `thedata`.
+    idx2 : int or array_like
+        Index or indices of the second time series in `thedata`.
+    Fs : float
+        Sampling frequency of the data.
+    dofit : bool, optional
+        If True, perform a peak fit on the cross-correlation function. Default is False.
+    lagmin : float, optional
+        Minimum lag to consider in seconds. Default is -12.5.
+    lagmax : float, optional
+        Maximum lag to consider in seconds. Default is 12.5.
+    widthmax : float, optional
+        Maximum width for fitting. Default is 100.0.
+    negsearch : float, optional
+        Search range for negative lags in seconds. Default is 15.0.
+    possearch : float, optional
+        Search range for positive lags in seconds. Default is 15.0.
+    padding : int, optional
+        Zero-padding for FFT-based correlation. Default is 0.
+    debug : bool, optional
+        If True, print debug information. Default is False.
+
+    Returns
+    -------
+    tuple
+        A tuple of (maxcorr, maxtime, maskval, failreason) where:
+        - maxcorr: Maximum correlation value.
+        - maxtime: Time lag corresponding to maxcorr in seconds.
+        - maskval: Mask indicating fit success (1 = success, 0 = failure).
+        - failreason: Numeric code indicating reason for fit failure (0 = no failure).
+
+    Notes
+    -----
+    - If either time series contains all zeros, the function returns (0.0, 0.0, 0, 0).
+    - The function uses `tide_corr.fastcorrelate` for correlation and `tide_fit.simfuncpeakfit`
+      for fitting when `dofit=True`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(10, 1000)
+    >>> corr, time, mask, reason = getcorrloc(data, 0, 1, Fs=100, dofit=True)
+    >>> print(f"Correlation: {corr}, Lag: {time}s")
+    """
     tc1 = thedata[idx1, :]
     tc2 = thedata[idx2, :]
     if np.any(tc1) != 0.0 and np.any(tc2) != 0.0:
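
`getcorrloc` is documented as returning the cross-correlation peak and its lag in seconds between two timecourses. A small self-contained sketch of that operation, with `np.correlate` and a plain argmax standing in for `tide_corr.fastcorrelate` and `tide_fit.simfuncpeakfit`:

```python
import numpy as np

# Minimal sketch of "find the cross-correlation peak and report its lag in
# seconds", the operation getcorrloc's docstring describes (illustration only).
rng = np.random.default_rng(0)
Fs = 10.0                                   # sampling frequency in Hz
tc1 = rng.normal(size=600)
tc2 = np.roll(tc1, 13)                      # tc2 is tc1 delayed by 13 samples (1.3 s)

tc1 = (tc1 - tc1.mean()) / (tc1.std() * tc1.size)
tc2 = (tc2 - tc2.mean()) / tc2.std()
xcorr = np.correlate(tc2, tc1, mode="full")             # normalized cross-correlation
lags = (np.arange(xcorr.size) - (tc1.size - 1)) / Fs    # lag axis in seconds

maxidx = int(np.argmax(xcorr))
maxcorr, maxtime = xcorr[maxidx], lags[maxidx]          # peak height and lag (~ +1.3 s)
```

With `dofit=True`, the function refines the discrete peak with a fit; the plain argmax here is only accurate to one sample (0.1 s at this sampling rate).
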
@@ -299,14 +449,90 @@ def getcorrloc(
         return 0.0, 0.0, 0, 0


-def xyz2index(x, y, z, xsize, ysize, zsize):
+def xyz2index(x: Any, y: Any, z: Any, xsize: Any, ysize: Any, zsize: Any) -> None:
+    """
+    Convert 3D coordinates to a linear index for a 3D array.
+
+    This function maps 3D coordinates (x, y, z) to a linear index assuming
+    row-major order storage of a 3D array with dimensions (xsize, ysize, zsize).
+
+    Parameters
+    ----------
+    x : Any
+        X-coordinate, should be between 0 and xsize-1 inclusive
+    y : Any
+        Y-coordinate, should be between 0 and ysize-1 inclusive
+    z : Any
+        Z-coordinate, should be between 0 and zsize-1 inclusive
+    xsize : Any
+        Size of the array along the x-axis
+    ysize : Any
+        Size of the array along the y-axis
+    zsize : Any
+        Size of the array along the z-axis
+
+    Returns
+    -------
+    int
+        Linear index if coordinates are valid (within bounds), -1 otherwise
+
+    Notes
+    -----
+    The function uses row-major order indexing: index = z + y * zsize + x * zsize * ysize
+
+    Examples
+    --------
+    >>> xyz2index(1, 2, 3, 10, 10, 10)
+    321
+    >>> xyz2index(15, 2, 3, 10, 10, 10)
+    -1
+    """
     if (0 <= x < xsize) and (0 <= y < ysize) and (0 <= z < zsize):
         return int(z) + int(y) * int(zsize) + int(x) * int(zsize * ysize)
     else:
         return -1


-def index2xyz(theindex, ysize, zsize):
+def index2xyz(theindex: Any, ysize: Any, zsize: Any) -> None:
+    """
+    Convert a linear index to 3D coordinates (x, y, z).
+
+    This function maps a 1D index to 3D coordinates within a 3D grid
+    with dimensions determined by ysize and zsize. The conversion assumes
+    row-major ordering where the index is distributed across the three
+    dimensions based on the product of the grid dimensions.
+
+    Parameters
+    ----------
+    theindex : Any
+        The linear index to be converted to 3D coordinates
+    ysize : Any
+        The size of the grid in the y dimension
+    zsize : Any
+        The size of the grid in the z dimension
+
+    Returns
+    -------
+    tuple
+        A tuple containing (x, y, z) coordinates corresponding to the input index
+
+    Notes
+    -----
+    The function assumes that the grid dimensions are such that the total
+    number of elements is sufficient to accommodate the given index.
+    The conversion follows the formula:
+    - x = index // (ysize * zsize)
+    - y = (index - x * ysize * zsize) // zsize
+    - z = index - x * ysize * zsize - y * zsize
+
+    Examples
+    --------
+    >>> index2xyz(10, 3, 4)
+    (0, 0, 10)
+
+    >>> index2xyz(25, 3, 4)
+    (2, 0, 1)
+    """
     x = theindex // int(zsize * ysize)
     theindex -= int(x) * int(zsize * ysize)
     y = theindex // int(zsize)
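
The two helpers above implement standard C-order (row-major) flattening for an array of shape (xsize, ysize, zsize). NumPy's `np.ravel_multi_index` and `np.unravel_index` compute the same mapping, which makes a convenient cross-check of the formula given in the docstring:

```python
import numpy as np

# Cross-check of the xyz <-> linear-index arithmetic above (illustration only).
xsize, ysize, zsize = 10, 10, 10
x, y, z = 1, 2, 3

index = z + y * zsize + x * zsize * ysize             # same formula as xyz2index
assert index == np.ravel_multi_index((x, y, z), (xsize, ysize, zsize))

# and back again, mirroring index2xyz
assert np.unravel_index(index, (xsize, ysize, zsize)) == (x, y, z)
```
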
@@ -315,7 +541,81 @@ def index2xyz(theindex, ysize, zsize):
     return x, y, z


-def localflow(args):
+def localflow(args: Any) -> None:
+    """
+    Perform local flow analysis on fMRI data.
+
+    This function processes fMRI data to compute local correlation and delay information
+    across spatial neighbors, followed by a reconstruction step to estimate time delays
+    in the signal propagation.
+
+    Parameters
+    ----------
+    args : Any
+        An object containing various arguments for processing, including:
+        - inputfilename : str
+            Path to the input NIfTI file.
+        - outputroot : str
+            Root name for output files.
+        - gausssigma : float
+            Sigma for Gaussian spatial smoothing. If less than 0, automatically computed.
+        - oversampfactor : int
+            Oversampling factor for preprocessing. If -1, computed automatically.
+        - detrendorder : int
+            Order of detrending to apply.
+        - windowfunc : str
+            Window function to use for preprocessing.
+        - padseconds : float
+            Padding in seconds for preprocessing.
+        - showprogressbar : bool
+            Whether to show progress bars.
+        - dofit : bool
+            Whether to fit the correlation.
+        - debug : bool
+            Whether to enable debug mode.
+        - radius : float
+            Neighborhood radius in mm.
+        - npasses : int
+            Number of reconstruction passes.
+        - ampthresh : float
+            Amplitude threshold for valid correlations.
+
+    Returns
+    -------
+    None
+        This function does not return a value but saves multiple NIfTI files and timing logs.
+
+    Notes
+    -----
+    The function performs the following steps:
+    1. Reads and preprocesses input fMRI data.
+    2. Applies spatial filtering if specified.
+    3. Prepares data for correlation analysis.
+    4. Identifies spatial neighbors within a specified radius.
+    5. Computes local correlations and delays.
+    6. Reconstructs time delays using iterative averaging.
+    7. Saves results as NIfTI files.
+
+    Examples
+    --------
+    >>> import argparse
+    >>> args = argparse.Namespace(
+    ...     inputfilename="fmri.nii.gz",
+    ...     outputroot="output",
+    ...     gausssigma=-1.0,
+    ...     oversampfactor=-1,
+    ...     detrendorder=1,
+    ...     windowfunc="hann",
+    ...     padseconds=10.0,
+    ...     showprogressbar=True,
+    ...     dofit=True,
+    ...     debug=False,
+    ...     radius=5.0,
+    ...     npasses=5,
+    ...     ampthresh=0.1
+    ... )
+    >>> localflow(args)
+    """
     # set default variable values
     displayplots = False

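
Step 4 of the `localflow` docstring ("identifies spatial neighbors within a specified radius") amounts to enumerating voxel offsets inside a sphere of `radius` mm. A rough sketch, with hypothetical voxel dimensions (this is not rapidtide's implementation):

```python
import numpy as np

# Sketch of enumerating voxel offsets within a spherical neighborhood of a
# given radius in mm (illustration only; voxel sizes are assumed values).
radius_mm = 5.0
voxsize_mm = np.array([2.0, 2.0, 2.0])           # voxel dimensions in mm

maxoffset = np.ceil(radius_mm / voxsize_mm).astype(int)
offsets = []
for dx in range(-maxoffset[0], maxoffset[0] + 1):
    for dy in range(-maxoffset[1], maxoffset[1] + 1):
        for dz in range(-maxoffset[2], maxoffset[2] + 1):
            dist = np.sqrt(np.sum((np.array([dx, dy, dz]) * voxsize_mm) ** 2))
            if 0.0 < dist <= radius_mm:          # exclude the center voxel itself
                offsets.append((dx, dy, dz))

print(f"{len(offsets)} neighbors within {radius_mm} mm")
```
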
rapidtide/workflows/mergequality.py CHANGED

@@ -17,6 +17,8 @@
 #
 #
 import argparse
+from argparse import Namespace
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union

 import pandas as pd

@@ -24,9 +26,29 @@ import rapidtide.io as tide_io
 import rapidtide.stats as tide_stats


-def _get_parser():
+def _get_parser() -> Any:
     """
-    Argument parser for mergequality
+    Argument parser for mergequality.
+
+    Creates and configures an argument parser for the mergequality tool that merges
+    rapidtide quality check data from multiple runs.
+
+    Returns
+    -------
+    argparse.ArgumentParser
+        Configured argument parser object with all required and optional arguments
+        for the mergequality tool.
+
+    Notes
+    -----
+    The returned parser is configured with:
+    - Required arguments: --input and --outputroot
+    - Optional arguments: --keyfile, --showhists, --addgraymetrics, --addwhitemetrics, --debug
+
+    Examples
+    --------
+    >>> parser = _get_parser()
+    >>> args = parser.parse_args(['--input', 'run1.csv', 'run2.csv', '--outputroot', 'merged'])
     """
     parser = argparse.ArgumentParser(
         prog="mergequality",
@@ -68,7 +90,61 @@ def _get_parser():
     return parser


-def mergequality(args):
+def mergequality(args: Any) -> None:
+    """
+    Merge quality metrics from multiple input JSON files into a single CSV file.
+
+    This function reads quality metrics from input JSON files and combines them
+    into a structured DataFrame. It supports optional inclusion of gray and white
+    matter specific metrics based on command-line arguments. Histograms for each
+    metric are also generated and saved.
+
+    Parameters
+    ----------
+    args : Any
+        An object containing the following attributes:
+        - input : list of str
+            List of input JSON file paths to process.
+        - outputroot : str
+            Base name for output CSV and histogram files.
+        - keyfile : str, optional
+            Path to a JSON file containing key metrics definitions. If None,
+            default metrics are used.
+        - addgraymetrics : bool
+            Whether to include gray matter specific metrics.
+        - addwhitemetrics : bool
+            Whether to include white matter specific metrics.
+        - showhists : bool
+            Whether to display histograms.
+        - debug : bool
+            Whether to print debug information.
+
+    Returns
+    -------
+    None
+        This function does not return a value but saves a CSV file and histogram
+        plots to disk.
+
+    Notes
+    -----
+    The function uses `tide_io.readdictfromjson` to read input JSON files and
+    `tide_stats.makeandsavehistogram` to generate histograms. Default metrics
+    are included for mask, regressor, lag, laggrad, strength, and MTT.
+    Gray and white matter metrics are conditionally added based on `args`.
+
+    Examples
+    --------
+    >>> args = type('Args', (), {
+    ...     'input': ['file1.json', 'file2.json'],
+    ...     'outputroot': 'output',
+    ...     'keyfile': None,
+    ...     'addgraymetrics': True,
+    ...     'addwhitemetrics': False,
+    ...     'showhists': True,
+    ...     'debug': False
+    ... })()
+    >>> mergequality(args)
+    """
     if args.debug:
         print(f"{args.input=}")
         print(f"{args.outputroot=}")