rapidtide 3.1-py3-none-any.whl → 3.1.1-py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- rapidtide/OrthoImageItem.py +4 -4
- rapidtide/_version.py +3 -3
- rapidtide/calccoherence.py +4 -4
- rapidtide/calcnullsimfunc.py +2 -5
- rapidtide/calcsimfunc.py +1 -4
- rapidtide/correlate.py +130 -127
- rapidtide/data/examples/src/testfmri +41 -9
- rapidtide/data/examples/src/testhappy +8 -8
- rapidtide/dlfilter.py +21 -22
- rapidtide/dlfiltertorch.py +18 -19
- rapidtide/filter.py +4 -4
- rapidtide/fit.py +18 -18
- rapidtide/happy_supportfuncs.py +84 -82
- rapidtide/helper_classes.py +2 -2
- rapidtide/io.py +88 -83
- rapidtide/linfitfiltpass.py +30 -49
- rapidtide/makelaggedtcs.py +11 -16
- rapidtide/maskutil.py +30 -14
- rapidtide/miscmath.py +2 -2
- rapidtide/patchmatch.py +10 -11
- rapidtide/peakeval.py +1 -3
- rapidtide/ppgproc.py +3 -3
- rapidtide/qualitycheck.py +2 -2
- rapidtide/refinedelay.py +12 -3
- rapidtide/refineregressor.py +20 -29
- rapidtide/scripts/showxcorr_legacy.py +7 -7
- rapidtide/scripts/stupidramtricks.py +15 -17
- rapidtide/simFuncClasses.py +2 -2
- rapidtide/simfuncfit.py +27 -41
- rapidtide/tests/test_cleanregressor.py +1 -2
- rapidtide/tests/test_fullrunhappy_v3.py +11 -5
- rapidtide/tests/test_fullrunhappy_v4.py +9 -1
- rapidtide/tests/test_getparsers.py +11 -3
- rapidtide/tests/test_refinedelay.py +0 -1
- rapidtide/tests/test_simroundtrip.py +8 -0
- rapidtide/tests/test_stcorrelate.py +3 -1
- rapidtide/util.py +6 -6
- rapidtide/voxelData.py +1 -1
- rapidtide/wiener.py +122 -16
- rapidtide/wiener2.py +3 -3
- rapidtide/workflows/applyppgproc.py +33 -15
- rapidtide/workflows/calcSimFuncMap.py +11 -22
- rapidtide/workflows/ccorrica.py +4 -2
- rapidtide/workflows/cleanregressor.py +6 -11
- rapidtide/workflows/delayvar.py +8 -13
- rapidtide/workflows/fitSimFuncMap.py +2 -9
- rapidtide/workflows/happy.py +6 -6
- rapidtide/workflows/happy_parser.py +36 -25
- rapidtide/workflows/pairproc.py +10 -2
- rapidtide/workflows/pixelcomp.py +1 -2
- rapidtide/workflows/rankimage.py +1 -1
- rapidtide/workflows/rapidtide.py +98 -63
- rapidtide/workflows/refineDelayMap.py +7 -6
- rapidtide/workflows/refineRegressor.py +6 -16
- rapidtide/workflows/regressfrommaps.py +9 -6
- rapidtide/workflows/retrolagtcs.py +5 -7
- rapidtide/workflows/retroregress.py +11 -17
- rapidtide/workflows/roisummarize.py +11 -10
- rapidtide/workflows/showarbcorr.py +2 -2
- rapidtide/workflows/showxcorrx.py +6 -6
- rapidtide/workflows/simdata.py +31 -31
- rapidtide/workflows/spatialmi.py +0 -1
- rapidtide/workflows/tidepool.py +6 -4
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/METADATA +8 -7
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/RECORD +69 -70
- rapidtide/wiener_doc.py +0 -255
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/WHEEL +0 -0
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/entry_points.txt +0 -0
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/licenses/LICENSE +0 -0
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/top_level.txt +0 -0
rapidtide/io.py
CHANGED
@@ -27,6 +27,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import nibabel as nib
 import numpy as np
 import pandas as pd
+from numpy.typing import NDArray

 from rapidtide.tests.utils import mse

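The only substantive change in io.py is typing: the module now imports NDArray from numpy.typing and uses it in annotations where bare np.ndarray was used before. A minimal sketch of the annotation style being adopted (illustrative only; the function name below is hypothetical, not part of rapidtide):

# Illustrative sketch of the numpy.typing.NDArray annotation style; not rapidtide code.
import numpy as np
from numpy.typing import NDArray


def zero_timecourse(npoints: int) -> NDArray[np.float64]:
    # NDArray[np.float64] carries the element dtype, which a bare np.ndarray annotation does not.
    return np.zeros(npoints, dtype=np.float64)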
@@ -34,7 +35,7 @@ from rapidtide.tests.utils import mse
 # ---------------------------------------- NIFTI file manipulation ---------------------------
 def readfromnifti(
 inputfile: str, headeronly: bool = False
-) -> Tuple[Any, Optional[
+) -> Tuple[Any, Optional[NDArray], Any, NDArray, NDArray]:
 """
 Open a nifti file and read in the various important parts

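For orientation, the pieces named in readfromnifti's new return annotation (image object, data array, header, dims array, sizes array) correspond to standard nibabel/NIfTI header fields; the dim and pixdim index conventions match the parseniftidims and parseniftisizes docstrings further down. A minimal, independent nibabel sketch, not rapidtide's implementation:

# Independent illustration of the quantities in readfromnifti's return type; not rapidtide code.
from typing import Any, Tuple

import nibabel as nib
import numpy as np
from numpy.typing import NDArray


def load_nifti_parts(path: str) -> Tuple[Any, NDArray, Any, NDArray, NDArray]:
    img = nib.load(path)                 # nibabel image object
    data = np.asarray(img.get_fdata())   # voxel data
    hdr = img.header                     # NIfTI header
    dims = np.asarray(hdr["dim"])        # dim[1:5] -> nx, ny, nz, nt
    sizes = np.asarray(hdr["pixdim"])    # pixdim[1:5] -> dx, dy, dz, tr
    return img, data, hdr, dims, sizes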
@@ -93,7 +94,7 @@ def readfromnifti(

 def readfromcifti(
 inputfile: str, debug: bool = False
-) -> Tuple[Any, Any,
+) -> Tuple[Any, Any, NDArray, Any, NDArray, NDArray, Optional[float]]:
 """
 Open a cifti file and read in the various important parts

@@ -200,7 +201,7 @@ def getciftitr(cifti_hdr: Any) -> Tuple[float, float]:


 # dims are the array dimensions along each axis
-def parseniftidims(thedims: np.ndarray) -> Tuple[int, int, int, int]:
+def parseniftidims(thedims: NDArray) -> Tuple[int, int, int, int]:
 """
 Split the dims array into individual elements

@@ -209,7 +210,7 @@ def parseniftidims(thedims: np.ndarray) -> Tuple[int, int, int, int]:

 Parameters
 ----------
-thedims :
+thedims : NDArray of int
 The NIfTI dimensions structure, where:
 - thedims[0] contains the data type
 - thedims[1] contains the number of points along x-axis (nx)
@@ -246,7 +247,7 @@ def parseniftidims(thedims: np.ndarray) -> Tuple[int, int, int, int]:


 # sizes are the mapping between voxels and physical coordinates
-def parseniftisizes(thesizes: np.ndarray) -> Tuple[float, float, float, float]:
+def parseniftisizes(thesizes: NDArray) -> Tuple[float, float, float, float]:
 """
 Split the size array into individual elements

@@ -255,7 +256,7 @@ def parseniftisizes(thesizes: np.ndarray) -> Tuple[float, float, float, float]:

 Parameters
 ----------
-thesizes :
+thesizes : NDArray of float
 The NIfTI voxel size structure containing scaling information.
 Expected to be an array where indices 1-4 correspond to
 x, y, z, and t scaling factors respectively.
@@ -291,7 +292,7 @@ def parseniftisizes(thesizes: np.ndarray) -> Tuple[float, float, float, float]:
 return thesizes[1], thesizes[2], thesizes[3], thesizes[4]


-def dumparraytonifti(thearray: np.ndarray, filename: str) -> None:
+def dumparraytonifti(thearray: NDArray, filename: str) -> None:
 """
 Save a numpy array to a NIFTI file with an identity affine transform.

@@ -301,7 +302,7 @@ def dumparraytonifti(thearray: np.ndarray, filename: str) -> None:

 Parameters
 ----------
-thearray :
+thearray : NDArray
 The data array to save. Can be 2D, 3D, or 4D array representing
 medical imaging data or other volumetric data.
 filename : str
@@ -338,7 +339,7 @@ def dumparraytonifti(thearray: np.ndarray, filename: str) -> None:
 savetonifti(thearray, outputheader, filename)


-def savetonifti(thearray:
+def savetonifti(thearray: NDArray, theheader: Any, thename: str, debug: bool = False) -> None:
 """
 Save a data array out to a nifti file

@@ -426,7 +427,7 @@ def savetonifti(thearray: np.ndarray, theheader: Any, thename: str, debug: bool
 output_nifti = None


-def niftifromarray(data: np.ndarray) -> Any:
+def niftifromarray(data: NDArray) -> Any:
 """
 Create a NIFTI image object from a numpy array with identity affine.

@@ -436,7 +437,7 @@ def niftifromarray(data: np.ndarray) -> Any:

 Parameters
 ----------
-data :
+data : NDArray
 The data array to convert to NIFTI format. Can be 2D, 3D, or 4D array
 representing image data with arbitrary data types.

@@ -470,7 +471,7 @@ def niftifromarray(data: np.ndarray) -> Any:
 return nib.Nifti1Image(data, affine=np.eye(4))


-def niftihdrfromarray(data: np.ndarray) -> Any:
+def niftihdrfromarray(data: NDArray) -> Any:
 """
 Create a NIFTI header from a numpy array with identity affine.

@@ -481,7 +482,7 @@ def niftihdrfromarray(data: np.ndarray) -> Any:

 Parameters
 ----------
-data :
+data : NDArray
 The data array to create a header for. The array can be of any shape and
 data type, but should typically represent medical imaging data.

@@ -508,10 +509,10 @@ def niftihdrfromarray(data: np.ndarray) -> Any:


 def makedestarray(
-destshape: Union[Tuple,
+destshape: Union[Tuple, NDArray],
 filetype: str = "nifti",
-rt_floattype:
-) -> Tuple[
+rt_floattype: np.dtype = np.float64,
+) -> Tuple[NDArray, int]:
 """
 Create a destination array for output data based on file type and shape.

@@ -524,8 +525,8 @@ def makedestarray(
 to time; for 'text', it is expected to be a 1D or 2D shape.
 filetype : str, optional
 Type of output file. Must be one of 'nifti', 'cifti', or 'text'. Default is 'nifti'.
-rt_floattype :
-Data type for the output array. Default is 'float64'.
+rt_floattype : np.dtype, optional
+Data type for the output array. Default is 'np.float64'.

 Returns
 -------
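makedestarray's signature now spells out the dtype default (np.dtype = np.float64) and the Tuple[NDArray, int] return. As a rough sketch of the general idea of preallocating a destination array of a given shape and float type (illustration only, not rapidtide's implementation):

# Rough sketch of preallocating a typed destination array; not rapidtide code.
from typing import Tuple, Union

import numpy as np
from numpy.typing import NDArray


def make_destination(
    destshape: Union[Tuple, NDArray],
    rt_floattype: np.dtype = np.float64,
) -> NDArray:
    # Accept either a tuple or an array-like of dimension lengths.
    shape = tuple(int(d) for d in np.atleast_1d(np.asarray(destshape)))
    return np.zeros(shape, dtype=rt_floattype)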
@@ -591,12 +592,12 @@ def makedestarray(


 def populatemap(
-themap:
+themap: NDArray,
 internalspaceshape: int,
-validvoxels: Optional[
-outmaparray:
+validvoxels: Optional[NDArray],
+outmaparray: NDArray,
 debug: bool = False,
-) ->
+) -> NDArray:
 """
 Populate an output array with data from a map, handling valid voxel masking.

@@ -605,16 +606,16 @@ def populatemap(

 Parameters
 ----------
-themap :
+themap : NDArray
 The source data to populate into the output array. Shape is either
 ``(internalspaceshape,)`` for 1D or ``(internalspaceshape, N)`` for 2D.
 internalspaceshape : int
 The total spatial dimension size, used to determine the expected shape
 of the input map and the output array.
-validvoxels :
+validvoxels : NDArray or None
 Indices of valid voxels to populate. If None, all voxels are populated.
 Shape should be ``(M,)`` where M is the number of valid voxels.
-outmaparray :
+outmaparray : NDArray
 The destination array to populate. Shape should be either ``(internalspaceshape,)``
 for 1D or ``(internalspaceshape, N)`` for 2D.
 debug : bool, optional
@@ -622,7 +623,7 @@ def populatemap(

 Returns
 -------
-
+NDArray
 The populated output array with the same shape as `outmaparray`.

 Notes
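The populatemap docstring above describes scattering the valid-voxel values back into a full-size spatial array. A minimal sketch of that masking pattern using numpy fancy indexing (illustration only, not rapidtide's implementation):

# Minimal sketch of the valid-voxel remapping described in populatemap's docstring; not rapidtide code.
from typing import Optional

import numpy as np
from numpy.typing import NDArray


def scatter_to_full(themap: NDArray, internalspaceshape: int, validvoxels: Optional[NDArray]) -> NDArray:
    out = np.zeros((internalspaceshape,) + themap.shape[1:], dtype=themap.dtype)
    if validvoxels is None:
        out[:] = themap              # every voxel already has a value
    else:
        out[validvoxels] = themap    # place the M valid-voxel values into the full array
    return out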
@@ -671,12 +672,12 @@ def populatemap(
 def savemaplist(
 outputname: str,
 maplist: List[Tuple],
-validvoxels: Optional[
-destshape: Union[Tuple,
+validvoxels: Optional[NDArray],
+destshape: Union[Tuple, NDArray],
 theheader: Any,
 bidsbasedict: Dict[str, Any],
 filetype: str = "nifti",
-rt_floattype:
+rt_floattype: np.dtype = np.float64,
 cifti_hdr: Optional[Any] = None,
 savejson: bool = True,
 debug: bool = False,
@@ -750,14 +751,18 @@ def savemaplist(
 filetype=filetype,
 rt_floattype=rt_floattype,
 )
+if debug:
+print("maplist:")
+print(maplist)
 for themap, mapsuffix, maptype, theunit, thedescription in maplist:
 # copy the data into the output array, remapping if warranted
 if debug:
+print(f"processing map {mapsuffix}")
 if validvoxels is None:
-print(f"savemaplist: saving {mapsuffix}
+print(f"savemaplist: saving {mapsuffix} of shape {themap.shape} to {destshape}")
 else:
 print(
-f"savemaplist: saving {mapsuffix}
+f"savemaplist: saving {mapsuffix} of shape {themap.shape} to {destshape} from {np.shape(validvoxels)[0]} valid voxels"
 )
 outmaparray = populatemap(
 themap,
@@ -808,7 +813,7 @@ def savemaplist(


 def savetocifti(
-thearray:
+thearray: NDArray,
 theciftiheader: Any,
 theniftiheader: Any,
 thename: str,
@@ -1130,7 +1135,7 @@ def niftimerge(
 axis: int = 3,
 returndata: bool = False,
 debug: bool = False,
-) -> Optional[Tuple[
+) -> Optional[Tuple[NDArray, Any]]:
 """
 Merge multiple NIFTI files along a specified axis.

@@ -1157,7 +1162,7 @@ def niftimerge(

 Returns
 -------
-tuple of (
+tuple of (NDArray, Any) or None
 If `returndata` is True, returns a tuple of:
 - `output_data`: The merged NIFTI data as a numpy array.
 - `infile_hdr`: The header from the last input file.
@@ -1374,7 +1379,7 @@ def getniftiroot(filename: str) -> str:
 return filename


-def fmriheaderinfo(niftifilename: str) -> Tuple[np.ndarray, np.ndarray]:
+def fmriheaderinfo(niftifilename: str) -> Tuple[NDArray, NDArray]:
 """
 Retrieve the header information from a nifti file.

@@ -1389,7 +1394,7 @@ def fmriheaderinfo(niftifilename: str) -> Tuple[np.ndarray, np.ndarray]:

 Returns
 -------
-tuple of (
+tuple of (NDArray, NDArray)
 A tuple containing:
 - tr : float
 The repetition time, in seconds
@@ -1495,7 +1500,7 @@ def checkspacematch(hdr1: Any, hdr2: Any, tolerance: float = 1.0e-3) -> bool:
 return dimmatch and resmatch


-def checkspaceresmatch(sizes1:
+def checkspaceresmatch(sizes1: NDArray, sizes2: NDArray, tolerance: float = 1.0e-3) -> bool:
 """
 Check the spatial pixdims of two nifti files to determine if they have the same resolution (within tolerance)

@@ -1547,17 +1552,17 @@ def checkspaceresmatch(sizes1: np.ndarray, sizes2: np.ndarray, tolerance: float
 return True


-def checkspacedimmatch(dims1:
+def checkspacedimmatch(dims1: NDArray, dims2: NDArray, verbose: bool = False) -> bool:
 """
 Check the dimension arrays of two nifti files to determine if they cover the same number of voxels in each dimension.

 Parameters
 ----------
-dims1 :
+dims1 : NDArray
 The dimension array from the first nifti file. Should contain spatial dimensions
 (typically the first dimension is the number of time points, and dimensions 1-3
 represent x, y, z spatial dimensions).
-dims2 :
+dims2 : NDArray
 The dimension array from the second nifti file. Should contain spatial dimensions
 (typically the first dimension is the number of time points, and dimensions 1-3
 represent x, y, z spatial dimensions).
@@ -1600,8 +1605,8 @@ def checkspacedimmatch(dims1: np.ndarray, dims2: np.ndarray, verbose: bool = Fal


 def checktimematch(
-dims1:
-dims2:
+dims1: NDArray,
+dims2: NDArray,
 numskip1: int = 0,
 numskip2: int = 0,
 verbose: bool = False,
@@ -1615,9 +1620,9 @@ def checktimematch(

 Parameters
 ----------
-dims1 :
+dims1 : NDArray
 The dimension array from the first NIfTI file. The time dimension is expected to be at index 4.
-dims2 :
+dims2 : NDArray
 The dimension array from the second NIfTI file. The time dimension is expected to be at index 4.
 numskip1 : int, optional
 Number of timepoints skipped at the beginning of file 1. Default is 0.
@@ -1670,8 +1675,8 @@ def checktimematch(


 def checkdatamatch(
-data1:
-data2:
+data1: NDArray,
+data2: NDArray,
 absthresh: float = 1e-12,
 msethresh: float = 1e-12,
 debug: bool = False,
@@ -1684,9 +1689,9 @@ def checkdatamatch(

 Parameters
 ----------
-data1 :
+data1 : NDArray
 First data array to compare
-data2 :
+data2 : NDArray
 Second data array to compare
 absthresh : float, optional
 Absolute difference threshold. Default is 1e-12
@@ -1853,7 +1858,7 @@ def checkifparfile(filename: str) -> bool:
 return False


-def readconfounds(filename: str, debug: bool = False) -> Dict[str, np.ndarray]:
+def readconfounds(filename: str, debug: bool = False) -> Dict[str, NDArray]:
 """
 Read confound regressors from a text file.

@@ -1870,7 +1875,7 @@ def readconfounds(filename: str, debug: bool = False) -> Dict[str, np.ndarray]:

 Returns
 -------
-dict of str to
+dict of str to NDArray
 Dictionary mapping confound names to timecourse arrays. Each key is a confound name
 and each value is a 1D numpy array containing the timecourse data for that confound.

@@ -1907,7 +1912,7 @@ def readconfounds(filename: str, debug: bool = False) -> Dict[str, np.ndarray]:
 return theconfounddict


-def readparfile(filename: str) -> Dict[str, np.ndarray]:
+def readparfile(filename: str) -> Dict[str, NDArray]:
 """
 Read motion parameters from an FSL-style .par file.

@@ -1923,7 +1928,7 @@ def readparfile(filename: str) -> Dict[str, np.ndarray]:

 Returns
 -------
-dict of
+dict of NDArray
 Dictionary containing the motion parameters as timecourses. Keys are:
 - 'X': translation along x-axis
 - 'Y': translation along y-axis
@@ -2098,7 +2103,7 @@ def readmotion(filename: str, tr: float = 1.0, colspec: Optional[str] = None) ->
 return motiondict


-def sliceinfo(slicetimes:
+def sliceinfo(slicetimes: NDArray, tr: float) -> Tuple[int, float, NDArray]:
 """
 Find out what slicetimes we have, their spacing, and which timepoint each slice occurs at. This assumes
 uniform slice time spacing, but supports any slice acquisition order and multiband acquisitions.
@@ -2142,7 +2147,7 @@ def sliceinfo(slicetimes: np.ndarray, tr: float) -> Tuple[int, float, np.ndarray
 return numsteps, minstep, sliceoffsets


-def getslicetimesfromfile(slicetimename: str) -> Tuple[np.ndarray, bool, bool]:
+def getslicetimesfromfile(slicetimename: str) -> Tuple[NDArray, bool, bool]:
 """
 Read slice timing information from a file.

@@ -2158,9 +2163,9 @@ def getslicetimesfromfile(slicetimename: str) -> Tuple[np.ndarray, bool, bool]:

 Returns
 -------
-tuple of (
+tuple of (NDArray, bool, bool)
 A tuple containing:
-- slicetimes :
+- slicetimes : NDArray
 Array of slice timing values as floats
 - normalizedtotr : bool
 True if the slice times were normalized to TR (time resolution),
@@ -2275,7 +2280,7 @@ def writedicttojson(thedict: Dict[str, Any], thefilename: str) -> None:
 The function automatically converts numpy data types:
 - numpy.integer → Python int
 - numpy.floating → Python float
--
+- NDArray → Python list

 The output JSON file will be formatted with:
 - Sorted keys
@@ -2356,7 +2361,7 @@ def readdictfromjson(inputfilename: str) -> Dict[str, Any]:
 return {}


-def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str,
+def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str, NDArray]:
 """
 Read time series out of an fmriprep confounds tsv file

@@ -2370,7 +2375,7 @@ def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str, n

 Returns
 -------
-dict of str to
+dict of str to NDArray
 Dictionary containing all the timecourses in the file, keyed by the
 column names from the first row of the tsv file. Each value is a
 numpy array containing the time series data for that column.
@@ -2413,7 +2418,7 @@ def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str, n
 return confounddict


-def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, np.ndarray]:
+def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, NDArray]:
 """
 Read time series out of an unlabelled csv file.

@@ -2431,7 +2436,7 @@ def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, np.ndarray]:

 Returns
 -------
-dict of str to
+dict of str to NDArray
 A dictionary where keys are column names (or generated names like "col0", "col1", etc.)
 and values are NumPy arrays containing the time series data. If the file does not exist
 or is invalid, an empty dictionary is returned.
@@ -2494,7 +2499,7 @@ def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, np.ndarray]:
 return timeseriesdict


-def readfslmat(inputfilename: str, debug: bool = False) -> Dict[str,
+def readfslmat(inputfilename: str, debug: bool = False) -> Dict[str, NDArray]:
 """
 Read time series out of an FSL design.mat file

@@ -2507,7 +2512,7 @@ def readfslmat(inputfilename: str, debug: bool = False) -> Dict[str, np.ndarray]

 Returns
 -------
-dict of
+dict of NDArray
 Dictionary containing all the timecourses in the file, keyed by column names.
 If the first row exists, it is used as keys; otherwise, keys are generated as
 "col1, col2...colN". Returns an empty dictionary if file does not exist or is not valid.
@@ -2691,7 +2696,7 @@ def makecolname(colnum: int, startcol: int) -> str:

 def writebidstsv(
 outputfileroot: str,
-data:
+data: NDArray,
 samplerate: float,
 extraheaderinfo: Optional[Dict[str, Any]] = None,
 compressed: bool = True,
@@ -2719,7 +2724,7 @@ def writebidstsv(
 outputfileroot : str
 Root name of the output files (without extension). The function will write
 ``<outputfileroot>.tsv`` or ``<outputfileroot>.tsv.gz`` and ``<outputfileroot>.json``.
-data :
+data : NDArray
 Time series data to be written. If 1D, it will be reshaped to (1, n_timesteps).
 Shape should be (n_channels, n_timesteps).
 samplerate : float
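Per the docstring above, writebidstsv expects data shaped (n_channels, n_timesteps) and reshapes a 1D input to (1, n_timesteps). A sketch of that shape normalization (illustration only, not rapidtide's implementation):

# Sketch of the (n_channels, n_timesteps) shape convention described above; not rapidtide code.
import numpy as np
from numpy.typing import NDArray


def as_channels_by_time(data: NDArray) -> NDArray:
    if data.ndim == 1:
        return data.reshape((1, -1))   # single channel
    return data                        # already (n_channels, n_timesteps)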
@@ -2890,7 +2895,7 @@ def writebidstsv(

 def readvectorsfromtextfile(
 fullfilespec: str, onecol: bool = False, debug: bool = False
-) -> Tuple[Optional[float], Optional[float], Optional[List[str]],
+) -> Tuple[Optional[float], Optional[float], Optional[List[str]], NDArray, Optional[bool], str]:
 """
 Read time series data from a text-based file (TSV, CSV, MAT, or BIDS-style TSV).

@@ -3081,7 +3086,7 @@ def readbidstsv(
 Optional[float],
 Optional[float],
 Optional[List[str]],
-Optional[
+Optional[NDArray],
 Optional[bool],
 Optional[str],
 ]:
@@ -3115,7 +3120,7 @@ def readbidstsv(
 Time of first point in seconds.
 columns : list of str
 Names of the timecourses contained in the file.
-data :
+data : NDArray, optional
 2D array of timecourses from the file. Returns None if file does not exist or is invalid.
 is_compressed : bool
 Indicates whether the TSV file was gzipped.
@@ -3295,7 +3300,7 @@ def readcolfrombidstsv(
 columnname: Optional[str] = None,
 neednotexist: bool = False,
 debug: bool = False,
-) -> Tuple[Optional[float], Optional[float], Optional[
+) -> Tuple[Optional[float], Optional[float], Optional[NDArray]]:
 """
 Read a specific column from a BIDS TSV file.

@@ -3327,7 +3332,7 @@ def readcolfrombidstsv(
 Sampling rate extracted from the file, or None if no valid data found
 - starttime : float or None
 Start time extracted from the file, or None if no valid data found
-- data :
+- data : NDArray or None
 The extracted column data as a 1D array, or None if no valid data found

 Notes
@@ -3712,7 +3717,7 @@ def processnamespec(
 return thename, thevals


-def readcolfromtextfile(inputfilespec: str) -> np.ndarray:
+def readcolfromtextfile(inputfilespec: str) -> NDArray:
 """
 Read columns from a text file and return as numpy array.

@@ -3739,7 +3744,7 @@ def readcolfromtextfile(inputfilespec: str) -> np.ndarray:

 Returns
 -------
-
+NDArray
 Numpy array containing the read data. Shape depends on the number of
 columns specified and the number of rows in the input file.

@@ -3783,7 +3788,7 @@ def readvecs(
 numskip: int = 0,
 debug: bool = False,
 thedtype: type = float,
-) ->
+) -> NDArray:
 """
 Read vectors from a text file and return them as a transposed numpy array.

@@ -3805,7 +3810,7 @@ def readvecs(

 Returns
 -------
-
+NDArray
 A 2D numpy array where each row corresponds to a vector read from the file.
 The array is transposed such that each column represents a vector.

@@ -3858,7 +3863,7 @@ def readvecs(
 return theoutarray


-def readvec(inputfilename: str, numskip: int = 0) -> np.ndarray:
+def readvec(inputfilename: str, numskip: int = 0) -> NDArray:
 """
 Read a timecourse from a text or BIDS TSV file.

@@ -3881,7 +3886,7 @@ def readvec(inputfilename: str, numskip: int = 0) -> np.ndarray:
 -------
 tuple
 A tuple containing:
--
+- NDArray: The read timecourse data
 - float, optional: Minimum value in the data
 - float, optional: Maximum value in the data

@@ -3913,7 +3918,7 @@ def readtc(
 colnum: Optional[int] = None,
 colname: Optional[str] = None,
 debug: bool = False,
-) -> Tuple[
+) -> Tuple[NDArray, Optional[float], Optional[float]]:
 """
 Read timecourse data from a file, supporting BIDS TSV and other formats.

@@ -3937,7 +3942,7 @@ def readtc(

 Returns
 -------
-timecourse :
+timecourse : NDArray
 The timecourse data as a 1D numpy array.
 inputfreq : float or None
 Sampling frequency (Hz) if available from the file metadata. Default is None.
@@ -4154,7 +4159,7 @@ def readdict(inputfilename: str) -> Dict[str, Any]:
 return {}


-def writevec(thevec: np.ndarray, outputfile: str, lineend: str = "") -> None:
+def writevec(thevec: NDArray, outputfile: str, lineend: str = "") -> None:
 """
 Write a vector to a text file, one value per line.

@@ -4206,7 +4211,7 @@ def writevec(thevec: np.ndarray, outputfile: str, lineend: str = "") -> None:


 def writevectorstotextfile(
-thevecs:
+thevecs: NDArray,
 outputfile: str,
 samplerate: float = 1.0,
 starttime: float = 0.0,
@@ -4226,7 +4231,7 @@ def writevectorstotextfile(

 Parameters
 ----------
-thevecs :
+thevecs : NDArray
 Data vectors to write. Should be a 2D array where each row is a vector.
 outputfile : str
 Output file path. The extension determines the file format if not explicitly specified.
@@ -4313,7 +4318,7 @@ def writevectorstotextfile(

 # rewritten to guarantee file closure, combines writenpvec and writenpvecs
 def writenpvecs(
-thevecs:
+thevecs: NDArray,
 outputfile: str,
 ascsv: bool = False,
 headers: Optional[List[str]] = None,
@@ -4328,7 +4333,7 @@ def writenpvecs(

 Parameters
 ----------
-thevecs :
+thevecs : NDArray
 A 1D or 2D numpy array containing the data to be written. If 1D,
 the array is written as a single column. If 2D, each column is
 written as a separate line in the output file.