rapidtide 3.0.10__py3-none-any.whl → 3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rapidtide/Colortables.py +492 -27
- rapidtide/OrthoImageItem.py +1053 -47
- rapidtide/RapidtideDataset.py +1533 -86
- rapidtide/_version.py +3 -3
- rapidtide/calccoherence.py +196 -29
- rapidtide/calcnullsimfunc.py +191 -40
- rapidtide/calcsimfunc.py +245 -42
- rapidtide/correlate.py +1210 -393
- rapidtide/data/examples/src/testLD +56 -0
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testdelayvar +0 -1
- rapidtide/data/examples/src/testfmri +19 -1
- rapidtide/data/examples/src/testglmfilt +5 -5
- rapidtide/data/examples/src/testhappy +30 -1
- rapidtide/data/examples/src/testppgproc +17 -0
- rapidtide/data/examples/src/testrolloff +11 -0
- rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
- rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
- rapidtide/decorators.py +91 -0
- rapidtide/dlfilter.py +2225 -108
- rapidtide/dlfiltertorch.py +4843 -0
- rapidtide/externaltools.py +327 -12
- rapidtide/fMRIData_class.py +79 -40
- rapidtide/filter.py +1899 -810
- rapidtide/fit.py +2004 -574
- rapidtide/genericmultiproc.py +93 -18
- rapidtide/happy_supportfuncs.py +2044 -171
- rapidtide/helper_classes.py +584 -43
- rapidtide/io.py +2363 -370
- rapidtide/linfitfiltpass.py +341 -75
- rapidtide/makelaggedtcs.py +211 -20
- rapidtide/maskutil.py +423 -53
- rapidtide/miscmath.py +827 -121
- rapidtide/multiproc.py +210 -22
- rapidtide/patchmatch.py +234 -33
- rapidtide/peakeval.py +32 -30
- rapidtide/ppgproc.py +2203 -0
- rapidtide/qualitycheck.py +352 -39
- rapidtide/refinedelay.py +422 -57
- rapidtide/refineregressor.py +498 -184
- rapidtide/resample.py +671 -185
- rapidtide/scripts/applyppgproc.py +28 -0
- rapidtide/simFuncClasses.py +1052 -77
- rapidtide/simfuncfit.py +260 -46
- rapidtide/stats.py +540 -238
- rapidtide/tests/happycomp +9 -0
- rapidtide/tests/test_dlfiltertorch.py +627 -0
- rapidtide/tests/test_findmaxlag.py +24 -8
- rapidtide/tests/test_fullrunhappy_v1.py +0 -2
- rapidtide/tests/test_fullrunhappy_v2.py +0 -2
- rapidtide/tests/test_fullrunhappy_v3.py +1 -0
- rapidtide/tests/test_fullrunhappy_v4.py +2 -2
- rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
- rapidtide/tests/test_simroundtrip.py +8 -8
- rapidtide/tests/utils.py +9 -8
- rapidtide/tidepoolTemplate.py +142 -38
- rapidtide/tidepoolTemplate_alt.py +165 -44
- rapidtide/tidepoolTemplate_big.py +189 -52
- rapidtide/util.py +1217 -118
- rapidtide/voxelData.py +684 -37
- rapidtide/wiener.py +19 -12
- rapidtide/wiener2.py +113 -7
- rapidtide/wiener_doc.py +255 -0
- rapidtide/workflows/adjustoffset.py +105 -3
- rapidtide/workflows/aligntcs.py +85 -2
- rapidtide/workflows/applydlfilter.py +87 -10
- rapidtide/workflows/applyppgproc.py +522 -0
- rapidtide/workflows/atlasaverage.py +210 -47
- rapidtide/workflows/atlastool.py +100 -3
- rapidtide/workflows/calcSimFuncMap.py +294 -64
- rapidtide/workflows/calctexticc.py +201 -9
- rapidtide/workflows/ccorrica.py +97 -4
- rapidtide/workflows/cleanregressor.py +168 -29
- rapidtide/workflows/delayvar.py +163 -10
- rapidtide/workflows/diffrois.py +81 -3
- rapidtide/workflows/endtidalproc.py +144 -4
- rapidtide/workflows/fdica.py +195 -15
- rapidtide/workflows/filtnifti.py +70 -3
- rapidtide/workflows/filttc.py +74 -3
- rapidtide/workflows/fitSimFuncMap.py +206 -48
- rapidtide/workflows/fixtr.py +73 -3
- rapidtide/workflows/gmscalc.py +113 -3
- rapidtide/workflows/happy.py +813 -201
- rapidtide/workflows/happy2std.py +144 -12
- rapidtide/workflows/happy_parser.py +149 -8
- rapidtide/workflows/histnifti.py +118 -2
- rapidtide/workflows/histtc.py +84 -3
- rapidtide/workflows/linfitfilt.py +117 -4
- rapidtide/workflows/localflow.py +328 -28
- rapidtide/workflows/mergequality.py +79 -3
- rapidtide/workflows/niftidecomp.py +322 -18
- rapidtide/workflows/niftistats.py +174 -4
- rapidtide/workflows/pairproc.py +88 -2
- rapidtide/workflows/pairwisemergenifti.py +85 -2
- rapidtide/workflows/parser_funcs.py +1421 -40
- rapidtide/workflows/physiofreq.py +137 -11
- rapidtide/workflows/pixelcomp.py +208 -5
- rapidtide/workflows/plethquality.py +103 -21
- rapidtide/workflows/polyfitim.py +151 -11
- rapidtide/workflows/proj2flow.py +75 -2
- rapidtide/workflows/rankimage.py +111 -4
- rapidtide/workflows/rapidtide.py +272 -15
- rapidtide/workflows/rapidtide2std.py +98 -2
- rapidtide/workflows/rapidtide_parser.py +109 -9
- rapidtide/workflows/refineDelayMap.py +143 -33
- rapidtide/workflows/refineRegressor.py +682 -93
- rapidtide/workflows/regressfrommaps.py +152 -31
- rapidtide/workflows/resamplenifti.py +85 -3
- rapidtide/workflows/resampletc.py +91 -3
- rapidtide/workflows/retrolagtcs.py +98 -6
- rapidtide/workflows/retroregress.py +165 -9
- rapidtide/workflows/roisummarize.py +173 -5
- rapidtide/workflows/runqualitycheck.py +71 -3
- rapidtide/workflows/showarbcorr.py +147 -4
- rapidtide/workflows/showhist.py +86 -2
- rapidtide/workflows/showstxcorr.py +160 -3
- rapidtide/workflows/showtc.py +159 -3
- rapidtide/workflows/showxcorrx.py +184 -4
- rapidtide/workflows/showxy.py +185 -15
- rapidtide/workflows/simdata.py +262 -36
- rapidtide/workflows/spatialfit.py +77 -2
- rapidtide/workflows/spatialmi.py +251 -27
- rapidtide/workflows/spectrogram.py +305 -32
- rapidtide/workflows/synthASL.py +154 -3
- rapidtide/workflows/tcfrom2col.py +76 -2
- rapidtide/workflows/tcfrom3col.py +74 -2
- rapidtide/workflows/tidepool.py +2972 -133
- rapidtide/workflows/utils.py +19 -14
- rapidtide/workflows/utils_doc.py +293 -0
- rapidtide/workflows/variabilityizer.py +116 -3
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/METADATA +10 -9
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/RECORD +141 -122
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
rapidtide/io.py
CHANGED
@@ -22,6 +22,7 @@ import operator as op
 import os
 import platform
 import sys
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import nibabel as nib
 import numpy as np
@@ -31,22 +32,45 @@ from rapidtide.tests.utils import mse
 
 
 # ---------------------------------------- NIFTI file manipulation ---------------------------
-def readfromnifti(
-
+def readfromnifti(
+    inputfile: str, headeronly: bool = False
+) -> Tuple[Any, Optional[np.ndarray], Any, np.ndarray, np.ndarray]:
+    """
+    Open a nifti file and read in the various important parts
 
     Parameters
     ----------
     inputfile : str
-        The name of the nifti file.
+        The name of the nifti file. Can be provided with or without file extension
+        (.nii or .nii.gz).
+    headeronly : bool, optional
+        If True, only read the header without loading data. Default is False.
 
     Returns
     -------
-
-
-
-
-
-
+    tuple
+        A tuple containing:
+
+        - nim : nifti image structure
+        - nim_data : array-like or None
+            The image data if headeronly=False, None otherwise
+        - nim_hdr : nifti header
+            The header information copied from the nifti image
+        - thedims : int array
+            The dimensions from the nifti header
+        - thesizes : float array
+            The pixel dimensions from the nifti header
+
+    Notes
+    -----
+    This function automatically detects the file extension (.nii or .nii.gz) if
+    not provided in the inputfile parameter. If neither .nii nor .nii.gz extension
+    is found, it will look for the file with these extensions in order.
+
+    Examples
+    --------
+    >>> nim, data, hdr, dims, sizes = readfromnifti('my_image')
+    >>> nim, data, hdr, dims, sizes = readfromnifti('my_image.nii.gz', headeronly=True)
     """
     if os.path.isfile(inputfile):
         inputfilename = inputfile
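A minimal usage sketch of the newly annotated `readfromnifti` (not part of the diff; it assumes rapidtide is installed, and the `tide_io` alias plus the `bold` file stem are illustrative):

```python
from rapidtide import io as tide_io

# Per the docstring above, the extension is optional, and headeronly=True
# skips loading the voxel data, so nim_data comes back as None.
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(
    "bold", headeronly=True
)
print(thedims[1:5])  # nx, ny, nz, nt from the NIfTI dim field
```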
@@ -67,13 +91,18 @@ def readfromnifti(inputfile, headeronly=False):
     return nim, nim_data, nim_hdr, thedims, thesizes
 
 
-def readfromcifti(
-
+def readfromcifti(
+    inputfile: str, debug: bool = False
+) -> Tuple[Any, Any, np.ndarray, Any, np.ndarray, np.ndarray, Optional[float]]:
+    """
+    Open a cifti file and read in the various important parts
 
     Parameters
     ----------
     inputfile : str
         The name of the cifti file.
+    debug : bool, optional
+        Enable debug output. Default is False
 
     Returns
     -------
@@ -110,7 +139,52 @@ def readfromcifti(inputfile, debug=False):
     return cifti, cifti_hdr, nifti_data, nifti_hdr, thedims, thesizes, timestep
 
 
-def getciftitr(cifti_hdr):
+def getciftitr(cifti_hdr: Any) -> Tuple[float, float]:
+    """
+    Extract the TR (repetition time) from a CIFTI header.
+
+    This function extracts timing information from a CIFTI header, specifically
+    the time between timepoints (TR) and the start time of the first timepoint.
+    It searches for a SeriesAxis in the CIFTI header matrix to extract this
+    information.
+
+    Parameters
+    ----------
+    cifti_hdr : Any
+        The CIFTI header object containing timing information. This should be
+        a valid CIFTI header that supports the matrix.mapped_indices and
+        matrix.get_axis methods.
+
+    Returns
+    -------
+    tuple of (float, float)
+        A tuple containing:
+        - timestep : float
+            The TR (time between timepoints) in seconds
+        - starttime : float
+            The start time of the first timepoint in seconds
+
+    Raises
+    ------
+    SystemExit
+        If no SeriesAxis is found in the CIFTI header, the function will
+        print an error message and exit the program.
+
+    Notes
+    -----
+    The function specifically looks for a SeriesAxis in the CIFTI header's
+    matrix. If multiple SeriesAxes exist, only the first one encountered
+    will be used. The timing information is extracted using the get_element()
+    method on the SeriesAxis object.
+
+    Examples
+    --------
+    >>> import nibabel as nib
+    >>> cifti_hdr = nib.load('file.cifti').header
+    >>> tr, start_time = getciftitr(cifti_hdr)
+    >>> print(f"TR: {tr} seconds, Start time: {start_time} seconds")
+    TR: 0.8 seconds, Start time: 0.0 seconds
+    """
     seriesaxis = None
     for theaxis in cifti_hdr.matrix.mapped_indices:
         if isinstance(cifti_hdr.matrix.get_axis(theaxis), nib.cifti2.SeriesAxis):
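The SeriesAxis lookup that `getciftitr` performs can be mirrored directly with nibabel. A sketch (illustrative only; it reads the public `start`/`step` attributes of `SeriesAxis` rather than the `get_element()` call the docstring mentions, and the filename is hypothetical):

```python
import nibabel as nib

cifti = nib.load("func.dtseries.nii")
tr, starttime = None, None
for idx in cifti.header.matrix.mapped_indices:
    axis = cifti.header.matrix.get_axis(idx)
    if isinstance(axis, nib.cifti2.SeriesAxis):
        # The SeriesAxis carries the sampling step (TR) and the start time.
        tr, starttime = axis.step, axis.start
        break
print(tr, starttime)
```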
@@ -126,40 +200,136 @@ def getciftitr(cifti_hdr):
 
 
 # dims are the array dimensions along each axis
-def parseniftidims(thedims):
-
+def parseniftidims(thedims: np.ndarray) -> Tuple[int, int, int, int]:
+    """
+    Split the dims array into individual elements
+
+    This function extracts the dimension sizes from a NIfTI dimensions array,
+    returning the number of points along each spatial and temporal dimension.
 
     Parameters
     ----------
-    thedims : int
-        The
+    thedims : numpy.ndarray of int
+        The NIfTI dimensions structure, where:
+        - thedims[0] contains the data type
+        - thedims[1] contains the number of points along x-axis (nx)
+        - thedims[2] contains the number of points along y-axis (ny)
+        - thedims[3] contains the number of points along z-axis (nz)
+        - thedims[4] contains the number of points along t-axis (nt)
 
     Returns
     -------
-    nx
-        Number of points along
+    nx : int
+        Number of points along the x-axis
+    ny : int
+        Number of points along the y-axis
+    nz : int
+        Number of points along the z-axis
+    nt : int
+        Number of points along the t-axis (time)
+
+    Notes
+    -----
+    The input array is expected to be a NIfTI dimensions array with at least 5 elements.
+    This function assumes the standard NIfTI dimension ordering where dimensions 1-4
+    correspond to spatial x, y, z, and temporal t dimensions respectively.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dims = np.array([0, 64, 64, 32, 100, 1, 1, 1])
+    >>> nx, ny, nz, nt = parseniftidims(dims)
+    >>> print(f"Dimensions: {nx} x {ny} x {nz} x {nt}")
+    Dimensions: 64 x 64 x 32 x 100
     """
     return int(thedims[1]), int(thedims[2]), int(thedims[3]), int(thedims[4])
 
 
 # sizes are the mapping between voxels and physical coordinates
-def parseniftisizes(thesizes):
-
+def parseniftisizes(thesizes: np.ndarray) -> Tuple[float, float, float, float]:
+    """
+    Split the size array into individual elements
+
+    This function extracts voxel size information from a NIfTI header structure
+    and returns the scaling factors for spatial dimensions (x, y, z) and time (t).
 
     Parameters
     ----------
-    thesizes : float
-        The
+    thesizes : np.ndarray of float
+        The NIfTI voxel size structure containing scaling information.
+        Expected to be an array where indices 1-4 correspond to
+        x, y, z, and t scaling factors respectively.
 
     Returns
     -------
-    dimx
-        Scaling from voxel number to physical coordinates
+    dimx : float
+        Scaling factor from voxel number to physical coordinates in x dimension
+    dimy : float
+        Scaling factor from voxel number to physical coordinates in y dimension
+    dimz : float
+        Scaling factor from voxel number to physical coordinates in z dimension
+    dimt : float
+        Scaling factor from voxel number to physical coordinates in t dimension
+
+    Notes
+    -----
+    The function assumes the input array follows the NIfTI standard where:
+    - Index 0: unused or padding
+    - Index 1: x-dimension scaling
+    - Index 2: y-dimension scaling
+    - Index 3: z-dimension scaling
+    - Index 4: t-dimension scaling
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> sizes = np.array([0.0, 2.0, 2.0, 2.0, 1.0])
+    >>> x, y, z, t = parseniftisizes(sizes)
+    >>> print(x, y, z, t)
+    2.0 2.0 2.0 1.0
     """
     return thesizes[1], thesizes[2], thesizes[3], thesizes[4]
 
 
-def dumparraytonifti(thearray, filename):
+def dumparraytonifti(thearray: np.ndarray, filename: str) -> None:
+    """
+    Save a numpy array to a NIFTI file with an identity affine transform.
+
+    This function saves a numpy array to a NIFTI file format with an identity
+    affine transformation matrix. The resulting NIFTI file will have unit
+    spacing and no rotation or translation.
+
+    Parameters
+    ----------
+    thearray : numpy.ndarray
+        The data array to save. Can be 2D, 3D, or 4D array representing
+        medical imaging data or other volumetric data.
+    filename : str
+        The output filename (without extension). The function will append
+        '.nii' or '.nii.gz' extension based on the nibabel library's
+        default behavior.
+
+    Returns
+    -------
+    None
+        This function does not return any value. It saves the array to disk
+        as a NIFTI file.
+
+    Notes
+    -----
+    - The function uses an identity affine matrix with dimensions 4x4
+    - The affine matrix represents unit spacing with no rotation or translation
+    - This is useful for simple data storage without spatial information
+    - The function relies on the `savetonifti` helper function for the actual
+      NIFTI file writing operation
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(64, 64, 64)
+    >>> dumparraytonifti(data, 'my_data')
+    >>> # Creates 'my_data.nii' file with identity affine transform
+    """
     outputaffine = np.zeros((4, 4), dtype=float)
     for i in range(4):
         outputaffine[i, i] = 1.0
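Both parsers are thin unpacking helpers over the raw header fields, so they compose naturally with `readfromnifti`. A sketch using synthetic `dim`/`pixdim` arrays:

```python
import numpy as np
from rapidtide import io as tide_io

dims = np.array([4, 64, 64, 32, 100, 1, 1, 1])  # NIfTI dim field
sizes = np.array([0.0, 3.0, 3.0, 3.0, 0.72])    # NIfTI pixdim field
nx, ny, nz, nt = tide_io.parseniftidims(dims)
dimx, dimy, dimz, tr = tide_io.parseniftisizes(sizes)
print(f"{nx} x {ny} x {nz} voxels, {nt} timepoints at TR = {tr}")
```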
@@ -168,8 +338,9 @@ def dumparraytonifti(thearray, filename):
     savetonifti(thearray, outputheader, filename)
 
 
-def savetonifti(thearray, theheader, thename, debug=False):
-
+def savetonifti(thearray: np.ndarray, theheader: Any, thename: str, debug: bool = False) -> None:
+    """
+    Save a data array out to a nifti file
 
     Parameters
     ----------
@@ -179,10 +350,12 @@ def savetonifti(thearray, theheader, thename, debug=False):
         A valid nifti header
     thename : str
         The name of the nifti file to save
+    debug : bool, optional
+        Enable debug output. Default is False
 
     Returns
     -------
-
+    None
     """
     outputaffine = theheader.get_best_affine()
     qaffine, qcode = theheader.get_qform(coded=True)
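`dumparraytonifti` is the quick path when no real geometry is needed, while `savetonifti` reuses an existing header. A sketch of the former (the output name is illustrative):

```python
import numpy as np
from rapidtide import io as tide_io

data = np.random.rand(16, 16, 8)
# Identity affine with unit voxel spacing - fine for scratch data that
# carries no registration information.
tide_io.dumparraytonifti(data, "scratchvolume")
```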
@@ -253,19 +426,142 @@ def savetonifti(thearray, theheader, thename, debug=False):
     output_nifti = None
 
 
-def niftifromarray(data):
+def niftifromarray(data: np.ndarray) -> Any:
+    """
+    Create a NIFTI image object from a numpy array with identity affine.
+
+    This function converts a numpy array into a NIFTI image object using an identity
+    affine transformation matrix. The resulting image has no spatial transformation
+    applied, meaning the voxel coordinates directly correspond to the array indices.
+
+    Parameters
+    ----------
+    data : numpy.ndarray
+        The data array to convert to NIFTI format. Can be 2D, 3D, or 4D array
+        representing image data with arbitrary data types.
+
+    Returns
+    -------
+    nibabel.Nifti1Image
+        The NIFTI image object with identity affine matrix. The returned object
+        can be saved to disk using nibabel's save functionality.
+
+    Notes
+    -----
+    - The affine matrix is set to identity (4x4), which means no spatial
+      transformation is applied
+    - This function is useful for creating NIFTI images from processed data
+      that doesn't require spatial registration
+    - The data array is copied into the NIFTI image object
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(64, 64, 32)
+    >>> img = niftifromarray(data)
+    >>> print(img.shape)
+    (64, 64, 32)
+    >>> print(img.affine)
+    [[1. 0. 0. 0.]
+     [0. 1. 0. 0.]
+     [0. 0. 1. 0.]
+     [0. 0. 0. 1.]]
+    """
     return nib.Nifti1Image(data, affine=np.eye(4))
 
 
-def niftihdrfromarray(data):
+def niftihdrfromarray(data: np.ndarray) -> Any:
+    """
+    Create a NIFTI header from a numpy array with identity affine.
+
+    This function creates a NIFTI header object from a numpy array by constructing
+    a minimal NIFTI image with an identity affine matrix and extracting its header.
+    The resulting header contains basic NIFTI metadata but no spatial transformation
+    information beyond the identity matrix.
+
+    Parameters
+    ----------
+    data : numpy.ndarray
+        The data array to create a header for. The array can be of any shape and
+        data type, but should typically represent medical imaging data.
+
+    Returns
+    -------
+    nibabel.Nifti1Header
+        The NIFTI header object containing metadata for the input data array.
+
+    Notes
+    -----
+    The returned header is a copy of the header from a NIFTI image with identity
+    affine matrix. This is useful for creating NIFTI headers without requiring
+    full NIFTI image files or spatial transformation information.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(64, 64, 64)
+    >>> header = niftihdrfromarray(data)
+    >>> print(header)
+    <nibabel.nifti1.Nifti1Header object at 0x...>
+    """
     return nib.Nifti1Image(data, affine=np.eye(4)).header.copy()
 
 
 def makedestarray(
-    destshape,
-    filetype="nifti",
-    rt_floattype="float64",
-):
+    destshape: Union[Tuple, np.ndarray],
+    filetype: str = "nifti",
+    rt_floattype: str = "float64",
+) -> Tuple[np.ndarray, int]:
+    """
+    Create a destination array for output data based on file type and shape.
+
+    Parameters
+    ----------
+    destshape : tuple or numpy array
+        Shape specification for the output array. For 'nifti' files, this is expected
+        to be a 3D or 4D shape; for 'cifti', it is expected to be a 2D or 3D shape
+        where the last dimension corresponds to spatial data and the second-to-last
+        to time; for 'text', it is expected to be a 1D or 2D shape.
+    filetype : str, optional
+        Type of output file. Must be one of 'nifti', 'cifti', or 'text'. Default is 'nifti'.
+    rt_floattype : str, optional
+        Data type for the output array. Default is 'float64'.
+
+    Returns
+    -------
+    outmaparray : numpy array
+        Pre-allocated output array with appropriate shape and dtype. The shape depends
+        on the `filetype` and `destshape`:
+        - For 'nifti': 1D array if 3D input, 2D array if 4D input.
+        - For 'cifti': 1D or 2D array depending on time dimension.
+        - For 'text': 1D or 2D array depending on time dimension.
+    internalspaceshape : int
+        The flattened spatial dimension size used to determine the shape of the output array.
+
+    Notes
+    -----
+    This function handles different file types by interpreting the input `destshape`
+    differently:
+    - For 'nifti', the spatial dimensions are multiplied together to form the
+      `internalspaceshape`, and the time dimension is inferred from the fourth
+      axis if present.
+    - For 'cifti', the last dimension is treated as spatial, and the second-to-last
+      as temporal if it exceeds 1.
+    - For 'text', the first dimension is treated as spatial, and the second as time.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from typing import Tuple, Union
+    >>> makedestarray((64, 64, 32), filetype="nifti")
+    (array([0., 0., ..., 0.]), 2097152)
+
+    >>> makedestarray((100, 50), filetype="text")
+    (array([0., 0., ..., 0.]), 100)
+
+    >>> makedestarray((100, 50, 20), filetype="cifti")
+    (array([[0., 0., ..., 0.], ..., [0., 0., ..., 0.]]), 20)
+    """
     if filetype == "text":
        try:
             internalspaceshape = destshape[0]
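A sketch of `makedestarray` on a 4D destination shape (not from the diff; the expected layout is inferred from the docstring above, so treat it as an assumption):

```python
from rapidtide import io as tide_io

# For a 4D NIfTI shape the docstring says space is flattened and time is
# kept as a second axis, so expect roughly (64 * 64 * 32, 100).
outmaparray, internalspaceshape = tide_io.makedestarray(
    (64, 64, 32, 100), filetype="nifti", rt_floattype="float64"
)
print(outmaparray.shape, internalspaceshape)
```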
@@ -295,12 +591,64 @@ def makedestarray(
 
 
 def populatemap(
-    themap,
-    internalspaceshape,
-    validvoxels,
-    outmaparray,
-    debug=False,
-):
+    themap: np.ndarray,
+    internalspaceshape: int,
+    validvoxels: Optional[np.ndarray],
+    outmaparray: np.ndarray,
+    debug: bool = False,
+) -> np.ndarray:
+    """
+    Populate an output array with data from a map, handling valid voxel masking.
+
+    This function populates an output array with data from a source map, optionally
+    masking invalid voxels. It supports both 1D and 2D output arrays.
+
+    Parameters
+    ----------
+    themap : numpy.ndarray
+        The source data to populate into the output array. Shape is either
+        ``(internalspaceshape,)`` for 1D or ``(internalspaceshape, N)`` for 2D.
+    internalspaceshape : int
+        The total spatial dimension size, used to determine the expected shape
+        of the input map and the output array.
+    validvoxels : numpy.ndarray or None
+        Indices of valid voxels to populate. If None, all voxels are populated.
+        Shape should be ``(M,)`` where M is the number of valid voxels.
+    outmaparray : numpy.ndarray
+        The destination array to populate. Shape should be either ``(internalspaceshape,)``
+        for 1D or ``(internalspaceshape, N)`` for 2D.
+    debug : bool, optional
+        Enable debug output. Default is False.
+
+    Returns
+    -------
+    numpy.ndarray
+        The populated output array with the same shape as `outmaparray`.
+
+    Notes
+    -----
+    - If `validvoxels` is provided, only the specified voxels are updated.
+    - The function modifies `outmaparray` in-place and returns it.
+    - For 2D arrays, the second dimension is preserved in the output.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> themap = np.array([1, 2, 3, 4])
+    >>> outmaparray = np.zeros(4)
+    >>> validvoxels = np.array([0, 2])
+    >>> result = populatemap(themap, 4, validvoxels, outmaparray)
+    >>> print(result)
+    [1. 0. 3. 0.]
+
+    >>> outmaparray = np.zeros((4, 2))
+    >>> result = populatemap(themap.reshape((4, 1)), 4, None, outmaparray)
+    >>> print(result)
+    [[1.]
+     [2.]
+     [3.]
+     [4.]]
+    """
     if len(outmaparray.shape) == 1:
         outmaparray[:] = 0.0
         if validvoxels is not None:
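The 1D case of `populatemap` reduces to a masked scatter. A plain-numpy mirror of the docstring example above:

```python
import numpy as np

themap = np.array([1.0, 2.0, 3.0, 4.0])
validvoxels = np.array([0, 2])
outmaparray = np.zeros(4)
# Copy only the valid voxels; everything else stays zero.
outmaparray[validvoxels] = themap[validvoxels]
print(outmaparray)  # [1. 0. 3. 0.]
```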
@@ -321,18 +669,82 @@ def populatemap(
 
 
 def savemaplist(
-    outputname,
-    maplist,
-    validvoxels,
-    destshape,
-    theheader,
-    bidsbasedict,
-    filetype="nifti",
-    rt_floattype="float64",
-    cifti_hdr=None,
-    savejson=True,
-    debug=False,
-):
+    outputname: str,
+    maplist: List[Tuple],
+    validvoxels: Optional[np.ndarray],
+    destshape: Union[Tuple, np.ndarray],
+    theheader: Any,
+    bidsbasedict: Dict[str, Any],
+    filetype: str = "nifti",
+    rt_floattype: str = "float64",
+    cifti_hdr: Optional[Any] = None,
+    savejson: bool = True,
+    debug: bool = False,
+) -> None:
+    """
+    Save a list of data maps to files with appropriate BIDS metadata.
+
+    This function saves a list of data maps to output files (NIfTI, CIFTI, or text)
+    using the specified file type and includes BIDS-compliant metadata in JSON sidecars.
+    It supports mapping data into a destination array, handling valid voxels, and
+    writing out the final files with appropriate naming and headers.
+
+    Parameters
+    ----------
+    outputname : str
+        Base name for output files (without extension).
+    maplist : list of tuples
+        List of (data, suffix, maptype, unit, description) tuples to save.
+        Each tuple corresponds to one map to be saved.
+    validvoxels : numpy array or None
+        Indices of valid voxels in the data. If None, all voxels are considered valid.
+    destshape : tuple or numpy array
+        Shape of the destination array into which data will be mapped.
+    theheader : nifti/cifti header
+        Header object for the output files (NIfTI or CIFTI).
+    bidsbasedict : dict
+        Base BIDS metadata to include in JSON sidecars.
+    filetype : str, optional
+        Output file type ('nifti', 'cifti', or 'text'). Default is 'nifti'.
+    rt_floattype : str, optional
+        Data type for output arrays. Default is 'float64'.
+    cifti_hdr : cifti header or None, optional
+        CIFTI header if filetype is 'cifti'. Default is None.
+    savejson : bool, optional
+        Whether to save JSON sidecar files. Default is True.
+    debug : bool, optional
+        Enable debug output. Default is False.
+
+    Returns
+    -------
+    None
+        This function does not return any value; it writes files to disk.
+
+    Notes
+    -----
+    - For CIFTI files, if the data is a series (multi-dimensional), it is saved with
+      the provided names; otherwise, it uses temporal offset and step information.
+    - The function uses `makedestarray` to prepare the output array and `populatemap`
+      to copy data into the array based on valid voxels.
+    - If `savejson` is True, a JSON file is created for each map with metadata
+      including unit and description.
+
+    Examples
+    --------
+    >>> savemaplist(
+    ...     outputname="sub-01_task-rest",
+    ...     maplist=[
+    ...         (data1, "stat", "stat", "z", "Statistical map"),
+    ...         (data2, "mask", "mask", None, "Binary mask"),
+    ...     ],
+    ...     validvoxels=valid_indices,
+    ...     destshape=(100, 100, 100),
+    ...     theheader=nifti_header,
+    ...     bidsbasedict={"Dataset": "MyDataset"},
+    ...     filetype="nifti",
+    ...     savejson=True,
+    ... )
+    """
     outmaparray, internalspaceshape = makedestarray(
         destshape,
         filetype=filetype,
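A sketch of assembling the `maplist` argument; the five-element tuple layout comes from the docstring above, while the suffixes and units here are illustrative:

```python
import numpy as np

lagtimes = np.zeros(1000)
mask = np.ones(1000)
# One (data, suffix, maptype, unit, description) tuple per output map.
maplist = [
    (lagtimes, "maxtime", "map", "second", "Lag time in seconds"),
    (mask, "mask", "mask", None, "Binary mask"),
]
```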
@@ -396,40 +808,66 @@ def savemaplist(
 
 
 def savetocifti(
-    thearray,
-    theciftiheader,
-    theniftiheader,
-    thename,
-    isseries=False,
-    names=["placeholder"],
-    start=0.0,
-    step=1.0,
-    debug=False,
-):
-
+    thearray: np.ndarray,
+    theciftiheader: Any,
+    theniftiheader: Any,
+    thename: str,
+    isseries: bool = False,
+    names: List[str] = ["placeholder"],
+    start: float = 0.0,
+    step: float = 1.0,
+    debug: bool = False,
+) -> None:
+    """
+    Save a data array out to a CIFTI file.
+
+    This function saves a given data array to a CIFTI file (either dense or parcellated,
+    scalar or series) based on the provided headers and parameters.
 
     Parameters
     ----------
     thearray : array-like
-        The data array to
+        The data array to be saved. The shape is expected to be (n_timepoints, n_vertices)
+        or (n_vertices,) for scalar data.
     theciftiheader : cifti header
-        A valid
+        A valid CIFTI header object containing axis information, including BrainModelAxis
+        or ParcelsAxis.
     theniftiheader : nifti header
-        A valid
+        A valid NIfTI header object to be used for setting the intent of the output file.
     thename : str
-        The name of the
-    isseries: bool
-        True
-
-
-
-
-
-
+        The base name of the output CIFTI file (without extension).
+    isseries : bool, optional
+        If True, the output will be a time series file (dtseries or ptseries).
+        If False, it will be a scalar file (dscalar or pscalar). Default is False.
+    names : list of str, optional
+        Names for scalar maps when `isseries` is False. Default is ['placeholder'].
+    start : float, optional
+        Start time in seconds for the time series. Default is 0.0.
+    step : float, optional
+        Time step in seconds for the time series. Default is 1.0.
+    debug : bool, optional
+        If True, print debugging information. Default is False.
 
     Returns
     -------
-
+    None
+        This function does not return anything; it saves the file to disk.
+
+    Notes
+    -----
+    The function automatically detects whether the input CIFTI header contains a
+    BrainModelAxis or a ParcelsAxis and builds the appropriate output structure.
+    The correct CIFTI file extension (e.g., .dtseries.nii, .dscalar.nii) is appended
+    to the output filename based on the `isseries` and parcellation flags.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import nibabel as nib
+    >>> data = np.random.rand(100, 50)
+    >>> cifti_header = nib.load('input.cifti').header
+    >>> nifti_header = nib.load('input.nii').header
+    >>> savetocifti(data, cifti_header, nifti_header, 'output', isseries=True)
     """
     if debug:
         print("savetocifti:", thename)
@@ -525,19 +963,38 @@ def savetocifti(
     nib.cifti2.save(img, thename + suffix)
 
 
-def checkifnifti(filename):
-
+def checkifnifti(filename: str) -> bool:
+    """
+    Check to see if a file name is a valid nifti name.
+
+    This function determines whether a given filename has a valid NIfTI file extension.
+    NIfTI files typically have extensions ".nii" or ".nii.gz" for compressed files.
 
     Parameters
     ----------
     filename : str
-        The file name
+        The file name to check for valid NIfTI extension.
 
     Returns
     -------
-
-    True if
-
+    bool
+        True if the filename ends with ".nii" or ".nii.gz", False otherwise.
+
+    Notes
+    -----
+    This function only checks the file extension and does not verify if the file actually exists
+    or contains valid NIfTI data. It performs a simple string matching operation.
+
+    Examples
+    --------
+    >>> checkifnifti("image.nii")
+    True
+    >>> checkifnifti("data.nii.gz")
+    True
+    >>> checkifnifti("scan.json")
+    False
+    >>> checkifnifti("volume.nii.gz")
+    True
     """
     if filename.endswith(".nii") or filename.endswith(".nii.gz"):
         return True
@@ -545,22 +1002,44 @@ def checkifnifti(filename):
     return False
 
 
-def niftisplitext(filename):
-
+def niftisplitext(filename: str) -> Tuple[str, str]:
+    """
+    Split nifti filename into name base and extension.
+
+    This function splits a NIfTI filename into its base name and extension components.
+    It handles NIfTI files that may have double extensions (e.g., '.nii.gz') by properly
+    combining the extensions.
 
     Parameters
     ----------
     filename : str
-        The file name
+        The NIfTI file name to split, which may contain double extensions like '.nii.gz'
 
     Returns
     -------
-
-
-
-
-
-
+    tuple[str, str]
+        A tuple containing:
+        - name : str
+            Base name of the NIfTI file (without extension)
+        - ext : str
+            Extension of the NIfTI file (including any additional extensions)
+
+    Notes
+    -----
+    This function is specifically designed for NIfTI files which commonly have
+    double extensions (e.g., '.nii.gz', '.nii.bz2'). It properly handles these
+    cases by combining the two extension components.
+
+    Examples
+    --------
+    >>> niftisplitext('image.nii.gz')
+    ('image', '.nii.gz')
+
+    >>> niftisplitext('data.nii')
+    ('data', '.nii')
+
+    >>> niftisplitext('volume.nii.bz2')
+    ('volume', '.nii.bz2')
     """
     firstsplit = os.path.splitext(filename)
     secondsplit = os.path.splitext(firstsplit[0])
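The filename helpers are pure string operations, so they can be exercised without any files on disk; the outputs follow the docstrings above:

```python
from rapidtide import io as tide_io

print(tide_io.checkifnifti("bold.nii.gz"))   # True
print(tide_io.checkifnifti("events.tsv"))    # False
print(tide_io.niftisplitext("bold.nii.gz"))  # ('bold', '.nii.gz')
```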
@@ -570,7 +1049,47 @@ def niftisplitext(filename):
     return firstsplit[0], firstsplit[1]
 
 
-def niftisplit(inputfile, outputroot, axis=3):
+def niftisplit(inputfile: str, outputroot: str, axis: int = 3) -> None:
+    """
+    Split a NIFTI file along a specified axis into separate files.
+
+    This function splits a NIFTI image along a given axis into multiple
+    individual NIFTI files, each corresponding to a slice along that axis.
+    The output files are named using the provided root name with zero-padded
+    slice indices.
+
+    Parameters
+    ----------
+    inputfile : str
+        Path to the input NIFTI file to be split.
+    outputroot : str
+        Base name for the output files. Each output file will be named
+        ``outputroot + str(i).zfill(4)`` where ``i`` is the slice index.
+    axis : int, optional
+        Axis along which to split the NIFTI file. Valid values are 0-4,
+        corresponding to the dimensions of the NIFTI file. Default is 3,
+        which corresponds to the time axis in 4D or 5D NIFTI files.
+
+    Returns
+    -------
+    None
+        This function does not return any value. It writes the split slices
+        as separate NIFTI files to disk.
+
+    Notes
+    -----
+    - The function supports both 4D and 5D NIFTI files.
+    - The header information is preserved for each output slice, with the
+      dimension along the split axis set to 1.
+    - Slice indices in the output file names are zero-padded to four digits
+      (e.g., ``0000``, ``0001``, etc.).
+
+    Examples
+    --------
+    >>> niftisplit('input.nii.gz', 'slice_', axis=2)
+    Splits the input NIFTI file along the third axis (axis=2) and saves
+    the resulting slices as ``slice_0000.nii.gz``, ``slice_0001.nii.gz``, etc.
+    """
     infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
     theheader = copy.deepcopy(infile_hdr)
     numpoints = infiledims[axis + 1]
@@ -604,7 +1123,60 @@ def niftisplit(inputfile, outputroot, axis=3):
         savetonifti(thisslice, theheader, outputroot + str(i).zfill(4))
 
 
-def niftimerge(
+def niftimerge(
+    inputlist: List[str],
+    outputname: str,
+    writetodisk: bool = True,
+    axis: int = 3,
+    returndata: bool = False,
+    debug: bool = False,
+) -> Optional[Tuple[np.ndarray, Any]]:
+    """
+    Merge multiple NIFTI files along a specified axis.
+
+    This function reads a list of NIFTI files, concatenates their data along a
+    specified axis, and optionally writes the result to a new NIFTI file. It can
+    also return the merged data and header for further processing.
+
+    Parameters
+    ----------
+    inputlist : list of str
+        List of input NIFTI file paths to merge.
+    outputname : str
+        Path for the merged output NIFTI file.
+    writetodisk : bool, optional
+        If True, write the merged data to disk. Default is True.
+    axis : int, optional
+        Axis along which to concatenate the data (0-4). Default is 3, which
+        corresponds to the time axis. The dimension of the output along this
+        axis will be the number of input files.
+    returndata : bool, optional
+        If True, return the merged data array and header. Default is False.
+    debug : bool, optional
+        If True, print debug information during execution. Default is False.
+
+    Returns
+    -------
+    tuple of (numpy.ndarray, Any) or None
+        If `returndata` is True, returns a tuple of:
+        - `output_data`: The merged NIFTI data as a numpy array.
+        - `infile_hdr`: The header from the last input file.
+        If `returndata` is False, returns None.
+
+    Notes
+    -----
+    - The function assumes all input files have compatible dimensions except
+      along the concatenation axis.
+    - If the input file has 3D dimensions, it is reshaped to 4D before concatenation.
+    - The output NIFTI header is updated to reflect the new dimension along the
+      concatenation axis.
+
+    Examples
+    --------
+    >>> input_files = ['file1.nii', 'file2.nii', 'file3.nii']
+    >>> niftimerge(input_files, 'merged.nii', axis=3, writetodisk=True)
+    >>> data, header = niftimerge(input_files, 'merged.nii', returndata=True)
+    """
     inputdata = []
     for thefile in inputlist:
         if debug:
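`niftisplit` and `niftimerge` are inverses along a shared axis. A round-trip sketch (filenames are illustrative, and the exact extension of the split outputs depends on how `savetonifti` names them):

```python
from rapidtide import io as tide_io

# Split a 4D file into per-timepoint volumes tp_0000, tp_0001, ...
tide_io.niftisplit("bold.nii.gz", "tp_", axis=3)
# ...then reassemble two of them along the time axis.
tide_io.niftimerge(["tp_0000.nii.gz", "tp_0001.nii.gz"], "pair.nii.gz", axis=3)
```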
@@ -625,7 +1197,40 @@ def niftimerge(inputlist, outputname, writetodisk=True, axis=3, returndata=False
     return output_data, infile_hdr
 
 
-def niftiroi(inputfile, outputfile, startpt, numpoints):
+def niftiroi(inputfile: str, outputfile: str, startpt: int, numpoints: int) -> None:
+    """
+    Extract a region of interest (ROI) from a NIFTI file along the time axis.
+
+    This function extracts a specified number of timepoints from a NIFTI file starting
+    at a given timepoint index. The extracted data is saved to a new NIFTI file.
+
+    Parameters
+    ----------
+    inputfile : str
+        Path to the input NIFTI file
+    outputfile : str
+        Path for the output ROI file
+    startpt : int
+        Starting timepoint index (0-based)
+    numpoints : int
+        Number of timepoints to extract
+
+    Returns
+    -------
+    None
+        This function does not return any value but saves the extracted ROI to the specified output file.
+
+    Notes
+    -----
+    The function handles both 4D and 5D NIFTI files. For 5D files, the function preserves
+    the fifth dimension in the output. The time dimension is reduced according to the
+    specified number of points.
+
+    Examples
+    --------
+    >>> niftiroi('input.nii', 'output.nii', 10, 50)
+    Extracts timepoints 10-59 from input.nii and saves to output.nii
+    """
     print(inputfile, outputfile, startpt, numpoints)
     infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
     theheader = copy.deepcopy(infile_hdr)
@@ -637,19 +1242,41 @@ def niftiroi(inputfile, outputfile, startpt, numpoints):
     savetonifti(output_data, theheader, outputfile)
 
 
-def checkifcifti(filename, debug=False):
-
+def checkifcifti(filename: str, debug: bool = False) -> bool:
+    """
+    Check to see if the specified file is CIFTI format
+
+    This function determines whether a given neuroimaging file is in CIFTI (Connectivity Information Format)
+    by examining the file's header information. CIFTI files have specific intent codes that distinguish them
+    from other neuroimaging formats like NIFTI.
 
     Parameters
     ----------
     filename : str
-        The file
+        The path to the file to be checked for CIFTI format
+    debug : bool, optional
+        Enable debug output to see intermediate processing information. Default is False
 
     Returns
     -------
-
-    True if the file header indicates this is a CIFTI file
-
+    bool
+        True if the file header indicates this is a CIFTI file (intent code between 3000 and 3099),
+        False otherwise
+
+    Notes
+    -----
+    CIFTI files are identified by their intent code, which should be in the range [3000, 3100) for valid
+    CIFTI format files. This function uses nibabel to load the file and examine its NIfTI header properties.
+
+    Examples
+    --------
+    >>> is_cifti = checkifcifti('my_data.nii.gz')
+    >>> print(is_cifti)
+    True
+
+    >>> is_cifti = checkifcifti('my_data.nii.gz', debug=True)
+    >>> print(is_cifti)
+    True
     """
     theimg = nib.load(filename)
     thedict = vars(theimg)
@@ -666,19 +1293,36 @@ def checkifcifti(filename, debug=False):
     return False
 
 
-def checkiftext(filename):
-
+def checkiftext(filename: str) -> bool:
+    """
+    Check to see if the specified filename ends in '.txt'
+
+    This function determines whether a given filename has a '.txt' extension
+    by checking if the string ends with the specified suffix.
 
     Parameters
     ----------
     filename : str
-        The file name
+        The file name to check for '.txt' extension
 
     Returns
     -------
-
-    True if filename ends with '.txt'
-
+    bool
+        True if filename ends with '.txt', False otherwise
+
+    Notes
+    -----
+    This function performs a case-sensitive check. For case-insensitive
+    checking, convert the filename to lowercase before calling this function.
+
+    Examples
+    --------
+    >>> checkiftext("document.txt")
+    True
+    >>> checkiftext("image.jpg")
+    False
+    >>> checkiftext("notes.TXT")
+    False
     """
     if filename.endswith(".txt"):
         return True
@@ -686,19 +1330,41 @@ def checkiftext(filename):
     return False
 
 
-def getniftiroot(filename):
-
+def getniftiroot(filename: str) -> str:
+    """
+    Strip a nifti filename down to the root with no extensions.
+
+    This function removes NIfTI file extensions (.nii or .nii.gz) from a filename,
+    returning only the root name without any extensions.
 
     Parameters
     ----------
     filename : str
-        The
+        The NIfTI filename to strip of extensions
 
     Returns
     -------
-
-    The
+    str
+        The filename without NIfTI extensions (.nii or .nii.gz)
+
+    Notes
+    -----
+    This function only removes the standard NIfTI extensions (.nii and .nii.gz).
+    For filenames without these extensions, the original filename is returned unchanged.
+
+    Examples
+    --------
+    >>> getniftiroot("sub-01_task-rest_bold.nii")
+    'sub-01_task-rest_bold'
+
+    >>> getniftiroot("anatomical.nii.gz")
+    'anatomical'
 
+    >>> getniftiroot("image.nii.gz")
+    'image'
+
+    >>> getniftiroot("data.txt")
+    'data.txt'
     """
     if filename.endswith(".nii"):
         return filename[:-4]
@@ -708,21 +1374,39 @@ def getniftiroot(filename):
     return filename
 
 
-def fmriheaderinfo(niftifilename):
-
+def fmriheaderinfo(niftifilename: str) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Retrieve the header information from a nifti file.
+
+    This function extracts repetition time and timepoints information from a NIfTI file header.
+    The repetition time is returned in seconds, and the number of timepoints is extracted
+    from the header dimensions.
 
     Parameters
    ----------
     niftifilename : str
-        The name of the
+        The name of the NIfTI file to read header information from.
 
     Returns
     -------
-
-
-
-
-
+    tuple of (np.ndarray, np.ndarray)
+        A tuple containing:
+        - tr : float
+            The repetition time, in seconds
+        - timepoints : int
+            The number of points along the time axis
+
+    Notes
+    -----
+    The function uses nibabel to load the NIfTI file and extracts header information
+    from the 'dim' and 'pixdim' fields. If the time unit is specified as milliseconds,
+    the repetition time is converted to seconds.
+
+    Examples
+    --------
+    >>> tr, timepoints = fmriheaderinfo('subject_01.nii.gz')
+    >>> print(f"Repetition time: {tr} seconds")
+    >>> print(f"Number of timepoints: {timepoints}")
     """
     nim = nib.load(niftifilename)
     hdr = nim.header.copy()
@@ -733,8 +1417,9 @@ def fmriheaderinfo(niftifilename):
     return thesizes, thedims
 
 
-def fmritimeinfo(niftifilename):
-
+def fmritimeinfo(niftifilename: str) -> Tuple[float, int]:
+    """
+    Retrieve the repetition time and number of timepoints from a nifti file
 
     Parameters
     ----------
@@ -748,6 +1433,18 @@ def fmritimeinfo(niftifilename):
     timepoints : int
         The number of points along the time axis
 
+    Notes
+    -----
+    This function extracts the repetition time (TR) and number of timepoints from
+    the NIfTI file header. The repetition time is extracted from the pixdim[4] field
+    and converted to seconds if necessary. The number of timepoints is extracted
+    from the dim[4] field.
+
+    Examples
+    --------
+    >>> tr, timepoints = fmritimeinfo('sub-01_task-rest_bold.nii.gz')
+    >>> print(f"Repetition time: {tr}s, Timepoints: {timepoints}")
+    Repetition time: 2.0s, Timepoints: 240
     """
     nim = nib.load(niftifilename)
     hdr = nim.header.copy()
@@ -761,8 +1458,9 @@ def fmritimeinfo(niftifilename):
     return tr, timepoints
 
 
-def checkspacematch(hdr1, hdr2, tolerance=1.0e-3):
-
+def checkspacematch(hdr1: Any, hdr2: Any, tolerance: float = 1.0e-3) -> bool:
+    """
+    Check the headers of two nifti files to determine if they cover the same volume at the same resolution (within tolerance)
 
     Parameters
     ----------
@@ -770,35 +1468,74 @@ def checkspacematch(hdr1, hdr2, tolerance=1.0e-3):
         The header of the first file
     hdr2 : nifti header structure
         The header of the second file
+    tolerance : float, optional
+        Tolerance for comparison. Default is 1.0e-3

     Returns
     -------
-
+    bool
         True if the spatial dimensions and resolutions of the two files match.

+    Notes
+    -----
+    This function performs two checks:
+    1. Resolution matching using `checkspaceresmatch` on pixel dimensions (`pixdim`)
+    2. Dimension matching using `checkspacedimmatch` on array dimensions (`dim`)
+
+    Examples
+    --------
+    >>> import nibabel as nib
+    >>> img1 = nib.load('file1.nii.gz')
+    >>> img2 = nib.load('file2.nii.gz')
+    >>> checkspacematch(img1.header, img2.header)
+    True
     """
     dimmatch = checkspaceresmatch(hdr1["pixdim"], hdr2["pixdim"], tolerance=tolerance)
     resmatch = checkspacedimmatch(hdr1["dim"], hdr2["dim"])
     return dimmatch and resmatch


-def checkspaceresmatch(sizes1, sizes2, tolerance=1.0e-3):
-    r"""Check the spatial pixdims of two nifti files to determine if they have the same resolution (within tolerance)
+def checkspaceresmatch(sizes1: np.ndarray, sizes2: np.ndarray, tolerance: float = 1.0e-3) -> bool:
+    """
+    Check the spatial pixdims of two nifti files to determine if they have the same resolution (within tolerance)

     Parameters
     ----------
-    sizes1 :
-        The size array from the first nifti file
-    sizes2 :
-        The size array from the second nifti file
-    tolerance: float
-        The fractional difference that is permissible between the two sizes that will still match
+    sizes1 : array_like
+        The size array from the first nifti file, typically containing spatial dimensions and pixel sizes
+    sizes2 : array_like
+        The size array from the second nifti file, typically containing spatial dimensions and pixel sizes
+    tolerance : float, optional
+        The fractional difference that is permissible between the two sizes that will still match,
+        default is 1.0e-3 (0.1%)

     Returns
     -------
-
-        True if the spatial resolutions of the two files match
-
+    bool
+        True if the spatial resolutions of the two files match within the specified tolerance,
+        False otherwise
+
+    Notes
+    -----
+    This function compares the spatial dimensions (indices 1-3) of two nifti file size arrays.
+    The comparison is performed using fractional difference: |sizes1[i] - sizes2[i]| / sizes1[i].
+    Only dimensions 1-3 are compared (typically x, y, z spatial dimensions).
+    The function returns False immediately upon finding any dimension that exceeds the tolerance.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> sizes1 = np.array([1.0, 2.0, 2.0, 2.0])
+    >>> sizes2 = np.array([1.0, 2.0005, 2.0005, 2.0005])
+    >>> checkspaceresmatch(sizes1, sizes2, tolerance=1e-3)
+    True
+
+    >>> sizes1 = np.array([1.0, 2.0, 2.0, 2.0])
+    >>> sizes2 = np.array([1.0, 2.5, 2.5, 2.5])
+    >>> checkspaceresmatch(sizes1, sizes2, tolerance=1e-3)
+    File spatial resolutions do not match within tolerance of 0.001
+    size of dimension 1: 2.0 != 2.5 (0.25 difference)
+    False
     """
     for i in range(1, 4):
         fracdiff = np.fabs(sizes1[i] - sizes2[i]) / sizes1[i]
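
The fractional-difference test described in the Notes above can be restated compactly; this sketch mirrors the loop visible in the diff (the function name is illustrative):

```python
import numpy as np

def res_within_tolerance(sizes1, sizes2, tolerance=1.0e-3):
    # compare voxel sizes for spatial axes 1-3 of a NIfTI pixdim array
    for i in range(1, 4):
        fracdiff = np.fabs(sizes1[i] - sizes2[i]) / sizes1[i]
        if fracdiff > tolerance:
            return False
    return True
```
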
@@ -810,20 +1547,47 @@ def checkspaceresmatch(sizes1, sizes2, tolerance=1.0e-3):
     return True


-def checkspacedimmatch(dims1, dims2, verbose=False):
-    r"""Check the dimension arrays of two nifti files to determine if they cover the same number of voxels in each dimension
+def checkspacedimmatch(dims1: np.ndarray, dims2: np.ndarray, verbose: bool = False) -> bool:
+    """
+    Check the dimension arrays of two nifti files to determine if they cover the same number of voxels in each dimension.

     Parameters
     ----------
-    dims1 :
-        The dimension array from the first nifti file
-    dims2 :
-        The dimension array from the second nifti file
+    dims1 : np.ndarray
+        The dimension array from the first nifti file. Element 0 is the number of
+        axes, and elements 1-3 represent the x, y, z spatial dimensions.
+    dims2 : np.ndarray
+        The dimension array from the second nifti file. Element 0 is the number of
+        axes, and elements 1-3 represent the x, y, z spatial dimensions.
+    verbose : bool, optional
+        Enable verbose output. Default is False. When True, prints detailed information
+        about dimension mismatches.

     Returns
     -------
-
-        True if the spatial dimensions of the two files match.
+    bool
+        True if the spatial dimensions (dimensions 1-3) of the two files match.
+        False if any of the spatial dimensions differ between the files.
+
+    Notes
+    -----
+    This function compares dimensions 1 through 3 (inclusive) of the two dimension arrays,
+    which typically represent the spatial dimensions (x, y, z) of the nifti files.
+    Element 0 of a NIfTI dim array holds the number of axes and is not compared.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dims1 = np.array([3, 64, 64, 32])
+    >>> dims2 = np.array([3, 64, 64, 32])
+    >>> checkspacedimmatch(dims1, dims2)
+    True
+
+    >>> dims3 = np.array([3, 64, 64, 33])
+    >>> checkspacedimmatch(dims1, dims3)
+    False
     """
     for i in range(1, 4):
         if dims1[i] != dims2[i]:
@@ -835,25 +1599,54 @@ def checkspacedimmatch(dims1, dims2, verbose=False):
     return True


-def checktimematch(dims1, dims2, numskip1=0, numskip2=0, verbose=False):
-    r"""Check the dimensions of two nifti files to determine if they cover the same number of timepoints
+def checktimematch(
+    dims1: np.ndarray,
+    dims2: np.ndarray,
+    numskip1: int = 0,
+    numskip2: int = 0,
+    verbose: bool = False,
+) -> bool:
+    """
+    Check the dimensions of two nifti files to determine if they cover the same number of timepoints.
+
+    This function compares the time dimensions of two NIfTI files after accounting for skipped timepoints
+    at the beginning of each file. It is commonly used to verify temporal consistency between paired
+    NIfTI datasets.

     Parameters
     ----------
-    dims1 :
-        The dimension array from the first
-    dims2 :
-        The dimension array from the second
+    dims1 : np.ndarray
+        The dimension array from the first NIfTI file. The time dimension is expected to be at index 4.
+    dims2 : np.ndarray
+        The dimension array from the second NIfTI file. The time dimension is expected to be at index 4.
     numskip1 : int, optional
-        Number of timepoints skipped at the beginning of file 1
+        Number of timepoints skipped at the beginning of file 1. Default is 0.
     numskip2 : int, optional
-        Number of timepoints skipped at the beginning of file 2
+        Number of timepoints skipped at the beginning of file 2. Default is 0.
+    verbose : bool, optional
+        Enable verbose output. If True, prints detailed information about the comparison.
+        Default is False.

     Returns
     -------
-
-        True if the time dimensions of the two files match
-
+    bool
+        True if the effective time dimensions of the two files match after accounting for skipped
+        timepoints, False otherwise.
+
+    Notes
+    -----
+    The function assumes that the time dimension is stored at index 4 of the dimension arrays.
+    This is typical for NIfTI files, where the dim array is ordered as [ndim, x, y, z, t, ...].
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dims1 = np.array([4, 64, 64, 32, 100, 1, 1, 1])
+    >>> dims2 = np.array([4, 64, 64, 32, 95, 1, 1, 1])
+    >>> checktimematch(dims1, dims2, numskip1=5, numskip2=0)
+    True
+    >>> checktimematch(dims1, dims2, numskip1=0, numskip2=3)
+    False
     """
     if (dims1[4] - numskip1) != (dims2[4] - numskip2):
         if verbose:
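
The effective-length comparison is just arithmetic on the two dim arrays; this sketch restates the check visible in the diff:

```python
import numpy as np

dims1 = np.array([4, 64, 64, 32, 100, 1, 1, 1])  # 100 timepoints recorded
dims2 = np.array([4, 64, 64, 32, 95, 1, 1, 1])   # 95 timepoints recorded

# drop 5 initial timepoints from file 1 and the effective lengths agree
print((dims1[4] - 5) == (dims2[4] - 0))  # True
```
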
@@ -876,7 +1669,56 @@ def checktimematch(dims1, dims2, numskip1=0, numskip2=0, verbose=False):
     return True


-def checkdatamatch(
+def checkdatamatch(
+    data1: np.ndarray,
+    data2: np.ndarray,
+    absthresh: float = 1e-12,
+    msethresh: float = 1e-12,
+    debug: bool = False,
+) -> Tuple[bool, bool]:
+    """
+    Check if two data arrays match within specified tolerances.
+
+    This function compares two numpy arrays using both mean squared error (MSE) and
+    maximum absolute difference metrics to determine if they match within given thresholds.
+
+    Parameters
+    ----------
+    data1 : numpy.ndarray
+        First data array to compare
+    data2 : numpy.ndarray
+        Second data array to compare
+    absthresh : float, optional
+        Absolute difference threshold. Default is 1e-12
+    msethresh : float, optional
+        Mean squared error threshold. Default is 1e-12
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    tuple of (bool, bool)
+        msematch : bool
+            True if mean squared error is below msethresh threshold
+        absmatch : bool
+            True if maximum absolute difference is below absthresh threshold
+
+    Notes
+    -----
+    The function uses the package's `mse` helper for the mean squared error calculation
+    and `np.max(np.fabs(data1 - data2))` for the maximum absolute difference.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data1 = np.array([1.0, 2.0, 3.0])
+    >>> data2 = np.array([1.0000000000001, 2.0000000000001, 3.0000000000001])
+    >>> checkdatamatch(data1, data2)
+    (True, True)
+
+    >>> checkdatamatch(data1, data2, absthresh=1e-15)
+    (True, False)
+    """
     msediff = mse(data1, data2)
     absdiff = np.max(np.fabs(data1 - data2))
     if debug:
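
A hedged sketch of the two metrics described above; `data_match` is illustrative, with the package's `mse` helper written out explicitly:

```python
import numpy as np

def data_match(data1, data2, absthresh=1e-12, msethresh=1e-12):
    msediff = np.mean(np.square(data1 - data2))   # mean squared error
    absdiff = np.max(np.fabs(data1 - data2))      # worst single-element difference
    return msediff < msethresh, absdiff < absthresh
```
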
@@ -885,8 +1727,66 @@ def checkdatamatch(data1, data2, absthresh=1e-12, msethresh=1e-12, debug=False):


 def checkniftifilematch(
-    filename1, filename2, absthresh=1e-12, msethresh=1e-12, spacetolerance=1e-3, debug=False
-):
+    filename1: str,
+    filename2: str,
+    absthresh: float = 1e-12,
+    msethresh: float = 1e-12,
+    spacetolerance: float = 1e-3,
+    debug: bool = False,
+) -> bool:
+    """
+    Check if two NIFTI files match in dimensions, resolution, and data values.
+
+    This function compares two NIFTI files for spatial compatibility and data
+    equivalence. It verifies that the files have matching spatial dimensions,
+    resolution, time dimensions, and that their voxel data values are within
+    specified tolerances.
+
+    Parameters
+    ----------
+    filename1 : str
+        Path to the first NIFTI file to be compared.
+    filename2 : str
+        Path to the second NIFTI file to be compared.
+    absthresh : float, optional
+        Absolute difference threshold for voxel-wise data comparison.
+        If any voxel differs by more than this value, the files are considered
+        not to match. Default is 1e-12.
+    msethresh : float, optional
+        Mean squared error threshold for data comparison. If the MSE between
+        the data arrays exceeds this value, the files are considered not to match.
+        Default is 1e-12.
+    spacetolerance : float, optional
+        Tolerance for comparing spatial dimensions and resolution between files.
+        Default is 1e-3.
+    debug : bool, optional
+        If True, enables debug output to assist in troubleshooting.
+        Default is False.
+
+    Returns
+    -------
+    bool
+        True if all checks (spatial, temporal, and data) pass within the specified
+        tolerances; False otherwise.
+
+    Notes
+    -----
+    The function internally calls several helper functions:
+    - `readfromnifti`: Reads NIFTI file metadata and data.
+    - `checkspacematch`: Compares spatial dimensions and resolution.
+    - `checktimematch`: Compares time dimensions.
+    - `checkdatamatch`: Compares data values using MSE and absolute difference.
+
+    Examples
+    --------
+    >>> match = checkniftifilematch('file1.nii', 'file2.nii')
+    >>> print(match)
+    True
+
+    >>> match = checkniftifilematch('file1.nii', 'file2.nii', absthresh=1e-15)
+    >>> print(match)
+    False
+    """
     im1, im1_data, im1_hdr, im1_dims, im1_sizes = readfromnifti(filename1)
     im2, im2_data, im2_hdr, im2_dims, im2_sizes = readfromnifti(filename2)
     spacematch = checkspacematch(im1_hdr, im2_hdr, tolerance=spacetolerance)
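
For orientation, a simplified, hedged sketch of the overall comparison flow (plain nibabel in place of the package's `readfromnifti`; not the package's implementation):

```python
import nibabel as nib
import numpy as np

def nifti_files_match(filename1, filename2, absthresh=1e-12, spacetolerance=1e-3):
    im1, im2 = nib.load(filename1), nib.load(filename2)
    hdr1, hdr2 = im1.header, im2.header
    # spatial: voxel sizes within fractional tolerance, matrix sizes equal
    res_ok = np.all(
        np.fabs(hdr1["pixdim"][1:4] - hdr2["pixdim"][1:4]) / hdr1["pixdim"][1:4]
        <= spacetolerance
    )
    dim_ok = np.array_equal(hdr1["dim"][1:4], hdr2["dim"][1:4])
    # temporal: same number of timepoints
    time_ok = hdr1["dim"][4] == hdr2["dim"][4]
    # data: worst voxelwise difference under threshold
    data_ok = np.max(np.fabs(im1.get_fdata() - im2.get_fdata())) < absthresh
    return bool(res_ok and dim_ok and time_ok and data_ok)
```
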
@@ -916,19 +1816,36 @@ def checkniftifilematch(


 # --------------------------- non-NIFTI file I/O functions ------------------------------------------
-def checkifparfile(filename):
-    r"""Checks to see if a file is an FSL style motion parameter file
+def checkifparfile(filename: str) -> bool:
+    """
+    Checks to see if a file is an FSL style motion parameter file
+
+    This function determines whether a given filename corresponds to an FSL-style
+    motion parameter file by checking if it ends with the '.par' extension.

     Parameters
     ----------
     filename : str
-        The name of the file in question.
+        The name of the file in question, including the file extension.

     Returns
     -------
-
-        True if filename ends with '.par'
-
+    bool
+        True if the filename ends with '.par', False otherwise.
+
+    Notes
+    -----
+    FSL (FMRIB Software Library) motion parameter files typically have the '.par'
+    extension and contain motion correction parameters for neuroimaging data.
+
+    Examples
+    --------
+    >>> checkifparfile("subject1.par")
+    True
+    >>> checkifparfile("subject1.txt")
+    False
+    >>> checkifparfile("motion.par")
+    True
     """
     if filename.endswith(".par"):
         return True
@@ -936,7 +1853,42 @@ def checkifparfile(filename):
     return False


-def readconfounds(filename, debug=False):
+def readconfounds(filename: str, debug: bool = False) -> Dict[str, np.ndarray]:
+    """
+    Read confound regressors from a text file.
+
+    This function reads confound regressors from a text file and returns them as a dictionary
+    mapping confound names to timecourse arrays. The function handles both structured column
+    names and automatically generated names for cases where column information is missing.
+
+    Parameters
+    ----------
+    filename : str
+        Path to the confounds file
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    dict of str to numpy.ndarray
+        Dictionary mapping confound names to timecourse arrays. Each key is a confound name
+        and each value is a 1D numpy array containing the timecourse data for that confound.
+
+    Notes
+    -----
+    The function internally calls `readvectorsfromtextfile` to parse the input file, which
+    returns metadata including sample rate, start time, column names, and the actual data.
+    If column names are not present in the file, automatically generated names are created
+    in the format 'confound_000', 'confound_001', etc.
+
+    Examples
+    --------
+    >>> confounds = readconfounds('confounds.txt')
+    >>> print(confounds.keys())
+    dict_keys(['motion_000', 'motion_001', 'motion_002', 'scrubbing'])
+    >>> print(confounds['motion_000'].shape)
+    (1000,)
+    """
     (
         thesamplerate,
         thestarttime,
@@ -955,19 +1907,46 @@ def readconfounds(filename, debug=False):
     return theconfounddict


-def readparfile(filename):
-    r"""Read motion parameters from an FSL-style .par file
+def readparfile(filename: str) -> Dict[str, np.ndarray]:
+    """
+    Read motion parameters from an FSL-style .par file.
+
+    This function reads motion parameters from FSL-style .par files and returns
+    them as a dictionary with timecourses keyed by parameter names.

     Parameters
     ----------
     filename : str
-        The name of the file
+        The name of the FSL-style .par file to read. This file should contain
+        motion parameters in the standard FSL format with 6 columns representing
+        translation (X, Y, Z) and rotation (RotX, RotY, RotZ) parameters.

     Returns
     -------
-
-
-
+    dict of numpy.ndarray
+        Dictionary containing the motion parameters as timecourses. Keys are:
+        - 'X': translation along x-axis
+        - 'Y': translation along y-axis
+        - 'Z': translation along z-axis
+        - 'RotX': rotation around x-axis
+        - 'RotY': rotation around y-axis
+        - 'RotZ': rotation around z-axis
+        Each value is a 1D numpy array containing the timecourse for that parameter.
+
+    Notes
+    -----
+    The .par file format expected by this function is the standard FSL format
+    where each row represents a timepoint and each column represents a motion
+    parameter. The function assumes the file contains exactly 6 columns in the
+    order: X, Y, Z, RotX, RotY, RotZ.
+
+    Examples
+    --------
+    >>> motion_data = readparfile('motion.par')
+    >>> print(motion_data.keys())
+    dict_keys(['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ'])
+    >>> print(motion_data['X'].shape)
+    (100,)  # assuming 100 timepoints
+    """
     labels = ["X", "Y", "Z", "RotX", "RotY", "RotZ"]
     motiontimeseries = readvecs(filename)
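
The parsing described above amounts to mapping six columns onto named timecourses; a hedged sketch with `np.loadtxt` standing in for the package's `readvecs` helper ("motion.par" is an illustrative filename):

```python
import numpy as np

labels = ["X", "Y", "Z", "RotX", "RotY", "RotZ"]
motiontimeseries = np.loadtxt("motion.par").T   # shape (6, n_timepoints)
motiondict = {label: motiontimeseries[i, :] for i, label in enumerate(labels)}
```
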
@@ -977,8 +1956,9 @@ def readparfile(filename):
     return motiondict


-def readmotion(filename, tr=1.0, colspec=None):
-    r"""Read motion regressors from a file
+def readmotion(filename: str, tr: float = 1.0, colspec: Optional[str] = None) -> Dict[str, Any]:
+    """
+    Read motion regressors from a file (.par, .tsv, or other text format).

     Parameters
     ----------
@@ -1118,25 +2098,41 @@ def readmotion(filename, tr=1.0, colspec=None):
     return motiondict


-def sliceinfo(slicetimes, tr):
-    r"""Find out what slicetimes we have, their spacing, and which timepoint each slice occurs at. This assumes
+def sliceinfo(slicetimes: np.ndarray, tr: float) -> Tuple[int, float, np.ndarray]:
+    """
+    Find out what slicetimes we have, their spacing, and which timepoint each slice occurs at. This assumes
     uniform slice time spacing, but supports any slice acquisition order and multiband acquisitions.

     Parameters
     ----------
     slicetimes : 1d float array
         List of all the slicetimes relative to the start of the TR
-    tr: float
+    tr : float
         The TR of the acquisition

     Returns
     -------
     numsteps : int
         The number of unique slicetimes in the list
-    stepsize: float
+    stepsize : float
         The stepsize in seconds between subsequent slice acquisitions
-    sliceoffsets: 1d int array
+    sliceoffsets : 1d int array
         Which acquisition time each slice was acquired at
+
+    Notes
+    -----
+    This function assumes uniform slice time spacing and works with any slice acquisition order
+    and multiband acquisitions. The function determines the minimum time step between slices
+    and maps each slice to its corresponding timepoint within the TR.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> slicetimes = np.array([0.0, 0.1, 0.2, 0.3])
+    >>> tr = 1.0
+    >>> numsteps, stepsize, sliceoffsets = sliceinfo(slicetimes, tr)
+    >>> print(numsteps, stepsize, sliceoffsets)
+    4 0.1 [0 1 2 3]
     """
     sortedtimes = np.sort(slicetimes)
     diffs = sortedtimes[1:] - sortedtimes[0:-1]
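
A hedged sketch of the offset computation the Notes describe (illustrative, not the package's exact code): find the smallest spacing between unique slice times, then express each slice's time as a multiple of that step.

```python
import numpy as np

def slice_offsets(slicetimes):
    sortedtimes = np.sort(np.unique(slicetimes))
    minstep = np.min(sortedtimes[1:] - sortedtimes[0:-1])
    offsets = np.round(slicetimes / minstep).astype(int)
    return len(sortedtimes), minstep, offsets

print(slice_offsets(np.array([0.0, 0.2, 0.1, 0.3])))
# numsteps=4, stepsize~0.1, offsets=[0 2 1 3]
```
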
@@ -1146,7 +2142,49 @@ def sliceinfo(slicetimes, tr):
     return numsteps, minstep, sliceoffsets


-def getslicetimesfromfile(slicetimename):
+def getslicetimesfromfile(slicetimename: str) -> Tuple[np.ndarray, bool, bool]:
+    """
+    Read slice timing information from a file.
+
+    This function reads slice timing data from either a JSON file (BIDS sidecar format)
+    or a text file containing slice timing values. It returns the slice times along
+    with metadata indicating how the data was processed.
+
+    Parameters
+    ----------
+    slicetimename : str
+        Path to the slice timing file. Can be either a JSON file (BIDS sidecar format)
+        or a text file containing slice timing values.
+
+    Returns
+    -------
+    tuple of (np.ndarray, bool, bool)
+        A tuple containing:
+        - slicetimes : np.ndarray
+            Array of slice timing values as floats
+        - normalizedtotr : bool
+            True if the slice times were normalized to TR (the repetition time),
+            False if they were read directly from a JSON file
+        - fileisbidsjson : bool
+            True if the input file was a BIDS JSON sidecar file,
+            False if it was a text file
+
+    Notes
+    -----
+    - For JSON files, the function expects a "SliceTiming" key in the JSON dictionary
+    - For text files, the function uses readvec() to parse the slice timing values
+    - If a JSON file doesn't contain the required "SliceTiming" key, the function
+      prints an error message and exits the program
+    - Slice timing values are converted to float64 dtype for precision
+
+    Examples
+    --------
+    >>> slicetimes, normalized, is_bids = getslicetimesfromfile("sub-01_task-rest_bold.json")
+    >>> print(slicetimes)
+    [0.  0.1 0.2 0.3 0.4]
+    >>> print(normalized, is_bids)
+    False True
+    """
     filebase, extension = os.path.splitext(slicetimename)
     if extension == ".json":
         jsoninfodict = readdictfromjson(slicetimename)
@@ -1167,19 +2205,41 @@ def getslicetimesfromfile(slicetimename):
     return slicetimes, normalizedtotr, fileisbidsjson


-def readbidssidecar(inputfilename):
-    r"""Read key value pairs out of a BIDS sidecar file
+def readbidssidecar(inputfilename: str) -> Dict[str, Any]:
+    """
+    Read key value pairs out of a BIDS sidecar file
+
+    This function reads JSON sidecar files commonly used in BIDS (Brain Imaging Data Structure)
+    datasets and returns the key-value pairs as a dictionary.

     Parameters
     ----------
     inputfilename : str
-        The name of the sidecar file (with extension)
+        The name of the sidecar file (with extension). The function will automatically
+        look for a corresponding .json file with the same base name.

     Returns
     -------
-
-
-
+    dict
+        A dictionary containing the key-value pairs from the JSON sidecar file.
+        Returns an empty dictionary if the sidecar file does not exist.
+
+    Notes
+    -----
+    The function expects the sidecar file to have the same base name as the input file
+    but with a .json extension. For example, if inputfilename is "sub-01_task-rest_bold.nii",
+    the function will look for "sub-01_task-rest_bold.json".
+
+    Examples
+    --------
+    >>> sidecar_data = readbidssidecar("sub-01_task-rest_bold.nii")
+    >>> print(sidecar_data['RepetitionTime'])
+    2.0
+
+    >>> sidecar_data = readbidssidecar("nonexistent_file.nii")
+    sidecar file does not exist
+    >>> print(sidecar_data)
+    {}
     """
     thefileroot, theext = os.path.splitext(inputfilename)
     if os.path.exists(thefileroot + ".json"):
@@ -1191,16 +2251,48 @@ def readbidssidecar(inputfilename):
     return {}


-def writedicttojson(thedict, thefilename):
-    r"""Write key value pairs to a json file
+def writedicttojson(thedict: Dict[str, Any], thefilename: str) -> None:
+    """
+    Write key-value pairs to a JSON file with proper numpy type handling.
+
+    This function writes a dictionary to a JSON file, automatically converting
+    numpy data types to their Python equivalents to ensure proper JSON serialization.

     Parameters
     ----------
-    thedict : dict
-
+    thedict : dict[str, Any]
+        Dictionary containing key-value pairs to be written to JSON file
     thefilename : str
-
+        Path and name of the output JSON file (including extension)

+    Returns
+    -------
+    None
+        This function does not return any value
+
+    Notes
+    -----
+    The function automatically converts numpy data types:
+    - numpy.integer → Python int
+    - numpy.floating → Python float
+    - numpy.ndarray → Python list
+
+    The output JSON file will be formatted with:
+    - Sorted keys
+    - 4-space indentation
+    - Comma-separated values without spaces
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = {
+    ...     'name': 'John',
+    ...     'age': np.int32(30),
+    ...     'score': np.float64(95.5),
+    ...     'values': np.array([1, 2, 3, 4])
+    ... }
+    >>> writedicttojson(data, 'output.json')
+    >>> # Creates output.json with properly formatted data
     """
     thisdict = {}
     for key in thedict:
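
A hedged sketch of the numpy-to-native conversion and formatting choices the Notes list (illustrative; `write_json` is not the package's function):

```python
import json
import numpy as np

def write_json(thedict, thefilename):
    converted = {}
    for key, value in thedict.items():
        if isinstance(value, np.integer):
            converted[key] = int(value)        # numpy.integer -> int
        elif isinstance(value, np.floating):
            converted[key] = float(value)      # numpy.floating -> float
        elif isinstance(value, np.ndarray):
            converted[key] = value.tolist()    # numpy.ndarray -> list
        else:
            converted[key] = value
    with open(thefilename, "w") as fp:
        # sorted keys, 4-space indent, no space after separators
        json.dump(converted, fp, sort_keys=True, indent=4, separators=(",", ":"))
```
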
@@ -1218,19 +2310,41 @@ def writedicttojson(thedict, thefilename):
     )


-def readdictfromjson(inputfilename):
-    r"""Read key value pairs out of a json file
+def readdictfromjson(inputfilename: str) -> Dict[str, Any]:
+    """
+    Read key value pairs out of a json file.
+
+    This function reads a JSON file and returns its contents as a dictionary.
+    The function automatically appends the ".json" extension to the input filename
+    if it's not already present.

     Parameters
     ----------
     inputfilename : str
-        The name of the json file (with extension)
+        The name of the json file (with or without extension). If the extension
+        is not provided, ".json" will be appended automatically.

     Returns
     -------
-
-
-
+    dict[str, Any]
+        A dictionary containing the key-value pairs from the JSON file. Returns
+        an empty dictionary if the specified file does not exist.
+
+    Notes
+    -----
+    - The function checks for the existence of the file before attempting to read it
+    - If the input filename doesn't have a ".json" extension, it will be automatically added
+    - If the file doesn't exist, a message will be printed and an empty dictionary returned
+
+    Examples
+    --------
+    >>> data = readdictfromjson("config")
+    >>> print(data)
+    {'key1': 'value1', 'key2': 'value2'}
+
+    >>> data = readdictfromjson("data.json")
+    >>> print(data)
+    {'name': 'John', 'age': 30}
     """
     thefileroot, theext = os.path.splitext(inputfilename)
     if os.path.exists(thefileroot + ".json"):
@@ -1242,21 +2356,43 @@ def readdictfromjson(inputfilename):
     return {}


-def readlabelledtsv(inputfilename, compressed=False):
-    r"""Read time series out of an fmriprep confounds tsv file
+def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str, np.ndarray]:
+    """
+    Read time series out of an fmriprep confounds tsv file

     Parameters
     ----------
     inputfilename : str
-        The root name of the tsv (no extension)
+        The root name of the tsv file (without extension)
+    compressed : bool, optional
+        If True, reads from a gzipped tsv file (.tsv.gz), otherwise reads from
+        a regular tsv file (.tsv). Default is False.

     Returns
     -------
-
-
-
-
-
+    dict of str to numpy.ndarray
+        Dictionary containing all the timecourses in the file, keyed by the
+        column names from the first row of the tsv file. Each value is a
+        numpy array containing the time series data for that column.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the specified tsv file (with appropriate extension) does not exist.
+
+    Notes
+    -----
+    - NaN values in the input file are replaced with 0.0
+    - The function supports both compressed (.tsv.gz) and uncompressed (.tsv) files
+
+    Examples
+    --------
+    >>> confounds = readlabelledtsv("sub-01_task-rest_bold_confounds")
+    >>> print(confounds.keys())
+    dict_keys(['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'])
+    >>> print(confounds['trans_x'].shape)
+    (100,)
     """
     confounddict = {}
     if compressed:
@@ -1277,22 +2413,49 @@ def readlabelledtsv(inputfilename, compressed=False):
     return confounddict


-def readcsv(inputfilename, debug=False):
-    r"""Read time series out of an unlabelled csv file
+def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, np.ndarray]:
+    """
+    Read time series out of an unlabelled csv file.
+
+    This function reads a CSV file and returns a dictionary of time series,
+    where keys are column names (or generated names if no header is present)
+    and values are NumPy arrays of the corresponding time series data.

     Parameters
     ----------
     inputfilename : str
-        The root name of the csv (no extension)
+        The root name of the CSV file (without the '.csv' extension).
+    debug : bool, optional
+        If True, prints debug information about whether a header line is detected,
+        by default False.

     Returns
     -------
-
-
-
-
-
-
+    dict of str to np.ndarray
+        A dictionary where keys are column names (or generated names like "col0", "col1", etc.)
+        and values are NumPy arrays containing the time series data.
+
+    Notes
+    -----
+    - If the first row of the CSV contains non-numeric values, it is treated as a header line.
+    - If the first row is numeric, it is treated as part of the data, and columns are
+      named "col0", "col1", etc.
+    - NaN values in the CSV are replaced with 0.0.
+    - If the file does not exist or cannot be read, a FileNotFoundError is raised.
+
+    Examples
+    --------
+    >>> data = readcsv("timeseries_data")
+    >>> print(data.keys())
+    dict_keys(['col0', 'col1', 'col2'])
+    >>> print(data['col0'])
+    [1. 2. 3. 4.]
+
+    >>> data = readcsv("labeled_data", debug=True)
+    there is a header line
+    >>> print(data.keys())
+    dict_keys(['time', 'signal1', 'signal2'])
     """
     if not os.path.isfile(inputfilename + ".csv"):
         raise FileNotFoundError(f"csv file {inputfilename}.csv does not exist")
@@ -1331,22 +2494,43 @@ def readcsv(inputfilename, debug=False):
     return timeseriesdict


-def readfslmat(inputfilename, debug=False):
-    r"""Read time series out of an FSL design.mat file
+def readfslmat(inputfilename: str, debug: bool = False) -> Dict[str, np.ndarray]:
+    """
+    Read time series out of an FSL design.mat file

     Parameters
     ----------
     inputfilename : str
-        The root name of the mat file (no extension)
+        The root name of the .mat file (no extension)
+    debug : bool, optional
+        If True, print the DataFrame contents for debugging purposes. Default is False

     Returns
     -------
-
-
-
-
-
-
+    dict of numpy.ndarray
+        Dictionary containing all the timecourses in the file, keyed by column names
+        generated with the `makecolname` helper ('col_00', 'col_01', ...).
+
+    Raises
+    ------
+    FileNotFoundError
+        If the specified FSL mat file does not exist
+
+    Notes
+    -----
+    This function reads FSL design.mat files and extracts time series data. The function
+    skips the first 5 rows of the file (assumed to be header information) and treats
+    subsequent rows as time series data. The column names are generated using the
+    `makecolname` helper function.
+
+    Examples
+    --------
+    >>> timeseries = readfslmat("design")
+    >>> print(timeseries.keys())
+    dict_keys(['col_00', 'col_01', 'col_02'])
+    >>> print(timeseries['col_00'])
+    [0.1 0.2 0.3 0.4]
     """
     if not os.path.isfile(inputfilename + ".mat"):
         raise FileNotFoundError(f"FSL mat file {inputfilename}.mat does not exist")
@@ -1366,7 +2550,51 @@ def readfslmat(inputfilename, debug=False):
     return timeseriesdict


-def readoptionsfile(inputfileroot):
+def readoptionsfile(inputfileroot: str) -> Dict[str, Any]:
+    """
+    Read run options from a JSON or TXT configuration file.
+
+    This function attempts to read rapidtide run options from a file with the given root name,
+    checking for `.json` and `.txt` extensions in that order. If neither file exists,
+    a `FileNotFoundError` is raised. The function also handles backward compatibility
+    for older options files by filling in default filter limits based on the `filtertype`.
+
+    Parameters
+    ----------
+    inputfileroot : str
+        The base name of the options file (without extension). The function will
+        first look for `inputfileroot.json`, then `inputfileroot.txt`.
+
+    Returns
+    -------
+    Dict[str, Any]
+        A dictionary containing the run options. The dictionary includes keys such as
+        `filtertype`, `lowerstop`, `lowerpass`, `upperpass`, and `upperstop`, depending
+        on the file content and filter type.
+
+    Raises
+    ------
+    FileNotFoundError
+        If neither `inputfileroot.json` nor `inputfileroot.txt` exists.
+
+    Notes
+    -----
+    For backward compatibility, older options files without `lowerpass` key are updated
+    with default values based on the `filtertype`:
+
+    - "None": All limits set to 0.0 or -1.0
+    - "vlf": 0.0, 0.0, 0.009, 0.010
+    - "lfo": 0.009, 0.010, 0.15, 0.20
+    - "resp": 0.15, 0.20, 0.4, 0.5
+    - "card": 0.4, 0.5, 2.5, 3.0
+    - "arb": Uses values from `arb_lowerstop`, `arb_lower`, `arb_upper`, `arb_upperstop`
+
+    Examples
+    --------
+    >>> options = readoptionsfile("myfilter")
+    >>> print(options["filtertype"])
+    vlf
+    """
     if os.path.isfile(inputfileroot + ".json"):
         # options saved as json
         thedict = readdictfromjson(inputfileroot + ".json")
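
A hedged sketch of the backward-compatibility fill-in the Notes describe (band edges in Hz, taken from the table above; the function and dict names are illustrative):

```python
FILTER_DEFAULTS = {
    "vlf": (0.0, 0.0, 0.009, 0.010),
    "lfo": (0.009, 0.010, 0.15, 0.20),
    "resp": (0.15, 0.20, 0.4, 0.5),
    "card": (0.4, 0.5, 2.5, 3.0),
}

def fill_filter_limits(options):
    # older options files lack explicit band limits; key defaults on filtertype
    if "lowerpass" not in options:
        band = FILTER_DEFAULTS.get(options.get("filtertype"))
        if band is not None:
            (options["lowerstop"], options["lowerpass"],
             options["upperpass"], options["upperstop"]) = band
    return options
```
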
@@ -1420,51 +2648,138 @@ def readoptionsfile(inputfileroot):
     return thedict


-def makecolname(colnum, startcol):
+def makecolname(colnum: int, startcol: int) -> str:
+    """
+    Generate a column name in the format 'col_##' where ## is a zero-padded number.
+
+    This function creates standardized column names by adding a starting offset to
+    a column number and formatting it with zero-padding to ensure consistent
+    two-digit representation.
+
+    Parameters
+    ----------
+    colnum : int
+        The base column number to be used in the name generation.
+    startcol : int
+        The starting column offset to be added to colnum.
+
+    Returns
+    -------
+    str
+        A column name in the format 'col_##' where ## represents the zero-padded
+        sum of colnum and startcol.
+
+    Notes
+    -----
+    The resulting number is zero-padded to always have at least two digits.
+    For example, if colnum=5 and startcol=10, the result will be 'col_15'.
+    If colnum=1 and startcol=2, the result will be 'col_03'.
+
+    Examples
+    --------
+    >>> makecolname(0, 0)
+    'col_00'
+
+    >>> makecolname(5, 10)
+    'col_15'
+
+    >>> makecolname(1, 2)
+    'col_03'
+    """
     return f"col_{str(colnum + startcol).zfill(2)}"


 def writebidstsv(
-    outputfileroot,
-    data,
-    samplerate,
-    extraheaderinfo=None,
-    compressed=True,
-    columns=None,
-    xaxislabel="time",
-    yaxislabel="arbitrary value",
-    starttime=0.0,
-    append=False,
-    samplerate_tolerance=1e-6,
-    starttime_tolerance=1e-6,
-    colsinjson=True,
-    colsintsv=False,
-    omitjson=False,
-    debug=False,
-):
-    """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    outputfileroot: str,
+    data: np.ndarray,
+    samplerate: float,
+    extraheaderinfo: Optional[Dict[str, Any]] = None,
+    compressed: bool = True,
+    columns: Optional[List[str]] = None,
+    xaxislabel: str = "time",
+    yaxislabel: str = "arbitrary value",
+    starttime: float = 0.0,
+    append: bool = False,
+    samplerate_tolerance: float = 1e-6,
+    starttime_tolerance: float = 1e-6,
+    colsinjson: bool = True,
+    colsintsv: bool = False,
+    omitjson: bool = False,
+    debug: bool = False,
+) -> None:
+    """
+    Write physiological or stimulation data to a BIDS-compatible TSV file with optional JSON sidecar.
+
+    This function writes time series data to a TSV file following BIDS conventions for physiological
+    (``_physio``) and stimulation (``_stim``) data. It supports optional compression, appending to
+    existing files, and includes metadata in a corresponding JSON file.
+
+    Parameters
+    ----------
+    outputfileroot : str
+        Root name of the output files (without extension). The function will write
+        ``<outputfileroot>.tsv`` or ``<outputfileroot>.tsv.gz`` and ``<outputfileroot>.json``.
+    data : np.ndarray
+        Time series data to be written. If 1D, it will be reshaped to (1, n_timesteps).
+        Shape should be (n_channels, n_timesteps).
+    samplerate : float
+        Sampling frequency of the data in Hz.
+    extraheaderinfo : dict, optional
+        Additional key-value pairs to include in the JSON sidecar file.
+    compressed : bool, default=True
+        If True, compress the TSV file using gzip (.tsv.gz). If False, write uncompressed (.tsv).
+    columns : list of str, optional
+        Column names for the TSV file. If None, default names are generated using
+        ``makecolname``.
+    xaxislabel : str, default="time"
+        Label for the x-axis in the JSON sidecar.
+    yaxislabel : str, default="arbitrary value"
+        Label for the y-axis in the JSON sidecar.
+    starttime : float, default=0.0
+        Start time of the recording in seconds.
+    append : bool, default=False
+        If True, append data to an existing file. The function checks compatibility of
+        sampling rate, start time, and number of columns.
+    samplerate_tolerance : float, default=1e-6
+        Tolerance for comparing sampling rates when appending data.
+    starttime_tolerance : float, default=1e-6
+        Tolerance for comparing start times when appending data.
+    colsinjson : bool, default=True
+        If True, include the column names in the JSON file under the "Columns" key.
+    colsintsv : bool, default=False
+        If True, write column headers in the TSV file. BIDS convention requires no headers.
+    omitjson : bool, default=False
+        If True, do not write the JSON sidecar file.
+    debug : bool, default=False
+        If True, print debug information during execution.
+
+    Returns
+    -------
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - BIDS-compliant TSV files require:
+      1. Compression (.tsv.gz)
+      2. Presence of "SamplingFrequency", "StartTime", and "Columns" in the JSON file
+      3. No column headers in the TSV file
+      4. File name ending in "_physio" or "_stim"
+    - If ``append=True``, the function will validate compatibility of sampling rate, start time,
+      and number of columns with the existing file.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(2, 1000)
+    >>> writebidstsv("sub-01_task-rest_physio", data, samplerate=100.0)
+    >>> # Writes:
+    >>> # sub-01_task-rest_physio.tsv.gz
+    >>> # sub-01_task-rest_physio.json
+
+    See Also
+    --------
+    readbidstsv : Read BIDS physiological or stimulation data from TSV and JSON files.
     """
     if debug:
         print("entering writebidstsv:")
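
A hedged sketch of the BIDS physio convention the docstring describes: a gzipped, header-less TSV plus a JSON sidecar carrying SamplingFrequency, StartTime, and Columns (file names and column labels here are illustrative):

```python
import gzip
import json
import numpy as np

data = np.random.rand(2, 1000)           # (n_channels, n_timesteps)
columns = ["cardiac", "respiratory"]

with gzip.open("sub-01_task-rest_physio.tsv.gz", "wt") as fp:
    np.savetxt(fp, data.T, delimiter="\t")   # one row per timestep, no header

with open("sub-01_task-rest_physio.json", "w") as fp:
    json.dump(
        {"SamplingFrequency": 100.0, "StartTime": 0.0, "Columns": columns},
        fp, sort_keys=True, indent=4,
    )
```
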
@@ -1485,7 +2800,9 @@ def writebidstsv(
         reshapeddata = data
     if append:
         insamplerate, instarttime, incolumns, indata, incompressed, incolsource = readbidstsv(
-            outputfileroot + ".json",
+            outputfileroot + ".json",
+            neednotexist=True,
+            debug=debug,
         )
         if debug:
             print("appending")
@@ -1571,36 +2888,58 @@ def writebidstsv(
     )


-def readvectorsfromtextfile(fullfilespec, onecol=False, debug=False):
-
+def readvectorsfromtextfile(
+    fullfilespec: str, onecol: bool = False, debug: bool = False
+) -> Tuple[Optional[float], Optional[float], Optional[List[str]], np.ndarray, Optional[bool], str]:
+    """
+    Read time series data from a text-based file (TSV, CSV, MAT, or BIDS-style TSV).
+
+    This function reads timecourse data from various file formats, including plain TSV,
+    gzipped TSV (.tsv.gz), CSV, and BIDS-style continuous data files (.tsv with associated .json).
+    It automatically detects the file type and parses the data accordingly.

     Parameters
     ----------
     fullfilespec : str
+        Path to the input file. May include a column specification appended after a
+        colon (e.g., ``"data.tsv:1,3,5"``). For TSV/CSV files, the specification can be a
+        comma-separated list of column names or integer indices. For BIDS-style TSV files, it
+        should be a comma-separated list of column names.
-
-
-
-
-
-
-
+    onecol : bool, optional
+        If True, returns only the first column of data. Default is False.
+    debug : bool, optional
+        If True, prints additional debugging information. Default is False.

     Returns
     -------
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    samplerate : float
+        Sample rate in Hz. None if not knowable.
+    starttime : float
+        Time of first point, in seconds. None if not knowable.
+    columns : str array
+        Names of the timecourses contained in the file. None if not knowable.
+    data : 2D numpy array
+        Timecourses from the file.
+    compressed : bool
+        True if time data is gzipped (as in a .tsv.gz file).
+    filetype : str
+        One of "text", "csv", "plaintsv", "bidscontinuous".
+
+    Notes
+    -----
+    - If the file does not exist or is not valid, all return values are None.
+    - For BIDS-style TSV files, the associated .json sidecar file is used to determine
+      sample rate and start time.
+    - For plain TSV files, column names are read from the header row.
+    - If ``onecol`` is True, only the first column is returned.
+
+    Examples
+    --------
+    >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv")
+    >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv:0-3")
+    >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv", onecol=True)
+    """

     thefilename, colspec = parsefilespec(fullfilespec)
     thefileroot, theext = os.path.splitext(thefilename)
@@ -1732,31 +3071,76 @@ def readvectorsfromtextfile(fullfilespec, onecol=False, debug=False):
     return thesamplerate, thestarttime, thecolumns, thedata, compressed, filetype


-def readbidstsv(inputfilename, colspec=None, warn=True, neednotexist=False, debug=False):
-
+def readbidstsv(
+    inputfilename: str,
+    colspec: Optional[str] = None,
+    warn: bool = True,
+    neednotexist: bool = False,
+    debug: bool = False,
+) -> Tuple[
+    Optional[float],
+    Optional[float],
+    Optional[List[str]],
+    Optional[np.ndarray],
+    Optional[bool],
+    Optional[str],
+]:
+    """
+    Read BIDS-compatible TSV data file with associated JSON metadata.
+
+    This function reads a TSV file (optionally gzipped) and its corresponding JSON
+    metadata file to extract timecourse data, sample rate, start time, and column names.
+    It supports both compressed (.tsv.gz) and uncompressed (.tsv) TSV files.

     Parameters
     ----------
     inputfilename : str
-        The root name of the tsv and accompanying json file (no extension)
-    colspec:
-        A comma separated list of the columns to return
-    debug : bool
-
+        The root name of the TSV and accompanying JSON file (without extension).
+    colspec : str, optional
+        A comma-separated list of column names to return. If None, all columns are returned.
+    warn : bool, optional
+        If True, print warnings for missing metadata fields. Default is True.
+    neednotexist : bool, optional
+        If True, return None values instead of raising an exception if files do not exist.
+        Default is False.
+    debug : bool, optional
+        If True, print additional debugging information. Default is False.

     Returns
     -------
+    tuple of (samplerate, starttime, columns, data, is_compressed, columnsource)
     samplerate : float
-        Sample rate in Hz
+        Sample rate in Hz.
     starttime : float
-        Time of first point
-    columns : str
-        Names of the timecourses contained in the file
-    data :
-
-
-
-
+        Time of first point in seconds.
+    columns : list of str
+        Names of the timecourses contained in the file.
+    data : numpy.ndarray, optional
+        2D array of timecourses from the file. Returns None if file does not exist or is invalid.
+    is_compressed : bool
+        Indicates whether the TSV file was gzipped.
+    columnsource : str
+        Source of column names: either 'json' or 'tsv'.
+
+    Notes
+    -----
+    - If the TSV file does not exist or is not valid, all return values are None.
+    - If the JSON metadata file is missing required fields (SamplingFrequency, StartTime, Columns),
+      default values are used and warnings are issued if `warn=True`.
+    - The function handles both gzipped and uncompressed TSV files.
+    - If a header line is found in the TSV file, it is skipped and a warning is issued.
+
+    Examples
+    --------
+    >>> samplerate, starttime, columns, data, is_compressed, source = readbidstsv('sub-01_task-rest')
+    >>> print(f"Sample rate: {samplerate} Hz")
+    Sample rate: 10.0 Hz
+
+    >>> samplerate, starttime, columns, data, is_compressed, source = readbidstsv(
+    ...     'sub-01_task-rest', colspec='column1,column2'
+    ... )
+    >>> print(f"Selected columns: {columns}")
+    Selected columns: ['column1', 'column2']
     """
     thefileroot, theext = os.path.splitext(inputfilename)
     if theext == ".gz":
@@ -1905,18 +3289,64 @@ def readbidstsv(inputfilename, colspec=None, warn=True, neednotexist=False, debu
         raise FileNotFoundError(f"file pair {thefileroot}(.json/.tsv[.gz]) does not exist")


-def readcolfrombidstsv(
-
+def readcolfrombidstsv(
+    inputfilename: str,
+    columnnum: int = 0,
+    columnname: Optional[str] = None,
+    neednotexist: bool = False,
+    debug: bool = False,
+) -> Tuple[Optional[float], Optional[float], Optional[np.ndarray]]:
+    """
+    Read a specific column from a BIDS TSV file.
+
+    Extracts a single column of data from a BIDS TSV file, either by column name
+    or by column index. The function handles both compressed and uncompressed files.

     Parameters
     ----------
-    inputfilename
-
-    columnname
+    inputfilename : str
+        Path to the input BIDS TSV file (can be .tsv or .tsv.gz)
+    columnname : str, optional
+        Name of the column to extract. If specified, ``columnnum`` is ignored.
+        Default is None.
+    columnnum : int, optional
+        Index of the column to extract (0-based). Ignored if ``columnname`` is specified.
+        Default is 0.
+    neednotexist : bool, optional
+        If True, the function will not raise an error if the file does not exist.
+        Default is False.
+    debug : bool, optional
+        Enable debug output. Default is False.

     Returns
     -------
-
+    tuple
+        A tuple containing:
+
+        - samplerate : float or None
+            Sampling rate extracted from the file, or None if no valid data found
+        - starttime : float or None
+            Start time extracted from the file, or None if no valid data found
+        - data : numpy.ndarray or None
+            The extracted column data as a 1D array, or None if no valid data found
+
+    Notes
+    -----
+    - If both ``columnname`` and ``columnnum`` are specified, ``columnname`` takes precedence
+    - Column indices are 0-based
+    - The function handles both compressed (.tsv.gz) and uncompressed (.tsv) files
+    - Returns None for all values if no valid data is found
+
+    Examples
+    --------
+    >>> # Read first column by index
+    >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnnum=0)
+
+    >>> # Read column by name
+    >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnname='reaction_time')
+
+    >>> # Read column with debug output
+    >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnname='rt', debug=True)
     """
     samplerate, starttime, columns, data, compressed, colsource = readbidstsv(
         inputfilename, neednotexist=neednotexist, debug=debug
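Per the Notes above, ``columnname`` takes precedence over ``columnnum`` when both are supplied, and all three return values are None when no valid data is found; a hedged usage sketch (file root illustrative):

from rapidtide.io import readcolfrombidstsv

# columnname wins, so columnnum=3 is ignored here
samplerate, starttime, cardiac = readcolfrombidstsv(
    "sub-01_physio", columnnum=3, columnname="cardiac"
)
if cardiac is None:
    print("no valid data found")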
@@ -1946,7 +3376,54 @@ def readcolfrombidstsv(inputfilename, columnnum=0, columnname=None, neednotexist
         return samplerate, starttime, data[columnnum, :]


-def parsefilespec(filespec, debug=False):
+def parsefilespec(filespec: str, debug: bool = False) -> Tuple[str, Optional[str]]:
+    """
+    Parse a file specification string into filename and column specification.
+
+    This function splits a file specification string using ':' as the delimiter.
+    On Windows platforms, it handles special cases where the second character
+    is ':' (e.g., "C:file.txt") by treating the first two parts as the filename.
+
+    Parameters
+    ----------
+    filespec : str
+        The file specification string to parse. Expected format is
+        "filename[:column_specification]".
+    debug : bool, optional
+        If True, print debug information during execution. Default is False.
+
+    Returns
+    -------
+    tuple[str, str or None]
+        A tuple containing:
+        - thefilename : str
+            The parsed filename part of the specification
+        - thecolspec : str or None
+            The parsed column specification, or None if not provided
+
+    Raises
+    ------
+    ValueError
+        If the file specification is malformed (e.g., too many parts when
+        special case handling is not applicable).
+
+    Notes
+    -----
+    On Windows systems, this function correctly handles drive letter specifications
+    such as "C:file.txt" by treating the first two elements ("C:" and "file.txt")
+    as the filename part.
+
+    Examples
+    --------
+    >>> parsefilespec("data.csv")
+    ('data.csv', None)
+
+    >>> parsefilespec("data.csv:1,3,5")
+    ('data.csv', '1,3,5')
+
+    >>> parsefilespec("C:file.txt:col1")
+    ('C:file.txt', 'col1')
+    """
     inputlist = filespec.split(":")
     if debug:
         print(f"PARSEFILESPEC: input string >>>{filespec}<<<")
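The drive-letter special case described above can be made concrete with a standalone re-implementation sketch (for illustration only, not the package's exact code path):

import sys


def parsefilespec_sketch(filespec):
    parts = filespec.split(":")
    if sys.platform == "win32" and len(filespec) > 1 and filespec[1] == ":":
        # "C:file.txt[:colspec]": rejoin the drive letter with the path
        thefilename = ":".join(parts[:2])
        remainder = parts[2:]
    else:
        thefilename = parts[0]
        remainder = parts[1:]
    if len(remainder) > 1:
        raise ValueError(f"badly formed file specification: {filespec}")
    return thefilename, (remainder[0] if remainder else None)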
@@ -1981,7 +3458,47 @@ def parsefilespec(filespec, debug=False):
     return thefilename, thecolspec


-def unique(list1):
+def unique(list1: List[Any]) -> List[Any]:
+    """
+    Convert a column specification string to a list of column indices.
+
+    This function parses a column specification string and converts it into a list of
+    zero-based column indices. The specification can include ranges (e.g., "0-5") and
+    individual column numbers (e.g., "7") separated by commas.
+
+    Parameters
+    ----------
+    colspec : str or None
+        Column specification string in format like "0-5,7,10-12" or predefined macro.
+        If None, returns None.
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    list of int or None
+        List of column indices corresponding to the specification, or None if input is None.
+        Returns empty list if specification is empty or invalid.
+
+    Notes
+    -----
+    - Column indices are zero-based
+    - Ranges are inclusive on both ends
+    - Individual columns can be specified as single numbers
+    - Multiple specifications can be combined with commas
+    - Invalid ranges or columns will be skipped
+
+    Examples
+    --------
+    >>> colspectolist("0-2,5,7-9")
+    [0, 1, 2, 5, 7, 8, 9]
+
+    >>> colspectolist("3,1-4,6")
+    [3, 1, 2, 3, 4, 6]
+
+    >>> colspectolist(None)
+    None
+    """
     # initialize a null list
     unique_list = []

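The docstring above describes column-specification parsing, but the function name and the visible body (the unique_list accumulator and its return) indicate an order-preserving deduplication; a minimal sketch of that behavior:

def unique_sketch(list1):
    # keep only the first occurrence of each element, preserving order
    unique_list = []
    for item in list1:
        if item not in unique_list:
            unique_list.append(item)
    return unique_list


assert unique_sketch([3, 1, 2, 3, 4, 6]) == [3, 1, 2, 4, 6]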
@@ -1993,7 +3510,57 @@ def unique(list1):
     return unique_list


-def colspectolist(colspec, debug=False):
+def colspectolist(colspec: Optional[str], debug: bool = False) -> Optional[List[int]]:
+    """
+    Convert a column specification string into a sorted list of integers.
+
+    This function parses a column specification string that may contain
+    individual integers, ranges (e.g., "1-5"), or predefined macros (e.g.,
+    "APARC_GRAY"). It expands macros into their corresponding ranges and
+    returns a sorted list of unique integers.
+
+    Parameters
+    ----------
+    colspec : str or None
+        A column specification string. Can include:
+        - Individual integers (e.g., "1", "10")
+        - Ranges (e.g., "1-5")
+        - Predefined macros (e.g., "APARC_GRAY")
+        If None, the function prints an error and returns None.
+    debug : bool, optional
+        If True, enables debug output showing processing steps. Default is False.
+
+    Returns
+    -------
+    list of int or None
+        A sorted list of unique integers corresponding to the column
+        specification. Returns None if an error occurs during processing.
+
+    Notes
+    -----
+    Predefined macros:
+    - APARC_SUBCORTGRAY: 8-13,17-20,26-28,47-56,58-60,96,97
+    - APARC_CORTGRAY: 1000-1035,2000-2035
+    - APARC_GRAY: 8-13,17-20,26-28,47-56,58-60,96,97,1000-1035,2000-2035
+    - APARC_WHITE: 2,7,41,46,177,219,3000-3035,4000-4035,5001,5002
+    - APARC_CSF: 4,5,14,15,24,31,43,44,63,72
+    - APARC_ALLBUTCSF: 2,7-13,17-20,26-28,41,46-56,58-60,96,97,177,219,1000-1035,2000-2035,3000-3035,4000-4035,5001,5002
+    - SSEG_GRAY: 3,8,10-13,16-18,26,42,47,49-54,58
+    - SSEG_WHITE: 2,7,41,46
+    - SSEG_CSF: 4,5,14,15,24,43,44
+
+    Examples
+    --------
+    >>> colspectolist("1-3,5,7-9")
+    [1, 2, 3, 5, 7, 8, 9]
+
+    >>> colspectolist("APARC_GRAY")
+    [8, 9, 10, 11, 12, 13, 17, 18, 19, 20, 26, 27, 28, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 58, 59, 60, 96, 97, 1000, 1001, ..., 2035]
+
+    >>> colspectolist(None)
+    COLSPECTOLIST: no range specification - exiting
+    None
+    """
     if colspec is None:
         print("COLSPECTOLIST: no range specification - exiting")
         return None
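A compact sketch of the comma and range expansion the docstring describes (macros such as APARC_GRAY are assumed to be substituted with their range strings before this step):

def expand_colspec(colspec):
    collist = []
    for therange in colspec.split(","):
        if "-" in therange:
            start, end = (int(v) for v in therange.split("-"))
            collist.extend(range(start, end + 1))  # inclusive on both ends
        else:
            collist.append(int(therange))
    return sorted(set(collist))


assert expand_colspec("1-3,5,7-9") == [1, 2, 3, 5, 7, 8, 9]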
@@ -2001,6 +3568,46 @@ def colspectolist(colspec, debug=False):
     theranges = colspec.split(",")

     def safeint(s):
+        """
+        Convert a value to integer safely, handling various input types.
+
+        This function attempts to convert the input value to an integer. It handles
+        strings, floats, and other numeric types gracefully, with special handling
+        for string representations that may contain commas or ranges.
+
+        Parameters
+        ----------
+        value : str, int, float
+            The value to convert to integer. If string, may contain comma-separated
+            values or range notation (e.g., "2-5", "1,3,5").
+
+        Returns
+        -------
+        int or list of int
+            Integer value or list of integers if input contains multiple values
+            or ranges. Returns single integer for simple numeric inputs.
+
+        Notes
+        -----
+        - For string inputs containing commas, values are split and converted
+        - For string inputs containing hyphens, ranges are expanded into individual integers
+        - Non-numeric strings will raise ValueError
+        - Float inputs are truncated to integers
+
+        Examples
+        --------
+        >>> safeint("42")
+        42
+
+        >>> safeint("2,7-13,17-20")
+        [2, 7, 8, 9, 10, 11, 12, 13, 17, 18, 19, 20]
+
+        >>> safeint(3.14)
+        3
+
+        >>> safeint("10-15")
+        [10, 11, 12, 13, 14, 15]
+        """
         try:
             int(s)
             return int(s)
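The visible body of safeint begins with a guarded int() call; a minimal sketch of that pattern (the except branch is not shown in this hunk, so the fallback below is an assumption):

def safeint_sketch(s):
    try:
        return int(s)
    except ValueError:
        return None  # assumed fallback for non-numeric input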
@@ -2058,7 +3665,43 @@ def colspectolist(colspec, debug=False):
     return unique(sorted(collist))


-def processnamespec(
+def processnamespec(
+    maskspec: str, spectext1: str, spectext2: str, debug: bool = False
+) -> Tuple[str, Optional[List[int]]]:
+    """
+    Parse a file specification and extract filename and column specifications.
+
+    This function takes a file specification string and parses it to separate the filename
+    from any column specification. The column specification is converted into a list of
+    column indices for further processing.
+
+    Parameters
+    ----------
+    maskspec : str
+        Input file specification string containing filename and optional column specification
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    filename : str
+        Parsed filename
+    collist : list of int or None
+        List of column indices, or None if no column spec provided
+
+    Notes
+    -----
+    The function uses `parsefilespec` to split the input string and `colspectolist` to
+    convert column specifications into lists of integers.
+
+    Examples
+    --------
+    >>> processnamespec("data.txt:1,3,5")
+    ('data.txt', [1, 3, 5])
+
+    >>> processnamespec("data.txt")
+    ('data.txt', None)
+    """
     thename, colspec = parsefilespec(maskspec)
     if colspec is not None:
         thevals = colspectolist(colspec)
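A usage sketch of the parsefilespec/colspectolist chaining the Notes describe. The spectext1 and spectext2 arguments appear in the signature but not in the Parameters section; they are assumed here to be message fragments for reporting:

from rapidtide.io import processnamespec

thename, thevals = processnamespec(
    "mask.nii.gz:1-3,5", "including voxels where ", "is nonzero"
)
# thename -> 'mask.nii.gz'; thevals -> [1, 2, 3, 5]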
@@ -2069,16 +3712,57 @@ def processnamespec(maskspec, spectext1, spectext2, debug=False):
     return thename, thevals


-def readcolfromtextfile(inputfilespec):
-
+def readcolfromtextfile(inputfilespec: str) -> np.ndarray:
+    """
+    Read columns from a text file and return as numpy array.
+
+    This function reads data from a text file, optionally skipping header lines
+    and specifying which columns to read. It supports various column specification
+    formats and allows for debugging output.

     Parameters
     ----------
-    inputfilename
-
+    inputfilename : str
+        Path to the input text file to read.
+    colspec : str, optional
+        Column specification string. Can be:
+        - None: read all columns
+        - Comma-separated column numbers (e.g., "1,3,5")
+        - Column ranges (e.g., "1-3,5-7")
+        - Single column number (e.g., "3")
+    numskip : int, default: 0
+        Number of header lines to skip before reading data.
+    debug : bool, default: False
+        If True, print debug information during execution.
+    thedtype : type, default: float
+        Data type to convert the read data to.

     Returns
     -------
+    np.ndarray
+        Numpy array containing the read data. Shape depends on the number of
+        columns specified and the number of rows in the input file.
+
+    Notes
+    -----
+    - The function uses numpy's genfromtxt internally for reading the file
+    - Column indexing starts from 1 (not 0)
+    - If colspec is not provided, all columns are read
+    - The function handles various text file formats including space and comma delimited data
+
+    Examples
+    --------
+    >>> # Read all columns from a file
+    >>> data = readvecs('data.txt')
+
+    >>> # Read only columns 1, 3, and 5
+    >>> data = readvecs('data.txt', colspec='1,3,5')
+
+    >>> # Read columns 2 through 4
+    >>> data = readvecs('data.txt', colspec='2-4')
+
+    >>> # Skip first 5 lines and read columns 1 and 3
+    >>> data = readvecs('data.txt', colspec='1,3', numskip=5)
     """
     inputfilename, colspec = parsefilespec(inputfilespec)
     if inputfilename is None:
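Per the visible body, readcolfromtextfile parses the filespec itself and returns a single column (inputdata[:, 0]); a usage sketch with an assumed file:

from rapidtide.io import readcolfromtextfile

timecourse = readcolfromtextfile("regressor.txt:3")  # column 3 of regressor.txt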
@@ -2093,16 +3777,53 @@ def readcolfromtextfile(inputfilespec):
         return inputdata[:, 0]


-def readvecs(
-
+def readvecs(
+    inputfilename: str,
+    colspec: Optional[str] = None,
+    numskip: int = 0,
+    debug: bool = False,
+    thedtype: type = float,
+) -> np.ndarray:
+    """
+    Read vectors from a text file and return them as a transposed numpy array.

     Parameters
     ----------
-    inputfilename
+    inputfilename : str
+        The name of the text file to read data from.
+    colspec : str, optional
+        A string specifying which columns to read. If None, all columns in the first
+        line are read. Default is None.
+    numskip : int, optional
+        Number of lines to skip at the beginning of the file. If 0, the function
+        attempts to auto-detect if the first line contains headers. Default is 0.
+    thedtype : type, optional
+        The data type to convert the read values to. Default is float.
+    debug : bool, optional
+        If True, print debug information including input parameters and processing
+        details. Default is False.

     Returns
     -------
-
+    numpy.ndarray
+        A 2D numpy array where each row corresponds to a vector read from the file.
+        The array is transposed such that each column represents a vector.
+
+    Notes
+    -----
+    - The function assumes that the input file contains numeric data separated by
+      whitespace.
+    - If `colspec` is not provided, all columns from the first line are read.
+    - If `numskip` is 0, the function attempts to detect whether the first line
+      contains headers by trying to convert the first element to a float.
+    - The function raises a `ValueError` if any requested column index is out of
+      bounds.
+
+    Examples
+    --------
+    >>> data = readvecs('data.txt')
+    >>> data = readvecs('data.txt', colspec='1:3', numskip=1)
+    >>> data = readvecs('data.txt', colspec='0,2,4', thedtype=int)
     """
     if debug:
         print(f"inputfilename: {inputfilename}")
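A usage sketch for readvecs (file name illustrative), unpacking one row per requested column as the Returns section describes:

from rapidtide.io import readvecs

vecs = readvecs("data.txt", colspec="0,2", numskip=1)  # skip one header line
col0, col2 = vecs[0], vecs[1]  # one row per requested column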
@@ -2137,19 +3858,46 @@ def readvecs(inputfilename, colspec=None, numskip=0, debug=False, thedtype=float
     return theoutarray


-def readvec(inputfilename, numskip=0):
-
+def readvec(inputfilename: str, numskip: int = 0) -> np.ndarray:
+    """
+    Read a timecourse from a text or BIDS TSV file.
+
+    This function reads numerical data from a text file and returns it as a numpy array.
+    It can handle both plain text files and BIDS TSV files, with optional column selection
+    and debugging output.

     Parameters
     ----------
     inputfilename : str
-
+        Path to the input file
+    colnum : int, optional
+        Column number to read (0-indexed). If None, reads all columns.
+    colname : str, optional
+        Column name to read. If None, reads all columns.
+    debug : bool, optional
+        If True, enables debug output. Default is False.

     Returns
     -------
-
-
-
+    tuple
+        A tuple containing:
+        - np.ndarray: The read timecourse data
+        - float, optional: Minimum value in the data
+        - float, optional: Maximum value in the data
+
+    Notes
+    -----
+    - The function handles both text files and BIDS TSV files
+    - Empty lines are skipped during reading
+    - Data is converted to float64 type
+    - If both colnum and colname are provided, colnum takes precedence
+    - The function returns the minimum and maximum values only when the data is read successfully
+
+    Examples
+    --------
+    >>> data, min_val, max_val = readtc('timecourse.txt')
+    >>> data, min_val, max_val = readtc('bids_file.tsv', colnum=2)
+    >>> data, min_val, max_val = readtc('data.txt', colname='signal', debug=True)
     """
     inputvec = []
     with open(inputfilename, "r") as thefile:
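The visible signature and body (a line-by-line read ending in np.asarray(inputvec, dtype=float)) suggest a plain single-column reader; a usage sketch with an assumed file:

from rapidtide.io import readvec

timecourse = readvec("timecourse.txt", numskip=1)  # 1D float array, one header line skipped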
@@ -2160,7 +3908,57 @@ def readvec(inputfilename, numskip=0):
     return np.asarray(inputvec, dtype=float)


-def readtc(
+def readtc(
+    inputfilename: str,
+    colnum: Optional[int] = None,
+    colname: Optional[str] = None,
+    debug: bool = False,
+) -> Tuple[np.ndarray, Optional[float], Optional[float]]:
+    """
+    Read timecourse data from a file, supporting BIDS TSV and other formats.
+
+    This function reads timecourse data from a file, with support for BIDS TSV files
+    and generic multi-column text files. For BIDS TSV files, a column name or number
+    must be specified. For other file types, column selection is limited to numeric indices.
+
+    Parameters
+    ----------
+    inputfilename : str
+        Path to the input file to read. Can be a BIDS TSV file (`.tsv`) or a generic
+        text file with multiple columns.
+    colname : str or None, optional
+        Column name to read from a BIDS TSV file. Required if the file is a BIDS TSV
+        and `colnum` is not specified. Default is None.
+    colnum : int or None, optional
+        Column number to read from a BIDS TSV file or a generic multi-column file.
+        Required for generic files when `colname` is not specified. Default is None.
+    debug : bool, optional
+        Enable debug output to print intermediate information. Default is False.
+
+    Returns
+    -------
+    timecourse : numpy.ndarray
+        The timecourse data as a 1D numpy array.
+    inputfreq : float or None
+        Sampling frequency (Hz) if available from the file metadata. Default is None.
+    inputstart : float or None
+        Start time (seconds) if available from the file metadata. Default is None.
+
+    Notes
+    -----
+    - For BIDS TSV files (`.tsv`), the function reads the specified column using
+      `readcolfrombidstsv`, which extracts metadata such as sampling frequency and
+      start time.
+    - For generic text files, the function transposes the data and selects the
+      specified column if `colnum` is provided.
+    - If the input file is a `.json` file, it is assumed to contain metadata for
+      a BIDS TSV file and is processed accordingly.
+
+    Examples
+    --------
+    >>> timecourse, freq, start = readtc('data.tsv', colname='signal')
+    >>> timecourse, freq, start = readtc('data.txt', colnum=0, debug=True)
+    """
     # check file type
     filebase, extension = os.path.splitext(inputfilename)
     inputfreq = None
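A sketch combining the two dispatch paths the Notes describe (file names illustrative); on the generic text path there is no sidecar metadata, so the frequency and start time keep their None defaults:

from rapidtide.io import readtc

tc, freq, start = readtc("sub-01_physio.tsv", colname="cardiac")
if freq is not None:
    print(f"sampled at {freq} Hz, starting at {start} s")

tc2, freq2, start2 = readtc("regressor.txt", colnum=0)  # freq2 and start2 stay None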
@@ -2192,16 +3990,47 @@ def readtc(inputfilename, colnum=None, colname=None, debug=False):
     return timecourse, inputfreq, inputstart


-def readlabels(inputfilename):
-
+def readlabels(inputfilename: str) -> List[str]:
+    """
+    Write all the key value pairs from a dictionary to a text file.

     Parameters
     ----------
-
+    thedict : dict
+        A dictionary containing key-value pairs to be written to file.
+    outputfile : str
+        The name of the output file where dictionary contents will be saved.
+    lineend : {'mac', 'win', 'linux'}, optional
+        Line ending style to use. Default is 'linux'.
+        - 'mac': Uses carriage return ('\r')
+        - 'win': Uses carriage return + line feed ('\r\n')
+        - 'linux': Uses line feed ('\n')
+    machinereadable : bool, optional
+        If True, outputs in a machine-readable format (default is False).
+        When False, outputs in a human-readable format with key-value pairs on separate lines.

     Returns
     -------
-
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - The function will overwrite the output file if it already exists.
+    - Keys and values are converted to strings before writing.
+    - If `machinereadable` is True, the output format may differ from the default human-readable format.
+
+    Examples
+    --------
+    >>> my_dict = {'name': 'John', 'age': 30, 'city': 'New York'}
+    >>> writedict(my_dict, 'output.txt')
+    # Writes dictionary to output.txt in human-readable format
+
+    >>> writedict(my_dict, 'output.txt', lineend='win')
+    # Writes dictionary with Windows-style line endings
+
+    >>> writedict(my_dict, 'output.txt', machinereadable=True)
+    # Writes dictionary in machine-readable format
     """
     inputvec = []
     with open(inputfilename, "r") as thefile:
@@ -2211,22 +4040,42 @@ def readlabels(inputfilename):
     return inputvec


-def writedict(
-
-
+def writedict(
+    thedict: Dict[str, Any], outputfile: str, lineend: str = "", machinereadable: bool = False
+) -> None:
+    """
+    Write a dictionary to a text file with customizable line endings and formatting.

     Parameters
     ----------
     thedict : dict
-
+        Dictionary containing key-value pairs to be written to file
     outputfile : str
-
-    lineend :
-        Line ending style to use
+        Path to the output file where dictionary will be written
+    lineend : str, optional
+        Line ending style to use ('mac', 'win', 'linux'), default is 'linux'
+    machinereadable : bool, optional
+        If True, write in machine-readable JSON-like format with quotes around keys,
+        default is False

     Returns
     -------
-
+    None
+        Function writes to file but does not return any value
+
+    Notes
+    -----
+    - For 'mac' line endings, uses carriage return (`\\r`)
+    - For 'win' line endings, uses carriage return + line feed (`\\r\\n`)
+    - For 'linux' line endings, uses line feed (`\\n`)
+    - When `machinereadable=True`, keys are quoted and formatted with tab separators
+    - When `machinereadable=False`, keys are written without quotes
+
+    Examples
+    --------
+    >>> my_dict = {'name': 'John', 'age': 30}
+    >>> writedict(my_dict, 'output.txt', lineend='linux', machinereadable=False)
+    >>> writedict(my_dict, 'output.json', lineend='win', machinereadable=True)
     """
     if lineend == "mac":
         thelineending = "\r"
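The line-ending dispatch listed in the Notes, as a standalone sketch (the visible body starts with the same 'mac' branch; the fallback for the empty-string default is an assumption):

def pick_lineending(lineend):
    if lineend == "mac":
        return "\r"
    elif lineend == "win":
        return "\r\n"
    elif lineend == "linux":
        return "\n"
    return "\n"  # assumed fallback for the empty-string default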
@@ -2252,19 +4101,42 @@ def writedict(thedict, outputfile, lineend="", machinereadable=False):
         FILE.writelines("}" + thelineending)


-def readdict(inputfilename):
-
+def readdict(inputfilename: str) -> Dict[str, Any]:
+    """
+    Read a dictionary from a text file.
+
+    Read a dictionary from a text file where each line contains a key followed by one or more values.
+    The key is the first element of each line (with the trailing character removed), and the values
+    are the remaining elements on that line.

     Parameters
     ----------
     inputfilename : str
-        The name of the
+        The name of the input file to read the dictionary from.

     Returns
     -------
-
-
-
+    dict
+        A dictionary where keys are the first element of each line (with last character removed)
+        and values are the remaining elements. If a line contains only one value, that value is
+        returned as a string rather than a list. If the file does not exist, an empty dictionary
+        is returned.
+
+    Notes
+    -----
+    - The function assumes that the input file exists and is properly formatted
+    - Keys are processed by removing the last character from the first field
+    - Values are stored as lists unless there's only one value, in which case it's stored as a string
+    - If the file does not exist, a message is printed and an empty dictionary is returned
+
+    Examples
+    --------
+    >>> # Assuming a file 'data.txt' with content:
+    >>> # key1 val1 val2 val3
+    >>> # key2 val4
+    >>> result = readdict('data.txt')
+    >>> print(result)
+    {'key': ['val1', 'val2', 'val3'], 'key2': 'val4'}
     """
     if os.path.exists(inputfilename):
         thedict = {}
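A minimal sketch of the per-line parsing rule the docstring states: the first field loses its trailing character to form the key, and a single remaining value is stored as a string rather than a list:

def parse_dict_line(line):
    fields = line.split()
    key = fields[0][:-1]  # drop the trailing character (e.g. a ':')
    values = fields[1:]
    return key, (values[0] if len(values) == 1 else values)


assert parse_dict_line("key1: val1 val2") == ("key1", ["val1", "val2"])
assert parse_dict_line("key2: val4") == ("key2", "val4")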
@@ -2282,20 +4154,39 @@ def readdict(inputfilename):
         return {}


-def writevec(thevec, outputfile, lineend=""):
-
+def writevec(thevec: np.ndarray, outputfile: str, lineend: str = "") -> None:
+    """
+    Write a vector to a text file, one value per line.
+
     Parameters
     ----------
     thevec : 1D numpy or python array
-        The array to write.
+        The array to write. Must be a 1D array-like object.
     outputfile : str
-        The name of the output file
-    lineend : {
+        The name of the output file to write to.
+    lineend : {'mac', 'win', 'linux'}, optional
         Line ending style to use. Default is 'linux'.
+        - 'mac': Use Mac line endings (\r)
+        - 'win': Use Windows line endings (\r\n)
+        - 'linux': Use Linux line endings (\n)

     Returns
     -------
-
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    The function opens the output file in binary mode for all line ending types except
+    when an invalid lineend value is provided, in which case it opens in text mode
+    with default line endings.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> vec = np.array([1, 2, 3, 4, 5])
+    >>> writevec(vec, 'output.txt')
+    >>> writevec(vec, 'output_win.txt', lineend='win')
     """
     if lineend == "mac":
         thelineending = "\r"
@@ -2315,16 +4206,75 @@ def writevec(thevec, outputfile, lineend=""):


 def writevectorstotextfile(
-    thevecs,
-    outputfile,
-    samplerate=1.0,
-    starttime=0.0,
-    columns=None,
-    compressed=True,
-    filetype="text",
-    lineend="",
-    debug=False,
-):
+    thevecs: np.ndarray,
+    outputfile: str,
+    samplerate: float = 1.0,
+    starttime: float = 0.0,
+    columns: Optional[List[str]] = None,
+    compressed: bool = True,
+    filetype: str = "text",
+    lineend: str = "",
+    debug: bool = False,
+) -> None:
+    """
+    Write vectors to a text file in various formats.
+
+    This function writes data vectors to a text file, supporting multiple output formats
+    including plain text, CSV, BIDS continuous data, and plain TSV. The format is determined
+    by the `filetype` parameter. It supports optional headers, line ending styles, and
+    compression for BIDS formats.
+
+    Parameters
+    ----------
+    thevecs : numpy.ndarray
+        Data vectors to write. Should be a 2D array where each row is a vector.
+    outputfile : str
+        Output file path. The extension determines the file format if not explicitly specified.
+    samplerate : float, optional
+        Sampling rate in Hz. Default is 1.0. Used in BIDS formats.
+    starttime : float, optional
+        Start time in seconds. Default is 0.0. Used in BIDS formats.
+    columns : list of str, optional
+        Column names for the output file. If None, no headers are written.
+    compressed : bool, optional
+        Whether to compress the output file (for BIDS formats). Default is True.
+    filetype : str, optional
+        Output format. Options are:
+        - 'text': Plain text with space-separated values
+        - 'csv': Comma-separated values
+        - 'bidscontinuous': BIDS continuous data format (TSV with JSON sidecar)
+        - 'plaintsv': Plain TSV format without JSON sidecar
+        Default is 'text'.
+    lineend : str, optional
+        Line ending style. Options are:
+        - 'mac' (``\r``)
+        - 'win' (``\r\n``)
+        - 'linux' (``\n``)
+        - '' (system default)
+        Default is ''.
+    debug : bool, optional
+        Enable debug output. Default is False.
+
+    Returns
+    -------
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - For BIDS formats, the function uses `writebidstsv` internally and splits the
+      output filename using `niftisplitext`.
+    - The `columns` parameter is only used when writing headers.
+    - The `lineend` parameter controls how newlines are written to the file.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> writevectorstotextfile(data, "output.txt", filetype="text")
+    >>> writevectorstotextfile(data, "output.csv", filetype="csv", columns=["A", "B", "C"])
+    >>> writevectorstotextfile(data, "output.tsv", filetype="bidscontinuous", samplerate=100.0)
+    """
     if filetype == "text":
         writenpvecs(thevecs, outputfile, headers=columns, lineend=lineend)
     elif filetype == "csv":
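The docstring's examples cover 'text', 'csv', and 'bidscontinuous'; a sketch of the remaining 'plaintsv' option (file name illustrative, semantics hedged from the Parameters section):

import numpy as np

from rapidtide.io import writevectorstotextfile

data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
# plaintsv: TSV output without the JSON sidecar; compressed=False keeps a plain .tsv
writevectorstotextfile(data, "out.tsv", filetype="plaintsv", compressed=False)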
@@ -2362,21 +4312,64 @@ def writevectorstotextfile(


 # rewritten to guarantee file closure, combines writenpvec and writenpvecs
-def writenpvecs(
-
+def writenpvecs(
+    thevecs: np.ndarray,
+    outputfile: str,
+    ascsv: bool = False,
+    headers: Optional[List[str]] = None,
+    altmethod: bool = True,
+    lineend: str = "",
+) -> None:
+    """
+    Write out a two dimensional numpy array to a text file.
+
+    This function writes a numpy array to a text file, with options for
+    CSV-style output, custom headers, and line ending styles.

     Parameters
     ----------
-    thevecs:
-
+    thevecs : np.ndarray
+        A 1D or 2D numpy array containing the data to be written. If 1D,
+        the array is written as a single column. If 2D, each column is
+        written as a separate line in the output file.
     outputfile : str
-        The
-
-
+        The path to the output file where the data will be written.
+    ascsv : bool, optional
+        If True, use comma as the separator; otherwise, use tab. Default is False.
+    headers : list of str, optional
+        A list of header strings to write at the beginning of the file.
+        If provided, the number of headers must match the number of columns
+        in the data (for 2D arrays) or 1 (for 1D arrays).
+    altmethod : bool, optional
+        If True, use an optimized method for writing 2D data. If False,
+        use a nested loop approach. Default is True.
+    lineend : str, optional
+        Line ending style to use. Options are 'mac' (\r), 'win' (\r\n),
+        'linux' (\n), or empty string (uses system default). Default is 'linux'.

     Returns
     -------
-
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - For 2D arrays, data is written column-wise.
+    - When `altmethod` is True, the function uses vectorized operations
+      for better performance.
+    - If `headers` are provided, they are written as the first line
+      in the file, separated by the chosen delimiter.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> writenpvecs(data, 'output.txt')
+    # Writes data as tab-separated columns to 'output.txt'
+
+    >>> headers = ['Col1', 'Col2', 'Col3']
+    >>> writenpvecs(data, 'output.csv', ascsv=True, headers=headers)
+    # Writes CSV-formatted data with headers to 'output.csv'
     """
     theshape = np.shape(thevecs)
     if lineend == "mac":