rapidtide 3.0.11__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1049 -46
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +188 -40
  7. rapidtide/calcsimfunc.py +242 -42
  8. rapidtide/correlate.py +1203 -383
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +53 -3
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +29 -7
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/decorators.py +91 -0
  23. rapidtide/dlfilter.py +2226 -110
  24. rapidtide/dlfiltertorch.py +4842 -0
  25. rapidtide/externaltools.py +327 -12
  26. rapidtide/fMRIData_class.py +79 -40
  27. rapidtide/filter.py +1899 -810
  28. rapidtide/fit.py +2011 -581
  29. rapidtide/genericmultiproc.py +93 -18
  30. rapidtide/happy_supportfuncs.py +2047 -172
  31. rapidtide/helper_classes.py +584 -43
  32. rapidtide/io.py +2370 -372
  33. rapidtide/linfitfiltpass.py +346 -99
  34. rapidtide/makelaggedtcs.py +210 -24
  35. rapidtide/maskutil.py +448 -62
  36. rapidtide/miscmath.py +827 -121
  37. rapidtide/multiproc.py +210 -22
  38. rapidtide/patchmatch.py +242 -42
  39. rapidtide/peakeval.py +31 -31
  40. rapidtide/ppgproc.py +2203 -0
  41. rapidtide/qualitycheck.py +352 -39
  42. rapidtide/refinedelay.py +431 -57
  43. rapidtide/refineregressor.py +494 -189
  44. rapidtide/resample.py +671 -185
  45. rapidtide/scripts/applyppgproc.py +28 -0
  46. rapidtide/scripts/showxcorr_legacy.py +7 -7
  47. rapidtide/scripts/stupidramtricks.py +15 -17
  48. rapidtide/simFuncClasses.py +1052 -77
  49. rapidtide/simfuncfit.py +269 -69
  50. rapidtide/stats.py +540 -238
  51. rapidtide/tests/happycomp +9 -0
  52. rapidtide/tests/test_cleanregressor.py +1 -2
  53. rapidtide/tests/test_dlfiltertorch.py +627 -0
  54. rapidtide/tests/test_findmaxlag.py +24 -8
  55. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  56. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  57. rapidtide/tests/test_fullrunhappy_v3.py +11 -4
  58. rapidtide/tests/test_fullrunhappy_v4.py +10 -2
  59. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  60. rapidtide/tests/test_getparsers.py +11 -3
  61. rapidtide/tests/test_refinedelay.py +0 -1
  62. rapidtide/tests/test_simroundtrip.py +16 -8
  63. rapidtide/tests/test_stcorrelate.py +3 -1
  64. rapidtide/tests/utils.py +9 -8
  65. rapidtide/tidepoolTemplate.py +142 -38
  66. rapidtide/tidepoolTemplate_alt.py +165 -44
  67. rapidtide/tidepoolTemplate_big.py +189 -52
  68. rapidtide/util.py +1217 -118
  69. rapidtide/voxelData.py +684 -37
  70. rapidtide/wiener.py +136 -23
  71. rapidtide/wiener2.py +113 -7
  72. rapidtide/workflows/adjustoffset.py +105 -3
  73. rapidtide/workflows/aligntcs.py +85 -2
  74. rapidtide/workflows/applydlfilter.py +87 -10
  75. rapidtide/workflows/applyppgproc.py +540 -0
  76. rapidtide/workflows/atlasaverage.py +210 -47
  77. rapidtide/workflows/atlastool.py +100 -3
  78. rapidtide/workflows/calcSimFuncMap.py +288 -69
  79. rapidtide/workflows/calctexticc.py +201 -9
  80. rapidtide/workflows/ccorrica.py +101 -6
  81. rapidtide/workflows/cleanregressor.py +165 -31
  82. rapidtide/workflows/delayvar.py +171 -23
  83. rapidtide/workflows/diffrois.py +81 -3
  84. rapidtide/workflows/endtidalproc.py +144 -4
  85. rapidtide/workflows/fdica.py +195 -15
  86. rapidtide/workflows/filtnifti.py +70 -3
  87. rapidtide/workflows/filttc.py +74 -3
  88. rapidtide/workflows/fitSimFuncMap.py +202 -51
  89. rapidtide/workflows/fixtr.py +73 -3
  90. rapidtide/workflows/gmscalc.py +113 -3
  91. rapidtide/workflows/happy.py +801 -199
  92. rapidtide/workflows/happy2std.py +144 -12
  93. rapidtide/workflows/happy_parser.py +163 -23
  94. rapidtide/workflows/histnifti.py +118 -2
  95. rapidtide/workflows/histtc.py +84 -3
  96. rapidtide/workflows/linfitfilt.py +117 -4
  97. rapidtide/workflows/localflow.py +328 -28
  98. rapidtide/workflows/mergequality.py +79 -3
  99. rapidtide/workflows/niftidecomp.py +322 -18
  100. rapidtide/workflows/niftistats.py +174 -4
  101. rapidtide/workflows/pairproc.py +98 -4
  102. rapidtide/workflows/pairwisemergenifti.py +85 -2
  103. rapidtide/workflows/parser_funcs.py +1421 -40
  104. rapidtide/workflows/physiofreq.py +137 -11
  105. rapidtide/workflows/pixelcomp.py +207 -5
  106. rapidtide/workflows/plethquality.py +103 -21
  107. rapidtide/workflows/polyfitim.py +151 -11
  108. rapidtide/workflows/proj2flow.py +75 -2
  109. rapidtide/workflows/rankimage.py +111 -4
  110. rapidtide/workflows/rapidtide.py +368 -76
  111. rapidtide/workflows/rapidtide2std.py +98 -2
  112. rapidtide/workflows/rapidtide_parser.py +109 -9
  113. rapidtide/workflows/refineDelayMap.py +144 -33
  114. rapidtide/workflows/refineRegressor.py +675 -96
  115. rapidtide/workflows/regressfrommaps.py +161 -37
  116. rapidtide/workflows/resamplenifti.py +85 -3
  117. rapidtide/workflows/resampletc.py +91 -3
  118. rapidtide/workflows/retrolagtcs.py +99 -9
  119. rapidtide/workflows/retroregress.py +176 -26
  120. rapidtide/workflows/roisummarize.py +174 -5
  121. rapidtide/workflows/runqualitycheck.py +71 -3
  122. rapidtide/workflows/showarbcorr.py +149 -6
  123. rapidtide/workflows/showhist.py +86 -2
  124. rapidtide/workflows/showstxcorr.py +160 -3
  125. rapidtide/workflows/showtc.py +159 -3
  126. rapidtide/workflows/showxcorrx.py +190 -10
  127. rapidtide/workflows/showxy.py +185 -15
  128. rapidtide/workflows/simdata.py +264 -38
  129. rapidtide/workflows/spatialfit.py +77 -2
  130. rapidtide/workflows/spatialmi.py +250 -27
  131. rapidtide/workflows/spectrogram.py +305 -32
  132. rapidtide/workflows/synthASL.py +154 -3
  133. rapidtide/workflows/tcfrom2col.py +76 -2
  134. rapidtide/workflows/tcfrom3col.py +74 -2
  135. rapidtide/workflows/tidepool.py +2971 -130
  136. rapidtide/workflows/utils.py +19 -14
  137. rapidtide/workflows/utils_doc.py +293 -0
  138. rapidtide/workflows/variabilityizer.py +116 -3
  139. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/METADATA +10 -8
  140. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/RECORD +144 -128
  141. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/entry_points.txt +1 -0
  142. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/WHEEL +0 -0
  143. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/licenses/LICENSE +0 -0
  144. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/top_level.txt +0 -0
rapidtide/io.py CHANGED
@@ -22,31 +22,56 @@ import operator as op
22
22
  import os
23
23
  import platform
24
24
  import sys
25
+ from typing import Any, Dict, List, Optional, Tuple, Union
25
26
 
26
27
  import nibabel as nib
27
28
  import numpy as np
28
29
  import pandas as pd
30
+ from numpy.typing import NDArray
29
31
 
30
32
  from rapidtide.tests.utils import mse
31
33
 
32
34
 
33
35
  # ---------------------------------------- NIFTI file manipulation ---------------------------
34
- def readfromnifti(inputfile, headeronly=False):
35
- r"""Open a nifti file and read in the various important parts
36
+ def readfromnifti(
37
+ inputfile: str, headeronly: bool = False
38
+ ) -> Tuple[Any, Optional[NDArray], Any, NDArray, NDArray]:
39
+ """
40
+ Open a nifti file and read in the various important parts
36
41
 
37
42
  Parameters
38
43
  ----------
39
44
  inputfile : str
40
- The name of the nifti file.
45
+ The name of the nifti file. Can be provided with or without file extension
46
+ (.nii or .nii.gz).
47
+ headeronly : bool, optional
48
+ If True, only read the header without loading data. Default is False.
41
49
 
42
50
  Returns
43
51
  -------
44
- nim : nifti image structure
45
- nim_data : array-like
46
- nim_hdr : nifti header
47
- thedims : int array
48
- thesizes : float array
49
-
52
+ tuple
53
+ A tuple containing:
54
+
55
+ - nim : nifti image structure
56
+ - nim_data : array-like or None
57
+ The image data if headeronly=False, None otherwise
58
+ - nim_hdr : nifti header
59
+ The header information copied from the nifti image
60
+ - thedims : int array
61
+ The dimensions from the nifti header
62
+ - thesizes : float array
63
+ The pixel dimensions from the nifti header
64
+
65
+ Notes
66
+ -----
67
+ This function automatically detects the file extension (.nii or .nii.gz) if
68
+ not provided in the inputfile parameter. If neither .nii nor .nii.gz extension
69
+ is found, it will look for the file with these extensions in order.
70
+
71
+ Examples
72
+ --------
73
+ >>> nim, data, hdr, dims, sizes = readfromnifti('my_image')
74
+ >>> nim, data, hdr, dims, sizes = readfromnifti('my_image.nii.gz', headeronly=True)
50
75
  """
51
76
  if os.path.isfile(inputfile):
52
77
  inputfilename = inputfile
@@ -67,13 +92,18 @@ def readfromnifti(inputfile, headeronly=False):
67
92
  return nim, nim_data, nim_hdr, thedims, thesizes
68
93
 
69
94
 
70
- def readfromcifti(inputfile, debug=False):
71
- r"""Open a cifti file and read in the various important parts
95
+ def readfromcifti(
96
+ inputfile: str, debug: bool = False
97
+ ) -> Tuple[Any, Any, NDArray, Any, NDArray, NDArray, Optional[float]]:
98
+ """
99
+ Open a cifti file and read in the various important parts
72
100
 
73
101
  Parameters
74
102
  ----------
75
103
  inputfile : str
76
104
  The name of the cifti file.
105
+ debug : bool, optional
106
+ Enable debug output. Default is False
77
107
 
78
108
  Returns
79
109
  -------
@@ -110,7 +140,52 @@ def readfromcifti(inputfile, debug=False):
110
140
  return cifti, cifti_hdr, nifti_data, nifti_hdr, thedims, thesizes, timestep
111
141
 
112
142
 
113
- def getciftitr(cifti_hdr):
143
+ def getciftitr(cifti_hdr: Any) -> Tuple[float, float]:
144
+ """
145
+ Extract the TR (repetition time) from a CIFTI header.
146
+
147
+ This function extracts timing information from a CIFTI header, specifically
148
+ the time between timepoints (TR) and the start time of the first timepoint.
149
+ It searches for a SeriesAxis in the CIFTI header matrix to extract this
150
+ information.
151
+
152
+ Parameters
153
+ ----------
154
+ cifti_hdr : Any
155
+ The CIFTI header object containing timing information. This should be
156
+ a valid CIFTI header that supports the matrix.mapped_indices and
157
+ matrix.get_axis methods.
158
+
159
+ Returns
160
+ -------
161
+ tuple of (float, float)
162
+ A tuple containing:
163
+ - timestep : float
164
+ The TR (time between timepoints) in seconds
165
+ - starttime : float
166
+ The start time of the first timepoint in seconds
167
+
168
+ Raises
169
+ ------
170
+ SystemExit
171
+ If no SeriesAxis is found in the CIFTI header, the function will
172
+ print an error message and exit the program.
173
+
174
+ Notes
175
+ -----
176
+ The function specifically looks for a SeriesAxis in the CIFTI header's
177
+ matrix. If multiple SeriesAxes exist, only the first one encountered
178
+ will be used. The timing information is extracted using the get_element()
179
+ method on the SeriesAxis object.
180
+
181
+ Examples
182
+ --------
183
+ >>> import nibabel as nib
184
+ >>> cifti_hdr = nib.load('file.cifti').header
185
+ >>> tr, start_time = getciftitr(cifti_hdr)
186
+ >>> print(f"TR: {tr} seconds, Start time: {start_time} seconds")
187
+ TR: 0.8 seconds, Start time: 0.0 seconds
188
+ """
114
189
  seriesaxis = None
115
190
  for theaxis in cifti_hdr.matrix.mapped_indices:
116
191
  if isinstance(cifti_hdr.matrix.get_axis(theaxis), nib.cifti2.SeriesAxis):
@@ -126,40 +201,136 @@ def getciftitr(cifti_hdr):
126
201
 
127
202
 
128
203
  # dims are the array dimensions along each axis
129
- def parseniftidims(thedims):
130
- r"""Split the dims array into individual elements
204
+ def parseniftidims(thedims: NDArray) -> Tuple[int, int, int, int]:
205
+ """
206
+ Split the dims array into individual elements
207
+
208
+ This function extracts the dimension sizes from a NIfTI dimensions array,
209
+ returning the number of points along each spatial and temporal dimension.
131
210
 
132
211
  Parameters
133
212
  ----------
134
- thedims : int array
135
- The nifti dims structure
213
+ thedims : NDArray of int
214
+ The NIfTI dimensions structure, where:
215
+ - thedims[0] contains the data type
216
+ - thedims[1] contains the number of points along x-axis (nx)
217
+ - thedims[2] contains the number of points along y-axis (ny)
218
+ - thedims[3] contains the number of points along z-axis (nz)
219
+ - thedims[4] contains the number of points along t-axis (nt)
136
220
 
137
221
  Returns
138
222
  -------
139
- nx, ny, nz, nt : int
140
- Number of points along each dimension
223
+ nx : int
224
+ Number of points along the x-axis
225
+ ny : int
226
+ Number of points along the y-axis
227
+ nz : int
228
+ Number of points along the z-axis
229
+ nt : int
230
+ Number of points along the t-axis (time)
231
+
232
+ Notes
233
+ -----
234
+ The input array is expected to be a NIfTI dimensions array with at least 5 elements.
235
+ This function assumes the standard NIfTI dimension ordering where dimensions 1-4
236
+ correspond to spatial x, y, z, and temporal t dimensions respectively.
237
+
238
+ Examples
239
+ --------
240
+ >>> import numpy as np
241
+ >>> dims = np.array([0, 64, 64, 32, 100, 1, 1, 1])
242
+ >>> nx, ny, nz, nt = parseniftidims(dims)
243
+ >>> print(f"Dimensions: {nx} x {ny} x {nz} x {nt}")
244
+ Dimensions: 64 x 64 x 32 x 100
141
245
  """
142
246
  return int(thedims[1]), int(thedims[2]), int(thedims[3]), int(thedims[4])
143
247
 
144
248
 
145
249
  # sizes are the mapping between voxels and physical coordinates
146
- def parseniftisizes(thesizes):
147
- r"""Split the size array into individual elements
250
+ def parseniftisizes(thesizes: NDArray) -> Tuple[float, float, float, float]:
251
+ """
252
+ Split the size array into individual elements
253
+
254
+ This function extracts voxel size information from a NIfTI header structure
255
+ and returns the scaling factors for spatial dimensions (x, y, z) and time (t).
148
256
 
149
257
  Parameters
150
258
  ----------
151
- thesizes : float array
152
- The nifti voxel size structure
259
+ thesizes : NDArray of float
260
+ The NIfTI voxel size structure containing scaling information.
261
+ Expected to be an array where indices 1-4 correspond to
262
+ x, y, z, and t scaling factors respectively.
153
263
 
154
264
  Returns
155
265
  -------
156
- dimx, dimy, dimz, dimt : float
157
- Scaling from voxel number to physical coordinates
266
+ dimx : float
267
+ Scaling factor from voxel number to physical coordinates in x dimension
268
+ dimy : float
269
+ Scaling factor from voxel number to physical coordinates in y dimension
270
+ dimz : float
271
+ Scaling factor from voxel number to physical coordinates in z dimension
272
+ dimt : float
273
+ Scaling factor from voxel number to physical coordinates in t dimension
274
+
275
+ Notes
276
+ -----
277
+ The function assumes the input array follows the NIfTI standard where:
278
+ - Index 0: unused or padding
279
+ - Index 1: x-dimension scaling
280
+ - Index 2: y-dimension scaling
281
+ - Index 3: z-dimension scaling
282
+ - Index 4: t-dimension scaling
283
+
284
+ Examples
285
+ --------
286
+ >>> import numpy as np
287
+ >>> sizes = np.array([0.0, 2.0, 2.0, 2.0, 1.0])
288
+ >>> x, y, z, t = parseniftisizes(sizes)
289
+ >>> print(x, y, z, t)
290
+ 2.0 2.0 2.0 1.0
158
291
  """
159
292
  return thesizes[1], thesizes[2], thesizes[3], thesizes[4]
160
293
 
161
294
 
162
- def dumparraytonifti(thearray, filename):
295
+ def dumparraytonifti(thearray: NDArray, filename: str) -> None:
296
+ """
297
+ Save a numpy array to a NIFTI file with an identity affine transform.
298
+
299
+ This function saves a numpy array to a NIFTI file format with an identity
300
+ affine transformation matrix. The resulting NIFTI file will have unit
301
+ spacing and no rotation or translation.
302
+
303
+ Parameters
304
+ ----------
305
+ thearray : NDArray
306
+ The data array to save. Can be 2D, 3D, or 4D array representing
307
+ medical imaging data or other volumetric data.
308
+ filename : str
309
+ The output filename (without extension). The function will append
310
+ '.nii' or '.nii.gz' extension based on the nibabel library's
311
+ default behavior.
312
+
313
+ Returns
314
+ -------
315
+ None
316
+ This function does not return any value. It saves the array to disk
317
+ as a NIFTI file.
318
+
319
+ Notes
320
+ -----
321
+ - The function uses an identity affine matrix with dimensions 4x4
322
+ - The affine matrix represents unit spacing with no rotation or translation
323
+ - This is useful for simple data storage without spatial information
324
+ - The function relies on the `savetonifti` helper function for the actual
325
+ NIFTI file writing operation
326
+
327
+ Examples
328
+ --------
329
+ >>> import numpy as np
330
+ >>> data = np.random.rand(64, 64, 64)
331
+ >>> dumparraytonifti(data, 'my_data')
332
+ >>> # Creates 'my_data.nii' file with identity affine transform
333
+ """
163
334
  outputaffine = np.zeros((4, 4), dtype=float)
164
335
  for i in range(4):
165
336
  outputaffine[i, i] = 1.0
@@ -168,8 +339,9 @@ def dumparraytonifti(thearray, filename):
168
339
  savetonifti(thearray, outputheader, filename)
169
340
 
170
341
 
171
- def savetonifti(thearray, theheader, thename, debug=False):
172
- r"""Save a data array out to a nifti file
342
+ def savetonifti(thearray: NDArray, theheader: Any, thename: str, debug: bool = False) -> None:
343
+ """
344
+ Save a data array out to a nifti file
173
345
 
174
346
  Parameters
175
347
  ----------
@@ -179,10 +351,12 @@ def savetonifti(thearray, theheader, thename, debug=False):
179
351
  A valid nifti header
180
352
  thename : str
181
353
  The name of the nifti file to save
354
+ debug : bool, optional
355
+ Enable debug output. Default is False
182
356
 
183
357
  Returns
184
358
  -------
185
-
359
+ None
186
360
  """
187
361
  outputaffine = theheader.get_best_affine()
188
362
  qaffine, qcode = theheader.get_qform(coded=True)
@@ -253,19 +427,142 @@ def savetonifti(thearray, theheader, thename, debug=False):
253
427
  output_nifti = None
254
428
 
255
429
 
256
- def niftifromarray(data):
430
+ def niftifromarray(data: NDArray) -> Any:
431
+ """
432
+ Create a NIFTI image object from a numpy array with identity affine.
433
+
434
+ This function converts a numpy array into a NIFTI image object using an identity
435
+ affine transformation matrix. The resulting image has no spatial transformation
436
+ applied, meaning the voxel coordinates directly correspond to the array indices.
437
+
438
+ Parameters
439
+ ----------
440
+ data : NDArray
441
+ The data array to convert to NIFTI format. Can be 2D, 3D, or 4D array
442
+ representing image data with arbitrary data types.
443
+
444
+ Returns
445
+ -------
446
+ nibabel.Nifti1Image
447
+ The NIFTI image object with identity affine matrix. The returned object
448
+ can be saved to disk using nibabel's save functionality.
449
+
450
+ Notes
451
+ -----
452
+ - The affine matrix is set to identity (4x4), which means no spatial
453
+ transformation is applied
454
+ - This function is useful for creating NIFTI images from processed data
455
+ that doesn't require spatial registration
456
+ - The data array is copied into the NIFTI image object
457
+
458
+ Examples
459
+ --------
460
+ >>> import numpy as np
461
+ >>> data = np.random.rand(64, 64, 32)
462
+ >>> img = niftifromarray(data)
463
+ >>> print(img.shape)
464
+ (64, 64, 32)
465
+ >>> print(img.affine)
466
+ [[1. 0. 0. 0.]
467
+ [0. 1. 0. 0.]
468
+ [0. 0. 1. 0.]
469
+ [0. 0. 0. 1.]]
470
+ """
257
471
  return nib.Nifti1Image(data, affine=np.eye(4))
258
472
 
259
473
 
260
- def niftihdrfromarray(data):
474
+ def niftihdrfromarray(data: NDArray) -> Any:
475
+ """
476
+ Create a NIFTI header from a numpy array with identity affine.
477
+
478
+ This function creates a NIFTI header object from a numpy array by constructing
479
+ a minimal NIFTI image with an identity affine matrix and extracting its header.
480
+ The resulting header contains basic NIFTI metadata but no spatial transformation
481
+ information beyond the identity matrix.
482
+
483
+ Parameters
484
+ ----------
485
+ data : NDArray
486
+ The data array to create a header for. The array can be of any shape and
487
+ data type, but should typically represent medical imaging data.
488
+
489
+ Returns
490
+ -------
491
+ nibabel.Nifti1Header
492
+ The NIFTI header object containing metadata for the input data array.
493
+
494
+ Notes
495
+ -----
496
+ The returned header is a copy of the header from a NIFTI image with identity
497
+ affine matrix. This is useful for creating NIFTI headers without requiring
498
+ full NIFTI image files or spatial transformation information.
499
+
500
+ Examples
501
+ --------
502
+ >>> import numpy as np
503
+ >>> data = np.random.rand(64, 64, 64)
504
+ >>> header = niftihdrfromarray(data)
505
+ >>> print(header)
506
+ <nibabel.nifti1.Nifti1Header object at 0x...>
507
+ """
261
508
  return nib.Nifti1Image(data, affine=np.eye(4)).header.copy()
262
509
 
263
510
 
264
511
  def makedestarray(
265
- destshape,
266
- filetype="nifti",
267
- rt_floattype="float64",
268
- ):
512
+ destshape: Union[Tuple, NDArray],
513
+ filetype: str = "nifti",
514
+ rt_floattype: np.dtype = np.float64,
515
+ ) -> Tuple[NDArray, int]:
516
+ """
517
+ Create a destination array for output data based on file type and shape.
518
+
519
+ Parameters
520
+ ----------
521
+ destshape : tuple or numpy array
522
+ Shape specification for the output array. For 'nifti' files, this is expected
523
+ to be a 3D or 4D shape; for 'cifti', it is expected to be a 2D or 3D shape
524
+ where the last dimension corresponds to spatial data and the second-to-last
525
+ to time; for 'text', it is expected to be a 1D or 2D shape.
526
+ filetype : str, optional
527
+ Type of output file. Must be one of 'nifti', 'cifti', or 'text'. Default is 'nifti'.
528
+ rt_floattype : np.dtype, optional
529
+ Data type for the output array. Default is 'np.float64'.
530
+
531
+ Returns
532
+ -------
533
+ outmaparray : numpy array
534
+ Pre-allocated output array with appropriate shape and dtype. The shape depends
535
+ on the `filetype` and `destshape`:
536
+ - For 'nifti': 1D array if 3D input, 2D array if 4D input.
537
+ - For 'cifti': 1D or 2D array depending on time dimension.
538
+ - For 'text': 1D or 2D array depending on time dimension.
539
+ internalspaceshape : int
540
+ The flattened spatial dimension size used to determine the shape of the output array.
541
+
542
+ Notes
543
+ -----
544
+ This function handles different file types by interpreting the input `destshape`
545
+ differently:
546
+ - For 'nifti', the spatial dimensions are multiplied together to form the
547
+ `internalspaceshape`, and the time dimension is inferred from the fourth
548
+ axis if present.
549
+ - For 'cifti', the last dimension is treated as spatial, and the second-to-last
550
+ as temporal if it exceeds 1.
551
+ - For 'text', the first dimension is treated as spatial, and the second as time.
552
+
553
+ Examples
554
+ --------
555
+ >>> import numpy as np
556
+ >>> from typing import Tuple, Union
557
+ >>> makedestarray((64, 64, 32), filetype="nifti")
558
+ (array([0., 0., ..., 0.]), 2097152)
559
+
560
+ >>> makedestarray((100, 50), filetype="text")
561
+ (array([0., 0., ..., 0.]), 100)
562
+
563
+ >>> makedestarray((100, 50, 20), filetype="cifti")
564
+ (array([[0., 0., ..., 0.], ..., [0., 0., ..., 0.]]), 20)
565
+ """
269
566
  if filetype == "text":
270
567
  try:
271
568
  internalspaceshape = destshape[0]
@@ -295,12 +592,64 @@ def makedestarray(
295
592
 
296
593
 
297
594
  def populatemap(
298
- themap,
299
- internalspaceshape,
300
- validvoxels,
301
- outmaparray,
302
- debug=False,
303
- ):
595
+ themap: NDArray,
596
+ internalspaceshape: int,
597
+ validvoxels: Optional[NDArray],
598
+ outmaparray: NDArray,
599
+ debug: bool = False,
600
+ ) -> NDArray:
601
+ """
602
+ Populate an output array with data from a map, handling valid voxel masking.
603
+
604
+ This function populates an output array with data from a source map, optionally
605
+ masking invalid voxels. It supports both 1D and 2D output arrays.
606
+
607
+ Parameters
608
+ ----------
609
+ themap : NDArray
610
+ The source data to populate into the output array. Shape is either
611
+ ``(internalspaceshape,)`` for 1D or ``(internalspaceshape, N)`` for 2D.
612
+ internalspaceshape : int
613
+ The total spatial dimension size, used to determine the expected shape
614
+ of the input map and the output array.
615
+ validvoxels : NDArray or None
616
+ Indices of valid voxels to populate. If None, all voxels are populated.
617
+ Shape should be ``(M,)`` where M is the number of valid voxels.
618
+ outmaparray : NDArray
619
+ The destination array to populate. Shape should be either ``(internalspaceshape,)``
620
+ for 1D or ``(internalspaceshape, N)`` for 2D.
621
+ debug : bool, optional
622
+ Enable debug output. Default is False.
623
+
624
+ Returns
625
+ -------
626
+ NDArray
627
+ The populated output array with the same shape as `outmaparray`.
628
+
629
+ Notes
630
+ -----
631
+ - If `validvoxels` is provided, only the specified voxels are updated.
632
+ - The function modifies `outmaparray` in-place and returns it.
633
+ - For 2D arrays, the second dimension is preserved in the output.
634
+
635
+ Examples
636
+ --------
637
+ >>> import numpy as np
638
+ >>> themap = np.array([1, 2, 3, 4])
639
+ >>> outmaparray = np.zeros(4)
640
+ >>> validvoxels = np.array([0, 2])
641
+ >>> result = populatemap(themap, 4, validvoxels, outmaparray)
642
+ >>> print(result)
643
+ [1. 0. 3. 0.]
644
+
645
+ >>> outmaparray = np.zeros((4, 2))
646
+ >>> result = populatemap(themap.reshape((4, 1)), 4, None, outmaparray)
647
+ >>> print(result)
648
+ [[1.]
649
+ [2.]
650
+ [3.]
651
+ [4.]]
652
+ """
304
653
  if len(outmaparray.shape) == 1:
305
654
  outmaparray[:] = 0.0
306
655
  if validvoxels is not None:
@@ -321,31 +670,99 @@ def populatemap(
321
670
 
322
671
 
323
672
  def savemaplist(
324
- outputname,
325
- maplist,
326
- validvoxels,
327
- destshape,
328
- theheader,
329
- bidsbasedict,
330
- filetype="nifti",
331
- rt_floattype="float64",
332
- cifti_hdr=None,
333
- savejson=True,
334
- debug=False,
335
- ):
673
+ outputname: str,
674
+ maplist: List[Tuple],
675
+ validvoxels: Optional[NDArray],
676
+ destshape: Union[Tuple, NDArray],
677
+ theheader: Any,
678
+ bidsbasedict: Dict[str, Any],
679
+ filetype: str = "nifti",
680
+ rt_floattype: np.dtype = np.float64,
681
+ cifti_hdr: Optional[Any] = None,
682
+ savejson: bool = True,
683
+ debug: bool = False,
684
+ ) -> None:
685
+ """
686
+ Save a list of data maps to files with appropriate BIDS metadata.
687
+
688
+ This function saves a list of data maps to output files (NIfTI, CIFTI, or text)
689
+ using the specified file type and includes BIDS-compliant metadata in JSON sidecars.
690
+ It supports mapping data into a destination array, handling valid voxels, and
691
+ writing out the final files with appropriate naming and headers.
692
+
693
+ Parameters
694
+ ----------
695
+ outputname : str
696
+ Base name for output files (without extension).
697
+ maplist : list of tuples
698
+ List of (data, suffix, maptype, unit, description) tuples to save.
699
+ Each tuple corresponds to one map to be saved.
700
+ validvoxels : numpy array or None
701
+ Indices of valid voxels in the data. If None, all voxels are considered valid.
702
+ destshape : tuple or numpy array
703
+ Shape of the destination array into which data will be mapped.
704
+ theheader : nifti/cifti header
705
+ Header object for the output files (NIfTI or CIFTI).
706
+ bidsbasedict : dict
707
+ Base BIDS metadata to include in JSON sidecars.
708
+ filetype : str, optional
709
+ Output file type ('nifti', 'cifti', or 'text'). Default is 'nifti'.
710
+ rt_floattype : str, optional
711
+ Data type for output arrays. Default is 'float64'.
712
+ cifti_hdr : cifti header or None, optional
713
+ CIFTI header if filetype is 'cifti'. Default is None.
714
+ savejson : bool, optional
715
+ Whether to save JSON sidecar files. Default is True.
716
+ debug : bool, optional
717
+ Enable debug output. Default is False.
718
+
719
+ Returns
720
+ -------
721
+ None
722
+ This function does not return any value; it writes files to disk.
723
+
724
+ Notes
725
+ -----
726
+ - For CIFTI files, if the data is a series (multi-dimensional), it is saved with
727
+ the provided names; otherwise, it uses temporal offset and step information.
728
+ - The function uses `makedestarray` to prepare the output array and `populatemap`
729
+ to copy data into the array based on valid voxels.
730
+ - If `savejson` is True, a JSON file is created for each map with metadata
731
+ including unit and description.
732
+
733
+ Examples
734
+ --------
735
+ >>> savemaplist(
736
+ ... outputname="sub-01_task-rest",
737
+ ... maplist=[
738
+ ... (data1, "stat", "stat", "z", "Statistical map"),
739
+ ... (data2, "mask", "mask", None, "Binary mask"),
740
+ ... ],
741
+ ... validvoxels=valid_indices,
742
+ ... destshape=(100, 100, 100),
743
+ ... theheader=nifti_header,
744
+ ... bidsbasedict={"Dataset": "MyDataset"},
745
+ ... filetype="nifti",
746
+ ... savejson=True,
747
+ ... )
748
+ """
336
749
  outmaparray, internalspaceshape = makedestarray(
337
750
  destshape,
338
751
  filetype=filetype,
339
752
  rt_floattype=rt_floattype,
340
753
  )
754
+ if debug:
755
+ print("maplist:")
756
+ print(maplist)
341
757
  for themap, mapsuffix, maptype, theunit, thedescription in maplist:
342
758
  # copy the data into the output array, remapping if warranted
343
759
  if debug:
760
+ print(f"processing map {mapsuffix}")
344
761
  if validvoxels is None:
345
- print(f"savemaplist: saving {mapsuffix} to {destshape}")
762
+ print(f"savemaplist: saving {mapsuffix} of shape {themap.shape} to {destshape}")
346
763
  else:
347
764
  print(
348
- f"savemaplist: saving {mapsuffix} to {destshape} from {np.shape(validvoxels)[0]} valid voxels"
765
+ f"savemaplist: saving {mapsuffix} of shape {themap.shape} to {destshape} from {np.shape(validvoxels)[0]} valid voxels"
349
766
  )
350
767
  outmaparray = populatemap(
351
768
  themap,
@@ -396,40 +813,66 @@ def savemaplist(
396
813
 
397
814
 
398
815
  def savetocifti(
399
- thearray,
400
- theciftiheader,
401
- theniftiheader,
402
- thename,
403
- isseries=False,
404
- names=["placeholder"],
405
- start=0.0,
406
- step=1.0,
407
- debug=False,
408
- ):
409
- r"""Save a data array out to a cifti
816
+ thearray: NDArray,
817
+ theciftiheader: Any,
818
+ theniftiheader: Any,
819
+ thename: str,
820
+ isseries: bool = False,
821
+ names: List[str] = ["placeholder"],
822
+ start: float = 0.0,
823
+ step: float = 1.0,
824
+ debug: bool = False,
825
+ ) -> None:
826
+ """
827
+ Save a data array out to a CIFTI file.
828
+
829
+ This function saves a given data array to a CIFTI file (either dense or parcellated,
830
+ scalar or series) based on the provided headers and parameters.
410
831
 
411
832
  Parameters
412
833
  ----------
413
834
  thearray : array-like
414
- The data array to save.
835
+ The data array to be saved. The shape is expected to be (n_timepoints, n_vertices)
836
+ or (n_vertices,) for scalar data.
415
837
  theciftiheader : cifti header
416
- A valid cifti header
838
+ A valid CIFTI header object containing axis information, including BrainModelAxis
839
+ or ParcelsAxis.
417
840
  theniftiheader : nifti header
418
- A valid nifti header
841
+ A valid NIfTI header object to be used for setting the intent of the output file.
419
842
  thename : str
420
- The name of the cifti file to save
421
- isseries: bool
422
- True if output is a dtseries, False if dtscalar
423
- start: float
424
- starttime in seconds
425
- step: float
426
- timestep in seconds
427
- debug: bool
428
- Print extended debugging information
843
+ The base name of the output CIFTI file (without extension).
844
+ isseries : bool, optional
845
+ If True, the output will be a time series file (dtseries or ptseries).
846
+ If False, it will be a scalar file (dscalar or pscalar). Default is False.
847
+ names : list of str, optional
848
+ Names for scalar maps when `isseries` is False. Default is ['placeholder'].
849
+ start : float, optional
850
+ Start time in seconds for the time series. Default is 0.0.
851
+ step : float, optional
852
+ Time step in seconds for the time series. Default is 1.0.
853
+ debug : bool, optional
854
+ If True, print debugging information. Default is False.
429
855
 
430
856
  Returns
431
857
  -------
432
-
858
+ None
859
+ This function does not return anything; it saves the file to disk.
860
+
861
+ Notes
862
+ -----
863
+ The function automatically detects whether the input CIFTI header contains a
864
+ BrainModelAxis or a ParcelsAxis and builds the appropriate output structure.
865
+ The correct CIFTI file extension (e.g., .dtseries.nii, .dscalar.nii) is appended
866
+ to the output filename based on the `isseries` and parcellation flags.
867
+
868
+ Examples
869
+ --------
870
+ >>> import numpy as np
871
+ >>> import nibabel as nib
872
+ >>> data = np.random.rand(100, 50)
873
+ >>> cifti_header = nib.load('input.cifti').header
874
+ >>> nifti_header = nib.load('input.nii').header
875
+ >>> savetocifti(data, cifti_header, nifti_header, 'output', isseries=True)
433
876
  """
434
877
  if debug:
435
878
  print("savetocifti:", thename)
@@ -525,19 +968,38 @@ def savetocifti(
525
968
  nib.cifti2.save(img, thename + suffix)
526
969
 
527
970
 
528
- def checkifnifti(filename):
529
- r"""Check to see if a file name is a valid nifti name.
971
+ def checkifnifti(filename: str) -> bool:
972
+ """
973
+ Check to see if a file name is a valid nifti name.
974
+
975
+ This function determines whether a given filename has a valid NIfTI file extension.
976
+ NIfTI files typically have extensions ".nii" or ".nii.gz" for compressed files.
530
977
 
531
978
  Parameters
532
979
  ----------
533
980
  filename : str
534
- The file name
981
+ The file name to check for valid NIfTI extension.
535
982
 
536
983
  Returns
537
984
  -------
538
- isnifti : bool
539
- True if name is a valid nifti file name.
540
-
985
+ bool
986
+ True if the filename ends with ".nii" or ".nii.gz", False otherwise.
987
+
988
+ Notes
989
+ -----
990
+ This function only checks the file extension and does not verify if the file actually exists
991
+ or contains valid NIfTI data. It performs a simple string matching operation.
992
+
993
+ Examples
994
+ --------
995
+ >>> checkifnifti("image.nii")
996
+ True
997
+ >>> checkifnifti("data.nii.gz")
998
+ True
999
+ >>> checkifnifti("scan.json")
1000
+ False
1001
+ >>> checkifnifti("volume.nii.gz")
1002
+ True
541
1003
  """
542
1004
  if filename.endswith(".nii") or filename.endswith(".nii.gz"):
543
1005
  return True
@@ -545,22 +1007,44 @@ def checkifnifti(filename):
545
1007
  return False
546
1008
 
547
1009
 
548
- def niftisplitext(filename):
549
- r"""Split nifti filename into name base and extensionn.
1010
+ def niftisplitext(filename: str) -> Tuple[str, str]:
1011
+ """
1012
+ Split nifti filename into name base and extension.
1013
+
1014
+ This function splits a NIfTI filename into its base name and extension components.
1015
+ It handles NIfTI files that may have double extensions (e.g., '.nii.gz') by properly
1016
+ combining the extensions.
550
1017
 
551
1018
  Parameters
552
1019
  ----------
553
1020
  filename : str
554
- The file name
1021
+ The NIfTI file name to split, which may contain double extensions like '.nii.gz'
555
1022
 
556
1023
  Returns
557
1024
  -------
558
- name : str
559
- Base name of the nifti file.
560
-
561
- ext : str
562
- Extension of the nifti file.
563
-
1025
+ tuple[str, str]
1026
+ A tuple containing:
1027
+ - name : str
1028
+ Base name of the NIfTI file (without extension)
1029
+ - ext : str
1030
+ Extension of the NIfTI file (including any additional extensions)
1031
+
1032
+ Notes
1033
+ -----
1034
+ This function is specifically designed for NIfTI files which commonly have
1035
+ double extensions (e.g., '.nii.gz', '.nii.bz2'). It properly handles these
1036
+ cases by combining the two extension components.
1037
+
1038
+ Examples
1039
+ --------
1040
+ >>> niftisplitext('image.nii.gz')
1041
+ ('image', '.nii.gz')
1042
+
1043
+ >>> niftisplitext('data.nii')
1044
+ ('data', '.nii')
1045
+
1046
+ >>> niftisplitext('volume.nii.bz2')
1047
+ ('volume', '.nii.bz2')
564
1048
  """
565
1049
  firstsplit = os.path.splitext(filename)
566
1050
  secondsplit = os.path.splitext(firstsplit[0])
@@ -570,7 +1054,47 @@ def niftisplitext(filename):
570
1054
  return firstsplit[0], firstsplit[1]
571
1055
 
572
1056
 
573
- def niftisplit(inputfile, outputroot, axis=3):
1057
+ def niftisplit(inputfile: str, outputroot: str, axis: int = 3) -> None:
1058
+ """
1059
+ Split a NIFTI file along a specified axis into separate files.
1060
+
1061
+ This function splits a NIFTI image along a given axis into multiple
1062
+ individual NIFTI files, each corresponding to a slice along that axis.
1063
+ The output files are named using the provided root name with zero-padded
1064
+ slice indices.
1065
+
1066
+ Parameters
1067
+ ----------
1068
+ inputfile : str
1069
+ Path to the input NIFTI file to be split.
1070
+ outputroot : str
1071
+ Base name for the output files. Each output file will be named
1072
+ ``outputroot + str(i).zfill(4)`` where ``i`` is the slice index.
1073
+ axis : int, optional
1074
+ Axis along which to split the NIFTI file. Valid values are 0-4,
1075
+ corresponding to the dimensions of the NIFTI file. Default is 3,
1076
+ which corresponds to the time axis in 4D or 5D NIFTI files.
1077
+
1078
+ Returns
1079
+ -------
1080
+ None
1081
+ This function does not return any value. It writes the split slices
1082
+ as separate NIFTI files to disk.
1083
+
1084
+ Notes
1085
+ -----
1086
+ - The function supports both 4D and 5D NIFTI files.
1087
+ - The header information is preserved for each output slice, with the
1088
+ dimension along the split axis set to 1.
1089
+ - Slice indices in the output file names are zero-padded to four digits
1090
+ (e.g., ``0000``, ``0001``, etc.).
1091
+
1092
+ Examples
1093
+ --------
1094
+ >>> niftisplit('input.nii.gz', 'slice_', axis=2)
1095
+ Splits the input NIFTI file along the third axis (axis=2) and saves
1096
+ the resulting slices as ``slice_0000.nii.gz``, ``slice_0001.nii.gz``, etc.
1097
+ """
574
1098
  infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
575
1099
  theheader = copy.deepcopy(infile_hdr)
576
1100
  numpoints = infiledims[axis + 1]
@@ -604,7 +1128,60 @@ def niftisplit(inputfile, outputroot, axis=3):
604
1128
  savetonifti(thisslice, theheader, outputroot + str(i).zfill(4))
605
1129
 
606
1130
 
607
- def niftimerge(inputlist, outputname, writetodisk=True, axis=3, returndata=False, debug=False):
1131
+ def niftimerge(
1132
+ inputlist: List[str],
1133
+ outputname: str,
1134
+ writetodisk: bool = True,
1135
+ axis: int = 3,
1136
+ returndata: bool = False,
1137
+ debug: bool = False,
1138
+ ) -> Optional[Tuple[NDArray, Any]]:
1139
+ """
1140
+ Merge multiple NIFTI files along a specified axis.
1141
+
1142
+ This function reads a list of NIFTI files, concatenates their data along a
1143
+ specified axis, and optionally writes the result to a new NIFTI file. It can
1144
+ also return the merged data and header for further processing.
1145
+
1146
+ Parameters
1147
+ ----------
1148
+ inputlist : list of str
1149
+ List of input NIFTI file paths to merge.
1150
+ outputname : str
1151
+ Path for the merged output NIFTI file.
1152
+ writetodisk : bool, optional
1153
+ If True, write the merged data to disk. Default is True.
1154
+ axis : int, optional
1155
+ Axis along which to concatenate the data (0-4). Default is 3, which
1156
+ corresponds to the time axis. The dimension of the output along this
1157
+ axis will be the number of input files.
1158
+ returndata : bool, optional
1159
+ If True, return the merged data array and header. Default is False.
1160
+ debug : bool, optional
1161
+ If True, print debug information during execution. Default is False.
1162
+
1163
+ Returns
1164
+ -------
1165
+ tuple of (NDArray, Any) or None
1166
+ If `returndata` is True, returns a tuple of:
1167
+ - `output_data`: The merged NIFTI data as a numpy array.
1168
+ - `infile_hdr`: The header from the last input file.
1169
+ If `returndata` is False, returns None.
1170
+
1171
+ Notes
1172
+ -----
1173
+ - The function assumes all input files have compatible dimensions except
1174
+ along the concatenation axis.
1175
+ - If the input file has 3D dimensions, it is reshaped to 4D before concatenation.
1176
+ - The output NIFTI header is updated to reflect the new dimension along the
1177
+ concatenation axis.
1178
+
1179
+ Examples
1180
+ --------
1181
+ >>> input_files = ['file1.nii', 'file2.nii', 'file3.nii']
1182
+ >>> niftimerge(input_files, 'merged.nii', axis=3, writetodisk=True)
1183
+ >>> data, header = niftimerge(input_files, 'merged.nii', returndata=True)
1184
+ """
608
1185
  inputdata = []
609
1186
  for thefile in inputlist:
610
1187
  if debug:
@@ -625,7 +1202,40 @@ def niftimerge(inputlist, outputname, writetodisk=True, axis=3, returndata=False
625
1202
  return output_data, infile_hdr
626
1203
 
627
1204
 
628
- def niftiroi(inputfile, outputfile, startpt, numpoints):
1205
+ def niftiroi(inputfile: str, outputfile: str, startpt: int, numpoints: int) -> None:
1206
+ """
1207
+ Extract a region of interest (ROI) from a NIFTI file along the time axis.
1208
+
1209
+ This function extracts a specified number of timepoints from a NIFTI file starting
1210
+ at a given timepoint index. The extracted data is saved to a new NIFTI file.
1211
+
1212
+ Parameters
1213
+ ----------
1214
+ inputfile : str
1215
+ Path to the input NIFTI file
1216
+ outputfile : str
1217
+ Path for the output ROI file
1218
+ startpt : int
1219
+ Starting timepoint index (0-based)
1220
+ numpoints : int
1221
+ Number of timepoints to extract
1222
+
1223
+ Returns
1224
+ -------
1225
+ None
1226
+ This function does not return any value but saves the extracted ROI to the specified output file.
1227
+
1228
+ Notes
1229
+ -----
1230
+ The function handles both 4D and 5D NIFTI files. For 5D files, the function preserves
1231
+ the fifth dimension in the output. The time dimension is reduced according to the
1232
+ specified number of points.
1233
+
1234
+ Examples
1235
+ --------
1236
+ >>> niftiroi('input.nii', 'output.nii', 10, 50)
1237
+ Extracts timepoints 10-59 from input.nii and saves to output.nii
1238
+ """
629
1239
  print(inputfile, outputfile, startpt, numpoints)
630
1240
  infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
631
1241
  theheader = copy.deepcopy(infile_hdr)
@@ -637,19 +1247,41 @@ def niftiroi(inputfile, outputfile, startpt, numpoints):
637
1247
  savetonifti(output_data, theheader, outputfile)
638
1248
 
639
1249
 
640
- def checkifcifti(filename, debug=False):
641
- r"""Check to see if the specified file is CIFTI format
1250
+ def checkifcifti(filename: str, debug: bool = False) -> bool:
1251
+ """
1252
+ Check to see if the specified file is CIFTI format
1253
+
1254
+ This function determines whether a given neuroimaging file is in CIFTI (Connectivity Information Format)
1255
+ by examining the file's header information. CIFTI files have specific intent codes that distinguish them
1256
+ from other neuroimaging formats like NIFTI.
642
1257
 
643
1258
  Parameters
644
1259
  ----------
645
1260
  filename : str
646
- The file name
1261
+ The path to the file to be checked for CIFTI format
1262
+ debug : bool, optional
1263
+ Enable debug output to see intermediate processing information. Default is False
647
1264
 
648
1265
  Returns
649
1266
  -------
650
- iscifti : bool
651
- True if the file header indicates this is a CIFTI file
652
-
1267
+ bool
1268
+ True if the file header indicates this is a CIFTI file (intent code between 3000 and 3099),
1269
+ False otherwise
1270
+
1271
+ Notes
1272
+ -----
1273
+ CIFTI files are identified by their intent code, which should be in the range [3000, 3100) for valid
1274
+ CIFTI format files. This function uses nibabel to load the file and examine its NIfTI header properties.
1275
+
1276
+ Examples
1277
+ --------
1278
+ >>> is_cifti = checkifcifti('my_data.nii.gz')
1279
+ >>> print(is_cifti)
1280
+ True
1281
+
1282
+ >>> is_cifti = checkifcifti('my_data.nii.gz', debug=True)
1283
+ >>> print(is_cifti)
1284
+ True
653
1285
  """
654
1286
  theimg = nib.load(filename)
655
1287
  thedict = vars(theimg)
@@ -666,19 +1298,36 @@ def checkifcifti(filename, debug=False):
666
1298
  return False
667
1299
 
668
1300
 
669
- def checkiftext(filename):
670
- r"""Check to see if the specified filename ends in '.txt'
1301
+ def checkiftext(filename: str) -> bool:
1302
+ """
1303
+ Check to see if the specified filename ends in '.txt'
1304
+
1305
+ This function determines whether a given filename has a '.txt' extension
1306
+ by checking if the string ends with the specified suffix.
671
1307
 
672
1308
  Parameters
673
1309
  ----------
674
1310
  filename : str
675
- The file name
1311
+ The file name to check for '.txt' extension
676
1312
 
677
1313
  Returns
678
1314
  -------
679
- istext : bool
680
- True if filename ends with '.txt'
681
-
1315
+ bool
1316
+ True if filename ends with '.txt', False otherwise
1317
+
1318
+ Notes
1319
+ -----
1320
+ This function performs a case-sensitive check. For case-insensitive
1321
+ checking, convert the filename to lowercase before calling this function.
1322
+
1323
+ Examples
1324
+ --------
1325
+ >>> checkiftext("document.txt")
1326
+ True
1327
+ >>> checkiftext("image.jpg")
1328
+ False
1329
+ >>> checkiftext("notes.TXT")
1330
+ False
682
1331
  """
683
1332
  if filename.endswith(".txt"):
684
1333
  return True
@@ -686,19 +1335,41 @@ def checkiftext(filename):
686
1335
  return False
687
1336
 
688
1337
 
689
- def getniftiroot(filename):
690
- r"""Strip a nifti filename down to the root with no extensions
1338
+ def getniftiroot(filename: str) -> str:
1339
+ """
1340
+ Strip a nifti filename down to the root with no extensions.
1341
+
1342
+ This function removes NIfTI file extensions (.nii or .nii.gz) from a filename,
1343
+ returning only the root name without any extensions.
691
1344
 
692
1345
  Parameters
693
1346
  ----------
694
1347
  filename : str
695
- The file name to strip
1348
+ The NIfTI filename to strip of extensions
696
1349
 
697
1350
  Returns
698
1351
  -------
699
- strippedname : str
700
- The file name without any nifti extensions
1352
+ str
1353
+ The filename without NIfTI extensions (.nii or .nii.gz)
1354
+
1355
+ Notes
1356
+ -----
1357
+ This function only removes the standard NIfTI extensions (.nii and .nii.gz).
1358
+ For filenames without these extensions, the original filename is returned unchanged.
1359
+
1360
+ Examples
1361
+ --------
1362
+ >>> getniftiroot("sub-01_task-rest_bold.nii")
1363
+ 'sub-01_task-rest_bold'
701
1364
 
1365
+ >>> getniftiroot("anatomical.nii.gz")
1366
+ 'anatomical'
1367
+
1368
+ >>> getniftiroot("image.nii.gz")
1369
+ 'image'
1370
+
1371
+ >>> getniftiroot("data.txt")
1372
+ 'data.txt'
702
1373
  """
703
1374
  if filename.endswith(".nii"):
704
1375
  return filename[:-4]
@@ -708,21 +1379,39 @@ def getniftiroot(filename):
708
1379
  return filename
709
1380
 
710
1381
 
711
- def fmriheaderinfo(niftifilename):
712
- r"""Retrieve the header information from a nifti file
1382
+ def fmriheaderinfo(niftifilename: str) -> Tuple[NDArray, NDArray]:
1383
+ """
1384
+ Retrieve the header information from a nifti file.
1385
+
1386
+ This function extracts repetition time and timepoints information from a NIfTI file header.
1387
+ The repetition time is returned in seconds, and the number of timepoints is extracted
1388
+ from the header dimensions.
713
1389
 
714
1390
  Parameters
715
1391
  ----------
716
1392
  niftifilename : str
717
- The name of the nifti file
1393
+ The name of the NIfTI file to read header information from.
718
1394
 
719
1395
  Returns
720
1396
  -------
721
- tr : float
722
- The repetition time, in seconds
723
- timepoints : int
724
- The number of points along the time axis
725
-
1397
+ tuple of (NDArray, NDArray)
1398
+ A tuple containing:
1399
+ - tr : float
1400
+ The repetition time, in seconds
1401
+ - timepoints : int
1402
+ The number of points along the time axis
1403
+
1404
+ Notes
1405
+ -----
1406
+ The function uses nibabel to load the NIfTI file and extracts header information
1407
+ from the 'dim' and 'pixdim' fields. If the time unit is specified as milliseconds,
1408
+ the repetition time is converted to seconds.
1409
+
1410
+ Examples
1411
+ --------
1412
+ >>> tr, timepoints = fmriheaderinfo('subject_01.nii.gz')
1413
+ >>> print(f"Repetition time: {tr} seconds")
1414
+ >>> print(f"Number of timepoints: {timepoints}")
726
1415
  """
727
1416
  nim = nib.load(niftifilename)
728
1417
  hdr = nim.header.copy()
@@ -733,8 +1422,9 @@ def fmriheaderinfo(niftifilename):
733
1422
  return thesizes, thedims
734
1423
 
735
1424
 
736
- def fmritimeinfo(niftifilename):
737
- r"""Retrieve the repetition time and number of timepoints from a nifti file
1425
+ def fmritimeinfo(niftifilename: str) -> Tuple[float, int]:
1426
+ """
1427
+ Retrieve the repetition time and number of timepoints from a nifti file
738
1428
 
739
1429
  Parameters
740
1430
  ----------
@@ -748,6 +1438,18 @@ def fmritimeinfo(niftifilename):
748
1438
  timepoints : int
749
1439
  The number of points along the time axis
750
1440
 
1441
+ Notes
1442
+ -----
1443
+ This function extracts the repetition time (TR) and number of timepoints from
1444
+ the NIfTI file header. The repetition time is extracted from the pixdim[4] field
1445
+ and converted to seconds if necessary. The number of timepoints is extracted
1446
+ from the dim[4] field.
1447
+
1448
+ Examples
1449
+ --------
1450
+ >>> tr, timepoints = fmritimeinfo('sub-01_task-rest_bold.nii.gz')
1451
+ >>> print(f"Repetition time: {tr}s, Timepoints: {timepoints}")
1452
+ Repetition time: 2.0s, Timepoints: 240
751
1453
  """
752
1454
  nim = nib.load(niftifilename)
753
1455
  hdr = nim.header.copy()
@@ -761,8 +1463,9 @@ def fmritimeinfo(niftifilename):
761
1463
  return tr, timepoints
762
1464
 
763
1465
 
764
- def checkspacematch(hdr1, hdr2, tolerance=1.0e-3):
765
- r"""Check the headers of two nifti files to determine if the cover the same volume at the same resolution (within tolerance)
1466
+ def checkspacematch(hdr1: Any, hdr2: Any, tolerance: float = 1.0e-3) -> bool:
1467
+ """
1468
+ Check the headers of two nifti files to determine if they cover the same volume at the same resolution (within tolerance)
766
1469
 
767
1470
  Parameters
768
1471
  ----------
@@ -770,35 +1473,74 @@ def checkspacematch(hdr1, hdr2, tolerance=1.0e-3):
770
1473
  The header of the first file
771
1474
  hdr2 : nifti header structure
772
1475
  The header of the second file
1476
+ tolerance : float, optional
1477
+ Tolerance for comparison. Default is 1.0e-3
773
1478
 
774
1479
  Returns
775
1480
  -------
776
- ismatched : bool
1481
+ bool
777
1482
  True if the spatial dimensions and resolutions of the two files match.
778
1483
 
1484
+ Notes
1485
+ -----
1486
+ This function performs two checks:
1487
+ 1. Dimension matching using `checkspaceresmatch` on pixel dimensions (`pixdim`)
1488
+ 2. Spatial dimension matching using `checkspacedimmatch` on array dimensions (`dim`)
1489
+
1490
+ Examples
1491
+ --------
1492
+ >>> import nibabel as nib
1493
+ >>> img1 = nib.load('file1.nii.gz')
1494
+ >>> img2 = nib.load('file2.nii.gz')
1495
+ >>> checkspacematch(img1.header, img2.header)
1496
+ True
779
1497
  """
780
1498
  dimmatch = checkspaceresmatch(hdr1["pixdim"], hdr2["pixdim"], tolerance=tolerance)
781
1499
  resmatch = checkspacedimmatch(hdr1["dim"], hdr2["dim"])
782
1500
  return dimmatch and resmatch
783
1501
 
784
1502
 
785
- def checkspaceresmatch(sizes1, sizes2, tolerance=1.0e-3):
786
- r"""Check the spatial pixdims of two nifti files to determine if they have the same resolution (within tolerance)
1503
+ def checkspaceresmatch(sizes1: NDArray, sizes2: NDArray, tolerance: float = 1.0e-3) -> bool:
1504
+ """
1505
+ Check the spatial pixdims of two nifti files to determine if they have the same resolution (within tolerance)
787
1506
 
788
1507
  Parameters
789
1508
  ----------
790
- sizes1 : float array
791
- The size array from the first nifti file
792
- sizes2 : float array
793
- The size array from the second nifti file
794
- tolerance: float
795
- The fractional difference that is permissible between the two sizes that will still match
1509
+ sizes1 : array_like
1510
+ The size array from the first nifti file, typically containing spatial dimensions and pixel sizes
1511
+ sizes2 : array_like
1512
+ The size array from the second nifti file, typically containing spatial dimensions and pixel sizes
1513
+ tolerance : float, optional
1514
+ The fractional difference that is permissible between the two sizes that will still match,
1515
+ default is 1.0e-3 (0.1%)
796
1516
 
797
1517
  Returns
798
1518
  -------
799
- ismatched : bool
800
- True if the spatial resolutions of the two files match.
801
-
1519
+ bool
1520
+ True if the spatial resolutions of the two files match within the specified tolerance,
1521
+ False otherwise
1522
+
1523
+ Notes
1524
+ -----
1525
+ This function compares the spatial dimensions (indices 1-3) of two nifti file size arrays.
1526
+ The comparison is performed using fractional difference: |sizes1[i] - sizes2[i]| / sizes1[i].
1527
+ Only dimensions 1-3 are compared (typically x, y, z spatial dimensions).
1528
+ The function returns False immediately upon finding any dimension that exceeds the tolerance.
1529
+
1530
+ Examples
1531
+ --------
1532
+ >>> import numpy as np
1533
+ >>> sizes1 = np.array([1.0, 2.0, 2.0, 2.0])
1534
+ >>> sizes2 = np.array([1.0, 2.0005, 2.0005, 2.0005])
1535
+ >>> checkspaceresmatch(sizes1, sizes2, tolerance=1e-3)
1536
+ True
1537
+
1538
+ >>> sizes1 = np.array([1.0, 2.0, 2.0, 2.0])
1539
+ >>> sizes2 = np.array([1.0, 2.5, 2.5, 2.5])
1540
+ >>> checkspaceresmatch(sizes1, sizes2, tolerance=1e-3)
1541
+ File spatial resolutions do not match within tolerance of 0.001
1542
+ size of dimension 1: 2.0 != 2.5 (0.25 difference)
1543
+ False
802
1544
  """
803
1545
  for i in range(1, 4):
804
1546
  fracdiff = np.fabs(sizes1[i] - sizes2[i]) / sizes1[i]
@@ -810,20 +1552,47 @@ def checkspaceresmatch(sizes1, sizes2, tolerance=1.0e-3):
810
1552
  return True
811
1553
 
812
1554
 
813
- def checkspacedimmatch(dims1, dims2, verbose=False):
814
- r"""Check the dimension arrays of two nifti files to determine if the cover the same number of voxels in each dimension
1555
+ def checkspacedimmatch(dims1: NDArray, dims2: NDArray, verbose: bool = False) -> bool:
1556
+ """
1557
+ Check the dimension arrays of two nifti files to determine if they cover the same number of voxels in each dimension.
815
1558
 
816
1559
  Parameters
817
1560
  ----------
818
- dims1 : int array
819
- The dimension array from the first nifti file
820
- dims2 : int array
821
- The dimension array from the second nifti file
1561
+ dims1 : NDArray
1562
+ The dimension array from the first nifti file. Should contain spatial dimensions
1563
+ (typically the first dimension is the number of time points, and dimensions 1-3
1564
+ represent x, y, z spatial dimensions).
1565
+ dims2 : NDArray
1566
+ The dimension array from the second nifti file. Should contain spatial dimensions
1567
+ (typically the first dimension is the number of time points, and dimensions 1-3
1568
+ represent x, y, z spatial dimensions).
1569
+ verbose : bool, optional
1570
+ Enable verbose output. Default is False. When True, prints detailed information
1571
+ about dimension mismatches.
822
1572
 
823
1573
  Returns
824
1574
  -------
825
- ismatched : bool
826
- True if the spatial dimensions of the two files match.
1575
+ bool
1576
+ True if the spatial dimensions (dimensions 1-3) of the two files match.
1577
+ False if any of the spatial dimensions differ between the files.
1578
+
1579
+ Notes
1580
+ -----
1581
+ This function compares dimensions 1 through 3 (inclusive) of the two dimension arrays,
1582
+ which typically represent the spatial dimensions (x, y, z) of the nifti files.
1583
+ The first dimension is usually the number of time points and is not compared.
1584
+
1585
+ Examples
1586
+ --------
1587
+ >>> import numpy as np
1588
+ >>> dims1 = np.array([10, 64, 64, 32])
1589
+ >>> dims2 = np.array([10, 64, 64, 32])
1590
+ >>> checkspacedimmatch(dims1, dims2)
1591
+ True
1592
+
1593
+ >>> dims3 = np.array([10, 64, 64, 33])
1594
+ >>> checkspacedimmatch(dims1, dims3)
1595
+ False
827
1596
  """
828
1597
  for i in range(1, 4):
829
1598
  if dims1[i] != dims2[i]:
@@ -835,25 +1604,54 @@ def checkspacedimmatch(dims1, dims2, verbose=False):
835
1604
  return True
836
1605
 
837
1606
 
838
- def checktimematch(dims1, dims2, numskip1=0, numskip2=0, verbose=False):
839
- r"""Check the dimensions of two nifti files to determine if the cover the same number of timepoints
1607
+ def checktimematch(
1608
+ dims1: NDArray,
1609
+ dims2: NDArray,
1610
+ numskip1: int = 0,
1611
+ numskip2: int = 0,
1612
+ verbose: bool = False,
1613
+ ) -> bool:
1614
+ """
1615
+ Check the dimensions of two nifti files to determine if they cover the same number of timepoints.
1616
+
1617
+ This function compares the time dimensions of two NIfTI files after accounting for skipped timepoints
1618
+ at the beginning of each file. It is commonly used to verify temporal consistency between paired
1619
+ NIfTI datasets.
840
1620
 
841
1621
  Parameters
842
1622
  ----------
843
- dims1 : int array
844
- The dimension array from the first nifti file
845
- dims2 : int array
846
- The dimension array from the second nifti file
1623
+ dims1 : NDArray
1624
+ The dimension array from the first NIfTI file. The time dimension is expected to be at index 4.
1625
+ dims2 : NDArray
1626
+ The dimension array from the second NIfTI file. The time dimension is expected to be at index 4.
847
1627
  numskip1 : int, optional
848
- Number of timepoints skipped at the beginning of file 1
1628
+ Number of timepoints skipped at the beginning of file 1. Default is 0.
849
1629
  numskip2 : int, optional
850
- Number of timepoints skipped at the beginning of file 2
1630
+ Number of timepoints skipped at the beginning of file 2. Default is 0.
1631
+ verbose : bool, optional
1632
+ Enable verbose output. If True, prints detailed information about the comparison.
1633
+ Default is False.
851
1634
 
852
1635
  Returns
853
1636
  -------
854
- ismatched : bool
855
- True if the time dimension of the two files match.
856
-
1637
+ bool
1638
+ True if the effective time dimensions of the two files match after accounting for skipped
1639
+ timepoints, False otherwise.
1640
+
1641
+ Notes
1642
+ -----
1643
+ The function assumes that the time dimension is stored at index 4 of the dimension arrays.
1644
+ This is typical for NIfTI files where dimensions are ordered as [x, y, z, t, ...].
1645
+
1646
+ Examples
1647
+ --------
1648
+ >>> import numpy as np
1649
+ >>> dims1 = np.array([64, 64, 32, 1, 100, 1])
1650
+ >>> dims2 = np.array([64, 64, 32, 1, 95, 1])
1651
+ >>> checktimematch(dims1, dims2, numskip1=0, numskip2=5)
1652
+ True
1653
+ >>> checktimematch(dims1, dims2, numskip1=0, numskip2=3)
1654
+ False
857
1655
  """
858
1656
  if (dims1[4] - numskip1) != (dims2[4] - numskip2):
859
1657
  if verbose:
@@ -876,7 +1674,56 @@ def checktimematch(dims1, dims2, numskip1=0, numskip2=0, verbose=False):
876
1674
  return True
877
1675
 
878
1676
 
879
- def checkdatamatch(data1, data2, absthresh=1e-12, msethresh=1e-12, debug=False):
1677
+ def checkdatamatch(
1678
+ data1: NDArray,
1679
+ data2: NDArray,
1680
+ absthresh: float = 1e-12,
1681
+ msethresh: float = 1e-12,
1682
+ debug: bool = False,
1683
+ ) -> Tuple[bool, bool]:
1684
+ """
1685
+ Check if two data arrays match within specified tolerances.
1686
+
1687
+ This function compares two numpy arrays using both mean squared error (MSE) and
1688
+ maximum absolute difference metrics to determine if they match within given thresholds.
1689
+
1690
+ Parameters
1691
+ ----------
1692
+ data1 : NDArray
1693
+ First data array to compare
1694
+ data2 : NDArray
1695
+ Second data array to compare
1696
+ absthresh : float, optional
1697
+ Absolute difference threshold. Default is 1e-12
1698
+ msethresh : float, optional
1699
+ Mean squared error threshold. Default is 1e-12
1700
+ debug : bool, optional
1701
+ Enable debug output. Default is False
1702
+
1703
+ Returns
1704
+ -------
1705
+ tuple of (bool, bool)
1706
+ msematch : bool
1707
+ True if mean squared error is below msethresh threshold
1708
+ absmatch : bool
1709
+ True if maximum absolute difference is below absthresh threshold
1710
+
1711
+ Notes
1712
+ -----
1713
+ The function uses numpy's `mse` function for mean squared error calculation
1714
+ and `np.max(np.fabs(data1 - data2))` for maximum absolute difference.
1715
+
1716
+ Examples
1717
+ --------
1718
+ >>> import numpy as np
1719
+ >>> data1 = np.array([1.0, 2.0, 3.0])
1720
+ >>> data2 = np.array([1.000000000001, 2.000000000001, 3.000000000001])
1721
+ >>> checkdatamatch(data1, data2)
1722
+ (True, True)
1723
+
1724
+ >>> checkdatamatch(data1, data2, absthresh=1e-15)
1725
+ (True, False)
1726
+ """
880
1727
  msediff = mse(data1, data2)
881
1728
  absdiff = np.max(np.fabs(data1 - data2))
882
1729
  if debug:
@@ -885,8 +1732,66 @@ def checkdatamatch(data1, data2, absthresh=1e-12, msethresh=1e-12, debug=False):
885
1732
 
886
1733
 
887
1734
  def checkniftifilematch(
888
- filename1, filename2, absthresh=1e-12, msethresh=1e-12, spacetolerance=1e-3, debug=False
889
- ):
1735
+ filename1: str,
1736
+ filename2: str,
1737
+ absthresh: float = 1e-12,
1738
+ msethresh: float = 1e-12,
1739
+ spacetolerance: float = 1e-3,
1740
+ debug: bool = False,
1741
+ ) -> bool:
1742
+ """
1743
+ Check if two NIFTI files match in dimensions, resolution, and data values.
1744
+
1745
+ This function compares two NIFTI files for spatial compatibility and data
1746
+ equivalence. It verifies that the files have matching spatial dimensions,
1747
+ resolution, time dimensions, and that their voxel data values are within
1748
+ specified tolerances.
1749
+
1750
+ Parameters
1751
+ ----------
1752
+ filename1 : str
1753
+ Path to the first NIFTI file to be compared.
1754
+ filename2 : str
1755
+ Path to the second NIFTI file to be compared.
1756
+ absthresh : float, optional
1757
+ Absolute difference threshold for voxel-wise data comparison.
1758
+ If any voxel differs by more than this value, the files are considered
1759
+ not to match. Default is 1e-12.
1760
+ msethresh : float, optional
1761
+ Mean squared error threshold for data comparison. If the MSE between
1762
+ the data arrays exceeds this value, the files are considered not to match.
1763
+ Default is 1e-12.
1764
+ spacetolerance : float, optional
1765
+ Tolerance for comparing spatial dimensions and resolution between files.
1766
+ Default is 1e-3.
1767
+ debug : bool, optional
1768
+ If True, enables debug output to assist in troubleshooting.
1769
+ Default is False.
1770
+
1771
+ Returns
1772
+ -------
1773
+ bool
1774
+ True if all checks (spatial, temporal, and data) pass within the specified
1775
+ tolerances; False otherwise.
1776
+
1777
+ Notes
1778
+ -----
1779
+ The function internally calls several helper functions:
1780
+ - `readfromnifti`: Reads NIFTI file metadata and data.
1781
+ - `checkspacematch`: Compares spatial dimensions and resolution.
1782
+ - `checktimematch`: Compares time dimensions.
1783
+ - `checkdatamatch`: Compares data values using MSE and absolute difference.
1784
+
1785
+ Examples
1786
+ --------
1787
+ >>> match = checkniftifilematch('file1.nii', 'file2.nii')
1788
+ >>> print(match)
1789
+ True
1790
+
1791
+ >>> match = checkniftifilematch('file1.nii', 'file2.nii', absthresh=1e-10)
1792
+ >>> print(match)
1793
+ False
1794
+ """
890
1795
  im1, im1_data, im1_hdr, im1_dims, im1_sizes = readfromnifti(filename1)
891
1796
  im2, im2_data, im2_hdr, im2_dims, im2_sizes = readfromnifti(filename2)
892
1797
  spacematch = checkspacematch(im1_hdr, im2_hdr, tolerance=spacetolerance)
@@ -916,19 +1821,36 @@ def checkniftifilematch(
916
1821
 
917
1822
 
918
1823
  # --------------------------- non-NIFTI file I/O functions ------------------------------------------
919
- def checkifparfile(filename):
920
- r"""Checks to see if a file is an FSL style motion parameter file
1824
+ def checkifparfile(filename: str) -> bool:
1825
+ """
1826
+ Checks to see if a file is an FSL style motion parameter file
1827
+
1828
+ This function determines whether a given filename corresponds to an FSL-style
1829
+ motion parameter file by checking if it ends with the '.par' extension.
921
1830
 
922
1831
  Parameters
923
1832
  ----------
924
1833
  filename : str
925
- The name of the file in question.
1834
+ The name of the file in question, including the file extension.
926
1835
 
927
1836
  Returns
928
1837
  -------
929
- isparfile : bool
930
- True if filename ends in '.par', False otherwise.
931
-
1838
+ bool
1839
+ True if the filename ends with '.par', False otherwise.
1840
+
1841
+ Notes
1842
+ -----
1843
+ FSL (FMRIB Software Library) motion parameter files typically have the '.par'
1844
+ extension and contain motion correction parameters for neuroimaging data.
1845
+
1846
+ Examples
1847
+ --------
1848
+ >>> checkifparfile("subject1.par")
1849
+ True
1850
+ >>> checkifparfile("subject1.txt")
1851
+ False
1852
+ >>> checkifparfile("motion.par")
1853
+ True
932
1854
  """
933
1855
  if filename.endswith(".par"):
934
1856
  return True
@@ -936,7 +1858,42 @@ def checkifparfile(filename):
936
1858
  return False
937
1859
 
938
1860
 
939
- def readconfounds(filename, debug=False):
1861
+ def readconfounds(filename: str, debug: bool = False) -> Dict[str, NDArray]:
1862
+ """
1863
+ Read confound regressors from a text file.
1864
+
1865
+ This function reads confound regressors from a text file and returns them as a dictionary
1866
+ mapping confound names to timecourse arrays. The function handles both structured column
1867
+ names and automatically generated names for cases where column information is missing.
1868
+
1869
+ Parameters
1870
+ ----------
1871
+ filename : str
1872
+ Path to the confounds file
1873
+ debug : bool, optional
1874
+ Enable debug output. Default is False
1875
+
1876
+ Returns
1877
+ -------
1878
+ dict of str to NDArray
1879
+ Dictionary mapping confound names to timecourse arrays. Each key is a confound name
1880
+ and each value is a 1D numpy array containing the timecourse data for that confound.
1881
+
1882
+ Notes
1883
+ -----
1884
+ The function internally calls `readvectorsfromtextfile` to parse the input file, which
1885
+ returns metadata including sample rate, start time, column names, and the actual data.
1886
+ If column names are not present in the file, automatically generated names are created
1887
+ in the format 'confound_000', 'confound_001', etc.
1888
+
1889
+ Examples
1890
+ --------
1891
+ >>> confounds = readconfounds('confounds.txt')
1892
+ >>> print(confounds.keys())
1893
+ dict_keys(['motion_000', 'motion_001', 'motion_002', 'scrubbing'])
1894
+ >>> print(confounds['motion_000'].shape)
1895
+ (1000,)
1896
+ """
940
1897
  (
941
1898
  thesamplerate,
942
1899
  thestarttime,
@@ -955,19 +1912,46 @@ def readconfounds(filename, debug=False):
955
1912
  return theconfounddict
956
1913
 
957
1914
 
958
- def readparfile(filename):
959
- r"""Checks to see if a file is an FSL style motion parameter file
1915
+ def readparfile(filename: str) -> Dict[str, NDArray]:
1916
+ """
1917
+ Read motion parameters from an FSL-style .par file.
1918
+
1919
+ This function reads motion parameters from FSL-style .par files and returns
1920
+ them as a dictionary with timecourses keyed by parameter names.
960
1921
 
961
1922
  Parameters
962
1923
  ----------
963
1924
  filename : str
964
- The name of the file in question.
1925
+ The name of the FSL-style .par file to read. This file should contain
1926
+ motion parameters in the standard FSL format with 6 columns representing
1927
+ translation (X, Y, Z) and rotation (RotX, RotY, RotZ) parameters.
965
1928
 
966
1929
  Returns
967
1930
  -------
968
- motiondict: dict
969
- All the timecourses in the file, keyed by name
970
-
1931
+ dict of NDArray
1932
+ Dictionary containing the motion parameters as timecourses. Keys are:
1933
+ - 'X': translation along x-axis
1934
+ - 'Y': translation along y-axis
1935
+ - 'Z': translation along z-axis
1936
+ - 'RotX': rotation around x-axis
1937
+ - 'RotY': rotation around y-axis
1938
+ - 'RotZ': rotation around z-axis
1939
+ Each value is a 1D numpy array containing the timecourse for that parameter.
1940
+
1941
+ Notes
1942
+ -----
1943
+ The .par file format expected by this function is the standard FSL format
1944
+ where each row represents a timepoint and each column represents a motion
1945
+ parameter. The function assumes the file contains exactly 6 columns in the
1946
+ order: X, Y, Z, RotX, RotY, RotZ.
1947
+
1948
+ Examples
1949
+ --------
1950
+ >>> motion_data = readparfile('motion.par')
1951
+ >>> print(motion_data.keys())
1952
+ dict_keys(['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ'])
1953
+ >>> print(motion_data['X'].shape)
1954
+ (100,) # assuming 100 timepoints
971
1955
  """
972
1956
  labels = ["X", "Y", "Z", "RotX", "RotY", "RotZ"]
973
1957
  motiontimeseries = readvecs(filename)
@@ -977,8 +1961,9 @@ def readparfile(filename):
977
1961
  return motiondict
978
1962
 
979
1963
 
980
- def readmotion(filename, tr=1.0, colspec=None):
981
- r"""Reads motion regressors from filename (from the columns specified in colspec, if given)
1964
+ def readmotion(filename: str, tr: float = 1.0, colspec: Optional[str] = None) -> Dict[str, Any]:
1965
+ """
1966
+ Read motion regressors from a file (.par, .tsv, or other text format).
982
1967
 
983
1968
  Parameters
984
1969
  ----------
@@ -1118,25 +2103,41 @@ def readmotion(filename, tr=1.0, colspec=None):
1118
2103
  return motiondict
1119
2104
 
1120
2105
 
1121
- def sliceinfo(slicetimes, tr):
1122
- r"""Find out what slicetimes we have, their spacing, and which timepoint each slice occurs at. This assumes
2106
+ def sliceinfo(slicetimes: NDArray, tr: float) -> Tuple[int, float, NDArray]:
2107
+ """
2108
+ Find out what slicetimes we have, their spacing, and which timepoint each slice occurs at. This assumes
1123
2109
  uniform slice time spacing, but supports any slice acquisition order and multiband acquisitions.
1124
2110
 
1125
2111
  Parameters
1126
2112
  ----------
1127
2113
  slicetimes : 1d float array
1128
2114
  List of all the slicetimes relative to the start of the TR
1129
- tr: float
2115
+ tr : float
1130
2116
  The TR of the acquisition
1131
2117
 
1132
2118
  Returns
1133
2119
  -------
1134
2120
  numsteps : int
1135
2121
  The number of unique slicetimes in the list
1136
- stepsize: float
2122
+ stepsize : float
1137
2123
  The stepsize in seconds between subsequent slice acquisitions
1138
- sliceoffsets: 1d int array
2124
+ sliceoffsets : 1d int array
1139
2125
  Which acquisition time each slice was acquired at
2126
+
2127
+ Notes
2128
+ -----
2129
+ This function assumes uniform slice time spacing and works with any slice acquisition order
2130
+ and multiband acquisitions. The function determines the minimum time step between slices
2131
+ and maps each slice to its corresponding timepoint within the TR.
2132
+
2133
+ Examples
2134
+ --------
2135
+ >>> import numpy as np
2136
+ >>> slicetimes = np.array([0.0, 0.1, 0.2, 0.3])
2137
+ >>> tr = 1.0
2138
+ >>> numsteps, stepsize, sliceoffsets = sliceinfo(slicetimes, tr)
2139
+ >>> print(numsteps, stepsize, sliceoffsets)
2140
+ (4, 0.1, [0 1 2 3])
1140
2141
  """
1141
2142
  sortedtimes = np.sort(slicetimes)
1142
2143
  diffs = sortedtimes[1:] - sortedtimes[0:-1]
@@ -1146,7 +2147,49 @@ def sliceinfo(slicetimes, tr):
1146
2147
  return numsteps, minstep, sliceoffsets
1147
2148
 
1148
2149
 
1149
- def getslicetimesfromfile(slicetimename):
2150
+ def getslicetimesfromfile(slicetimename: str) -> Tuple[NDArray, bool, bool]:
2151
+ """
2152
+ Read slice timing information from a file.
2153
+
2154
+ This function reads slice timing data from either a JSON file (BIDS sidecar format)
2155
+ or a text file containing slice timing values. It returns the slice times along
2156
+ with metadata indicating how the data was processed.
2157
+
2158
+ Parameters
2159
+ ----------
2160
+ slicetimename : str
2161
+ Path to the slice timing file. Can be either a JSON file (BIDS sidecar format)
2162
+ or a text file containing slice timing values.
2163
+
2164
+ Returns
2165
+ -------
2166
+ tuple of (NDArray, bool, bool)
2167
+ A tuple containing:
2168
+ - slicetimes : NDArray
2169
+ Array of slice timing values as floats
2170
+ - normalizedtotr : bool
2171
+ True if the slice times were normalized to TR (time resolution),
2172
+ False if they were read directly from a JSON file
2173
+ - fileisbidsjson : bool
2174
+ True if the input file was a BIDS JSON sidecar file,
2175
+ False if it was a text file
2176
+
2177
+ Notes
2178
+ -----
2179
+ - For JSON files, the function expects a "SliceTiming" key in the JSON dictionary
2180
+ - For text files, the function uses readvec() to parse the slice timing values
2181
+ - If a JSON file doesn't contain the required "SliceTiming" key, the function
2182
+ prints an error message and exits the program
2183
+ - Slice timing values are converted to float64 dtype for precision
2184
+
2185
+ Examples
2186
+ --------
2187
+ >>> slicetimes, normalized, is_bids = getslicetimesfromfile("sub-01_task-rest_bold.json")
2188
+ >>> print(slicetimes)
2189
+ [0.0, 0.1, 0.2, 0.3, 0.4]
2190
+ >>> print(normalized, is_bids)
2191
+ (False, True)
2192
+ """
1150
2193
  filebase, extension = os.path.splitext(slicetimename)
1151
2194
  if extension == ".json":
1152
2195
  jsoninfodict = readdictfromjson(slicetimename)
@@ -1167,19 +2210,41 @@ def getslicetimesfromfile(slicetimename):
1167
2210
  return slicetimes, normalizedtotr, fileisbidsjson
1168
2211
 
1169
2212
 
1170
- def readbidssidecar(inputfilename):
1171
- r"""Read key value pairs out of a BIDS sidecar file
2213
+ def readbidssidecar(inputfilename: str) -> Dict[str, Any]:
2214
+ """
2215
+ Read key value pairs out of a BIDS sidecar file
2216
+
2217
+ This function reads JSON sidecar files commonly used in BIDS (Brain Imaging Data Structure)
2218
+ datasets and returns the key-value pairs as a dictionary.
1172
2219
 
1173
2220
  Parameters
1174
2221
  ----------
1175
2222
  inputfilename : str
1176
- The name of the sidecar file (with extension)
2223
+ The name of the sidecar file (with extension). The function will automatically
2224
+ look for a corresponding .json file with the same base name.
1177
2225
 
1178
2226
  Returns
1179
2227
  -------
1180
- thedict : dict
1181
- The key value pairs from the json file
1182
-
2228
+ dict
2229
+ A dictionary containing the key-value pairs from the JSON sidecar file.
2230
+ Returns an empty dictionary if the sidecar file does not exist.
2231
+
2232
+ Notes
2233
+ -----
2234
+ The function expects the sidecar file to have the same base name as the input file
2235
+ but with a .json extension. For example, if inputfilename is "sub-01_task-rest_bold.nii.gz",
2236
+ the function will look for "sub-01_task-rest_bold.json".
2237
+
2238
+ Examples
2239
+ --------
2240
+ >>> sidecar_data = readbidssidecar("sub-01_task-rest_bold.nii.gz")
2241
+ >>> print(sidecar_data['RepetitionTime'])
2242
+ 2.0
2243
+
2244
+ >>> sidecar_data = readbidssidecar("nonexistent_file.nii.gz")
2245
+ sidecar file does not exist
2246
+ >>> print(sidecar_data)
2247
+ {}
1183
2248
  """
1184
2249
  thefileroot, theext = os.path.splitext(inputfilename)
1185
2250
  if os.path.exists(thefileroot + ".json"):
@@ -1191,16 +2256,48 @@ def readbidssidecar(inputfilename):
1191
2256
  return {}
1192
2257
 
1193
2258
 
1194
- def writedicttojson(thedict, thefilename):
1195
- r"""Write key value pairs to a json file
2259
+ def writedicttojson(thedict: Dict[str, Any], thefilename: str) -> None:
2260
+ """
2261
+ Write key-value pairs to a JSON file with proper numpy type handling.
2262
+
2263
+ This function writes a dictionary to a JSON file, automatically converting
2264
+ numpy data types to their Python equivalents to ensure proper JSON serialization.
1196
2265
 
1197
2266
  Parameters
1198
2267
  ----------
1199
- thedict : dict
1200
- The key value pairs from the json file
2268
+ thedict : dict[str, Any]
2269
+ Dictionary containing key-value pairs to be written to JSON file
1201
2270
  thefilename : str
1202
- The name of the json file (with extension)
2271
+ Path and name of the output JSON file (including extension)
1203
2272
 
2273
+ Returns
2274
+ -------
2275
+ None
2276
+ This function does not return any value
2277
+
2278
+ Notes
2279
+ -----
2280
+ The function automatically converts numpy data types:
2281
+ - numpy.integer → Python int
2282
+ - numpy.floating → Python float
2283
+ - NDArray → Python list
2284
+
2285
+ The output JSON file will be formatted with:
2286
+ - Sorted keys
2287
+ - 4-space indentation
2288
+ - Comma-separated values without spaces
2289
+
2290
+ Examples
2291
+ --------
2292
+ >>> import numpy as np
2293
+ >>> data = {
2294
+ ... 'name': 'John',
2295
+ ... 'age': np.int32(30),
2296
+ ... 'score': np.float64(95.5),
2297
+ ... 'values': np.array([1, 2, 3, 4])
2298
+ ... }
2299
+ >>> writedicttojson(data, 'output.json')
2300
+ >>> # Creates output.json with properly formatted data
1204
2301
  """
1205
2302
  thisdict = {}
1206
2303
  for key in thedict:
@@ -1218,19 +2315,41 @@ def writedicttojson(thedict, thefilename):
1218
2315
  )
1219
2316
 
1220
2317
 
1221
- def readdictfromjson(inputfilename):
1222
- r"""Read key value pairs out of a json file
2318
+ def readdictfromjson(inputfilename: str) -> Dict[str, Any]:
2319
+ """
2320
+ Read key value pairs out of a json file.
2321
+
2322
+ This function reads a JSON file and returns its contents as a dictionary.
2323
+ The function automatically appends the ".json" extension to the input filename
2324
+ if it's not already present.
1223
2325
 
1224
2326
  Parameters
1225
2327
  ----------
1226
2328
  inputfilename : str
1227
- The name of the json file (with extension)
2329
+ The name of the json file (with or without extension). If the extension
2330
+ is not provided, ".json" will be appended automatically.
1228
2331
 
1229
2332
  Returns
1230
2333
  -------
1231
- thedict : dict
1232
- The key value pairs from the json file
1233
-
2334
+ dict[str, Any]
2335
+ A dictionary containing the key-value pairs from the JSON file. Returns
2336
+ an empty dictionary if the specified file does not exist.
2337
+
2338
+ Notes
2339
+ -----
2340
+ - The function checks for the existence of the file before attempting to read it
2341
+ - If the input filename doesn't have a ".json" extension, it will be automatically added
2342
+ - If the file doesn't exist, a message will be printed and an empty dictionary returned
2343
+
2344
+ Examples
2345
+ --------
2346
+ >>> data = readdictfromjson("config")
2347
+ >>> print(data)
2348
+ {'key1': 'value1', 'key2': 'value2'}
2349
+
2350
+ >>> data = readdictfromjson("data.json")
2351
+ >>> print(data)
2352
+ {'name': 'John', 'age': 30}
1234
2353
  """
1235
2354
  thefileroot, theext = os.path.splitext(inputfilename)
1236
2355
  if os.path.exists(thefileroot + ".json"):
@@ -1242,21 +2361,43 @@ def readdictfromjson(inputfilename):
1242
2361
  return {}
1243
2362
 
1244
2363
 
1245
- def readlabelledtsv(inputfilename, compressed=False):
1246
- r"""Read time series out of an fmriprep confounds tsv file
2364
+ def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str, NDArray]:
2365
+ """
2366
+ Read time series out of an fmriprep confounds tsv file
1247
2367
 
1248
2368
  Parameters
1249
2369
  ----------
1250
2370
  inputfilename : str
1251
- The root name of the tsv (no extension)
2371
+ The root name of the tsv file (without extension)
2372
+ compressed : bool, optional
2373
+ If True, reads from a gzipped tsv file (.tsv.gz), otherwise reads from
2374
+ a regular tsv file (.tsv). Default is False.
1252
2375
 
1253
2376
  Returns
1254
2377
  -------
1255
- confounddict: dict
1256
- All the timecourses in the file, keyed by the first row
1257
-
1258
- NOTE: If file does not exist or is not valid, return an empty dictionary
1259
-
2378
+ dict of str to NDArray
2379
+ Dictionary containing all the timecourses in the file, keyed by the
2380
+ column names from the first row of the tsv file. Each value is a
2381
+ numpy array containing the time series data for that column.
2382
+
2383
+ Raises
2384
+ ------
2385
+ FileNotFoundError
2386
+ If the specified tsv file (with appropriate extension) does not exist.
2387
+
2388
+ Notes
2389
+ -----
2390
+ - NaN values in the input file are replaced with 0.0
2391
+ - If the file does not exist or is not valid, an empty dictionary is returned
2392
+ - The function supports both compressed (.tsv.gz) and uncompressed (.tsv) files
2393
+
2394
+ Examples
2395
+ --------
2396
+ >>> confounds = readlabelledtsv("sub-01_task-rest_bold_confounds")
2397
+ >>> print(confounds.keys())
2398
+ dict_keys(['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'])
2399
+ >>> print(confounds['trans_x'].shape)
2400
+ (100,)
1260
2401
  """
1261
2402
  confounddict = {}
1262
2403
  if compressed:
@@ -1277,22 +2418,49 @@ def readlabelledtsv(inputfilename, compressed=False):
1277
2418
  return confounddict
1278
2419
 
1279
2420
 
1280
- def readcsv(inputfilename, debug=False):
1281
- r"""Read time series out of an unlabelled csv file
2421
+ def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, NDArray]:
2422
+ """
2423
+ Read time series out of an unlabelled csv file.
2424
+
2425
+ This function reads a CSV file and returns a dictionary of time series,
2426
+ where keys are column names (or generated names if no header is present)
2427
+ and values are NumPy arrays of the corresponding time series data.
1282
2428
 
1283
2429
  Parameters
1284
2430
  ----------
1285
2431
  inputfilename : str
1286
- The root name of the csv (no extension)
2432
+ The root name of the CSV file (without the '.csv' extension).
2433
+ debug : bool, optional
2434
+ If True, prints debug information about whether a header line is detected,
2435
+ by default False.
1287
2436
 
1288
2437
  Returns
1289
2438
  -------
1290
- timeseriesdict: dict
1291
- All the timecourses in the file, keyed by the first row if it exists, by "col1, col2...colN"
1292
- if not.
1293
-
1294
- NOTE: If file does not exist or is not valid, return an empty dictionary
1295
-
2439
+ dict of str to NDArray
2440
+ A dictionary where keys are column names (or generated names like "col0", "col1", etc.)
2441
+ and values are NumPy arrays containing the time series data. If the file does not exist
2442
+ or is invalid, an empty dictionary is returned.
2443
+
2444
+ Notes
2445
+ -----
2446
+ - If the first column of the CSV contains non-numeric values, it is assumed to be a header.
2447
+ - If the first column is numeric, it is treated as part of the data, and columns are
2448
+ named "col0", "col1", etc.
2449
+ - NaN values in the CSV are replaced with 0.0.
2450
+ - If the file does not exist or cannot be read, a FileNotFoundError is raised.
2451
+
2452
+ Examples
2453
+ --------
2454
+ >>> data = readcsv("timeseries_data")
2455
+ >>> print(data.keys())
2456
+ ['col0', 'col1', 'col2']
2457
+ >>> print(data['col0'])
2458
+ [1.0, 2.0, 3.0, 4.0]
2459
+
2460
+ >>> data = readcsv("labeled_data", debug=True)
2461
+ there is a header line
2462
+ >>> print(data.keys())
2463
+ ['time', 'signal1', 'signal2']
1296
2464
  """
1297
2465
  if not os.path.isfile(inputfilename + ".csv"):
1298
2466
  raise FileNotFoundError(f"csv file {inputfilename}.csv does not exist")
@@ -1331,22 +2499,43 @@ def readcsv(inputfilename, debug=False):
1331
2499
  return timeseriesdict
1332
2500
 
1333
2501
 
1334
- def readfslmat(inputfilename, debug=False):
1335
- r"""Read time series out of an FSL design.mat file
2502
+ def readfslmat(inputfilename: str, debug: bool = False) -> Dict[str, NDArray]:
2503
+ """
2504
+ Read time series out of an FSL design.mat file
1336
2505
 
1337
2506
  Parameters
1338
2507
  ----------
1339
2508
  inputfilename : str
1340
- The root name of the csv (no extension)
2509
+ The root name of the .mat file (no extension)
2510
+ debug : bool, optional
2511
+ If True, print the DataFrame contents for debugging purposes. Default is False
1341
2512
 
1342
2513
  Returns
1343
2514
  -------
1344
- timeseriesdict: dict
1345
- All the timecourses in the file, keyed by the first row if it exists, by "col1, col2...colN"
1346
- if not.
1347
-
1348
- NOTE: If file does not exist or is not valid, return an empty dictionary
1349
-
2515
+ dict of NDArray
2516
+ Dictionary containing all the timecourses in the file, keyed by column names.
2517
+ If the first row exists, it is used as keys; otherwise, keys are generated as
2518
+ "col1, col2...colN". Returns an empty dictionary if file does not exist or is not valid.
2519
+
2520
+ Raises
2521
+ ------
2522
+ FileNotFoundError
2523
+ If the specified FSL mat file does not exist
2524
+
2525
+ Notes
2526
+ -----
2527
+ This function reads FSL design.mat files and extracts time series data. The function
2528
+ skips the first 5 rows of the file (assumed to be header information) and treats
2529
+ subsequent rows as time series data. The column names are generated using the
2530
+ `makecolname` helper function.
2531
+
2532
+ Examples
2533
+ --------
2534
+ >>> timeseries = readfslmat("design")
2535
+ >>> print(timeseries.keys())
2536
+ dict_keys(['col0', 'col1', 'col2'])
2537
+ >>> print(timeseries['col0'])
2538
+ [0.1, 0.2, 0.3, 0.4]
1350
2539
  """
1351
2540
  if not os.path.isfile(inputfilename + ".mat"):
1352
2541
  raise FileNotFoundError(f"FSL mat file {inputfilename}.mat does not exist")
@@ -1366,7 +2555,51 @@ def readfslmat(inputfilename, debug=False):
1366
2555
  return timeseriesdict
1367
2556
 
1368
2557
 
1369
- def readoptionsfile(inputfileroot):
2558
+ def readoptionsfile(inputfileroot: str) -> Dict[str, Any]:
2559
+ """
2560
+ Read a run options from a JSON or TXT configuration file.
2561
+
2562
+ This function attempts to read rapidtide run options from a file with the given root name,
2563
+ checking for `.json` and `.txt` extensions in that order. If neither file exists,
2564
+ a `FileNotFoundError` is raised. The function also handles backward compatibility
2565
+ for older options files by filling in default filter limits based on the `filtertype`.
2566
+
2567
+ Parameters
2568
+ ----------
2569
+ inputfileroot : str
2570
+ The base name of the options file (without extension). The function will
2571
+ first look for `inputfileroot.json`, then `inputfileroot.txt`.
2572
+
2573
+ Returns
2574
+ -------
2575
+ Dict[str, Any]
2576
+ A dictionary containing the run options. The dictionary includes keys such as
2577
+ `filtertype`, `lowerstop`, `lowerpass`, `upperpass`, and `upperstop`, depending
2578
+ on the file content and filter type.
2579
+
2580
+ Raises
2581
+ ------
2582
+ FileNotFoundError
2583
+ If neither `inputfileroot.json` nor `inputfileroot.txt` exists.
2584
+
2585
+ Notes
2586
+ -----
2587
+ For backward compatibility, older options files without `lowerpass` key are updated
2588
+ with default values based on the `filtertype`:
2589
+
2590
+ - "None": All limits set to 0.0 or -1.0
2591
+ - "vlf": 0.0, 0.0, 0.009, 0.010
2592
+ - "lfo": 0.009, 0.010, 0.15, 0.20
2593
+ - "resp": 0.15, 0.20, 0.4, 0.5
2594
+ - "card": 0.4, 0.5, 2.5, 3.0
2595
+ - "arb": Uses values from `arb_lowerstop`, `arb_lower`, `arb_upper`, `arb_upperstop`
2596
+
2597
+ Examples
2598
+ --------
2599
+ >>> options = readoptionsfile("myfilter")
2600
+ >>> print(options["filtertype"])
2601
+ 'vlf'
2602
+ """
1370
2603
  if os.path.isfile(inputfileroot + ".json"):
1371
2604
  # options saved as json
1372
2605
  thedict = readdictfromjson(inputfileroot + ".json")
@@ -1420,51 +2653,138 @@ def readoptionsfile(inputfileroot):
1420
2653
  return thedict
1421
2654
 
1422
2655
 
1423
- def makecolname(colnum, startcol):
2656
+ def makecolname(colnum: int, startcol: int) -> str:
2657
+ """
2658
+ Generate a column name in the format 'col_##' where ## is a zero-padded number.
2659
+
2660
+ This function creates standardized column names by adding a starting offset to
2661
+ a column number and formatting it with zero-padding to ensure consistent
2662
+ two-digit representation.
2663
+
2664
+ Parameters
2665
+ ----------
2666
+ colnum : int
2667
+ The base column number to be used in the name generation.
2668
+ startcol : int
2669
+ The starting column offset to be added to colnum.
2670
+
2671
+ Returns
2672
+ -------
2673
+ str
2674
+ A column name in the format 'col_##' where ## represents the zero-padded
2675
+ sum of colnum and startcol.
2676
+
2677
+ Notes
2678
+ -----
2679
+ The resulting number is zero-padded to always have at least two digits.
2680
+ For example, if colnum=5 and startcol=10, the result will be 'col_15'.
2681
+ If colnum=1 and startcol=2, the result will be 'col_03'.
2682
+
2683
+ Examples
2684
+ --------
2685
+ >>> makecolname(0, 0)
2686
+ 'col_00'
2687
+
2688
+ >>> makecolname(5, 10)
2689
+ 'col_15'
2690
+
2691
+ >>> makecolname(1, 2)
2692
+ 'col_03'
2693
+ """
1424
2694
  return f"col_{str(colnum + startcol).zfill(2)}"
1425
2695
 
1426
2696
 
1427
2697
  def writebidstsv(
1428
- outputfileroot,
1429
- data,
1430
- samplerate,
1431
- extraheaderinfo=None,
1432
- compressed=True,
1433
- columns=None,
1434
- xaxislabel="time",
1435
- yaxislabel="arbitrary value",
1436
- starttime=0.0,
1437
- append=False,
1438
- samplerate_tolerance=1e-6,
1439
- starttime_tolerance=1e-6,
1440
- colsinjson=True,
1441
- colsintsv=False,
1442
- omitjson=False,
1443
- debug=False,
1444
- ):
1445
- """
1446
- NB: to be strictly valid, a continuous BIDS tsv file (i.e. a "_physio" or "_stim" file) requires:
1447
- 1) The .tsv is compressed (.tsv.gz)
1448
- 2) "SamplingFrequency", "StartTime", "Columns" must exist and be in the .json file
1449
- 3) The tsv file does NOT have column headers.
1450
- 4) "_physio" or "_stim" has to be at the end of the name, although this seems a little flexible
1451
-
1452
- The first 3 are the defaults, but if you really want to override them, you can.
1453
-
1454
- :param outputfileroot:
1455
- :param data:
1456
- :param samplerate:
1457
- :param compressed:
1458
- :param columns:
1459
- :param xaxislabel:
1460
- :param yaxislabel:
1461
- :param starttime:
1462
- :param append:
1463
- :param colsinjson:
1464
- :param colsintsv:
1465
- :param omitjson:
1466
- :param debug:
1467
- :return:
2698
+ outputfileroot: str,
2699
+ data: NDArray,
2700
+ samplerate: float,
2701
+ extraheaderinfo: Optional[Dict[str, Any]] = None,
2702
+ compressed: bool = True,
2703
+ columns: Optional[List[str]] = None,
2704
+ xaxislabel: str = "time",
2705
+ yaxislabel: str = "arbitrary value",
2706
+ starttime: float = 0.0,
2707
+ append: bool = False,
2708
+ samplerate_tolerance: float = 1e-6,
2709
+ starttime_tolerance: float = 1e-6,
2710
+ colsinjson: bool = True,
2711
+ colsintsv: bool = False,
2712
+ omitjson: bool = False,
2713
+ debug: bool = False,
2714
+ ) -> None:
2715
+ """
2716
+ Write physiological or stimulation data to a BIDS-compatible TSV file with optional JSON sidecar.
2717
+
2718
+ This function writes time series data to a TSV file following BIDS conventions for physiological
2719
+ (``_physio``) and stimulation (``_stim``) data. It supports optional compression, appending to
2720
+ existing files, and includes metadata in a corresponding JSON file.
2721
+
2722
+ Parameters
2723
+ ----------
2724
+ outputfileroot : str
2725
+ Root name of the output files (without extension). The function will write
2726
+ ``<outputfileroot>.tsv`` or ``<outputfileroot>.tsv.gz`` and ``<outputfileroot>.json``.
2727
+ data : NDArray
2728
+ Time series data to be written. If 1D, it will be reshaped to (1, n_timesteps).
2729
+ Shape should be (n_channels, n_timesteps).
2730
+ samplerate : float
2731
+ Sampling frequency of the data in Hz.
2732
+ extraheaderinfo : dict, optional
2733
+ Additional key-value pairs to include in the JSON sidecar file.
2734
+ compressed : bool, default=True
2735
+ If True, compress the TSV file using gzip (.tsv.gz). If False, write uncompressed (.tsv).
2736
+ columns : list of str, optional
2737
+ Column names for the TSV file. If None, default names are generated using
2738
+ ``makecolname``.
2739
+ xaxislabel : str, default="time"
2740
+ Label for the x-axis in the JSON sidecar.
2741
+ yaxislabel : str, default="arbitrary value"
2742
+ Label for the y-axis in the JSON sidecar.
2743
+ starttime : float, default=0.0
2744
+ Start time of the recording in seconds.
2745
+ append : bool, default=False
2746
+ If True, append data to an existing file. The function checks compatibility of
2747
+ sampling rate, start time, and number of columns.
2748
+ samplerate_tolerance : float, default=1e-6
2749
+ Tolerance for comparing sampling rates when appending data.
2750
+ starttime_tolerance : float, default=1e-6
2751
+ Tolerance for comparing start times when appending data.
2752
+ colsinjson : bool, default=True
2753
+ If True, include the column names in the JSON file under the "Columns" key.
2754
+ colsintsv : bool, default=False
2755
+ If True, write column headers in the TSV file. BIDS convention requires no headers.
2756
+ omitjson : bool, default=False
2757
+ If True, do not write the JSON sidecar file.
2758
+ debug : bool, default=False
2759
+ If True, print debug information during execution.
2760
+
2761
+ Returns
2762
+ -------
2763
+ None
2764
+ This function does not return any value.
2765
+
2766
+ Notes
2767
+ -----
2768
+ - BIDS-compliant TSV files require:
2769
+ 1. Compression (.tsv.gz)
2770
+ 2. Presence of "SamplingFrequency", "StartTime", and "Columns" in the JSON file
2771
+ 3. No column headers in the TSV file
2772
+ 4. File name ending in "_physio" or "_stim"
2773
+ - If ``append=True``, the function will validate compatibility of sampling rate, start time,
2774
+ and number of columns with the existing file.
2775
+
2776
+ Examples
2777
+ --------
2778
+ >>> import numpy as np
2779
+ >>> data = np.random.rand(2, 1000)
2780
+ >>> writebidstsv("sub-01_task-rest_physio", data, samplerate=100.0)
2781
+ >>> # Writes:
2782
+ >>> # sub-01_task-rest_physio.tsv.gz
2783
+ >>> # sub-01_task-rest_physio.json
2784
+
2785
+ See Also
2786
+ --------
2787
+ readbidstsv : Read BIDS physiological or stimulation data from TSV and JSON files.
1468
2788
  """
1469
2789
  if debug:
1470
2790
  print("entering writebidstsv:")
@@ -1485,7 +2805,9 @@ def writebidstsv(
1485
2805
  reshapeddata = data
1486
2806
  if append:
1487
2807
  insamplerate, instarttime, incolumns, indata, incompressed, incolsource = readbidstsv(
1488
- outputfileroot + ".json", neednotexist=True, debug=debug,
2808
+ outputfileroot + ".json",
2809
+ neednotexist=True,
2810
+ debug=debug,
1489
2811
  )
1490
2812
  if debug:
1491
2813
  print("appending")
@@ -1571,36 +2893,58 @@ def writebidstsv(
1571
2893
  )
1572
2894
 
1573
2895
 
1574
- def readvectorsfromtextfile(fullfilespec, onecol=False, debug=False):
1575
- r"""Read one or more time series from some sort of text file
2896
+ def readvectorsfromtextfile(
2897
+ fullfilespec: str, onecol: bool = False, debug: bool = False
2898
+ ) -> Tuple[Optional[float], Optional[float], Optional[List[str]], NDArray, Optional[bool], str]:
2899
+ """
2900
+ Read time series data from a text-based file (TSV, CSV, MAT, or BIDS-style TSV).
2901
+
2902
+ This function reads timecourse data from various file formats, including plain TSV,
2903
+ gzipped TSV (.tsv.gz), CSV, and BIDS-style continuous data files (.tsv with associated .json).
2904
+ It automatically detects the file type and parses the data accordingly.
1576
2905
 
1577
2906
  Parameters
1578
2907
  ----------
1579
2908
  fullfilespec : str
1580
- The file name. If extension is .tsv or .json, it will be assumed to be either a BIDS tsv, or failing that,
1581
- a non-BIDS tsv. If the extension is .csv, it will be assumed to be a csv file. If the extension is .mat,
1582
- it will be assumed to be an FSL design.mat file. If any other extension or
1583
- no extension, it will be assumed to be a plain, whitespace separated text file.
1584
- colspec: A valid list and/or range of column numbers, or list of column names, or None
1585
- debug : bool
1586
- Output additional debugging information
2909
+ Path to the input file. May include an embedded, colon-separated column
+ specification (e.g., ``"file.tsv:0-5"``) selecting which columns or
+ column names to return; the specification is split off with
+ ``parsefilespec`` before the file is read.
2914
+ onecol : bool, optional
2915
+ If True, returns only the first column of data. Default is False.
2916
+ debug : bool, optional
2917
+ If True, prints additional debugging information. Default is False.
1587
2918
 
1588
2919
  Returns
1589
2920
  -------
1590
- samplerate : float
1591
- Sample rate in Hz. None if not knowable.
1592
- starttime : float
1593
- Time of first point, in seconds. None if not knowable.
1594
- columns : str array
1595
- Names of the timecourses contained in the file. None if not knowable.
1596
- data : 2D numpy array
1597
- Timecourses from the file
1598
- compressed: bool
1599
- True if time data is gzipped (as in a .tsv.gz file).
1600
- filetype: str
1601
- One of "text", "csv", "plaintsv", "bidscontinuous".
1602
-
1603
- NOTE: If file does not exist or is not valid, all return values are None"""
2921
+ samplerate : float
2922
+ Sample rate in Hz. None if not knowable.
2923
+ starttime : float
2924
+ Time of first point, in seconds. None if not knowable.
2925
+ columns : str array
2926
+ Names of the timecourses contained in the file. None if not knowable.
2927
+ data : 2D numpy array
2928
+ Timecourses from the file.
2929
+ compressed : bool
2930
+ True if time data is gzipped (as in a .tsv.gz file).
2931
+ filetype : str
2932
+ One of "text", "csv", "plaintsv", "bidscontinuous".
2933
+
2934
+ Notes
2935
+ -----
2936
+ - If the file does not exist or is not valid, all return values are None.
2937
+ - For BIDS-style TSV files, the associated .json sidecar file is used to determine
2938
+ sample rate and start time.
2939
+ - For plain TSV files, column names are read from the header row.
2940
+ - If ``onecol`` is True, only the first column is returned.
2941
+
2942
+ Examples
2943
+ --------
2944
+ >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv")
2945
+ >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv[0:3]")
2946
+ >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv", onecol=True)
2947
+ """
1604
2948
 
1605
2949
  thefilename, colspec = parsefilespec(fullfilespec)
1606
2950
  thefileroot, theext = os.path.splitext(thefilename)
@@ -1732,31 +3076,76 @@ def readvectorsfromtextfile(fullfilespec, onecol=False, debug=False):
1732
3076
  return thesamplerate, thestarttime, thecolumns, thedata, compressed, filetype
1733
3077
 
1734
3078
 
1735
- def readbidstsv(inputfilename, colspec=None, warn=True, neednotexist=False, debug=False):
1736
- r"""Read time series out of a BIDS tsv file
3079
+ def readbidstsv(
3080
+ inputfilename: str,
3081
+ colspec: Optional[str] = None,
3082
+ warn: bool = True,
3083
+ neednotexist: bool = False,
3084
+ debug: bool = False,
3085
+ ) -> Tuple[
3086
+ Optional[float],
3087
+ Optional[float],
3088
+ Optional[List[str]],
3089
+ Optional[NDArray],
3090
+ Optional[bool],
3091
+ Optional[str],
3092
+ ]:
3093
+ """
3094
+ Read BIDS-compatible TSV data file with associated JSON metadata.
3095
+
3096
+ This function reads a TSV file (optionally gzipped) and its corresponding JSON
3097
+ metadata file to extract timecourse data, sample rate, start time, and column names.
3098
+ It supports both compressed (.tsv.gz) and uncompressed (.tsv) TSV files.
1737
3099
 
1738
3100
  Parameters
1739
3101
  ----------
1740
3102
  inputfilename : str
1741
- The root name of the tsv and accompanying json file (no extension)
1742
- colspec: list
1743
- A comma separated list of column names to return
1744
- debug : bool
1745
- Output additional debugging information
3103
+ The root name of the TSV and accompanying JSON file (without extension).
3104
+ colspec : str, optional
3105
+ A comma-separated list of column names to return. If None, all columns are returned.
3106
+ debug : bool, optional
3107
+ If True, print additional debugging information. Default is False.
3108
+ warn : bool, optional
3109
+ If True, print warnings for missing metadata fields. Default is True.
3110
+ neednotexist : bool, optional
3111
+ If True, return None values instead of raising an exception if files do not exist.
3112
+ Default is False.
1746
3113
 
1747
3114
  Returns
1748
3115
  -------
3116
+ tuple of (samplerate, starttime, columns, data, is_compressed, columnsource)
1749
3117
  samplerate : float
1750
- Sample rate in Hz
3118
+ Sample rate in Hz.
1751
3119
  starttime : float
1752
- Time of first point, in seconds
1753
- columns : str array
1754
- Names of the timecourses contained in the file
1755
- data : 2D numpy array
1756
- Timecourses from the file
1757
-
1758
- NOTE: If file does not exist or is not valid, all return values are None
1759
-
3120
+ Time of first point in seconds.
3121
+ columns : list of str
3122
+ Names of the timecourses contained in the file.
3123
+ data : NDArray, optional
3124
+ 2D array of timecourses from the file. Returns None if file does not exist or is invalid.
3125
+ is_compressed : bool
3126
+ Indicates whether the TSV file was gzipped.
3127
+ columnsource : str
3128
+ Source of column names: either 'json' or 'tsv'.
3129
+
3130
+ Notes
3131
+ -----
3132
+ - If the TSV file does not exist or is not valid, all return values are None.
3133
+ - If the JSON metadata file is missing required fields (SamplingFrequency, StartTime, Columns),
3134
+ default values are used and warnings are issued if `warn=True`.
3135
+ - The function handles both gzipped and uncompressed TSV files.
3136
+ - If a header line is found in the TSV file, it is skipped and a warning is issued.
3137
+
3138
+ Examples
3139
+ --------
3140
+ >>> samplerate, starttime, columns, data, is_compressed, source = readbidstsv('sub-01_task-rest')
3141
+ >>> print(f"Sample rate: {samplerate} Hz")
3142
+ Sample rate: 10.0 Hz
3143
+
3144
+ >>> samplerate, starttime, columns, data, is_compressed, source = readbidstsv(
3145
+ ... 'sub-01_task-rest', colspec='column1,column2'
3146
+ ... )
3147
+ >>> print(f"Selected columns: {columns}")
3148
+ Selected columns: ['column1', 'column2']
1760
3149
  """
1761
3150
  thefileroot, theext = os.path.splitext(inputfilename)
1762
3151
  if theext == ".gz":
@@ -1905,18 +3294,64 @@ def readbidstsv(inputfilename, colspec=None, warn=True, neednotexist=False, debu
1905
3294
  raise FileNotFoundError(f"file pair {thefileroot}(.json/.tsv[.gz]) does not exist")
1906
3295
 
1907
3296
 
1908
- def readcolfrombidstsv(inputfilename, columnnum=0, columnname=None, neednotexist=False, debug=False):
1909
- r"""
3297
+ def readcolfrombidstsv(
3298
+ inputfilename: str,
3299
+ columnnum: int = 0,
3300
+ columnname: Optional[str] = None,
3301
+ neednotexist: bool = False,
3302
+ debug: bool = False,
3303
+ ) -> Tuple[Optional[float], Optional[float], Optional[NDArray]]:
3304
+ """
3305
+ Read a specific column from a BIDS TSV file.
3306
+
3307
+ Extracts a single column of data from a BIDS TSV file, either by column name
3308
+ or by column index. The function handles both compressed and uncompressed files.
1910
3309
 
1911
3310
  Parameters
1912
3311
  ----------
1913
- inputfilename
1914
- columnnum
1915
- columnname
3312
+ inputfilename : str
3313
+ Path to the input BIDS TSV file (can be .tsv or .tsv.gz)
3314
+ columnname : str, optional
3315
+ Name of the column to extract. If specified, ``columnnum`` is ignored.
3316
+ Default is None.
3317
+ columnnum : int, optional
3318
+ Index of the column to extract (0-based). Ignored if ``columnname`` is specified.
3319
+ Default is 0.
3320
+ neednotexist : bool, optional
3321
+ If True, the function will not raise an error if the file does not exist.
3322
+ Default is False.
3323
+ debug : bool, optional
3324
+ Enable debug output. Default is False.
1916
3325
 
1917
3326
  Returns
1918
3327
  -------
1919
-
3328
+ tuple
3329
+ A tuple containing:
3330
+
3331
+ - samplerate : float or None
3332
+ Sampling rate extracted from the file, or None if no valid data found
3333
+ - starttime : float or None
3334
+ Start time extracted from the file, or None if no valid data found
3335
+ - data : NDArray or None
3336
+ The extracted column data as a 1D array, or None if no valid data found
3337
+
3338
+ Notes
3339
+ -----
3340
+ - If both ``columnname`` and ``columnnum`` are specified, ``columnname`` takes precedence
3341
+ - Column indices are 0-based
3342
+ - The function handles both compressed (.tsv.gz) and uncompressed (.tsv) files
3343
+ - Returns None for all values if no valid data is found
3344
+
3345
+ Examples
3346
+ --------
3347
+ >>> # Read first column by index
3348
+ >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnnum=0)
3349
+
3350
+ >>> # Read column by name
3351
+ >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnname='reaction_time')
3352
+
3353
+ >>> # Read column with debug output
3354
+ >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnname='rt', debug=True)
1920
3355
  """
1921
3356
  samplerate, starttime, columns, data, compressed, colsource = readbidstsv(
1922
3357
  inputfilename, neednotexist=neednotexist, debug=debug
@@ -1946,7 +3381,54 @@ def readcolfrombidstsv(inputfilename, columnnum=0, columnname=None, neednotexist
1946
3381
  return samplerate, starttime, data[columnnum, :]
1947
3382
 
1948
3383
 
1949
- def parsefilespec(filespec, debug=False):
3384
+ def parsefilespec(filespec: str, debug: bool = False) -> Tuple[str, Optional[str]]:
3385
+ """
3386
+ Parse a file specification string into filename and column specification.
3387
+
3388
+ This function splits a file specification string using ':' as the delimiter.
3389
+ On Windows platforms, it handles special cases where the second character
3390
+ is ':' (e.g., "C:file.txt") by treating the first two parts as the filename.
3391
+
3392
+ Parameters
3393
+ ----------
3394
+ filespec : str
3395
+ The file specification string to parse. Expected format is
3396
+ "filename[:column_specification]".
3397
+ debug : bool, optional
3398
+ If True, print debug information during execution. Default is False.
3399
+
3400
+ Returns
3401
+ -------
3402
+ tuple[str, str or None]
3403
+ A tuple containing:
3404
+ - thefilename : str
3405
+ The parsed filename part of the specification
3406
+ - thecolspec : str or None
3407
+ The parsed column specification, or None if not provided
3408
+
3409
+ Raises
3410
+ ------
3411
+ ValueError
3412
+ If the file specification is malformed (e.g., too many parts when
3413
+ special case handling is not applicable).
3414
+
3415
+ Notes
3416
+ -----
3417
+ On Windows systems, this function correctly handles drive letter specifications
3418
+ such as "C:file.txt" by treating the first two elements ("C:" and "file.txt")
3419
+ as the filename part.
3420
+
3421
+ Examples
3422
+ --------
3423
+ >>> parsefilespec("data.csv")
3424
+ ('data.csv', None)
3425
+
3426
+ >>> parsefilespec("data.csv:1,3,5")
3427
+ ('data.csv', '1,3,5')
3428
+
3429
+ >>> parsefilespec("C:file.txt:col1")
3430
+ ('C:file.txt', 'col1')
3431
+ """
1950
3432
  inputlist = filespec.split(":")
1951
3433
  if debug:
1952
3434
  print(f"PARSEFILESPEC: input string >>>{filespec}<<<")
@@ -1981,7 +3463,47 @@ def parsefilespec(filespec, debug=False):
1981
3463
  return thefilename, thecolspec
1982
3464
 
1983
3465
 
1984
- def unique(list1):
3466
+ def unique(list1: List[Any]) -> List[Any]:
3467
+ """
+ Return a list of the unique elements of ``list1``.
+
+ Parameters
+ ----------
+ list1 : list
+ The input list, which may contain duplicate elements.
+
+ Returns
+ -------
+ list
+ A new list containing each distinct element of ``list1`` exactly once.
+ NOTE(review): built by appending to ``unique_list``, so first-occurrence
+ order is presumably preserved - confirm against the loop body.
+ """
1985
3507
  # initialize a null list
1986
3508
  unique_list = []
1987
3509
 
@@ -1993,7 +3515,57 @@ def unique(list1):
1993
3515
  return unique_list
1994
3516
 
1995
3517
 
1996
- def colspectolist(colspec, debug=False):
3518
+ def colspectolist(colspec: Optional[str], debug: bool = False) -> Optional[List[int]]:
3519
+ """
3520
+ Convert a column specification string into a sorted list of integers.
3521
+
3522
+ This function parses a column specification string that may contain
3523
+ individual integers, ranges (e.g., "1-5"), or predefined macros (e.g.,
3524
+ "APARC_GRAY"). It expands macros into their corresponding ranges and
3525
+ returns a sorted list of unique integers.
3526
+
3527
+ Parameters
3528
+ ----------
3529
+ colspec : str or None
3530
+ A column specification string. Can include:
3531
+ - Individual integers (e.g., "1", "10")
3532
+ - Ranges (e.g., "1-5")
3533
+ - Predefined macros (e.g., "APARC_GRAY")
3534
+ If None, the function prints an error and returns None.
3535
+ debug : bool, optional
3536
+ If True, enables debug output showing processing steps. Default is False.
3537
+
3538
+ Returns
3539
+ -------
3540
+ list of int or None
3541
+ A sorted list of unique integers corresponding to the column
3542
+ specification. Returns None if an error occurs during processing.
3543
+
3544
+ Notes
3545
+ -----
3546
+ Predefined macros:
3547
+ - APARC_SUBCORTGRAY: 8-13,17-20,26-28,47-56,58-60,96,97
3548
+ - APARC_CORTGRAY: 1000-1035,2000-2035
3549
+ - APARC_GRAY: 8-13,17-20,26-28,47-56,58-60,96,97,1000-1035,2000-2035
3550
+ - APARC_WHITE: 2,7,41,46,177,219,3000-3035,4000-4035,5001,5002
3551
+ - APARC_CSF: 4,5,14,15,24,31,43,44,63,72
3552
+ - APARC_ALLBUTCSF: 2,7-13,17-20,26-28,41,46-56,58-60,96,97,177,219,1000-1035,2000-2035,3000-3035,4000-4035,5001,5002
3553
+ - SSEG_GRAY: 3,8,10-13,16-18,26,42,47,49-54,58
3554
+ - SSEG_WHITE: 2,7,41,46
3555
+ - SSEG_CSF: 4,5,14,15,24,43,44
3556
+
3557
+ Examples
3558
+ --------
3559
+ >>> colspectolist("1-3,5,7-9")
3560
+ [1, 2, 3, 5, 7, 8, 9]
3561
+
3562
+ >>> colspectolist("APARC_GRAY")
3563
+ [8, 9, 10, 11, 12, 13, 17, 18, 19, 20, 26, 27, 28, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 58, 59, 60, 96, 97, 1000, 1001, ..., 2035]
3564
+
3565
+ >>> colspectolist(None)
3566
+ COLSPECTOLIST: no range specification - exiting
3567
+ None
3568
+ """
1997
3569
  if colspec is None:
1998
3570
  print("COLSPECTOLIST: no range specification - exiting")
1999
3571
  return None
@@ -2001,6 +3573,46 @@ def colspectolist(colspec, debug=False):
2001
3573
  theranges = colspec.split(",")
2002
3574
 
2003
3575
  def safeint(s):
3576
+ """
+ Convert ``s`` to an integer, tolerating inputs that ``int()`` rejects.
+
+ Parameters
+ ----------
+ s : str, int, or float
+ The value to convert.
+
+ Returns
+ -------
+ int
+ ``int(s)`` when the conversion succeeds (floats are truncated toward
+ zero). When ``int(s)`` raises, the fallback branch below handles the
+ value instead (e.g. the named range macros used by ``colspectolist``).
+
+ Examples
+ --------
+ >>> safeint(42)
+ 42
+ >>> safeint(3.14)
+ 3
+ """
2004
3616
  try:
2005
3617
  int(s)
2006
3618
  return int(s)
@@ -2058,7 +3670,43 @@ def colspectolist(colspec, debug=False):
2058
3670
  return unique(sorted(collist))
2059
3671
 
2060
3672
 
2061
- def processnamespec(maskspec, spectext1, spectext2, debug=False):
3673
+ def processnamespec(
3674
+ maskspec: str, spectext1: str, spectext2: str, debug: bool = False
3675
+ ) -> Tuple[str, Optional[List[int]]]:
3676
+ """
3677
+ Parse a file specification and extract filename and column specifications.
3678
+
3679
+ This function takes a file specification string and parses it to separate the filename
3680
+ from any column specification. The column specification is converted into a list of
3681
+ column indices for further processing.
3682
+
3683
+ Parameters
3684
+ ----------
3685
+ maskspec : str
3686
+ Input file specification string containing filename and optional column specification
3687
+ debug : bool, optional
3688
+ Enable debug output. Default is False
3689
+
3690
+ Returns
3691
+ -------
3692
+ filename : str
3693
+ Parsed filename
3694
+ collist : list of int or None
3695
+ List of column indices, or None if no column spec provided
3696
+
3697
+ Notes
3698
+ -----
3699
+ The function uses `parsefilespec` to split the input string and `colspectolist` to
3700
+ convert column specifications into lists of integers.
3701
+
3702
+ Examples
3703
+ --------
3704
+ >>> processnamespec("data.txt:1,3,5")
3705
+ ('data.txt', [1, 3, 5])
3706
+
3707
+ >>> processnamespec("data.txt")
3708
+ ('data.txt', None)
3709
+ """
2062
3710
  thename, colspec = parsefilespec(maskspec)
2063
3711
  if colspec is not None:
2064
3712
  thevals = colspectolist(colspec)
@@ -2069,16 +3717,57 @@ def processnamespec(maskspec, spectext1, spectext2, debug=False):
2069
3717
  return thename, thevals
2070
3718
 
2071
3719
 
2072
- def readcolfromtextfile(inputfilespec):
2073
- r"""
3720
+ def readcolfromtextfile(inputfilespec: str) -> NDArray:
3721
+ """
+ Read a single timecourse from a text file.
+
+ The file specification is parsed with ``parsefilespec``, so it may carry
+ an embedded column selection after a colon (e.g. ``"data.txt:2"``); only
+ the first selected column is returned.
 
 Parameters
 ----------
- inputfilename
- colspec
+ inputfilespec : str
+ The file name, optionally followed by a colon-separated column
+ specification selecting which column(s) to read.
 
 Returns
 -------
+ NDArray
+ A 1D numpy array containing the first selected timecourse.
+
+ Examples
+ --------
+ >>> timecourse = readcolfromtextfile('data.txt')
+ >>> timecourse = readcolfromtextfile('data.txt:3')
2082
3771
  """
2083
3772
  inputfilename, colspec = parsefilespec(inputfilespec)
2084
3773
  if inputfilename is None:
@@ -2093,16 +3782,53 @@ def readcolfromtextfile(inputfilespec):
2093
3782
  return inputdata[:, 0]
2094
3783
 
2095
3784
 
2096
- def readvecs(inputfilename, colspec=None, numskip=0, debug=False, thedtype=float):
2097
- r"""
3785
+ def readvecs(
3786
+ inputfilename: str,
3787
+ colspec: Optional[str] = None,
3788
+ numskip: int = 0,
3789
+ debug: bool = False,
3790
+ thedtype: type = float,
3791
+ ) -> NDArray:
3792
+ """
3793
+ Read vectors from a text file and return them as a transposed numpy array.
2098
3794
 
2099
3795
  Parameters
2100
3796
  ----------
2101
- inputfilename
3797
+ inputfilename : str
3798
+ The name of the text file to read data from.
3799
+ colspec : str, optional
3800
+ A string specifying which columns to read. If None, all columns in the first
3801
+ line are read. Default is None.
3802
+ numskip : int, optional
3803
+ Number of lines to skip at the beginning of the file. If 0, the function
3804
+ attempts to auto-detect if the first line contains headers. Default is 0.
3805
+ thedtype : type, optional
3806
+ The data type to convert the read values to. Default is float.
3807
+ debug : bool, optional
3808
+ If True, print debug information including input parameters and processing
3809
+ details. Default is False.
2102
3810
 
2103
3811
  Returns
2104
3812
  -------
2105
-
3813
+ NDArray
3814
+ A 2D numpy array where each row corresponds to a vector read from the file.
3815
+ The array is transposed such that each column represents a vector.
3816
+
3817
+ Notes
3818
+ -----
3819
+ - The function assumes that the input file contains numeric data separated by
3820
+ whitespace.
3821
+ - If `colspec` is not provided, all columns from the first line are read.
3822
+ - If `numskip` is 0, the function attempts to detect whether the first line
3823
+ contains headers by trying to convert the first element to a float.
3824
+ - The function raises a `ValueError` if any requested column index is out of
3825
+ bounds.
3826
+
3827
+ Examples
3828
+ --------
3829
+ >>> data = readvecs('data.txt')
3830
+ >>> data = readvecs('data.txt', colspec='1:3', numskip=1)
3831
+ >>> data = readvecs('data.txt', colspec='0,2,4', thedtype=int)
2106
3832
  """
2107
3833
  if debug:
2108
3834
  print(f"inputfilename: {inputfilename}")
@@ -2137,19 +3863,46 @@ def readvecs(inputfilename, colspec=None, numskip=0, debug=False, thedtype=float
2137
3863
  return theoutarray
2138
3864
 
2139
3865
 
2140
- def readvec(inputfilename, numskip=0):
2141
- r"""Read an array of floats in from a text file.
3866
+ def readvec(inputfilename: str, numskip: int = 0) -> NDArray:
3867
+ """
+ Read an array of floats in from a text file.
 
 Parameters
 ----------
 inputfilename : str
- The name of the text file
+ The name of the text file to read.
+ numskip : int, optional
+ Number of lines to skip at the beginning of the file. Default is 0.
 
 Returns
 -------
- inputdata : 1D numpy float array
- The data from the file
-
+ NDArray
+ 1D numpy float array containing the values read from the file.
+
+ Examples
+ --------
+ >>> data = readvec('timecourse.txt')
+ >>> data = readvec('timecourse.txt', numskip=1)
2153
3906
  """
2154
3907
  inputvec = []
2155
3908
  with open(inputfilename, "r") as thefile:
@@ -2160,7 +3913,57 @@ def readvec(inputfilename, numskip=0):
2160
3913
  return np.asarray(inputvec, dtype=float)
2161
3914
 
2162
3915
 
2163
- def readtc(inputfilename, colnum=None, colname=None, debug=False):
3916
+ def readtc(
3917
+ inputfilename: str,
3918
+ colnum: Optional[int] = None,
3919
+ colname: Optional[str] = None,
3920
+ debug: bool = False,
3921
+ ) -> Tuple[NDArray, Optional[float], Optional[float]]:
3922
+ """
3923
+ Read timecourse data from a file, supporting BIDS TSV and other formats.
3924
+
3925
+ This function reads timecourse data from a file, with support for BIDS TSV files
3926
+ and generic multi-column text files. For BIDS TSV files, a column name or number
3927
+ must be specified. For other file types, column selection is limited to numeric indices.
3928
+
3929
+ Parameters
3930
+ ----------
3931
+ inputfilename : str
3932
+ Path to the input file to read. Can be a BIDS TSV file (`.tsv`) or a generic
3933
+ text file with multiple columns.
3934
+ colname : str or None, optional
3935
+ Column name to read from a BIDS TSV file. Required if the file is a BIDS TSV
3936
+ and `colnum` is not specified. Default is None.
3937
+ colnum : int or None, optional
3938
+ Column number to read from a BIDS TSV file or a generic multi-column file.
3939
+ Required for generic files when `colname` is not specified. Default is None.
3940
+ debug : bool, optional
3941
+ Enable debug output to print intermediate information. Default is False.
3942
+
3943
+ Returns
3944
+ -------
3945
+ timecourse : NDArray
3946
+ The timecourse data as a 1D numpy array.
3947
+ inputfreq : float or None
3948
+ Sampling frequency (Hz) if available from the file metadata. Default is None.
3949
+ inputstart : float or None
3950
+ Start time (seconds) if available from the file metadata. Default is None.
3951
+
3952
+ Notes
3953
+ -----
3954
+ - For BIDS TSV files (`.tsv`), the function reads the specified column using
3955
+ `readcolfrombidstsv`, which extracts metadata such as sampling frequency and
3956
+ start time.
3957
+ - For generic text files, the function transposes the data and selects the
3958
+ specified column if `colnum` is provided.
3959
+ - If the input file is a `.json` file, it is assumed to contain metadata for
3960
+ a BIDS TSV file and is processed accordingly.
3961
+
3962
+ Examples
3963
+ --------
3964
+ >>> timecourse, freq, start = readtc('data.tsv', colname='signal')
3965
+ >>> timecourse, freq, start = readtc('data.txt', colnum=0, debug=True)
3966
+ """
2164
3967
  # check file type
2165
3968
  filebase, extension = os.path.splitext(inputfilename)
2166
3969
  inputfreq = None
@@ -2192,16 +3995,47 @@ def readtc(inputfilename, colnum=None, colname=None, debug=False):
2192
3995
  return timecourse, inputfreq, inputstart
2193
3996
 
2194
3997
 
2195
- def readlabels(inputfilename):
2196
- r"""
3998
+ def readlabels(inputfilename: str) -> List[str]:
3999
+ """
+ Read a list of string labels from a text file.
 
 Parameters
 ----------
- inputfilename
+ inputfilename : str
+ The name of the text file containing the labels.
 
 Returns
 -------
-
+ list of str
+ The labels read from the file, in file order. NOTE(review): the
+ per-line parsing is outside this view - presumably one label per
+ line; confirm against the read loop.
+
+ Examples
+ --------
+ >>> labels = readlabels('segmentation_labels.txt')
2205
4039
  """
2206
4040
  inputvec = []
2207
4041
  with open(inputfilename, "r") as thefile:
@@ -2211,22 +4045,42 @@ def readlabels(inputfilename):
2211
4045
  return inputvec
2212
4046
 
2213
4047
 
2214
- def writedict(thedict, outputfile, lineend="", machinereadable=False):
2215
- r"""
2216
- Write all the key value pairs from a dictionary to a text file.
4048
+ def writedict(
4049
+ thedict: Dict[str, Any], outputfile: str, lineend: str = "", machinereadable: bool = False
4050
+ ) -> None:
4051
+ """
4052
+ Write a dictionary to a text file with customizable line endings and formatting.
2217
4053
 
2218
4054
  Parameters
2219
4055
  ----------
2220
4056
  thedict : dict
2221
- A dictionary
4057
+ Dictionary containing key-value pairs to be written to file
2222
4058
  outputfile : str
2223
- The name of the output file
2224
- lineend : { 'mac', 'win', 'linux' }, optional
2225
- Line ending style to use. Default is 'linux'.
4059
+ Path to the output file where dictionary will be written
4060
+ lineend : str, optional
4061
+ Line ending style to use ('mac', 'win', 'linux'), default is 'linux'
4062
+ machinereadable : bool, optional
4063
+ If True, write in machine-readable JSON-like format with quotes around keys,
4064
+ default is False
2226
4065
 
2227
4066
  Returns
2228
4067
  -------
2229
-
4068
+ None
4069
+ Function writes to file but does not return any value
4070
+
4071
+ Notes
4072
+ -----
4073
+ - For 'mac' line endings, uses carriage return (`\\r`)
4074
+ - For 'win' line endings, uses carriage return + line feed (`\\r\\n`)
4075
+ - For 'linux' line endings, uses line feed (`\\n`)
4076
+ - When `machinereadable=True`, keys are quoted and formatted with tab separators
4077
+ - When `machinereadable=False`, keys are written without quotes
4078
+
4079
+ Examples
4080
+ --------
4081
+ >>> my_dict = {'name': 'John', 'age': 30}
4082
+ >>> writedict(my_dict, 'output.txt', lineend='linux', machinereadable=False)
4083
+ >>> writedict(my_dict, 'output.json', lineend='win', machinereadable=True)
2230
4084
  """
2231
4085
  if lineend == "mac":
2232
4086
  thelineending = "\r"
@@ -2252,19 +4106,42 @@ def writedict(thedict, outputfile, lineend="", machinereadable=False):
2252
4106
  FILE.writelines("}" + thelineending)
2253
4107
 
2254
4108
 
2255
- def readdict(inputfilename):
2256
- r"""Read key value pairs out of a text file
4109
+ def readdict(inputfilename: str) -> Dict[str, Any]:
4110
+ """
4111
+ Read a dictionary from a text file.
4112
+
4113
+ Read a dictionary from a text file where each line contains a key followed by one or more values.
4114
+ The key is the first element of each line (with the trailing character removed), and the values
4115
+ are the remaining elements on that line.
2257
4116
 
2258
4117
  Parameters
2259
4118
  ----------
2260
4119
  inputfilename : str
2261
- The name of the json file (with extension)
4120
+ The name of the input file to read the dictionary from.
2262
4121
 
2263
4122
  Returns
2264
4123
  -------
2265
- thedict : dict
2266
- The key value pairs from the json file
2267
-
4124
+ dict
4125
+ A dictionary where keys are the first element of each line (with last character removed)
4126
+ and values are the remaining elements. If a line contains only one value, that value is
4127
+ returned as a string rather than a list. If the file does not exist, an empty dictionary
4128
+ is returned.
4129
+
4130
+ Notes
4131
+ -----
4132
+ - The function assumes that the input file is properly formatted
4133
+ - Keys are processed by removing the last character from the first field
4134
+ - Values are stored as lists unless there's only one value, in which case it's stored as a string
4135
+ - If the file does not exist, a message is printed and an empty dictionary is returned
4136
+
4137
+ Examples
4138
+ --------
4139
+ >>> # Assuming a file 'data.txt' with content:
4140
+ >>> # key1: val1 val2 val3
4141
+ >>> # key2: val4
4142
+ >>> result = readdict('data.txt')
4143
+ >>> print(result)
4144
+ {'key1': ['val1', 'val2', 'val3'], 'key2': 'val4'}
2268
4145
  """
2269
4146
  if os.path.exists(inputfilename):
2270
4147
  thedict = {}
@@ -2282,20 +4159,39 @@ def readdict(inputfilename):
2282
4159
  return {}
2283
4160
 
2284
4161
 
2285
- def writevec(thevec, outputfile, lineend=""):
2286
- r"""Write a vector out to a text file.
4162
+ def writevec(thevec: NDArray, outputfile: str, lineend: str = "") -> None:
4163
+ """
4164
+ Write a vector to a text file, one value per line.
4165
+
2287
4166
  Parameters
2288
4167
  ----------
2289
4168
  thevec : 1D numpy or python array
2290
- The array to write.
4169
+ The array to write. Must be a 1D array-like object.
2291
4170
  outputfile : str
2292
- The name of the output file
2293
- lineend : { 'mac', 'win', 'linux' }, optional
4171
+ The name of the output file to write to.
4172
+ lineend : {'mac', 'win', 'linux'}, optional
2294
4173
  Line ending style to use. Default is 'linux'.
4174
+ - 'mac': Use Mac line endings (\r)
4175
+ - 'win': Use Windows line endings (\r\n)
4176
+ - 'linux': Use Linux line endings (\n)
2295
4177
 
2296
4178
  Returns
2297
4179
  -------
2298
-
4180
+ None
4181
+ This function does not return any value.
4182
+
4183
+ Notes
4184
+ -----
4185
+ The function opens the output file in binary mode for all line ending types except
4186
+ when an invalid lineend value is provided, in which case it opens in text mode
4187
+ with default line endings.
4188
+
4189
+ Examples
4190
+ --------
4191
+ >>> import numpy as np
4192
+ >>> vec = np.array([1, 2, 3, 4, 5])
4193
+ >>> writevec(vec, 'output.txt')
4194
+ >>> writevec(vec, 'output_win.txt', lineend='win')
2299
4195
  """
2300
4196
  if lineend == "mac":
2301
4197
  thelineending = "\r"
@@ -2315,16 +4211,75 @@ def writevec(thevec, outputfile, lineend=""):
2315
4211
 
2316
4212
 
2317
4213
  def writevectorstotextfile(
2318
- thevecs,
2319
- outputfile,
2320
- samplerate=1.0,
2321
- starttime=0.0,
2322
- columns=None,
2323
- compressed=True,
2324
- filetype="text",
2325
- lineend="",
2326
- debug=False,
2327
- ):
4214
+ thevecs: NDArray,
4215
+ outputfile: str,
4216
+ samplerate: float = 1.0,
4217
+ starttime: float = 0.0,
4218
+ columns: Optional[List[str]] = None,
4219
+ compressed: bool = True,
4220
+ filetype: str = "text",
4221
+ lineend: str = "",
4222
+ debug: bool = False,
4223
+ ) -> None:
4224
+ """
4225
+ Write vectors to a text file in various formats.
4226
+
4227
+ This function writes data vectors to a text file, supporting multiple output formats
4228
+ including plain text, CSV, BIDS continuous data, and plain TSV. The format is determined
4229
+ by the `filetype` parameter. It supports optional headers, line ending styles, and
4230
+ compression for BIDS formats.
4231
+
4232
+ Parameters
4233
+ ----------
4234
+ thevecs : NDArray
4235
+ Data vectors to write. Should be a 2D array where each row is a vector.
4236
+ outputfile : str
4237
+ Output file path. The extension determines the file format if not explicitly specified.
4238
+ samplerate : float, optional
4239
+ Sampling rate in Hz. Default is 1.0. Used in BIDS formats.
4240
+ starttime : float, optional
4241
+ Start time in seconds. Default is 0.0. Used in BIDS formats.
4242
+ columns : list of str, optional
4243
+ Column names for the output file. If None, no headers are written.
4244
+ compressed : bool, optional
4245
+ Whether to compress the output file (for BIDS formats). Default is True.
4246
+ filetype : str, optional
4247
+ Output format. Options are:
4248
+ - 'text': Plain text with space-separated values
4249
+ - 'csv': Comma-separated values
4250
+ - 'bidscontinuous': BIDS continuous data format (TSV with JSON sidecar)
4251
+ - 'plaintsv': Plain TSV format without JSON sidecar
4252
+ Default is 'text'.
4253
+ lineend : str, optional
4254
+ Line ending style. Options are:
4255
+ - 'mac' (``\r``)
4256
+ - 'win' (``\r\n``)
4257
+ - 'linux' (``\n``)
4258
+ - '' (system default)
4259
+ Default is ''.
4260
+ debug : bool, optional
4261
+ Enable debug output. Default is False.
4262
+
4263
+ Returns
4264
+ -------
4265
+ None
4266
+ This function does not return any value.
4267
+
4268
+ Notes
4269
+ -----
4270
+ - For BIDS formats, the function uses `writebidstsv` internally and splits the
4271
+ output filename using `niftisplitext`.
4272
+ - The `columns` parameter is only used when writing headers.
4273
+ - The `lineend` parameter controls how newlines are written to the file.
4274
+
4275
+ Examples
4276
+ --------
4277
+ >>> import numpy as np
4278
+ >>> data = np.array([[1, 2, 3], [4, 5, 6]])
4279
+ >>> writevectorstotextfile(data, "output.txt", filetype="text")
4280
+ >>> writevectorstotextfile(data, "output.csv", filetype="csv", columns=["A", "B", "C"])
4281
+ >>> writevectorstotextfile(data, "output.tsv", filetype="bidscontinuous", samplerate=100.0)
4282
+ """
2328
4283
  if filetype == "text":
2329
4284
  writenpvecs(thevecs, outputfile, headers=columns, lineend=lineend)
2330
4285
  elif filetype == "csv":
@@ -2362,21 +4317,64 @@ def writevectorstotextfile(
2362
4317
 
2363
4318
 
2364
4319
  # rewritten to guarantee file closure, combines writenpvec and writenpvecs
2365
- def writenpvecs(thevecs, outputfile, ascsv=False, headers=None, altmethod=True, lineend=""):
2366
- r"""Write out a two dimensional numpy array to a text file
4320
+ def writenpvecs(
4321
+ thevecs: NDArray,
4322
+ outputfile: str,
4323
+ ascsv: bool = False,
4324
+ headers: Optional[List[str]] = None,
4325
+ altmethod: bool = True,
4326
+ lineend: str = "",
4327
+ ) -> None:
4328
+ """
4329
+ Write out a two dimensional numpy array to a text file.
4330
+
4331
+ This function writes a numpy array to a text file, with options for
4332
+ CSV-style output, custom headers, and line ending styles.
2367
4333
 
2368
4334
  Parameters
2369
4335
  ----------
2370
- thevecs: 1D or 2D numpy array
2371
- The data to write to the file
4336
+ thevecs : NDArray
4337
+ A 1D or 2D numpy array containing the data to be written. If 1D,
4338
+ the array is written as a single column. If 2D, each column is
4339
+ written as a separate line in the output file.
2372
4340
  outputfile : str
2373
- The name of the output file
2374
- lineend : { 'mac', 'win', 'linux' }, optional
2375
- Line ending style to use. Default is 'linux'.
4341
+ The path to the output file where the data will be written.
4342
+ ascsv : bool, optional
4343
+ If True, use comma as the separator; otherwise, use tab. Default is False.
4344
+ headers : list of str, optional
4345
+ A list of header strings to write at the beginning of the file.
4346
+ If provided, the number of headers must match the number of columns
4347
+ in the data (for 2D arrays) or 1 (for 1D arrays).
4348
+ altmethod : bool, optional
4349
+ If True, use an optimized method for writing 2D data. If False,
4350
+ use a nested loop approach. Default is True.
4351
+ lineend : str, optional
4352
+ Line ending style to use. Options are 'mac' (\r), 'win' (\r\n),
4353
+ 'linux' (\n), or empty string (uses system default). Default is 'linux'.
2376
4354
 
2377
4355
  Returns
2378
4356
  -------
2379
-
4357
+ None
4358
+ This function does not return any value.
4359
+
4360
+ Notes
4361
+ -----
4362
+ - For 2D arrays, data is written column-wise.
4363
+ - When `altmethod` is True, the function uses vectorized operations
4364
+ for better performance.
4365
+ - If `headers` are provided, they are written as the first line
4366
+ in the file, separated by the chosen delimiter.
4367
+
4368
+ Examples
4369
+ --------
4370
+ >>> import numpy as np
4371
+ >>> data = np.array([[1, 2, 3], [4, 5, 6]])
4372
+ >>> writenpvecs(data, 'output.txt')
4373
+ # Writes data as tab-separated columns to 'output.txt'
4374
+
4375
+ >>> headers = ['Col1', 'Col2', 'Col3']
4376
+ >>> writenpvecs(data, 'output.csv', ascsv=True, headers=headers)
4377
+ # Writes CSV-formatted data with headers to 'output.csv'
2380
4378
  """
2381
4379
  theshape = np.shape(thevecs)
2382
4380
  if lineend == "mac":