rapidtide 3.0.11__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144) hide show
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1049 -46
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +188 -40
  7. rapidtide/calcsimfunc.py +242 -42
  8. rapidtide/correlate.py +1203 -383
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +53 -3
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +29 -7
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/decorators.py +91 -0
  23. rapidtide/dlfilter.py +2226 -110
  24. rapidtide/dlfiltertorch.py +4842 -0
  25. rapidtide/externaltools.py +327 -12
  26. rapidtide/fMRIData_class.py +79 -40
  27. rapidtide/filter.py +1899 -810
  28. rapidtide/fit.py +2011 -581
  29. rapidtide/genericmultiproc.py +93 -18
  30. rapidtide/happy_supportfuncs.py +2047 -172
  31. rapidtide/helper_classes.py +584 -43
  32. rapidtide/io.py +2370 -372
  33. rapidtide/linfitfiltpass.py +346 -99
  34. rapidtide/makelaggedtcs.py +210 -24
  35. rapidtide/maskutil.py +448 -62
  36. rapidtide/miscmath.py +827 -121
  37. rapidtide/multiproc.py +210 -22
  38. rapidtide/patchmatch.py +242 -42
  39. rapidtide/peakeval.py +31 -31
  40. rapidtide/ppgproc.py +2203 -0
  41. rapidtide/qualitycheck.py +352 -39
  42. rapidtide/refinedelay.py +431 -57
  43. rapidtide/refineregressor.py +494 -189
  44. rapidtide/resample.py +671 -185
  45. rapidtide/scripts/applyppgproc.py +28 -0
  46. rapidtide/scripts/showxcorr_legacy.py +7 -7
  47. rapidtide/scripts/stupidramtricks.py +15 -17
  48. rapidtide/simFuncClasses.py +1052 -77
  49. rapidtide/simfuncfit.py +269 -69
  50. rapidtide/stats.py +540 -238
  51. rapidtide/tests/happycomp +9 -0
  52. rapidtide/tests/test_cleanregressor.py +1 -2
  53. rapidtide/tests/test_dlfiltertorch.py +627 -0
  54. rapidtide/tests/test_findmaxlag.py +24 -8
  55. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  56. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  57. rapidtide/tests/test_fullrunhappy_v3.py +11 -4
  58. rapidtide/tests/test_fullrunhappy_v4.py +10 -2
  59. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  60. rapidtide/tests/test_getparsers.py +11 -3
  61. rapidtide/tests/test_refinedelay.py +0 -1
  62. rapidtide/tests/test_simroundtrip.py +16 -8
  63. rapidtide/tests/test_stcorrelate.py +3 -1
  64. rapidtide/tests/utils.py +9 -8
  65. rapidtide/tidepoolTemplate.py +142 -38
  66. rapidtide/tidepoolTemplate_alt.py +165 -44
  67. rapidtide/tidepoolTemplate_big.py +189 -52
  68. rapidtide/util.py +1217 -118
  69. rapidtide/voxelData.py +684 -37
  70. rapidtide/wiener.py +136 -23
  71. rapidtide/wiener2.py +113 -7
  72. rapidtide/workflows/adjustoffset.py +105 -3
  73. rapidtide/workflows/aligntcs.py +85 -2
  74. rapidtide/workflows/applydlfilter.py +87 -10
  75. rapidtide/workflows/applyppgproc.py +540 -0
  76. rapidtide/workflows/atlasaverage.py +210 -47
  77. rapidtide/workflows/atlastool.py +100 -3
  78. rapidtide/workflows/calcSimFuncMap.py +288 -69
  79. rapidtide/workflows/calctexticc.py +201 -9
  80. rapidtide/workflows/ccorrica.py +101 -6
  81. rapidtide/workflows/cleanregressor.py +165 -31
  82. rapidtide/workflows/delayvar.py +171 -23
  83. rapidtide/workflows/diffrois.py +81 -3
  84. rapidtide/workflows/endtidalproc.py +144 -4
  85. rapidtide/workflows/fdica.py +195 -15
  86. rapidtide/workflows/filtnifti.py +70 -3
  87. rapidtide/workflows/filttc.py +74 -3
  88. rapidtide/workflows/fitSimFuncMap.py +202 -51
  89. rapidtide/workflows/fixtr.py +73 -3
  90. rapidtide/workflows/gmscalc.py +113 -3
  91. rapidtide/workflows/happy.py +801 -199
  92. rapidtide/workflows/happy2std.py +144 -12
  93. rapidtide/workflows/happy_parser.py +163 -23
  94. rapidtide/workflows/histnifti.py +118 -2
  95. rapidtide/workflows/histtc.py +84 -3
  96. rapidtide/workflows/linfitfilt.py +117 -4
  97. rapidtide/workflows/localflow.py +328 -28
  98. rapidtide/workflows/mergequality.py +79 -3
  99. rapidtide/workflows/niftidecomp.py +322 -18
  100. rapidtide/workflows/niftistats.py +174 -4
  101. rapidtide/workflows/pairproc.py +98 -4
  102. rapidtide/workflows/pairwisemergenifti.py +85 -2
  103. rapidtide/workflows/parser_funcs.py +1421 -40
  104. rapidtide/workflows/physiofreq.py +137 -11
  105. rapidtide/workflows/pixelcomp.py +207 -5
  106. rapidtide/workflows/plethquality.py +103 -21
  107. rapidtide/workflows/polyfitim.py +151 -11
  108. rapidtide/workflows/proj2flow.py +75 -2
  109. rapidtide/workflows/rankimage.py +111 -4
  110. rapidtide/workflows/rapidtide.py +368 -76
  111. rapidtide/workflows/rapidtide2std.py +98 -2
  112. rapidtide/workflows/rapidtide_parser.py +109 -9
  113. rapidtide/workflows/refineDelayMap.py +144 -33
  114. rapidtide/workflows/refineRegressor.py +675 -96
  115. rapidtide/workflows/regressfrommaps.py +161 -37
  116. rapidtide/workflows/resamplenifti.py +85 -3
  117. rapidtide/workflows/resampletc.py +91 -3
  118. rapidtide/workflows/retrolagtcs.py +99 -9
  119. rapidtide/workflows/retroregress.py +176 -26
  120. rapidtide/workflows/roisummarize.py +174 -5
  121. rapidtide/workflows/runqualitycheck.py +71 -3
  122. rapidtide/workflows/showarbcorr.py +149 -6
  123. rapidtide/workflows/showhist.py +86 -2
  124. rapidtide/workflows/showstxcorr.py +160 -3
  125. rapidtide/workflows/showtc.py +159 -3
  126. rapidtide/workflows/showxcorrx.py +190 -10
  127. rapidtide/workflows/showxy.py +185 -15
  128. rapidtide/workflows/simdata.py +264 -38
  129. rapidtide/workflows/spatialfit.py +77 -2
  130. rapidtide/workflows/spatialmi.py +250 -27
  131. rapidtide/workflows/spectrogram.py +305 -32
  132. rapidtide/workflows/synthASL.py +154 -3
  133. rapidtide/workflows/tcfrom2col.py +76 -2
  134. rapidtide/workflows/tcfrom3col.py +74 -2
  135. rapidtide/workflows/tidepool.py +2971 -130
  136. rapidtide/workflows/utils.py +19 -14
  137. rapidtide/workflows/utils_doc.py +293 -0
  138. rapidtide/workflows/variabilityizer.py +116 -3
  139. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/METADATA +10 -8
  140. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/RECORD +144 -128
  141. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/entry_points.txt +1 -0
  142. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/WHEEL +0 -0
  143. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/licenses/LICENSE +0 -0
  144. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/top_level.txt +0 -0
rapidtide/maskutil.py CHANGED
@@ -18,9 +18,11 @@
18
18
  #
19
19
  import bisect
20
20
  import logging
21
+ from typing import Any, Callable, Optional, Tuple, Union
21
22
 
22
23
  import numpy as np
23
24
  from nilearn import masking
25
+ from numpy.typing import ArrayLike, NDArray
24
26
  from sklearn.decomposition import PCA
25
27
 
26
28
  import rapidtide.io as tide_io
@@ -30,16 +32,121 @@ import rapidtide.stats as tide_stats
30
32
  LGR = logging.getLogger("GENERAL")
31
33
 
32
34
 
33
- def resampmask(themask, thetargetres):
35
+ def resampmask(themask: ArrayLike, thetargetres: float) -> NDArray:
36
+ """Resample a mask to a target resolution.
37
+
38
+ Parameters
39
+ ----------
40
+ themask : array_like
41
+ Input mask array to be resampled.
42
+ thetargetres : float
43
+ Target resolution for the resampled mask.
44
+
45
+ Returns
46
+ -------
47
+ NDArray
48
+ Resampled mask array with the specified target resolution.
49
+
50
+ Notes
51
+ -----
52
+ This function currently returns the input mask unchanged. A full implementation
53
+ would perform actual resampling operations to adjust the mask to the target
54
+ resolution.
55
+
56
+ Examples
57
+ --------
58
+ >>> import numpy as np
59
+ >>> mask = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
60
+ >>> resampled = resampmask(mask, 0.5)
61
+ >>> print(resampled)
62
+ [[0 1 0]
63
+ [1 1 1]
64
+ [0 1 0]]
65
+ """
34
66
  resampmask = themask
35
67
  return themask
36
68
 
37
69
 
38
- def makeepimask(nim):
70
+ def makeepimask(nim: Any) -> Any:
71
+ """Compute EPI mask from neuroimaging data.
72
+
73
+ This function computes an EPI (Echo Planar Imaging) mask from neuroimaging data
74
+ using the masking.compute_epi_mask function from nilearn.
75
+
76
+ Parameters
77
+ ----------
78
+ nim : Any
79
+ Neuroimaging data object (typically Nifti1Image or similar) from which
80
+ to compute the EPI mask. This can be a nibabel image object or array-like
81
+ data representing neuroimaging volumes.
82
+
83
+ Returns
84
+ -------
85
+ Any
86
+ EPI mask computed from the input neuroimaging data. The return type
87
+ depends on the underlying masking.compute_epi_mask implementation and
88
+ typically represents a binary mask image.
89
+
90
+ Notes
91
+ -----
92
+ This function is a wrapper around nilearn's masking.compute_epi_mask function
93
+ and is commonly used in neuroimaging preprocessing pipelines to automatically
94
+ generate brain masks from EPI functional MRI data.
95
+
96
+ Examples
97
+ --------
98
+ >>> import nibabel as nib
99
+ >>> from nilearn.masking import compute_epi_mask
100
+ >>> # Assuming 'img' is a nibabel image object
101
+ >>> mask = makeepimask(img)
102
+ >>> print(mask.shape)
103
+ """
39
104
  return masking.compute_epi_mask(nim)
40
105
 
41
106
 
42
- def maketmask(filename, timeaxis, maskvector, debug=False):
107
+ def maketmask(
108
+ filename: str, timeaxis: ArrayLike, maskvector: NDArray, debug: bool = False
109
+ ) -> NDArray:
110
+ """Create a temporal mask from time interval data.
111
+
112
+ This function reads time interval data from a file and generates a binary mask
113
+ vector indicating which time points should be included in analysis. The mask
114
+ can be generated from either a simple vector of nonzero values or from time
115
+ intervals specified by start and duration values.
116
+
117
+ Parameters
118
+ ----------
119
+ filename : str
120
+ Path to the file containing time interval data. The file should contain
121
+ either a single vector of values or two rows of data (start times and durations).
122
+ timeaxis : ArrayLike
123
+ Array of time points corresponding to the fMRI time series. Used to map
124
+ time intervals to specific time indices.
125
+ maskvector : NDArray
126
+ Pre-allocated array to store the resulting temporal mask. Should be the
127
+ same length as the fMRI time series.
128
+ debug : bool, optional
129
+ If True, enables debug logging output. Default is False.
130
+
131
+ Returns
132
+ -------
133
+ NDArray
134
+ Binary mask vector where 1.0 indicates time points to include and 0.0
135
+ indicates time points to exclude.
136
+
137
+ Notes
138
+ -----
139
+ The function handles two input formats:
140
+ 1. Single row: Each nonzero value indicates inclusion of the corresponding time point
141
+ 2. Two rows: First row contains start times, second row contains durations
142
+
143
+ Examples
144
+ --------
145
+ >>> import numpy as np
146
+ >>> timeaxis = np.arange(0, 100, 2) # 50 time points
147
+ >>> maskvector = np.zeros(50)
148
+ >>> mask = maketmask('time_intervals.txt', timeaxis, maskvector)
149
+ """
43
150
  inputdata = tide_io.readvecs(filename)
44
151
  theshape = np.shape(inputdata)
45
152
  if theshape[0] == 1:
@@ -61,16 +168,75 @@ def maketmask(filename, timeaxis, maskvector, debug=False):
61
168
 
62
169
 
63
170
  def readamask(
64
- maskfilename,
65
- nim_hdr,
66
- xsize,
67
- istext=False,
68
- valslist=None,
69
- thresh=None,
70
- maskname="the",
71
- tolerance=1.0e-3,
72
- debug=False,
73
- ):
171
+ maskfilename: str,
172
+ nim_hdr: Any,
173
+ xsize: int,
174
+ istext: bool = False,
175
+ valslist: Optional[list] = None,
176
+ thresh: Optional[float] = None,
177
+ maskname: str = "the",
178
+ tolerance: float = 1.0e-3,
179
+ debug: bool = False,
180
+ ) -> NDArray:
181
+ """
182
+ Read and process a mask file, returning a binary mask array.
183
+
184
+ This function reads a mask from either a text file or NIfTI format, applies
185
+ optional thresholding or value selection, and returns a binary mask array
186
+ compatible with the input data dimensions.
187
+
188
+ Parameters
189
+ ----------
190
+ maskfilename : str
191
+ Path to the mask file. Can be in text format (if `istext=True`) or NIfTI format.
192
+ nim_hdr : Any
193
+ Header information from the NIfTI file of the input data. Used for spatial
194
+ dimension matching.
195
+ xsize : int
196
+ Expected size of the first dimension of the mask array.
197
+ istext : bool, optional
198
+ If True, the mask is read as a text file. Default is False.
199
+ valslist : list of int, optional
200
+ List of values to include in the mask. If provided, only voxels matching
201
+ these values are set to 1. Default is None.
202
+ thresh : float, optional
203
+ Threshold value for binarizing the mask. If provided, voxels greater than
204
+ `thresh` are set to 1, others to 0. Default is None.
205
+ maskname : str, optional
206
+ Name of the mask for logging and error messages. Default is "the".
207
+ tolerance : float, optional
208
+ Tolerance for spatial dimension matching between the mask and input data.
209
+ Default is 1e-3.
210
+ debug : bool, optional
211
+ If True, print debug information. Default is False.
212
+
213
+ Returns
214
+ -------
215
+ NDArray
216
+ A binary mask array of type `uint16`, where 1 indicates included voxels
217
+ and 0 indicates excluded voxels.
218
+
219
+ Notes
220
+ -----
221
+ - If `istext=True`, the mask file is expected to contain numeric values
222
+ arranged in a single column or row.
223
+ - If `thresh` is provided, the mask is binarized based on the threshold.
224
+ - If `valslist` is provided, only voxels matching values in the list are set to 1.
225
+ - The function raises a `ValueError` if spatial dimensions of the mask and
226
+ input data do not match within the specified tolerance.
227
+
228
+ Examples
229
+ --------
230
+ >>> mask = readamask(
231
+ ... maskfilename="mask.nii.gz",
232
+ ... nim_hdr=hdr,
233
+ ... xsize=64,
234
+ ... thresh=0.5,
235
+ ... maskname="brain"
236
+ ... )
237
+ >>> print(mask.shape)
238
+ (64, 64, 64)
239
+ """
74
240
  LGR.debug(f"readamask called with filename: {maskfilename} vals: {valslist}")
75
241
  if debug:
76
242
  print("getmaskset:")
@@ -108,19 +274,86 @@ def readamask(
108
274
 
109
275
 
110
276
  def getmaskset(
111
- maskname,
112
- includename,
113
- includevals,
114
- excludename,
115
- excludevals,
116
- datahdr,
117
- numspatiallocs,
118
- extramask=None,
119
- extramaskthresh=0.1,
120
- istext=False,
121
- tolerance=1.0e-3,
122
- debug=False,
123
- ):
277
+ maskname: str,
278
+ includename: Optional[str],
279
+ includevals: Optional[list],
280
+ excludename: Optional[str],
281
+ excludevals: Optional[list],
282
+ datahdr: Any,
283
+ numspatiallocs: int,
284
+ extramask: Optional[str] = None,
285
+ extramaskthresh: float = 0.1,
286
+ istext: bool = False,
287
+ tolerance: float = 1.0e-3,
288
+ debug: bool = False,
289
+ ) -> Tuple[Optional[NDArray], Optional[NDArray], Optional[NDArray]]:
290
+ """
291
+ Construct and return masks for inclusion, exclusion, and an additional mask.
292
+
293
+ This function builds masks based on provided parameters, including optional
294
+ inclusion and exclusion criteria, as well as an extra mask. It performs
295
+ validation to ensure that the resulting masks are not empty or overly restrictive.
296
+
297
+ Parameters
298
+ ----------
299
+ maskname : str
300
+ Name of the mask being constructed, used for logging and labeling.
301
+ includename : str, optional
302
+ File name or identifier for the mask to be used for inclusion.
303
+ includevals : list of float, optional
304
+ List of values to include in the inclusion mask. If ``None``, all values
305
+ are included.
306
+ excludename : str, optional
307
+ File name or identifier for the mask to be used for exclusion.
308
+ excludevals : list of float, optional
309
+ List of values to exclude from the exclusion mask. If ``None``, no values
310
+ are excluded.
311
+ datahdr : Any
312
+ Header information for the data, passed to mask reading functions.
313
+ numspatiallocs : int
314
+ Number of spatial locations in the data.
315
+ extramask : str, optional
316
+ File name or identifier for an additional mask to be applied.
317
+ extramaskthresh : float, default=0.1
318
+ Threshold value for the extra mask, used when reading the mask.
319
+ istext : bool, default=False
320
+ If ``True``, treat input files as text-based.
321
+ tolerance : float, default=1e-03
322
+ Tolerance for floating-point comparisons when reading masks.
323
+ debug : bool, default=False
324
+ If ``True``, print debug information during execution.
325
+
326
+ Returns
327
+ -------
328
+ tuple of (Optional[NDArray], Optional[NDArray], Optional[NDArray])
329
+ A tuple containing:
330
+ - ``internalincludemask``: The inclusion mask, reshaped to ``numspatiallocs``.
331
+ - ``internalexcludemask``: The exclusion mask, reshaped to ``numspatiallocs``.
332
+ - ``internalextramask``: The extra mask, reshaped to ``numspatiallocs``.
333
+
334
+ Notes
335
+ -----
336
+ - If both inclusion and exclusion masks are specified, the function ensures
337
+ that at least one voxel remains after applying both masks.
338
+ - If an extra mask is specified, it is applied in combination with the inclusion
339
+ and exclusion masks.
340
+ - The function raises a ``ValueError`` if any of the resulting masks are invalid:
341
+ e.g., empty inclusion mask, or masks that leave no voxels.
342
+
343
+ Examples
344
+ --------
345
+ >>> maskname = "brain"
346
+ >>> includename = "brain_include.nii"
347
+ >>> includevals = [1]
348
+ >>> excludename = "ventricles.nii"
349
+ >>> excludevals = [1]
350
+ >>> datahdr = header
351
+ >>> numspatiallocs = 10000
352
+ >>> includemask, excludemask, extramask = getmaskset(
353
+ ... maskname, includename, includevals, excludename, excludevals,
354
+ ... datahdr, numspatiallocs
355
+ ... )
356
+ """
124
357
  internalincludemask = None
125
358
  internalexcludemask = None
126
359
  internalextramask = None
@@ -204,17 +437,81 @@ def getmaskset(
204
437
 
205
438
 
206
439
  def getregionsignal(
207
- indata,
208
- filter=None,
209
- Fs=1.0,
210
- includemask=None,
211
- excludemask=None,
212
- signalgenmethod="sum",
213
- pcacomponents=0.8,
214
- signame="global mean",
215
- rt_floatset=np.float64,
216
- debug=False,
217
- ):
440
+ indata: NDArray,
441
+ filter: Optional[Any] = None,
442
+ Fs: float = 1.0,
443
+ includemask: Optional[NDArray] = None,
444
+ excludemask: Optional[NDArray] = None,
445
+ signalgenmethod: str = "sum",
446
+ pcacomponents: Union[float, str] = 0.8,
447
+ signame: str = "global mean",
448
+ rt_floattype: type = np.float64,
449
+ debug: bool = False,
450
+ ) -> Tuple[NDArray, NDArray]:
451
+ """
452
+ Compute a global signal from a 2D array of voxel data using specified methods.
453
+
454
+ This function computes a global signal from input data by applying optional masking,
455
+ and then combining voxel signals using one of several methods: sum, meanscale, PCA,
456
+ or random. The resulting signal can be filtered and normalized.
457
+
458
+ Parameters
459
+ ----------
460
+ indata : ndarray
461
+ Input 2D array of shape (n_voxels, n_timepoints) containing voxel time series.
462
+ filter : optional
463
+ A filter object with an `apply` method to apply to the computed signal.
464
+ Default is None.
465
+ Fs : float, optional
466
+ Sampling frequency (Hz) used for filtering. Default is 1.0.
467
+ includemask : ndarray, optional
468
+ Binary mask to include specific voxels. Voxels not included will be ignored.
469
+ Default is None.
470
+ excludemask : ndarray, optional
471
+ Binary mask to exclude specific voxels. Voxels marked as 1 will be excluded.
472
+ Default is None.
473
+ signalgenmethod : str, optional
474
+ Method used to generate the global signal. Options are:
475
+ - "sum": Mean of selected voxels (default).
476
+ - "meanscale": Scale each voxel by its mean before averaging.
477
+ - "pca": Use PCA to reduce dimensionality and compute signal.
478
+ - "random": Generate a random signal.
479
+ Default is "sum".
480
+ pcacomponents : float or str, optional
481
+ Number of PCA components to use. If float, specifies number of components;
482
+ if "mle", uses maximum likelihood estimation. Default is 0.8.
483
+ signame : str, optional
484
+ Name of the signal for logging purposes. Default is "global mean".
485
+ rt_floattype : type, optional
486
+ Data type for internal computations. Default is np.float64.
487
+ debug : bool, optional
488
+ If True, print debugging information. Default is False.
489
+
490
+ Returns
491
+ -------
492
+ tuple of ndarray
493
+ A tuple containing:
494
+ - normalized_global_signal : ndarray
495
+ The normalized global signal of shape (n_timepoints,).
496
+ - final_mask : ndarray
497
+ The final voxel mask used in computation, shape (n_voxels,).
498
+
499
+ Notes
500
+ -----
501
+ - The function applies `includemask` and `excludemask` sequentially to define
502
+ which voxels are used in signal computation.
503
+ - For "pca" method, PCA is applied to the transposed scaled voxel data.
504
+ - If filtering is applied, the signal is filtered in-place using the provided filter.
505
+
506
+ Examples
507
+ --------
508
+ >>> import numpy as np
509
+ >>> from sklearn.decomposition import PCA
510
+ >>> indata = np.random.rand(100, 50)
511
+ >>> signal, mask = getregionsignal(indata, signalgenmethod="sum")
512
+ >>> print(signal.shape)
513
+ (50,)
514
+ """
218
515
  # Start with all voxels
219
516
  themask = indata[:, 0] * 0 + 1
220
517
 
@@ -225,7 +522,7 @@ def getregionsignal(
225
522
  themask = themask * (1 - excludemask)
226
523
 
227
524
  # combine all the voxels using one of the three methods
228
- globalmean = rt_floatset(indata[0, :])
525
+ globalmean = (indata[0, :]).astype(rt_floattype)
229
526
  thesize = np.shape(themask)
230
527
  numvoxelsused = int(np.sum(np.where(themask > 0.0, 1, 0)))
231
528
  selectedvoxels = indata[np.where(themask > 0.0), :][0]
@@ -235,36 +532,49 @@ def getregionsignal(
235
532
  if signalgenmethod == "sum":
236
533
  globalmean = np.mean(selectedvoxels, axis=0)
237
534
  globalmean -= np.mean(globalmean)
535
+ if debug:
536
+ print("Sum method")
537
+ print(f"getregionsignal: {globalmean.shape=}")
238
538
  elif signalgenmethod == "meanscale":
239
539
  themean = np.mean(indata, axis=1)
240
540
  for vox in range(0, thesize[0]):
241
541
  if themask[vox] > 0.0:
242
542
  if themean[vox] != 0.0:
243
543
  globalmean += indata[vox, :] / themean[vox] - 1.0
544
+ if debug:
545
+ print("Meanscale method")
546
+ print(f"getregionsignal: {globalmean.shape=}")
244
547
  elif signalgenmethod == "pca":
245
548
  themean = np.mean(indata, axis=1)
246
549
  thevar = np.var(indata, axis=1)
247
- scaledvoxels = selectedvoxels * 0.0
550
+ scaledvoxels = np.zeros_like(selectedvoxels)
248
551
  for vox in range(0, selectedvoxels.shape[0]):
249
552
  scaledvoxels[vox, :] = selectedvoxels[vox, :] - themean[vox]
250
553
  if thevar[vox] > 0.0:
251
554
  scaledvoxels[vox, :] = selectedvoxels[vox, :] / thevar[vox]
252
555
  try:
253
- thefit = PCA(n_components=pcacomponents).fit(np.transpose(scaledvoxels))
556
+ thefit = PCA(n_components=pcacomponents).fit(scaledvoxels)
254
557
  except ValueError:
255
558
  if pcacomponents == "mle":
256
559
  LGR.warning("mle estimation failed - falling back to pcacomponents=0.8")
257
- thefit = PCA(n_components=0.8).fit(np.transpose(scaledvoxels))
560
+ thefit = PCA(n_components=0.8).fit(scaledvoxels)
258
561
  else:
259
562
  raise ValueError("unhandled math exception in PCA refinement - exiting")
260
563
 
261
564
  varex = 100.0 * np.cumsum(thefit.explained_variance_ratio_)[len(thefit.components_) - 1]
262
- thetransform = thefit.transform(np.transpose(scaledvoxels))
263
- if debug:
264
- print(f"getregionsignal: {thetransform.shape=}")
265
- globalmean = np.mean(thetransform, axis=0)
565
+ # thetransform = thefit.transform(np.transpose(scaledvoxels))
566
+ thetransform = thefit.transform(scaledvoxels)
567
+ cleanedvoxels = thefit.inverse_transform(thetransform) * thevar[:, None]
568
+ globalmean = np.mean(cleanedvoxels, axis=0)
266
569
  globalmean -= np.mean(globalmean)
267
570
  if debug:
571
+ print("PCA method")
572
+ print(
573
+ f"getregionsignal: {cleanedvoxels.shape=}, {thetransform.shape=}, {scaledvoxels.shape=}, {globalmean.shape=}"
574
+ )
575
+ print(
576
+ f"getregionsignal: {(thefit.components_).shape=}, {thefit.n_samples_=}, {thefit.n_features_in_=}"
577
+ )
268
578
  print(f"getregionsignal: {varex=}")
269
579
  LGR.info(
270
580
  f"Using {len(thefit.components_)} component(s), accounting for "
@@ -272,6 +582,9 @@ def getregionsignal(
272
582
  )
273
583
  elif signalgenmethod == "random":
274
584
  globalmean = np.random.standard_normal(size=len(globalmean))
585
+ if debug:
586
+ print("Random method")
587
+ print(f"getregionsignal: {globalmean.shape=}")
275
588
  else:
276
589
  raise ValueError(f"illegal signal generation method: {signalgenmethod}")
277
590
  LGR.info(f"used {numvoxelsused} voxels to calculate {signame} signal")
@@ -283,22 +596,95 @@ def getregionsignal(
283
596
 
284
597
 
285
598
  def saveregionaltimeseries(
286
- tcdesc,
287
- tcname,
288
- fmridata,
289
- includemask,
290
- fmrifreq,
291
- outputname,
292
- filter=None,
293
- initfile=False,
294
- excludemask=None,
295
- filedesc="regional",
296
- suffix="",
297
- signalgenmethod="sum",
298
- pcacomponents=0.8,
299
- rt_floatset=np.float64,
300
- debug=False,
301
- ):
599
+ tcdesc: str,
600
+ tcname: str,
601
+ fmridata: NDArray,
602
+ includemask: NDArray,
603
+ fmrifreq: float,
604
+ outputname: str,
605
+ filter: Optional[Any] = None,
606
+ initfile: bool = False,
607
+ excludemask: Optional[NDArray] = None,
608
+ filedesc: str = "regional",
609
+ suffix: str = "",
610
+ signalgenmethod: str = "sum",
611
+ pcacomponents: Union[float, str] = 0.8,
612
+ rt_floattype: type = np.float64,
613
+ debug: bool = False,
614
+ ) -> Tuple[NDArray, NDArray]:
615
+ """
616
+ Save regional time series data from fMRI data to a BIDS-compatible TSV file.
617
+
618
+ This function extracts regional signal time courses from fMRI data using the
619
+ specified masking and filtering parameters, then writes the results to a
620
+ BIDS-style TSV file. The function supports various signal generation methods
621
+ and can handle both inclusive and exclusive masking.
622
+
623
+ Parameters
624
+ ----------
625
+ tcdesc : str
626
+ Description of the time course for the output file header
627
+ tcname : str
628
+ Name of the time course to be used in the output file column header
629
+ fmridata : NDArray
630
+ 4D fMRI data array (time x x x y z)
631
+ includemask : NDArray
632
+ Binary mask defining regions to include in the analysis
633
+ fmrifreq : float
634
+ Sampling frequency of the fMRI data (Hz)
635
+ outputname : str
636
+ Base name for the output file (without extension)
637
+ filter : Optional[Any], default=None
638
+ Filter to apply to the time series data
639
+ initfile : bool, default=False
640
+ If True, initializes a new file; if False, appends to existing file
641
+ excludemask : Optional[NDArray], default=None
642
+ Binary mask defining regions to exclude from the analysis
643
+ filedesc : str, default="regional"
644
+ Description string for the output file name
645
+ suffix : str, default=""
646
+ Suffix to append to the column name in the output file
647
+ signalgenmethod : str, default="sum"
648
+ Method for generating the signal ('sum', 'meanscale', 'pca', or 'random')
649
+ pcacomponents : Union[float, str], default=0.8
650
+ Number of PCA components to use (or fraction of variance explained)
651
+ rt_floattype : type, default=np.float64
652
+ Data type for floating point operations
653
+ debug : bool, default=False
654
+ If True, enables debug mode for additional logging
655
+
656
+ Returns
657
+ -------
658
+ Tuple[NDArray, NDArray]
659
+ Tuple containing:
660
+ - thetimecourse : NDArray
661
+ The extracted time course data
662
+ - themask : NDArray
663
+ The mask used for extraction
664
+
665
+ Notes
666
+ -----
667
+ The function uses `getregionsignal` to compute the regional signal and
668
+ `tide_io.writebidstsv` to write the output file in BIDS TSV format.
669
+ The output file name follows the pattern:
670
+ {outputname}_desc-{filedesc}_timeseries.tsv
671
+
672
+ Examples
673
+ --------
674
+ >>> import numpy as np
675
+ >>> fmri_data = np.random.rand(100, 10, 10, 10)
676
+ >>> mask = np.ones((10, 10, 10))
677
+ >>> timecourse, mask_used = saveregionaltimeseries(
678
+ ... tcdesc="mean_signal",
679
+ ... tcname="signal",
680
+ ... fmridata=fmri_data,
681
+ ... includemask=mask,
682
+ ... fmrifreq=2.0,
683
+ ... outputname="sub-01_task-rest",
684
+ ... filter=None,
685
+ ... initfile=True
686
+ ... )
687
+ """
302
688
  thetimecourse, themask = getregionsignal(
303
689
  fmridata,
304
690
  filter=filter,
@@ -308,7 +694,7 @@ def saveregionaltimeseries(
308
694
  signalgenmethod=signalgenmethod,
309
695
  pcacomponents=pcacomponents,
310
696
  signame=tcdesc,
311
- rt_floatset=rt_floatset,
697
+ rt_floattype=rt_floattype,
312
698
  debug=debug,
313
699
  )
314
700
  tide_io.writebidstsv(