rapidtide 3.0.11__py3-none-any.whl → 3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1049 -46
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +191 -40
  7. rapidtide/calcsimfunc.py +245 -42
  8. rapidtide/correlate.py +1210 -393
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +19 -1
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +25 -3
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/decorators.py +91 -0
  23. rapidtide/dlfilter.py +2225 -108
  24. rapidtide/dlfiltertorch.py +4843 -0
  25. rapidtide/externaltools.py +327 -12
  26. rapidtide/fMRIData_class.py +79 -40
  27. rapidtide/filter.py +1899 -810
  28. rapidtide/fit.py +2004 -574
  29. rapidtide/genericmultiproc.py +93 -18
  30. rapidtide/happy_supportfuncs.py +2044 -171
  31. rapidtide/helper_classes.py +584 -43
  32. rapidtide/io.py +2363 -370
  33. rapidtide/linfitfiltpass.py +341 -75
  34. rapidtide/makelaggedtcs.py +211 -20
  35. rapidtide/maskutil.py +423 -53
  36. rapidtide/miscmath.py +827 -121
  37. rapidtide/multiproc.py +210 -22
  38. rapidtide/patchmatch.py +234 -33
  39. rapidtide/peakeval.py +32 -30
  40. rapidtide/ppgproc.py +2203 -0
  41. rapidtide/qualitycheck.py +352 -39
  42. rapidtide/refinedelay.py +422 -57
  43. rapidtide/refineregressor.py +498 -184
  44. rapidtide/resample.py +671 -185
  45. rapidtide/scripts/applyppgproc.py +28 -0
  46. rapidtide/simFuncClasses.py +1052 -77
  47. rapidtide/simfuncfit.py +260 -46
  48. rapidtide/stats.py +540 -238
  49. rapidtide/tests/happycomp +9 -0
  50. rapidtide/tests/test_dlfiltertorch.py +627 -0
  51. rapidtide/tests/test_findmaxlag.py +24 -8
  52. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  53. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  54. rapidtide/tests/test_fullrunhappy_v3.py +1 -0
  55. rapidtide/tests/test_fullrunhappy_v4.py +2 -2
  56. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  57. rapidtide/tests/test_simroundtrip.py +8 -8
  58. rapidtide/tests/utils.py +9 -8
  59. rapidtide/tidepoolTemplate.py +142 -38
  60. rapidtide/tidepoolTemplate_alt.py +165 -44
  61. rapidtide/tidepoolTemplate_big.py +189 -52
  62. rapidtide/util.py +1217 -118
  63. rapidtide/voxelData.py +684 -37
  64. rapidtide/wiener.py +19 -12
  65. rapidtide/wiener2.py +113 -7
  66. rapidtide/wiener_doc.py +255 -0
  67. rapidtide/workflows/adjustoffset.py +105 -3
  68. rapidtide/workflows/aligntcs.py +85 -2
  69. rapidtide/workflows/applydlfilter.py +87 -10
  70. rapidtide/workflows/applyppgproc.py +522 -0
  71. rapidtide/workflows/atlasaverage.py +210 -47
  72. rapidtide/workflows/atlastool.py +100 -3
  73. rapidtide/workflows/calcSimFuncMap.py +294 -64
  74. rapidtide/workflows/calctexticc.py +201 -9
  75. rapidtide/workflows/ccorrica.py +97 -4
  76. rapidtide/workflows/cleanregressor.py +168 -29
  77. rapidtide/workflows/delayvar.py +163 -10
  78. rapidtide/workflows/diffrois.py +81 -3
  79. rapidtide/workflows/endtidalproc.py +144 -4
  80. rapidtide/workflows/fdica.py +195 -15
  81. rapidtide/workflows/filtnifti.py +70 -3
  82. rapidtide/workflows/filttc.py +74 -3
  83. rapidtide/workflows/fitSimFuncMap.py +206 -48
  84. rapidtide/workflows/fixtr.py +73 -3
  85. rapidtide/workflows/gmscalc.py +113 -3
  86. rapidtide/workflows/happy.py +801 -199
  87. rapidtide/workflows/happy2std.py +144 -12
  88. rapidtide/workflows/happy_parser.py +138 -9
  89. rapidtide/workflows/histnifti.py +118 -2
  90. rapidtide/workflows/histtc.py +84 -3
  91. rapidtide/workflows/linfitfilt.py +117 -4
  92. rapidtide/workflows/localflow.py +328 -28
  93. rapidtide/workflows/mergequality.py +79 -3
  94. rapidtide/workflows/niftidecomp.py +322 -18
  95. rapidtide/workflows/niftistats.py +174 -4
  96. rapidtide/workflows/pairproc.py +88 -2
  97. rapidtide/workflows/pairwisemergenifti.py +85 -2
  98. rapidtide/workflows/parser_funcs.py +1421 -40
  99. rapidtide/workflows/physiofreq.py +137 -11
  100. rapidtide/workflows/pixelcomp.py +208 -5
  101. rapidtide/workflows/plethquality.py +103 -21
  102. rapidtide/workflows/polyfitim.py +151 -11
  103. rapidtide/workflows/proj2flow.py +75 -2
  104. rapidtide/workflows/rankimage.py +111 -4
  105. rapidtide/workflows/rapidtide.py +272 -15
  106. rapidtide/workflows/rapidtide2std.py +98 -2
  107. rapidtide/workflows/rapidtide_parser.py +109 -9
  108. rapidtide/workflows/refineDelayMap.py +143 -33
  109. rapidtide/workflows/refineRegressor.py +682 -93
  110. rapidtide/workflows/regressfrommaps.py +152 -31
  111. rapidtide/workflows/resamplenifti.py +85 -3
  112. rapidtide/workflows/resampletc.py +91 -3
  113. rapidtide/workflows/retrolagtcs.py +98 -6
  114. rapidtide/workflows/retroregress.py +165 -9
  115. rapidtide/workflows/roisummarize.py +173 -5
  116. rapidtide/workflows/runqualitycheck.py +71 -3
  117. rapidtide/workflows/showarbcorr.py +147 -4
  118. rapidtide/workflows/showhist.py +86 -2
  119. rapidtide/workflows/showstxcorr.py +160 -3
  120. rapidtide/workflows/showtc.py +159 -3
  121. rapidtide/workflows/showxcorrx.py +184 -4
  122. rapidtide/workflows/showxy.py +185 -15
  123. rapidtide/workflows/simdata.py +262 -36
  124. rapidtide/workflows/spatialfit.py +77 -2
  125. rapidtide/workflows/spatialmi.py +251 -27
  126. rapidtide/workflows/spectrogram.py +305 -32
  127. rapidtide/workflows/synthASL.py +154 -3
  128. rapidtide/workflows/tcfrom2col.py +76 -2
  129. rapidtide/workflows/tcfrom3col.py +74 -2
  130. rapidtide/workflows/tidepool.py +2969 -130
  131. rapidtide/workflows/utils.py +19 -14
  132. rapidtide/workflows/utils_doc.py +293 -0
  133. rapidtide/workflows/variabilityizer.py +116 -3
  134. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/METADATA +3 -2
  135. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/RECORD +139 -122
  136. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
  137. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
  138. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
  139. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
rapidtide/wiener.py CHANGED
@@ -17,13 +17,20 @@
 #
 #
 import numpy as np
+from numpy.typing import NDArray
 from tqdm import tqdm
 
 import rapidtide.fit as tide_fit
 import rapidtide.multiproc as tide_multiproc
 
 
-def _procOneVoxelWiener(vox, lagtc, inittc, rt_floatset=np.float64, rt_floattype="float64"):
+def _procOneVoxelWiener(
+    vox: int,
+    lagtc: NDArray,
+    inittc: NDArray,
+    rt_floatset: type = np.float64,
+    rt_floattype: str = "float64",
+) -> tuple[int, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]:
     thefit, R2 = tide_fit.mlregress(lagtc, inittc)
     fitcoff = rt_floatset(thefit[0, 1])
     datatoremove = rt_floatset(fitcoff * lagtc)
@@ -40,17 +47,17 @@ def _procOneVoxelWiener(vox, lagtc, inittc, rt_floatset=np.float64, rt_floattype
 
 
 def wienerpass(
-    numspatiallocs,
-    fmri_data,
-    threshval,
-    lagtc,
-    optiondict,
-    wienerdeconv,
-    wpeak,
-    resampref_y,
-    rt_floatset=np.float64,
-    rt_floattype="float64",
-):
+    numspatiallocs: int,
+    fmri_data: NDArray,
+    threshval: float,
+    lagtc: NDArray,
+    optiondict: dict,
+    wienerdeconv: NDArray,
+    wpeak: NDArray,
+    resampref_y: NDArray,
+    rt_floatset: type = np.float64,
+    rt_floattype: str = "float64",
+) -> int:
     rt_floatset = (rt_floatset,)
     rt_floattype = rt_floattype
     inputshape = np.shape(fmri_data)
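Note: the function annotated above fits each voxel's timecourse against its lagged regressor and removes the fitted component. Below is a minimal numpy-only sketch of that fit-and-remove step; it is illustrative, stands in for `tide_fit.mlregress`, and is not rapidtide code.

```python
# Illustrative stand-in for the fit-and-remove step wrapped by _procOneVoxelWiener.
# Plain numpy least squares; rapidtide itself uses tide_fit.mlregress.
import numpy as np

rng = np.random.default_rng(0)
lagtc = rng.standard_normal(200)                              # lagged regressor
inittc = 3.0 + 2.0 * lagtc + 0.1 * rng.standard_normal(200)   # voxel timecourse

A = np.column_stack([np.ones_like(lagtc), lagtc])             # [intercept, slope] design
(intercept, fitcoff), *_ = np.linalg.lstsq(A, inittc, rcond=None)

datatoremove = fitcoff * lagtc                                # fitted lagged component
residual = inittc - datatoremove                              # "filtered" timecourse
fitted = intercept + datatoremove
R2 = 1.0 - np.sum((inittc - fitted) ** 2) / np.sum((inittc - inittc.mean()) ** 2)
print(round(float(fitcoff), 2), round(float(R2), 2))          # ~2.0, ~1.0
```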
rapidtide/wiener2.py CHANGED
@@ -30,6 +30,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 from matplotlib.backends.backend_pdf import PdfPages
 from numpy.fft import fft, ifft, ifftshift
+from numpy.typing import NDArray
 
 plt.rcParams.update({"font.size": 6})
 
@@ -43,8 +44,42 @@ lambd_est = 1e-3 # estimated noise lev
 ##########################
 
 
-def gen_son(length):
-    "Generate a synthetic un-reverberated 'sound event' template"
+def gen_son(length: int) -> NDArray:
+    """
+    Generate a synthetic un-reverberated 'sound event' template.
+
+    This function creates a synthetic sound template by generating white noise,
+    integrating it, applying an envelope, and normalizing the result.
+
+    Parameters
+    ----------
+    length : int
+        The length of the output array in samples.
+
+    Returns
+    -------
+    NDArray
+        A normalized synthetic sound event template of shape (length,) containing
+        floating point values.
+
+    Notes
+    -----
+    The generated sound template follows these steps:
+    1. Generate white noise using random.randn
+    2. Integrate the noise using cumulative sum
+    3. Apply a triangular envelope with 12.5% attack time
+    4. Normalize the result to unit energy
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> son = gen_son(1000)
+    >>> print(son.shape)
+    (1000,)
+    >>> print(f"Energy: {np.sum(son * son):.2f}")
+    Energy: 1.00
+    """
+    # "Generate a synthetic un-reverberated 'sound event' template"
     # (whitenoise -> integrate -> envelope -> normalise)
     son = np.cumsum(np.random.randn(length))
     # apply envelope
@@ -55,8 +90,43 @@ def gen_son(length):
     return son
 
 
-def gen_ir(length):
-    "Generate a synthetic impulse response"
+def gen_ir(length: int) -> NDArray:
+    """
+    Generate a synthetic impulse response.
+
+    This function creates a synthetic impulse response with a quiet tail, attack envelope,
+    direct signal component, and early reflection spikes. The resulting impulse response
+    is normalized to unit energy.
+
+    Parameters
+    ----------
+    length : int
+        The length of the impulse response array to generate.
+
+    Returns
+    -------
+    NDArray
+        A normalized numpy array of shape (length,) representing the synthetic impulse response.
+
+    Notes
+    -----
+    The generated impulse response includes:
+    - A quiet tail with random noise
+    - An attack envelope that rises from 0.1 to 1 and then falls back to 0.1
+    - A direct signal component at index 5 with amplitude 1
+    - 10 early reflection spikes with random positions and amplitudes
+    - Normalization to unit energy (L2 norm equals 1)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> ir = gen_ir(100)
+    >>> print(ir.shape)
+    (100,)
+    >>> print(np.isclose(np.sum(ir * ir), 1.0))
+    True
+    """
+    # "Generate a synthetic impulse response"
     # First we generate a quietish tail
     son = np.random.randn(length)
     attacklen = int(length // 2)
@@ -73,8 +143,44 @@ def gen_ir(length):
     return son
 
 
-def wiener_deconvolution(signal, kernel, lambd):
-    "lambd is the SNR"
+def wiener_deconvolution(signal: NDArray, kernel: NDArray, lambd: float) -> NDArray:
+    """
+    Perform Wiener deconvolution on a signal.
+
+    Wiener deconvolution is a method for reversing the effects of convolution
+    in the presence of noise. It uses a regularization parameter to balance
+    between deconvolution accuracy and noise amplification.
+
+    Parameters
+    ----------
+    signal : numpy.ndarray
+        Input signal to be deconvolved, assumed to be 1D.
+    kernel : numpy.ndarray
+        Convolution kernel (point spread function), assumed to be 1D.
+    lambd : float
+        Regularization parameter (signal-to-noise ratio). Higher values
+        result in more smoothing and less noise amplification.
+
+    Returns
+    -------
+    numpy.ndarray
+        Deconvolved signal with same length as input signal.
+
+    Notes
+    -----
+    The function zero-pads the kernel to match the signal length before
+    performing frequency domain operations. The Wiener filter is applied
+    in the frequency domain using the formula:
+        output = real(ifft(fft(signal) * conj(H) / (|H|² + λ²)))
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> signal = np.array([1, 2, 3, 2, 1])
+    >>> kernel = np.array([1, 0.5, 0.25])
+    >>> result = wiener_deconvolution(signal, kernel, lambd=0.1)
+    """
+    # "lambd is the SNR"
     kernel = np.hstack(
         (kernel, np.zeros(len(signal) - len(kernel)))
     )  # zero pad the kernel to same length
@@ -84,7 +190,7 @@ def wiener_deconvolution(signal, kernel, lambd):
 
 
 if __name__ == "__main__":
-    "simple test: get one soundtype and one impulse response, convolve them, deconvolve them, and check the result (plot it!)"
+    # "simple test: get one soundtype and one impulse response, convolve them, deconvolve them, and check the result (plot it!)"
     son = gen_son(sonlen)
     ir = gen_ir(irlen)
     obs = np.convolve(son, ir, mode="full")
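Note: the docstring added to `wiener_deconvolution` spells out the frequency-domain formula. The following self-contained sketch applies that same formula with a round-trip check in the spirit of the module's `__main__` test; it is illustrative only, and the helper name and toy signals are not from rapidtide.

```python
# Frequency-domain Wiener deconvolution as documented above:
# out = real(ifft(fft(signal) * conj(H) / (|H|^2 + lambd^2))), kernel zero-padded.
import numpy as np
from numpy.fft import fft, ifft

def wiener_deconvolve(signal, kernel, lambd):
    kernel = np.hstack((kernel, np.zeros(len(signal) - len(kernel))))  # zero pad
    H = fft(kernel)
    return np.real(ifft(fft(signal) * np.conj(H) / (H * np.conj(H) + lambd**2)))

son = np.sin(np.linspace(0.0, 20.0, 256))              # toy "source" event
ir = np.array([1.0, 0.5, 0.25])                        # toy impulse response
obs = np.convolve(son, ir, mode="full")                # observed = source * ir
recovered = wiener_deconvolve(obs, ir, lambd=1e-3)[: len(son)]
print(np.allclose(recovered, son, atol=1e-3))          # True: round trip recovers son
```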
rapidtide/wiener_doc.py ADDED
@@ -0,0 +1,255 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016-2025 Blaise Frederick
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+import numpy as np
+from numpy.typing import NDArray
+from tqdm import tqdm
+
+import rapidtide.fit as tide_fit
+import rapidtide.multiproc as tide_multiproc
+
+
+def _procOneVoxelWiener(
+    vox: int,
+    lagtc: NDArray,
+    inittc: NDArray,
+    rt_floatset: type = np.float64,
+    rt_floattype: str = "float64",
+) -> tuple[int, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]:
+    """
+    Perform Wiener filter processing on a single voxel time series.
+
+    This function applies a Wiener filter to remove the lagged component from
+    the initial time course, returning both the filtered and unfiltered results
+    along with fitting statistics.
+
+    Parameters
+    ----------
+    vox : int
+        Voxel index identifier
+    lagtc : NDArray
+        Lagged time course data (input signal)
+    inittc : NDArray
+        Initial time course data (target signal)
+    rt_floatset : type, optional
+        Real-time float type for output arrays, default is np.float64
+    rt_floattype : str, optional
+        String representation of the real-time float type, default is "float64"
+
+    Returns
+    -------
+    tuple[int, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]
+        A tuple containing:
+        - vox (int): Input voxel index
+        - intercept (NDArray): Regression intercept term
+        - sqrt_R2 (NDArray): Square root of coefficient of determination
+        - R2 (NDArray): Coefficient of determination
+        - fitcoff (NDArray): Fitting coefficient
+        - ratio (NDArray): Ratio of slope to intercept
+        - datatoremove (NDArray): Data to be removed (filtered signal)
+        - residual (NDArray): Residual signal (unfiltered data)
+
+    Notes
+    -----
+    This function uses maximum likelihood regression to estimate the relationship
+    between lagged and initial time courses, then applies the Wiener filter
+    to remove the lagged component from the initial signal.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> lagtc = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> inittc = np.array([2.0, 4.0, 6.0, 8.0])
+    >>> result = _procOneVoxelWiener(0, lagtc, inittc)
+    >>> print(result[0])  # voxel index
+    0
+    >>> print(result[4])  # fitting coefficient
+    2.0
+    """
+    thefit, R2 = tide_fit.mlregress(lagtc, inittc)
+    fitcoff = rt_floatset(thefit[0, 1])
+    datatoremove = rt_floatset(fitcoff * lagtc)
+    return (
+        vox,
+        rt_floatset(thefit[0, 0]),
+        rt_floatset(np.sqrt(R2)),
+        rt_floatset(R2),
+        fitcoff,
+        rt_floatset(thefit[0, 1] / thefit[0, 0]),
+        datatoremove,
+        rt_floatset(inittc - datatoremove),
+    )
+
+
+def wienerpass(
+    numspatiallocs: int,
+    fmri_data: NDArray,
+    threshval: float,
+    lagtc: NDArray,
+    optiondict: dict,
+    wienerdeconv: NDArray,
+    wpeak: NDArray,
+    resampref_y: NDArray,
+    rt_floatset: type = np.float64,
+    rt_floattype: str = "float64",
+) -> int:
+    """
+    Perform Wiener deconvolution on fMRI data voxels.
+
+    This function applies Wiener deconvolution to each voxel in the fMRI data
+    based on the provided lagged time course and threshold. It supports both
+    single-threaded and multi-threaded processing depending on the configuration
+    in `optiondict`.
+
+    Parameters
+    ----------
+    numspatiallocs : int
+        Number of spatial locations (voxels) in the fMRI data.
+    fmri_data : numpy.ndarray
+        2D array of fMRI data with shape (numspatiallocs, timepoints).
+    threshval : float
+        Threshold value for masking voxels based on mean signal intensity.
+    lagtc : numpy.ndarray
+        2D array of lagged time courses with shape (numspatiallocs, timepoints).
+    optiondict : dict
+        Dictionary containing processing options including:
+        - 'nprocs': number of processors to use (default: 1)
+        - 'showprogressbar': whether to show progress bar (default: True)
+        - 'mp_chunksize': chunk size for multiprocessing (default: 10)
+    wienerdeconv : numpy.ndarray
+        Wiener deconvolution kernel or filter.
+    wpeak : numpy.ndarray
+        Peak values associated with the Wiener deconvolution.
+    resampref_y : numpy.ndarray
+        Resampled reference signal for filtering.
+    rt_floatset : type, optional
+        Data type for floating-point numbers, default is `np.float64`.
+    rt_floattype : str, optional
+        String representation of the floating-point data type, default is "float64".
+
+    Returns
+    -------
+    int
+        Total number of voxels processed.
+
+    Notes
+    -----
+    - Voxels are masked based on their mean signal intensity exceeding `threshval`.
+    - If `nprocs` > 1, multiprocessing is used to process voxels in parallel.
+    - The function modifies global variables such as `meanvalue`, `rvalue`, etc.,
+      which are assumed to be defined in the outer scope.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> fmri_data = np.random.rand(100, 50)
+    >>> lagtc = np.random.rand(100, 50)
+    >>> optiondict = {'nprocs': 4, 'showprogressbar': True, 'mp_chunksize': 5}
+    >>> result = wienerpass(
+    ...     numspatiallocs=100,
+    ...     fmri_data=fmri_data,
+    ...     threshval=0.1,
+    ...     lagtc=lagtc,
+    ...     optiondict=optiondict,
+    ...     wienerdeconv=np.array([1, 2, 1]),
+    ...     wpeak=np.array([0.5]),
+    ...     resampref_y=np.array([1, 1, 1])
+    ... )
+    >>> print(result)
+    100
+    """
+    rt_floatset = (rt_floatset,)
+    rt_floattype = rt_floattype
+    inputshape = np.shape(fmri_data)
+    themask = np.where(np.mean(fmri_data, axis=1) > threshval, 1, 0)
+    if optiondict["nprocs"] > 1:
+        # define the consumer function here so it inherits most of the arguments
+        def Wiener_consumer(inQ, outQ):
+            while True:
+                try:
+                    # get a new message
+                    val = inQ.get()
+
+                    # this is the 'TERM' signal
+                    if val is None:
+                        break
+
+                    # process and send the data
+                    outQ.put(
+                        _procOneVoxelWiener(
+                            val,
+                            lagtc[val, :],
+                            fmri_data[val, :],
+                            rt_floatset=rt_floatset,
+                            rt_floattype=rt_floattype,
+                        )
+                    )
+
+                except Exception as e:
+                    print("error!", e)
+                    break
+
+        data_out = tide_multiproc.run_multiproc(
+            Wiener_consumer,
+            inputshape,
+            themask,
+            nprocs=optiondict["nprocs"],
+            showprogressbar=True,
+            chunksize=optiondict["mp_chunksize"],
+        )
+        # unpack the data
+        volumetotal = 0
+        for voxel in data_out:
+            meanvalue[voxel[0]] = voxel[1]
+            rvalue[voxel[0]] = voxel[2]
+            r2value[voxel[0]] = voxel[3]
+            fitcoff[voxel[0]] = voxel[4]
+            fitNorm[voxel[0]] = voxel[5]
+            datatoremove[voxel[0], :] = voxel[6]
+            filtereddata[voxel[0], :] = voxel[7]
+            volumetotal += 1
+        data_out = []
+    else:
+        volumetotal = 0
+        for vox in tqdm(
+            range(0, numspatiallocs),
+            desc="Voxel",
+            unit="voxels",
+            disable=(not optiondict["showprogressbar"]),
+        ):
+            inittc = fmri_data[vox, :].copy()
+            if np.mean(inittc) >= threshval:
+                (
+                    dummy,
+                    meanvalue[vox],
+                    rvalue[vox],
+                    r2value[vox],
+                    fitcoff[vox],
+                    fitNorm[vox],
+                    datatoremove[vox],
+                    filtereddata[vox],
+                ) = _procOneVoxelWiener(
+                    vox,
+                    lagtc[vox, :],
+                    inittc,
+                    rt_floatset=rt_floatset,
+                    t_floattype=rt_floattype,
+                )
+                volumetotal += 1
+
+    return volumetotal
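Note: the `wienerpass` docstring above describes the `nprocs > 1` path as a consumer that reads voxel indices from an input queue, calls `_procOneVoxelWiener`, and pushes results to an output queue, with `rapidtide.multiproc.run_multiproc` doing the wiring. Below is a stripped-down sketch of that queue/consumer pattern using only the standard library; the squaring step is a hypothetical stand-in, not the voxel computation.

```python
# Minimal queue/consumer dispatch in the style described by the wienerpass docstring.
# Standard-library only; rapidtide.multiproc.run_multiproc handles this for real.
import multiprocessing as mp

def consumer(inQ, outQ):
    while True:
        val = inQ.get()
        if val is None:                  # 'TERM' sentinel, mirroring the None check above
            break
        outQ.put((val, val * val))       # stand-in for _procOneVoxelWiener(val, ...)

if __name__ == "__main__":
    inQ, outQ = mp.Queue(), mp.Queue()
    workers = [mp.Process(target=consumer, args=(inQ, outQ)) for _ in range(2)]
    for w in workers:
        w.start()
    for vox in range(8):                 # "voxel" indices to process
        inQ.put(vox)
    for _ in workers:                    # one sentinel per worker
        inQ.put(None)
    results = [outQ.get() for _ in range(8)]
    for w in workers:
        w.join()
    print(sorted(results))               # the real code scatters these into output maps
```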
rapidtide/workflows/adjustoffset.py CHANGED
@@ -18,6 +18,7 @@
 #
 import argparse
 import copy
+from typing import Any
 
 import matplotlib.pyplot as plt
 import numpy as np
@@ -33,9 +34,35 @@ DEFAULT_PEAKTHRESH = 0.33
 DEFAULT_HISTBINS = 151
 
 
-def _get_parser():
+def _get_parser() -> Any:
     """
-    Argument parser for adjust offset
+    Argument parser for adjust offset.
+
+    This function constructs and returns an `argparse.ArgumentParser` object configured
+    for parsing command-line arguments used by the `adjustoffset` tool. It defines
+    various options for adjusting the offset of a rapidtide delay map, including
+    masking, histogram-based peak detection, and output control.
+
+    Returns
+    -------
+    argparse.ArgumentParser
+        Configured argument parser for the adjustoffset tool.
+
+    Notes
+    -----
+    The parser includes support for:
+    - Input and output file specifications
+    - Masking options (include, exclude, extra)
+    - Histogram-based offset estimation
+    - Search range limiting
+    - Debugging and display options
+
+    Examples
+    --------
+    >>> parser = _get_parser()
+    >>> args = parser.parse_args(['input.nii', 'output_root'])
+    >>> print(args.inputmap)
+    'input.nii'
     """
     parser = argparse.ArgumentParser(
         prog="adjustoffset",
@@ -159,7 +186,82 @@ def _get_parser():
     return parser
 
 
-def adjustoffset(args):
+def adjustoffset(args: Any) -> None:
+    """
+    Adjust the offset of a NIfTI map based on histogram analysis and optional masking.
+
+    This function reads a NIfTI map file, applies optional inclusion and exclusion masks,
+    and computes a peak location from the histogram of valid voxels. The computed offset
+    is then added to the map values, unless a fixed offset is specified via `args.setoffset`.
+
+    Parameters
+    ----------
+    args : Any
+        An object containing the following attributes:
+        - `inputmap` : str
+            Path to the input NIfTI map file.
+        - `debug` : bool
+            If True, prints debug information.
+        - `includespec` : str, optional
+            Specification for including voxels in the analysis.
+        - `excludespec` : str, optional
+            Specification for excluding voxels from the analysis.
+        - `extramaskname` : str, optional
+            Path to an additional mask file.
+        - `histbins` : int
+            Number of histogram bins to use.
+        - `searchrange` : tuple of float, optional
+            Range of values to consider for histogram analysis.
+        - `refine` : bool
+            Whether to refine the peak detection.
+        - `pickleft` : bool
+            Whether to pick the leftmost peak.
+        - `pickleftthresh` : float, optional
+            Threshold for leftmost peak picking.
+        - `display` : bool
+            Whether to display the histogram.
+        - `histonly` : bool
+            If True, only compute and display the histogram, do not adjust the map.
+        - `setoffset` : float, optional
+            Fixed offset value to apply to the map.
+        - `outputroot` : str
+            Root name for output files.
+
+    Returns
+    -------
+    None
+        This function does not return a value but saves two NIfTI files:
+        - `<outputroot>_maskmap.nii.gz`: The generated mask map.
+        - `<outputroot>_adjustedmaxtime.nii.gz`: The adjusted map with offset applied.
+
+    Notes
+    -----
+    - The function uses `tide_io.readfromnifti` to read the input map and `tide_io.savetonifti` to save outputs.
+    - Masks are generated using `tide_mask.getmaskset` based on inclusion/exclusion specifications.
+    - Histogram analysis is performed using `tide_stats.gethistprops` and `tide_stats.makehistogram`.
+    - If `args.setoffset` is provided, it overrides the computed peak location as the offset.
+
+    Examples
+    --------
+    >>> class Args:
+    ...     inputmap = "input_map.nii.gz"
+    ...     debug = True
+    ...     includespec = "brain"
+    ...     excludespec = None
+    ...     extramaskname = None
+    ...     histbins = 100
+    ...     searchrange = (0.0, 10.0)
+    ...     refine = True
+    ...     pickleft = False
+    ...     pickleftthresh = 0.5
+    ...     display = False
+    ...     histonly = False
+    ...     setoffset = None
+    ...     outputroot = "output"
+    ...
+    >>> args = Args()
+    >>> adjustoffset(args)
+    """
     if args.debug:
         print(f"reading map file {args.inputmap}")
     (
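Note: the `adjustoffset` docstring above centers on a histogram-peak estimate of the delay-map offset. The following numpy-only sketch illustrates that idea; rapidtide's own path goes through `tide_stats.makehistogram` / `tide_stats.gethistprops`, and the sign convention actually applied to the map follows the tool, not this toy.

```python
# Toy histogram-peak offset estimate in the spirit of adjustoffset.
import numpy as np

rng = np.random.default_rng(0)
delays = np.concatenate([rng.normal(0.8, 0.3, 5000),   # bulk of voxel delays near +0.8 s
                         rng.normal(3.0, 1.0, 500)])   # smaller late tail

counts, edges = np.histogram(delays, bins=151, range=(-5.0, 5.0))
centers = 0.5 * (edges[:-1] + edges[1:])
peakloc = centers[np.argmax(counts)]                   # modal delay, ~0.8 s

recentered = delays - peakloc                          # one way to re-reference the map
print(round(float(peakloc), 2))
```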
rapidtide/workflows/aligntcs.py CHANGED
@@ -17,6 +17,7 @@
 #
 #
 import argparse
+from typing import Any
 
 import numpy as np
 
@@ -28,7 +29,32 @@ import rapidtide.resample as tide_resample
 import rapidtide.workflows.parser_funcs as pf
 
 
-def _get_parser():
+def _get_parser() -> Any:
+    """
+    Construct and return an argument parser for aligning two time series.
+
+    This function sets up an `argparse.ArgumentParser` with required and optional
+    arguments for resampling and aligning two time series datasets. It supports
+    specifying input files, sample rates, output file, and various processing options
+    such as plotting and verbosity.
+
+    Returns
+    -------
+    argparse.ArgumentParser
+        Configured argument parser object with all necessary arguments for
+        time series alignment.
+
+    Notes
+    -----
+    The function uses a custom helper `pf.is_float` to validate sample rate inputs.
+    It also adds search range and filter options via `pf.addsearchrangeopts` and
+    `pf.addfilteropts`.
+
+    Examples
+    --------
+    >>> parser = _get_parser()
+    >>> args = parser.parse_args()
+    """
     # get the command line parameters
     parser = argparse.ArgumentParser(
         prog="aligntcs",
@@ -83,7 +109,64 @@ def _get_parser():
     return parser
 
 
-def aligntcs(args):
+def aligntcs(args: Any) -> None:
+    """
+    Align two time series using cross-correlation and resampling.
+
+    This function reads two input time series from text files, aligns them based on
+    cross-correlation, and writes the aligned second time series to an output file.
+    Optional plotting of cross-correlation and aligned signals can be enabled via
+    the `displayplots` argument in `args`.
+
+    Parameters
+    ----------
+    args : Any
+        An object containing the following attributes:
+        - infile1 : str
+            Path to the first input text file.
+        - infile2 : str
+            Path to the second input text file.
+        - insamplerate1 : float
+            Sampling rate of the first input signal.
+        - insamplerate2 : float
+            Sampling rate of the second input signal.
+        - outputfile : str
+            Path to the output file where the aligned second signal will be written.
+        - lagmin : float
+            Minimum lag for cross-correlation search.
+        - lagmax : float
+            Maximum lag for cross-correlation search.
+        - displayplots : bool
+            If True, displays cross-correlation and aligned signals using matplotlib.
+
+    Returns
+    -------
+    None
+        This function does not return a value but writes the aligned data to a file
+        and optionally displays plots.
+
+    Notes
+    -----
+    - The function applies a prefilter to the input data before alignment.
+    - The second time series is resampled to match the timing of the first.
+    - Cross-correlation is performed using a fast correlation method.
+    - If `displayplots` is True, the function will use the 'TkAgg' backend for matplotlib.
+
+    Examples
+    --------
+    >>> import argparse
+    >>> args = argparse.Namespace(
+    ...     infile1='signal1.txt',
+    ...     infile2='signal2.txt',
+    ...     insamplerate1=100.0,
+    ...     insamplerate2=100.0,
+    ...     outputfile='aligned_signal2.txt',
+    ...     lagmin=-0.1,
+    ...     lagmax=0.1,
+    ...     displayplots=False
+    ... )
+    >>> aligntcs(args)
+    """
     if args.displayplots:
         import matplotlib as mpl
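Note: the `aligntcs` docstring above reduces to estimating the lag between two timecourses by cross-correlation and then shifting/resampling the second onto the first's time base. Below is a minimal numpy sketch of that lag-estimation step; it is illustrative only, and rapidtide's pipeline uses its own filtering, correlation, and `rapidtide.resample` routines.

```python
# Toy lag estimate by cross-correlation, in the spirit of aligntcs.
import numpy as np

fs = 100.0                                              # samples per second
t = np.arange(0, 10, 1.0 / fs)
sig1 = np.exp(-0.5 * ((t - 4.0) / 0.5) ** 2)            # bump centered at 4.00 s
sig2 = np.exp(-0.5 * ((t - 4.25) / 0.5) ** 2)           # same bump, 0.25 s later

xcorr = np.correlate(sig2, sig1, mode="full")           # full cross-correlation
lags = np.arange(-(len(sig1) - 1), len(sig2))           # lag axis in samples
lag_s = lags[np.argmax(xcorr)] / fs
print(lag_s)                                            # 0.25: sig2 lags sig1 by 0.25 s

aligned2 = np.interp(t, t - lag_s, sig2)                # shift sig2 back onto sig1's grid
print(np.allclose(aligned2, sig1, atol=1e-3))           # True away from edge effects
```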