rapidtide 3.0.11__py3-none-any.whl → 3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1049 -46
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +191 -40
  7. rapidtide/calcsimfunc.py +245 -42
  8. rapidtide/correlate.py +1210 -393
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +19 -1
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +25 -3
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/decorators.py +91 -0
  23. rapidtide/dlfilter.py +2225 -108
  24. rapidtide/dlfiltertorch.py +4843 -0
  25. rapidtide/externaltools.py +327 -12
  26. rapidtide/fMRIData_class.py +79 -40
  27. rapidtide/filter.py +1899 -810
  28. rapidtide/fit.py +2004 -574
  29. rapidtide/genericmultiproc.py +93 -18
  30. rapidtide/happy_supportfuncs.py +2044 -171
  31. rapidtide/helper_classes.py +584 -43
  32. rapidtide/io.py +2363 -370
  33. rapidtide/linfitfiltpass.py +341 -75
  34. rapidtide/makelaggedtcs.py +211 -20
  35. rapidtide/maskutil.py +423 -53
  36. rapidtide/miscmath.py +827 -121
  37. rapidtide/multiproc.py +210 -22
  38. rapidtide/patchmatch.py +234 -33
  39. rapidtide/peakeval.py +32 -30
  40. rapidtide/ppgproc.py +2203 -0
  41. rapidtide/qualitycheck.py +352 -39
  42. rapidtide/refinedelay.py +422 -57
  43. rapidtide/refineregressor.py +498 -184
  44. rapidtide/resample.py +671 -185
  45. rapidtide/scripts/applyppgproc.py +28 -0
  46. rapidtide/simFuncClasses.py +1052 -77
  47. rapidtide/simfuncfit.py +260 -46
  48. rapidtide/stats.py +540 -238
  49. rapidtide/tests/happycomp +9 -0
  50. rapidtide/tests/test_dlfiltertorch.py +627 -0
  51. rapidtide/tests/test_findmaxlag.py +24 -8
  52. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  53. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  54. rapidtide/tests/test_fullrunhappy_v3.py +1 -0
  55. rapidtide/tests/test_fullrunhappy_v4.py +2 -2
  56. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  57. rapidtide/tests/test_simroundtrip.py +8 -8
  58. rapidtide/tests/utils.py +9 -8
  59. rapidtide/tidepoolTemplate.py +142 -38
  60. rapidtide/tidepoolTemplate_alt.py +165 -44
  61. rapidtide/tidepoolTemplate_big.py +189 -52
  62. rapidtide/util.py +1217 -118
  63. rapidtide/voxelData.py +684 -37
  64. rapidtide/wiener.py +19 -12
  65. rapidtide/wiener2.py +113 -7
  66. rapidtide/wiener_doc.py +255 -0
  67. rapidtide/workflows/adjustoffset.py +105 -3
  68. rapidtide/workflows/aligntcs.py +85 -2
  69. rapidtide/workflows/applydlfilter.py +87 -10
  70. rapidtide/workflows/applyppgproc.py +522 -0
  71. rapidtide/workflows/atlasaverage.py +210 -47
  72. rapidtide/workflows/atlastool.py +100 -3
  73. rapidtide/workflows/calcSimFuncMap.py +294 -64
  74. rapidtide/workflows/calctexticc.py +201 -9
  75. rapidtide/workflows/ccorrica.py +97 -4
  76. rapidtide/workflows/cleanregressor.py +168 -29
  77. rapidtide/workflows/delayvar.py +163 -10
  78. rapidtide/workflows/diffrois.py +81 -3
  79. rapidtide/workflows/endtidalproc.py +144 -4
  80. rapidtide/workflows/fdica.py +195 -15
  81. rapidtide/workflows/filtnifti.py +70 -3
  82. rapidtide/workflows/filttc.py +74 -3
  83. rapidtide/workflows/fitSimFuncMap.py +206 -48
  84. rapidtide/workflows/fixtr.py +73 -3
  85. rapidtide/workflows/gmscalc.py +113 -3
  86. rapidtide/workflows/happy.py +801 -199
  87. rapidtide/workflows/happy2std.py +144 -12
  88. rapidtide/workflows/happy_parser.py +138 -9
  89. rapidtide/workflows/histnifti.py +118 -2
  90. rapidtide/workflows/histtc.py +84 -3
  91. rapidtide/workflows/linfitfilt.py +117 -4
  92. rapidtide/workflows/localflow.py +328 -28
  93. rapidtide/workflows/mergequality.py +79 -3
  94. rapidtide/workflows/niftidecomp.py +322 -18
  95. rapidtide/workflows/niftistats.py +174 -4
  96. rapidtide/workflows/pairproc.py +88 -2
  97. rapidtide/workflows/pairwisemergenifti.py +85 -2
  98. rapidtide/workflows/parser_funcs.py +1421 -40
  99. rapidtide/workflows/physiofreq.py +137 -11
  100. rapidtide/workflows/pixelcomp.py +208 -5
  101. rapidtide/workflows/plethquality.py +103 -21
  102. rapidtide/workflows/polyfitim.py +151 -11
  103. rapidtide/workflows/proj2flow.py +75 -2
  104. rapidtide/workflows/rankimage.py +111 -4
  105. rapidtide/workflows/rapidtide.py +272 -15
  106. rapidtide/workflows/rapidtide2std.py +98 -2
  107. rapidtide/workflows/rapidtide_parser.py +109 -9
  108. rapidtide/workflows/refineDelayMap.py +143 -33
  109. rapidtide/workflows/refineRegressor.py +682 -93
  110. rapidtide/workflows/regressfrommaps.py +152 -31
  111. rapidtide/workflows/resamplenifti.py +85 -3
  112. rapidtide/workflows/resampletc.py +91 -3
  113. rapidtide/workflows/retrolagtcs.py +98 -6
  114. rapidtide/workflows/retroregress.py +165 -9
  115. rapidtide/workflows/roisummarize.py +173 -5
  116. rapidtide/workflows/runqualitycheck.py +71 -3
  117. rapidtide/workflows/showarbcorr.py +147 -4
  118. rapidtide/workflows/showhist.py +86 -2
  119. rapidtide/workflows/showstxcorr.py +160 -3
  120. rapidtide/workflows/showtc.py +159 -3
  121. rapidtide/workflows/showxcorrx.py +184 -4
  122. rapidtide/workflows/showxy.py +185 -15
  123. rapidtide/workflows/simdata.py +262 -36
  124. rapidtide/workflows/spatialfit.py +77 -2
  125. rapidtide/workflows/spatialmi.py +251 -27
  126. rapidtide/workflows/spectrogram.py +305 -32
  127. rapidtide/workflows/synthASL.py +154 -3
  128. rapidtide/workflows/tcfrom2col.py +76 -2
  129. rapidtide/workflows/tcfrom3col.py +74 -2
  130. rapidtide/workflows/tidepool.py +2969 -130
  131. rapidtide/workflows/utils.py +19 -14
  132. rapidtide/workflows/utils_doc.py +293 -0
  133. rapidtide/workflows/variabilityizer.py +116 -3
  134. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/METADATA +3 -2
  135. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/RECORD +139 -122
  136. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
  137. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
  138. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
  139. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
rapidtide/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
  version_json = '''
  {
- "date": "2025-08-27T10:21:33-0400",
+ "date": "2025-11-08T09:22:00-0500",
  "dirty": false,
  "error": null,
- "full-revisionid": "078ea587f66134f5bb1f53817240da52156cf7f1",
- "version": "3.0.11"
+ "full-revisionid": "d57dfd2f1902964ec2bdc7a760f361eea22f22aa",
+ "version": "3.1"
  }
  ''' # END VERSION_JSON
 
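The only change to rapidtide/_version.py is the versioneer metadata bump shown above. The short Python sketch below illustrates how such an embedded version_json is normally read back at runtime; it assumes the standard versioneer-generated get_versions() helper, which is not itself shown in this diff.

    # Illustration only: assumes the usual versioneer layout, in which the
    # module's get_versions() helper parses the version_json string above.
    from rapidtide._version import get_versions

    info = get_versions()
    print(info["version"])           # "3.1" for this release
    print(info["full-revisionid"])   # "d57dfd2f1902964ec2bdc7a760f361eea22f22aa"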
rapidtide/calccoherence.py CHANGED
@@ -19,8 +19,10 @@
  import gc
  import logging
  import warnings
+ from typing import Any
 
  import numpy as np
+ from numpy.typing import NDArray
 
  import rapidtide.genericmultiproc as tide_genericmultiproc
 
@@ -29,10 +31,57 @@ LGR = logging.getLogger("GENERAL")
 
 
  def _procOneVoxelCoherence(
- vox,
- voxelargs,
- **kwargs,
- ):
+ vox: int,
+ voxelargs: list,
+ **kwargs: Any,
+ ) -> tuple[int, NDArray, NDArray, float, float]:
+ """
+ Process coherence for a single voxel.
+
+ This function computes coherence values for a given voxel using the provided
+ coherence calculator and fMRI time course data. It returns the voxel index
+ along with coherence values and the location of the maximum coherence.
+
+ Parameters
+ ----------
+ vox : int
+ The voxel index being processed.
+ voxelargs : list
+ A list containing two elements: the coherence calculator object and
+ the fMRI time course data (fmritc).
+ **kwargs : Any
+ Additional keyword arguments that can override default options:
+ - alt : bool, optional (default: False)
+ Flag to indicate alternative computation mode.
+ - debug : bool, optional (default: False)
+ Flag to enable debug printing.
+
+ Returns
+ -------
+ tuple[int, NDArray, NDArray, float, float]
+ A tuple containing:
+ - vox : int
+ The input voxel index
+ - thecoherence_x : NDArray
+ X-axis coherence values
+ - thecoherence_y : NDArray
+ Y-axis coherence values
+ - thecoherence_y[maxindex] : float
+ Maximum coherence value
+ - thecoherence_x[maxindex] : float
+ X-coordinate corresponding to maximum coherence
+
+ Notes
+ -----
+ The function uses the `theCoherer.run()` method to compute coherence values.
+ When `alt=True`, the function returns additional dummy values from the
+ coherence calculation. The maximum coherence is determined using `np.argmax()`.
+
+ Examples
+ --------
+ >>> result = _procOneVoxelCoherence(10, [coherer_obj, fmri_data], alt=True)
+ >>> voxel_idx, x_vals, y_vals, max_coherence, max_x = result
+ """
  options = {
  "alt": False,
  "debug": False,
@@ -64,51 +113,169 @@ def _procOneVoxelCoherence(
  )
 
 
- def _packvoxeldata(voxnum, voxelargs):
+ def _packvoxeldata(voxnum: int, voxelargs: list) -> list:
+ """
+ Pack voxel data for processing.
+
+ Parameters
+ ----------
+ voxnum : int
+ The voxel number to extract from the second element of voxelargs.
+ voxelargs : list
+ A list containing voxel arguments where:
+ - voxelargs[0] is the first voxel argument (returned as-is)
+ - voxelargs[1] is a 2D array from which row voxnum is extracted
+
+ Returns
+ -------
+ list
+ A list containing:
+ - voxelargs[0] (unchanged)
+ - The voxnum-th row of voxelargs[1] as a 1D array
+
+ Notes
+ -----
+ This function is typically used in voxel-based data processing workflows
+ where data needs to be extracted and reorganized for further analysis.
+
+ Examples
+ --------
+ >>> voxelargs = [10, [[1, 2, 3], [4, 5, 6], [7, 8, 9]]]
+ >>> _packvoxeldata(1, voxelargs)
+ [10, [4, 5, 6]]
+ """
  return [
  voxelargs[0],
  (voxelargs[1])[voxnum, :],
  ]
 
 
- def _unpackvoxeldata(retvals, voxelproducts):
+ def _unpackvoxeldata(retvals: tuple, voxelproducts: list) -> None:
+ """
+ Unpack voxel data from retvals tuple into corresponding voxel product lists.
+
+ This function takes a tuple of voxel data and distributes the values into
+ three separate voxel product lists based on the index specified in the first
+ element of the retvals tuple.
+
+ Parameters
+ ----------
+ retvals : tuple
+ A tuple containing voxel data where:
+ - retvals[0] : int, index for insertion
+ - retvals[1] : unused
+ - retvals[2] : value to insert into voxelproducts[0]
+ - retvals[3] : value to insert into voxelproducts[1]
+ - retvals[4] : value to insert into voxelproducts[2]
+ voxelproducts : list
+ A list of three voxel product arrays/lists where:
+ - voxelproducts[0] : first voxel product array
+ - voxelproducts[1] : second voxel product array
+ - voxelproducts[2] : third voxel product array
+
+ Returns
+ -------
+ None
+ This function modifies the voxelproducts lists in-place and does not return anything.
+
+ Notes
+ -----
+ The function assumes that retvals contains exactly 5 elements and that
+ voxelproducts contains exactly 3 elements. The first element of retvals
+ is used as an index to determine the position where values should be inserted
+ into each of the three voxel product arrays.
+
+ Examples
+ --------
+ >>> voxel1 = [0, 0, 0]
+ >>> voxel2 = [0, 0, 0]
+ >>> voxel3 = [0, 0, 0]
+ >>> retvals = (1, None, 10, 20, 30)
+ >>> voxelproducts = [voxel1, voxel2, voxel3]
+ >>> _unpackvoxeldata(retvals, voxelproducts)
+ >>> print(voxel1[1])
+ 10
+ >>> print(voxel2[1])
+ 20
+ >>> print(voxel3[1])
+ 30
+ """
  (voxelproducts[0])[retvals[0]] = retvals[2]
  (voxelproducts[1])[retvals[0]] = retvals[3]
  (voxelproducts[2])[retvals[0]] = retvals[4]
 
 
  def coherencepass(
- fmridata,
- theCoherer,
- coherencefunc,
- coherencepeakval,
- coherencepeakfreq,
- alt=False,
- chunksize=1000,
- nprocs=1,
- alwaysmultiproc=False,
- showprogressbar=True,
- debug=False,
- ):
+ fmridata: NDArray,
+ theCoherer: Any,
+ coherencefunc: NDArray,
+ coherencepeakval: NDArray,
+ coherencepeakfreq: NDArray,
+ alt: bool = False,
+ chunksize: int = 1000,
+ nprocs: int = 1,
+ alwaysmultiproc: bool = False,
+ showprogressbar: bool = True,
+ debug: bool = False,
+ ) -> int:
  """
+ Perform coherence analysis on fMRI data across voxels using multiprocessing.
+
+ This function applies coherence analysis to each voxel in the input fMRI data,
+ storing results in the provided output arrays. It supports parallel processing
+ for improved performance and includes optional debugging and progress tracking.
 
  Parameters
  ----------
- fmridata
- theCoherer
- coherencefunc
- coherencepeakval
- coherencepeakfreq
- chunksize
- nprocs
- alwaysmultiproc
- showprogressbar
- rt_floatset
- rt_floattype
+ fmridata : numpy.ndarray
+ Input fMRI data array with shape (time, voxels).
+ theCoherer : Any
+ Object or function used to perform coherence calculations.
+ coherencefunc : numpy.ndarray
+ Array to store coherence function results for each voxel.
+ coherencepeakval : numpy.ndarray
+ Array to store peak coherence values for each voxel.
+ coherencepeakfreq : numpy.ndarray
+ Array to store peak coherence frequencies for each voxel.
+ alt : bool, optional
+ If True, use alternative coherence calculation method. Default is False.
+ chunksize : int, optional
+ Number of voxels to process in each chunk during multiprocessing.
+ Default is 1000.
+ nprocs : int, optional
+ Number of processes to use for multiprocessing. Default is 1.
+ alwaysmultiproc : bool, optional
+ If True, always use multiprocessing even for small datasets.
+ Default is False.
+ showprogressbar : bool, optional
+ If True, display a progress bar during processing. Default is True.
+ debug : bool, optional
+ If True, enable debug logging. Default is False.
 
  Returns
  -------
+ int
+ Total number of voxels processed.
+
+ Notes
+ -----
+ This function uses `tide_genericmultiproc.run_multiproc` to distribute
+ voxel-wise coherence computations across multiple processes. The results
+ are stored directly into the provided output arrays (`coherencefunc`,
+ `coherencepeakval`, `coherencepeakfreq`).
 
+ Examples
+ --------
+ >>> import numpy as np
+ >>> fmri_data = np.random.rand(100, 50)
+ >>> coherer = SomeCohererClass()
+ >>> coherence_func = np.zeros((100, 50))
+ >>> peak_val = np.zeros((1, 50))
+ >>> peak_freq = np.zeros((1, 50))
+ >>> n_voxels = coherencepass(
+ ... fmri_data, coherer, coherence_func, peak_val, peak_freq
+ ... )
+ >>> print(f"Processed {n_voxels} voxels")
  """
  inputshape = np.shape(fmridata)
  voxelargs = [theCoherer, fmridata]
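The docstrings added above describe calccoherence.py's division of labor: _packvoxeldata builds the per-voxel argument list, a per-voxel worker returns a result tuple, and _unpackvoxeldata scatters that tuple into preallocated output arrays while a generic driver iterates over voxels. The Python sketch below restates that pack/process/unpack contract with hypothetical stand-in functions and a plain serial loop; it does not reproduce tide_genericmultiproc.run_multiproc, whose signature is not shown in this diff.

    # Hypothetical sketch of the pack / process / unpack contract; the real
    # dispatch is handled by tide_genericmultiproc.run_multiproc.
    import numpy as np

    def pack(vox, voxelargs):
        # cf. _packvoxeldata: keep the shared object, slice out one voxel's timecourse
        return [voxelargs[0], voxelargs[1][vox, :]]

    def process(vox, packedargs):
        # stand-in per-voxel computation returning (vox, value1, value2, value3)
        _, timecourse = packedargs
        return vox, float(timecourse.mean()), float(timecourse.max()), float(timecourse.min())

    def unpack(retvals, voxelproducts):
        # cf. _unpackvoxeldata: write the returned values into the output arrays
        voxelproducts[0][retvals[0]] = retvals[1]
        voxelproducts[1][retvals[0]] = retvals[2]
        voxelproducts[2][retvals[0]] = retvals[3]

    numvox = 4
    fmridata = np.random.rand(numvox, 16)
    outputs = [np.zeros(numvox) for _ in range(3)]
    for vox in range(numvox):  # serial stand-in for the multiprocessing driver
        unpack(process(vox, pack(vox, [None, fmridata])), outputs)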
rapidtide/calcnullsimfunc.py CHANGED
@@ -16,9 +16,12 @@
  # limitations under the License.
  #
  #
+ import logging
  import sys
+ from typing import Any
 
  import numpy as np
+ from numpy.typing import NDArray
 
  import rapidtide.filter as tide_filt
  import rapidtide.genericmultiproc as tide_genericmultiproc
@@ -27,10 +30,63 @@ import rapidtide.miscmath as tide_math
 
  # note: rawtimecourse has been filtered, but NOT windowed
  def _procOneNullCorrelationx(
- vox,
- voxelargs,
- **kwargs,
- ):
+ vox: int,
+ voxelargs: list,
+ **kwargs: Any,
+ ) -> tuple[int, float]:
+ """
+ Process a single voxel to compute the maximum correlation value from a null correlation test.
+
+ This function performs a permutation-based null correlation test for a given voxel. It shuffles
+ the reference time course according to the specified method and computes the cross-correlation
+ with the original time course. The maximum correlation value is returned along with the voxel index.
+
+ Parameters
+ ----------
+ vox : int
+ The voxel index to process.
+ voxelargs : list
+ A list containing the following elements in order:
+ - `normalizedreftc`: Normalized reference time course.
+ - `rawtcfft_r`: Raw FFT magnitude of the reference time course.
+ - `rawtcfft_ang`: Raw FFT phase of the reference time course.
+ - `theCorrelator`: Correlator object used for cross-correlation.
+ - `thefitter`: Fitter object used for fitting the correlation peak.
+ **kwargs : Any
+ Additional keyword arguments that can override default options:
+ - permutationmethod : str, optional
+ The method used for shuffling the reference time course.
+ Options are 'shuffle' (default) or 'phaserandom'.
+ - debug : bool, optional
+ If True, prints debug information including the permutation method used.
+
+ Returns
+ -------
+ tuple[int, float]
+ A tuple containing:
+ - vox : int
+ The voxel index passed as input.
+ - maxval : float
+ The maximum correlation value obtained from the fitted correlation.
+
+ Notes
+ -----
+ This function supports two permutation methods:
+ - 'shuffle': Randomly shuffles the reference time course.
+ - 'phaserandom': Shuffles the phase of the FFT of the reference time course while preserving
+ the magnitude.
+
+ Examples
+ --------
+ >>> result = _procOneNullCorrelationx(
+ ... vox=10,
+ ... voxelargs=[ref_tc, fft_r, fft_ang, correlator, fitter],
+ ... permutationmethod='shuffle',
+ ... debug=True
+ ... )
+ >>> print(result)
+ (10, 0.85)
+ """
 
  options = {
  "permutationmethod": "shuffle",
@@ -79,56 +135,151 @@ def _procOneNullCorrelationx
  return vox, maxval
 
 
- def _packvoxeldata(voxnum, voxelargs):
- return [voxelargs[0], voxelargs[1], voxelargs[2], voxelargs[3], voxelargs[4]]
+ def _packvoxeldata(voxnum: int, voxelargs: list) -> list:
+ """
+ Pack voxel data into a list format.
 
+ Parameters
+ ----------
+ voxnum : int
+ The voxel number identifier.
+ voxelargs : list
+ List containing voxel arguments to be packed. Expected to contain at least 5 elements.
 
- def _unpackvoxeldata(retvals, voxelproducts):
- (voxelproducts[0])[retvals[0]] = retvals[1]
+ Returns
+ -------
+ list
+ A list containing the first 5 elements from voxelargs in order:
+ [voxelargs[0], voxelargs[1], voxelargs[2], voxelargs[3], voxelargs[4]]
 
+ Notes
+ -----
+ This function currently returns a fixed subset of the input list. For proper functionality,
+ the voxnum parameter is not utilized in the current implementation.
 
- def getNullDistributionData(
- Fs,
- theCorrelator,
- thefitter,
- LGR,
- numestreps=0,
- nprocs=1,
- alwaysmultiproc=False,
- showprogressbar=True,
- chunksize=1000,
- permutationmethod="shuffle",
- rt_floatset=np.float64,
- rt_floattype="float64",
- debug=False,
- ):
- r"""Calculate a set of null correlations to determine the distribution of correlation values. This can
- be used to find the spurious correlation threshold
+ Examples
+ --------
+ >>> _packvoxeldata(1, [10, 20, 30, 40, 50, 60])
+ [10, 20, 30, 40, 50]
+ """
+ return [voxelargs[0], voxelargs[1], voxelargs[2], voxelargs[3], voxelargs[4]]
+
+
+ def _unpackvoxeldata(retvals: tuple, voxelproducts: list) -> None:
+ """
+ Unpack voxel data by assigning values to specified indices.
+
+ This function takes return values and assigns them to a specific location
+ within a voxel product structure based on the provided indices.
 
  Parameters
  ----------
- Fs: float
- The sample frequency of rawtimecourse, in Hz
+ retvals : tuple
+ A tuple containing two elements: the first element is the index
+ used to access the voxel product, and the second element is the
+ value to be assigned.
+ voxelproducts : list
+ A list of voxel product structures where the assignment will occur.
+ The function modifies the first element of this list in-place.
 
- rawtimecourse : 1D numpy array
- The test regressor. This should be filtered to the desired bandwidth, but NOT windowed.
- :param rawtimecourse:
+ Returns
+ -------
+ None
+ This function modifies the voxelproducts list in-place and does not
+ return any value.
 
- corrscale: 1D numpy array
- The time axis of the cross correlation function.
+ Notes
+ -----
+ The function assumes that voxelproducts[0] is a mutable structure (like
+ a list or array) that supports item assignment. The first element of
+ retvals is used as an index to access voxelproducts[0], and the second
+ element of retvals is assigned to that location.
 
- filterfunc: function
- This is a preconfigured NoncausalFilter function which is used to filter data to the desired bandwidth
+ Examples
+ --------
+ >>> voxel_data = [[0, 0, 0]]
+ >>> _unpackvoxeldata((1, 42), voxel_data)
+ >>> voxel_data
+ [[0, 42, 0]]
+ """
+ (voxelproducts[0])[retvals[0]] = retvals[1]
 
- corrorigin: int
- The bin number in the correlation timescale corresponding to 0.0 seconds delay
 
- negbins: int
- The lower edge of the search range for correlation peaks, in number of bins below corrorigin
+ def getNullDistributionData(
+ Fs: float,
+ theCorrelator: Any,
+ thefitter: Any,
+ LGR: logging.Logger,
+ numestreps: int = 0,
+ nprocs: int = 1,
+ alwaysmultiproc: bool = False,
+ showprogressbar: bool = True,
+ chunksize: int = 1000,
+ permutationmethod: str = "shuffle",
+ rt_floatset: type = np.float64,
+ rt_floattype: str = "float64",
+ debug: bool = False,
+ ) -> NDArray:
+ """
+ Calculate a set of null correlations to determine the distribution of correlation values.
+
+ This function generates a distribution of correlation values by performing permutations
+ on the reference time course. The resulting distribution can be used to identify
+ spurious correlation thresholds.
+
+ Parameters
+ ----------
+ Fs : float
+ The sample frequency of the raw time course, in Hz.
+ theCorrelator : Any
+ An object containing the reference time course and related filtering parameters.
+ thefitter : Any
+ An object used for fitting the correlation data.
+ LGR : logging.Logger
+ Logger instance for logging messages during execution.
+ numestreps : int, optional
+ Number of null correlation estimates to compute. Default is 0.
+ nprocs : int, optional
+ Number of processes to use for multiprocessing. Default is 1.
+ alwaysmultiproc : bool, optional
+ If True, always use multiprocessing even for small datasets. Default is False.
+ showprogressbar : bool, optional
+ If True, display a progress bar during computation. Default is True.
+ chunksize : int, optional
+ Size of chunks for multiprocessing. Default is 1000.
+ permutationmethod : str, optional
+ Permutation method to use ('shuffle' or other supported methods). Default is 'shuffle'.
+ rt_floatset : type, optional
+ The floating-point type to use for internal calculations. Default is np.float64.
+ rt_floattype : str, optional
+ String representation of the floating-point type. Default is 'float64'.
+ debug : bool, optional
+ If True, enable debug output. Default is False.
+
+ Returns
+ -------
+ NDArray
+ Array of correlation values representing the null distribution.
 
- posbins: int
- The upper edge of the search range for correlation peaks, in number of bins above corrorigin
+ Notes
+ -----
+ This function applies normalization and filtering to the reference time course before
+ computing correlations. It supports parallel processing via multiprocessing for
+ improved performance when `numestreps` is large.
 
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from some_module import getNullDistributionData
+ >>> result = getNullDistributionData(
+ ... Fs=100.0,
+ ... theCorrelator=correlator_obj,
+ ... thefitter=fitter_obj,
+ ... LGR=logging.getLogger(__name__),
+ ... numestreps=1000,
+ ... nprocs=4
+ ... )
+ >>> print(f"Null correlation distribution shape: {result.shape}")
  """
  inputshape = np.asarray([numestreps])
  normalizedreftc = theCorrelator.ncprefilter.apply(
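The new getNullDistributionData docstring notes that the returned null-correlation distribution "can be used to identify spurious correlation thresholds." The short Python sketch below shows one conventional way to derive such a threshold from an array like the one this function returns; the percentile choice and variable names are illustrative assumptions, not rapidtide's exact procedure.

    # Illustrative only: convert a null-correlation distribution into a
    # significance threshold via a percentile cutoff.
    import numpy as np

    # stand-in for the array returned by getNullDistributionData
    nulldistribution = np.abs(np.random.normal(0.0, 0.1, size=10000))

    pvalue = 0.05
    threshold = np.percentile(nulldistribution, 100.0 * (1.0 - pvalue))
    print(f"correlations above {threshold:.3f} exceed the p<{pvalue} null level")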