rapidtide 3.0.11__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1049 -46
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +188 -40
  7. rapidtide/calcsimfunc.py +242 -42
  8. rapidtide/correlate.py +1203 -383
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +53 -3
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +29 -7
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/decorators.py +91 -0
  23. rapidtide/dlfilter.py +2226 -110
  24. rapidtide/dlfiltertorch.py +4842 -0
  25. rapidtide/externaltools.py +327 -12
  26. rapidtide/fMRIData_class.py +79 -40
  27. rapidtide/filter.py +1899 -810
  28. rapidtide/fit.py +2011 -581
  29. rapidtide/genericmultiproc.py +93 -18
  30. rapidtide/happy_supportfuncs.py +2047 -172
  31. rapidtide/helper_classes.py +584 -43
  32. rapidtide/io.py +2370 -372
  33. rapidtide/linfitfiltpass.py +346 -99
  34. rapidtide/makelaggedtcs.py +210 -24
  35. rapidtide/maskutil.py +448 -62
  36. rapidtide/miscmath.py +827 -121
  37. rapidtide/multiproc.py +210 -22
  38. rapidtide/patchmatch.py +242 -42
  39. rapidtide/peakeval.py +31 -31
  40. rapidtide/ppgproc.py +2203 -0
  41. rapidtide/qualitycheck.py +352 -39
  42. rapidtide/refinedelay.py +431 -57
  43. rapidtide/refineregressor.py +494 -189
  44. rapidtide/resample.py +671 -185
  45. rapidtide/scripts/applyppgproc.py +28 -0
  46. rapidtide/scripts/showxcorr_legacy.py +7 -7
  47. rapidtide/scripts/stupidramtricks.py +15 -17
  48. rapidtide/simFuncClasses.py +1052 -77
  49. rapidtide/simfuncfit.py +269 -69
  50. rapidtide/stats.py +540 -238
  51. rapidtide/tests/happycomp +9 -0
  52. rapidtide/tests/test_cleanregressor.py +1 -2
  53. rapidtide/tests/test_dlfiltertorch.py +627 -0
  54. rapidtide/tests/test_findmaxlag.py +24 -8
  55. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  56. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  57. rapidtide/tests/test_fullrunhappy_v3.py +11 -4
  58. rapidtide/tests/test_fullrunhappy_v4.py +10 -2
  59. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  60. rapidtide/tests/test_getparsers.py +11 -3
  61. rapidtide/tests/test_refinedelay.py +0 -1
  62. rapidtide/tests/test_simroundtrip.py +16 -8
  63. rapidtide/tests/test_stcorrelate.py +3 -1
  64. rapidtide/tests/utils.py +9 -8
  65. rapidtide/tidepoolTemplate.py +142 -38
  66. rapidtide/tidepoolTemplate_alt.py +165 -44
  67. rapidtide/tidepoolTemplate_big.py +189 -52
  68. rapidtide/util.py +1217 -118
  69. rapidtide/voxelData.py +684 -37
  70. rapidtide/wiener.py +136 -23
  71. rapidtide/wiener2.py +113 -7
  72. rapidtide/workflows/adjustoffset.py +105 -3
  73. rapidtide/workflows/aligntcs.py +85 -2
  74. rapidtide/workflows/applydlfilter.py +87 -10
  75. rapidtide/workflows/applyppgproc.py +540 -0
  76. rapidtide/workflows/atlasaverage.py +210 -47
  77. rapidtide/workflows/atlastool.py +100 -3
  78. rapidtide/workflows/calcSimFuncMap.py +288 -69
  79. rapidtide/workflows/calctexticc.py +201 -9
  80. rapidtide/workflows/ccorrica.py +101 -6
  81. rapidtide/workflows/cleanregressor.py +165 -31
  82. rapidtide/workflows/delayvar.py +171 -23
  83. rapidtide/workflows/diffrois.py +81 -3
  84. rapidtide/workflows/endtidalproc.py +144 -4
  85. rapidtide/workflows/fdica.py +195 -15
  86. rapidtide/workflows/filtnifti.py +70 -3
  87. rapidtide/workflows/filttc.py +74 -3
  88. rapidtide/workflows/fitSimFuncMap.py +202 -51
  89. rapidtide/workflows/fixtr.py +73 -3
  90. rapidtide/workflows/gmscalc.py +113 -3
  91. rapidtide/workflows/happy.py +801 -199
  92. rapidtide/workflows/happy2std.py +144 -12
  93. rapidtide/workflows/happy_parser.py +163 -23
  94. rapidtide/workflows/histnifti.py +118 -2
  95. rapidtide/workflows/histtc.py +84 -3
  96. rapidtide/workflows/linfitfilt.py +117 -4
  97. rapidtide/workflows/localflow.py +328 -28
  98. rapidtide/workflows/mergequality.py +79 -3
  99. rapidtide/workflows/niftidecomp.py +322 -18
  100. rapidtide/workflows/niftistats.py +174 -4
  101. rapidtide/workflows/pairproc.py +98 -4
  102. rapidtide/workflows/pairwisemergenifti.py +85 -2
  103. rapidtide/workflows/parser_funcs.py +1421 -40
  104. rapidtide/workflows/physiofreq.py +137 -11
  105. rapidtide/workflows/pixelcomp.py +207 -5
  106. rapidtide/workflows/plethquality.py +103 -21
  107. rapidtide/workflows/polyfitim.py +151 -11
  108. rapidtide/workflows/proj2flow.py +75 -2
  109. rapidtide/workflows/rankimage.py +111 -4
  110. rapidtide/workflows/rapidtide.py +368 -76
  111. rapidtide/workflows/rapidtide2std.py +98 -2
  112. rapidtide/workflows/rapidtide_parser.py +109 -9
  113. rapidtide/workflows/refineDelayMap.py +144 -33
  114. rapidtide/workflows/refineRegressor.py +675 -96
  115. rapidtide/workflows/regressfrommaps.py +161 -37
  116. rapidtide/workflows/resamplenifti.py +85 -3
  117. rapidtide/workflows/resampletc.py +91 -3
  118. rapidtide/workflows/retrolagtcs.py +99 -9
  119. rapidtide/workflows/retroregress.py +176 -26
  120. rapidtide/workflows/roisummarize.py +174 -5
  121. rapidtide/workflows/runqualitycheck.py +71 -3
  122. rapidtide/workflows/showarbcorr.py +149 -6
  123. rapidtide/workflows/showhist.py +86 -2
  124. rapidtide/workflows/showstxcorr.py +160 -3
  125. rapidtide/workflows/showtc.py +159 -3
  126. rapidtide/workflows/showxcorrx.py +190 -10
  127. rapidtide/workflows/showxy.py +185 -15
  128. rapidtide/workflows/simdata.py +264 -38
  129. rapidtide/workflows/spatialfit.py +77 -2
  130. rapidtide/workflows/spatialmi.py +250 -27
  131. rapidtide/workflows/spectrogram.py +305 -32
  132. rapidtide/workflows/synthASL.py +154 -3
  133. rapidtide/workflows/tcfrom2col.py +76 -2
  134. rapidtide/workflows/tcfrom3col.py +74 -2
  135. rapidtide/workflows/tidepool.py +2971 -130
  136. rapidtide/workflows/utils.py +19 -14
  137. rapidtide/workflows/utils_doc.py +293 -0
  138. rapidtide/workflows/variabilityizer.py +116 -3
  139. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/METADATA +10 -8
  140. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/RECORD +144 -128
  141. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/entry_points.txt +1 -0
  142. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/WHEEL +0 -0
  143. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/licenses/LICENSE +0 -0
  144. {rapidtide-3.0.11.dist-info → rapidtide-3.1.1.dist-info}/top_level.txt +0 -0
@@ -21,6 +21,7 @@ import time
  import warnings

  import numpy as np
+ from numpy.typing import NDArray
  from scipy.signal import savgol_filter, welch
  from scipy.stats import kurtosis, pearsonr, skew
  from statsmodels.robust import mad
@@ -45,21 +46,106 @@ try:
  except ImportError:
  mklexists = False

- try:
- import rapidtide.dlfilter as tide_dlfilt

- dlfilterexists = True
- print("dlfilter exists")
- except ImportError:
- dlfilterexists = False
- print("dlfilter does not exist")
+ def rrifromphase(timeaxis: NDArray, thephase: NDArray) -> None:
+ """
+ Convert phase to range rate.

+ This function converts phase measurements to range rate values using the
+ provided time axis and phase data.
+
+ Parameters
+ ----------
+ timeaxis : NDArray
+ Time axis values corresponding to the phase measurements.
+ thephase : NDArray
+ Phase measurements to be converted to range rate.

- def rrifromphase(timeaxis, thephase):
+ Returns
+ -------
+ None
+ This function does not return any value.
+
+ Notes
+ -----
+ The function performs conversion from phase to range rate but does not
+ return the result. The actual implementation details are not provided
+ in the function signature.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> time = np.array([0, 1, 2, 3])
+ >>> phase = np.array([0.1, 0.2, 0.3, 0.4])
+ >>> rrifromphase(time, phase)
+ """
  return None


- def calc_3d_optical_flow(video, projmask, flowhdr, outputroot, window_size=3, debug=False):
+ def calc_3d_optical_flow(
+ video: NDArray,
+ projmask: NDArray,
+ flowhdr: dict,
+ outputroot: str,
+ window_size: int = 3,
+ debug: bool = False,
+ ) -> tuple[NDArray, NDArray]:
+ """
+ Compute 3D optical flow for a video volume using the Lucas-Kanade method.
+
+ This function calculates optical flow in three dimensions (x, y, z) across
+ a sequence of video frames. It uses a Lucas-Kanade approach to estimate
+ motion vectors at each voxel, considering a local window around each pixel.
+ The results are saved as NIfTI files for each frame.
+
+ Parameters
+ ----------
+ video : NDArray
+ 4D array of shape (xsize, ysize, zsize, num_frames) representing the
+ input video data.
+ projmask : NDArray
+ 3D boolean or integer mask of shape (xsize, ysize, zsize) indicating
+ which voxels to process for optical flow computation.
+ flowhdr : dict
+ Header dictionary for NIfTI output files, containing metadata for
+ the optical flow results.
+ outputroot : str
+ Root name for output NIfTI files. Files will be saved with suffixes
+ `_desc-flow_phase-XX_map` and `_desc-flowmag_phase-XX_map`.
+ window_size : int, optional
+ Size of the local window used for gradient computation. Default is 3.
+ debug : bool, optional
+ If True, print debug information during computation. Default is False.
+
+ Returns
+ -------
+ tuple[NDArray, NDArray]
+ A tuple containing:
+ - `flow_vectors`: 5D array of shape (xsize, ysize, zsize, num_frames, 3)
+ representing the computed optical flow vectors for each frame.
+ - `None`: Placeholder return value; function currently returns only
+ `flow_vectors` and saves outputs to disk.
+
+ Notes
+ -----
+ - The optical flow is computed using a Lucas-Kanade method with spatial
+ gradients in x, y, and z directions.
+ - Temporal gradient is computed as the difference between consecutive frames.
+ - Output files are saved using `tide_io.savetonifti`.
+ - The function wraps around frames when reaching the end (i.e., next frame
+ for the last frame is the first frame).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> video = np.random.rand(64, 64, 32, 10)
+ >>> mask = np.ones((64, 64, 32), dtype=bool)
+ >>> header = {}
+ >>> output_root = "flow_result"
+ >>> flow_vectors = calc_3d_optical_flow(video, mask, header, output_root)
+ >>> print(flow_vectors.shape)
+ (64, 64, 32, 10, 3)
+ """
  # window Define the window size for Lucas-Kanade method
  # Get the number of frames, height, and width of the video
  singlehdr = copy.deepcopy(flowhdr)
@@ -135,7 +221,42 @@ def calc_3d_optical_flow(video, projmask, flowhdr, outputroot, window_size=3, de
  return flow_vectors


- def phasejolt(phaseimage):
+ def phasejolt(phaseimage: NDArray) -> tuple[NDArray, NDArray, NDArray]:
+ """
+ Compute phase gradient-based metrics including jump, jolt, and laplacian.
+
+ This function calculates three important metrics from a phase image:
+ - jump: average absolute gradient magnitude
+ - jolt: average absolute second-order gradient magnitude
+ - laplacian: sum of second-order partial derivatives
+
+ Parameters
+ ----------
+ phaseimage : NDArray
+ Input phase image array of arbitrary dimensions (typically 2D or 3D).
+
+ Returns
+ -------
+ tuple of NDArray
+ A tuple containing three arrays:
+ - jump: array of same shape as input, representing average absolute gradient
+ - jolt: array of same shape as input, representing average absolute second-order gradient
+ - laplacian: array of same shape as input, representing Laplacian of the phase image
+
+ Notes
+ -----
+ The function computes gradients using numpy's gradient function which applies
+ central differences in the interior and first differences at the boundaries.
+ All metrics are computed in a voxel-wise manner across the entire image.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> phase_img = np.random.rand(10, 10)
+ >>> jump, jolt, laplacian = phasejolt(phase_img)
+ >>> print(jump.shape, jolt.shape, laplacian.shape)
+ (10, 10) (10, 10) (10, 10)
+ """

  # Compute the gradient of the window in x, y, and z directions
  grad_x, grad_y, grad_z = np.gradient(phaseimage)
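The phasejolt hunk above documents three voxelwise metrics (jump, jolt, laplacian) built from first- and second-order gradients. As a quick standalone illustration of those definitions (a sketch using numpy.gradient, not the packaged code):

    import numpy as np

    phase = np.random.rand(16, 16, 16)
    grads = np.gradient(phase)                                       # first derivatives along each axis
    jump = np.mean(np.abs(grads), axis=0)                            # average absolute gradient
    second = [np.gradient(g)[axis] for axis, g in enumerate(grads)]  # pure second derivatives per axis
    jolt = np.mean(np.abs(second), axis=0)                           # average absolute second-order gradient
    laplacian = np.sum(second, axis=0)                               # sum of second partial derivatives
    print(jump.shape, jolt.shape, laplacian.shape)                   # (16, 16, 16) for each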
@@ -156,7 +277,64 @@ def phasejolt(phaseimage):
  return (jump, jolt, laplacian)


- def cardiacsig(thisphase, amps=(1.0, 0.0, 0.0), phases=None, overallphase=0.0):
+ def cardiacsig(
+ thisphase: float | NDArray,
+ amps: tuple | NDArray = (1.0, 0.0, 0.0),
+ phases: NDArray | None = None,
+ overallphase: float = 0.0,
+ ) -> float | NDArray:
+ """
+ Generate a cardiac signal model using harmonic components.
+
+ This function creates a cardiac signal by summing weighted cosine waves
+ at different harmonic frequencies. The signal can be computed for
+ scalar phase values or arrays of phase values.
+
+ Parameters
+ ----------
+ thisphase : float or NDArray
+ The phase value(s) at which to evaluate the cardiac signal.
+ Can be a scalar or array of phase values.
+ amps : tuple or NDArray, optional
+ Amplitude coefficients for each harmonic component. Default is
+ (1.0, 0.0, 0.0) representing the fundamental frequency with
+ amplitude 1.0 and higher harmonics with amplitude 0.0.
+ phases : NDArray or None, optional
+ Phase shifts for each harmonic component. If None, all phase shifts
+ are set to zero. Default is None.
+ overallphase : float, optional
+ Overall phase shift applied to the entire signal. Default is 0.0.
+
+ Returns
+ -------
+ float or NDArray
+ The computed cardiac signal value(s) at the given phase(s).
+ Returns a scalar if input is scalar, or array if input is array.
+
+ Notes
+ -----
+ The cardiac signal is computed as:
+ .. math::
+ s(t) = \\sum_{i=0}^{n-1} A_i \\cos((i+1)\\phi + \\phi_i + \\phi_{overall})
+
+ where:
+ - A_i are the amplitude coefficients
+ - φ is the phase value
+ - φ_i are the harmonic phase shifts
+ - φ_{overall} is the overall phase shift
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> cardiacsig(0.5)
+ 1.0
+
+ >>> cardiacsig(np.linspace(0, 2*np.pi, 100), amps=(1.0, 0.5, 0.2))
+ array([...])
+
+ >>> cardiacsig(1.0, amps=(2.0, 1.0, 0.5), phases=[0.0, np.pi/4, np.pi/2])
+ -0.7071067811865476
+ """
  total = 0.0
  if phases is None:
  phases = amps * 0.0
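The cardiacsig docstring added above defines the signal as s(t) = Σ A_i cos((i+1)φ + φ_i + φ_overall). A minimal standalone sketch of that sum (illustrative only; the names mirror the docstring, not the packaged implementation):

    import numpy as np

    def harmonic_sum(phase, amps=(1.0, 0.0, 0.0), phases=None, overallphase=0.0):
        amps = np.asarray(amps, dtype=float)
        phases = np.zeros_like(amps) if phases is None else np.asarray(phases, dtype=float)
        total = 0.0
        for i, amp in enumerate(amps):
            total += amp * np.cos((i + 1) * np.asarray(phase) + phases[i] + overallphase)
        return total

    print(harmonic_sum(0.0))  # 1.0 at zero phase for the default single-harmonic amplitudes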
@@ -166,26 +344,109 @@ def cardiacsig(thisphase, amps=(1.0, 0.0, 0.0), phases=None, overallphase=0.0):


  def cardiacfromimage(
- normdata_byslice,
- estweights_byslice,
- numslices,
- timepoints,
- tr,
- slicetimes,
- cardprefilter,
- respprefilter,
- notchpct=1.5,
- invertphysiosign=False,
- madnorm=True,
- nprocs=1,
- arteriesonly=False,
- fliparteries=False,
- debug=False,
- appflips_byslice=None,
- verbose=False,
- usemask=True,
- multiplicative=True,
- ):
+ normdata_byslice: NDArray,
+ estweights_byslice: NDArray,
+ numslices: int,
+ timepoints: int,
+ tr: float,
+ slicetimes: NDArray,
+ cardprefilter: object,
+ respprefilter: object,
+ notchpct: float = 1.5,
+ notchrolloff: float = 0.5,
+ invertphysiosign: bool = False,
+ madnorm: bool = True,
+ nprocs: int = 1,
+ arteriesonly: bool = False,
+ fliparteries: bool = False,
+ debug: bool = False,
+ appflips_byslice: NDArray | None = None,
+ verbose: bool = False,
+ usemask: bool = True,
+ multiplicative: bool = True,
+ ) -> tuple[NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]:
+ """
+ Extract cardiac and respiratory signals from 4D fMRI data using slice timing information.
+
+ This function processes preprocessed fMRI data to isolate cardiac and respiratory
+ physiological signals by leveraging slice timing information and filtering techniques.
+ It applies normalization, averaging across slices, and harmonic notch filtering to
+ extract clean physiological time series.
+
+ Parameters
+ ----------
+ normdata_byslice : NDArray
+ Normalized fMRI data organized by slice, shape (timepoints, numslices, timepoints).
+ estweights_byslice : NDArray
+ Estimated weights for each voxel and slice, shape (timepoints, numslices).
+ numslices : int
+ Number of slices in the acquisition.
+ timepoints : int
+ Number of time points in the fMRI time series.
+ tr : float
+ Repetition time (TR) in seconds.
+ slicetimes : NDArray
+ Slice acquisition times relative to the start of the TR, shape (numslices,).
+ cardprefilter : object
+ Cardiac prefilter object with an `apply` method for filtering physiological signals.
+ respprefilter : object
+ Respiratory prefilter object with an `apply` method for filtering physiological signals.
+ notchpct : float, optional
+ Percentage of notch bandwidth, default is 1.5.
+ notchrolloff : float, optional
+ Notch filter rolloff, default is 0.5.
+ invertphysiosign : bool, optional
+ If True, invert the physiological signal sign, default is False.
+ madnorm : bool, optional
+ If True, use median absolute deviation normalization, default is True.
+ nprocs : int, optional
+ Number of processes to use for computation, default is 1.
+ arteriesonly : bool, optional
+ If True, only use arterial signal, default is False.
+ fliparteries : bool, optional
+ If True, flip the arterial signal, default is False.
+ debug : bool, optional
+ If True, enable debug output, default is False.
+ appflips_byslice : NDArray | None, optional
+ Array of application flips for each slice, default is None.
+ verbose : bool, optional
+ If True, print verbose output, default is False.
+ usemask : bool, optional
+ If True, use masking for valid voxels, default is True.
+ multiplicative : bool, optional
+ If True, apply multiplicative normalization, default is True.
+
+ Returns
+ -------
+ tuple[NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]
+ - `hirescardtc`: High-resolution cardiac time course.
+ - `cardnormfac`: Normalization factor for cardiac signal.
+ - `hiresresptc`: High-resolution respiratory time course.
+ - `respnormfac`: Normalization factor for respiratory signal.
+ - `slicesamplerate`: Slice sampling rate in Hz.
+ - `numsteps`: Number of unique slice times.
+ - `sliceoffsets`: Slice offsets relative to TR.
+ - `cycleaverage`: Average signal per slice time step.
+ - `slicenorms`: Slice-wise normalization factors.
+
+ Notes
+ -----
+ - The function assumes that `normdata_byslice` and `estweights_byslice` are properly
+ preprocessed and aligned with slice timing information.
+ - The cardiac and respiratory signals are extracted using harmonic notch filtering
+ and prefiltering steps.
+ - The returned time courses are normalized using median absolute deviation (MAD) unless
+ `madnorm` is set to False.
+
+ Examples
+ --------
+ >>> # Assuming all inputs are prepared
+ >>> card_signal, card_norm, resp_signal, resp_norm, samplerate, numsteps, \
+ ... sliceoffsets, cycleavg, slicenorms = cardiacfromimage(
+ ... normdata_byslice, estweights_byslice, numslices, timepoints,
+ ... tr, slicetimes, cardprefilter, respprefilter
+ ... )
+ """
  # find out what timepoints we have, and their spacing
  numsteps, minstep, sliceoffsets = tide_io.sliceinfo(slicetimes, tr)
  print(
@@ -291,16 +552,131 @@
  )


- def theCOM(X, data):
+ def theCOM(X: NDArray, data: NDArray) -> float:
+ """
+ Calculate the center of mass of a system of particles.
+
+ Parameters
+ ----------
+ X : NDArray
+ Array of positions (coordinates) of particles. Shape should be (n_particles, n_dimensions).
+ data : NDArray
+ Array of mass values for each particle. Shape should be (n_particles,).
+
+ Returns
+ -------
+ float
+ The center of mass of the system.
+
+ Notes
+ -----
+ The center of mass is calculated using the formula:
+ COM = Σ(m_i * x_i) / Σ(m_i)
+
+ where m_i are the masses and x_i are the positions of particles.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> positions = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> masses = np.array([1, 2, 3])
+ >>> com = theCOM(positions, masses)
+ >>> print(com)
+ 3.3333333333333335
+ """
  # return the center of mass
  return np.sum(X * data) / np.sum(data)


- def savgolsmooth(data, smoothlen=101, polyorder=3):
+ def savgolsmooth(data: NDArray, smoothlen: int = 101, polyorder: int = 3) -> NDArray:
+ """
+ Apply Savitzky-Golay filter to smooth data.
+
+ This function applies a Savitzky-Golay filter to smooth the input data using
+ a polynomial fit. The filter preserves higher moments of the data better than
+ simple moving averages, making it particularly useful for smoothing noisy data
+ while preserving peak shapes and heights.
+
+ Parameters
+ ----------
+ data : NDArray
+ Input data to be smoothed. Can be 1D or 2D array.
+ smoothlen : int, optional
+ Length of the filter window (i.e., the number of coefficients).
+ Must be a positive odd integer. Default is 101.
+ polyorder : int, optional
+ Order of the polynomial used to fit the samples. Must be less than
+ `smoothlen`. Default is 3.
+
+ Returns
+ -------
+ NDArray
+ Smoothed data with the same shape as the input `data`.
+
+ Notes
+ -----
+ The Savitzky-Golay filter is a digital filter that smooths data by fitting
+ a polynomial of specified order to a sliding window of data points. It is
+ particularly effective at preserving the shape and features of the original
+ data while removing noise.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> data = np.random.randn(100)
+ >>> smoothed = savgolsmooth(data, smoothlen=21, polyorder=3)
+
+ >>> # For 2D data
+ >>> data_2d = np.random.randn(50, 10)
+ >>> smoothed_2d = savgolsmooth(data_2d, smoothlen=11, polyorder=2)
+ """
  return savgol_filter(data, smoothlen, polyorder)


- def getperiodic(inputdata, Fs, fundfreq, ncomps=1, width=0.4, debug=False):
+ def getperiodic(
+ inputdata: NDArray,
+ Fs: float,
+ fundfreq: float,
+ ncomps: int = 1,
+ width: float = 0.4,
+ debug: bool = False,
+ ) -> NDArray:
+ """
+ Apply a periodic filter to extract harmonic components from input data.
+
+ This function applies a non-causal filter to isolate and extract periodic
+ components of a signal based on a fundamental frequency and number of
+ harmonics. It uses an arbitrary filter design to define stopband and passband
+ frequencies for each harmonic component.
+
+ Parameters
+ ----------
+ inputdata : NDArray
+ Input signal data to be filtered.
+ Fs : float
+ Sampling frequency of the input signal (Hz).
+ fundfreq : float
+ Fundamental frequency of the periodic signal (Hz).
+ ncomps : int, optional
+ Number of harmonic components to extract. Default is 1.
+ width : float, optional
+ Width parameter controlling the bandwidth of each harmonic filter.
+ Default is 0.4.
+ debug : bool, optional
+ If True, print debug information during processing. Default is False.
+
+ Returns
+ -------
+ NDArray
+ Filtered output signal containing the specified harmonic components.
+
+ Notes
+ -----
+ The function reduces the number of components (`ncomps`) if the highest
+ harmonic exceeds the Nyquist frequency (Fs/2). Each harmonic is filtered
+ using an arbitrary filter with stopband and passband frequencies defined
+ based on the `width` parameter.
+ """
  outputdata = inputdata * 0.0
  lowerdist = fundfreq - fundfreq / (1.0 + width)
  upperdist = fundfreq * width
@@ -325,13 +701,56 @@ def getperiodic(inputdata, Fs, fundfreq, ncomps=1, width=0.4, debug=False):


  def getcardcoeffs(
- cardiacwaveform,
- slicesamplerate,
- minhr=40.0,
- maxhr=140.0,
- smoothlen=101,
- debug=False,
- ):
+ cardiacwaveform: NDArray,
+ slicesamplerate: float,
+ minhr: float = 40.0,
+ maxhr: float = 140.0,
+ smoothlen: int = 101,
+ debug: bool = False,
+ ) -> float:
+ """
+ Compute the fundamental cardiac frequency from a cardiac waveform using spectral analysis.
+
+ This function estimates the heart rate (in beats per minute) from a given cardiac waveform
+ by performing a Welch periodogram and applying a smoothing filter to identify the dominant
+ frequency component. The result is returned as a frequency value in Hz, which can be
+ converted to BPM by multiplying by 60.
+
+ Parameters
+ ----------
+ cardiacwaveform : NDArray
+ Input cardiac waveform signal as a 1D numpy array.
+ slicesamplerate : float
+ Sampling rate of the input waveform in Hz.
+ minhr : float, optional
+ Minimum allowed heart rate in BPM. Default is 40.0.
+ maxhr : float, optional
+ Maximum allowed heart rate in BPM. Default is 140.0.
+ smoothlen : int, optional
+ Length of the Savitzky-Golay filter window for smoothing the spectrum.
+ Default is 101.
+ debug : bool, optional
+ If True, print intermediate debug information including initial and final
+ frequency estimates. Default is False.
+
+ Returns
+ -------
+ float
+ Estimated fundamental cardiac frequency in Hz.
+
+ Notes
+ -----
+ The function applies a Hamming window to the input signal before spectral analysis.
+ It removes spectral components outside the physiological range (defined by `minhr`
+ and `maxhr`) and uses Savitzky-Golay smoothing to detect the peak frequency.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> waveform = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000))
+ >>> freq = getcardcoeffs(waveform, slicesamplerate=100)
+ >>> print(f"Estimated heart rate: {freq * 60:.2f} BPM")
+ """
  if len(cardiacwaveform) > 1024:
  thex, they = welch(cardiacwaveform, slicesamplerate, nperseg=1024)
  else:
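The getcardcoeffs docstring above describes estimating the fundamental cardiac frequency from a Welch periodogram restricted to a physiological heart-rate band. A rough, self-contained illustration of that peak-picking idea on synthetic data (not the packaged routine, which also applies windowing and Savitzky-Golay smoothing):

    import numpy as np
    from scipy.signal import welch

    fs = 25.0
    t = np.arange(0, 60, 1.0 / fs)
    wave = np.sin(2.0 * np.pi * 1.1 * t)                       # synthetic ~66 BPM cardiac waveform
    freqs, power = welch(wave, fs, nperseg=1024)
    band = (freqs >= 40.0 / 60.0) & (freqs <= 140.0 / 60.0)    # minhr/maxhr limits converted to Hz
    peakfreq = freqs[band][np.argmax(power[band])]
    print(f"peak frequency: {peakfreq:.2f} Hz (~{peakfreq * 60.0:.0f} BPM)")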
@@ -366,10 +785,59 @@


  def _procOneVoxelDetrend(
- vox,
- voxelargs,
+ vox: int,
+ voxelargs: tuple,
  **kwargs,
- ):
+ ) -> tuple[int, NDArray]:
+ """
+ Detrend fMRI voxel data for a single voxel.
+
+ This function applies detrending to fMRI voxel data using the tide_fit.detrend
+ function. It supports both linear and polynomial detrending with optional
+ mean centering.
+
+ Parameters
+ ----------
+ vox : int
+ Voxel index identifier.
+ voxelargs : tuple
+ Tuple containing fMRI voxel data as the first element. Expected format:
+ (fmri_voxeldata,)
+ **kwargs : dict
+ Additional keyword arguments for detrending options:
+ - detrendorder : int, optional
+ Order of the detrend polynomial (default: 1 for linear detrend)
+ - demean : bool, optional
+ If True, remove the mean from the data (default: False)
+ - debug : bool, optional
+ If True, print debug information (default: False)
+
+ Returns
+ -------
+ tuple
+ A tuple containing:
+ - vox : int
+ The original voxel index
+ - detrended_voxeldata : ndarray
+ The detrended fMRI voxel data with the same shape as input
+
+ Notes
+ -----
+ This function uses the tide_fit.detrend function internally for the actual
+ detrending operation. The detrendorder parameter controls the polynomial order
+ of the detrending (0 = mean removal only, 1 = linear detrend, 2 = quadratic detrend, etc.).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from rapidtide.fit import detrend
+ >>> data = np.random.randn(100)
+ >>> result = _procOneVoxelDetrend(0, (data,), detrendorder=1, demean=True)
+ >>> print(result[0]) # voxel index
+ 0
+ >>> print(result[1].shape) # detrended data shape
+ (100,)
+ """
  # unpack arguments
  options = {
  "detrendorder": 1,
@@ -392,34 +860,159 @@ def _procOneVoxelDetrend(
  )


- def _packDetrendvoxeldata(voxnum, voxelargs):
+ def _packDetrendvoxeldata(voxnum: int, voxelargs: list) -> list[NDArray]:
+ """
+ Extract voxel data for a specific voxel number from voxel arguments.
+
+ Parameters
+ ----------
+ voxnum : int
+ The voxel number to extract data for.
+ voxelargs : tuple
+ A tuple containing voxel data arrays, where the first element is
+ expected to be a 2D array with voxel data indexed by [voxel, feature].
+
+ Returns
+ -------
+ list
+ A list containing a single element, which is a 1D array of feature
+ values for the specified voxel number.
+
+ Notes
+ -----
+ This function is designed to extract a single voxel's worth of data
+ from a collection of voxel arguments for further processing in
+ detrending operations.
+
+ Examples
+ --------
+ >>> voxel_data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> result = _packDetrendvoxeldata(1, (voxel_data,))
+ >>> print(result)
+ [[4, 5, 6]]
+ """
  return [(voxelargs[0])[voxnum, :]]


- def _unpackDetrendvoxeldata(retvals, voxelproducts):
+ def _unpackDetrendvoxeldata(retvals: tuple, voxelproducts: list) -> None:
+ """
+ Unpack detrend voxel data by assigning values to voxel products array.
+
+ Parameters
+ ----------
+ retvals : tuple or list
+ Contains two elements where retvals[0] is used as indices and retvals[1]
+ contains the values to be assigned.
+ voxelproducts : list
+ List containing arrays where voxelproducts[0] is the target array that
+ will be modified in-place with the assigned values.
+
+ Returns
+ -------
+ None
+ This function modifies voxelproducts[0] in-place and does not return anything.
+
+ Notes
+ -----
+ This function performs an in-place assignment operation where values from
+ retvals[1] are placed at the specified indices retvals[0] in the first
+ element of voxelproducts list.
+
+ Examples
+ --------
+ >>> retvals = ([0, 1, 2], [10, 20, 30])
+ >>> voxelproducts = [np.zeros(5)]
+ >>> _unpackDetrendvoxeldata(retvals, voxelproducts)
+ >>> print(voxelproducts[0])
+ [10. 20. 30. 0. 0.]
+ """
  (voxelproducts[0])[retvals[0], :] = retvals[1]


  def normalizevoxels(
- fmri_data,
- detrendorder,
- validvoxels,
- time,
- timings,
- LGR=None,
- mpcode=True,
- nprocs=1,
- alwaysmultiproc=False,
- showprogressbar=True,
- chunksize=1000,
- debug=False,
- ):
+ fmri_data: NDArray,
+ detrendorder: int,
+ validvoxels: NDArray,
+ time: object,
+ timings: list,
+ LGR: object | None = None,
+ mpcode: bool = True,
+ nprocs: int = 1,
+ alwaysmultiproc: bool = False,
+ showprogressbar: bool = True,
+ chunksize: int = 1000,
+ debug: bool = False,
+ ) -> tuple[NDArray, NDArray, NDArray, NDArray]:
+ """
+ Normalize fMRI voxel data by detrending and z-scoring.
+
+ This function applies detrending to fMRI data and then normalizes the data
+ using mean and median-based scaling. It supports both single-threaded and
+ multi-threaded processing for detrending.
+
+ Parameters
+ ----------
+ fmri_data : NDArray
+ 2D array of fMRI data with shape (n_voxels, n_timepoints).
+ detrendorder : int
+ Order of detrending to apply. If 0, no detrending is performed.
+ validvoxels : NDArray
+ 1D array of indices indicating which voxels are valid for processing.
+ time : object
+ Module or object with a `time.time()` method for timing operations.
+ timings : list
+ List to append timing information about processing steps.
+ LGR : object, optional
+ Logger object for debugging; default is None.
+ mpcode : bool, optional
+ If True, use multi-processing for detrending; default is True.
+ nprocs : int, optional
+ Number of processes to use in multi-processing; default is 1.
+ alwaysmultiproc : bool, optional
+ If True, always use multi-processing even for small datasets; default is False.
+ showprogressbar : bool, optional
+ If True, show progress bar during voxel processing; default is True.
+ chunksize : int, optional
+ Size of chunks for multi-processing; default is 1000.
+ debug : bool, optional
+ If True, enable debug output; default is False.
+
+ Returns
+ -------
+ tuple of NDArray
+ A tuple containing:
+ - `normdata`: Normalized fMRI data (z-scored).
+ - `demeandata`: Detrended and mean-centered data.
+ - `means`: Mean values for each voxel.
+ - `medians`: Median values for each voxel.
+ - `mads`: Median absolute deviation for each voxel.
+
+ Notes
+ -----
+ - The function modifies `fmri_data` in-place during detrending.
+ - If `detrendorder` is greater than 0, detrending is applied using `tide_fit.detrend`.
+ - Multi-processing is used when `mpcode=True` and the number of voxels exceeds a threshold.
+ - Timing information is appended to the `timings` list.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from tqdm import tqdm
+ >>> fmri_data = np.random.rand(100, 200)
+ >>> validvoxels = np.arange(100)
+ >>> timings = []
+ >>> normdata, demeandata, means, medians, mads = normalizevoxels(
+ ... fmri_data, detrendorder=1, validvoxels=validvoxels,
+ ... time=time, timings=timings
+ ... )
+ """
  print("Normalizing voxels...")
  normdata = fmri_data * 0.0
  demeandata = fmri_data * 0.0
  starttime = time.time()
  # detrend if we are going to
  numspatiallocs = fmri_data.shape[0]
+ # NB: fmri_data is detrended in place
  if detrendorder > 0:
  print("Detrending to order", detrendorder, "...")
  if mpcode:
@@ -484,8 +1077,60 @@ def normalizevoxels(


  def cleanphysio(
- Fs, physiowaveform, cutoff=0.4, thresh=0.2, nyquist=None, iscardiac=True, debug=False
- ):
+ Fs: float,
+ physiowaveform: NDArray,
+ cutoff: float = 0.4,
+ thresh: float = 0.2,
+ nyquist: float | None = None,
+ iscardiac: bool = True,
+ debug: bool = False,
+ ) -> tuple[NDArray, NDArray, NDArray, float]:
+ """
+ Apply filtering and normalization to a physiological waveform to extract a cleaned signal and envelope.
+
+ This function performs bandpass filtering on a physiological signal to detect its envelope,
+ then applies high-pass filtering to remove baseline drift. The waveform is normalized using
+ the envelope to produce a cleaned and standardized signal.
+
+ Parameters
+ ----------
+ Fs : float
+ Sampling frequency of the input waveform in Hz.
+ physiowaveform : NDArray
+ Input physiological waveform signal (1D array).
+ cutoff : float, optional
+ Cutoff frequency for envelope detection, by default 0.4.
+ thresh : float, optional
+ Threshold for envelope normalization, by default 0.2.
+ nyquist : float, optional
+ Nyquist frequency to constrain the high-pass filter, by default None.
+ iscardiac : bool, optional
+ Flag indicating if the signal is cardiac; affects filter type, by default True.
+ debug : bool, optional
+ If True, print debug information during processing, by default False.
+
+ Returns
+ -------
+ tuple[NDArray, NDArray, NDArray, float]
+ A tuple containing:
+ - `filtphysiowaveform`: The high-pass filtered waveform.
+ - `normphysio`: The normalized waveform using the envelope.
+ - `envelope`: The detected envelope of the signal.
+ - `envmean`: The mean of the envelope.
+
+ Notes
+ -----
+ - The function uses `tide_filt.NoncausalFilter` for filtering and `tide_math.envdetect` for envelope detection.
+ - The waveform is normalized using median absolute deviation (MAD) normalization.
+ - The envelope is thresholded to avoid very low values during normalization.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> Fs = 100.0
+ >>> signal = np.random.randn(1000)
+ >>> filtered, normalized, env, env_mean = cleanphysio(Fs, signal)
+ """
  # first bandpass the cardiac signal to calculate the envelope
  if debug:
  print("Entering cleanphysio")
@@ -528,17 +1173,73 @@


  def findbadpts(
- thewaveform,
- nameroot,
- outputroot,
- samplerate,
- infodict,
- thetype="mad",
- retainthresh=0.89,
- mingap=2.0,
- outputlevel=0,
- debug=True,
- ):
+ thewaveform: NDArray,
+ nameroot: str,
+ outputroot: str,
+ samplerate: float,
+ infodict: dict,
+ thetype: str = "mad",
+ retainthresh: float = 0.89,
+ mingap: float = 2.0,
+ outputlevel: int = 0,
+ debug: bool = True,
+ ) -> tuple[NDArray, float | tuple[float, float]]:
+ """
+ Identify bad points in a waveform based on statistical thresholding and gap filling.
+
+ This function detects outliers in a waveform using either the Median Absolute Deviation (MAD)
+ or a fractional value-based method. It then applies gap-filling logic to merge short
+ sequences of bad points into longer ones, based on a minimum gap threshold.
+
+ Parameters
+ ----------
+ thewaveform : NDArray
+ Input waveform data as a 1D numpy array.
+ nameroot : str
+ Root name used for labeling output files and dictionary keys.
+ outputroot : str
+ Root path for writing output files if `outputlevel > 0`.
+ samplerate : float
+ Sampling rate of the waveform in Hz.
+ infodict : dict
+ Dictionary to store metadata about the thresholding method and value.
+ thetype : str, optional
+ Thresholding method to use. Options are:
+ - "mad" (default): Uses Median Absolute Deviation.
+ - "fracval": Uses percentile-based thresholds.
+ retainthresh : float, optional
+ Threshold for retaining data, between 0 and 1. Default is 0.89.
+ mingap : float, optional
+ Minimum gap (in seconds) to consider for merging bad point streaks. Default is 2.0.
+ outputlevel : int, optional
+ Level of output verbosity. If > 0, writes bad point vector to file. Default is 0.
+ debug : bool, optional
+ If True, prints debug information. Default is True.
+
+ Returns
+ -------
+ tuple[NDArray, float | tuple[float, float]]
+ A tuple containing:
+ - `thebadpts`: A 1D numpy array of the same length as `thewaveform`, with 1.0 for bad points and 0.0 for good.
+ - `thresh`: The calculated threshold value(s) used for bad point detection.
+ - If `thetype == "mad"`, `thresh` is a float.
+ - If `thetype == "fracval"`, `thresh` is a tuple of (lower_threshold, upper_threshold).
+
+ Notes
+ -----
+ - The "mad" method uses the median and MAD to compute a sigma-based threshold.
+ - The "fracval" method uses percentiles to define a range and marks values outside
+ that range as bad.
+ - Gap-filling logic merges bad point streaks that are closer than `mingap` seconds.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> waveform = np.random.normal(0, 1, 1000)
+ >>> info = {}
+ >>> badpts, threshold = findbadpts(waveform, "test", "/tmp", 100.0, info, thetype="mad")
+ >>> print(f"Threshold used: {threshold}")
+ """
  # if thetype == 'triangle' or thetype == 'mad':
  if thetype == "mad":
  absdev = np.fabs(thewaveform - np.median(thewaveform))
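The findbadpts docstring above describes flagging outliers from the median absolute deviation of the waveform. A toy version of that thresholding step (the factor of 3 here is arbitrary; the packaged function derives its cutoff from retainthresh and also merges nearby bad-point streaks):

    import numpy as np
    from statsmodels.robust import mad

    waveform = np.random.normal(0.0, 1.0, 1000)
    absdev = np.fabs(waveform - np.median(waveform))
    sigma = mad(waveform)                                 # MAD-based robust sigma estimate
    thebadpts = np.where(absdev > 3.0 * sigma, 1.0, 0.0)  # 1.0 marks a flagged sample
    print(int(thebadpts.sum()), "samples flagged as bad")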
@@ -607,11 +1308,112 @@ def findbadpts(
  return thebadpts


- def approximateentropy(waveform, m, r):
+ def approximateentropy(waveform: NDArray, m: int, r: float) -> float:
+ """
+ Calculate the approximate entropy of a waveform.
+
+ Approximate entropy is a measure of the complexity or irregularity of a time series.
+ It quantifies the likelihood that similar patterns of observations will not be followed
+ by additional similar observations.
+
+ Parameters
+ ----------
+ waveform : array_like
+ Input time series data as a 1D array or list of numerical values.
+ m : int
+ Length of compared run of data. Must be a positive integer.
+ r : float
+ Tolerance parameter. Defines the maximum difference between values to be considered
+ similar. Should be a positive number, typically set to 0.1-0.2 times the standard
+ deviation of the data.
+
+ Returns
+ -------
+ float
+ Approximate entropy value. Lower values indicate more regularity in the data,
+ while higher values indicate more complexity or randomness.
+
+ Notes
+ -----
+ The approximate entropy is calculated using the method described by Pincus (1991).
+ The algorithm computes the logarithm of the ratio of the number of similar patterns
+ of length m to those of length m+1, averaged over all possible patterns.
+
+ This implementation assumes that the input waveform is a 1D array of numerical values.
+ The function is sensitive to the choice of parameters m and r, and results may vary
+ depending on the data characteristics.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> waveform = [1, 2, 3, 4, 5, 4, 3, 2, 1]
+ >>> apen = approximateentropy(waveform, m=2, r=0.1)
+ >>> print(apen)
+ 0.123456789
+
+ >>> # For a more complex signal
+ >>> np.random.seed(42)
+ >>> noisy_signal = np.random.randn(100)
+ >>> apen_noisy = approximateentropy(noisy_signal, m=2, r=0.1)
+ >>> print(apen_noisy)
+ 0.456789123
+ """
+
  def _maxdist(x_i, x_j):
+ """
+ Calculate the maximum absolute difference between corresponding elements of two sequences.
+
+ Parameters
+ ----------
+ x_i : array-like
+ First sequence of numbers.
+ x_j : array-like
+ Second sequence of numbers.
+
+ Returns
+ -------
+ float
+ The maximum absolute difference between corresponding elements of x_i and x_j.
+
+ Notes
+ -----
+ This function computes the Chebyshev distance (also known as the maximum metric) between two vectors.
+ Both sequences must have the same length, otherwise the function will raise a ValueError.
+
+ Examples
+ --------
+ >>> _maxdist([1, 2, 3], [4, 1, 2])
+ 3
+ >>> _maxdist([0, 0], [1, 1])
+ 1
+ """
  return max([abs(ua - va) for ua, va in zip(x_i, x_j)])

  def _phi(m):
+ """
+ Calculate phi value for approximate entropy calculation.
+
+ Parameters
+ ----------
+ m : int
+ Length of template vectors for comparison.
+
+ Returns
+ -------
+ float
+ Phi value representing the approximate entropy.
+
+ Notes
+ -----
+ This function computes the phi value used in approximate entropy calculations.
+ It compares template vectors of length m and calculates the proportion of
+ vectors that are within a tolerance threshold r of each other.
+
+ Examples
+ --------
+ >>> _phi(2)
+ 0.5703489003472879
+ """
  x = [[waveform[j] for j in range(i, i + m - 1 + 1)] for i in range(N - m + 1)]
  C = [len([1 for x_j in x if _maxdist(x_i, x_j) <= r]) / (N - m + 1.0) for x_i in x]
  return (N - m + 1.0) ** (-1) * sum(np.log(C))
@@ -621,7 +1423,51 @@ def approximateentropy(waveform, m, r):
  return abs(_phi(m + 1) - _phi(m))


- def summarizerun(theinfodict, getkeys=False):
+ def summarizerun(theinfodict: dict, getkeys: bool = False) -> str:
+ """
+ Summarize physiological signal quality metrics from a dictionary.
+
+ This function extracts specific signal quality indices from a dictionary
+ containing physiological monitoring data. It can either return the metric
+ values or the corresponding keys depending on the getkeys parameter.
+
+ Parameters
+ ----------
+ theinfodict : dict
+ Dictionary containing physiological signal quality metrics with keys
+ including 'corrcoeff_raw2pleth', 'corrcoeff_filt2pleth', 'E_sqi_mean_pleth',
+ 'E_sqi_mean_bold', 'S_sqi_mean_pleth', 'S_sqi_mean_bold', 'K_sqi_mean_pleth',
+ and 'K_sqi_mean_bold'.
+ getkeys : bool, optional
+ If True, returns a comma-separated string of all metric keys.
+ If False (default), returns a comma-separated string of metric values
+ corresponding to the keys in the dictionary. If a key is missing, an
+ empty string is returned for that position.
+
+ Returns
+ -------
+ str
+ If getkeys=True: comma-separated string of all metric keys.
+ If getkeys=False: comma-separated string of metric values from the dictionary,
+ with empty strings for missing keys.
+
+ Notes
+ -----
+ The function handles missing keys gracefully by returning empty strings
+ for missing metrics rather than raising exceptions.
+
+ Examples
+ --------
+ >>> data = {
+ ... "corrcoeff_raw2pleth": 0.85,
+ ... "E_sqi_mean_pleth": 0.92
+ ... }
+ >>> summarizerun(data)
+ '0.85,,0.92,,,,,'
+
+ >>> summarizerun(data, getkeys=True)
+ 'corrcoeff_raw2pleth,corrcoeff_filt2pleth,E_sqi_mean_pleth,E_sqi_mean_bold,S_sqi_mean_pleth,S_sqi_mean_bold,K_sqi_mean_pleth,K_sqi_mean_bold'
+ """
  keylist = [
  "corrcoeff_raw2pleth",
  "corrcoeff_filt2pleth",
@@ -644,68 +1490,133 @@ def summarizerun(theinfodict, getkeys=False):
  return ",".join(outputline)


- def entropy(waveform):
+ def entropy(waveform: NDArray) -> float:
+ """
+ Calculate the entropy of a waveform.
+
+ Parameters
+ ----------
+ waveform : array-like
+ Input waveform data. Should be a numeric array-like object containing
+ the waveform samples.
+
+ Returns
+ -------
+ float
+ The entropy value of the waveform, computed as -∑(x² * log₂(x²)) where
+ x represents the waveform samples.
+
+ Notes
+ -----
+ This function computes the entropy using the formula -∑(x² * log₂(x²)),
+ where x² represents the squared waveform values. The np.nan_to_num function
+ is used to handle potential NaN values in the logarithm calculation.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> waveform = np.array([0.5, 0.5, 0.5, 0.5])
+ >>> entropy(waveform)
+ 0.0
+ """
  return -np.sum(np.square(waveform) * np.nan_to_num(np.log2(np.square(waveform))))


  def calcplethquality(
- waveform,
- Fs,
- infodict,
- suffix,
- outputroot,
- S_windowsecs=5.0,
- K_windowsecs=60.0,
- E_windowsecs=1.0,
- detrendorder=8,
- outputlevel=0,
- initfile=True,
- debug=False,
- ):
+ waveform: NDArray,
+ Fs: float,
+ infodict: dict,
+ suffix: str,
+ outputroot: str,
+ S_windowsecs: float = 5.0,
+ K_windowsecs: float = 60.0,
+ E_windowsecs: float = 1.0,
+ detrendorder: int = 8,
+ outputlevel: int = 0,
+ initfile: bool = True,
+ debug: bool = False,
+ ) -> tuple[NDArray, NDArray, NDArray]:
  """
+ Calculate windowed skewness, kurtosis, and entropy quality metrics for a plethysmogram.
+
+ This function computes three quality metrics — skewness (S), kurtosis (K), and entropy (E) —
+ over sliding windows of the input waveform. These metrics are used to assess the quality
+ of photoplethysmogram (PPG) signals based on the method described in Elgendi (2016).

  Parameters
  ----------
- waveform: array-like
- The cardiac waveform to be assessed
- Fs: float
- The sample rate of the data
- S_windowsecs: float
- Skewness window duration in seconds. Defaults to 5.0 (optimal for discrimination of "good" from "acceptable"
- and "unfit" according to Elgendi)
- K_windowsecs: float
- Skewness window duration in seconds. Defaults to 2.0 (after Selveraj)
- E_windowsecs: float
- Entropy window duration in seconds. Defaults to 0.5 (after Selveraj)
- detrendorder: int
- Order of detrending polynomial to apply to plethysmogram.
- debug: boolean
- Turn on extended output
+ waveform : array-like
+ The cardiac waveform to be assessed.
+ Fs : float
+ The sample rate of the data in Hz.
+ infodict : dict
+ Dictionary to store computed quality metrics.
+ suffix : str
+ Suffix to append to metric keys in `infodict`.
+ outputroot : str
+ Root name for output files if `outputlevel > 1`.
+ S_windowsecs : float, optional
+ Skewness window duration in seconds. Default is 5.0 seconds.
+ K_windowsecs : float, optional
+ Kurtosis window duration in seconds. Default is 60.0 seconds.
+ E_windowsecs : float, optional
+ Entropy window duration in seconds. Default is 1.0 seconds.
+ detrendorder : int, optional
+ Order of the detrending polynomial applied to the plethysmogram. Default is 8.
+ outputlevel : int, optional
+ Level of output verbosity. If > 1, time-series data will be written to files.
+ initfile : bool, optional
+ Whether to initialize output files. Default is True.
+ debug : bool, optional
+ If True, print debug information. Default is False.

  Returns
  -------
- S_sqi_mean: float
- The mean value of the quality index over all time
- S_std_mean: float
- The standard deviation of the quality index over all time
- S_waveform: array
- The quality metric over all timepoints
- K_sqi_mean: float
- The mean value of the quality index over all time
- K_std_mean: float
- The standard deviation of the quality index over all time
- K_waveform: array
- The quality metric over all timepoints
- E_sqi_mean: float
- The mean value of the quality index over all time
- E_std_mean: float
- The standard deviation of the quality index over all time
- E_waveform: array
- The quality metric over all timepoints
-
-
- Calculates the windowed skewness, kurtosis, and entropy quality metrics described in Elgendi, M.
- "Optimal Signal Quality Index for Photoplethysmogram Signals". Bioengineering 2016, Vol. 3, Page 21 3, 21 (2016).
+ tuple
+ A tuple containing the following elements in order:
+
+ - S_sqi_mean : float
+ Mean value of the skewness quality index over all time.
+ - S_sqi_std : float
+ Standard deviation of the skewness quality index over all time.
+ - S_waveform : array
+ The skewness quality metric over all timepoints.
+ - K_sqi_mean : float
+ Mean value of the kurtosis quality index over all time.
+ - K_sqi_std : float
+ Standard deviation of the kurtosis quality index over all time.
+ - K_waveform : array
+ The kurtosis quality metric over all timepoints.
+ - E_sqi_mean : float
+ Mean value of the entropy quality index over all time.
+ - E_sqi_std : float
+ Standard deviation of the entropy quality index over all time.
+ - E_waveform : array
+ The entropy quality metric over all timepoints.
+
+ Notes
+ -----
+ The function applies a detrending polynomial to the input waveform before computing
+ the quality metrics. Window sizes are rounded to the nearest odd number of samples
+ to ensure symmetric windows.
+
+ References
+ ----------
+ Elgendi, M. "Optimal Signal Quality Index for Photoplethysmogram Signals".
+ Bioengineering 2016, Vol. 3, Page 21 (2016).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.stats import skew, kurtosis
+ >>> waveform = np.random.randn(1000)
+ >>> Fs = 100.0
+ >>> infodict = {}
+ >>> suffix = "_test"
+ >>> outputroot = "test_output"
+ >>> S_mean, S_std, S_wave, K_mean, K_std, K_wave, E_mean, E_std, E_wave = calcplethquality(
+ ... waveform, Fs, infodict, suffix, outputroot
+ ... )
  """
  # detrend the waveform
  dt_waveform = tide_fit.detrend(waveform, order=detrendorder, demean=True)
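For reference, the entropy that entropy() computes and calcplethquality reports per window is the documented -Σ x² log₂(x²). A two-line standalone check of that formula on an arbitrary toy vector (a sketch, not package code):

    import numpy as np

    w = np.array([0.5, 0.5, 0.5, 0.5])
    ent = -np.sum(np.square(w) * np.nan_to_num(np.log2(np.square(w))))
    print(ent)  # each term contributes 0.25 * 2, so the total is 2.0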
@@ -788,21 +1699,95 @@ def calcplethquality(
788
1699
 
789
1700
 
790
1701
  def getphysiofile(
791
- waveformfile,
792
- inputfreq,
793
- inputstart,
794
- slicetimeaxis,
795
- stdfreq,
796
- stdpoints,
797
- envcutoff,
798
- envthresh,
799
- timings,
800
- outputroot,
801
- slop=0.25,
802
- outputlevel=0,
803
- iscardiac=True,
804
- debug=False,
805
- ):
1702
+ waveformfile: str,
1703
+ inputfreq: float,
1704
+ inputstart: float | None,
1705
+ slicetimeaxis: NDArray,
1706
+ stdfreq: float,
1707
+ stdpoints: int,
1708
+ envcutoff: float,
1709
+ envthresh: float,
1710
+ timings: list,
1711
+ outputroot: str,
1712
+ slop: float = 0.25,
1713
+ outputlevel: int = 0,
1714
+ iscardiac: bool = True,
1715
+ debug: bool = False,
1716
+ ) -> tuple[NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]:
1717
+ """
1718
+ Read, process, and resample physiological waveform data.
1719
+
1720
+ This function reads a physiological signal from a text file, filters and normalizes
1721
+ the signal, and resamples it to both slice-specific and standard time resolutions.
1722
+ It supports cardiac and non-cardiac signal processing, with optional debugging and
1723
+ output writing.
1724
+
1725
+ Parameters
1726
+ ----------
1727
+ waveformfile : str
1728
+ Path to the input physiological waveform file.
1729
+ inputfreq : float
1730
+ Sampling frequency of the input waveform. If negative, the frequency is
1731
+ inferred from the file.
1732
+ inputstart : float or None
1733
+ Start time of the input waveform. If None, defaults to 0.0.
1734
+ slicetimeaxis : array_like
1735
+ Time axis corresponding to slice acquisition times.
1736
+ stdfreq : float
1737
+ Standard sampling frequency for resampling.
1738
+ stdpoints : int
1739
+ Number of points for the standard time axis.
1740
+ envcutoff : float
1741
+ Cutoff frequency for envelope filtering.
1742
+ envthresh : float
1743
+ Threshold for envelope normalization.
1744
+ timings : list
1745
+ List to append timing information for logging.
1746
+ outputroot : str
1747
+ Root name for output files.
1748
+ slop : float, optional
1749
+ Tolerance for time alignment check (default is 0.25).
1750
+ outputlevel : int, optional
1751
+ Level of output writing (default is 0).
1752
+ iscardiac : bool, optional
1753
+ Flag indicating if the signal is cardiac (default is True).
1754
+ debug : bool, optional
1755
+ Enable debug printing (default is False).
1756
+
1757
+ Returns
1758
+ -------
1759
+ waveform_sliceres : ndarray
1760
+ Physiological signal resampled to slice time resolution.
1761
+ waveform_stdres : ndarray
1762
+ Physiological signal resampled to standard time resolution.
1763
+ inputfreq : float
1764
+ The actual input sampling frequency used.
1765
+ len(waveform_fullres) : int
1766
+ Length of the original waveform data.
1767
+
1768
+ Notes
1769
+ -----
1770
+ - The function reads the waveform file using `tide_io.readvectorsfromtextfile`.
1771
+ - Signal filtering and normalization are performed using `cleanphysio`.
1772
+ - Resampling is done using `tide_resample.doresample`.
1773
+ - If `iscardiac` is True, raw and cleaned signals are saved to files when `outputlevel > 1`.
1774
+
1775
+ Examples
1776
+ --------
1777
+ >>> waveform_sliceres, waveform_stdres, freq, length = getphysiofile(
1778
+ ... waveformfile="physio.txt",
1779
+ ... inputfreq=100.0,
1780
+ ... inputstart=0.0,
1781
+ ... slicetimeaxis=np.linspace(0, 10, 50),
1782
+ ... stdfreq=25.0,
1783
+ ... stdpoints=100,
1784
+ ... envcutoff=0.5,
1785
+ ... envthresh=0.1,
1786
+ ... timings=[],
1787
+ ... outputroot="output",
1788
+ ... debug=False
1789
+ ... )
1790
+ """
806
1791
  if debug:
807
1792
  print("Entering getphysiofile")
808
1793
  print("Reading physiological signal from file")
@@ -928,7 +1913,62 @@ def getphysiofile(
928
1913
  return waveform_sliceres, waveform_stdres, inputfreq, len(waveform_fullres)
929
1914
 
930
1915
 
931
- def readextmask(thefilename, nim_hdr, xsize, ysize, numslices, debug=False):
1916
+ def readextmask(
1917
+ thefilename: str,
1918
+ nim_hdr: dict,
1919
+ xsize: int,
1920
+ ysize: int,
1921
+ numslices: int,
1922
+ debug: bool = False,
1923
+ ) -> NDArray:
1924
+ """
1925
+ Read and validate external mask from NIfTI file.
1926
+
1927
+ This function reads a mask from a NIfTI file and performs validation checks
1928
+ to ensure compatibility with the input fMRI data dimensions. The mask must
1929
+ have exactly 3 dimensions and match the spatial dimensions of the fMRI data.
1930
+
1931
+ Parameters
1932
+ ----------
1933
+ thefilename : str
1934
+ Path to the NIfTI file containing the mask
1935
+ nim_hdr : dict
1936
+ Header information from the fMRI data
1937
+ xsize : int
1938
+ X dimension size of the fMRI data
1939
+ ysize : int
1940
+ Y dimension size of the fMRI data
1941
+ numslices : int
1942
+ Number of slices in the fMRI data
1943
+ debug : bool, optional
1944
+ If True, print debug information about mask dimensions (default is False)
1945
+
1946
+ Returns
1947
+ -------
1948
+ NDArray
1949
+ The mask data array with shape (xsize, ysize, numslices)
1950
+
1951
+ Raises
1952
+ ------
1953
+ ValueError
1954
+ If mask dimensions do not match fMRI data dimensions or if mask has
1955
+ more than 3 dimensions
1956
+
1957
+ Notes
1958
+ -----
1959
+ The function performs the following validation checks:
1960
+ 1. Reads mask from NIfTI file using tide_io.readfromnifti
1961
+ 2. Parses NIfTI dimensions using tide_io.parseniftidims
1962
+ 3. Validates that mask spatial dimensions match fMRI data dimensions
1963
+ 4. Ensures mask has exactly 3 dimensions (no time dimension allowed)
1964
+
1965
+ Examples
1966
+ --------
1967
+ >>> import numpy as np
1968
+ >>> mask_data = readextmask('mask.nii', fmri_header, 64, 64, 30)
1969
+ >>> print(mask_data.shape)
1970
+ (64, 64, 30)
1971
+ """
932
1972
  (
933
1973
  extmask,
934
1974
  extmask_data,
@@ -953,32 +1993,60 @@ def readextmask(thefilename, nim_hdr, xsize, ysize, numslices, debug=False):
953
1993
  return extmask_data
954
1994
 
955
1995
 
956
- def checkcardmatch(reference, candidate, samplerate, refine=True, zeropadding=0, debug=False):
1996
+ def checkcardmatch(
1997
+ reference: NDArray,
1998
+ candidate: NDArray,
1999
+ samplerate: float,
2000
+ refine: bool = True,
2001
+ zeropadding: int = 0,
2002
+ debug: bool = False,
2003
+ ) -> tuple[float, float, int]:
957
2004
  """
2005
+ Compare two cardiac waveforms using cross-correlation and peak fitting.
2006
+
2007
+ This function performs a cross-correlation between a reference and a candidate
2008
+ cardiac waveform after applying a non-causal cardiac filter. It then fits a
2009
+ Gaussian to the cross-correlation peak to estimate the time delay and
2010
+ correlation strength.
958
2011
 
959
2012
  Parameters
960
2013
  ----------
961
- reference: 1D numpy array
962
- The cardiac waveform to compare to
963
- candidate: 1D numpy array
964
- The cardiac waveform to be assessed
965
- samplerate: float
966
- The sample rate of the data in Hz
967
- refine: bool, optional
968
- Whether to refine the peak fit. Default is True.
969
- zeropadding: int, optional
970
- Specify the length of correlation padding to use.
971
- debug: bool, optional
972
- Output additional information for debugging
2014
+ reference : 1D numpy array
2015
+ The cardiac waveform to compare to.
2016
+ candidate : 1D numpy array
2017
+ The cardiac waveform to be assessed.
2018
+ samplerate : float
2019
+ The sample rate of the data in Hz.
2020
+ refine : bool, optional
2021
+ Whether to refine the peak fit. Default is True.
2022
+ zeropadding : int, optional
2023
+ Specify the length of correlation padding to use. Default is 0.
2024
+ debug : bool, optional
2025
+ Output additional information for debugging. Default is False.
973
2026
 
974
2027
  Returns
975
2028
  -------
976
- maxval: float
977
- The maximum value of the crosscorrelation function
978
- maxdelay: float
2029
+ maxval : float
2030
+ The maximum value of the crosscorrelation function.
2031
+ maxdelay : float
979
2032
  The time, in seconds, where the maximum crosscorrelation occurs.
980
- failreason: flag
981
- Reason why the fit failed (0 if no failure)
2033
+ failreason : int
2034
+ Reason why the fit failed (0 if no failure).
2035
+
2036
+ Notes
2037
+ -----
2038
+ The function applies a cardiac filter to both waveforms before computing
2039
+ the cross-correlation. A Gaussian fit is used to estimate the peak location
2040
+ and strength within a predefined search range of ±2 seconds around the
2041
+ initial peak.
2042
+
2043
+ Examples
2044
+ --------
2045
+ >>> import numpy as np
2046
+ >>> reference = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000))
2047
+ >>> candidate = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000) + 0.1)
2048
+ >>> maxval, maxdelay, failreason = checkcardmatch(reference, candidate, 100)
2049
+ >>> print(f"Max correlation: {maxval}, Delay: {maxdelay}s")
982
2050
  """
983
2051
  thecardfilt = tide_filt.NoncausalFilter(filtertype="cardiac")
984
2052
  trimlength = np.min([len(reference), len(candidate)])
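The core idea here (normalized cross-correlation followed by locating the peak) can be sketched as below; the actual function additionally applies the cardiac filter and refines the peak with a Gaussian fit, so treat this only as an illustration.

    import numpy as np
    from scipy.signal import correlate, correlation_lags

    def simple_cardmatch(reference, candidate, samplerate):
        n = min(len(reference), len(candidate))
        ref = (reference[:n] - np.mean(reference[:n])) / (np.std(reference[:n]) * n)
        cand = (candidate[:n] - np.mean(candidate[:n])) / np.std(candidate[:n])
        xcorr = correlate(cand, ref, mode="full")
        lags = correlation_lags(n, n, mode="full")
        peakloc = np.argmax(xcorr)
        # peak height approximates the correlation strength; the lag, converted to
        # seconds, approximates the delay (sign convention follows scipy)
        return xcorr[peakloc], lags[peakloc] / samplerate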
@@ -1039,16 +2107,78 @@ def checkcardmatch(reference, candidate, samplerate, refine=True, zeropadding=0,
1039
2107
 
1040
2108
 
1041
2109
  def cardiaccycleaverage(
1042
- sourcephases,
1043
- destinationphases,
1044
- waveform,
1045
- procpoints,
1046
- congridbins,
1047
- gridkernel,
1048
- centric,
1049
- cache=True,
1050
- cyclic=True,
1051
- ):
2110
+ sourcephases: NDArray,
2111
+ destinationphases: NDArray,
2112
+ waveform: NDArray,
2113
+ procpoints: NDArray,
2114
+ congridbins: int,
2115
+ gridkernel: str,
2116
+ centric: bool,
2117
+ cache: bool = True,
2118
+ cyclic: bool = True,
2119
+ ) -> tuple[NDArray, NDArray]:
2120
+ """
2121
+ Compute the average waveform over a cardiac cycle using phase-based resampling.
2122
+
2123
+ This function performs phase-resolved averaging of a waveform signal over a
2124
+ cardiac cycle. It uses a resampling technique to map source phase values to
2125
+ destination phases, accumulating weighted contributions to produce an averaged
2126
+ waveform. The result is normalized and adjusted to remove artifacts from low
2127
+ weight regions.
2128
+
2129
+ Parameters
2130
+ ----------
2131
+ sourcephases : array-like
2132
+ Array of source phase values (in radians) corresponding to the waveform data.
2133
+ destinationphases : array-like
2134
+ Array of destination phase values (in radians) where the averaged waveform
2135
+ will be computed.
2136
+ waveform : array-like
2137
+ Array of waveform values to be averaged.
2138
+ procpoints : array-like
2139
+ Array of indices indicating which points in `waveform` and `sourcephases`
2140
+ should be processed.
2141
+ congridbins : int
2142
+ Number of bins used in the resampling process.
2143
+ gridkernel : str
2144
+ Name of the interpolation kernel used during resampling (e.g., "gaussian" or "kaiser").
2145
+ centric : bool
2146
+ If True, phase values are treated as centric (e.g., centered around 0).
2147
+ If False, phase values are treated as cyclic (e.g., 0 to 2π).
2148
+ cache : bool, optional
2149
+ If True, use cached results for repeated computations (default is True).
2150
+ cyclic : bool, optional
2151
+ If True, treat phase values as cyclic (default is True).
2152
+
2153
+ Returns
2154
+ -------
2155
+ tuple of ndarray
2156
+ A tuple containing:
2157
+ - `rawapp_bypoint`: The normalized averaged waveform values for each
2158
+ destination phase.
2159
+ - `weight_bypoint`: The total weight for each destination phase.
2160
+
2161
+ Notes
2162
+ -----
2163
+ The function applies a threshold to weights: only points with weights greater
2164
+ than 1/50th of the maximum weight are considered valid. These points are then
2165
+ normalized and shifted to start from zero.
2166
+
2167
+ Examples
2168
+ --------
2169
+ >>> import numpy as np
2170
+ >>> sourcephases = np.linspace(0, 2*np.pi, 100)
2171
+ >>> destinationphases = np.linspace(0, 2*np.pi, 50)
2172
+ >>> waveform = np.sin(sourcephases)
2173
+ >>> procpoints = np.arange(100)
2174
+ >>> congridbins = 10
2175
+ >>> gridkernel = "gaussian"
2176
+ >>> centric = False
2177
+ >>> avg_waveform, weights = cardiaccycleaverage(
2178
+ ... sourcephases, destinationphases, waveform, procpoints,
2179
+ ... congridbins, gridkernel, centric
2180
+ ... )
2181
+ """
1052
2182
  rawapp_bypoint = np.zeros(len(destinationphases), dtype=np.float64)
1053
2183
  weight_bypoint = np.zeros(len(destinationphases), dtype=np.float64)
1054
2184
  for t in procpoints:
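A stripped-down, nearest-bin analogue of this phase-resolved averaging (the real code distributes each sample over neighboring bins with the congrid kernel), assuming phases in [0, 2*pi):

    import numpy as np

    def phase_average(sourcephases, waveform, nbins):
        binwidth = 2.0 * np.pi / nbins
        bins = (np.asarray(sourcephases) % (2.0 * np.pi) / binwidth).astype(int) % nbins
        sums = np.zeros(nbins)
        weights = np.zeros(nbins)
        np.add.at(sums, bins, waveform)
        np.add.at(weights, bins, 1.0)
        # same 1/50th-of-maximum weight threshold mentioned in the Notes above
        valid = weights > np.max(weights) / 50.0
        averaged = np.zeros(nbins)
        averaged[valid] = sums[valid] / weights[valid]
        return averaged, weights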
@@ -1076,7 +2206,47 @@ def cardiaccycleaverage(
1076
2206
  return rawapp_bypoint, weight_bypoint
1077
2207
 
1078
2208
 
1079
- def circularderivs(timecourse):
2209
+ def circularderivs(timecourse: NDArray) -> tuple[float, int, float, int]:
2210
+ """
2211
+ Compute circular first derivatives and their extremal values.
2212
+
2213
+ This function calculates the circular first derivative of a time course,
2214
+ which is the difference between consecutive elements with the last element
2215
+ wrapped around to the first. It then returns the maximum and minimum values
2216
+ of these derivatives along with their indices.
2217
+
2218
+ Parameters
2219
+ ----------
2220
+ timecourse : array-like
2221
+ Input time course data as a 1D array or sequence of numerical values.
2222
+
2223
+ Returns
2224
+ -------
2225
+ tuple
2226
+ A tuple containing four elements:
2227
+ - max_derivative : float
2228
+ The maximum value of the circular first derivative
2229
+ - argmax_index : int
2230
+ The index of the maximum derivative value
2231
+ - min_derivative : float
2232
+ The minimum value of the circular first derivative
2233
+ - argmin_index : int
2234
+ The index of the minimum derivative value
2235
+
2236
+ Notes
2237
+ -----
2238
+ The circular first derivative is computed as:
2239
+ ``first_deriv[i] = timecourse[i+1] - timecourse[i]`` for i < n-1,
2240
+ and ``first_deriv[n-1] = timecourse[0] - timecourse[n-1]``.
2241
+
2242
+ Examples
2243
+ --------
2244
+ >>> import numpy as np
2245
+ >>> timecourse = [1, 2, 3, 2, 1]
2246
+ >>> max_val, max_idx, min_val, min_idx = circularderivs(timecourse)
2247
+ >>> print(f"Max derivative: {max_val} at index {max_idx}")
2248
+ >>> print(f"Min derivative: {min_val} at index {min_idx}")
2249
+ """
1080
2250
  firstderiv = np.diff(timecourse, append=[timecourse[0]])
1081
2251
  return (
1082
2252
  np.max(firstderiv),
@@ -1087,6 +2257,79 @@ def circularderivs(timecourse):
1087
2257
 
1088
2258
 
1089
2259
  def _procOnePhaseProject(slice, sliceargs, **kwargs):
2260
+ """
2261
+ Process a single phase project for fMRI data resampling and averaging.
2262
+
2263
+ This function performs temporal resampling of fMRI data along the phase dimension
2264
+ using a congrid-based interpolation scheme. It updates weight, raw application,
2265
+ and cine data arrays based on the resampled values.
2266
+
2267
+ Parameters
2268
+ ----------
2269
+ slice : int
2270
+ The slice index to process.
2271
+ sliceargs : tuple
2272
+ A tuple containing the following elements:
2273
+ - validlocslist : list of arrays
2274
+ List of valid location indices for each slice.
2275
+ - proctrs : array-like
2276
+ Time indices to process.
2277
+ - demeandata_byslice : ndarray
2278
+ Demeaned fMRI data organized by slice and time.
2279
+ - fmri_data_byslice : ndarray
2280
+ Raw fMRI data organized by slice and time.
2281
+ - outphases : array-like
2282
+ Output phase values for resampling.
2283
+ - cardphasevals : ndarray
2284
+ Cardiac phase values for each slice and timepoint.
2285
+ - congridbins : int
2286
+ Number of bins for congrid interpolation.
2287
+ - gridkernel : str
2288
+ Interpolation kernel to use.
2289
+ - weights_byslice : ndarray
2290
+ Weight array to be updated.
2291
+ - cine_byslice : ndarray
2292
+ Cine data array to be updated.
2293
+ - destpoints : int
2294
+ Number of destination points.
2295
+ - rawapp_byslice : ndarray
2296
+ Raw application data array to be updated.
2297
+ **kwargs : dict
2298
+ Additional options to override default settings:
2299
+ - cache : bool, optional
2300
+ Whether to use caching in congrid (default: True).
2301
+ - debug : bool, optional
2302
+ Whether to enable debug mode (default: False).
2303
+
2304
+ Returns
2305
+ -------
2306
+ tuple
2307
+ A tuple containing:
2308
+ - slice : int
2309
+ The input slice index.
2310
+ - rawapp_byslice : ndarray
2311
+ Updated raw application data for the slice.
2312
+ - cine_byslice : ndarray
2313
+ Updated cine data for the slice.
2314
+ - weights_byslice : ndarray
2315
+ Updated weights for the slice.
2316
+ - validlocs : array-like
2317
+ Valid location indices for the slice.
2318
+
2319
+ Notes
2320
+ -----
2321
+ This function modifies the input arrays `weights_byslice`, `rawapp_byslice`,
2322
+ and `cine_byslice` in-place. The function assumes that the data has already
2323
+ been preprocessed and organized into slices and time points.
2324
+
2325
+ Examples
2326
+ --------
2327
+ >>> slice_idx = 0
2328
+ >>> args = (validlocslist, proctrs, demeandata_byslice, fmri_data_byslice,
2329
+ ... outphases, cardphasevals, congridbins, gridkernel,
2330
+ ... weights_byslice, cine_byslice, destpoints, rawapp_byslice)
2331
+ >>> result = _procOnePhaseProject(slice_idx, args, cache=False)
2332
+ """
1090
2333
  options = {
1091
2334
  "cache": True,
1092
2335
  "debug": False,
@@ -1150,6 +2393,34 @@ def _procOnePhaseProject(slice, sliceargs, **kwargs):
1150
2393
 
1151
2394
 
1152
2395
  def _packslicedataPhaseProject(slicenum, sliceargs):
2396
+ """
2397
+ Pack slice data for phase projection.
2398
+
2399
+ This function takes a slice number and slice arguments, then returns a
2400
+ flattened list containing all the slice arguments in order.
2401
+
2402
+ Parameters
2403
+ ----------
2404
+ slicenum : int
2405
+ The slice number identifier.
2406
+ sliceargs : list or tuple
2407
+ Collection of slice arguments to be packed into a flat list.
2408
+
2409
+ Returns
2410
+ -------
2411
+ list
2412
+ A list containing all elements from sliceargs in the same order.
2413
+
2414
+ Notes
2415
+ -----
2416
+ This function essentially performs a flattening operation on the slice
2417
+ arguments, converting them into a fixed-length list format.
2418
+
2419
+ Examples
2420
+ --------
2421
+ >>> _packslicedataPhaseProject(0, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
2422
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
2423
+ """
1153
2424
  return [
1154
2425
  sliceargs[0],
1155
2426
  sliceargs[1],
@@ -1167,12 +2438,97 @@ def _packslicedataPhaseProject(slicenum, sliceargs):
1167
2438
 
1168
2439
 
1169
2440
  def _unpackslicedataPhaseProject(retvals, voxelproducts):
2441
+ """
2442
+ Unpack slice data for phase project operation.
2443
+
2444
+ This function assigns sliced data from retvals to corresponding voxelproducts
2445
+ based on index mappings. It performs three simultaneous assignments using
2446
+ slicing operations on 3D arrays.
2447
+
2448
+ Parameters
2449
+ ----------
2450
+ retvals : tuple of array-like
2451
+ A tuple containing 5 elements where:
2452
+ - retvals[0], retvals[1], retvals[2], retvals[3], retvals[4]
2453
+ - retvals[4] is used as row index for slicing
2454
+ - retvals[0] is used as column index for slicing
2455
+ voxelproducts : list of array-like
2456
+ A list of 3 arrays that will be modified in-place with the sliced data.
2457
+ Each array is expected to be 3D and will be indexed using retvals[4] and retvals[0].
2458
+
2459
+ Returns
2460
+ -------
2461
+ None
2462
+ This function modifies voxelproducts in-place and does not return any value.
2463
+
2464
+ Notes
2465
+ -----
2466
+ The function performs three assignments:
2467
+ 1. voxelproducts[0][retvals[4], retvals[0], :] = retvals[1][retvals[4], :]
2468
+ 2. voxelproducts[1][retvals[4], retvals[0], :] = retvals[2][retvals[4], :]
2469
+ 3. voxelproducts[2][retvals[4], retvals[0], :] = retvals[3][retvals[4], :]
2470
+
2471
+ All arrays must be compatible for the specified slicing operations.
2472
+
2473
+ Examples
2474
+ --------
2475
+ >>> retvals = (np.array([0, 1]), np.array([[1, 2], [3, 4]]),
2476
+ ... np.array([[5, 6], [7, 8]]), np.array([[9, 10], [11, 12]]),
2477
+ ... np.array([0, 1]))
2478
+ >>> voxelproducts = [np.zeros((2, 2, 2)), np.zeros((2, 2, 2)), np.zeros((2, 2, 2))]
2479
+ >>> _unpackslicedataPhaseProject(retvals, voxelproducts)
2480
+ """
1170
2481
  (voxelproducts[0])[retvals[4], retvals[0], :] = (retvals[1])[retvals[4], :]
1171
2482
  (voxelproducts[1])[retvals[4], retvals[0], :] = (retvals[2])[retvals[4], :]
1172
2483
  (voxelproducts[2])[retvals[4], retvals[0], :] = (retvals[3])[retvals[4], :]
1173
2484
 
1174
2485
 
1175
- def preloadcongrid(outphases, congridbins, gridkernel="kaiser", cyclic=True, debug=False):
2486
+ def preloadcongrid(
2487
+ outphases: NDArray,
2488
+ congridbins: int,
2489
+ gridkernel: str = "kaiser",
2490
+ cyclic: bool = True,
2491
+ debug: bool = False,
2492
+ ) -> None:
2493
+ """
2494
+ Preload congrid interpolation cache for efficient subsequent calls.
2495
+
2496
+ This function preloads the congrid interpolation cache by performing a series
2497
+ of interpolation operations with different phase values. This avoids the
2498
+ computational overhead of cache initialization during subsequent calls to
2499
+ tide_resample.congrid with the same parameters.
2500
+
2501
+ Parameters
2502
+ ----------
2503
+ outphases : array-like
2504
+ Output phase values for the interpolation grid.
2505
+ congridbins : int
2506
+ Number of bins used for the congrid interpolation.
2507
+ gridkernel : str, optional
2508
+ Interpolation kernel to use. Default is "kaiser".
2509
+ cyclic : bool, optional
2510
+ Whether to treat the data as cyclic. Default is True.
2511
+ debug : bool, optional
2512
+ Enable debug output. Default is False.
2513
+
2514
+ Returns
2515
+ -------
2516
+ None
2517
+ This function does not return any value.
2518
+
2519
+ Notes
2520
+ -----
2521
+ This function is designed to improve performance when calling tide_resample.congrid
2522
+ multiple times with the same parameters. By preloading the cache with various
2523
+ phase values, subsequent calls will be faster as the cache is already populated.
2524
+
2525
+ Examples
2526
+ --------
2527
+ >>> import numpy as np
2528
+ >>> outphases = np.linspace(0, 2*np.pi, 100)
2529
+ >>> congridbins = 3
2530
+ >>> preloadcongrid(outphases, congridbins, gridkernel="kaiser", cyclic=True)
2531
+ """
1176
2532
  outphasestep = outphases[1] - outphases[0]
1177
2533
  outphasecenter = outphases[int(len(outphases) / 2)]
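The same cache-warming idea in generic form, using functools.lru_cache as a stand-in for the congrid cache (hypothetical names, not the rapidtide implementation): evaluate every key once up front so later calls are cache hits.

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def kernel_weights(quantized_offset, congridbins):
        # stand-in for an expensive kernel evaluation keyed by a quantized phase offset
        return tuple(((quantized_offset / 1000.0) - i) ** 2 for i in range(congridbins))

    def preload_kernel_cache(congridbins, nquantized=1000):
        # touch every quantized offset once so subsequent lookups come from the cache
        for q in range(nquantized):
            kernel_weights(q, congridbins)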
1178
2534
  fillargs = outphasestep * (
@@ -1212,6 +2568,88 @@ def phaseprojectpass(
1212
2568
  cache=True,
1213
2569
  debug=False,
1214
2570
  ):
2571
+ """
2572
+ Perform cardiac phase projection of fMRI data across slices.
2573
+
2574
+ This function projects fMRI data onto a set of phase values using congrid
2575
+ resampling, accumulating results in `rawapp_byslice` and `cine_byslice` arrays.
2576
+ It supports both single-threaded and multi-processed execution.
2577
+
2578
+ Parameters
2579
+ ----------
2580
+ numslices : int
2581
+ Number of slices to process.
2582
+ demeandata_byslice : ndarray
2583
+ Demeaned fMRI data, shape (nvoxels, nslices, ntr).
2584
+ fmri_data_byslice : ndarray
2585
+ Raw fMRI data, shape (nvoxels, nslices, ntr).
2586
+ validlocslist : list of ndarray
2587
+ List of valid voxel indices for each slice.
2588
+ proctrs : ndarray
2589
+ Timepoints to process.
2590
+ weights_byslice : ndarray
2591
+ Weight array, shape (nvoxels, nslices, ndestpoints).
2592
+ cine_byslice : ndarray
2593
+ Cine data array, shape (nvoxels, nslices, ndestpoints).
2594
+ rawapp_byslice : ndarray
2595
+ Raw application data array, shape (nvoxels, nslices, ndestpoints).
2596
+ outphases : ndarray
2597
+ Output phase values.
2598
+ cardphasevals : ndarray
2599
+ Cardiac phase values for each slice and timepoint, shape (nslices, ntr).
2600
+ congridbins : int
2601
+ Number of bins for congrid resampling.
2602
+ gridkernel : str
2603
+ Kernel to use for congrid resampling.
2604
+ destpoints : int
2605
+ Number of destination points.
2606
+ mpcode : bool, optional
2607
+ If True, use multiprocessing. Default is False.
2608
+ nprocs : int, optional
2609
+ Number of processes to use if `mpcode` is True. Default is 1.
2610
+ alwaysmultiproc : bool, optional
2611
+ If True, always use multiprocessing even for small datasets. Default is False.
2612
+ showprogressbar : bool, optional
2613
+ If True, show progress bar. Default is True.
2614
+ cache : bool, optional
2615
+ If True, enable caching for congrid. Default is True.
2616
+ debug : bool, optional
2617
+ If True, enable debug output. Default is False.
2618
+
2619
+ Returns
2620
+ -------
2621
+ None
2622
+ The function modifies `weights_byslice`, `cine_byslice`, and `rawapp_byslice` in-place.
2623
+
2624
+ Notes
2625
+ -----
2626
+ This function is typically used in the context of cardiac phase-resolved fMRI analysis.
2627
+ It applies a congrid-based resampling technique to project data onto a specified
2628
+ phase grid, accumulating weighted contributions in the output arrays.
2629
+
2630
+ Examples
2631
+ --------
2632
+ >>> phaseprojectpass(
2633
+ ... numslices=10,
2634
+ ... demeandata_byslice=demean_data,
2635
+ ... fmri_data_byslice=fmri_data,
2636
+ ... validlocslist=valid_locs_list,
2637
+ ... proctrs=tr_list,
2638
+ ... weights_byslice=weights,
2639
+ ... cine_byslice=cine_data,
2640
+ ... rawapp_byslice=rawapp_data,
2641
+ ... outphases=phase_vals,
2642
+ ... cardphasevals=card_phase_vals,
2643
+ ... congridbins=100,
2644
+ ... gridkernel='gaussian',
2645
+ ... destpoints=50,
2646
+ ... mpcode=False,
2647
+ ... nprocs=4,
2648
+ ... showprogressbar=True,
2649
+ ... cache=True,
2650
+ ... debug=False,
2651
+ ... )
2652
+ """
1215
2653
  if mpcode:
1216
2654
  inputshape = rawapp_byslice.shape
1217
2655
  sliceargs = [
@@ -1294,6 +2732,60 @@ def phaseprojectpass(
1294
2732
 
1295
2733
 
1296
2734
  def _procOneSliceSmoothing(slice, sliceargs, **kwargs):
2735
+ """
2736
+ Apply a smoothing filter to a single slice of phase-projected data along the phase dimension.
2737
+
2738
+ This function processes a single slice of data by applying a smoothing filter
2739
+ to the raw application data and computing circular derivatives for the
2740
+ specified slice. The smoothing is applied only to valid locations within the slice.
2741
+
2742
+ Parameters
2743
+ ----------
2744
+ slice : int
2745
+ The slice index to process.
2746
+ sliceargs : tuple
2747
+ A tuple containing the following elements:
2748
+
2749
+ - validlocslist : list of arrays
2750
+ List of arrays containing valid location indices for each slice
2751
+ - rawapp_byslice : ndarray
2752
+ Array containing raw application data by slice [locations, slices, time_points]
2753
+ - appsmoothingfilter : object
2754
+ Smoothing filter object with an apply method
2755
+ phaseFs : float
2756
+ Sampling frequency of the phase axis, used when applying the smoothing filter
2757
+ - derivatives_byslice : ndarray
2758
+ Array to store computed derivatives [locations, slices, time_points]
2759
+ **kwargs : dict
2760
+ Additional keyword arguments:
2761
+ - debug : bool, optional
2762
+ Enable debug mode (default: False)
2763
+
2764
+ Returns
2765
+ -------
2766
+ tuple
2767
+ A tuple containing:
2768
+
2769
+ - slice : int
2770
+ The input slice index
2771
+ - rawapp_byslice : ndarray
2772
+ Smoothed raw application data for the specified slice [locations, time_points]
2773
+ - derivatives_byslice : ndarray
2774
+ Computed circular derivatives for the specified slice [locations, time_points]
2775
+
2776
+ Notes
2777
+ -----
2778
+ - The function only processes slices with valid locations (len(validlocs) > 0)
2779
+ - Smoothing is applied using the provided smoothing filter's apply method
2780
+ - Circular derivatives are computed using the `circularderivs` function
2781
+ - The function modifies the input arrays in-place
2782
+
2783
+ Examples
2784
+ --------
2785
+ >>> slice_idx = 5
2786
+ >>> sliceargs = (validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice)
2787
+ >>> result = _procOneSliceSmoothing(slice_idx, sliceargs, debug=True)
2788
+ """
1297
2789
  options = {
1298
2790
  "debug": False,
1299
2791
  }
@@ -1312,6 +2804,31 @@ def _procOneSliceSmoothing(slice, sliceargs, **kwargs):
1312
2804
 
1313
2805
 
1314
2806
  def _packslicedataSliceSmoothing(slicenum, sliceargs):
2807
+ """Pack slice data for slice smoothing operation.
2808
+
2809
+ Parameters
2810
+ ----------
2811
+ slicenum : int
2812
+ The slice number identifier.
2813
+ sliceargs : list
2814
+ List containing slice arguments with at least 5 elements.
2815
+
2816
+ Returns
2817
+ -------
2818
+ list
2819
+ A list containing the first 5 elements from sliceargs in the same order.
2820
+
2821
+ Notes
2822
+ -----
2823
+ This function extracts the first five elements from the sliceargs parameter
2824
+ and returns them as a new list. It's typically used as part of a slice
2825
+ smoothing pipeline where slice arguments need to be packed for further processing.
2826
+
2827
+ Examples
2828
+ --------
2829
+ >>> _packslicedataSliceSmoothing(1, [10, 20, 30, 40, 50, 60])
2830
+ [10, 20, 30, 40, 50]
2831
+ """
1315
2832
  return [
1316
2833
  sliceargs[0],
1317
2834
  sliceargs[1],
@@ -1322,6 +2839,49 @@ def _packslicedataSliceSmoothing(slicenum, sliceargs):
1322
2839
 
1323
2840
 
1324
2841
  def _unpackslicedataSliceSmoothing(retvals, voxelproducts):
2842
+ """
2843
+ Unpack slice data for smoothing operation.
2844
+
2845
+ This function assigns smoothed slice data back to the voxel products array
2846
+ based on the provided retvals structure.
2847
+
2848
+ Parameters
2849
+ ----------
2850
+ retvals : tuple of array-like
2851
+ A tuple containing:
2852
+ - retvals[0] : array-like
2853
+ Index array for slice selection
2854
+ - retvals[1] : array-like
2855
+ First set of smoothed data to assign
2856
+ - retvals[2] : array-like
2857
+ Second set of smoothed data to assign
2858
+ voxelproducts : list of array-like
2859
+ A list containing two array-like objects where:
2860
+ - voxelproducts[0] : array-like
2861
+ First voxel product array to be modified
2862
+ - voxelproducts[1] : array-like
2863
+ Second voxel product array to be modified
2864
+
2865
+ Returns
2866
+ -------
2867
+ None
2868
+ This function modifies the voxelproducts arrays in-place and does not return anything.
2869
+
2870
+ Notes
2871
+ -----
2872
+ The function performs in-place assignment operations on the voxelproducts arrays.
2873
+ retvals[0] selects the target slice along the second axis of each voxelproducts array,
2874
+ and retvals[1] and retvals[2] are assigned into that slice of voxelproducts[0] and voxelproducts[1], respectively.
2875
+
2876
+ Examples
2877
+ --------
2878
+ >>> import numpy as np
2879
+ >>> retvals = (np.array([0, 1, 2]), np.array([[1, 2], [3, 4], [5, 6]]), np.array([[7, 8], [9, 10], [11, 12]]))
2880
+ >>> voxelproducts = [np.zeros((3, 3, 2)), np.zeros((3, 3, 2))]
2881
+ >>> _unpackslicedataSliceSmoothing(retvals, voxelproducts)
2882
+ >>> print(voxelproducts[0])
2883
+ >>> print(voxelproducts[1])
2884
+ """
1325
2885
  (voxelproducts[0])[:, retvals[0], :] = retvals[1]
1326
2886
  (voxelproducts[1])[:, retvals[0], :] = retvals[2]
1327
2887
 
@@ -1338,6 +2898,58 @@ def tcsmoothingpass(
1338
2898
  showprogressbar=True,
1339
2899
  debug=False,
1340
2900
  ):
2901
+ """
2902
+ Apply smoothing to time course data across slices using multiprocessing.
2903
+
2904
+ This function performs smoothing operations on time course data organized by slices,
2905
+ utilizing multiprocessing for improved performance when processing large datasets.
2906
+
2907
+ Parameters
2908
+ ----------
2909
+ numslices : int
2910
+ Number of slices in the dataset
2911
+ validlocslist : list
2912
+ List of valid locations for processing
2913
+ rawapp_byslice : NDArray
2914
+ Raw application data organized by slice
2915
+ appsmoothingfilter : object
2916
+ Smoothing filter object (with an apply method) to be applied along the phase axis
2917
+ phaseFs : float
2918
+ Phase frequency parameter for smoothing operations
2919
+ derivatives_byslice : NDArray
2920
+ Derivative data organized by slice
2921
+ nprocs : int, optional
2922
+ Number of processors to use for multiprocessing (default is 1)
2923
+ alwaysmultiproc : bool, optional
2924
+ Whether to always use multiprocessing regardless of data size (default is False)
2925
+ showprogressbar : bool, optional
2926
+ Whether to display progress bar during processing (default is True)
2927
+ debug : bool, optional
2928
+ Enable debug mode for additional logging (default is False)
2929
+
2930
+ Returns
2931
+ -------
2932
+ NDArray
2933
+ Processed data after smoothing operations have been applied
2934
+
2935
+ Notes
2936
+ -----
2937
+ This function uses the `tide_genericmultiproc.run_multiproc` utility to distribute
2938
+ the smoothing workload across multiple processors. The function handles data organization
2939
+ and processing for each slice individually, then combines results.
2940
+
2941
+ Examples
2942
+ --------
2943
+ >>> result = tcsmoothingpass(
2944
+ ... numslices=10,
2945
+ ... validlocslist=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
2946
+ ... rawapp_byslice=raw_data,
2947
+ ... appsmoothingfilter=smoothing_filter,
2948
+ ... phaseFs=100.0,
2949
+ ... derivatives_byslice=derivatives,
2950
+ ... nprocs=4
2951
+ ... )
2952
+ """
1341
2953
  inputshape = rawapp_byslice.shape
1342
2954
  sliceargs = [validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice]
1343
2955
  slicefunc = _procOneSliceSmoothing
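A simplified stand-in for the per-voxel smoothing step (the real code calls appsmoothingfilter.apply at phaseFs): a cyclic moving average over the phase axis, followed by the circular derivative extrema used downstream for the arterial flip decision. The kernel width is an illustrative choice.

    import numpy as np

    def smooth_phase_timecourse(tc, kernelwidth=5):
        tc = np.asarray(tc, dtype=float)
        kernel = np.ones(kernelwidth) / kernelwidth
        # wrap the ends so the smoothing respects the cyclic phase axis
        padded = np.concatenate((tc[-kernelwidth:], tc, tc[:kernelwidth]))
        smoothed = np.convolve(padded, kernel, mode="same")[kernelwidth:-kernelwidth]
        # circular first derivative, as in circularderivs above
        firstderiv = np.diff(smoothed, append=[smoothed[0]])
        return (smoothed, np.max(firstderiv), np.argmax(firstderiv),
                np.min(firstderiv), np.argmin(firstderiv))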
@@ -1394,6 +3006,97 @@ def phaseproject(
1394
3006
  thealiasedcorrx,
1395
3007
  theAliasedCorrelator,
1396
3008
  ):
3009
+ """
3010
+ Perform phase projection and related processing on fMRI data across slices.
3011
+
3012
+ This function performs phase projection on fMRI data, optionally smoothing
3013
+ timecourses, and applying flips based on derivative information. It also
3014
+ computes wave delay and amplitude measures via aliased correlation and updates relevant arrays
3015
+ in-place for further processing.
3016
+
3017
+ Parameters
3018
+ ----------
3019
+ input_data : object
3020
+ Input fMRI data container with `getdims()` and `byslice()` methods.
3021
+ demeandata_byslice : array_like
3022
+ Demeaned fMRI data by slice.
3023
+ means_byslice : array_like
3024
+ Mean values by slice for normalization.
3025
+ rawapp_byslice : array_like
3026
+ Raw phase-projected (app) data by slice.
3027
+ app_byslice : array_like
3028
+ APP data after initial processing.
3029
+ normapp_byslice : array_like
3030
+ Normalized APP data.
3031
+ weights_byslice : array_like
3032
+ Weights by slice for processing.
3033
+ cine_byslice : array_like
3034
+ Cine data by slice.
3035
+ projmask_byslice : array_like
3036
+ Projection mask by slice.
3037
+ derivatives_byslice : array_like
3038
+ Derivative data by slice, used for determining flips.
3039
+ proctrs : array_like
3040
+ Processing timepoints or transformation parameters.
3041
+ thispass : int
3042
+ Current processing pass number.
3043
+ args : argparse.Namespace
3044
+ Command-line arguments controlling processing behavior.
3045
+ sliceoffsets : array_like
3046
+ Slice offset values.
3047
+ cardphasevals : array_like
3048
+ Cardiac phase values.
3049
+ outphases : array_like
3050
+ Output phases.
3051
+ appsmoothingfilter : array_like
3052
+ Smoothing filter for timecourses.
3053
+ phaseFs : float
3054
+ Sampling frequency for phase processing.
3055
+ thecorrfunc_byslice : array_like
3056
+ Correlation function by slice.
3057
+ waveamp_byslice : array_like
3058
+ Wave amplitude by slice.
3059
+ wavedelay_byslice : array_like
3060
+ Wave delay by slice.
3061
+ wavedelayCOM_byslice : array_like
3062
+ Center of mass of wave delay by slice.
3063
+ corrected_rawapp_byslice : array_like
3064
+ Corrected raw APP data by slice.
3065
+ corrstartloc : int
3066
+ Start location for correlation computation.
3067
+ correndloc : int
3068
+ End location for correlation computation.
3069
+ thealiasedcorrx : array_like
3070
+ Aliased correlation x-axis values.
3071
+ theAliasedCorrelator : object
3072
+ Correlator object for aliased correlation computation.
3073
+
3074
+ Returns
3075
+ -------
3076
+ appflips_byslice : array_like
3077
+ Flip values applied to the APP data by slice.
3078
+
3079
+ Notes
3080
+ -----
3081
+ - The function modifies several input arrays in-place.
3082
+ - If `args.smoothapp` is True, smoothing is applied to the raw APP data.
3083
+ - If `args.fliparteries` is True, flips are applied to correct arterial
3084
+ orientation.
3085
+ - If `args.doaliasedcorrelation` is True, aliased correlation is computed
3086
+ and stored in `thecorrfunc_byslice`.
3087
+
3088
+ Examples
3089
+ --------
3090
+ >>> phaseproject(
3091
+ ... input_data, demeandata_byslice, means_byslice, rawapp_byslice,
3092
+ ... app_byslice, normapp_byslice, weights_byslice, cine_byslice,
3093
+ ... projmask_byslice, derivatives_byslice, proctrs, thispass, args,
3094
+ ... sliceoffsets, cardphasevals, outphases, appsmoothingfilter,
3095
+ ... phaseFs, thecorrfunc_byslice, waveamp_byslice, wavedelay_byslice,
3096
+ ... wavedelayCOM_byslice, corrected_rawapp_byslice, corrstartloc,
3097
+ ... correndloc, thealiasedcorrx, theAliasedCorrelator
3098
+ ... )
3099
+ """
1397
3100
  xsize, ysize, numslices, timepoints = input_data.getdims()
1398
3101
  fmri_data_byslice = input_data.byslice()
1399
3102
 
@@ -1521,6 +3224,67 @@ def findvessels(
1521
3224
  outputlevel,
1522
3225
  debug=False,
1523
3226
  ):
3227
+ """
3228
+ Find vessel thresholds and generate vessel masks from app data.
3229
+
3230
+ This function processes app data to identify vessel thresholds and optionally
3231
+ generates histograms for visualization. It handles both normalized and
3232
+ unnormalized vessel maps based on the input parameters.
3233
+
3234
+ Parameters
3235
+ ----------
3236
+ app : NDArray
3237
+ Raw app data array
3238
+ normapp : NDArray
3239
+ Normalized app data array
3240
+ validlocs : NDArray
3241
+ Array of valid locations for processing
3242
+ numspatiallocs : int
3243
+ Number of spatial locations
3244
+ outputroot : str
3245
+ Root name for output files
3246
+ unnormvesselmap : bool
3247
+ Flag indicating whether to use unnormalized vessel map
3248
+ destpoints : int
3249
+ Number of destination points
3250
+ softvesselfrac : float
3251
+ Fractional multiplier for soft vessel threshold
3252
+ histlen : int
3253
+ Length of histogram bins
3254
+ outputlevel : int
3255
+ Level of output generation (histograms are only written when outputlevel > 0)
3256
+ debug : bool, optional
3257
+ Debug flag for additional logging (default is False)
3258
+
3259
+ Returns
3260
+ -------
3261
+ tuple
3262
+ Tuple containing (hardvesselthresh, softvesselthresh) threshold values
3263
+
3264
+ Notes
3265
+ -----
3266
+ The function performs the following steps:
3267
+ 1. Reshapes app data based on unnormvesselmap flag
3268
+ 2. Extracts valid locations from the reshaped data
3269
+ 3. Generates histogram if outputlevel > 0
3270
+ 4. Calculates hard and soft vessel thresholds based on 98th percentile
3271
+ 5. Prints threshold values to console
3272
+
3273
+ Examples
3274
+ --------
3275
+ >>> hard_thresh, soft_thresh = findvessels(
3276
+ ... app=app_data,
3277
+ ... normapp=norm_app_data,
3278
+ ... validlocs=valid_indices,
3279
+ ... numspatiallocs=100,
3280
+ ... outputroot='/path/to/output',
3281
+ ... unnormvesselmap=True,
3282
+ ... destpoints=50,
3283
+ ... softvesselfrac=0.5,
3284
+ ... histlen=100,
3285
+ ... outputlevel=1
3286
+ ... )
3287
+ """
1524
3288
  if unnormvesselmap:
1525
3289
  app2d = app.reshape((numspatiallocs, destpoints))
1526
3290
  else:
@@ -1548,6 +3312,44 @@ def findvessels(
1548
3312
 
1549
3313
 
1550
3314
  def upsampleimage(input_data, numsteps, sliceoffsets, slicesamplerate, outputroot):
3315
+ """
3316
+ Upsample fMRI data along the temporal and slice dimensions.
3317
+
3318
+ This function takes fMRI data and upsamples it by a factor of `numsteps` along
3319
+ the temporal dimension, and interpolates across slices to align with specified
3320
+ slice offsets. The resulting upsampled data is saved as a NIfTI file.
3321
+
3322
+ Parameters
3323
+ ----------
3324
+ input_data : object
3325
+ Input fMRI data object with attributes: `byvol()`, `timepoints`, `xsize`,
3326
+ `ysize`, `numslices`, and `copyheader()`.
3327
+ numsteps : int
3328
+ Upsampling factor along the temporal dimension.
3329
+ sliceoffsets : array-like of int
3330
+ Slice offset indices indicating where each slice's data should be placed
3331
+ in the upsampled volume.
3332
+ slicesamplerate : float
3333
+ Sampling rate of the slice acquisition (used to set the TR in the output header).
3334
+ outputroot : str
3335
+ Root name for the output NIfTI file (will be suffixed with "_upsampled").
3336
+
3337
+ Returns
3338
+ -------
3339
+ None
3340
+ The function saves the upsampled data to a NIfTI file and does not return any value.
3341
+
3342
+ Notes
3343
+ -----
3344
+ - The function demeans the input data before upsampling.
3345
+ - Interpolation is performed along the slice direction using linear interpolation.
3346
+ - The output file is saved using `tide_io.savetonifti`.
3347
+
3348
+ Examples
3349
+ --------
3350
+ >>> upsampleimage(fmri_data, numsteps=2, sliceoffsets=[0, 1], slicesamplerate=2.0, outputroot='output')
3351
+ Upsamples the fMRI data by a factor of 2 and saves to 'output_upsampled.nii'.
3352
+ """
1551
3353
  fmri_data = input_data.byvol()
1552
3354
  timepoints = input_data.timepoints
1553
3355
  xsize = input_data.xsize
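A one-voxel temporal sketch of the upsampling idea (the real function also interpolates across slices and writes a NIfTI): place the demeaned samples at this slice's offset on a grid that is numsteps times finer, then fill the gaps by linear interpolation.

    import numpy as np

    def upsample_timecourse(tc, numsteps, sliceoffset):
        tc = np.asarray(tc, dtype=float) - np.mean(tc)   # demean, as in the Notes above
        known = np.arange(len(tc)) * numsteps + sliceoffset   # assumes sliceoffset < numsteps
        finegrid = np.arange(len(tc) * numsteps)
        return np.interp(finegrid, known, tc)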
@@ -1609,6 +3411,78 @@ def wrightmap(
1609
3411
  verbose=False,
1610
3412
  debug=False,
1611
3413
  ):
3414
+ """
3415
+ Compute a vessel map using Wright's method by performing phase correlation
3416
+ analysis across randomized subsets of timecourses.
3417
+
3418
+ This function implements Wright's method for estimating vessel maps by
3419
+ splitting the timecourse data into two random halves, projecting each half
3420
+ separately, and computing the Pearson correlation between the resulting
3421
+ projections for each voxel and slice. The final map is derived as the mean
3422
+ of these correlations across iterations.
3423
+
3424
+ Parameters
3425
+ ----------
3426
+ input_data : object
3427
+ Input data container with attributes `xsize`, `ysize`, and `numslices`.
3428
+ demeandata_byslice : array_like
3429
+ Demeaned data organized by slice, shape ``(nvoxels, numslices)``.
3430
+ rawapp_byslice : array_like
3431
+ Raw application data by slice, shape ``(nvoxels, numslices)``.
3432
+ projmask_byslice : array_like
3433
+ Projection mask by slice, shape ``(nvoxels, numslices)``.
3434
+ outphases : array_like
3435
+ Output phases, shape ``(nphases,)``.
3436
+ cardphasevals : array_like
3437
+ Cardiac phase values, shape ``(nphases,)``.
3438
+ proctrs : array_like
3439
+ Timecourse indices to be processed, shape ``(ntimepoints,)``.
3440
+ congridbins : int
3441
+ Number of bins for congrid interpolation.
3442
+ gridkernel : str
3443
+ Name of the kernel for congrid interpolation.
3444
+ destpoints : int
3445
+ Number of destination phase points for the projection.
3446
+ iterations : int, optional
3447
+ Number of iterations for random splitting (default is 100).
3448
+ nprocs : int, optional
3449
+ Number of processes to use for parallel computation; -1 uses all
3450
+ available cores (default is -1).
3451
+ verbose : bool, optional
3452
+ If True, print progress messages (default is False).
3453
+ debug : bool, optional
3454
+ If True, print additional debug information (default is False).
3455
+
3456
+ Returns
3457
+ -------
3458
+ wrightcorrs : ndarray
3459
+ Computed vessel map with shape ``(xsize, ysize, numslices)``.
3460
+
3461
+ Notes
3462
+ -----
3463
+ This function performs a bootstrap-like procedure where the input timecourse
3464
+ is randomly split into two halves, and phase projections are computed for
3465
+ each half. Pearson correlation is computed between the two projections for
3466
+ each voxel and slice. The result is averaged over all iterations to produce
3467
+ the final vessel map.
3468
+
3469
+ Examples
3470
+ --------
3471
+ >>> wrightcorrs = wrightmap(
3472
+ ... input_data,
3473
+ ... demeandata_byslice,
3474
+ ... rawapp_byslice,
3475
+ ... projmask_byslice,
3476
+ ... outphases,
3477
+ ... cardphasevals,
3478
+ ... proctrs,
3479
+ ... congridbins,
3480
+ ... gridkernel,
3481
+ ... destpoints,
3482
+ ... iterations=50,
3483
+ ... verbose=True
3484
+ ... )
3485
+ """
1612
3486
  xsize = input_data.xsize
1613
3487
  ysize = input_data.ysize
1614
3488
  numslices = input_data.numslices
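The split-half procedure from the Notes, reduced to a single voxel with a nearest-bin phase projection (illustrative only; the real code uses phaseprojectpass with the congrid kernel and runs over all voxels and slices):

    import numpy as np
    from scipy.stats import pearsonr

    def binned_average(phases, values, nbins):
        # nearest-bin phase average for one random half of the timepoints
        bins = (phases % (2 * np.pi) / (2 * np.pi / nbins)).astype(int) % nbins
        sums = np.zeros(nbins)
        counts = np.zeros(nbins)
        np.add.at(sums, bins, values)
        np.add.at(counts, bins, 1.0)
        return np.where(counts > 0, sums / np.maximum(counts, 1.0), 0.0)

    def splithalf_consistency(phases, timecourse, nbins=40, iterations=100, seed=0):
        phases = np.asarray(phases)
        timecourse = np.asarray(timecourse, dtype=float)
        rng = np.random.default_rng(seed)
        corrs = np.zeros(iterations)
        for it in range(iterations):
            order = rng.permutation(len(timecourse))
            first, second = order[: len(order) // 2], order[len(order) // 2:]
            proj1 = binned_average(phases[first], timecourse[first], nbins)
            proj2 = binned_average(phases[second], timecourse[second], nbins)
            corrs[it] = pearsonr(proj1, proj2).statistic
        # voxels with reproducible cardiac pulsation give a high mean correlation
        return corrs.mean()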
@@ -1672,10 +3546,11 @@ def wrightmap(
1672
3546
  )
1673
3547
  for theslice in range(numslices):
1674
3548
  for thepoint in validlocslist[theslice]:
1675
- theRvalue, thepvalue = pearsonr(
3549
+ theresult = pearsonr(
1676
3550
  rawapp_byslice1[thepoint, theslice, :],
1677
3551
  rawapp_byslice2[thepoint, theslice, :],
1678
3552
  )
3553
+ theRvalue = theresult.statistic
1679
3554
  if debug:
1680
3555
  print("theRvalue = ", theRvalue)
1681
3556
  wrightcorrs_byslice[thepoint, theslice, theiteration] = theRvalue