rapidtide 3.0.11__py3-none-any.whl → 3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1049 -46
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +191 -40
  7. rapidtide/calcsimfunc.py +245 -42
  8. rapidtide/correlate.py +1210 -393
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +19 -1
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +25 -3
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/decorators.py +91 -0
  23. rapidtide/dlfilter.py +2225 -108
  24. rapidtide/dlfiltertorch.py +4843 -0
  25. rapidtide/externaltools.py +327 -12
  26. rapidtide/fMRIData_class.py +79 -40
  27. rapidtide/filter.py +1899 -810
  28. rapidtide/fit.py +2004 -574
  29. rapidtide/genericmultiproc.py +93 -18
  30. rapidtide/happy_supportfuncs.py +2044 -171
  31. rapidtide/helper_classes.py +584 -43
  32. rapidtide/io.py +2363 -370
  33. rapidtide/linfitfiltpass.py +341 -75
  34. rapidtide/makelaggedtcs.py +211 -20
  35. rapidtide/maskutil.py +423 -53
  36. rapidtide/miscmath.py +827 -121
  37. rapidtide/multiproc.py +210 -22
  38. rapidtide/patchmatch.py +234 -33
  39. rapidtide/peakeval.py +32 -30
  40. rapidtide/ppgproc.py +2203 -0
  41. rapidtide/qualitycheck.py +352 -39
  42. rapidtide/refinedelay.py +422 -57
  43. rapidtide/refineregressor.py +498 -184
  44. rapidtide/resample.py +671 -185
  45. rapidtide/scripts/applyppgproc.py +28 -0
  46. rapidtide/simFuncClasses.py +1052 -77
  47. rapidtide/simfuncfit.py +260 -46
  48. rapidtide/stats.py +540 -238
  49. rapidtide/tests/happycomp +9 -0
  50. rapidtide/tests/test_dlfiltertorch.py +627 -0
  51. rapidtide/tests/test_findmaxlag.py +24 -8
  52. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  53. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  54. rapidtide/tests/test_fullrunhappy_v3.py +1 -0
  55. rapidtide/tests/test_fullrunhappy_v4.py +2 -2
  56. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  57. rapidtide/tests/test_simroundtrip.py +8 -8
  58. rapidtide/tests/utils.py +9 -8
  59. rapidtide/tidepoolTemplate.py +142 -38
  60. rapidtide/tidepoolTemplate_alt.py +165 -44
  61. rapidtide/tidepoolTemplate_big.py +189 -52
  62. rapidtide/util.py +1217 -118
  63. rapidtide/voxelData.py +684 -37
  64. rapidtide/wiener.py +19 -12
  65. rapidtide/wiener2.py +113 -7
  66. rapidtide/wiener_doc.py +255 -0
  67. rapidtide/workflows/adjustoffset.py +105 -3
  68. rapidtide/workflows/aligntcs.py +85 -2
  69. rapidtide/workflows/applydlfilter.py +87 -10
  70. rapidtide/workflows/applyppgproc.py +522 -0
  71. rapidtide/workflows/atlasaverage.py +210 -47
  72. rapidtide/workflows/atlastool.py +100 -3
  73. rapidtide/workflows/calcSimFuncMap.py +294 -64
  74. rapidtide/workflows/calctexticc.py +201 -9
  75. rapidtide/workflows/ccorrica.py +97 -4
  76. rapidtide/workflows/cleanregressor.py +168 -29
  77. rapidtide/workflows/delayvar.py +163 -10
  78. rapidtide/workflows/diffrois.py +81 -3
  79. rapidtide/workflows/endtidalproc.py +144 -4
  80. rapidtide/workflows/fdica.py +195 -15
  81. rapidtide/workflows/filtnifti.py +70 -3
  82. rapidtide/workflows/filttc.py +74 -3
  83. rapidtide/workflows/fitSimFuncMap.py +206 -48
  84. rapidtide/workflows/fixtr.py +73 -3
  85. rapidtide/workflows/gmscalc.py +113 -3
  86. rapidtide/workflows/happy.py +801 -199
  87. rapidtide/workflows/happy2std.py +144 -12
  88. rapidtide/workflows/happy_parser.py +138 -9
  89. rapidtide/workflows/histnifti.py +118 -2
  90. rapidtide/workflows/histtc.py +84 -3
  91. rapidtide/workflows/linfitfilt.py +117 -4
  92. rapidtide/workflows/localflow.py +328 -28
  93. rapidtide/workflows/mergequality.py +79 -3
  94. rapidtide/workflows/niftidecomp.py +322 -18
  95. rapidtide/workflows/niftistats.py +174 -4
  96. rapidtide/workflows/pairproc.py +88 -2
  97. rapidtide/workflows/pairwisemergenifti.py +85 -2
  98. rapidtide/workflows/parser_funcs.py +1421 -40
  99. rapidtide/workflows/physiofreq.py +137 -11
  100. rapidtide/workflows/pixelcomp.py +208 -5
  101. rapidtide/workflows/plethquality.py +103 -21
  102. rapidtide/workflows/polyfitim.py +151 -11
  103. rapidtide/workflows/proj2flow.py +75 -2
  104. rapidtide/workflows/rankimage.py +111 -4
  105. rapidtide/workflows/rapidtide.py +272 -15
  106. rapidtide/workflows/rapidtide2std.py +98 -2
  107. rapidtide/workflows/rapidtide_parser.py +109 -9
  108. rapidtide/workflows/refineDelayMap.py +143 -33
  109. rapidtide/workflows/refineRegressor.py +682 -93
  110. rapidtide/workflows/regressfrommaps.py +152 -31
  111. rapidtide/workflows/resamplenifti.py +85 -3
  112. rapidtide/workflows/resampletc.py +91 -3
  113. rapidtide/workflows/retrolagtcs.py +98 -6
  114. rapidtide/workflows/retroregress.py +165 -9
  115. rapidtide/workflows/roisummarize.py +173 -5
  116. rapidtide/workflows/runqualitycheck.py +71 -3
  117. rapidtide/workflows/showarbcorr.py +147 -4
  118. rapidtide/workflows/showhist.py +86 -2
  119. rapidtide/workflows/showstxcorr.py +160 -3
  120. rapidtide/workflows/showtc.py +159 -3
  121. rapidtide/workflows/showxcorrx.py +184 -4
  122. rapidtide/workflows/showxy.py +185 -15
  123. rapidtide/workflows/simdata.py +262 -36
  124. rapidtide/workflows/spatialfit.py +77 -2
  125. rapidtide/workflows/spatialmi.py +251 -27
  126. rapidtide/workflows/spectrogram.py +305 -32
  127. rapidtide/workflows/synthASL.py +154 -3
  128. rapidtide/workflows/tcfrom2col.py +76 -2
  129. rapidtide/workflows/tcfrom3col.py +74 -2
  130. rapidtide/workflows/tidepool.py +2969 -130
  131. rapidtide/workflows/utils.py +19 -14
  132. rapidtide/workflows/utils_doc.py +293 -0
  133. rapidtide/workflows/variabilityizer.py +116 -3
  134. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/METADATA +3 -2
  135. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/RECORD +139 -122
  136. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
  137. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
  138. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
  139. {rapidtide-3.0.11.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
@@ -45,21 +45,106 @@ try:
  except ImportError:
  mklexists = False

- try:
- import rapidtide.dlfilter as tide_dlfilt

- dlfilterexists = True
- print("dlfilter exists")
- except ImportError:
- dlfilterexists = False
- print("dlfilter does not exist")
+ def rrifromphase(timeaxis: np.ndarray, thephase: np.ndarray) -> None:
+ """
+ Convert a cardiac phase timecourse to an R-R interval (RRI) estimate.

+ This function converts cardiac phase measurements to R-R interval values using
+ the provided time axis and phase data.
+
+ Parameters
+ ----------
+ timeaxis : np.ndarray
+ Time axis values corresponding to the phase measurements.
+ thephase : np.ndarray
+ Phase measurements to be converted to R-R intervals.

- def rrifromphase(timeaxis, thephase):
+ Returns
+ -------
+ None
+ This function does not return any value.
+
+ Notes
+ -----
+ This function is currently a stub: the conversion from phase to R-R interval
+ is not implemented, and the call always returns None.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> time = np.array([0, 1, 2, 3])
+ >>> phase = np.array([0.1, 0.2, 0.3, 0.4])
+ >>> rrifromphase(time, phase)
+ """
  return None
60
82
 
61
83
 
62
- def calc_3d_optical_flow(video, projmask, flowhdr, outputroot, window_size=3, debug=False):
84
+ def calc_3d_optical_flow(
85
+ video: np.ndarray,
86
+ projmask: np.ndarray,
87
+ flowhdr: dict,
88
+ outputroot: str,
89
+ window_size: int = 3,
90
+ debug: bool = False,
91
+ ) -> tuple[np.ndarray, np.ndarray]:
92
+ """
93
+ Compute 3D optical flow for a video volume using the Lucas-Kanade method.
94
+
95
+ This function calculates optical flow in three dimensions (x, y, z) across
96
+ a sequence of video frames. It uses a Lucas-Kanade approach to estimate
97
+ motion vectors at each voxel, considering a local window around each pixel.
98
+ The results are saved as NIfTI files for each frame.
99
+
100
+ Parameters
101
+ ----------
102
+ video : np.ndarray
103
+ 4D array of shape (xsize, ysize, zsize, num_frames) representing the
104
+ input video data.
105
+ projmask : np.ndarray
106
+ 3D boolean or integer mask of shape (xsize, ysize, zsize) indicating
107
+ which voxels to process for optical flow computation.
108
+ flowhdr : dict
109
+ Header dictionary for NIfTI output files, containing metadata for
110
+ the optical flow results.
111
+ outputroot : str
112
+ Root name for output NIfTI files. Files will be saved with suffixes
113
+ `_desc-flow_phase-XX_map` and `_desc-flowmag_phase-XX_map`.
114
+ window_size : int, optional
115
+ Size of the local window used for gradient computation. Default is 3.
116
+ debug : bool, optional
117
+ If True, print debug information during computation. Default is False.
118
+
119
+ Returns
120
+ -------
121
+ tuple[np.ndarray, np.ndarray]
122
+ A tuple containing:
123
+ - `flow_vectors`: 5D array of shape (xsize, ysize, zsize, num_frames, 3)
124
+ representing the computed optical flow vectors for each frame.
125
+ - `None`: Placeholder return value; function currently returns only
126
+ `flow_vectors` and saves outputs to disk.
127
+
128
+ Notes
129
+ -----
130
+ - The optical flow is computed using a Lucas-Kanade method with spatial
131
+ gradients in x, y, and z directions.
132
+ - Temporal gradient is computed as the difference between consecutive frames.
133
+ - Output files are saved using `tide_io.savetonifti`.
134
+ - The function wraps around frames when reaching the end (i.e., next frame
135
+ for the last frame is the first frame).
136
+
137
+ Examples
138
+ --------
139
+ >>> import numpy as np
140
+ >>> video = np.random.rand(64, 64, 32, 10)
141
+ >>> mask = np.ones((64, 64, 32), dtype=bool)
142
+ >>> header = {}
143
+ >>> output_root = "flow_result"
144
+ >>> flow_vectors = calc_3d_optical_flow(video, mask, header, output_root)
145
+ >>> print(flow_vectors.shape)
146
+ (64, 64, 32, 10, 3)
147
+ """
63
148
  # window Define the window size for Lucas-Kanade method
64
149
  # Get the number of frames, height, and width of the video
65
150
  singlehdr = copy.deepcopy(flowhdr)
@@ -135,7 +220,42 @@ def calc_3d_optical_flow(video, projmask, flowhdr, outputroot, window_size=3, de
135
220
  return flow_vectors
136
221
 
137
222
 
138
- def phasejolt(phaseimage):
223
+ def phasejolt(phaseimage: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
224
+ """
225
+ Compute phase gradient-based metrics including jump, jolt, and laplacian.
226
+
227
+ This function calculates three important metrics from a phase image:
228
+ - jump: average absolute gradient magnitude
229
+ - jolt: average absolute second-order gradient magnitude
230
+ - laplacian: sum of second-order partial derivatives
231
+
232
+ Parameters
233
+ ----------
234
+ phaseimage : numpy.ndarray
235
+ Input phase image array of arbitrary dimensions (typically 2D or 3D).
236
+
237
+ Returns
238
+ -------
239
+ tuple of numpy.ndarray
240
+ A tuple containing three arrays:
241
+ - jump: array of same shape as input, representing average absolute gradient
242
+ - jolt: array of same shape as input, representing average absolute second-order gradient
243
+ - laplacian: array of same shape as input, representing Laplacian of the phase image
244
+
245
+ Notes
246
+ -----
247
+ The function computes gradients using numpy's gradient function which applies
248
+ central differences in the interior and first differences at the boundaries.
249
+ All metrics are computed in a voxel-wise manner across the entire image.
250
+
251
+ Examples
252
+ --------
253
+ >>> import numpy as np
254
+ >>> phase_img = np.random.rand(10, 10)
255
+ >>> jump, jolt, laplacian = phasejolt(phase_img)
256
+ >>> print(jump.shape, jolt.shape, laplacian.shape)
257
+ (10, 10) (10, 10) (10, 10)
258
+ """
139
259
 
140
260
  # Compute the gradient of the window in x, y, and z directions
141
261
  grad_x, grad_y, grad_z = np.gradient(phaseimage)
@@ -156,7 +276,64 @@ def phasejolt(phaseimage):
156
276
  return (jump, jolt, laplacian)
157
277
 
158
278
 
159
- def cardiacsig(thisphase, amps=(1.0, 0.0, 0.0), phases=None, overallphase=0.0):
279
+ def cardiacsig(
280
+ thisphase: float | np.ndarray,
281
+ amps: tuple | np.ndarray = (1.0, 0.0, 0.0),
282
+ phases: np.ndarray | None = None,
283
+ overallphase: float = 0.0,
284
+ ) -> float | np.ndarray:
285
+ """
286
+ Generate a cardiac signal model using harmonic components.
287
+
288
+ This function creates a cardiac signal by summing weighted cosine waves
289
+ at different harmonic frequencies. The signal can be computed for
290
+ scalar phase values or arrays of phase values.
291
+
292
+ Parameters
293
+ ----------
294
+ thisphase : float or np.ndarray
295
+ The phase value(s) at which to evaluate the cardiac signal.
296
+ Can be a scalar or array of phase values.
297
+ amps : tuple or np.ndarray, optional
298
+ Amplitude coefficients for each harmonic component. Default is
299
+ (1.0, 0.0, 0.0) representing the fundamental frequency with
300
+ amplitude 1.0 and higher harmonics with amplitude 0.0.
301
+ phases : np.ndarray or None, optional
302
+ Phase shifts for each harmonic component. If None, all phase shifts
303
+ are set to zero. Default is None.
304
+ overallphase : float, optional
305
+ Overall phase shift applied to the entire signal. Default is 0.0.
306
+
307
+ Returns
308
+ -------
309
+ float or np.ndarray
310
+ The computed cardiac signal value(s) at the given phase(s).
311
+ Returns a scalar if input is scalar, or array if input is array.
312
+
313
+ Notes
314
+ -----
315
+ The cardiac signal is computed as:
316
+ .. math::
317
+ s(t) = \\sum_{i=0}^{n-1} A_i \\cos((i+1)\\phi + \\phi_i + \\phi_{overall})
318
+
319
+ where:
320
+ - A_i are the amplitude coefficients
321
+ - φ is the phase value
322
+ - φ_i are the harmonic phase shifts
323
+ - φ_{overall} is the overall phase shift
324
+
325
+ Examples
326
+ --------
327
+ >>> import numpy as np
328
+ >>> cardiacsig(0.5)
329
+ 1.0
330
+
331
+ >>> cardiacsig(np.linspace(0, 2*np.pi, 100), amps=(1.0, 0.5, 0.2))
332
+ array([...])
333
+
334
+ >>> cardiacsig(1.0, amps=(2.0, 1.0, 0.5), phases=[0.0, np.pi/4, np.pi/2])
335
+ -0.7071067811865476
336
+ """
160
337
  total = 0.0
161
338
  if phases is None:
162
339
  phases = amps * 0.0
@@ -166,26 +343,109 @@ def cardiacsig(thisphase, amps=(1.0, 0.0, 0.0), phases=None, overallphase=0.0):
166
343
 
167
344
 
168
345
  def cardiacfromimage(
169
- normdata_byslice,
170
- estweights_byslice,
171
- numslices,
172
- timepoints,
173
- tr,
174
- slicetimes,
175
- cardprefilter,
176
- respprefilter,
177
- notchpct=1.5,
178
- invertphysiosign=False,
179
- madnorm=True,
180
- nprocs=1,
181
- arteriesonly=False,
182
- fliparteries=False,
183
- debug=False,
184
- appflips_byslice=None,
185
- verbose=False,
186
- usemask=True,
187
- multiplicative=True,
188
- ):
346
+ normdata_byslice: np.ndarray,
347
+ estweights_byslice: np.ndarray,
348
+ numslices: int,
349
+ timepoints: int,
350
+ tr: float,
351
+ slicetimes: np.ndarray,
352
+ cardprefilter: object,
353
+ respprefilter: object,
354
+ notchpct: float = 1.5,
355
+ notchrolloff: float = 0.5,
356
+ invertphysiosign: bool = False,
357
+ madnorm: bool = True,
358
+ nprocs: int = 1,
359
+ arteriesonly: bool = False,
360
+ fliparteries: bool = False,
361
+ debug: bool = False,
362
+ appflips_byslice: np.ndarray | None = None,
363
+ verbose: bool = False,
364
+ usemask: bool = True,
365
+ multiplicative: bool = True,
366
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
367
+ """
368
+ Extract cardiac and respiratory signals from 4D fMRI data using slice timing information.
369
+
370
+ This function processes preprocessed fMRI data to isolate cardiac and respiratory
371
+ physiological signals by leveraging slice timing information and filtering techniques.
372
+ It applies normalization, averaging across slices, and harmonic notch filtering to
373
+ extract clean physiological time series.
374
+
375
+ Parameters
376
+ ----------
377
+ normdata_byslice : np.ndarray
378
+ Normalized fMRI data organized by slice, shape (timepoints, numslices, timepoints).
379
+ estweights_byslice : np.ndarray
380
+ Estimated weights for each voxel and slice, shape (timepoints, numslices).
381
+ numslices : int
382
+ Number of slices in the acquisition.
383
+ timepoints : int
384
+ Number of time points in the fMRI time series.
385
+ tr : float
386
+ Repetition time (TR) in seconds.
387
+ slicetimes : np.ndarray
388
+ Slice acquisition times relative to the start of the TR, shape (numslices,).
389
+ cardprefilter : object
390
+ Cardiac prefilter object with an `apply` method for filtering physiological signals.
391
+ respprefilter : object
392
+ Respiratory prefilter object with an `apply` method for filtering physiological signals.
393
+ notchpct : float, optional
394
+ Percentage of notch bandwidth, default is 1.5.
395
+ notchrolloff : float, optional
396
+ Notch filter rolloff, default is 0.5.
397
+ invertphysiosign : bool, optional
398
+ If True, invert the physiological signal sign, default is False.
399
+ madnorm : bool, optional
400
+ If True, use median absolute deviation normalization, default is True.
401
+ nprocs : int, optional
402
+ Number of processes to use for computation, default is 1.
403
+ arteriesonly : bool, optional
404
+ If True, only use arterial signal, default is False.
405
+ fliparteries : bool, optional
406
+ If True, flip the arterial signal, default is False.
407
+ debug : bool, optional
408
+ If True, enable debug output, default is False.
409
+ appflips_byslice : np.ndarray | None, optional
410
+ Array of application flips for each slice, default is None.
411
+ verbose : bool, optional
412
+ If True, print verbose output, default is False.
413
+ usemask : bool, optional
414
+ If True, use masking for valid voxels, default is True.
415
+ multiplicative : bool, optional
416
+ If True, apply multiplicative normalization, default is True.
417
+
418
+ Returns
419
+ -------
420
+ tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
421
+ - `hirescardtc`: High-resolution cardiac time course.
422
+ - `cardnormfac`: Normalization factor for cardiac signal.
423
+ - `hiresresptc`: High-resolution respiratory time course.
424
+ - `respnormfac`: Normalization factor for respiratory signal.
425
+ - `slicesamplerate`: Slice sampling rate in Hz.
426
+ - `numsteps`: Number of unique slice times.
427
+ - `sliceoffsets`: Slice offsets relative to TR.
428
+ - `cycleaverage`: Average signal per slice time step.
429
+ - `slicenorms`: Slice-wise normalization factors.
430
+
431
+ Notes
432
+ -----
433
+ - The function assumes that `normdata_byslice` and `estweights_byslice` are properly
434
+ preprocessed and aligned with slice timing information.
435
+ - The cardiac and respiratory signals are extracted using harmonic notch filtering
436
+ and prefiltering steps.
437
+ - The returned time courses are normalized using median absolute deviation (MAD) unless
438
+ `madnorm` is set to False.
439
+
440
+ Examples
441
+ --------
442
+ >>> # Assuming all inputs are prepared
443
+ >>> card_signal, card_norm, resp_signal, resp_norm, samplerate, numsteps, \
444
+ ... sliceoffsets, cycleavg, slicenorms = cardiacfromimage(
445
+ ... normdata_byslice, estweights_byslice, numslices, timepoints,
446
+ ... tr, slicetimes, cardprefilter, respprefilter
447
+ ... )
448
+ """
189
449
  # find out what timepoints we have, and their spacing
190
450
  numsteps, minstep, sliceoffsets = tide_io.sliceinfo(slicetimes, tr)
191
451
  print(
@@ -291,16 +551,131 @@ def cardiacfromimage(
291
551
  )
292
552
 
293
553
 
294
- def theCOM(X, data):
554
+ def theCOM(X: np.ndarray, data: np.ndarray) -> float:
555
+ """
556
+ Calculate the center of mass of a system of particles.
557
+
558
+ Parameters
559
+ ----------
560
+ X : np.ndarray
561
+ Array of positions (coordinates) of the particles. Should have the same shape as data.
562
+ data : np.ndarray
563
+ Array of mass values for each particle. Shape should be (n_particles,).
564
+
565
+ Returns
566
+ -------
567
+ float
568
+ The center of mass of the system.
569
+
570
+ Notes
571
+ -----
572
+ The center of mass is calculated using the formula:
573
+ COM = Σ(m_i * x_i) / Σ(m_i)
574
+
575
+ where m_i are the masses and x_i are the positions of particles.
576
+
577
+ Examples
578
+ --------
579
+ >>> import numpy as np
580
+ >>> positions = np.array([1.0, 3.0, 5.0])
581
+ >>> masses = np.array([1, 2, 3])
582
+ >>> com = theCOM(positions, masses)
583
+ >>> print(com)
584
+ 3.6666666666666665
585
+ """
295
586
  # return the center of mass
296
587
  return np.sum(X * data) / np.sum(data)
297
588
 
298
589
 
299
- def savgolsmooth(data, smoothlen=101, polyorder=3):
590
+ def savgolsmooth(data: np.ndarray, smoothlen: int = 101, polyorder: int = 3) -> np.ndarray:
591
+ """
592
+ Apply Savitzky-Golay filter to smooth data.
593
+
594
+ This function applies a Savitzky-Golay filter to smooth the input data using
595
+ a polynomial fit. The filter preserves higher moments of the data better than
596
+ simple moving averages, making it particularly useful for smoothing noisy data
597
+ while preserving peak shapes and heights.
598
+
599
+ Parameters
600
+ ----------
601
+ data : np.ndarray
602
+ Input data to be smoothed. Can be 1D or 2D array.
603
+ smoothlen : int, optional
604
+ Length of the filter window (i.e., the number of coefficients).
605
+ Must be a positive odd integer. Default is 101.
606
+ polyorder : int, optional
607
+ Order of the polynomial used to fit the samples. Must be less than
608
+ `smoothlen`. Default is 3.
609
+
610
+ Returns
611
+ -------
612
+ np.ndarray
613
+ Smoothed data with the same shape as the input `data`.
614
+
615
+ Notes
616
+ -----
617
+ The Savitzky-Golay filter is a digital filter that smooths data by fitting
618
+ a polynomial of specified order to a sliding window of data points. It is
619
+ particularly effective at preserving the shape and features of the original
620
+ data while removing noise.
621
+
622
+ Examples
623
+ --------
624
+ >>> import numpy as np
625
+ >>> data = np.random.randn(100)
626
+ >>> smoothed = savgolsmooth(data, smoothlen=21, polyorder=3)
627
+
628
+ >>> # For 2D data
629
+ >>> data_2d = np.random.randn(50, 10)
630
+ >>> smoothed_2d = savgolsmooth(data_2d, smoothlen=11, polyorder=2)
631
+ """
300
632
  return savgol_filter(data, smoothlen, polyorder)
301
633
 
302
634
 
303
- def getperiodic(inputdata, Fs, fundfreq, ncomps=1, width=0.4, debug=False):
635
+ def getperiodic(
636
+ inputdata: np.ndarray,
637
+ Fs: float,
638
+ fundfreq: float,
639
+ ncomps: int = 1,
640
+ width: float = 0.4,
641
+ debug: bool = False,
642
+ ) -> np.ndarray:
643
+ """
644
+ Apply a periodic filter to extract harmonic components from input data.
645
+
646
+ This function applies a non-causal filter to isolate and extract periodic
647
+ components of a signal based on a fundamental frequency and number of
648
+ harmonics. It uses an arbitrary filter design to define stopband and passband
649
+ frequencies for each harmonic component.
650
+
651
+ Parameters
652
+ ----------
653
+ inputdata : np.ndarray
654
+ Input signal data to be filtered.
655
+ Fs : float
656
+ Sampling frequency of the input signal (Hz).
657
+ fundfreq : float
658
+ Fundamental frequency of the periodic signal (Hz).
659
+ ncomps : int, optional
660
+ Number of harmonic components to extract. Default is 1.
661
+ width : float, optional
662
+ Width parameter controlling the bandwidth of each harmonic filter.
663
+ Default is 0.4.
664
+ debug : bool, optional
665
+ If True, print debug information during processing. Default is False.
666
+
667
+ Returns
668
+ -------
669
+ np.ndarray
670
+ Filtered output signal containing the specified harmonic components.
671
+
672
+ Notes
673
+ -----
674
+ The function reduces the number of components (`ncomps`) if the highest
675
+ harmonic exceeds the Nyquist frequency (Fs/2). Each harmonic is filtered
676
+ using an arbitrary filter with stopband and passband frequencies defined
677
+ based on the `width` parameter.
678
+ """
304
679
  outputdata = inputdata * 0.0
305
680
  lowerdist = fundfreq - fundfreq / (1.0 + width)
306
681
  upperdist = fundfreq * width
@@ -325,13 +700,56 @@ def getperiodic(inputdata, Fs, fundfreq, ncomps=1, width=0.4, debug=False):
325
700
 
326
701
 
327
702
  def getcardcoeffs(
328
- cardiacwaveform,
329
- slicesamplerate,
330
- minhr=40.0,
331
- maxhr=140.0,
332
- smoothlen=101,
333
- debug=False,
334
- ):
703
+ cardiacwaveform: np.ndarray,
704
+ slicesamplerate: float,
705
+ minhr: float = 40.0,
706
+ maxhr: float = 140.0,
707
+ smoothlen: int = 101,
708
+ debug: bool = False,
709
+ ) -> float:
710
+ """
711
+ Compute the fundamental cardiac frequency from a cardiac waveform using spectral analysis.
712
+
713
+ This function estimates the fundamental cardiac frequency of a given cardiac waveform
714
+ by performing a Welch periodogram and applying a smoothing filter to identify the dominant
715
+ frequency component. The result is returned as a frequency value in Hz, which can be
716
+ converted to BPM by multiplying by 60.
717
+
718
+ Parameters
719
+ ----------
720
+ cardiacwaveform : np.ndarray
721
+ Input cardiac waveform signal as a 1D numpy array.
722
+ slicesamplerate : float
723
+ Sampling rate of the input waveform in Hz.
724
+ minhr : float, optional
725
+ Minimum allowed heart rate in BPM. Default is 40.0.
726
+ maxhr : float, optional
727
+ Maximum allowed heart rate in BPM. Default is 140.0.
728
+ smoothlen : int, optional
729
+ Length of the Savitzky-Golay filter window for smoothing the spectrum.
730
+ Default is 101.
731
+ debug : bool, optional
732
+ If True, print intermediate debug information including initial and final
733
+ frequency estimates. Default is False.
734
+
735
+ Returns
736
+ -------
737
+ float
738
+ Estimated fundamental cardiac frequency in Hz.
739
+
740
+ Notes
741
+ -----
742
+ The function applies a Hamming window to the input signal before spectral analysis.
743
+ It removes spectral components outside the physiological range (defined by `minhr`
744
+ and `maxhr`) and uses Savitzky-Golay smoothing to detect the peak frequency.
745
+
746
+ Examples
747
+ --------
748
+ >>> import numpy as np
749
+ >>> waveform = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000))
750
+ >>> freq = getcardcoeffs(waveform, slicesamplerate=100)
751
+ >>> print(f"Estimated heart rate: {freq * 60:.2f} BPM")
752
+ """
335
753
  if len(cardiacwaveform) > 1024:
336
754
  thex, they = welch(cardiacwaveform, slicesamplerate, nperseg=1024)
337
755
  else:
@@ -366,10 +784,59 @@ def getcardcoeffs(
366
784
 
367
785
 
368
786
  def _procOneVoxelDetrend(
369
- vox,
370
- voxelargs,
787
+ vox: int,
788
+ voxelargs: tuple,
371
789
  **kwargs,
372
- ):
790
+ ) -> tuple[int, np.ndarray]:
791
+ """
792
+ Detrend fMRI voxel data for a single voxel.
793
+
794
+ This function applies detrending to fMRI voxel data using the tide_fit.detrend
795
+ function. It supports both linear and polynomial detrending with optional
796
+ mean centering.
797
+
798
+ Parameters
799
+ ----------
800
+ vox : int
801
+ Voxel index identifier.
802
+ voxelargs : tuple
803
+ Tuple containing fMRI voxel data as the first element. Expected format:
804
+ (fmri_voxeldata,)
805
+ **kwargs : dict
806
+ Additional keyword arguments for detrending options:
807
+ - detrendorder : int, optional
808
+ Order of the detrend polynomial (default: 1 for linear detrend)
809
+ - demean : bool, optional
810
+ If True, remove the mean from the data (default: False)
811
+ - debug : bool, optional
812
+ If True, print debug information (default: False)
813
+
814
+ Returns
815
+ -------
816
+ tuple
817
+ A tuple containing:
818
+ - vox : int
819
+ The original voxel index
820
+ - detrended_voxeldata : ndarray
821
+ The detrended fMRI voxel data with the same shape as input
822
+
823
+ Notes
824
+ -----
825
+ This function uses the tide_fit.detrend function internally for the actual
826
+ detrending operation. The detrendorder parameter controls the polynomial order
827
+ of the detrending (0 = mean removal only, 1 = linear detrend, 2 = quadratic detrend, etc.).
828
+
829
+ Examples
830
+ --------
831
+ >>> import numpy as np
832
+ >>> from rapidtide.fit import detrend
833
+ >>> data = np.random.randn(100)
834
+ >>> result = _procOneVoxelDetrend(0, (data,), detrendorder=1, demean=True)
835
+ >>> print(result[0]) # voxel index
836
+ 0
837
+ >>> print(result[1].shape) # detrended data shape
838
+ (100,)
839
+ """
373
840
  # unpack arguments
374
841
  options = {
375
842
  "detrendorder": 1,
@@ -392,34 +859,159 @@ def _procOneVoxelDetrend(
392
859
  )
393
860
 
394
861
 
395
- def _packDetrendvoxeldata(voxnum, voxelargs):
862
+ def _packDetrendvoxeldata(voxnum: int, voxelargs: list) -> list[np.ndarray]:
863
+ """
864
+ Extract voxel data for a specific voxel number from voxel arguments.
865
+
866
+ Parameters
867
+ ----------
868
+ voxnum : int
869
+ The voxel number to extract data for.
870
+ voxelargs : tuple
871
+ A tuple containing voxel data arrays, where the first element is
872
+ expected to be a 2D array with voxel data indexed by [voxel, feature].
873
+
874
+ Returns
875
+ -------
876
+ list
877
+ A list containing a single element, which is a 1D array of feature
878
+ values for the specified voxel number.
879
+
880
+ Notes
881
+ -----
882
+ This function is designed to extract a single voxel's worth of data
883
+ from a collection of voxel arguments for further processing in
884
+ detrending operations.
885
+
886
+ Examples
887
+ --------
888
+ >>> import numpy as np
+ >>> voxel_data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
889
+ >>> result = _packDetrendvoxeldata(1, (voxel_data,))
890
+ >>> print(result)
891
+ [array([4, 5, 6])]
892
+ """
396
893
  return [(voxelargs[0])[voxnum, :]]
397
894
 
398
895
 
399
- def _unpackDetrendvoxeldata(retvals, voxelproducts):
896
+ def _unpackDetrendvoxeldata(retvals: tuple, voxelproducts: list) -> None:
897
+ """
898
+ Unpack detrend voxel data by assigning values to voxel products array.
899
+
900
+ Parameters
901
+ ----------
902
+ retvals : tuple or list
903
+ Contains two elements: retvals[0] is the voxel index and retvals[1] is
904
+ the detrended timecourse to assign at that index.
905
+ voxelproducts : list
906
+ List containing arrays where voxelproducts[0] is the target array that
907
+ will be modified in-place with the assigned values.
908
+
909
+ Returns
910
+ -------
911
+ None
912
+ This function modifies voxelproducts[0] in-place and does not return anything.
913
+
914
+ Notes
915
+ -----
916
+ This function performs an in-place assignment operation where values from
917
+ retvals[1] are placed at the row given by retvals[0] in the first
918
+ element of voxelproducts list.
919
+
920
+ Examples
921
+ --------
922
+ >>> retvals = ([0, 1, 2], [10, 20, 30])
923
+ >>> voxelproducts = [np.zeros(5)]
924
+ >>> _unpackDetrendvoxeldata(retvals, voxelproducts)
925
+ >>> print(voxelproducts[0])
926
+ [10. 20. 30. 0. 0.]
927
+ """
400
928
  (voxelproducts[0])[retvals[0], :] = retvals[1]
401
929
 
402
930
 
403
931
  def normalizevoxels(
404
- fmri_data,
405
- detrendorder,
406
- validvoxels,
407
- time,
408
- timings,
409
- LGR=None,
410
- mpcode=True,
411
- nprocs=1,
412
- alwaysmultiproc=False,
413
- showprogressbar=True,
414
- chunksize=1000,
415
- debug=False,
416
- ):
932
+ fmri_data: np.ndarray,
933
+ detrendorder: int,
934
+ validvoxels: np.ndarray,
935
+ time: object,
936
+ timings: list,
937
+ LGR: object | None = None,
938
+ mpcode: bool = True,
939
+ nprocs: int = 1,
940
+ alwaysmultiproc: bool = False,
941
+ showprogressbar: bool = True,
942
+ chunksize: int = 1000,
943
+ debug: bool = False,
944
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
945
+ """
946
+ Normalize fMRI voxel data by detrending and z-scoring.
947
+
948
+ This function applies detrending to fMRI data and then normalizes the data
949
+ using mean and median-based scaling. It supports both single-threaded and
950
+ multi-threaded processing for detrending.
951
+
952
+ Parameters
953
+ ----------
954
+ fmri_data : np.ndarray
955
+ 2D array of fMRI data with shape (n_voxels, n_timepoints).
956
+ detrendorder : int
957
+ Order of detrending to apply. If 0, no detrending is performed.
958
+ validvoxels : np.ndarray
959
+ 1D array of indices indicating which voxels are valid for processing.
960
+ time : object
961
+ Module or object with a `time.time()` method for timing operations.
962
+ timings : list
963
+ List to append timing information about processing steps.
964
+ LGR : object, optional
965
+ Logger object for debugging; default is None.
966
+ mpcode : bool, optional
967
+ If True, use multi-processing for detrending; default is True.
968
+ nprocs : int, optional
969
+ Number of processes to use in multi-processing; default is 1.
970
+ alwaysmultiproc : bool, optional
971
+ If True, always use multi-processing even for small datasets; default is False.
972
+ showprogressbar : bool, optional
973
+ If True, show progress bar during voxel processing; default is True.
974
+ chunksize : int, optional
975
+ Size of chunks for multi-processing; default is 1000.
976
+ debug : bool, optional
977
+ If True, enable debug output; default is False.
978
+
979
+ Returns
980
+ -------
981
+ tuple of np.ndarray
982
+ A tuple containing:
983
+ - `normdata`: Normalized fMRI data (z-scored).
984
+ - `demeandata`: Detrended and mean-centered data.
985
+ - `means`: Mean values for each voxel.
986
+ - `medians`: Median values for each voxel.
987
+ - `mads`: Median absolute deviation for each voxel.
988
+
989
+ Notes
990
+ -----
991
+ - The function modifies `fmri_data` in-place during detrending.
992
+ - If `detrendorder` is greater than 0, detrending is applied using `tide_fit.detrend`.
993
+ - Multi-processing is used when `mpcode=True` and the number of voxels exceeds a threshold.
994
+ - Timing information is appended to the `timings` list.
995
+
996
+ Examples
997
+ --------
998
+ >>> import numpy as np
999
+ >>> from tqdm import tqdm
1000
+ >>> fmri_data = np.random.rand(100, 200)
1001
+ >>> validvoxels = np.arange(100)
1002
+ >>> timings = []
1003
+ >>> normdata, demeandata, means, medians, mads = normalizevoxels(
1004
+ ... fmri_data, detrendorder=1, validvoxels=validvoxels,
1005
+ ... time=time, timings=timings
1006
+ ... )
1007
+ """
417
1008
  print("Normalizing voxels...")
418
1009
  normdata = fmri_data * 0.0
419
1010
  demeandata = fmri_data * 0.0
420
1011
  starttime = time.time()
421
1012
  # detrend if we are going to
422
1013
  numspatiallocs = fmri_data.shape[0]
1014
+ # NB: fmri_data is detrended in place
423
1015
  if detrendorder > 0:
424
1016
  print("Detrending to order", detrendorder, "...")
425
1017
  if mpcode:
@@ -484,8 +1076,60 @@ def normalizevoxels(
484
1076
 
485
1077
 
486
1078
  def cleanphysio(
487
- Fs, physiowaveform, cutoff=0.4, thresh=0.2, nyquist=None, iscardiac=True, debug=False
488
- ):
1079
+ Fs: float,
1080
+ physiowaveform: np.ndarray,
1081
+ cutoff: float = 0.4,
1082
+ thresh: float = 0.2,
1083
+ nyquist: float | None = None,
1084
+ iscardiac: bool = True,
1085
+ debug: bool = False,
1086
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, float]:
1087
+ """
1088
+ Apply filtering and normalization to a physiological waveform to extract a cleaned signal and envelope.
1089
+
1090
+ This function performs bandpass filtering on a physiological signal to detect its envelope,
1091
+ then applies high-pass filtering to remove baseline drift. The waveform is normalized using
1092
+ the envelope to produce a cleaned and standardized signal.
1093
+
1094
+ Parameters
1095
+ ----------
1096
+ Fs : float
1097
+ Sampling frequency of the input waveform in Hz.
1098
+ physiowaveform : np.ndarray
1099
+ Input physiological waveform signal (1D array).
1100
+ cutoff : float, optional
1101
+ Cutoff frequency for envelope detection, by default 0.4.
1102
+ thresh : float, optional
1103
+ Threshold for envelope normalization, by default 0.2.
1104
+ nyquist : float, optional
1105
+ Nyquist frequency to constrain the high-pass filter, by default None.
1106
+ iscardiac : bool, optional
1107
+ Flag indicating if the signal is cardiac; affects filter type, by default True.
1108
+ debug : bool, optional
1109
+ If True, print debug information during processing, by default False.
1110
+
1111
+ Returns
1112
+ -------
1113
+ tuple[np.ndarray, np.ndarray, np.ndarray, float]
1114
+ A tuple containing:
1115
+ - `filtphysiowaveform`: The high-pass filtered waveform.
1116
+ - `normphysio`: The normalized waveform using the envelope.
1117
+ - `envelope`: The detected envelope of the signal.
1118
+ - `envmean`: The mean of the envelope.
1119
+
1120
+ Notes
1121
+ -----
1122
+ - The function uses `tide_filt.NoncausalFilter` for filtering and `tide_math.envdetect` for envelope detection.
1123
+ - The waveform is normalized using median absolute deviation (MAD) normalization.
1124
+ - The envelope is thresholded to avoid very low values during normalization.
1125
+
1126
+ Examples
1127
+ --------
1128
+ >>> import numpy as np
1129
+ >>> Fs = 100.0
1130
+ >>> signal = np.random.randn(1000)
1131
+ >>> filtered, normalized, env, env_mean = cleanphysio(Fs, signal)
1132
+ """
489
1133
  # first bandpass the cardiac signal to calculate the envelope
490
1134
  if debug:
491
1135
  print("Entering cleanphysio")
@@ -528,17 +1172,73 @@ def cleanphysio(
528
1172
 
529
1173
 
530
1174
  def findbadpts(
531
- thewaveform,
532
- nameroot,
533
- outputroot,
534
- samplerate,
535
- infodict,
536
- thetype="mad",
537
- retainthresh=0.89,
538
- mingap=2.0,
539
- outputlevel=0,
540
- debug=True,
541
- ):
1175
+ thewaveform: np.ndarray,
1176
+ nameroot: str,
1177
+ outputroot: str,
1178
+ samplerate: float,
1179
+ infodict: dict,
1180
+ thetype: str = "mad",
1181
+ retainthresh: float = 0.89,
1182
+ mingap: float = 2.0,
1183
+ outputlevel: int = 0,
1184
+ debug: bool = True,
1185
+ ) -> tuple[np.ndarray, float | tuple[float, float]]:
1186
+ """
1187
+ Identify bad points in a waveform based on statistical thresholding and gap filling.
1188
+
1189
+ This function detects outliers in a waveform using either the Median Absolute Deviation (MAD)
1190
+ or a fractional value-based method. It then applies gap-filling logic to merge short
1191
+ sequences of bad points into longer ones, based on a minimum gap threshold.
1192
+
1193
+ Parameters
1194
+ ----------
1195
+ thewaveform : np.ndarray
1196
+ Input waveform data as a 1D numpy array.
1197
+ nameroot : str
1198
+ Root name used for labeling output files and dictionary keys.
1199
+ outputroot : str
1200
+ Root path for writing output files if `outputlevel > 0`.
1201
+ samplerate : float
1202
+ Sampling rate of the waveform in Hz.
1203
+ infodict : dict
1204
+ Dictionary to store metadata about the thresholding method and value.
1205
+ thetype : str, optional
1206
+ Thresholding method to use. Options are:
1207
+ - "mad" (default): Uses Median Absolute Deviation.
1208
+ - "fracval": Uses percentile-based thresholds.
1209
+ retainthresh : float, optional
1210
+ Threshold for retaining data, between 0 and 1. Default is 0.89.
1211
+ mingap : float, optional
1212
+ Minimum gap (in seconds) to consider for merging bad point streaks. Default is 2.0.
1213
+ outputlevel : int, optional
1214
+ Level of output verbosity. If > 0, writes bad point vector to file. Default is 0.
1215
+ debug : bool, optional
1216
+ If True, prints debug information. Default is True.
1217
+
1218
+ Returns
1219
+ -------
1220
+ tuple[np.ndarray, float | tuple[float, float]]
1221
+ A tuple containing:
1222
+ - `thebadpts`: A 1D numpy array of the same length as `thewaveform`, with 1.0 for bad points and 0.0 for good.
1223
+ - `thresh`: The calculated threshold value(s) used for bad point detection.
1224
+ - If `thetype == "mad"`, `thresh` is a float.
1225
+ - If `thetype == "fracval"`, `thresh` is a tuple of (lower_threshold, upper_threshold).
1226
+
1227
+ Notes
1228
+ -----
1229
+ - The "mad" method uses the median and MAD to compute a sigma-based threshold.
1230
+ - The "fracval" method uses percentiles to define a range and marks values outside
1231
+ that range as bad.
1232
+ - Gap-filling logic merges bad point streaks that are closer than `mingap` seconds.
1233
+
1234
+ Examples
1235
+ --------
1236
+ >>> import numpy as np
1237
+ >>> waveform = np.random.normal(0, 1, 1000)
1238
+ >>> info = {}
1239
+ >>> badpts, threshold = findbadpts(waveform, "test", "/tmp", 100.0, info, thetype="mad")
1240
+ >>> print(f"Threshold used: {threshold}")
1241
+ """
542
1242
  # if thetype == 'triangle' or thetype == 'mad':
543
1243
  if thetype == "mad":
544
1244
  absdev = np.fabs(thewaveform - np.median(thewaveform))
@@ -607,11 +1307,112 @@ def findbadpts(
607
1307
  return thebadpts
608
1308
 
609
1309
 
610
- def approximateentropy(waveform, m, r):
1310
+ def approximateentropy(waveform: np.ndarray, m: int, r: float) -> float:
1311
+ """
1312
+ Calculate the approximate entropy of a waveform.
1313
+
1314
+ Approximate entropy is a measure of the complexity or irregularity of a time series.
1315
+ It quantifies the likelihood that similar patterns of observations will not be followed
1316
+ by additional similar observations.
1317
+
1318
+ Parameters
1319
+ ----------
1320
+ waveform : array_like
1321
+ Input time series data as a 1D array or list of numerical values.
1322
+ m : int
1323
+ Length of compared run of data. Must be a positive integer.
1324
+ r : float
1325
+ Tolerance parameter. Defines the maximum difference between values to be considered
1326
+ similar. Should be a positive number, typically set to 0.1-0.2 times the standard
1327
+ deviation of the data.
1328
+
1329
+ Returns
1330
+ -------
1331
+ float
1332
+ Approximate entropy value. Lower values indicate more regularity in the data,
1333
+ while higher values indicate more complexity or randomness.
1334
+
1335
+ Notes
1336
+ -----
1337
+ The approximate entropy is calculated using the method described by Pincus (1991).
1338
+ The algorithm computes the logarithm of the ratio of the number of similar patterns
1339
+ of length m to those of length m+1, averaged over all possible patterns.
1340
+
1341
+ This implementation assumes that the input waveform is a 1D array of numerical values.
1342
+ The function is sensitive to the choice of parameters m and r, and results may vary
1343
+ depending on the data characteristics.
1344
+
1345
+ Examples
1346
+ --------
1347
+ >>> import numpy as np
1348
+ >>> waveform = [1, 2, 3, 4, 5, 4, 3, 2, 1]
1349
+ >>> apen = approximateentropy(waveform, m=2, r=0.1)
1350
+ >>> print(round(apen, 4))
1351
+ 0.1335
1352
+
1353
+ >>> # For a more complex signal
1354
+ >>> np.random.seed(42)
1355
+ >>> noisy_signal = np.random.randn(100)
1356
+ >>> apen_noisy = approximateentropy(noisy_signal, m=2, r=0.1)
1357
+ >>> print(apen_noisy > 0)
1358
+ True
1359
+ """
1360
+
611
1361
  def _maxdist(x_i, x_j):
1362
+ """
1363
+ Calculate the maximum absolute difference between corresponding elements of two sequences.
1364
+
1365
+ Parameters
1366
+ ----------
1367
+ x_i : array-like
1368
+ First sequence of numbers.
1369
+ x_j : array-like
1370
+ Second sequence of numbers.
1371
+
1372
+ Returns
1373
+ -------
1374
+ float
1375
+ The maximum absolute difference between corresponding elements of x_i and x_j.
1376
+
1377
+ Notes
1378
+ -----
1379
+ This function computes the Chebyshev distance (also known as the maximum metric) between two vectors.
1380
+ Both sequences must have the same length, otherwise the function will raise a ValueError.
1381
+
1382
+ Examples
1383
+ --------
1384
+ >>> _maxdist([1, 2, 3], [4, 1, 2])
1385
+ 3
1386
+ >>> _maxdist([0, 0], [1, 1])
1387
+ 1
1388
+ """
612
1389
  return max([abs(ua - va) for ua, va in zip(x_i, x_j)])
613
1390
 
614
1391
  def _phi(m):
1392
+ """
1393
+ Calculate phi value for approximate entropy calculation.
1394
+
1395
+ Parameters
1396
+ ----------
1397
+ m : int
1398
+ Length of template vectors for comparison.
1399
+
1400
+ Returns
1401
+ -------
1402
+ float
1403
+ Phi value representing the approximate entropy.
1404
+
1405
+ Notes
1406
+ -----
1407
+ This function computes the phi value used in approximate entropy calculations.
1408
+ It compares template vectors of length m and calculates the proportion of
1409
+ vectors that are within a tolerance threshold r of each other.
1410
+
1411
+ Examples
1412
+ --------
1413
+ >>> _phi(2)
1414
+ 0.5703489003472879
1415
+ """
615
1416
  x = [[waveform[j] for j in range(i, i + m - 1 + 1)] for i in range(N - m + 1)]
616
1417
  C = [len([1 for x_j in x if _maxdist(x_i, x_j) <= r]) / (N - m + 1.0) for x_i in x]
617
1418
  return (N - m + 1.0) ** (-1) * sum(np.log(C))
@@ -621,7 +1422,51 @@ def approximateentropy(waveform, m, r):
621
1422
  return abs(_phi(m + 1) - _phi(m))
622
1423
 
623
1424
 
624
- def summarizerun(theinfodict, getkeys=False):
1425
+ def summarizerun(theinfodict: dict, getkeys: bool = False) -> str:
1426
+ """
1427
+ Summarize physiological signal quality metrics from a dictionary.
1428
+
1429
+ This function extracts specific signal quality indices from a dictionary
1430
+ containing physiological monitoring data. It can either return the metric
1431
+ values or the corresponding keys depending on the getkeys parameter.
1432
+
1433
+ Parameters
1434
+ ----------
1435
+ theinfodict : dict
1436
+ Dictionary containing physiological signal quality metrics with keys
1437
+ including 'corrcoeff_raw2pleth', 'corrcoeff_filt2pleth', 'E_sqi_mean_pleth',
1438
+ 'E_sqi_mean_bold', 'S_sqi_mean_pleth', 'S_sqi_mean_bold', 'K_sqi_mean_pleth',
1439
+ and 'K_sqi_mean_bold'.
1440
+ getkeys : bool, optional
1441
+ If True, returns a comma-separated string of all metric keys.
1442
+ If False (default), returns a comma-separated string of metric values
1443
+ corresponding to the keys in the dictionary. If a key is missing, an
1444
+ empty string is returned for that position.
1445
+
1446
+ Returns
1447
+ -------
1448
+ str
1449
+ If getkeys=True: comma-separated string of all metric keys.
1450
+ If getkeys=False: comma-separated string of metric values from the dictionary,
1451
+ with empty strings for missing keys.
1452
+
1453
+ Notes
1454
+ -----
1455
+ The function handles missing keys gracefully by returning empty strings
1456
+ for missing metrics rather than raising exceptions.
1457
+
1458
+ Examples
1459
+ --------
1460
+ >>> data = {
1461
+ ... "corrcoeff_raw2pleth": 0.85,
1462
+ ... "E_sqi_mean_pleth": 0.92
1463
+ ... }
1464
+ >>> summarizerun(data)
1465
+ '0.85,,0.92,,,,,'
1466
+
1467
+ >>> summarizerun(data, getkeys=True)
1468
+ 'corrcoeff_raw2pleth,corrcoeff_filt2pleth,E_sqi_mean_pleth,E_sqi_mean_bold,S_sqi_mean_pleth,S_sqi_mean_bold,K_sqi_mean_pleth,K_sqi_mean_bold'
1469
+ """
625
1470
  keylist = [
626
1471
  "corrcoeff_raw2pleth",
627
1472
  "corrcoeff_filt2pleth",
@@ -644,68 +1489,133 @@ def summarizerun(theinfodict, getkeys=False):
644
1489
  return ",".join(outputline)
645
1490
 
646
1491
 
647
- def entropy(waveform):
1492
+ def entropy(waveform: np.ndarray) -> float:
1493
+ """
1494
+ Calculate the entropy of a waveform.
1495
+
1496
+ Parameters
1497
+ ----------
1498
+ waveform : array-like
1499
+ Input waveform data. Should be a numeric array-like object containing
1500
+ the waveform samples.
1501
+
1502
+ Returns
1503
+ -------
1504
+ float
1505
+ The entropy value of the waveform, computed as -∑(x² * log₂(x²)) where
1506
+ x represents the waveform samples.
1507
+
1508
+ Notes
1509
+ -----
1510
+ This function computes the entropy using the formula -∑(x² * log₂(x²)),
1511
+ where x² represents the squared waveform values. The np.nan_to_num function
1512
+ is used to handle potential NaN values in the logarithm calculation.
1513
+
1514
+ Examples
1515
+ --------
1516
+ >>> import numpy as np
1517
+ >>> waveform = np.array([0.5, 0.5, 0.5, 0.5])
1518
+ >>> entropy(waveform)
1519
+ 0.0
1520
+ """
648
1521
  return -np.sum(np.square(waveform) * np.nan_to_num(np.log2(np.square(waveform))))
649
1522
 
650
1523
 
651
1524
  def calcplethquality(
652
- waveform,
653
- Fs,
654
- infodict,
655
- suffix,
656
- outputroot,
657
- S_windowsecs=5.0,
658
- K_windowsecs=60.0,
659
- E_windowsecs=1.0,
660
- detrendorder=8,
661
- outputlevel=0,
662
- initfile=True,
663
- debug=False,
664
- ):
1525
+ waveform: np.ndarray,
1526
+ Fs: float,
1527
+ infodict: dict,
1528
+ suffix: str,
1529
+ outputroot: str,
1530
+ S_windowsecs: float = 5.0,
1531
+ K_windowsecs: float = 60.0,
1532
+ E_windowsecs: float = 1.0,
1533
+ detrendorder: int = 8,
1534
+ outputlevel: int = 0,
1535
+ initfile: bool = True,
1536
+ debug: bool = False,
1537
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
665
1538
  """
1539
+ Calculate windowed skewness, kurtosis, and entropy quality metrics for a plethysmogram.
1540
+
1541
+ This function computes three quality metrics — skewness (S), kurtosis (K), and entropy (E) —
1542
+ over sliding windows of the input waveform. These metrics are used to assess the quality
1543
+ of photoplethysmogram (PPG) signals based on the method described in Elgendi (2016).
666
1544
 
667
1545
  Parameters
668
1546
  ----------
669
- waveform: array-like
670
- The cardiac waveform to be assessed
671
- Fs: float
672
- The sample rate of the data
673
- S_windowsecs: float
674
- Skewness window duration in seconds. Defaults to 5.0 (optimal for discrimination of "good" from "acceptable"
675
- and "unfit" according to Elgendi)
676
- K_windowsecs: float
677
- Skewness window duration in seconds. Defaults to 2.0 (after Selveraj)
678
- E_windowsecs: float
679
- Entropy window duration in seconds. Defaults to 0.5 (after Selveraj)
680
- detrendorder: int
681
- Order of detrending polynomial to apply to plethysmogram.
682
- debug: boolean
683
- Turn on extended output
1547
+ waveform : array-like
1548
+ The cardiac waveform to be assessed.
1549
+ Fs : float
1550
+ The sample rate of the data in Hz.
1551
+ infodict : dict
1552
+ Dictionary to store computed quality metrics.
1553
+ suffix : str
1554
+ Suffix to append to metric keys in `infodict`.
1555
+ outputroot : str
1556
+ Root name for output files if `outputlevel > 1`.
1557
+ S_windowsecs : float, optional
1558
+ Skewness window duration in seconds. Default is 5.0 seconds.
1559
+ K_windowsecs : float, optional
1560
+ Kurtosis window duration in seconds. Default is 60.0 seconds.
1561
+ E_windowsecs : float, optional
1562
+ Entropy window duration in seconds. Default is 1.0 seconds.
1563
+ detrendorder : int, optional
1564
+ Order of the detrending polynomial applied to the plethysmogram. Default is 8.
1565
+ outputlevel : int, optional
1566
+ Level of output verbosity. If > 1, time-series data will be written to files.
1567
+ initfile : bool, optional
1568
+ Whether to initialize output files. Default is True.
1569
+ debug : bool, optional
1570
+ If True, print debug information. Default is False.
684
1571
 
685
1572
  Returns
686
1573
  -------
687
- S_sqi_mean: float
688
- The mean value of the quality index over all time
689
- S_std_mean: float
690
- The standard deviation of the quality index over all time
691
- S_waveform: array
692
- The quality metric over all timepoints
693
- K_sqi_mean: float
694
- The mean value of the quality index over all time
695
- K_std_mean: float
696
- The standard deviation of the quality index over all time
697
- K_waveform: array
698
- The quality metric over all timepoints
699
- E_sqi_mean: float
700
- The mean value of the quality index over all time
701
- E_std_mean: float
702
- The standard deviation of the quality index over all time
703
- E_waveform: array
704
- The quality metric over all timepoints
705
-
706
-
707
- Calculates the windowed skewness, kurtosis, and entropy quality metrics described in Elgendi, M.
708
- "Optimal Signal Quality Index for Photoplethysmogram Signals". Bioengineering 2016, Vol. 3, Page 21 3, 21 (2016).
1574
+ tuple
1575
+ A tuple containing the following elements in order:
1576
+
1577
+ - S_sqi_mean : float
1578
+ Mean value of the skewness quality index over all time.
1579
+ - S_sqi_std : float
1580
+ Standard deviation of the skewness quality index over all time.
1581
+ - S_waveform : array
1582
+ The skewness quality metric over all timepoints.
1583
+ - K_sqi_mean : float
1584
+ Mean value of the kurtosis quality index over all time.
1585
+ - K_sqi_std : float
1586
+ Standard deviation of the kurtosis quality index over all time.
1587
+ - K_waveform : array
1588
+ The kurtosis quality metric over all timepoints.
1589
+ - E_sqi_mean : float
1590
+ Mean value of the entropy quality index over all time.
1591
+ - E_sqi_std : float
1592
+ Standard deviation of the entropy quality index over all time.
1593
+ - E_waveform : array
1594
+ The entropy quality metric over all timepoints.
1595
+
1596
+ Notes
1597
+ -----
1598
+ The function applies a detrending polynomial to the input waveform before computing
1599
+ the quality metrics. Window sizes are rounded to the nearest odd number of samples
1600
+ to ensure symmetric windows.
1601
+
1602
+ References
1603
+ ----------
1604
+ Elgendi, M. "Optimal Signal Quality Index for Photoplethysmogram Signals".
1605
+ Bioengineering 2016, Vol. 3, Page 21 (2016).
1606
+
1607
+ Examples
1608
+ --------
1609
+ >>> import numpy as np
1610
+ >>> from scipy.stats import skew, kurtosis
1611
+ >>> waveform = np.random.randn(1000)
1612
+ >>> Fs = 100.0
1613
+ >>> infodict = {}
1614
+ >>> suffix = "_test"
1615
+ >>> outputroot = "test_output"
1616
+ >>> S_mean, S_std, S_wave, K_mean, K_std, K_wave, E_mean, E_std, E_wave = calcplethquality(
1617
+ ... waveform, Fs, infodict, suffix, outputroot
1618
+ ... )
709
1619
  """
710
1620
  # detrend the waveform
711
1621
  dt_waveform = tide_fit.detrend(waveform, order=detrendorder, demean=True)
@@ -788,21 +1698,95 @@ def calcplethquality(
788
1698
 
789
1699
 
790
1700
  def getphysiofile(
791
- waveformfile,
792
- inputfreq,
793
- inputstart,
794
- slicetimeaxis,
795
- stdfreq,
796
- stdpoints,
797
- envcutoff,
798
- envthresh,
799
- timings,
800
- outputroot,
801
- slop=0.25,
802
- outputlevel=0,
803
- iscardiac=True,
804
- debug=False,
805
- ):
1701
+ waveformfile: str,
1702
+ inputfreq: float,
1703
+ inputstart: float | None,
1704
+ slicetimeaxis: np.ndarray,
1705
+ stdfreq: float,
1706
+ stdpoints: int,
1707
+ envcutoff: float,
1708
+ envthresh: float,
1709
+ timings: list,
1710
+ outputroot: str,
1711
+ slop: float = 0.25,
1712
+ outputlevel: int = 0,
1713
+ iscardiac: bool = True,
1714
+ debug: bool = False,
1715
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
1716
+ """
1717
+ Read, process, and resample physiological waveform data.
1718
+
1719
+ This function reads a physiological signal from a text file, filters and normalizes
1720
+ the signal, and resamples it to both slice-specific and standard time resolutions.
1721
+ It supports cardiac and non-cardiac signal processing, with optional debugging and
1722
+ output writing.
1723
+
1724
+ Parameters
1725
+ ----------
1726
+ waveformfile : str
1727
+ Path to the input physiological waveform file.
1728
+ inputfreq : float
1729
+ Sampling frequency of the input waveform. If negative, the frequency is
1730
+ inferred from the file.
1731
+ inputstart : float or None
1732
+ Start time of the input waveform. If None, defaults to 0.0.
1733
+ slicetimeaxis : array_like
1734
+ Time axis corresponding to slice acquisition times.
1735
+ stdfreq : float
1736
+ Standard sampling frequency for resampling.
1737
+ stdpoints : int
1738
+ Number of points for the standard time axis.
1739
+ envcutoff : float
1740
+ Cutoff frequency for envelope filtering.
1741
+ envthresh : float
1742
+ Threshold for envelope normalization.
1743
+ timings : list
1744
+ List to append timing information for logging.
1745
+ outputroot : str
1746
+ Root name for output files.
1747
+ slop : float, optional
1748
+ Tolerance for time alignment check (default is 0.25).
1749
+ outputlevel : int, optional
1750
+ Level of output writing (default is 0).
1751
+ iscardiac : bool, optional
1752
+ Flag indicating if the signal is cardiac (default is True).
1753
+ debug : bool, optional
1754
+ Enable debug printing (default is False).
1755
+
1756
+ Returns
1757
+ -------
1758
+ waveform_sliceres : ndarray
1759
+ Physiological signal resampled to slice time resolution.
1760
+ waveform_stdres : ndarray
1761
+ Physiological signal resampled to standard time resolution.
1762
+ inputfreq : float
1763
+ The actual input sampling frequency used.
1764
+ numpoints : int
1765
+ Length of the original waveform data.
1766
+
1767
+ Notes
1768
+ -----
1769
+ - The function reads the waveform file using `tide_io.readvectorsfromtextfile`.
1770
+ - Signal filtering and normalization are performed using `cleanphysio`.
1771
+ - Resampling is done using `tide_resample.doresample`.
1772
+ - If `iscardiac` is True, raw and cleaned signals are saved to files when `outputlevel > 1`.
1773
+
1774
+ Examples
1775
+ --------
1776
+ >>> waveform_sliceres, waveform_stdres, freq, length = getphysiofile(
1777
+ ... waveformfile="physio.txt",
1778
+ ... inputfreq=100.0,
1779
+ ... inputstart=0.0,
1780
+ ... slicetimeaxis=np.linspace(0, 10, 50),
1781
+ ... stdfreq=25.0,
1782
+ ... stdpoints=100,
1783
+ ... envcutoff=0.5,
1784
+ ... envthresh=0.1,
1785
+ ... timings=[],
1786
+ ... outputroot="output",
1787
+ ... debug=False
1788
+ ... )
1789
+ """
806
1790
  if debug:
807
1791
  print("Entering getphysiofile")
808
1792
  print("Reading physiological signal from file")
@@ -928,7 +1912,62 @@ def getphysiofile(
928
1912
  return waveform_sliceres, waveform_stdres, inputfreq, len(waveform_fullres)
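A minimal sketch of the resampling step documented above, using plain linear interpolation in place of tide_resample.doresample (file reading, filtering, and normalization are omitted; all names and values here are illustrative):

    import numpy as np

    # Hypothetical input: a waveform sampled at inputfreq Hz starting at inputstart s.
    inputfreq, inputstart = 100.0, 0.0
    waveform_fullres = np.sin(2.0 * np.pi * 1.1 * np.arange(0, 30, 1.0 / inputfreq))
    input_timeaxis = inputstart + np.arange(len(waveform_fullres)) / inputfreq

    # Resample to a standard frequency, analogous to the waveform_stdres output.
    stdfreq, stdpoints = 25.0, 500
    std_timeaxis = np.arange(stdpoints) / stdfreq
    waveform_stdres = np.interp(std_timeaxis, input_timeaxis, waveform_fullres)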
929
1913
 
930
1914
 
931
- def readextmask(thefilename, nim_hdr, xsize, ysize, numslices, debug=False):
1915
+ def readextmask(
1916
+ thefilename: str,
1917
+ nim_hdr: dict,
1918
+ xsize: int,
1919
+ ysize: int,
1920
+ numslices: int,
1921
+ debug: bool = False,
1922
+ ) -> np.ndarray:
1923
+ """
1924
+ Read and validate external mask from NIfTI file.
1925
+
1926
+ This function reads a mask from a NIfTI file and performs validation checks
1927
+ to ensure compatibility with the input fMRI data dimensions. The mask must
1928
+ have exactly 3 dimensions and match the spatial dimensions of the fMRI data.
1929
+
1930
+ Parameters
1931
+ ----------
1932
+ thefilename : str
1933
+ Path to the NIfTI file containing the mask
1934
+ nim_hdr : dict
1935
+ Header information from the fMRI data
1936
+ xsize : int
1937
+ X dimension size of the fMRI data
1938
+ ysize : int
1939
+ Y dimension size of the fMRI data
1940
+ numslices : int
1941
+ Number of slices in the fMRI data
1942
+ debug : bool, optional
1943
+ If True, print debug information about mask dimensions (default is False)
1944
+
1945
+ Returns
1946
+ -------
1947
+ numpy.ndarray
1948
+ The mask data array with shape (xsize, ysize, numslices)
1949
+
1950
+ Raises
1951
+ ------
1952
+ ValueError
1953
+ If mask dimensions do not match fMRI data dimensions or if mask has
1954
+ more than 3 dimensions
1955
+
1956
+ Notes
1957
+ -----
1958
+ The function performs the following validation checks:
1959
+ 1. Reads mask from NIfTI file using tide_io.readfromnifti
1960
+ 2. Parses NIfTI dimensions using tide_io.parseniftidims
1961
+ 3. Validates that mask spatial dimensions match fMRI data dimensions
1962
+ 4. Ensures mask has exactly 3 dimensions (no time dimension allowed)
1963
+
1964
+ Examples
1965
+ --------
1966
+ >>> import numpy as np
1967
+ >>> mask_data = readextmask('mask.nii', fmri_header, 64, 64, 30)
1968
+ >>> print(mask_data.shape)
1969
+ (64, 64, 30)
1970
+ """
932
1971
  (
933
1972
  extmask,
934
1973
  extmask_data,
@@ -953,32 +1992,60 @@ def readextmask(thefilename, nim_hdr, xsize, ysize, numslices, debug=False):
953
1992
  return extmask_data
954
1993
 
955
1994
 
956
- def checkcardmatch(reference, candidate, samplerate, refine=True, zeropadding=0, debug=False):
1995
+ def checkcardmatch(
1996
+ reference: np.ndarray,
1997
+ candidate: np.ndarray,
1998
+ samplerate: float,
1999
+ refine: bool = True,
2000
+ zeropadding: int = 0,
2001
+ debug: bool = False,
2002
+ ) -> tuple[float, float, int]:
957
2003
  """
2004
+ Compare two cardiac waveforms using cross-correlation and peak fitting.
2005
+
2006
+ This function performs a cross-correlation between a reference and a candidate
2007
+ cardiac waveform after applying a non-causal cardiac filter. It then fits a
2008
+ Gaussian to the cross-correlation peak to estimate the time delay and
2009
+ correlation strength.
958
2010
 
959
2011
  Parameters
960
2012
  ----------
961
- reference: 1D numpy array
962
- The cardiac waveform to compare to
963
- candidate: 1D numpy array
964
- The cardiac waveform to be assessed
965
- samplerate: float
966
- The sample rate of the data in Hz
967
- refine: bool, optional
968
- Whether to refine the peak fit. Default is True.
969
- zeropadding: int, optional
970
- Specify the length of correlation padding to use.
971
- debug: bool, optional
972
- Output additional information for debugging
2013
+ reference : 1D numpy array
2014
+ The cardiac waveform to compare to.
2015
+ candidate : 1D numpy array
2016
+ The cardiac waveform to be assessed.
2017
+ samplerate : float
2018
+ The sample rate of the data in Hz.
2019
+ refine : bool, optional
2020
+ Whether to refine the peak fit. Default is True.
2021
+ zeropadding : int, optional
2022
+ Specify the length of correlation padding to use. Default is 0.
2023
+ debug : bool, optional
2024
+ Output additional information for debugging. Default is False.
973
2025
 
974
2026
  Returns
975
2027
  -------
976
- maxval: float
977
- The maximum value of the crosscorrelation function
978
- maxdelay: float
2028
+ maxval : float
2029
+ The maximum value of the crosscorrelation function.
2030
+ maxdelay : float
979
2031
  The time, in seconds, where the maximum crosscorrelation occurs.
980
- failreason: flag
981
- Reason why the fit failed (0 if no failure)
2032
+ failreason : int
2033
+ Reason why the fit failed (0 if no failure).
2034
+
2035
+ Notes
2036
+ -----
2037
+ The function applies a cardiac filter to both waveforms before computing
2038
+ the cross-correlation. A Gaussian fit is used to estimate the peak location
2039
+ and strength within a predefined search range of ±2 seconds around the
2040
+ initial peak.
2041
+
2042
+ Examples
2043
+ --------
2044
+ >>> import numpy as np
2045
+ >>> reference = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000))
2046
+ >>> candidate = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000) + 0.1)
2047
+ >>> maxval, maxdelay, failreason = checkcardmatch(reference, candidate, 100)
2048
+ >>> print(f"Max correlation: {maxval}, Delay: {maxdelay}s")
982
2049
  """
983
2050
  thecardfilt = tide_filt.NoncausalFilter(filtertype="cardiac")
984
2051
  trimlength = np.min([len(reference), len(candidate)])
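A minimal sketch of the delay-estimation idea behind checkcardmatch, using a plain normalized cross-correlation and argmax in place of the cardiac filtering and Gaussian peak refinement described in the docstring (values are illustrative):

    import numpy as np

    samplerate = 25.0
    t = np.arange(0, 60, 1.0 / samplerate)
    reference = np.sin(2.0 * np.pi * 1.0 * t)
    candidate = np.sin(2.0 * np.pi * 1.0 * (t - 0.2))  # candidate lags the reference by 0.2 s

    # Full cross-correlation; index i corresponds to a lag of (i - len(reference) + 1) samples.
    xcorr = np.correlate(candidate, reference, mode="full")
    lags = np.arange(-len(reference) + 1, len(reference)) / samplerate
    maxdelay = lags[np.argmax(xcorr)]        # close to +0.2 s here
    maxval = np.max(xcorr) / (np.linalg.norm(reference) * np.linalg.norm(candidate))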
@@ -1039,16 +2106,78 @@ def checkcardmatch(reference, candidate, samplerate, refine=True, zeropadding=0,
1039
2106
 
1040
2107
 
1041
2108
  def cardiaccycleaverage(
1042
- sourcephases,
1043
- destinationphases,
1044
- waveform,
1045
- procpoints,
1046
- congridbins,
1047
- gridkernel,
1048
- centric,
1049
- cache=True,
1050
- cyclic=True,
1051
- ):
2109
+ sourcephases: np.ndarray,
2110
+ destinationphases: np.ndarray,
2111
+ waveform: np.ndarray,
2112
+ procpoints: np.ndarray,
2113
+ congridbins: int,
2114
+ gridkernel: str,
2115
+ centric: bool,
2116
+ cache: bool = True,
2117
+ cyclic: bool = True,
2118
+ ) -> tuple[np.ndarray, np.ndarray]:
2119
+ """
2120
+ Compute the average waveform over a cardiac cycle using phase-based resampling.
2121
+
2122
+ This function performs phase-resolved averaging of a waveform signal over a
2123
+ cardiac cycle. It uses a resampling technique to map source phase values to
2124
+ destination phases, accumulating weighted contributions to produce an averaged
2125
+ waveform. The result is normalized and adjusted to remove artifacts from low
2126
+ weight regions.
2127
+
2128
+ Parameters
2129
+ ----------
2130
+ sourcephases : array-like
2131
+ Array of source phase values (in radians) corresponding to the waveform data.
2132
+ destinationphases : array-like
2133
+ Array of destination phase values (in radians) where the averaged waveform
2134
+ will be computed.
2135
+ waveform : array-like
2136
+ Array of waveform values to be averaged.
2137
+ procpoints : array-like
2138
+ Array of indices indicating which points in `waveform` and `sourcephases`
2139
+ should be processed.
2140
+ congridbins : int
2141
+ Number of bins used in the resampling process.
2142
+ gridkernel : str
2143
+ Name of the interpolation kernel used during resampling (e.g. "kaiser" or "gaussian").
2144
+ centric : bool
2145
+ If True, phase values are treated as centric (e.g., centered around 0).
2146
+ If False, phase values are treated as cyclic (e.g., 0 to 2π).
2147
+ cache : bool, optional
2148
+ If True, use cached results for repeated computations (default is True).
2149
+ cyclic : bool, optional
2150
+ If True, treat phase values as cyclic (default is True).
2151
+
2152
+ Returns
2153
+ -------
2154
+ tuple of ndarray
2155
+ A tuple containing:
2156
+ - `rawapp_bypoint`: The normalized averaged waveform values for each
2157
+ destination phase.
2158
+ - `weight_bypoint`: The total weight for each destination phase.
2159
+
2160
+ Notes
2161
+ -----
2162
+ The function applies a threshold to weights: only points with weights greater
2163
+ than 1/50th of the maximum weight are considered valid. These points are then
2164
+ normalized and shifted to start from zero.
2165
+
2166
+ Examples
2167
+ --------
2168
+ >>> import numpy as np
2169
+ >>> sourcephases = np.linspace(0, 2*np.pi, 100)
2170
+ >>> destinationphases = np.linspace(0, 2*np.pi, 50)
2171
+ >>> waveform = np.sin(sourcephases)
2172
+ >>> procpoints = np.arange(100)
2173
+ >>> congridbins = 10
2174
+ >>> gridkernel = "gaussian"
2175
+ >>> centric = False
2176
+ >>> avg_waveform, weights = cardiaccycleaverage(
2177
+ ... sourcephases, destinationphases, waveform, procpoints,
2178
+ ... congridbins, gridkernel, centric
2179
+ ... )
2180
+ """
1052
2181
  rawapp_bypoint = np.zeros(len(destinationphases), dtype=np.float64)
1053
2182
  weight_bypoint = np.zeros(len(destinationphases), dtype=np.float64)
1054
2183
  for t in procpoints:
@@ -1076,7 +2205,47 @@ def cardiaccycleaverage(
1076
2205
  return rawapp_bypoint, weight_bypoint
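A minimal sketch of the phase-binned averaging idea implemented above, using nearest-bin accumulation in place of the kernel-based congrid resampling (bin count, threshold, and variable names are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    sourcephases = rng.uniform(0.0, 2.0 * np.pi, size=2000)   # phase tag for each sample
    waveform = np.sin(sourcephases) + 0.1 * rng.standard_normal(2000)

    nbins = 50
    binindex = np.floor(sourcephases / (2.0 * np.pi) * nbins).astype(int) % nbins
    rawapp_bypoint = np.bincount(binindex, weights=waveform, minlength=nbins)
    weight_bypoint = np.bincount(binindex, minlength=nbins).astype(float)

    # Keep only bins with at least 1/50th of the maximum weight, normalize, and shift to zero.
    validbins = weight_bypoint > np.max(weight_bypoint) / 50.0
    rawapp_bypoint[validbins] /= weight_bypoint[validbins]
    rawapp_bypoint[validbins] -= np.min(rawapp_bypoint[validbins])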
1077
2206
 
1078
2207
 
1079
- def circularderivs(timecourse):
2208
+ def circularderivs(timecourse: np.ndarray) -> tuple[float, int, float, int]:
2209
+ """
2210
+ Compute circular first derivatives and their extremal values.
2211
+
2212
+ This function calculates the circular first derivative of a time course,
2213
+ which is the difference between consecutive elements with the last element
2214
+ wrapped around to the first. It then returns the maximum and minimum values
2215
+ of these derivatives along with their indices.
2216
+
2217
+ Parameters
2218
+ ----------
2219
+ timecourse : array-like
2220
+ Input time course data as a 1D array or sequence of numerical values.
2221
+
2222
+ Returns
2223
+ -------
2224
+ tuple
2225
+ A tuple containing four elements:
2226
+ - max_derivative : float
2227
+ The maximum value of the circular first derivative
2228
+ - argmax_index : int
2229
+ The index of the maximum derivative value
2230
+ - min_derivative : float
2231
+ The minimum value of the circular first derivative
2232
+ - argmin_index : int
2233
+ The index of the minimum derivative value
2234
+
2235
+ Notes
2236
+ -----
2237
+ The circular first derivative is computed as:
2238
+ ``first_deriv[i] = timecourse[i+1] - timecourse[i]`` for i < n-1,
2239
+ and ``first_deriv[n-1] = timecourse[0] - timecourse[n-1]``.
2240
+
2241
+ Examples
2242
+ --------
2243
+ >>> import numpy as np
2244
+ >>> timecourse = [1, 2, 3, 2, 1]
2245
+ >>> max_val, max_idx, min_val, min_idx = circularderivs(timecourse)
2246
+ >>> print(f"Max derivative: {max_val} at index {max_idx}")
2247
+ >>> print(f"Min derivative: {min_val} at index {min_idx}")
2248
+ """
1080
2249
  firstderiv = np.diff(timecourse, append=[timecourse[0]])
1081
2250
  return (
1082
2251
  np.max(firstderiv),
@@ -1087,6 +2256,79 @@ def circularderivs(timecourse):
1087
2256
 
1088
2257
 
1089
2258
  def _procOnePhaseProject(slice, sliceargs, **kwargs):
2259
+ """
2260
+ Process a single slice for phase projection of fMRI data (resampling and averaging).
2261
+
2262
+ This function performs temporal resampling of fMRI data along the phase dimension
2263
+ using a congrid-based interpolation scheme. It updates weight, raw application,
2264
+ and cine data arrays based on the resampled values.
2265
+
2266
+ Parameters
2267
+ ----------
2268
+ slice : int
2269
+ The slice index to process.
2270
+ sliceargs : tuple
2271
+ A tuple containing the following elements:
2272
+ - validlocslist : list of arrays
2273
+ List of valid location indices for each slice.
2274
+ - proctrs : array-like
2275
+ Time indices to process.
2276
+ - demeandata_byslice : ndarray
2277
+ Demeaned fMRI data organized by slice and time.
2278
+ - fmri_data_byslice : ndarray
2279
+ Raw fMRI data organized by slice and time.
2280
+ - outphases : array-like
2281
+ Output phase values for resampling.
2282
+ - cardphasevals : ndarray
2283
+ Cardiac phase values for each slice and timepoint.
2284
+ - congridbins : int
2285
+ Number of bins for congrid interpolation.
2286
+ - gridkernel : str
2287
+ Interpolation kernel to use.
2288
+ - weights_byslice : ndarray
2289
+ Weight array to be updated.
2290
+ - cine_byslice : ndarray
2291
+ Cine data array to be updated.
2292
+ - destpoints : int
2293
+ Number of destination points.
2294
+ - rawapp_byslice : ndarray
2295
+ Raw application data array to be updated.
2296
+ **kwargs : dict
2297
+ Additional options to override default settings:
2298
+ - cache : bool, optional
2299
+ Whether to use caching in congrid (default: True).
2300
+ - debug : bool, optional
2301
+ Whether to enable debug mode (default: False).
2302
+
2303
+ Returns
2304
+ -------
2305
+ tuple
2306
+ A tuple containing:
2307
+ - slice : int
2308
+ The input slice index.
2309
+ - rawapp_byslice : ndarray
2310
+ Updated raw application data for the slice.
2311
+ - cine_byslice : ndarray
2312
+ Updated cine data for the slice.
2313
+ - weights_byslice : ndarray
2314
+ Updated weights for the slice.
2315
+ - validlocs : array-like
2316
+ Valid location indices for the slice.
2317
+
2318
+ Notes
2319
+ -----
2320
+ This function modifies the input arrays `weights_byslice`, `rawapp_byslice`,
2321
+ and `cine_byslice` in-place. The function assumes that the data has already
2322
+ been preprocessed and organized into slices and time points.
2323
+
2324
+ Examples
2325
+ --------
2326
+ >>> slice_idx = 0
2327
+ >>> args = (validlocslist, proctrs, demeandata_byslice, fmri_data_byslice,
2328
+ ... outphases, cardphasevals, congridbins, gridkernel,
2329
+ ... weights_byslice, cine_byslice, destpoints, rawapp_byslice)
2330
+ >>> result = _procOnePhaseProject(slice_idx, args, cache=False)
2331
+ """
1090
2332
  options = {
1091
2333
  "cache": True,
1092
2334
  "debug": False,
@@ -1150,6 +2392,34 @@ def _procOnePhaseProject(slice, sliceargs, **kwargs):
1150
2392
 
1151
2393
 
1152
2394
  def _packslicedataPhaseProject(slicenum, sliceargs):
2395
+ """
2396
+ Pack slice data for phase projection.
2397
+
2398
+ This function takes a slice number and slice arguments, then returns a
2399
+ flattened list containing all the slice arguments in order.
2400
+
2401
+ Parameters
2402
+ ----------
2403
+ slicenum : int
2404
+ The slice number identifier.
2405
+ sliceargs : list or tuple
2406
+ Collection of slice arguments to be packed into a flat list.
2407
+
2408
+ Returns
2409
+ -------
2410
+ list
2411
+ A list containing all elements from sliceargs in the same order.
2412
+
2413
+ Notes
2414
+ -----
2415
+ This function essentially performs a flattening operation on the slice
2416
+ arguments, converting them into a fixed-length list format.
2417
+
2418
+ Examples
2419
+ --------
2420
+ >>> _packslicedataPhaseProject(0, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
2421
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
2422
+ """
1153
2423
  return [
1154
2424
  sliceargs[0],
1155
2425
  sliceargs[1],
@@ -1167,12 +2437,97 @@ def _packslicedataPhaseProject(slicenum, sliceargs):
1167
2437
 
1168
2438
 
1169
2439
  def _unpackslicedataPhaseProject(retvals, voxelproducts):
2440
+ """
2441
+ Unpack slice data from the phase projection operation.
2442
+
2443
+ This function assigns sliced data from retvals to corresponding voxelproducts
2444
+ based on index mappings. It performs three simultaneous assignments using
2445
+ slicing operations on 3D arrays.
2446
+
2447
+ Parameters
2448
+ ----------
2449
+ retvals : tuple of array-like
2450
+ A tuple of five elements, as returned by ``_procOnePhaseProject``:
2451
+ - retvals[0] : the slice index
2452
+ - retvals[1], retvals[2], retvals[3] : updated rawapp, cine, and weight data for the slice
2453
+ - retvals[4] : valid voxel indices for the slice, used to index the first axis
2454
+ voxelproducts : list of array-like
2455
+ A list of 3 arrays that will be modified in-place with the sliced data.
2456
+ Each array is expected to be 3D and will be indexed using retvals[4] and retvals[0].
2457
+
2458
+ Returns
2459
+ -------
2460
+ None
2461
+ This function modifies voxelproducts in-place and does not return any value.
2462
+
2463
+ Notes
2464
+ -----
2465
+ The function performs three assignments:
2466
+ 1. voxelproducts[0][retvals[4], retvals[0], :] = retvals[1][retvals[4], :]
2467
+ 2. voxelproducts[1][retvals[4], retvals[0], :] = retvals[2][retvals[4], :]
2468
+ 3. voxelproducts[2][retvals[4], retvals[0], :] = retvals[3][retvals[4], :]
2469
+
2470
+ All arrays must be compatible for the specified slicing operations.
2471
+
2472
+ Examples
2473
+ --------
2474
+ >>> retvals = (0, np.array([[1, 2], [3, 4]]),
2475
+ ... np.array([[5, 6], [7, 8]]), np.array([[9, 10], [11, 12]]),
2476
+ ... np.array([0, 1]))
2477
+ >>> voxelproducts = [np.zeros((2, 2, 2)), np.zeros((2, 2, 2)), np.zeros((2, 2, 2))]
2478
+ >>> _unpackslicedataPhaseProject(retvals, voxelproducts)
2479
+ """
1170
2480
  (voxelproducts[0])[retvals[4], retvals[0], :] = (retvals[1])[retvals[4], :]
1171
2481
  (voxelproducts[1])[retvals[4], retvals[0], :] = (retvals[2])[retvals[4], :]
1172
2482
  (voxelproducts[2])[retvals[4], retvals[0], :] = (retvals[3])[retvals[4], :]
1173
2483
 
1174
2484
 
1175
- def preloadcongrid(outphases, congridbins, gridkernel="kaiser", cyclic=True, debug=False):
2485
+ def preloadcongrid(
2486
+ outphases: np.ndarray,
2487
+ congridbins: int,
2488
+ gridkernel: str = "kaiser",
2489
+ cyclic: bool = True,
2490
+ debug: bool = False,
2491
+ ) -> None:
2492
+ """
2493
+ Preload congrid interpolation cache for efficient subsequent calls.
2494
+
2495
+ This function preloads the congrid interpolation cache by performing a series
2496
+ of interpolation operations with different phase values. This avoids the
2497
+ computational overhead of cache initialization during subsequent calls to
2498
+ tide_resample.congrid with the same parameters.
2499
+
2500
+ Parameters
2501
+ ----------
2502
+ outphases : array-like
2503
+ Output phase values for the interpolation grid.
2504
+ congridbins : int
2505
+ Number of bins used by the congrid interpolation kernel.
2506
+ gridkernel : str, optional
2507
+ Interpolation kernel to use. Default is "kaiser".
2508
+ cyclic : bool, optional
2509
+ Whether to treat the data as cyclic. Default is True.
2510
+ debug : bool, optional
2511
+ Enable debug output. Default is False.
2512
+
2513
+ Returns
2514
+ -------
2515
+ None
2516
+ This function does not return any value.
2517
+
2518
+ Notes
2519
+ -----
2520
+ This function is designed to improve performance when calling tide_resample.congrid
2521
+ multiple times with the same parameters. By preloading the cache with various
2522
+ phase values, subsequent calls will be faster as the cache is already populated.
2523
+
2524
+ Examples
2525
+ --------
2526
+ >>> import numpy as np
2527
+ >>> outphases = np.linspace(0, 2*np.pi, 100)
2528
+ >>> congridbins = 3
2529
+ >>> preloadcongrid(outphases, congridbins, gridkernel="kaiser", cyclic=True)
2530
+ """
1176
2531
  outphasestep = outphases[1] - outphases[0]
1177
2532
  outphasecenter = outphases[int(len(outphases) / 2)]
1178
2533
  fillargs = outphasestep * (
@@ -1212,6 +2567,88 @@ def phaseprojectpass(
1212
2567
  cache=True,
1213
2568
  debug=False,
1214
2569
  ):
2570
+ """
2571
+ Perform cardiac phase projection of fMRI data across slices.
2572
+
2573
+ This function projects fMRI data onto a set of phase values using congrid
2574
+ resampling, accumulating results in `rawapp_byslice` and `cine_byslice` arrays.
2575
+ It supports both single-threaded and multi-processed execution.
2576
+
2577
+ Parameters
2578
+ ----------
2579
+ numslices : int
2580
+ Number of slices to process.
2581
+ demeandata_byslice : ndarray
2582
+ Demeaned fMRI data, shape (nvoxels, nslices, ntr).
2583
+ fmri_data_byslice : ndarray
2584
+ Raw fMRI data, shape (nvoxels, nslices, ntr).
2585
+ validlocslist : list of ndarray
2586
+ List of valid voxel indices for each slice.
2587
+ proctrs : ndarray
2588
+ Timepoints to process.
2589
+ weights_byslice : ndarray
2590
+ Weight array, shape (nvoxels, nslices, ndestpoints).
2591
+ cine_byslice : ndarray
2592
+ Cine data array, shape (nvoxels, nslices, ndestpoints).
2593
+ rawapp_byslice : ndarray
2594
+ Raw application data array, shape (nvoxels, nslices, ndestpoints).
2595
+ outphases : ndarray
2596
+ Output phase values.
2597
+ cardphasevals : ndarray
2598
+ Cardiac phase values for each slice and timepoint, shape (nslices, ntr).
2599
+ congridbins : int
2600
+ Number of bins for congrid resampling.
2601
+ gridkernel : str
2602
+ Kernel to use for congrid resampling.
2603
+ destpoints : int
2604
+ Number of destination points.
2605
+ mpcode : bool, optional
2606
+ If True, use multiprocessing. Default is False.
2607
+ nprocs : int, optional
2608
+ Number of processes to use if `mpcode` is True. Default is 1.
2609
+ alwaysmultiproc : bool, optional
2610
+ If True, always use multiprocessing even for small datasets. Default is False.
2611
+ showprogressbar : bool, optional
2612
+ If True, show progress bar. Default is True.
2613
+ cache : bool, optional
2614
+ If True, enable caching for congrid. Default is True.
2615
+ debug : bool, optional
2616
+ If True, enable debug output. Default is False.
2617
+
2618
+ Returns
2619
+ -------
2620
+ None
2621
+ The function modifies `weights_byslice`, `cine_byslice`, and `rawapp_byslice` in-place.
2622
+
2623
+ Notes
2624
+ -----
2625
+ This function is typically used in the context of cardiac phase-resolved fMRI analysis.
2626
+ It applies a congrid-based resampling technique to project data onto a specified
2627
+ phase grid, accumulating weighted contributions in the output arrays.
2628
+
2629
+ Examples
2630
+ --------
2631
+ >>> phaseprojectpass(
2632
+ ... numslices=10,
2633
+ ... demeandata_byslice=demean_data,
2634
+ ... fmri_data_byslice=fmri_data,
2635
+ ... validlocslist=valid_locs_list,
2636
+ ... proctrs=tr_list,
2637
+ ... weights_byslice=weights,
2638
+ ... cine_byslice=cine_data,
2639
+ ... rawapp_byslice=rawapp_data,
2640
+ ... outphases=phase_vals,
2641
+ ... cardphasevals=card_phase_vals,
2642
+ ... congridbins=100,
2643
+ ... gridkernel='gaussian',
2644
+ ... destpoints=50,
2645
+ ... mpcode=False,
2646
+ ... nprocs=4,
2647
+ ... showprogressbar=True,
2648
+ ... cache=True,
2649
+ ... debug=False,
2650
+ ... )
2651
+ """
1215
2652
  if mpcode:
1216
2653
  inputshape = rawapp_byslice.shape
1217
2654
  sliceargs = [
@@ -1294,6 +2731,60 @@ def phaseprojectpass(
1294
2731
 
1295
2732
 
1296
2733
  def _procOneSliceSmoothing(slice, sliceargs, **kwargs):
2734
+ """
2735
+ Apply smoothing filter to a single slice of projected data along time dimension.
2736
+
2737
+ This function processes a single slice of data by applying a smoothing filter
2738
+ to the raw application data and computing circular derivatives for the
2739
+ specified slice. The smoothing is applied only to valid locations within the slice.
2740
+
2741
+ Parameters
2742
+ ----------
2743
+ slice : int
2744
+ The slice index to process.
2745
+ sliceargs : tuple
2746
+ A tuple containing the following elements:
2747
+
2748
+ - validlocslist : list of arrays
2749
+ List of arrays containing valid location indices for each slice
2750
+ - rawapp_byslice : ndarray
2751
+ Array containing raw application data by slice [locations, slices, time_points]
2752
+ - appsmoothingfilter : object
2753
+ Smoothing filter object with an apply method
2754
+ - phaseFs : array-like
2755
+ Frequency values for smoothing filter application
2756
+ - derivatives_byslice : ndarray
2757
+ Array to store computed derivatives [locations, slices, time_points]
2758
+ **kwargs : dict
2759
+ Additional keyword arguments:
2760
+ - debug : bool, optional
2761
+ Enable debug mode (default: False)
2762
+
2763
+ Returns
2764
+ -------
2765
+ tuple
2766
+ A tuple containing:
2767
+
2768
+ - slice : int
2769
+ The input slice index
2770
+ - rawapp_byslice : ndarray
2771
+ Smoothed raw application data for the specified slice [locations, time_points]
2772
+ - derivatives_byslice : ndarray
2773
+ Computed circular derivatives for the specified slice [locations, time_points]
2774
+
2775
+ Notes
2776
+ -----
2777
+ - The function only processes slices with valid locations (len(validlocs) > 0)
2778
+ - Smoothing is applied using the provided smoothing filter's apply method
2779
+ - Circular derivatives are computed using the `circularderivs` function
2780
+ - The function modifies the input arrays in-place
2781
+
2782
+ Examples
2783
+ --------
2784
+ >>> slice_idx = 5
2785
+ >>> sliceargs = (validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice)
2786
+ >>> result = _procOneSliceSmoothing(slice_idx, sliceargs, debug=True)
2787
+ """
1297
2788
  options = {
1298
2789
  "debug": False,
1299
2790
  }
@@ -1312,6 +2803,31 @@ def _procOneSliceSmoothing(slice, sliceargs, **kwargs):
1312
2803
 
1313
2804
 
1314
2805
  def _packslicedataSliceSmoothing(slicenum, sliceargs):
2806
+ """Pack slice data for slice smoothing operation.
2807
+
2808
+ Parameters
2809
+ ----------
2810
+ slicenum : int
2811
+ The slice number identifier.
2812
+ sliceargs : list
2813
+ List containing slice arguments with at least 5 elements.
2814
+
2815
+ Returns
2816
+ -------
2817
+ list
2818
+ A list containing the first 5 elements from sliceargs in the same order.
2819
+
2820
+ Notes
2821
+ -----
2822
+ This function extracts the first five elements from the sliceargs parameter
2823
+ and returns them as a new list. It's typically used as part of a slice
2824
+ smoothing pipeline where slice arguments need to be packed for further processing.
2825
+
2826
+ Examples
2827
+ --------
2828
+ >>> _packslicedataSliceSmoothing(1, [10, 20, 30, 40, 50, 60])
2829
+ [10, 20, 30, 40, 50]
2830
+ """
1315
2831
  return [
1316
2832
  sliceargs[0],
1317
2833
  sliceargs[1],
@@ -1322,6 +2838,49 @@ def _packslicedataSliceSmoothing(slicenum, sliceargs):
1322
2838
 
1323
2839
 
1324
2840
  def _unpackslicedataSliceSmoothing(retvals, voxelproducts):
2841
+ """
2842
+ Unpack slice data for smoothing operation.
2843
+
2844
+ This function assigns smoothed slice data back to the voxel products array
2845
+ based on the provided retvals structure.
2846
+
2847
+ Parameters
2848
+ ----------
2849
+ retvals : tuple of array-like
2850
+ A tuple containing:
2851
+ - retvals[0] : array-like
2852
+ Index array for slice selection
2853
+ - retvals[1] : array-like
2854
+ First set of smoothed data to assign
2855
+ - retvals[2] : array-like
2856
+ Second set of smoothed data to assign
2857
+ voxelproducts : list of array-like
2858
+ A list containing two array-like objects where:
2859
+ - voxelproducts[0] : array-like
2860
+ First voxel product array to be modified
2861
+ - voxelproducts[1] : array-like
2862
+ Second voxel product array to be modified
2863
+
2864
+ Returns
2865
+ -------
2866
+ None
2867
+ This function modifies the voxelproducts arrays in-place and does not return anything.
2868
+
2869
+ Notes
2870
+ -----
2871
+ The function performs in-place assignment operations on the voxelproducts arrays.
2872
+ The first dimension of voxelproducts arrays is modified using retvals[0] as indices,
2873
+ while the second and third dimensions are directly assigned from retvals[1] and retvals[2].
2874
+
2875
+ Examples
2876
+ --------
2877
+ >>> import numpy as np
2878
+ >>> retvals = (np.array([0, 1, 2]), np.array([[1, 2], [3, 4], [5, 6]]), np.array([[7, 8], [9, 10], [11, 12]]))
2879
+ >>> voxelproducts = [np.zeros((3, 3, 2)), np.zeros((3, 3, 2))]
2880
+ >>> _unpackslicedataSliceSmoothing(retvals, voxelproducts)
2881
+ >>> print(voxelproducts[0])
2882
+ >>> print(voxelproducts[1])
2883
+ """
1325
2884
  (voxelproducts[0])[:, retvals[0], :] = retvals[1]
1326
2885
  (voxelproducts[1])[:, retvals[0], :] = retvals[2]
1327
2886
 
@@ -1338,6 +2897,58 @@ def tcsmoothingpass(
1338
2897
  showprogressbar=True,
1339
2898
  debug=False,
1340
2899
  ):
2900
+ """
2901
+ Apply smoothing to time course data across slices using multiprocessing.
2902
+
2903
+ This function performs smoothing operations on time course data organized by slices,
2904
+ utilizing multiprocessing for improved performance when processing large datasets.
2905
+
2906
+ Parameters
2907
+ ----------
2908
+ numslices : int
2909
+ Number of slices in the dataset
2910
+ validlocslist : list
2911
+ List of arrays of valid voxel indices, one per slice
2912
+ rawapp_byslice : numpy.ndarray
2913
+ Raw application data organized by slice
2914
+ appsmoothingfilter : object
2915
+ Smoothing filter object (with an apply method) applied to each timecourse
2916
+ phaseFs : float
2917
+ Phase frequency parameter for smoothing operations
2918
+ derivatives_byslice : numpy.ndarray
2919
+ Derivative data organized by slice
2920
+ nprocs : int, optional
2921
+ Number of processors to use for multiprocessing (default is 1)
2922
+ alwaysmultiproc : bool, optional
2923
+ Whether to always use multiprocessing regardless of data size (default is False)
2924
+ showprogressbar : bool, optional
2925
+ Whether to display progress bar during processing (default is True)
2926
+ debug : bool, optional
2927
+ Enable debug mode for additional logging (default is False)
2928
+
2929
+ Returns
2930
+ -------
2931
+ numpy.ndarray
2932
+ Processed data after smoothing operations have been applied
2933
+
2934
+ Notes
2935
+ -----
2936
+ This function uses the `tide_genericmultiproc.run_multiproc` utility to distribute
2937
+ the smoothing workload across multiple processors. The function handles data organization
2938
+ and processing for each slice individually, then combines results.
2939
+
2940
+ Examples
2941
+ --------
2942
+ >>> result = tcsmoothingpass(
2943
+ ... numslices=10,
2944
+ ... validlocslist=[np.arange(1000) for _ in range(10)],
2945
+ ... rawapp_byslice=raw_data,
2946
+ ... appsmoothingfilter=smoothing_filter,
2947
+ ... phaseFs=100.0,
2948
+ ... derivatives_byslice=derivatives,
2949
+ ... nprocs=4
2950
+ ... )
2951
+ """
1341
2952
  inputshape = rawapp_byslice.shape
1342
2953
  sliceargs = [validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice]
1343
2954
  slicefunc = _procOneSliceSmoothing
@@ -1394,6 +3005,97 @@ def phaseproject(
1394
3005
  thealiasedcorrx,
1395
3006
  theAliasedCorrelator,
1396
3007
  ):
3008
+ """
3009
+ Perform phase projection and related processing on fMRI data across slices.
3010
+
3011
+ This function performs phase projection on fMRI data, optionally smoothing
3012
+ timecourses, and applying flips based on derivative information. It also
3013
+ computes wave delay and amplitude measures via aliased correlation, and updates relevant arrays
3014
+ in-place for further processing.
3015
+
3016
+ Parameters
3017
+ ----------
3018
+ input_data : object
3019
+ Input fMRI data container with `getdims()` and `byslice()` methods.
3020
+ demeandata_byslice : array_like
3021
+ Demeaned fMRI data by slice.
3022
+ means_byslice : array_like
3023
+ Mean values by slice for normalization.
3024
+ rawapp_byslice : array_like
3025
+ Raw phase-projected (app) data by slice.
3026
+ app_byslice : array_like
3027
+ APP data after initial processing.
3028
+ normapp_byslice : array_like
3029
+ Normalized APP data.
3030
+ weights_byslice : array_like
3031
+ Weights by slice for processing.
3032
+ cine_byslice : array_like
3033
+ Cine data by slice.
3034
+ projmask_byslice : array_like
3035
+ Projection mask by slice.
3036
+ derivatives_byslice : array_like
3037
+ Derivative data by slice, used for determining flips.
3038
+ proctrs : array_like
3039
+ Indices of the timepoints (TRs) to process.
3040
+ thispass : int
3041
+ Current processing pass number.
3042
+ args : argparse.Namespace
3043
+ Command-line arguments controlling processing behavior.
3044
+ sliceoffsets : array_like
3045
+ Slice offset values.
3046
+ cardphasevals : array_like
3047
+ Cardiac phase values.
3048
+ outphases : array_like
3049
+ Output phases.
3050
+ appsmoothingfilter : array_like
3051
+ Smoothing filter for timecourses.
3052
+ phaseFs : float
3053
+ Sampling frequency for phase processing.
3054
+ thecorrfunc_byslice : array_like
3055
+ Correlation function by slice.
3056
+ waveamp_byslice : array_like
3057
+ Wave amplitude by slice.
3058
+ wavedelay_byslice : array_like
3059
+ Wave delay by slice.
3060
+ wavedelayCOM_byslice : array_like
3061
+ Center of mass of wave delay by slice.
3062
+ corrected_rawapp_byslice : array_like
3063
+ Corrected raw APP data by slice.
3064
+ corrstartloc : int
3065
+ Start location for correlation computation.
3066
+ correndloc : int
3067
+ End location for correlation computation.
3068
+ thealiasedcorrx : array_like
3069
+ Aliased correlation x-axis values.
3070
+ theAliasedCorrelator : object
3071
+ Correlator object for aliased correlation computation.
3072
+
3073
+ Returns
3074
+ -------
3075
+ appflips_byslice : array_like
3076
+ Flip values applied to the APP data by slice.
3077
+
3078
+ Notes
3079
+ -----
3080
+ - The function modifies several input arrays in-place.
3081
+ - If `args.smoothapp` is True, smoothing is applied to the raw APP data.
3082
+ - If `args.fliparteries` is True, flips are applied to correct arterial
3083
+ orientation.
3084
+ - If `args.doaliasedcorrelation` is True, aliased correlation is computed
3085
+ and stored in `thecorrfunc_byslice`.
3086
+
3087
+ Examples
3088
+ --------
3089
+ >>> phaseproject(
3090
+ ... input_data, demeandata_byslice, means_byslice, rawapp_byslice,
3091
+ ... app_byslice, normapp_byslice, weights_byslice, cine_byslice,
3092
+ ... projmask_byslice, derivatives_byslice, proctrs, thispass, args,
3093
+ ... sliceoffsets, cardphasevals, outphases, appsmoothingfilter,
3094
+ ... phaseFs, thecorrfunc_byslice, waveamp_byslice, wavedelay_byslice,
3095
+ ... wavedelayCOM_byslice, corrected_rawapp_byslice, corrstartloc,
3096
+ ... correndloc, thealiasedcorrx, theAliasedCorrelator
3097
+ ... )
3098
+ """
1397
3099
  xsize, ysize, numslices, timepoints = input_data.getdims()
1398
3100
  fmri_data_byslice = input_data.byslice()
1399
3101
 
@@ -1521,6 +3223,67 @@ def findvessels(
1521
3223
  outputlevel,
1522
3224
  debug=False,
1523
3225
  ):
3226
+ """
3227
+ Find vessel thresholds and generate vessel masks from app data.
3228
+
3229
+ This function processes app data to identify vessel thresholds and optionally
3230
+ generates histograms for visualization. It handles both normalized and
3231
+ unnormalized vessel maps based on the input parameters.
3232
+
3233
+ Parameters
3234
+ ----------
3235
+ app : numpy.ndarray
3236
+ Raw app data array
3237
+ normapp : numpy.ndarray
3238
+ Normalized app data array
3239
+ validlocs : numpy.ndarray
3240
+ Array of valid locations for processing
3241
+ numspatiallocs : int
3242
+ Number of spatial locations
3243
+ outputroot : str
3244
+ Root name (prefix) for output files
3245
+ unnormvesselmap : bool
3246
+ Flag indicating whether to use unnormalized vessel map
3247
+ destpoints : int
3248
+ Number of destination points
3249
+ softvesselfrac : float
3250
+ Fractional multiplier for soft vessel threshold
3251
+ histlen : int
3252
+ Length of histogram bins
3253
+ outputlevel : int
3254
+ Level of output generation (a histogram is written when greater than 0)
3255
+ debug : bool, optional
3256
+ Debug flag for additional logging (default is False)
3257
+
3258
+ Returns
3259
+ -------
3260
+ tuple
3261
+ Tuple containing (hardvesselthresh, softvesselthresh) threshold values
3262
+
3263
+ Notes
3264
+ -----
3265
+ The function performs the following steps:
3266
+ 1. Reshapes app data based on unnormvesselmap flag
3267
+ 2. Extracts valid locations from the reshaped data
3268
+ 3. Generates histogram if outputlevel > 0
3269
+ 4. Calculates hard and soft vessel thresholds based on 98th percentile
3270
+ 5. Prints threshold values to console
3271
+
3272
+ Examples
3273
+ --------
3274
+ >>> hard_thresh, soft_thresh = findvessels(
3275
+ ... app=app_data,
3276
+ ... normapp=norm_app_data,
3277
+ ... validlocs=valid_indices,
3278
+ ... numspatiallocs=100,
3279
+ ... outputroot='/path/to/output',
3280
+ ... unnormvesselmap=True,
3281
+ ... destpoints=50,
3282
+ ... softvesselfrac=0.5,
3283
+ ... histlen=100,
3284
+ ... outputlevel=1
3285
+ ... )
3286
+ """
1524
3287
  if unnormvesselmap:
1525
3288
  app2d = app.reshape((numspatiallocs, destpoints))
1526
3289
  else:
@@ -1548,6 +3311,44 @@ def findvessels(
1548
3311
 
1549
3312
 
1550
3313
  def upsampleimage(input_data, numsteps, sliceoffsets, slicesamplerate, outputroot):
3314
+ """
3315
+ Upsample fMRI data along the temporal and slice dimensions.
3316
+
3317
+ This function takes fMRI data and upsamples it by a factor of `numsteps` along
3318
+ the temporal dimension, and interpolates across slices to align with specified
3319
+ slice offsets. The resulting upsampled data is saved as a NIfTI file.
3320
+
3321
+ Parameters
3322
+ ----------
3323
+ input_data : object
3324
+ Input fMRI data object with attributes: `byvol()`, `timepoints`, `xsize`,
3325
+ `ysize`, `numslices`, and `copyheader()`.
3326
+ numsteps : int
3327
+ Upsampling factor along the temporal dimension.
3328
+ sliceoffsets : array-like of int
3329
+ Slice offset indices indicating where each slice's data should be placed
3330
+ in the upsampled volume.
3331
+ slicesamplerate : float
3332
+ Sampling rate of the slice acquisition (used to set the TR in the output header).
3333
+ outputroot : str
3334
+ Root name for the output NIfTI file (will be suffixed with "_upsampled").
3335
+
3336
+ Returns
3337
+ -------
3338
+ None
3339
+ The function saves the upsampled data to a NIfTI file and does not return any value.
3340
+
3341
+ Notes
3342
+ -----
3343
+ - The function demeans the input data before upsampling.
3344
+ - Interpolation is performed along the slice direction using linear interpolation.
3345
+ - The output file is saved using `tide_io.savetonifti`.
3346
+
3347
+ Examples
3348
+ --------
3349
+ >>> upsampleimage(fmri_data, numsteps=2, sliceoffsets=[0, 1], slicesamplerate=2.0, outputroot='output')
3350
+ Upsamples the fMRI data by a factor of 2 and saves to 'output_upsampled.nii'.
3351
+ """
1551
3352
  fmri_data = input_data.byvol()
1552
3353
  timepoints = input_data.timepoints
1553
3354
  xsize = input_data.xsize
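A minimal sketch of the temporal upsampling idea for a single voxel timecourse, using linear interpolation (the per-slice offset placement and NIfTI output handled by upsampleimage are omitted; tr and numsteps are illustrative):

    import numpy as np

    tr, numsteps, timepoints = 1.5, 4, 100
    timecourse = np.random.randn(timepoints)
    timecourse = timecourse - np.mean(timecourse)          # demean first, as described above

    orig_times = np.arange(timepoints) * tr
    upsampled_times = np.arange(timepoints * numsteps) * (tr / numsteps)
    upsampled = np.interp(upsampled_times, orig_times, timecourse)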
@@ -1609,6 +3410,78 @@ def wrightmap(
1609
3410
  verbose=False,
1610
3411
  debug=False,
1611
3412
  ):
3413
+ """
3414
+ Compute a vessel map using Wright's method by performing phase correlation
3415
+ analysis across randomized subsets of timecourses.
3416
+
3417
+ This function implements Wright's method for estimating vessel maps by
3418
+ splitting the timecourse data into two random halves, projecting each half
3419
+ separately, and computing the Pearson correlation between the resulting
3420
+ projections for each voxel and slice. The final map is derived as the mean
3421
+ of these correlations across iterations.
3422
+
3423
+ Parameters
3424
+ ----------
3425
+ input_data : object
3426
+ Input data container with attributes `xsize`, `ysize`, and `numslices`.
3427
+ demeandata_byslice : array_like
3428
+ Demeaned fMRI data organized by slice, shape ``(nvoxels, numslices, ntimepoints)``.
3429
+ rawapp_byslice : array_like
3430
+ Raw application (phase-projected) data by slice, shape ``(nvoxels, numslices, destpoints)``.
3431
+ projmask_byslice : array_like
3432
+ Projection mask by slice, shape ``(nvoxels, numslices)``.
3433
+ outphases : array_like
3434
+ Output phases, shape ``(nphases,)``.
3435
+ cardphasevals : array_like
3436
+ Cardiac phase values for each slice and timepoint.
3437
+ proctrs : array_like
3438
+ Timecourse indices to be processed, shape ``(ntimepoints,)``.
3439
+ congridbins : int
3440
+ Number of bins used by the congrid interpolation kernel.
3441
+ gridkernel : str
3442
+ Name of the kernel used for congrid interpolation (e.g. "kaiser").
3443
+ destpoints : int
3444
+ Number of destination phase points for the projection.
3445
+ iterations : int, optional
3446
+ Number of iterations for random splitting (default is 100).
3447
+ nprocs : int, optional
3448
+ Number of processes to use for parallel computation; -1 uses all
3449
+ available cores (default is -1).
3450
+ verbose : bool, optional
3451
+ If True, print progress messages (default is False).
3452
+ debug : bool, optional
3453
+ If True, print additional debug information (default is False).
3454
+
3455
+ Returns
3456
+ -------
3457
+ wrightcorrs : ndarray
3458
+ Computed vessel map with shape ``(xsize, ysize, numslices)``.
3459
+
3460
+ Notes
3461
+ -----
3462
+ This function performs a bootstrap-like procedure where the input timecourse
3463
+ is randomly split into two halves, and phase projections are computed for
3464
+ each half. Pearson correlation is computed between the two projections for
3465
+ each voxel and slice. The result is averaged over all iterations to produce
3466
+ the final vessel map.
3467
+
3468
+ Examples
3469
+ --------
3470
+ >>> wrightcorrs = wrightmap(
3471
+ ... input_data,
3472
+ ... demeandata_byslice,
3473
+ ... rawapp_byslice,
3474
+ ... projmask_byslice,
3475
+ ... outphases,
3476
+ ... cardphasevals,
3477
+ ... proctrs,
3478
+ ... congridbins,
3479
+ ... gridkernel,
3480
+ ... destpoints,
3481
+ ... iterations=50,
3482
+ ... verbose=True
3483
+ ... )
3484
+ """
1612
3485
  xsize = input_data.xsize
1613
3486
  ysize = input_data.ysize
1614
3487
  numslices = input_data.numslices
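A minimal sketch of the split-half correlation idea described in the Notes, for a single voxel timecourse (the congrid phase projection is replaced by simple phase-bin averages; all names and sizes are illustrative):

    import numpy as np

    rng = np.random.default_rng(2)
    ntimepoints, nbins, iterations = 400, 32, 50
    phases = rng.uniform(0.0, 2.0 * np.pi, size=ntimepoints)
    signal = np.sin(phases) + 0.5 * rng.standard_normal(ntimepoints)
    binindex = np.floor(phases / (2.0 * np.pi) * nbins).astype(int) % nbins

    corrs = np.zeros(iterations)
    for it in range(iterations):
        # Randomly split the timepoints into two halves and project each half by phase bin.
        order = rng.permutation(ntimepoints)
        halves = [order[: ntimepoints // 2], order[ntimepoints // 2:]]
        projections = []
        for half in halves:
            counts = np.bincount(binindex[half], minlength=nbins)
            sums = np.bincount(binindex[half], weights=signal[half], minlength=nbins)
            projections.append(np.where(counts > 0, sums / np.maximum(counts, 1), 0.0))
        corrs[it] = np.corrcoef(projections[0], projections[1])[0, 1]

    wright_value = np.mean(corrs)   # high values suggest a consistent cardiac waveform (vessel)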