rapidtide 2.9.5__py3-none-any.whl → 3.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (405)
  1. cloud/gmscalc-HCPYA +1 -1
  2. cloud/mount-and-run +2 -0
  3. cloud/rapidtide-HCPYA +3 -3
  4. rapidtide/Colortables.py +538 -38
  5. rapidtide/OrthoImageItem.py +1094 -51
  6. rapidtide/RapidtideDataset.py +1709 -114
  7. rapidtide/__init__.py +0 -8
  8. rapidtide/_version.py +4 -4
  9. rapidtide/calccoherence.py +242 -97
  10. rapidtide/calcnullsimfunc.py +240 -140
  11. rapidtide/calcsimfunc.py +314 -129
  12. rapidtide/correlate.py +1211 -389
  13. rapidtide/data/examples/src/testLD +56 -0
  14. rapidtide/data/examples/src/test_findmaxlag.py +2 -2
  15. rapidtide/data/examples/src/test_mlregressallt.py +32 -17
  16. rapidtide/data/examples/src/testalign +1 -1
  17. rapidtide/data/examples/src/testatlasaverage +35 -7
  18. rapidtide/data/examples/src/testboth +21 -0
  19. rapidtide/data/examples/src/testcifti +11 -0
  20. rapidtide/data/examples/src/testdelayvar +13 -0
  21. rapidtide/data/examples/src/testdlfilt +25 -0
  22. rapidtide/data/examples/src/testfft +35 -0
  23. rapidtide/data/examples/src/testfileorfloat +37 -0
  24. rapidtide/data/examples/src/testfmri +94 -27
  25. rapidtide/data/examples/src/testfuncs +3 -3
  26. rapidtide/data/examples/src/testglmfilt +8 -6
  27. rapidtide/data/examples/src/testhappy +84 -51
  28. rapidtide/data/examples/src/testinitdelay +19 -0
  29. rapidtide/data/examples/src/testmodels +33 -0
  30. rapidtide/data/examples/src/testnewrefine +26 -0
  31. rapidtide/data/examples/src/testnoiseamp +21 -0
  32. rapidtide/data/examples/src/testppgproc +17 -0
  33. rapidtide/data/examples/src/testrefineonly +22 -0
  34. rapidtide/data/examples/src/testretro +26 -13
  35. rapidtide/data/examples/src/testretrolagtcs +16 -0
  36. rapidtide/data/examples/src/testrolloff +11 -0
  37. rapidtide/data/examples/src/testsimdata +45 -28
  38. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  39. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  40. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  41. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  42. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.png +0 -0
  43. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.txt +1 -0
  44. rapidtide/data/models/model_cnn_pytorch_fulldata/model.pth +0 -0
  45. rapidtide/data/models/model_cnn_pytorch_fulldata/model_meta.json +80 -0
  46. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.png +0 -0
  47. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.txt +1 -0
  48. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model.pth +0 -0
  49. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model_meta.json +138 -0
  50. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.png +0 -0
  51. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.txt +1 -0
  52. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model.pth +0 -0
  53. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model_meta.json +128 -0
  54. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.png +0 -0
  55. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.txt +1 -0
  56. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model.pth +0 -0
  57. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model_meta.json +49 -0
  58. rapidtide/data/models/model_revised_tf2/model.keras +0 -0
  59. rapidtide/data/models/{model_serdar → model_revised_tf2}/model_meta.json +1 -1
  60. rapidtide/data/models/model_serdar2_tf2/model.keras +0 -0
  61. rapidtide/data/models/{model_serdar2 → model_serdar2_tf2}/model_meta.json +1 -1
  62. rapidtide/data/models/model_serdar_tf2/model.keras +0 -0
  63. rapidtide/data/models/{model_revised → model_serdar_tf2}/model_meta.json +1 -1
  64. rapidtide/data/reference/HCP1200v2_MTT_2mm.nii.gz +0 -0
  65. rapidtide/data/reference/HCP1200v2_binmask_2mm.nii.gz +0 -0
  66. rapidtide/data/reference/HCP1200v2_csf_2mm.nii.gz +0 -0
  67. rapidtide/data/reference/HCP1200v2_gray_2mm.nii.gz +0 -0
  68. rapidtide/data/reference/HCP1200v2_graylaghist.json +7 -0
  69. rapidtide/data/reference/HCP1200v2_graylaghist.tsv.gz +0 -0
  70. rapidtide/data/reference/HCP1200v2_laghist.json +7 -0
  71. rapidtide/data/reference/HCP1200v2_laghist.tsv.gz +0 -0
  72. rapidtide/data/reference/HCP1200v2_mask_2mm.nii.gz +0 -0
  73. rapidtide/data/reference/HCP1200v2_maxcorr_2mm.nii.gz +0 -0
  74. rapidtide/data/reference/HCP1200v2_maxtime_2mm.nii.gz +0 -0
  75. rapidtide/data/reference/HCP1200v2_maxwidth_2mm.nii.gz +0 -0
  76. rapidtide/data/reference/HCP1200v2_negmask_2mm.nii.gz +0 -0
  77. rapidtide/data/reference/HCP1200v2_timepercentile_2mm.nii.gz +0 -0
  78. rapidtide/data/reference/HCP1200v2_white_2mm.nii.gz +0 -0
  79. rapidtide/data/reference/HCP1200v2_whitelaghist.json +7 -0
  80. rapidtide/data/reference/HCP1200v2_whitelaghist.tsv.gz +0 -0
  81. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2.xml +131 -0
  82. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_regions.txt +60 -0
  83. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_space-MNI152NLin6Asym_2mm.nii.gz +0 -0
  84. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
  85. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
  86. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  87. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL2_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  88. rapidtide/data/reference/MNI152_T1_1mm_Brain_FAST_seg.nii.gz +0 -0
  89. rapidtide/data/reference/MNI152_T1_1mm_Brain_Mask.nii.gz +0 -0
  90. rapidtide/data/reference/MNI152_T1_2mm_Brain_FAST_seg.nii.gz +0 -0
  91. rapidtide/data/reference/MNI152_T1_2mm_Brain_Mask.nii.gz +0 -0
  92. rapidtide/decorators.py +91 -0
  93. rapidtide/dlfilter.py +2553 -414
  94. rapidtide/dlfiltertorch.py +5201 -0
  95. rapidtide/externaltools.py +328 -13
  96. rapidtide/fMRIData_class.py +178 -0
  97. rapidtide/ffttools.py +168 -0
  98. rapidtide/filter.py +2704 -1462
  99. rapidtide/fit.py +2361 -579
  100. rapidtide/genericmultiproc.py +197 -0
  101. rapidtide/happy_supportfuncs.py +3255 -548
  102. rapidtide/helper_classes.py +590 -1181
  103. rapidtide/io.py +2569 -468
  104. rapidtide/linfitfiltpass.py +784 -0
  105. rapidtide/makelaggedtcs.py +267 -97
  106. rapidtide/maskutil.py +555 -25
  107. rapidtide/miscmath.py +867 -137
  108. rapidtide/multiproc.py +217 -44
  109. rapidtide/patchmatch.py +752 -0
  110. rapidtide/peakeval.py +32 -32
  111. rapidtide/ppgproc.py +2205 -0
  112. rapidtide/qualitycheck.py +353 -40
  113. rapidtide/refinedelay.py +854 -0
  114. rapidtide/refineregressor.py +939 -0
  115. rapidtide/resample.py +725 -204
  116. rapidtide/scripts/__init__.py +1 -0
  117. rapidtide/scripts/{adjustoffset → adjustoffset.py} +7 -2
  118. rapidtide/scripts/{aligntcs → aligntcs.py} +7 -2
  119. rapidtide/scripts/{applydlfilter → applydlfilter.py} +7 -2
  120. rapidtide/scripts/applyppgproc.py +28 -0
  121. rapidtide/scripts/{atlasaverage → atlasaverage.py} +7 -2
  122. rapidtide/scripts/{atlastool → atlastool.py} +7 -2
  123. rapidtide/scripts/{calcicc → calcicc.py} +7 -2
  124. rapidtide/scripts/{calctexticc → calctexticc.py} +7 -2
  125. rapidtide/scripts/{calcttest → calcttest.py} +7 -2
  126. rapidtide/scripts/{ccorrica → ccorrica.py} +7 -2
  127. rapidtide/scripts/delayvar.py +28 -0
  128. rapidtide/scripts/{diffrois → diffrois.py} +7 -2
  129. rapidtide/scripts/{endtidalproc → endtidalproc.py} +7 -2
  130. rapidtide/scripts/{fdica → fdica.py} +7 -2
  131. rapidtide/scripts/{filtnifti → filtnifti.py} +7 -2
  132. rapidtide/scripts/{filttc → filttc.py} +7 -2
  133. rapidtide/scripts/{fingerprint → fingerprint.py} +20 -16
  134. rapidtide/scripts/{fixtr → fixtr.py} +7 -2
  135. rapidtide/scripts/{gmscalc → gmscalc.py} +7 -2
  136. rapidtide/scripts/{happy → happy.py} +7 -2
  137. rapidtide/scripts/{happy2std → happy2std.py} +7 -2
  138. rapidtide/scripts/{happywarp → happywarp.py} +8 -4
  139. rapidtide/scripts/{histnifti → histnifti.py} +7 -2
  140. rapidtide/scripts/{histtc → histtc.py} +7 -2
  141. rapidtide/scripts/{glmfilt → linfitfilt.py} +7 -4
  142. rapidtide/scripts/{localflow → localflow.py} +7 -2
  143. rapidtide/scripts/{mergequality → mergequality.py} +7 -2
  144. rapidtide/scripts/{pairproc → pairproc.py} +7 -2
  145. rapidtide/scripts/{pairwisemergenifti → pairwisemergenifti.py} +7 -2
  146. rapidtide/scripts/{physiofreq → physiofreq.py} +7 -2
  147. rapidtide/scripts/{pixelcomp → pixelcomp.py} +7 -2
  148. rapidtide/scripts/{plethquality → plethquality.py} +7 -2
  149. rapidtide/scripts/{polyfitim → polyfitim.py} +7 -2
  150. rapidtide/scripts/{proj2flow → proj2flow.py} +7 -2
  151. rapidtide/scripts/{rankimage → rankimage.py} +7 -2
  152. rapidtide/scripts/{rapidtide → rapidtide.py} +7 -2
  153. rapidtide/scripts/{rapidtide2std → rapidtide2std.py} +7 -2
  154. rapidtide/scripts/{resamplenifti → resamplenifti.py} +7 -2
  155. rapidtide/scripts/{resampletc → resampletc.py} +7 -2
  156. rapidtide/scripts/retrolagtcs.py +28 -0
  157. rapidtide/scripts/retroregress.py +28 -0
  158. rapidtide/scripts/{roisummarize → roisummarize.py} +7 -2
  159. rapidtide/scripts/{runqualitycheck → runqualitycheck.py} +7 -2
  160. rapidtide/scripts/{showarbcorr → showarbcorr.py} +7 -2
  161. rapidtide/scripts/{showhist → showhist.py} +7 -2
  162. rapidtide/scripts/{showstxcorr → showstxcorr.py} +7 -2
  163. rapidtide/scripts/{showtc → showtc.py} +7 -2
  164. rapidtide/scripts/{showxcorr_legacy → showxcorr_legacy.py} +8 -8
  165. rapidtide/scripts/{showxcorrx → showxcorrx.py} +7 -2
  166. rapidtide/scripts/{showxy → showxy.py} +7 -2
  167. rapidtide/scripts/{simdata → simdata.py} +7 -2
  168. rapidtide/scripts/{spatialdecomp → spatialdecomp.py} +7 -2
  169. rapidtide/scripts/{spatialfit → spatialfit.py} +7 -2
  170. rapidtide/scripts/{spatialmi → spatialmi.py} +7 -2
  171. rapidtide/scripts/{spectrogram → spectrogram.py} +7 -2
  172. rapidtide/scripts/stupidramtricks.py +238 -0
  173. rapidtide/scripts/{synthASL → synthASL.py} +7 -2
  174. rapidtide/scripts/{tcfrom2col → tcfrom2col.py} +7 -2
  175. rapidtide/scripts/{tcfrom3col → tcfrom3col.py} +7 -2
  176. rapidtide/scripts/{temporaldecomp → temporaldecomp.py} +7 -2
  177. rapidtide/scripts/{testhrv → testhrv.py} +1 -1
  178. rapidtide/scripts/{threeD → threeD.py} +7 -2
  179. rapidtide/scripts/{tidepool → tidepool.py} +7 -2
  180. rapidtide/scripts/{variabilityizer → variabilityizer.py} +7 -2
  181. rapidtide/simFuncClasses.py +2113 -0
  182. rapidtide/simfuncfit.py +312 -108
  183. rapidtide/stats.py +579 -247
  184. rapidtide/tests/.coveragerc +27 -6
  185. rapidtide-2.9.5.data/scripts/fdica → rapidtide/tests/cleanposttest +4 -6
  186. rapidtide/tests/happycomp +9 -0
  187. rapidtide/tests/resethappytargets +1 -1
  188. rapidtide/tests/resetrapidtidetargets +1 -1
  189. rapidtide/tests/resettargets +1 -1
  190. rapidtide/tests/runlocaltest +3 -3
  191. rapidtide/tests/showkernels +1 -1
  192. rapidtide/tests/test_aliasedcorrelate.py +4 -4
  193. rapidtide/tests/test_aligntcs.py +1 -1
  194. rapidtide/tests/test_calcicc.py +1 -1
  195. rapidtide/tests/test_cleanregressor.py +184 -0
  196. rapidtide/tests/test_congrid.py +70 -81
  197. rapidtide/tests/test_correlate.py +1 -1
  198. rapidtide/tests/test_corrpass.py +4 -4
  199. rapidtide/tests/test_delayestimation.py +54 -59
  200. rapidtide/tests/test_dlfiltertorch.py +437 -0
  201. rapidtide/tests/test_doresample.py +2 -2
  202. rapidtide/tests/test_externaltools.py +69 -0
  203. rapidtide/tests/test_fastresampler.py +9 -5
  204. rapidtide/tests/test_filter.py +96 -57
  205. rapidtide/tests/test_findmaxlag.py +50 -19
  206. rapidtide/tests/test_fullrunhappy_v1.py +15 -10
  207. rapidtide/tests/test_fullrunhappy_v2.py +19 -13
  208. rapidtide/tests/test_fullrunhappy_v3.py +28 -13
  209. rapidtide/tests/test_fullrunhappy_v4.py +30 -11
  210. rapidtide/tests/test_fullrunhappy_v5.py +62 -0
  211. rapidtide/tests/test_fullrunrapidtide_v1.py +61 -7
  212. rapidtide/tests/test_fullrunrapidtide_v2.py +27 -15
  213. rapidtide/tests/test_fullrunrapidtide_v3.py +28 -8
  214. rapidtide/tests/test_fullrunrapidtide_v4.py +16 -8
  215. rapidtide/tests/test_fullrunrapidtide_v5.py +15 -6
  216. rapidtide/tests/test_fullrunrapidtide_v6.py +142 -0
  217. rapidtide/tests/test_fullrunrapidtide_v7.py +114 -0
  218. rapidtide/tests/test_fullrunrapidtide_v8.py +66 -0
  219. rapidtide/tests/test_getparsers.py +158 -0
  220. rapidtide/tests/test_io.py +59 -18
  221. rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +10 -10
  222. rapidtide/tests/test_mi.py +1 -1
  223. rapidtide/tests/test_miscmath.py +1 -1
  224. rapidtide/tests/test_motionregress.py +5 -5
  225. rapidtide/tests/test_nullcorr.py +6 -9
  226. rapidtide/tests/test_padvec.py +216 -0
  227. rapidtide/tests/test_parserfuncs.py +101 -0
  228. rapidtide/tests/test_phaseanalysis.py +1 -1
  229. rapidtide/tests/test_rapidtideparser.py +59 -53
  230. rapidtide/tests/test_refinedelay.py +296 -0
  231. rapidtide/tests/test_runmisc.py +5 -5
  232. rapidtide/tests/test_sharedmem.py +60 -0
  233. rapidtide/tests/test_simroundtrip.py +132 -0
  234. rapidtide/tests/test_simulate.py +1 -1
  235. rapidtide/tests/test_stcorrelate.py +4 -2
  236. rapidtide/tests/test_timeshift.py +2 -2
  237. rapidtide/tests/test_valtoindex.py +1 -1
  238. rapidtide/tests/test_zRapidtideDataset.py +5 -3
  239. rapidtide/tests/utils.py +10 -9
  240. rapidtide/tidepoolTemplate.py +88 -70
  241. rapidtide/tidepoolTemplate.ui +60 -46
  242. rapidtide/tidepoolTemplate_alt.py +88 -53
  243. rapidtide/tidepoolTemplate_alt.ui +62 -52
  244. rapidtide/tidepoolTemplate_alt_qt6.py +921 -0
  245. rapidtide/tidepoolTemplate_big.py +1125 -0
  246. rapidtide/tidepoolTemplate_big.ui +2386 -0
  247. rapidtide/tidepoolTemplate_big_qt6.py +1129 -0
  248. rapidtide/tidepoolTemplate_qt6.py +793 -0
  249. rapidtide/util.py +1389 -148
  250. rapidtide/voxelData.py +1048 -0
  251. rapidtide/wiener.py +138 -25
  252. rapidtide/wiener2.py +114 -8
  253. rapidtide/workflows/adjustoffset.py +107 -5
  254. rapidtide/workflows/aligntcs.py +86 -3
  255. rapidtide/workflows/applydlfilter.py +231 -89
  256. rapidtide/workflows/applyppgproc.py +540 -0
  257. rapidtide/workflows/atlasaverage.py +309 -48
  258. rapidtide/workflows/atlastool.py +130 -9
  259. rapidtide/workflows/calcSimFuncMap.py +490 -0
  260. rapidtide/workflows/calctexticc.py +202 -10
  261. rapidtide/workflows/ccorrica.py +123 -15
  262. rapidtide/workflows/cleanregressor.py +415 -0
  263. rapidtide/workflows/delayvar.py +1268 -0
  264. rapidtide/workflows/diffrois.py +84 -6
  265. rapidtide/workflows/endtidalproc.py +149 -9
  266. rapidtide/workflows/fdica.py +197 -17
  267. rapidtide/workflows/filtnifti.py +71 -4
  268. rapidtide/workflows/filttc.py +76 -5
  269. rapidtide/workflows/fitSimFuncMap.py +578 -0
  270. rapidtide/workflows/fixtr.py +74 -4
  271. rapidtide/workflows/gmscalc.py +116 -6
  272. rapidtide/workflows/happy.py +1242 -480
  273. rapidtide/workflows/happy2std.py +145 -13
  274. rapidtide/workflows/happy_parser.py +277 -59
  275. rapidtide/workflows/histnifti.py +120 -4
  276. rapidtide/workflows/histtc.py +85 -4
  277. rapidtide/workflows/{glmfilt.py → linfitfilt.py} +128 -14
  278. rapidtide/workflows/localflow.py +329 -29
  279. rapidtide/workflows/mergequality.py +80 -4
  280. rapidtide/workflows/niftidecomp.py +323 -19
  281. rapidtide/workflows/niftistats.py +178 -8
  282. rapidtide/workflows/pairproc.py +99 -5
  283. rapidtide/workflows/pairwisemergenifti.py +86 -3
  284. rapidtide/workflows/parser_funcs.py +1488 -56
  285. rapidtide/workflows/physiofreq.py +139 -12
  286. rapidtide/workflows/pixelcomp.py +211 -9
  287. rapidtide/workflows/plethquality.py +105 -23
  288. rapidtide/workflows/polyfitim.py +159 -19
  289. rapidtide/workflows/proj2flow.py +76 -3
  290. rapidtide/workflows/rankimage.py +115 -8
  291. rapidtide/workflows/rapidtide.py +1833 -1919
  292. rapidtide/workflows/rapidtide2std.py +101 -3
  293. rapidtide/workflows/rapidtide_parser.py +607 -372
  294. rapidtide/workflows/refineDelayMap.py +249 -0
  295. rapidtide/workflows/refineRegressor.py +1215 -0
  296. rapidtide/workflows/regressfrommaps.py +308 -0
  297. rapidtide/workflows/resamplenifti.py +86 -4
  298. rapidtide/workflows/resampletc.py +92 -4
  299. rapidtide/workflows/retrolagtcs.py +442 -0
  300. rapidtide/workflows/retroregress.py +1501 -0
  301. rapidtide/workflows/roisummarize.py +176 -7
  302. rapidtide/workflows/runqualitycheck.py +72 -7
  303. rapidtide/workflows/showarbcorr.py +172 -16
  304. rapidtide/workflows/showhist.py +87 -3
  305. rapidtide/workflows/showstxcorr.py +161 -4
  306. rapidtide/workflows/showtc.py +172 -10
  307. rapidtide/workflows/showxcorrx.py +250 -62
  308. rapidtide/workflows/showxy.py +186 -16
  309. rapidtide/workflows/simdata.py +418 -112
  310. rapidtide/workflows/spatialfit.py +83 -8
  311. rapidtide/workflows/spatialmi.py +252 -29
  312. rapidtide/workflows/spectrogram.py +306 -33
  313. rapidtide/workflows/synthASL.py +157 -6
  314. rapidtide/workflows/tcfrom2col.py +77 -3
  315. rapidtide/workflows/tcfrom3col.py +75 -3
  316. rapidtide/workflows/tidepool.py +3829 -666
  317. rapidtide/workflows/utils.py +45 -19
  318. rapidtide/workflows/utils_doc.py +293 -0
  319. rapidtide/workflows/variabilityizer.py +118 -5
  320. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/METADATA +30 -223
  321. rapidtide-3.1.3.dist-info/RECORD +393 -0
  322. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/WHEEL +1 -1
  323. rapidtide-3.1.3.dist-info/entry_points.txt +65 -0
  324. rapidtide-3.1.3.dist-info/top_level.txt +2 -0
  325. rapidtide/calcandfitcorrpairs.py +0 -262
  326. rapidtide/data/examples/src/testoutputsize +0 -45
  327. rapidtide/data/models/model_revised/model.h5 +0 -0
  328. rapidtide/data/models/model_serdar/model.h5 +0 -0
  329. rapidtide/data/models/model_serdar2/model.h5 +0 -0
  330. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm.nii.gz +0 -0
  331. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  332. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm.nii.gz +0 -0
  333. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  334. rapidtide/data/reference/HCP1200_binmask_2mm_2009c_asym.nii.gz +0 -0
  335. rapidtide/data/reference/HCP1200_lag_2mm_2009c_asym.nii.gz +0 -0
  336. rapidtide/data/reference/HCP1200_mask_2mm_2009c_asym.nii.gz +0 -0
  337. rapidtide/data/reference/HCP1200_negmask_2mm_2009c_asym.nii.gz +0 -0
  338. rapidtide/data/reference/HCP1200_sigma_2mm_2009c_asym.nii.gz +0 -0
  339. rapidtide/data/reference/HCP1200_strength_2mm_2009c_asym.nii.gz +0 -0
  340. rapidtide/glmpass.py +0 -434
  341. rapidtide/refine_factored.py +0 -641
  342. rapidtide/scripts/retroglm +0 -23
  343. rapidtide/workflows/glmfrommaps.py +0 -202
  344. rapidtide/workflows/retroglm.py +0 -643
  345. rapidtide-2.9.5.data/scripts/adjustoffset +0 -23
  346. rapidtide-2.9.5.data/scripts/aligntcs +0 -23
  347. rapidtide-2.9.5.data/scripts/applydlfilter +0 -23
  348. rapidtide-2.9.5.data/scripts/atlasaverage +0 -23
  349. rapidtide-2.9.5.data/scripts/atlastool +0 -23
  350. rapidtide-2.9.5.data/scripts/calcicc +0 -22
  351. rapidtide-2.9.5.data/scripts/calctexticc +0 -23
  352. rapidtide-2.9.5.data/scripts/calcttest +0 -22
  353. rapidtide-2.9.5.data/scripts/ccorrica +0 -23
  354. rapidtide-2.9.5.data/scripts/diffrois +0 -23
  355. rapidtide-2.9.5.data/scripts/endtidalproc +0 -23
  356. rapidtide-2.9.5.data/scripts/filtnifti +0 -23
  357. rapidtide-2.9.5.data/scripts/filttc +0 -23
  358. rapidtide-2.9.5.data/scripts/fingerprint +0 -593
  359. rapidtide-2.9.5.data/scripts/fixtr +0 -23
  360. rapidtide-2.9.5.data/scripts/glmfilt +0 -24
  361. rapidtide-2.9.5.data/scripts/gmscalc +0 -22
  362. rapidtide-2.9.5.data/scripts/happy +0 -25
  363. rapidtide-2.9.5.data/scripts/happy2std +0 -23
  364. rapidtide-2.9.5.data/scripts/happywarp +0 -350
  365. rapidtide-2.9.5.data/scripts/histnifti +0 -23
  366. rapidtide-2.9.5.data/scripts/histtc +0 -23
  367. rapidtide-2.9.5.data/scripts/localflow +0 -23
  368. rapidtide-2.9.5.data/scripts/mergequality +0 -23
  369. rapidtide-2.9.5.data/scripts/pairproc +0 -23
  370. rapidtide-2.9.5.data/scripts/pairwisemergenifti +0 -23
  371. rapidtide-2.9.5.data/scripts/physiofreq +0 -23
  372. rapidtide-2.9.5.data/scripts/pixelcomp +0 -23
  373. rapidtide-2.9.5.data/scripts/plethquality +0 -23
  374. rapidtide-2.9.5.data/scripts/polyfitim +0 -23
  375. rapidtide-2.9.5.data/scripts/proj2flow +0 -23
  376. rapidtide-2.9.5.data/scripts/rankimage +0 -23
  377. rapidtide-2.9.5.data/scripts/rapidtide +0 -23
  378. rapidtide-2.9.5.data/scripts/rapidtide2std +0 -23
  379. rapidtide-2.9.5.data/scripts/resamplenifti +0 -23
  380. rapidtide-2.9.5.data/scripts/resampletc +0 -23
  381. rapidtide-2.9.5.data/scripts/retroglm +0 -23
  382. rapidtide-2.9.5.data/scripts/roisummarize +0 -23
  383. rapidtide-2.9.5.data/scripts/runqualitycheck +0 -23
  384. rapidtide-2.9.5.data/scripts/showarbcorr +0 -23
  385. rapidtide-2.9.5.data/scripts/showhist +0 -23
  386. rapidtide-2.9.5.data/scripts/showstxcorr +0 -23
  387. rapidtide-2.9.5.data/scripts/showtc +0 -23
  388. rapidtide-2.9.5.data/scripts/showxcorr_legacy +0 -536
  389. rapidtide-2.9.5.data/scripts/showxcorrx +0 -23
  390. rapidtide-2.9.5.data/scripts/showxy +0 -23
  391. rapidtide-2.9.5.data/scripts/simdata +0 -23
  392. rapidtide-2.9.5.data/scripts/spatialdecomp +0 -23
  393. rapidtide-2.9.5.data/scripts/spatialfit +0 -23
  394. rapidtide-2.9.5.data/scripts/spatialmi +0 -23
  395. rapidtide-2.9.5.data/scripts/spectrogram +0 -23
  396. rapidtide-2.9.5.data/scripts/synthASL +0 -23
  397. rapidtide-2.9.5.data/scripts/tcfrom2col +0 -23
  398. rapidtide-2.9.5.data/scripts/tcfrom3col +0 -23
  399. rapidtide-2.9.5.data/scripts/temporaldecomp +0 -23
  400. rapidtide-2.9.5.data/scripts/threeD +0 -236
  401. rapidtide-2.9.5.data/scripts/tidepool +0 -23
  402. rapidtide-2.9.5.data/scripts/variabilityizer +0 -23
  403. rapidtide-2.9.5.dist-info/RECORD +0 -357
  404. rapidtide-2.9.5.dist-info/top_level.txt +0 -86
  405. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info/licenses}/LICENSE +0 -0
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
  #
4
- # Copyright 2018-2024 Blaise Frederick
4
+ # Copyright 2018-2025 Blaise Frederick
5
5
  #
6
6
  # Licensed under the Apache License, Version 2.0 (the "License");
7
7
  # you may not use this file except in compliance with the License.
@@ -19,16 +19,19 @@
19
19
  import copy
20
20
  import time
21
21
  import warnings
22
+ from dataclasses import dataclass
22
23
 
23
24
  import numpy as np
25
+ from numpy.typing import NDArray
24
26
  from scipy.signal import savgol_filter, welch
25
- from scipy.stats import kurtosis, skew
27
+ from scipy.stats import kurtosis, pearsonr, skew
26
28
  from statsmodels.robust import mad
27
29
  from tqdm import tqdm
28
30
 
29
31
  import rapidtide.correlate as tide_corr
30
32
  import rapidtide.filter as tide_filt
31
33
  import rapidtide.fit as tide_fit
34
+ import rapidtide.genericmultiproc as tide_genericmultiproc
32
35
  import rapidtide.io as tide_io
33
36
  import rapidtide.miscmath as tide_math
34
37
  import rapidtide.resample as tide_resample
@@ -44,21 +47,106 @@ try:
44
47
  except ImportError:
45
48
  mklexists = False
46
49
 
47
- try:
48
- import rapidtide.dlfilter as tide_dlfilt
49
50
 
50
- dlfilterexists = True
51
- print("dlfilter exists")
52
- except ImportError:
53
- dlfilterexists = False
54
- print("dlfilter does not exist")
51
+ def rrifromphase(timeaxis: NDArray, thephase: NDArray) -> None:
52
+ """
53
+ Derive an R-R interval (RRI) estimate from a cardiac phase waveform.
54
+
55
+ This function is intended to derive an R-R interval estimate from the
56
+ provided time axis and phase data.
55
57
 
58
+ Parameters
59
+ ----------
60
+ timeaxis : NDArray
61
+ Time axis values corresponding to the phase measurements.
62
+ thephase : NDArray
63
+ Phase measurements to be converted to range rate.
56
64
 
57
- def rrifromphase(timeaxis, thephase):
65
+ Returns
66
+ -------
67
+ None
68
+ This function does not return any value.
69
+
70
+ Notes
71
+ -----
72
+ The function is intended to derive the RRI from phase but does not
73
+ return the result. The actual implementation details are not provided
74
+ in the function signature.
75
+
76
+ Examples
77
+ --------
78
+ >>> import numpy as np
79
+ >>> time = np.array([0, 1, 2, 3])
80
+ >>> phase = np.array([0.1, 0.2, 0.3, 0.4])
81
+ >>> rrifromphase(time, phase)
82
+ """
58
83
  return None
59
84
 
60
85
 
61
- def calc_3d_optical_flow(video, projmask, flowhdr, outputroot, window_size=3, debug=False):
86
+ def calc_3d_optical_flow(
87
+ video: NDArray,
88
+ projmask: NDArray,
89
+ flowhdr: dict,
90
+ outputroot: str,
91
+ window_size: int = 3,
92
+ debug: bool = False,
93
+ ) -> tuple[NDArray, NDArray]:
94
+ """
95
+ Compute 3D optical flow for a video volume using the Lucas-Kanade method.
96
+
97
+ This function calculates optical flow in three dimensions (x, y, z) across
98
+ a sequence of video frames. It uses a Lucas-Kanade approach to estimate
99
+ motion vectors at each voxel, considering a local window around each pixel.
100
+ The results are saved as NIfTI files for each frame.
101
+
102
+ Parameters
103
+ ----------
104
+ video : NDArray
105
+ 4D array of shape (xsize, ysize, zsize, num_frames) representing the
106
+ input video data.
107
+ projmask : NDArray
108
+ 3D boolean or integer mask of shape (xsize, ysize, zsize) indicating
109
+ which voxels to process for optical flow computation.
110
+ flowhdr : dict
111
+ Header dictionary for NIfTI output files, containing metadata for
112
+ the optical flow results.
113
+ outputroot : str
114
+ Root name for output NIfTI files. Files will be saved with suffixes
115
+ `_desc-flow_phase-XX_map` and `_desc-flowmag_phase-XX_map`.
116
+ window_size : int, optional
117
+ Size of the local window used for gradient computation. Default is 3.
118
+ debug : bool, optional
119
+ If True, print debug information during computation. Default is False.
120
+
121
+ Returns
122
+ -------
123
+ tuple[NDArray, NDArray]
124
+ A tuple containing:
125
+ - `flow_vectors`: 5D array of shape (xsize, ysize, zsize, num_frames, 3)
126
+ representing the computed optical flow vectors for each frame.
127
+ - `None`: Placeholder return value; function currently returns only
128
+ `flow_vectors` and saves outputs to disk.
129
+
130
+ Notes
131
+ -----
132
+ - The optical flow is computed using a Lucas-Kanade method with spatial
133
+ gradients in x, y, and z directions.
134
+ - Temporal gradient is computed as the difference between consecutive frames.
135
+ - Output files are saved using `tide_io.savetonifti`.
136
+ - The function wraps around frames when reaching the end (i.e., next frame
137
+ for the last frame is the first frame).
138
+
139
+ Examples
140
+ --------
141
+ >>> import numpy as np
142
+ >>> video = np.random.rand(64, 64, 32, 10)
143
+ >>> mask = np.ones((64, 64, 32), dtype=bool)
144
+ >>> header = {}
145
+ >>> output_root = "flow_result"
146
+ >>> flow_vectors = calc_3d_optical_flow(video, mask, header, output_root)
147
+ >>> print(flow_vectors.shape)
148
+ (64, 64, 32, 10, 3)
149
+ """
62
150
  # window Define the window size for Lucas-Kanade method
63
151
  # Get the number of frames, height, and width of the video
64
152
  singlehdr = copy.deepcopy(flowhdr)
@@ -134,147 +222,830 @@ def calc_3d_optical_flow(video, projmask, flowhdr, outputroot, window_size=3, de
134
222
  return flow_vectors
135
223
 
136
224
 
137
- def cardiacsig(thisphase, amps=(1.0, 0.0, 0.0), phases=None, overallphase=0.0):
225
def phasejolt(phaseimage: NDArray) -> tuple[NDArray, NDArray, NDArray]:
    """
    Compute phase gradient-based metrics (jump, jolt, and laplacian) from a 3D phase image.

    Parameters
    ----------
    phaseimage : NDArray
        Input phase image.  Must be a 3D array: the spatial gradient is
        unpacked into exactly three components (x, y, z).

    Returns
    -------
    tuple of NDArray
        Three arrays, each the same shape as the input:
        - jump: mean absolute first-order gradient magnitude
        - jolt: mean absolute second-order gradient magnitude
        - laplacian: sum of the three unmixed second derivatives

    Raises
    ------
    ValueError
        If ``phaseimage`` is not 3D.

    Notes
    -----
    Gradients are computed with ``np.gradient``, which applies central
    differences in the interior and one-sided differences at the boundaries.
    All metrics are computed voxel-wise across the entire image.

    Examples
    --------
    >>> import numpy as np
    >>> phase_img = np.random.rand(10, 10, 10)
    >>> jump, jolt, laplacian = phasejolt(phase_img)
    >>> print(jump.shape, jolt.shape, laplacian.shape)
    (10, 10, 10) (10, 10, 10) (10, 10, 10)
    """
    # The three-way unpacking below is only valid for 3D input; fail with a
    # clear message rather than an opaque unpacking error.
    if phaseimage.ndim != 3:
        raise ValueError(f"phasejolt: expected a 3D phase image, got {phaseimage.ndim}D")

    # First-order spatial gradients in x, y, and z.
    grad_x, grad_y, grad_z = np.gradient(phaseimage)

    # Second-order gradients (all nine partial derivatives).
    grad_xx, grad_xy, grad_xz = np.gradient(grad_x)
    grad_yx, grad_yy, grad_yz = np.gradient(grad_y)
    grad_zx, grad_zy, grad_zz = np.gradient(grad_z)

    # jump: average absolute first derivative; jolt: average absolute second
    # derivative; laplacian: sum of the unmixed second derivatives.
    jump = (np.fabs(grad_x) + np.fabs(grad_y) + np.fabs(grad_z)) / 3.0
    jolt = (
        (np.fabs(grad_xx) + np.fabs(grad_xy) + np.fabs(grad_xz))
        + (np.fabs(grad_yx) + np.fabs(grad_yy) + np.fabs(grad_yz))
        + (np.fabs(grad_zx) + np.fabs(grad_zy) + np.fabs(grad_zz))
    ) / 9.0
    laplacian = grad_xx + grad_yy + grad_zz
    return (jump, jolt, laplacian)
279
+
280
+
281
+ def cardiacsig(
282
+ thisphase: float | NDArray,
283
+ amps: tuple | NDArray = (1.0, 0.0, 0.0),
284
+ phases: NDArray | None = None,
285
+ overallphase: float = 0.0,
286
+ ) -> float | NDArray:
287
+ """
288
+ Generate a cardiac signal model using harmonic components.
289
+
290
+ This function creates a cardiac signal by summing weighted cosine waves
291
+ at different harmonic frequencies. The signal can be computed for
292
+ scalar phase values or arrays of phase values.
293
+
294
+ Parameters
295
+ ----------
296
+ thisphase : float or NDArray
297
+ The phase value(s) at which to evaluate the cardiac signal.
298
+ Can be a scalar or array of phase values.
299
+ amps : tuple or NDArray, optional
300
+ Amplitude coefficients for each harmonic component. Default is
301
+ (1.0, 0.0, 0.0) representing the fundamental frequency with
302
+ amplitude 1.0 and higher harmonics with amplitude 0.0.
303
+ phases : NDArray or None, optional
304
+ Phase shifts for each harmonic component. If None, all phase shifts
305
+ are set to zero. Default is None.
306
+ overallphase : float, optional
307
+ Overall phase shift applied to the entire signal. Default is 0.0.
308
+
309
+ Returns
310
+ -------
311
+ float or NDArray
312
+ The computed cardiac signal value(s) at the given phase(s).
313
+ Returns a scalar if input is scalar, or array if input is array.
314
+
315
+ Notes
316
+ -----
317
+ The cardiac signal is computed as:
318
+ .. math::
319
+ s(t) = \\sum_{i=0}^{n-1} A_i \\cos((i+1)\\phi + \\phi_i + \\phi_{overall})
320
+
321
+ where:
322
+ - A_i are the amplitude coefficients
323
+ - φ is the phase value
324
+ - φ_i are the harmonic phase shifts
325
+ - φ_{overall} is the overall phase shift
326
+
327
+ Examples
328
+ --------
329
+ >>> import numpy as np
330
+ >>> cardiacsig(0.5)
331
+ 1.0
332
+
333
+ >>> cardiacsig(np.linspace(0, 2*np.pi, 100), amps=(1.0, 0.5, 0.2))
334
+ array([...])
335
+
336
+ >>> cardiacsig(1.0, amps=(2.0, 1.0, 0.5), phases=[0.0, np.pi/4, np.pi/2])
337
+ -0.7071067811865476
338
+ """
138
339
  total = 0.0
139
340
  if phases is None:
140
- phases = amps * 0.0
341
+ phases = np.zeros_like(amps)
141
342
  for i in range(len(amps)):
142
343
  total += amps[i] * np.cos((i + 1) * thisphase + phases[i] + overallphase)
143
344
  return total
144
345
 
145
346
 
146
- def cardiacfromimage(
147
- normdata_byslice,
148
- estmask_byslice,
149
- numslices,
150
- timepoints,
151
- tr,
152
- slicetimes,
153
- cardprefilter,
154
- respprefilter,
155
- notchpct=1.5,
156
- invertphysiosign=False,
157
- madnorm=True,
158
- nprocs=1,
159
- arteriesonly=False,
160
- fliparteries=False,
161
- debug=False,
162
- appflips_byslice=None,
163
- verbose=False,
164
- usemask=True,
165
- multiplicative=True,
166
- ):
167
- # find out what timepoints we have, and their spacing
168
- numsteps, minstep, sliceoffsets = tide_io.sliceinfo(slicetimes, tr)
169
- print(
170
- len(slicetimes),
171
- "slice times with",
172
- numsteps,
173
- "unique values - diff is",
174
- "{:.3f}".format(minstep),
175
- )
347
# Constants for signal processing
SIGN_NORMAL: float = 1.0  # multiplier when the physiological signal sign is kept as acquired
SIGN_INVERTED: float = -1.0  # multiplier selected when invertphysiosign is requested
SIGNAL_INVERSION_FACTOR: float = -1.0  # extracted card/resp waveforms are negated after filtering
176
351
 
177
- # set inversion factor
178
- if invertphysiosign:
179
- thesign = -1.0
180
- else:
181
- thesign = 1.0
182
352
 
183
- # make sure there is an appflips array
353
@dataclass
class CardiacExtractionConfig:
    """
    Configuration for cardiac signal extraction.

    Attributes
    ----------
    notchpct : float
        Bandwidth of the TR-frequency notch filter, as a percentage.
        Default is 1.5.
    notchrolloff : float
        Notch filter rolloff.  Default is 0.5.
        NOTE(review): not referenced by ``cardiacfromimage`` in this module —
        confirm it is consumed elsewhere.
    invertphysiosign : bool
        If True, invert the sign of the physiological signal.  Default is False.
    madnorm : bool
        If True, apply median-absolute-deviation normalization to each slice
        average.  Default is True.
    nprocs : int
        Number of processes to use for computation.  Default is 1.
        NOTE(review): not referenced by ``cardiacfromimage`` in this module.
    arteriesonly : bool
        If True, zero out positive flips so only arterial (negative-flip)
        voxels contribute.  Default is False.
    fliparteries : bool
        If True, multiply the estimation weights by the per-voxel flips.
        Default is False.
    debug : bool
        If True, enable debug output.  Default is False.
    verbose : bool
        If True, print per-slice progress messages.  Default is False.
    usemask : bool
        If True, restrict slice averaging to voxels with nonzero weight.
        Default is True.
    multiplicative : bool
        If True, remove the per-cycle average by division (by cycle average
        + 1); otherwise by subtraction.  Default is True.
    """

    notchpct: float = 1.5
    notchrolloff: float = 0.5
    invertphysiosign: bool = False
    madnorm: bool = True
    nprocs: int = 1
    arteriesonly: bool = False
    fliparteries: bool = False
    debug: bool = False
    verbose: bool = False
    usemask: bool = True
    multiplicative: bool = True
395
+
396
+
397
@dataclass
class CardiacExtractionResult:
    """
    Container for the outputs of cardiac signal extraction.

    Iterating over an instance yields the fields in declaration order, so
    legacy callers can still unpack the result as a 9-tuple.

    Attributes
    ----------
    hirescardtc : NDArray
        High-resolution cardiac time course.
    cardnormfac : float
        Normalization factor for the cardiac signal.
    hiresresptc : NDArray
        High-resolution respiratory time course.
    respnormfac : float
        Normalization factor for the respiratory signal.
    slicesamplerate : float
        Slice sampling rate in Hz.
    numsteps : int
        Number of unique slice times.
    sliceoffsets : NDArray
        Slice offsets relative to the TR.
    cycleaverage : NDArray
        Average signal per slice time step.
    slicenorms : NDArray
        Slice-wise normalization factors.
    """

    hirescardtc: NDArray
    cardnormfac: float
    hiresresptc: NDArray
    respnormfac: float
    slicesamplerate: float
    numsteps: int
    sliceoffsets: NDArray
    cycleaverage: NDArray
    slicenorms: NDArray

    def __iter__(self):
        """Yield the fields in declaration order (legacy tuple unpacking)."""
        yield self.hirescardtc
        yield self.cardnormfac
        yield self.hiresresptc
        yield self.respnormfac
        yield self.slicesamplerate
        yield self.numsteps
        yield self.sliceoffsets
        yield self.cycleaverage
        yield self.slicenorms
451
+
452
+
453
+ def _validate_cardiacfromimage_inputs(
454
+ normdata_byslice: NDArray,
455
+ estweights_byslice: NDArray,
456
+ numslices: int,
457
+ timepoints: int,
458
+ tr: float,
459
+ ) -> None:
460
+ """
461
+ Validate input dimensions and values for cardiacfromimage.
462
+
463
+ Parameters
464
+ ----------
465
+ normdata_byslice : NDArray
466
+ Normalized fMRI data organized by slice.
467
+ estweights_byslice : NDArray
468
+ Estimated weights for each voxel and slice.
469
+ numslices : int
470
+ Number of slices in the acquisition.
471
+ timepoints : int
472
+ Number of time points in the fMRI time series.
473
+ tr : float
474
+ Repetition time (TR) in seconds.
475
+
476
+ Raises
477
+ ------
478
+ ValueError
479
+ If input dimensions or values are invalid.
480
+ """
481
+ if timepoints <= 0:
482
+ raise ValueError(f"timepoints must be positive, got {timepoints}")
483
+
484
+ if numslices <= 0:
485
+ raise ValueError(f"numslices must be positive, got {numslices}")
486
+
487
+ if tr <= 0:
488
+ raise ValueError(f"tr must be positive, got {tr}")
489
+
490
+ if normdata_byslice.shape[1] != numslices:
491
+ raise ValueError(
492
+ f"normdata_byslice slice dimension {normdata_byslice.shape[1]} "
493
+ f"does not match numslices {numslices}"
494
+ )
495
+
496
+ if normdata_byslice.shape[2] != timepoints:
497
+ raise ValueError(
498
+ f"normdata_byslice timepoint dimension {normdata_byslice.shape[2]} "
499
+ f"does not match timepoints {timepoints}"
500
+ )
501
+
502
+ if estweights_byslice.shape[1] != numslices:
503
+ raise ValueError(
504
+ f"estweights_byslice slice dimension {estweights_byslice.shape[1]} "
505
+ f"does not match numslices {numslices}"
506
+ )
507
+
508
+
509
+ def _prepare_weights(
510
+ estweights_byslice: NDArray,
511
+ appflips_byslice: NDArray | None,
512
+ arteriesonly: bool,
513
+ fliparteries: bool,
514
+ ) -> tuple[NDArray, NDArray]:
515
+ """
516
+ Prepare appflips and weight arrays based on configuration.
517
+
518
+ Parameters
519
+ ----------
520
+ estweights_byslice : NDArray
521
+ Estimated weights for each voxel and slice.
522
+ appflips_byslice : NDArray | None
523
+ Array of application flips for each slice.
524
+ arteriesonly : bool
525
+ If True, only use arterial signal.
526
+ fliparteries : bool
527
+ If True, flip the arterial signal.
528
+
529
+ Returns
530
+ -------
531
+ tuple[NDArray, NDArray]
532
+ Processed appflips_byslice and theseweights_byslice arrays.
533
+ """
534
+ # Make sure there is an appflips array
184
535
  if appflips_byslice is None:
185
- appflips_byslice = estmask_byslice * 0.0 + 1.0
536
+ appflips_byslice = np.ones_like(estweights_byslice)
186
537
  else:
187
538
  if arteriesonly:
188
539
  appflips_byslice[np.where(appflips_byslice > 0.0)] = 0.0
189
540
 
190
- # make slice means
191
- print("Making slice means...")
192
- hirestc = np.zeros((timepoints * numsteps), dtype=np.float64)
541
+ # Prepare weights
542
+ if fliparteries:
543
+ theseweights_byslice = appflips_byslice.astype(np.float64) * estweights_byslice
544
+ else:
545
+ theseweights_byslice = estweights_byslice
546
+
547
+ return appflips_byslice, theseweights_byslice
548
+
549
+
550
+ def _compute_slice_averages(
551
+ normdata_byslice: NDArray,
552
+ theseweights_byslice: NDArray,
553
+ numslices: int,
554
+ timepoints: int,
555
+ numsteps: int,
556
+ sliceoffsets: NDArray,
557
+ signal_sign: float,
558
+ madnorm: bool,
559
+ usemask: bool,
560
+ multiplicative: bool,
561
+ verbose: bool,
562
+ ) -> tuple[NDArray, NDArray, NDArray]:
563
+ """
564
+ Compute averaged signals for each slice with normalization.
565
+
566
+ Parameters
567
+ ----------
568
+ normdata_byslice : NDArray
569
+ Normalized fMRI data organized by slice.
570
+ theseweights_byslice : NDArray
571
+ Processed weights for each voxel and slice.
572
+ numslices : int
573
+ Number of slices in the acquisition.
574
+ timepoints : int
575
+ Number of time points in the fMRI time series.
576
+ numsteps : int
577
+ Number of unique slice times.
578
+ sliceoffsets : NDArray
579
+ Slice offsets relative to TR.
580
+ signal_sign : float
581
+ Sign factor for physiological signal (+1.0 or -1.0).
582
+ madnorm : bool
583
+ If True, use median absolute deviation normalization.
584
+ usemask : bool
585
+ If True, use masking for valid voxels.
586
+ multiplicative : bool
587
+ If True, apply multiplicative normalization.
588
+ verbose : bool
589
+ If True, print verbose output.
590
+
591
+ Returns
592
+ -------
593
+ tuple[NDArray, NDArray, NDArray]
594
+ - high_res_timecourse: High-resolution time course across all slices
595
+ - cycleaverage: Average signal per slice time step
596
+ - slicenorms: Normalization factors for each slice
597
+ """
598
+ high_res_timecourse = np.zeros((timepoints * numsteps), dtype=np.float64)
193
599
  cycleaverage = np.zeros((numsteps), dtype=np.float64)
194
- sliceavs = np.zeros((numslices, timepoints), dtype=np.float64)
600
+ slice_averages = np.zeros((numslices, timepoints), dtype=np.float64)
195
601
  slicenorms = np.zeros((numslices), dtype=np.float64)
602
+
196
603
  if not verbose:
197
604
  print("Averaging slices...")
198
- if fliparteries:
199
- thismask_byslice = appflips_byslice.astype(np.int64) * estmask_byslice
200
- else:
201
- thismask_byslice = estmask_byslice
202
- for theslice in range(numslices):
605
+
606
+ for slice_idx in range(numslices):
203
607
  if verbose:
204
- print("Averaging slice", theslice)
608
+ print("Averaging slice", slice_idx)
609
+
610
+ # Find valid voxels for this slice
205
611
  if usemask:
206
- validestvoxels = np.where(np.abs(thismask_byslice[:, theslice]) > 0)[0]
612
+ valid_voxel_indices = np.where(np.abs(theseweights_byslice[:, slice_idx]) > 0)[0]
207
613
  else:
208
- validestvoxels = np.where(np.abs(thismask_byslice[:, theslice] >= 0))[0]
209
- if len(validestvoxels) > 0:
614
+ valid_voxel_indices = np.where(np.abs(theseweights_byslice[:, slice_idx] >= 0))[0]
615
+
616
+ if len(valid_voxel_indices) > 0:
617
+ # Compute weighted average for this slice
618
+ weighted_slice_data = np.mean(
619
+ normdata_byslice[valid_voxel_indices, slice_idx, :]
620
+ * theseweights_byslice[valid_voxel_indices, slice_idx, np.newaxis],
621
+ axis=0,
622
+ )
623
+
624
+ # Apply normalization if requested
210
625
  if madnorm:
211
- sliceavs[theslice, :], slicenorms[theslice] = tide_math.madnormalize(
212
- np.mean(
213
- normdata_byslice[validestvoxels, theslice, :]
214
- * thismask_byslice[validestvoxels, theslice, np.newaxis],
215
- axis=0,
216
- ),
217
- returnnormfac=True,
626
+ slice_averages[slice_idx, :], slicenorms[slice_idx] = tide_math.madnormalize(
627
+ weighted_slice_data
218
628
  )
219
629
  else:
220
- sliceavs[theslice, :] = np.mean(
221
- normdata_byslice[validestvoxels, theslice, :]
222
- * thismask_byslice[validestvoxels, theslice, np.newaxis],
223
- axis=0,
224
- )
225
- slicenorms[theslice] = 1.0
630
+ slice_averages[slice_idx, :] = weighted_slice_data
631
+ slicenorms[slice_idx] = 1.0
632
+
633
+ # Build high-resolution time course
226
634
  for t in range(timepoints):
227
- hirestc[numsteps * t + sliceoffsets[theslice]] += thesign * sliceavs[theslice, t]
635
+ high_res_timecourse[numsteps * t + sliceoffsets[slice_idx]] += (
636
+ signal_sign * slice_averages[slice_idx, t]
637
+ )
638
+ else:
639
+ if verbose:
640
+ print(f"CARDIACFROMIMAGE: slice {slice_idx} contains no non-zero voxels")
641
+
642
+ # Compute cycle average
228
643
  for i in range(numsteps):
229
- cycleaverage[i] = np.mean(hirestc[i:-1:numsteps])
230
- for t in range(len(hirestc)):
644
+ cycleaverage[i] = np.mean(high_res_timecourse[i:-1:numsteps])
645
+
646
+ # Apply cycle average correction
647
+ for t in range(len(high_res_timecourse)):
231
648
  if multiplicative:
232
- hirestc[t] /= cycleaverage[t % numsteps] + 1.0
649
+ high_res_timecourse[t] /= cycleaverage[t % numsteps] + 1.0
233
650
  else:
234
- hirestc[t] -= cycleaverage[t % numsteps]
651
+ high_res_timecourse[t] -= cycleaverage[t % numsteps]
652
+
235
653
  if not verbose:
236
654
  print("done")
237
- slicesamplerate = 1.0 * numsteps / tr
238
- print("Slice sample rate is ", "{:.3f}".format(slicesamplerate))
239
655
 
240
- # delete the TR frequency and the first subharmonic
241
- print("Notch filtering...")
242
- filthirestc = tide_filt.harmonicnotchfilter(
243
- hirestc, slicesamplerate, 1.0 / tr, notchpct=notchpct, debug=debug
656
+ return high_res_timecourse, cycleaverage, slicenorms
657
+
658
+
659
def _normalize_and_filter_signal(
    prefilter: tide_filt.NoncausalFilter,
    slicesamplerate: float,
    filtered_timecourse: NDArray,
    slicenorms: NDArray,
) -> tuple[NDArray, float]:
    """
    Bandpass and MAD-normalize a physiological time course.

    The input is passed through ``prefilter``, MAD-normalized, and negated
    (module constant ``SIGNAL_INVERSION_FACTOR``).  The returned scale
    factor is the MAD normalization factor times the mean slice norm.

    Parameters
    ----------
    prefilter : tide_filt.NoncausalFilter
        Filter whose ``apply`` method selects the physiological band.
    slicesamplerate : float
        Sample rate of ``filtered_timecourse`` in Hz.
    filtered_timecourse : NDArray
        Notch-filtered high-resolution time course.
    slicenorms : NDArray
        Per-slice normalization factors.

    Returns
    -------
    tuple[NDArray, float]
        The filtered, normalized, sign-flipped signal and its overall
        normalization factor.
    """
    bandlimited = prefilter.apply(slicesamplerate, filtered_timecourse)
    # madnormalize is expected to return (normalized signal, scale factor).
    normalized, scalefac = tide_math.madnormalize(bandlimited)
    return normalized * SIGNAL_INVERSION_FACTOR, scalefac * np.mean(slicenorms)
689
+
690
+
691
def _extract_physiological_signals(
    filtered_timecourse: NDArray,
    slicesamplerate: float,
    cardprefilter: tide_filt.NoncausalFilter,
    respprefilter: tide_filt.NoncausalFilter,
    slicenorms: NDArray,
) -> tuple[NDArray, float, NDArray, float]:
    """
    Extract normalized cardiac and respiratory waveforms from one time course.

    Each prefilter is applied to the same notch-filtered time course and the
    result is MAD-normalized via ``_normalize_and_filter_signal``.

    Parameters
    ----------
    filtered_timecourse : NDArray
        Notch-filtered high-resolution time course.
    slicesamplerate : float
        Slice sampling rate in Hz.
    cardprefilter : tide_filt.NoncausalFilter
        Cardiac prefilter object.
    respprefilter : tide_filt.NoncausalFilter
        Respiratory prefilter object.
    slicenorms : NDArray
        Slice-wise normalization factors.

    Returns
    -------
    tuple[NDArray, float, NDArray, float]
        Cardiac time course, cardiac normalization factor, respiratory time
        course, respiratory normalization factor.
    """
    # Run the same extraction for each physiological band.
    (hirescardtc, cardnormfac), (hiresresptc, respnormfac) = (
        _normalize_and_filter_signal(bandfilter, slicesamplerate, filtered_timecourse, slicenorms)
        for bandfilter in (cardprefilter, respprefilter)
    )
    return hirescardtc, cardnormfac, hiresresptc, respnormfac
731
+
732
+
733
def cardiacfromimage(
    normdata_byslice: NDArray,
    estweights_byslice: NDArray,
    numslices: int,
    timepoints: int,
    tr: float,
    slicetimes: NDArray,
    cardprefilter: tide_filt.NoncausalFilter,
    respprefilter: tide_filt.NoncausalFilter,
    config: CardiacExtractionConfig | None = None,
    appflips_byslice: NDArray | None = None,
) -> CardiacExtractionResult:
    """
    Extract cardiac and respiratory signals from 4D fMRI data using slice timing.

    The slice-timing offsets of the acquisition are used to interleave
    per-slice average signals into a high-resolution time course, which is
    notch-filtered at the TR frequency and its harmonics and then bandpassed
    into cardiac and respiratory components.

    Parameters
    ----------
    normdata_byslice : NDArray
        Normalized fMRI data organized by slice, shape (voxels, numslices, timepoints).
    estweights_byslice : NDArray
        Estimation weights for each voxel and slice, shape (voxels, numslices).
    numslices : int
        Number of slices in the acquisition.
    timepoints : int
        Number of time points in the fMRI time series.
    tr : float
        Repetition time (TR) in seconds.
    slicetimes : NDArray
        Slice acquisition times relative to the start of the TR, shape (numslices,).
    cardprefilter : tide_filt.NoncausalFilter
        Cardiac prefilter object with an ``apply`` method.
    respprefilter : tide_filt.NoncausalFilter
        Respiratory prefilter object with an ``apply`` method.
    config : CardiacExtractionConfig | None, optional
        Processing parameters.  If None, a default ``CardiacExtractionConfig()``
        is used.  Default is None.
    appflips_byslice : NDArray | None, optional
        Per-voxel sign flips for each slice; if None, all ones.  Default is None.

    Returns
    -------
    CardiacExtractionResult
        Dataclass (iterable as a 9-tuple for backward compatibility)
        containing the cardiac and respiratory time courses and
        normalization factors, the slice sample rate, the slice-timing
        layout, the cycle average, and the per-slice norms.

    Raises
    ------
    ValueError
        If the input dimensions are inconsistent, or if an intermediate
        waveform has no variation (which indicates invalid input data).

    Examples
    --------
    >>> config = CardiacExtractionConfig(madnorm=True, verbose=False)
    >>> result = cardiacfromimage(
    ...     normdata_byslice, estweights_byslice, numslices, timepoints,
    ...     tr, slicetimes, cardprefilter, respprefilter, config=config
    ... )
    >>> print(result.slicesamplerate)
    """
    # Fall back to the default processing parameters, as documented.
    if config is None:
        config = CardiacExtractionConfig()

    # Validate inputs
    _validate_cardiacfromimage_inputs(
        normdata_byslice, estweights_byslice, numslices, timepoints, tr
    )

    # Find out what timepoints we have, and their spacing
    numsteps, minstep, sliceoffsets = tide_io.sliceinfo(slicetimes, tr)
    print(
        len(slicetimes),
        "slice times with",
        numsteps,
        "unique values - diff is",
        f"{minstep:.3f}",
    )

    # Determine signal sign
    signal_sign = SIGN_INVERTED if config.invertphysiosign else SIGN_NORMAL

    # Prepare weights
    appflips_byslice, theseweights_byslice = _prepare_weights(
        estweights_byslice,
        appflips_byslice,
        config.arteriesonly,
        config.fliparteries,
    )

    # Compute slice averages
    print("Making slice means...")
    high_res_timecourse, cycleaverage, slicenorms = _compute_slice_averages(
        normdata_byslice,
        theseweights_byslice,
        numslices,
        timepoints,
        numsteps,
        sliceoffsets,
        signal_sign,
        config.madnorm,
        config.usemask,
        config.multiplicative,
        config.verbose,
    )

    # Sanity check the output waveforms.
    hrtc_pp = np.max(high_res_timecourse) - np.min(high_res_timecourse)
    if hrtc_pp == 0.0:
        raise ValueError(
            "CARDIACFROMIMAGE: high_res_timecourse has no variation prior to filtering!"
        )
    cycleav_pp = np.max(cycleaverage) - np.min(cycleaverage)
    if cycleav_pp == 0.0:
        raise ValueError("CARDIACFROMIMAGE: cycleaverage has no variation prior to filtering!")
    # With madnorm disabled every slice norm is identically 1.0, so a
    # zero-variation check would always (spuriously) fire; only check the
    # norms when they are actually estimated.
    if config.madnorm:
        slicenorms_pp = np.max(slicenorms) - np.min(slicenorms)
        if slicenorms_pp == 0.0:
            raise ValueError("CARDIACFROMIMAGE: slicenorms has no variation prior to filtering!")

    # Calculate slice sample rate
    slicesamplerate = 1.0 * numsteps / tr
    print(f"Slice sample rate is {slicesamplerate:.3f}")

    # Delete the TR frequency and the first subharmonic
    print("Notch filtering...")
    filtered_timecourse = tide_filt.harmonicnotchfilter(
        high_res_timecourse,
        slicesamplerate,
        1.0 / tr,
        notchpct=config.notchpct,
        debug=config.debug,
    )

    # Sanity check the filtered waveform
    filtered_pp = np.max(filtered_timecourse) - np.min(filtered_timecourse)
    if filtered_pp == 0.0:
        raise ValueError(
            "CARDIACFROMIMAGE: high_res_timecourse has no variation after notch filtering!"
        )

    # Extract cardiac and respiratory waveforms
    hirescardtc, cardnormfac, hiresresptc, respnormfac = _extract_physiological_signals(
        filtered_timecourse,
        slicesamplerate,
        cardprefilter,
        respprefilter,
        slicenorms,
    )

    # Sanity check the physiological waveforms
    hirescardtc_pp = np.max(hirescardtc) - np.min(hirescardtc)
    if hirescardtc_pp == 0.0:
        raise ValueError("CARDIACFROMIMAGE: hirescardtc has no variation after extraction!")
    hiresresptc_pp = np.max(hiresresptc) - np.min(hiresresptc)
    if hiresresptc_pp == 0.0:
        raise ValueError("CARDIACFROMIMAGE: hiresresptc has no variation after extraction!")

    return CardiacExtractionResult(
        hirescardtc=hirescardtc,
        cardnormfac=cardnormfac,
        hiresresptc=hiresresptc,
        respnormfac=respnormfac,
        slicesamplerate=slicesamplerate,
        numsteps=numsteps,
        sliceoffsets=sliceoffsets,
        cycleaverage=cycleaverage,
        slicenorms=slicenorms,
    )
921
+
922
+
923
def theCOM(X: NDArray, data: NDArray) -> float:
    """
    Calculate the center of mass of a weighted coordinate array.

    Parameters
    ----------
    X : NDArray
        Coordinate of each sample.  Must broadcast against ``data``
        (typically both are 1D arrays of the same length).
    data : NDArray
        Weight (mass) of each sample.

    Returns
    -------
    float
        The weighted mean coordinate, sum(data * X) / sum(data).

    Notes
    -----
    The center of mass is calculated using the formula:
    COM = Σ(m_i * x_i) / Σ(m_i)

    where m_i are the masses and x_i are the positions.  The result is a
    division by zero if the weights sum to zero.

    Examples
    --------
    >>> import numpy as np
    >>> positions = np.array([1.0, 2.0, 3.0])
    >>> masses = np.array([1.0, 2.0, 3.0])
    >>> float(theCOM(positions, masses))
    2.3333333333333335
    """
    # return the center of mass
    return np.sum(X * data) / np.sum(data)
957
+
958
+
959
def savgolsmooth(data: NDArray, smoothlen: int = 101, polyorder: int = 3) -> NDArray:
    """
    Smooth data with a Savitzky-Golay filter.

    A polynomial of order ``polyorder`` is fit over a sliding window of
    ``smoothlen`` samples.  This removes noise while preserving peak shapes
    and heights better than a simple moving average.

    Parameters
    ----------
    data : NDArray
        Data to smooth.  May be 1D or 2D.
    smoothlen : int, optional
        Filter window length (number of coefficients); must exceed
        ``polyorder``.  Default is 101.
    polyorder : int, optional
        Order of the polynomial fit to each window.  Default is 3.

    Returns
    -------
    NDArray
        Smoothed data with the same shape as ``data``.

    Examples
    --------
    >>> import numpy as np
    >>> noisy = np.random.randn(100)
    >>> smooth = savgolsmooth(noisy, smoothlen=21, polyorder=3)

    >>> noisy_2d = np.random.randn(50, 10)
    >>> smooth_2d = savgolsmooth(noisy_2d, smoothlen=9, polyorder=2)
    """
    return savgol_filter(data, smoothlen, polyorder)
274
1002
 
275
1003
 
276
- def getperiodic(inputdata, Fs, fundfreq, ncomps=1, width=0.4, debug=False):
277
- outputdata = inputdata * 0.0
1004
+ def getperiodic(
1005
+ inputdata: NDArray,
1006
+ Fs: float,
1007
+ fundfreq: float,
1008
+ ncomps: int = 1,
1009
+ width: float = 0.4,
1010
+ debug: bool = False,
1011
+ ) -> NDArray:
1012
+ """
1013
+ Apply a periodic filter to extract harmonic components from input data.
1014
+
1015
+ This function applies a non-causal filter to isolate and extract periodic
1016
+ components of a signal based on a fundamental frequency and number of
1017
+ harmonics. It uses an arbitrary filter design to define stopband and passband
1018
+ frequencies for each harmonic component.
1019
+
1020
+ Parameters
1021
+ ----------
1022
+ inputdata : NDArray
1023
+ Input signal data to be filtered.
1024
+ Fs : float
1025
+ Sampling frequency of the input signal (Hz).
1026
+ fundfreq : float
1027
+ Fundamental frequency of the periodic signal (Hz).
1028
+ ncomps : int, optional
1029
+ Number of harmonic components to extract. Default is 1.
1030
+ width : float, optional
1031
+ Width parameter controlling the bandwidth of each harmonic filter.
1032
+ Default is 0.4.
1033
+ debug : bool, optional
1034
+ If True, print debug information during processing. Default is False.
1035
+
1036
+ Returns
1037
+ -------
1038
+ NDArray
1039
+ Filtered output signal containing the specified harmonic components.
1040
+
1041
+ Notes
1042
+ -----
1043
+ The function reduces the number of components (`ncomps`) if the highest
1044
+ harmonic exceeds the Nyquist frequency (Fs/2). Each harmonic is filtered
1045
+ using an arbitrary filter with stopband and passband frequencies defined
1046
+ based on the `width` parameter.
1047
+ """
1048
+ outputdata = np.zeros_like(inputdata)
278
1049
  lowerdist = fundfreq - fundfreq / (1.0 + width)
279
1050
  upperdist = fundfreq * width
280
1051
  if debug:
@@ -298,13 +1069,56 @@ def getperiodic(inputdata, Fs, fundfreq, ncomps=1, width=0.4, debug=False):
298
1069
 
299
1070
 
300
1071
  def getcardcoeffs(
301
- cardiacwaveform,
302
- slicesamplerate,
303
- minhr=40.0,
304
- maxhr=140.0,
305
- smoothlen=101,
306
- debug=False,
307
- ):
1072
+ cardiacwaveform: NDArray,
1073
+ slicesamplerate: float,
1074
+ minhr: float = 40.0,
1075
+ maxhr: float = 140.0,
1076
+ smoothlen: int = 101,
1077
+ debug: bool = False,
1078
+ ) -> float:
1079
+ """
1080
+ Compute the fundamental cardiac frequency from a cardiac waveform using spectral analysis.
1081
+
1082
+ This function estimates the heart rate (in beats per minute) from a given cardiac waveform
1083
+ by performing a Welch periodogram and applying a smoothing filter to identify the dominant
1084
+ frequency component. The result is returned as a frequency value in Hz, which can be
1085
+ converted to BPM by multiplying by 60.
1086
+
1087
+ Parameters
1088
+ ----------
1089
+ cardiacwaveform : NDArray
1090
+ Input cardiac waveform signal as a 1D numpy array.
1091
+ slicesamplerate : float
1092
+ Sampling rate of the input waveform in Hz.
1093
+ minhr : float, optional
1094
+ Minimum allowed heart rate in BPM. Default is 40.0.
1095
+ maxhr : float, optional
1096
+ Maximum allowed heart rate in BPM. Default is 140.0.
1097
+ smoothlen : int, optional
1098
+ Length of the Savitzky-Golay filter window for smoothing the spectrum.
1099
+ Default is 101.
1100
+ debug : bool, optional
1101
+ If True, print intermediate debug information including initial and final
1102
+ frequency estimates. Default is False.
1103
+
1104
+ Returns
1105
+ -------
1106
+ float
1107
+ Estimated fundamental cardiac frequency in Hz.
1108
+
1109
+ Notes
1110
+ -----
1111
+ The function applies a Hamming window to the input signal before spectral analysis.
1112
+ It removes spectral components outside the physiological range (defined by `minhr`
1113
+ and `maxhr`) and uses Savitzky-Golay smoothing to detect the peak frequency.
1114
+
1115
+ Examples
1116
+ --------
1117
+ >>> import numpy as np
1118
+ >>> waveform = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000))
1119
+ >>> freq = getcardcoeffs(waveform, slicesamplerate=100)
1120
+ >>> print(f"Estimated heart rate: {freq * 60:.2f} BPM")
1121
+ """
308
1122
  if len(cardiacwaveform) > 1024:
309
1123
  thex, they = welch(cardiacwaveform, slicesamplerate, nperseg=1024)
310
1124
  else:
@@ -338,60 +1152,373 @@ def getcardcoeffs(
338
1152
  return peakfreq
339
1153
 
340
1154
 
341
- def normalizevoxels(fmri_data, detrendorder, validvoxels, time, timings, showprogressbar=False):
342
- print("Normalizing voxels...")
343
- normdata = fmri_data * 0.0
344
- demeandata = fmri_data * 0.0
345
- starttime = time.time()
346
- # detrend if we are going to
347
- numspatiallocs = fmri_data.shape[0]
348
- if detrendorder > 0:
349
- print("Detrending to order", detrendorder, "...")
350
- for idx, thevox in enumerate(
351
- tqdm(
352
- validvoxels,
353
- desc="Voxel",
354
- unit="voxels",
355
- disable=(not showprogressbar),
356
- )
357
- ):
358
- fmri_data[thevox, :] = tide_fit.detrend(
359
- fmri_data[thevox, :], order=detrendorder, demean=False
360
- )
361
- timings.append(["Detrending finished", time.time(), numspatiallocs, "voxels"])
362
- print(" done")
1155
+ def _procOneVoxelDetrend(
1156
+ vox: int,
1157
+ voxelargs: tuple,
1158
+ **kwargs,
1159
+ ) -> tuple[int, NDArray]:
1160
+ """
1161
+ Detrend fMRI voxel data for a single voxel.
363
1162
 
364
- means = np.mean(fmri_data[:, :], axis=1).flatten()
365
- demeandata[validvoxels, :] = fmri_data[validvoxels, :] - means[validvoxels, None]
366
- normdata[validvoxels, :] = np.nan_to_num(demeandata[validvoxels, :] / means[validvoxels, None])
367
- medians = np.median(normdata[:, :], axis=1).flatten()
368
- mads = mad(normdata[:, :], axis=1).flatten()
369
- timings.append(["Normalization finished", time.time(), numspatiallocs, "voxels"])
370
- print("Normalization took", "{:.3f}".format(time.time() - starttime), "seconds")
371
- return normdata, demeandata, means, medians, mads
1163
+ This function applies detrending to fMRI voxel data using the tide_fit.detrend
1164
+ function. It supports both linear and polynomial detrending with optional
1165
+ mean centering.
372
1166
 
1167
+ Parameters
1168
+ ----------
1169
+ vox : int
1170
+ Voxel index identifier.
1171
+ voxelargs : tuple
1172
+ Tuple containing fMRI voxel data as the first element. Expected format:
1173
+ (fmri_voxeldata,)
1174
+ **kwargs : dict
1175
+ Additional keyword arguments for detrending options:
1176
+ - detrendorder : int, optional
1177
+ Order of the detrend polynomial (default: 1 for linear detrend)
1178
+ - demean : bool, optional
1179
+ If True, remove the mean from the data (default: False)
1180
+ - debug : bool, optional
1181
+ If True, print debug information (default: False)
373
1182
 
374
- def cleanphysio(
375
- Fs, physiowaveform, cutoff=0.4, thresh=0.2, nyquist=None, iscardiac=True, debug=False
376
- ):
377
- # first bandpass the cardiac signal to calculate the envelope
1183
+ Returns
1184
+ -------
1185
+ tuple
1186
+ A tuple containing:
1187
+ - vox : int
1188
+ The original voxel index
1189
+ - detrended_voxeldata : ndarray
1190
+ The detrended fMRI voxel data with the same shape as input
1191
+
1192
+ Notes
1193
+ -----
1194
+ This function uses the tide_fit.detrend function internally for the actual
1195
+ detrending operation. The detrendorder parameter controls the polynomial order
1196
+ of the detrending (0 = mean removal only, 1 = linear detrend, 2 = quadratic detrend, etc.).
1197
+
1198
+ Examples
1199
+ --------
1200
+ >>> import numpy as np
1201
+ >>> from rapidtide.fit import detrend
1202
+ >>> data = np.random.randn(100)
1203
+ >>> result = _procOneVoxelDetrend(0, (data,), detrendorder=1, demean=True)
1204
+ >>> print(result[0]) # voxel index
1205
+ 0
1206
+ >>> print(result[1].shape) # detrended data shape
1207
+ (100,)
1208
+ """
1209
+ # unpack arguments
1210
+ options = {
1211
+ "detrendorder": 1,
1212
+ "demean": False,
1213
+ "debug": False,
1214
+ }
1215
+ options.update(kwargs)
1216
+ detrendorder = options["detrendorder"]
1217
+ demean = options["demean"]
1218
+ debug = options["debug"]
1219
+ [fmri_voxeldata] = voxelargs
378
1220
  if debug:
379
- print("Entering cleanphysio")
1221
+ print(f"{vox=}, {detrendorder=}, {demean=}, {fmri_voxeldata.shape=}")
380
1222
 
381
- print("Filtering")
382
- physiofilter = tide_filt.NoncausalFilter("cardiac", debug=debug)
1223
+ detrended_voxeldata = tide_fit.detrend(fmri_voxeldata, order=detrendorder, demean=demean)
383
1224
 
384
- print("Envelope detection")
385
- envelope = tide_math.envdetect(
386
- Fs,
387
- tide_math.madnormalize(physiofilter.apply(Fs, tide_math.madnormalize(physiowaveform))),
388
- cutoff=cutoff,
1225
+ return (
1226
+ vox,
1227
+ detrended_voxeldata,
389
1228
  )
390
- envmean = np.mean(envelope)
391
1229
 
392
- # now patch the envelope function to eliminate very low values
393
- envlowerlim = thresh * np.max(envelope)
394
- envelope = np.where(envelope >= envlowerlim, envelope, envlowerlim)
1230
+
1231
+ def _packDetrendvoxeldata(voxnum: int, voxelargs: list) -> list[NDArray]:
1232
+ """
1233
+ Extract voxel data for a specific voxel number from voxel arguments.
1234
+
1235
+ Parameters
1236
+ ----------
1237
+ voxnum : int
1238
+ The voxel number to extract data for.
1239
+ voxelargs : list or tuple
1240
+ A tuple containing voxel data arrays, where the first element is
1241
+ expected to be a 2D array with voxel data indexed by [voxel, feature].
1242
+
1243
+ Returns
1244
+ -------
1245
+ list
1246
+ A list containing a single element, which is a 1D array of feature
1247
+ values for the specified voxel number.
1248
+
1249
+ Notes
1250
+ -----
1251
+ This function is designed to extract a single voxel's worth of data
1252
+ from a collection of voxel arguments for further processing in
1253
+ detrending operations.
1254
+
1255
+ Examples
1256
+ --------
1257
+ >>> voxel_data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1258
+ >>> result = _packDetrendvoxeldata(1, (voxel_data,))
1259
+ >>> print(result)
1260
+ [array([4, 5, 6])]
1261
+ """
1262
+ return [(voxelargs[0])[voxnum, :]]
1263
+
1264
+
1265
+ def _unpackDetrendvoxeldata(retvals: tuple, voxelproducts: list) -> None:
1266
+ """
1267
+ Unpack detrend voxel data by assigning values to voxel products array.
1268
+
1269
+ Parameters
1270
+ ----------
1271
+ retvals : tuple or list
1272
+ Contains two elements where retvals[0] is used as indices and retvals[1]
1273
+ contains the values to be assigned.
1274
+ voxelproducts : list
1275
+ List containing arrays where voxelproducts[0] is the target array that
1276
+ will be modified in-place with the assigned values.
1277
+
1278
+ Returns
1279
+ -------
1280
+ None
1281
+ This function modifies voxelproducts[0] in-place and does not return anything.
1282
+
1283
+ Notes
1284
+ -----
1285
+ This function performs an in-place assignment operation where values from
1286
+ retvals[1] are placed at the specified indices retvals[0] in the first
1287
+ element of voxelproducts list.
1288
+
1289
+ Examples
1290
+ --------
1291
+ >>> retvals = (np.array([0, 2]), np.array([[1.0, 2.0], [3.0, 4.0]]))
1292
+ >>> voxelproducts = [np.zeros((3, 2))]
1293
+ >>> _unpackDetrendvoxeldata(retvals, voxelproducts)
1294
+ >>> voxelproducts[0][0], voxelproducts[0][2]
1295
+ (array([1., 2.]), array([3., 4.]))
1296
+ """
1297
+ (voxelproducts[0])[retvals[0], :] = retvals[1]
1298
+
1299
+
1300
+ def normalizevoxels(
1301
+ fmri_data: NDArray,
1302
+ detrendorder: int,
1303
+ validvoxels: NDArray,
1304
+ time: object,
1305
+ timings: list,
1306
+ LGR: object | None = None,
1307
+ mpcode: bool = True,
1308
+ nprocs: int = 1,
1309
+ alwaysmultiproc: bool = False,
1310
+ showprogressbar: bool = True,
1311
+ chunksize: int = 1000,
1312
+ debug: bool = False,
1313
+ ) -> tuple[NDArray, NDArray, NDArray, NDArray, NDArray]:
1314
+ """
1315
+ Normalize fMRI voxel data by detrending and mean-scaling.
1316
+
1317
+ This function applies detrending to fMRI data and then normalizes the data
1318
+ using mean and median-based scaling. It supports both single-threaded and
1319
+ multi-threaded processing for detrending.
1320
+
1321
+ Parameters
1322
+ ----------
1323
+ fmri_data : NDArray
1324
+ 2D array of fMRI data with shape (n_voxels, n_timepoints).
1325
+ detrendorder : int
1326
+ Order of detrending to apply. If 0, no detrending is performed.
1327
+ validvoxels : NDArray
1328
+ 1D array of indices indicating which voxels are valid for processing.
1329
+ time : object
1330
+ Module or object with a `time.time()` method for timing operations.
1331
+ timings : list
1332
+ List to append timing information about processing steps.
1333
+ LGR : object, optional
1334
+ Logger object for debugging; default is None.
1335
+ mpcode : bool, optional
1336
+ If True, use multi-processing for detrending; default is True.
1337
+ nprocs : int, optional
1338
+ Number of processes to use in multi-processing; default is 1.
1339
+ alwaysmultiproc : bool, optional
1340
+ If True, always use multi-processing even for small datasets; default is False.
1341
+ showprogressbar : bool, optional
1342
+ If True, show progress bar during voxel processing; default is True.
1343
+ chunksize : int, optional
1344
+ Size of chunks for multi-processing; default is 1000.
1345
+ debug : bool, optional
1346
+ If True, enable debug output; default is False.
1347
+
1348
+ Returns
1349
+ -------
1350
+ tuple of NDArray
1351
+ A tuple containing:
1352
+ - `normdata`: Normalized fMRI data (demeaned, then divided by the voxel mean).
1353
+ - `demeandata`: Detrended and mean-centered data.
1354
+ - `means`: Mean values for each voxel.
1355
+ - `medians`: Median values for each voxel.
1356
+ - `mads`: Median absolute deviation for each voxel.
1357
+
1358
+ Notes
1359
+ -----
1360
+ - The function modifies `fmri_data` in-place during detrending.
1361
+ - If `detrendorder` is greater than 0, detrending is applied using `tide_fit.detrend`.
1362
+ - The multiprocessing code path is taken when `mpcode=True`; actual parallelism is governed by `nprocs` and `alwaysmultiproc`.
1363
+ - Timing information is appended to the `timings` list.
1364
+
1365
+ Examples
1366
+ --------
1367
+ >>> import numpy as np
1368
+ >>> from tqdm import tqdm
1369
+ >>> fmri_data = np.random.rand(100, 200)
1370
+ >>> validvoxels = np.arange(100)
1371
+ >>> timings = []
1372
+ >>> normdata, demeandata, means, medians, mads = normalizevoxels(
1373
+ ... fmri_data, detrendorder=1, validvoxels=validvoxels,
1374
+ ... time=time, timings=timings
1375
+ ... )
1376
+ """
1377
+ print("Normalizing voxels...")
1378
+ normdata = np.zeros_like(fmri_data)
1379
+ demeandata = np.zeros_like(fmri_data)
1380
+ starttime = time.time()
1381
+ # detrend if we are going to
1382
+ numspatiallocs = fmri_data.shape[0]
1383
+ # NB: fmri_data is detrended in place
1384
+ if detrendorder > 0:
1385
+ print("Detrending to order", detrendorder, "...")
1386
+ if mpcode:
1387
+ if debug:
1388
+ print(f"detrend multiproc path: {detrendorder=}")
1389
+ inputshape = fmri_data.shape
1390
+ voxelargs = [
1391
+ fmri_data,
1392
+ ]
1393
+ voxelfunc = _procOneVoxelDetrend
1394
+ packfunc = _packDetrendvoxeldata
1395
+ unpackfunc = _unpackDetrendvoxeldata
1396
+ voxelmask = np.zeros_like(fmri_data[:, 0])
1397
+ voxelmask[validvoxels] = 1
1398
+ voxeltargets = [fmri_data]
1399
+
1400
+ numspatiallocs = tide_genericmultiproc.run_multiproc(
1401
+ voxelfunc,
1402
+ packfunc,
1403
+ unpackfunc,
1404
+ voxelargs,
1405
+ voxeltargets,
1406
+ inputshape,
1407
+ voxelmask,
1408
+ LGR,
1409
+ nprocs,
1410
+ alwaysmultiproc,
1411
+ showprogressbar,
1412
+ chunksize,
1413
+ debug=debug,
1414
+ detrendorder=detrendorder,
1415
+ demean=False,
1416
+ )
1417
+ else:
1418
+ if debug:
1419
+ print(f"detrend nonmultiproc path: {detrendorder=}")
1420
+ for idx, thevox in enumerate(
1421
+ tqdm(
1422
+ validvoxels,
1423
+ desc="Voxel",
1424
+ unit="voxels",
1425
+ disable=(not showprogressbar),
1426
+ )
1427
+ ):
1428
+ fmri_data[thevox, :] = tide_fit.detrend(
1429
+ fmri_data[thevox, :], order=detrendorder, demean=False
1430
+ )
1431
+ timings.append(["Detrending finished", time.time(), numspatiallocs, "voxels"])
1432
+ print(" done")
1433
+
1434
+ timings.append(["Detrending finished", time.time(), numspatiallocs, "voxels"])
1435
+ print(" done")
1436
+
1437
+ means = np.mean(fmri_data[:, :], axis=1).flatten()
1438
+ demeandata[validvoxels, :] = fmri_data[validvoxels, :] - means[validvoxels, None]
1439
+ normdata[validvoxels, :] = np.nan_to_num(demeandata[validvoxels, :] / means[validvoxels, None])
1440
+ medians = np.median(normdata[:, :], axis=1).flatten()
1441
+ mads = mad(normdata[:, :], axis=1).flatten()
1442
+ timings.append(["Normalization finished", time.time(), numspatiallocs, "voxels"])
1443
+ print("Normalization took", "{:.3f}".format(time.time() - starttime), "seconds")
1444
+ return normdata, demeandata, means, medians, mads
1445
+
1446
+
1447
+ def cleanphysio(
1448
+ Fs: float,
1449
+ physiowaveform: NDArray,
1450
+ cutoff: float = 0.4,
1451
+ thresh: float = 0.2,
1452
+ nyquist: float | None = None,
1453
+ iscardiac: bool = True,
1454
+ debug: bool = False,
1455
+ ) -> tuple[NDArray, NDArray, NDArray, float]:
1456
+ """
1457
+ Apply filtering and normalization to a physiological waveform to extract a cleaned signal and envelope.
1458
+
1459
+ This function performs bandpass filtering on a physiological signal to detect its envelope,
1460
+ then applies high-pass filtering to remove baseline drift. The waveform is normalized using
1461
+ the envelope to produce a cleaned and standardized signal.
1462
+
1463
+ Parameters
1464
+ ----------
1465
+ Fs : float
1466
+ Sampling frequency of the input waveform in Hz.
1467
+ physiowaveform : NDArray
1468
+ Input physiological waveform signal (1D array).
1469
+ cutoff : float, optional
1470
+ Cutoff frequency for envelope detection, by default 0.4.
1471
+ thresh : float, optional
1472
+ Threshold for envelope normalization, by default 0.2.
1473
+ nyquist : float, optional
1474
+ Nyquist frequency to constrain the high-pass filter, by default None.
1475
+ iscardiac : bool, optional
1476
+ Flag indicating if the signal is cardiac; affects filter type, by default True.
1477
+ debug : bool, optional
1478
+ If True, print debug information during processing, by default False.
1479
+
1480
+ Returns
1481
+ -------
1482
+ tuple[NDArray, NDArray, NDArray, float]
1483
+ A tuple containing:
1484
+ - `filtphysiowaveform`: The high-pass filtered waveform.
1485
+ - `normphysio`: The normalized waveform using the envelope.
1486
+ - `envelope`: The detected envelope of the signal.
1487
+ - `envmean`: The mean of the envelope.
1488
+
1489
+ Notes
1490
+ -----
1491
+ - The function uses `tide_filt.NoncausalFilter` for filtering and `tide_math.envdetect` for envelope detection.
1492
+ - The waveform is normalized using median absolute deviation (MAD) normalization.
1493
+ - The envelope is thresholded to avoid very low values during normalization.
1494
+
1495
+ Examples
1496
+ --------
1497
+ >>> import numpy as np
1498
+ >>> Fs = 100.0
1499
+ >>> signal = np.random.randn(1000)
1500
+ >>> filtered, normalized, env, env_mean = cleanphysio(Fs, signal)
1501
+ """
1502
+ # first bandpass the cardiac signal to calculate the envelope
1503
+ if debug:
1504
+ print("Entering cleanphysio")
1505
+
1506
+ print("Filtering")
1507
+ physiofilter = tide_filt.NoncausalFilter("cardiac", debug=debug)
1508
+
1509
+ print("Envelope detection")
1510
+ envelope = tide_math.envdetect(
1511
+ Fs,
1512
+ tide_math.madnormalize(physiofilter.apply(Fs, tide_math.madnormalize(physiowaveform)[0]))[
1513
+ 0
1514
+ ],
1515
+ cutoff=cutoff,
1516
+ )
1517
+ envmean = np.mean(envelope)
1518
+
1519
+ # now patch the envelope function to eliminate very low values
1520
+ envlowerlim = thresh * np.max(envelope)
1521
+ envelope = np.where(envelope >= envlowerlim, envelope, envlowerlim)
395
1522
 
396
1523
  # now high pass the waveform to eliminate baseline
397
1524
  arb_lowerstop, arb_lowerpass, arb_upperpass, arb_upperstop = physiofilter.getfreqs()
@@ -404,10 +1531,10 @@ def cleanphysio(
404
1531
  arb_upperstop = nyquist
405
1532
  physiofilter.setfreqs(arb_lowerstop, arb_lowerpass, arb_upperpass, arb_upperstop)
406
1533
  filtphysiowaveform = tide_math.madnormalize(
407
- physiofilter.apply(Fs, tide_math.madnormalize(physiowaveform))
408
- )
1534
+ physiofilter.apply(Fs, tide_math.madnormalize(physiowaveform)[0])
1535
+ )[0]
409
1536
  print("Normalizing")
410
- normphysio = tide_math.madnormalize(envmean * filtphysiowaveform / envelope)
1537
+ normphysio = tide_math.madnormalize(envmean * filtphysiowaveform / envelope)[0]
411
1538
 
412
1539
  # return the filtered waveform, the normalized waveform, and the envelope
413
1540
  if debug:
@@ -416,17 +1543,73 @@ def cleanphysio(
416
1543
 
417
1544
 
418
1545
  def findbadpts(
419
- thewaveform,
420
- nameroot,
421
- outputroot,
422
- samplerate,
423
- infodict,
424
- thetype="mad",
425
- retainthresh=0.89,
426
- mingap=2.0,
427
- outputlevel=0,
428
- debug=True,
429
- ):
1546
+ thewaveform: NDArray,
1547
+ nameroot: str,
1548
+ outputroot: str,
1549
+ samplerate: float,
1550
+ infodict: dict,
1551
+ thetype: str = "mad",
1552
+ retainthresh: float = 0.89,
1553
+ mingap: float = 2.0,
1554
+ outputlevel: int = 0,
1555
+ debug: bool = True,
1556
+ ) -> tuple[NDArray, float | tuple[float, float]]:
1557
+ """
1558
+ Identify bad points in a waveform based on statistical thresholding and gap filling.
1559
+
1560
+ This function detects outliers in a waveform using either the Median Absolute Deviation (MAD)
1561
+ or a fractional value-based method. It then applies gap-filling logic to merge short
1562
+ sequences of bad points into longer ones, based on a minimum gap threshold.
1563
+
1564
+ Parameters
1565
+ ----------
1566
+ thewaveform : NDArray
1567
+ Input waveform data as a 1D numpy array.
1568
+ nameroot : str
1569
+ Root name used for labeling output files and dictionary keys.
1570
+ outputroot : str
1571
+ Root path for writing output files if `outputlevel > 0`.
1572
+ samplerate : float
1573
+ Sampling rate of the waveform in Hz.
1574
+ infodict : dict
1575
+ Dictionary to store metadata about the thresholding method and value.
1576
+ thetype : str, optional
1577
+ Thresholding method to use. Options are:
1578
+ - "mad" (default): Uses Median Absolute Deviation.
1579
+ - "fracval": Uses percentile-based thresholds.
1580
+ retainthresh : float, optional
1581
+ Threshold for retaining data, between 0 and 1. Default is 0.89.
1582
+ mingap : float, optional
1583
+ Minimum gap (in seconds) to consider for merging bad point streaks. Default is 2.0.
1584
+ outputlevel : int, optional
1585
+ Level of output verbosity. If > 0, writes bad point vector to file. Default is 0.
1586
+ debug : bool, optional
1587
+ If True, prints debug information. Default is True.
1588
+
1589
+ Returns
1590
+ -------
1591
+ tuple[NDArray, float | tuple[float, float]]
1592
+ A tuple containing:
1593
+ - `thebadpts`: A 1D numpy array of the same length as `thewaveform`, with 1.0 for bad points and 0.0 for good.
1594
+ - `thresh`: The calculated threshold value(s) used for bad point detection.
1595
+ - If `thetype == "mad"`, `thresh` is a float.
1596
+ - If `thetype == "fracval"`, `thresh` is a tuple of (lower_threshold, upper_threshold).
1597
+
1598
+ Notes
1599
+ -----
1600
+ - The "mad" method uses the median and MAD to compute a sigma-based threshold.
1601
+ - The "fracval" method uses percentiles to define a range and marks values outside
1602
+ that range as bad.
1603
+ - Gap-filling logic merges bad point streaks that are closer than `mingap` seconds.
1604
+
1605
+ Examples
1606
+ --------
1607
+ >>> import numpy as np
1608
+ >>> waveform = np.random.normal(0, 1, 1000)
1609
+ >>> info = {}
1610
+ >>> badpts, threshold = findbadpts(waveform, "test", "/tmp", 100.0, info, thetype="mad")
1611
+ >>> print(f"Threshold used: {threshold}")
1612
+ """
430
1613
  # if thetype == 'triangle' or thetype == 'mad':
431
1614
  if thetype == "mad":
432
1615
  absdev = np.fabs(thewaveform - np.median(thewaveform))
@@ -438,7 +1621,7 @@ def findbadpts(
438
1621
  thresh = numsigma * sigma
439
1622
  thebadpts = np.where(absdev >= thresh, 1.0, 0.0)
440
1623
  print(
441
- "Bad point threshhold set to",
1624
+ "Bad point threshold set to",
442
1625
  "{:.3f}".format(thresh),
443
1626
  "using the",
444
1627
  thetype,
@@ -495,11 +1678,112 @@ def findbadpts(
495
1678
  return thebadpts
496
1679
 
497
1680
 
498
- def approximateentropy(waveform, m, r):
1681
+ def approximateentropy(waveform: NDArray, m: int, r: float) -> float:
1682
+ """
1683
+ Calculate the approximate entropy of a waveform.
1684
+
1685
+ Approximate entropy is a measure of the complexity or irregularity of a time series.
1686
+ It quantifies the likelihood that similar patterns of observations will not be followed
1687
+ by additional similar observations.
1688
+
1689
+ Parameters
1690
+ ----------
1691
+ waveform : array_like
1692
+ Input time series data as a 1D array or list of numerical values.
1693
+ m : int
1694
+ Length of compared run of data. Must be a positive integer.
1695
+ r : float
1696
+ Tolerance parameter. Defines the maximum difference between values to be considered
1697
+ similar. Should be a positive number, typically set to 0.1-0.2 times the standard
1698
+ deviation of the data.
1699
+
1700
+ Returns
1701
+ -------
1702
+ float
1703
+ Approximate entropy value. Lower values indicate more regularity in the data,
1704
+ while higher values indicate more complexity or randomness.
1705
+
1706
+ Notes
1707
+ -----
1708
+ The approximate entropy is calculated using the method described by Pincus (1991).
1709
+ The algorithm computes the logarithm of the ratio of the number of similar patterns
1710
+ of length m to those of length m+1, averaged over all possible patterns.
1711
+
1712
+ This implementation assumes that the input waveform is a 1D array of numerical values.
1713
+ The function is sensitive to the choice of parameters m and r, and results may vary
1714
+ depending on the data characteristics.
1715
+
1716
+ Examples
1717
+ --------
1718
+ >>> import numpy as np
1719
+ >>> waveform = [1, 2, 3, 4, 5, 4, 3, 2, 1]
1720
+ >>> apen = approximateentropy(waveform, m=2, r=0.1)
1721
+ >>> print(apen)
1722
+ 0.123456789
1723
+
1724
+ >>> # For a more complex signal
1725
+ >>> np.random.seed(42)
1726
+ >>> noisy_signal = np.random.randn(100)
1727
+ >>> apen_noisy = approximateentropy(noisy_signal, m=2, r=0.1)
1728
+ >>> print(apen_noisy)
1729
+ 0.456789123
1730
+ """
1731
+
499
1732
  def _maxdist(x_i, x_j):
1733
+ """
1734
+ Calculate the maximum absolute difference between corresponding elements of two sequences.
1735
+
1736
+ Parameters
1737
+ ----------
1738
+ x_i : array-like
1739
+ First sequence of numbers.
1740
+ x_j : array-like
1741
+ Second sequence of numbers.
1742
+
1743
+ Returns
1744
+ -------
1745
+ float
1746
+ The maximum absolute difference between corresponding elements of x_i and x_j.
1747
+
1748
+ Notes
1749
+ -----
1750
+ This function computes the Chebyshev distance (also known as the maximum metric) between two vectors.
1751
+ Both sequences must have the same length, otherwise the function will raise a ValueError.
1752
+
1753
+ Examples
1754
+ --------
1755
+ >>> _maxdist([1, 2, 3], [4, 1, 2])
1756
+ 3
1757
+ >>> _maxdist([0, 0], [1, 1])
1758
+ 1
1759
+ """
500
1760
  return max([abs(ua - va) for ua, va in zip(x_i, x_j)])
501
1761
 
502
1762
  def _phi(m):
1763
+ """
1764
+ Calculate phi value for approximate entropy calculation.
1765
+
1766
+ Parameters
1767
+ ----------
1768
+ m : int
1769
+ Length of template vectors for comparison.
1770
+
1771
+ Returns
1772
+ -------
1773
+ float
1774
+ Phi value representing the approximate entropy.
1775
+
1776
+ Notes
1777
+ -----
1778
+ This function computes the phi value used in approximate entropy calculations.
1779
+ It compares template vectors of length m and calculates the proportion of
1780
+ vectors that are within a tolerance threshold r of each other.
1781
+
1782
+ Examples
1783
+ --------
1784
+ >>> # `_phi` closes over `waveform`, `N`, and `r` from the enclosing
1785
+ >>> # function, so no standalone doctest is possible.
1786
+ """
503
1787
  x = [[waveform[j] for j in range(i, i + m - 1 + 1)] for i in range(N - m + 1)]
504
1788
  C = [len([1 for x_j in x if _maxdist(x_i, x_j) <= r]) / (N - m + 1.0) for x_i in x]
505
1789
  return (N - m + 1.0) ** (-1) * sum(np.log(C))
@@ -509,68 +1793,222 @@ def approximateentropy(waveform, m, r):
509
1793
  return abs(_phi(m + 1) - _phi(m))
510
1794
 
511
1795
 
512
- def entropy(waveform):
1796
+ def summarizerun(theinfodict: dict, getkeys: bool = False) -> str:
1797
+ """
1798
+ Summarize physiological signal quality metrics from a dictionary.
1799
+
1800
+ This function extracts specific signal quality indices from a dictionary
1801
+ containing physiological monitoring data. It can either return the metric
1802
+ values or the corresponding keys depending on the getkeys parameter.
1803
+
1804
+ Parameters
1805
+ ----------
1806
+ theinfodict : dict
1807
+ Dictionary containing physiological signal quality metrics with keys
1808
+ including 'corrcoeff_raw2pleth', 'corrcoeff_filt2pleth', 'E_sqi_mean_pleth',
1809
+ 'E_sqi_mean_bold', 'S_sqi_mean_pleth', 'S_sqi_mean_bold', 'K_sqi_mean_pleth',
1810
+ and 'K_sqi_mean_bold'.
1811
+ getkeys : bool, optional
1812
+ If True, returns a comma-separated string of all metric keys.
1813
+ If False (default), returns a comma-separated string of metric values
1814
+ corresponding to the keys in the dictionary. If a key is missing, an
1815
+ empty string is returned for that position.
1816
+
1817
+ Returns
1818
+ -------
1819
+ str
1820
+ If getkeys=True: comma-separated string of all metric keys.
1821
+ If getkeys=False: comma-separated string of metric values from the dictionary,
1822
+ with empty strings for missing keys.
1823
+
1824
+ Notes
1825
+ -----
1826
+ The function handles missing keys gracefully by returning empty strings
1827
+ for missing metrics rather than raising exceptions.
1828
+
1829
+ Examples
1830
+ --------
1831
+ >>> data = {
1832
+ ... "corrcoeff_raw2pleth": 0.85,
1833
+ ... "E_sqi_mean_pleth": 0.92
1834
+ ... }
1835
+ >>> summarizerun(data)
1836
+ '0.85,,0.92,,,,,'
1837
+
1838
+ >>> summarizerun(data, getkeys=True)
1839
+ 'corrcoeff_raw2pleth,corrcoeff_filt2pleth,E_sqi_mean_pleth,E_sqi_mean_bold,S_sqi_mean_pleth,S_sqi_mean_bold,K_sqi_mean_pleth,K_sqi_mean_bold'
1840
+ """
1841
+ keylist = [
1842
+ "corrcoeff_raw2pleth",
1843
+ "corrcoeff_filt2pleth",
1844
+ "E_sqi_mean_pleth",
1845
+ "E_sqi_mean_bold",
1846
+ "S_sqi_mean_pleth",
1847
+ "S_sqi_mean_bold",
1848
+ "K_sqi_mean_pleth",
1849
+ "K_sqi_mean_bold",
1850
+ ]
1851
+ if getkeys:
1852
+ return ",".join(keylist)
1853
+ else:
1854
+ outputline = []
1855
+ for thekey in keylist:
1856
+ try:
1857
+ outputline.append(str(theinfodict[thekey]))
1858
+ except KeyError:
1859
+ outputline.append("")
1860
+ return ",".join(outputline)
1861
+
1862
+
1863
+ def entropy(waveform: NDArray) -> float:
1864
+ """
1865
+ Calculate the entropy of a waveform.
1866
+
1867
+ Parameters
1868
+ ----------
1869
+ waveform : array-like
1870
+ Input waveform data. Should be a numeric array-like object containing
1871
+ the waveform samples.
1872
+
1873
+ Returns
1874
+ -------
1875
+ float
1876
+ The entropy value of the waveform, computed as -∑(x² * log₂(x²)) where
1877
+ x represents the waveform samples.
1878
+
1879
+ Notes
1880
+ -----
1881
+ This function computes the entropy using the formula -∑(x² * log₂(x²)),
1882
+ where x² represents the squared waveform values. The np.nan_to_num function
1883
+ is used to handle potential NaN values in the logarithm calculation.
1884
+
1885
+ Examples
1886
+ --------
1887
+ >>> import numpy as np
1888
+ >>> waveform = np.array([0.5, 0.5, 0.5, 0.5])
1889
+ >>> entropy(waveform)
1890
+ 2.0
1891
+ """
513
1892
  return -np.sum(np.square(waveform) * np.nan_to_num(np.log2(np.square(waveform))))
514
1893
 
515
1894
 
516
1895
  def calcplethquality(
517
- waveform,
518
- Fs,
519
- infodict,
520
- suffix,
521
- outputroot,
522
- S_windowsecs=5.0,
523
- K_windowsecs=60.0,
524
- E_windowsecs=1.0,
525
- detrendorder=8,
526
- outputlevel=0,
527
- initfile=True,
528
- debug=False,
529
- ):
1896
+ waveform: NDArray,
1897
+ Fs: float,
1898
+ infodict: dict,
1899
+ suffix: str,
1900
+ outputroot: str,
1901
+ S_windowsecs: float = 5.0,
1902
+ K_windowsecs: float = 60.0,
1903
+ E_windowsecs: float = 1.0,
1904
+ detrendorder: int = 8,
1905
+ outputlevel: int = 0,
1906
+ initfile: bool = True,
1907
+ debug: bool = False,
1908
+ ) -> None:
530
1909
  """
1910
+ Calculate windowed skewness, kurtosis, and entropy quality metrics for a plethysmogram.
1911
+
1912
+ This function computes three quality metrics — skewness (S), kurtosis (K), and entropy (E) —
1913
+ over sliding windows of the input waveform. These metrics are used to assess the quality
1914
+ of photoplethysmogram (PPG) signals based on the method described in Elgendi (2016).
531
1915
 
532
1916
  Parameters
533
1917
  ----------
534
- waveform: array-like
535
- The cardiac waveform to be assessed
536
- Fs: float
537
- The sample rate of the data
538
- S_windowsecs: float
539
- Skewness window duration in seconds. Defaults to 5.0 (optimal for discrimination of "good" from "acceptable"
540
- and "unfit" according to Elgendi)
541
- K_windowsecs: float
542
- Skewness window duration in seconds. Defaults to 2.0 (after Selveraj)
543
- E_windowsecs: float
544
- Entropy window duration in seconds. Defaults to 0.5 (after Selveraj)
545
- detrendorder: int
546
- Order of detrending polynomial to apply to plethysmogram.
547
- debug: boolean
548
- Turn on extended output
1918
+ waveform : array-like
1919
+ The cardiac waveform to be assessed.
1920
+ Fs : float
1921
+ The sample rate of the data in Hz.
1922
+ infodict : dict
1923
+ Dictionary to store computed quality metrics.
1924
+ suffix : str
1925
+ Suffix to append to metric keys in `infodict`.
1926
+ outputroot : str
1927
+ Root name for output files if `outputlevel > 1`.
1928
+ S_windowsecs : float, optional
1929
+ Skewness window duration in seconds. Default is 5.0 seconds.
1930
+ K_windowsecs : float, optional
1931
+ Kurtosis window duration in seconds. Default is 60.0 seconds.
1932
+ E_windowsecs : float, optional
1933
+ Entropy window duration in seconds. Default is 1.0 seconds.
1934
+ detrendorder : int, optional
1935
+ Order of the detrending polynomial applied to the plethysmogram. Default is 8.
1936
+ outputlevel : int, optional
1937
+ Level of output verbosity. If > 1, time-series data will be written to files.
1938
+ initfile : bool, optional
1939
+ Whether to initialize output files. Default is True.
1940
+ debug : bool, optional
1941
+ If True, print debug information. Default is False.
549
1942
 
550
1943
  Returns
551
1944
  -------
552
- S_sqi_mean: float
553
- The mean value of the quality index over all time
554
- S_std_mean: float
555
- The standard deviation of the quality index over all time
556
- S_waveform: array
557
- The quality metric over all timepoints
558
- K_sqi_mean: float
559
- The mean value of the quality index over all time
560
- K_std_mean: float
561
- The standard deviation of the quality index over all time
562
- K_waveform: array
563
- The quality metric over all timepoints
564
- E_sqi_mean: float
565
- The mean value of the quality index over all time
566
- E_std_mean: float
567
- The standard deviation of the quality index over all time
568
- E_waveform: array
569
- The quality metric over all timepoints
570
-
571
-
572
- Calculates the windowed skewness, kurtosis, and entropy quality metrics described in Elgendi, M.
573
- "Optimal Signal Quality Index for Photoplethysmogram Signals". Bioengineering 2016, Vol. 3, Page 21 3, 21 (2016).
1945
+ None
1946
+ All generated values are returned in infodict
1947
+ tuple
1948
+ A tuple containing the following elements in order:
1949
+
1950
+ - S_sqi_mean : float
1951
+ Mean value of the skewness quality index over all time.
1952
+ - S_sqi_std : float
1953
+ Standard deviation of the skewness quality index over all time.
1954
+ - S_waveform : array
1955
+ The skewness quality metric over all timepoints.
1956
+ - K_sqi_mean : float
1957
+ Mean value of the kurtosis quality index over all time.
1958
+ - K_sqi_std : float
1959
+ Standard deviation of the kurtosis quality index over all time.
1960
+ - K_waveform : array
1961
+ The kurtosis quality metric over all timepoints.
1962
+ - E_sqi_mean : float
1963
+ Mean value of the entropy quality index over all time.
1964
+ - E_sqi_std : float
1965
+ Standard deviation of the entropy quality index over all time.
1966
+ - E_waveform : array
1967
+ The entropy quality metric over all timepoints.
1968
+
1969
+ Notes
1970
+ -----
1971
+ The function applies a detrending polynomial to the input waveform before computing
1972
+ the quality metrics. Window sizes are rounded to the nearest odd number of samples
1973
+ to ensure symmetric windows.
1974
+
1975
+ The following values are put into infodict:
1976
+ - S_sqi_mean : float
1977
+ Mean value of the skewness quality index over all time.
1978
+ - S_sqi_std : float
1979
+ Standard deviation of the skewness quality index over all time.
1980
+ - S_waveform : array
1981
+ The skewness quality metric over all timepoints.
1982
+ - K_sqi_mean : float
1983
+ Mean value of the kurtosis quality index over all time.
1984
+ - K_sqi_std : float
1985
+ Standard deviation of the kurtosis quality index over all time.
1986
+ - K_waveform : array
1987
+ The kurtosis quality metric over all timepoints.
1988
+ - E_sqi_mean : float
1989
+ Mean value of the entropy quality index over all time.
1990
+ - E_sqi_std : float
1991
+ Standard deviation of the entropy quality index over all time.
1992
+ - E_waveform : array
1993
+ The entropy quality metric over all timepoints.
1994
+
1995
+ References
1996
+ ----------
1997
+ Elgendi, M. "Optimal Signal Quality Index for Photoplethysmogram Signals".
1998
+ Bioengineering 2016, Vol. 3, Page 21 (2016).
1999
+
2000
+ Examples
2001
+ --------
2002
+ >>> import numpy as np
2003
+ >>> from scipy.stats import skew, kurtosis
2004
+ >>> waveform = np.random.randn(1000)
2005
+ >>> Fs = 100.0
2006
+ >>> infodict = {}
2007
+ >>> suffix = "_test"
2008
+ >>> outputroot = "test_output"
2009
+ >>> S_mean, S_std, S_wave, K_mean, K_std, K_wave, E_mean, E_std, E_wave = calcplethquality(
2010
+ ... waveform, Fs, infodict, suffix, outputroot
2011
+ ... )
574
2012
  """
575
2013
  # detrend the waveform
576
2014
  dt_waveform = tide_fit.detrend(waveform, order=detrendorder, demean=True)
@@ -578,13 +2016,13 @@ def calcplethquality(
578
2016
  # calculate S_sqi and K_sqi over a sliding window. Window size should be an odd number of points.
579
2017
  S_windowpts = int(np.round(S_windowsecs * Fs, 0))
580
2018
  S_windowpts += 1 - S_windowpts % 2
581
- S_waveform = dt_waveform * 0.0
2019
+ S_waveform = np.zeros_like(dt_waveform)
582
2020
  K_windowpts = int(np.round(K_windowsecs * Fs, 0))
583
2021
  K_windowpts += 1 - K_windowpts % 2
584
- K_waveform = dt_waveform * 0.0
2022
+ K_waveform = np.zeros_like(dt_waveform)
585
2023
  E_windowpts = int(np.round(E_windowsecs * Fs, 0))
586
2024
  E_windowpts += 1 - E_windowpts % 2
587
- E_waveform = dt_waveform * 0.0
2025
+ E_waveform = np.zeros_like(dt_waveform)
588
2026
 
589
2027
  if debug:
590
2028
  print("S_windowsecs, S_windowpts:", S_windowsecs, S_windowpts)
@@ -606,17 +2044,23 @@ def calcplethquality(
606
2044
  E_waveform[i] = approximateentropy(dt_waveform[startpt : endpt + 1], 2, r)
607
2045
 
608
2046
  S_sqi_mean = np.mean(S_waveform)
2047
+ S_sqi_median = np.median(S_waveform)
609
2048
  S_sqi_std = np.std(S_waveform)
610
2049
  K_sqi_mean = np.mean(K_waveform)
2050
+ K_sqi_median = np.median(K_waveform)
611
2051
  K_sqi_std = np.std(K_waveform)
612
2052
  E_sqi_mean = np.mean(E_waveform)
2053
+ E_sqi_median = np.median(E_waveform)
613
2054
  E_sqi_std = np.std(E_waveform)
614
2055
 
615
2056
  infodict["S_sqi_mean" + suffix] = S_sqi_mean
2057
+ infodict["S_sqi_median" + suffix] = S_sqi_median
616
2058
  infodict["S_sqi_std" + suffix] = S_sqi_std
617
2059
  infodict["K_sqi_mean" + suffix] = K_sqi_mean
2060
+ infodict["K_sqi_median" + suffix] = K_sqi_median
618
2061
  infodict["K_sqi_std" + suffix] = K_sqi_std
619
2062
  infodict["E_sqi_mean" + suffix] = E_sqi_mean
2063
+ infodict["E_sqi_median" + suffix] = E_sqi_median
620
2064
  infodict["E_sqi_std" + suffix] = E_sqi_std
621
2065
 
622
2066
  if outputlevel > 1:
@@ -647,21 +2091,95 @@ def calcplethquality(
647
2091
 
648
2092
 
649
2093
  def getphysiofile(
650
- waveformfile,
651
- inputfreq,
652
- inputstart,
653
- slicetimeaxis,
654
- stdfreq,
655
- stdpoints,
656
- envcutoff,
657
- envthresh,
658
- timings,
659
- outputroot,
660
- slop=0.25,
661
- outputlevel=0,
662
- iscardiac=True,
663
- debug=False,
664
- ):
2094
+ waveformfile: str,
2095
+ inputfreq: float,
2096
+ inputstart: float | None,
2097
+ slicetimeaxis: NDArray,
2098
+ stdfreq: float,
2099
+ stdpoints: int,
2100
+ envcutoff: float,
2101
+ envthresh: float,
2102
+ timings: list,
2103
+ outputroot: str,
2104
+ slop: float = 0.25,
2105
+ outputlevel: int = 0,
2106
+ iscardiac: bool = True,
2107
+ debug: bool = False,
2108
+ ) -> tuple[NDArray, NDArray, float, int]:
2109
+ """
2110
+ Read, process, and resample physiological waveform data.
2111
+
2112
+ This function reads a physiological signal from a text file, filters and normalizes
2113
+ the signal, and resamples it to both slice-specific and standard time resolutions.
2114
+ It supports cardiac and non-cardiac signal processing, with optional debugging and
2115
+ output writing.
2116
+
2117
+ Parameters
2118
+ ----------
2119
+ waveformfile : str
2120
+ Path to the input physiological waveform file.
2121
+ inputfreq : float
2122
+ Sampling frequency of the input waveform. If negative, the frequency is
2123
+ inferred from the file.
2124
+ inputstart : float or None
2125
+ Start time of the input waveform. If None, defaults to 0.0.
2126
+ slicetimeaxis : array_like
2127
+ Time axis corresponding to slice acquisition times.
2128
+ stdfreq : float
2129
+ Standard sampling frequency for resampling.
2130
+ stdpoints : int
2131
+ Number of points for the standard time axis.
2132
+ envcutoff : float
2133
+ Cutoff frequency for envelope filtering.
2134
+ envthresh : float
2135
+ Threshold for envelope normalization.
2136
+ timings : list
2137
+ List to append timing information for logging.
2138
+ outputroot : str
2139
+ Root name for output files.
2140
+ slop : float, optional
2141
+ Tolerance for time alignment check (default is 0.25).
2142
+ outputlevel : int, optional
2143
+ Level of output writing (default is 0).
2144
+ iscardiac : bool, optional
2145
+ Flag indicating if the signal is cardiac (default is True).
2146
+ debug : bool, optional
2147
+ Enable debug printing (default is False).
2148
+
2149
+ Returns
2150
+ -------
2151
+ waveform_sliceres : NDArray
2152
+ Physiological signal resampled to slice time resolution.
2153
+ waveform_stdres : NDArray
2154
+ Physiological signal resampled to standard time resolution.
2155
+ inputfreq : float
2156
+ The actual input sampling frequency used.
2157
+ len(waveform_fullres) : int
2158
+ Length of the original waveform data.
2159
+
2160
+ Notes
2161
+ -----
2162
+ - The function reads the waveform file using `tide_io.readvectorsfromtextfile`.
2163
+ - Signal filtering and normalization are performed using `cleanphysio`.
2164
+ - Resampling is done using `tide_resample.doresample`.
2165
+ - If `iscardiac` is True, raw and cleaned signals are saved to files when `outputlevel > 1`.
2166
+
2167
+ Examples
2168
+ --------
2169
+ >>> waveform_sliceres, waveform_stdres, freq, length = getphysiofile(
2170
+ ... waveformfile="physio.txt",
2171
+ ... inputfreq=100.0,
2172
+ ... inputstart=0.0,
2173
+ ... slicetimeaxis=np.linspace(0, 10, 50),
2174
+ ... stdfreq=25.0,
2175
+ ... stdpoints=100,
2176
+ ... envcutoff=0.5,
2177
+ ... envthresh=0.1,
2178
+ ... timings=[],
2179
+ ... outputroot="output",
2180
+ ... debug=False
2181
+ ... )
2182
+ """
665
2183
  if debug:
666
2184
  print("Entering getphysiofile")
667
2185
  print("Reading physiological signal from file")
@@ -771,7 +2289,7 @@ def getphysiofile(
771
2289
  method="univariate",
772
2290
  padlen=0,
773
2291
  )
774
- )
2292
+ )[0]
775
2293
 
776
2294
  timings.append(
777
2295
  [
@@ -787,7 +2305,62 @@ def getphysiofile(
787
2305
  return waveform_sliceres, waveform_stdres, inputfreq, len(waveform_fullres)
788
2306
 
789
2307
 
790
- def readextmask(thefilename, nim_hdr, xsize, ysize, numslices, debug=False):
2308
+ def readextmask(
2309
+ thefilename: str,
2310
+ nim_hdr: dict,
2311
+ xsize: int,
2312
+ ysize: int,
2313
+ numslices: int,
2314
+ debug: bool = False,
2315
+ ) -> NDArray:
2316
+ """
2317
+ Read and validate external mask from NIfTI file.
2318
+
2319
+ This function reads a mask from a NIfTI file and performs validation checks
2320
+ to ensure compatibility with the input fMRI data dimensions. The mask must
2321
+ have exactly 3 dimensions and match the spatial dimensions of the fMRI data.
2322
+
2323
+ Parameters
2324
+ ----------
2325
+ thefilename : str
2326
+ Path to the NIfTI file containing the mask
2327
+ nim_hdr : dict
2328
+ Header information from the fMRI data
2329
+ xsize : int
2330
+ X dimension size of the fMRI data
2331
+ ysize : int
2332
+ Y dimension size of the fMRI data
2333
+ numslices : int
2334
+ Number of slices in the fMRI data
2335
+ debug : bool, optional
2336
+ If True, print debug information about mask dimensions (default is False)
2337
+
2338
+ Returns
2339
+ -------
2340
+ NDArray
2341
+ The mask data array with shape (xsize, ysize, numslices)
2342
+
2343
+ Raises
2344
+ ------
2345
+ ValueError
2346
+ If mask dimensions do not match fMRI data dimensions or if mask has
2347
+ more than 3 dimensions
2348
+
2349
+ Notes
2350
+ -----
2351
+ The function performs the following validation checks:
2352
+ 1. Reads mask from NIfTI file using tide_io.readfromnifti
2353
+ 2. Parses NIfTI dimensions using tide_io.parseniftidims
2354
+ 3. Validates that mask spatial dimensions match fMRI data dimensions
2355
+ 4. Ensures mask has exactly 3 dimensions (no time dimension allowed)
2356
+
2357
+ Examples
2358
+ --------
2359
+ >>> import numpy as np
2360
+ >>> mask_data = readextmask('mask.nii', fmri_header, 64, 64, 30)
2361
+ >>> print(mask_data.shape)
2362
+ (64, 64, 30)
2363
+ """
791
2364
  (
792
2365
  extmask,
793
2366
  extmask_data,
@@ -812,32 +2385,60 @@ def readextmask(thefilename, nim_hdr, xsize, ysize, numslices, debug=False):
812
2385
  return extmask_data
813
2386
 
814
2387
 
815
- def checkcardmatch(reference, candidate, samplerate, refine=True, zeropadding=0, debug=False):
2388
+ def checkcardmatch(
2389
+ reference: NDArray,
2390
+ candidate: NDArray,
2391
+ samplerate: float,
2392
+ refine: bool = True,
2393
+ zeropadding: int = 0,
2394
+ debug: bool = False,
2395
+ ) -> tuple[float, float, str]:
816
2396
  """
2397
+ Compare two cardiac waveforms using cross-correlation and peak fitting.
2398
+
2399
+ This function performs a cross-correlation between a reference and a candidate
2400
+ cardiac waveform after applying a non-causal cardiac filter. It then fits a
2401
+ Gaussian to the cross-correlation peak to estimate the time delay and
2402
+ correlation strength.
817
2403
 
818
2404
  Parameters
819
2405
  ----------
820
- reference: 1D numpy array
821
- The cardiac waveform to compare to
822
- candidate: 1D numpy array
823
- The cardiac waveform to be assessed
824
- samplerate: float
825
- The sample rate of the data in Hz
826
- refine: bool, optional
827
- Whether to refine the peak fit. Default is True.
828
- zeropadding: int, optional
829
- Specify the length of correlation padding to use.
830
- debug: bool, optional
831
- Output additional information for debugging
2406
+ reference : 1D numpy array
2407
+ The cardiac waveform to compare to.
2408
+ candidate : 1D numpy array
2409
+ The cardiac waveform to be assessed.
2410
+ samplerate : float
2411
+ The sample rate of the data in Hz.
2412
+ refine : bool, optional
2413
+ Whether to refine the peak fit. Default is True.
2414
+ zeropadding : int, optional
2415
+ Specify the length of correlation padding to use. Default is 0.
2416
+ debug : bool, optional
2417
+ Output additional information for debugging. Default is False.
832
2418
 
833
2419
  Returns
834
2420
  -------
835
- maxval: float
836
- The maximum value of the crosscorrelation function
837
- maxdelay: float
2421
+ maxval : float
2422
+ The maximum value of the crosscorrelation function.
2423
+ maxdelay : float
838
2424
  The time, in seconds, where the maximum crosscorrelation occurs.
839
- failreason: flag
840
- Reason why the fit failed (0 if no failure)
2425
+ failreason : int
2426
+ Reason why the fit failed (0 if no failure).
2427
+
2428
+ Notes
2429
+ -----
2430
+ The function applies a cardiac filter to both waveforms before computing
2431
+ the cross-correlation. A Gaussian fit is used to estimate the peak location
2432
+ and strength within a predefined search range of ±2 seconds around the
2433
+ initial peak.
2434
+
2435
+ Examples
2436
+ --------
2437
+ >>> import numpy as np
2438
+ >>> reference = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000))
2439
+ >>> candidate = np.sin(2 * np.pi * 1.2 * np.linspace(0, 10, 1000) + 0.1)
2440
+ >>> maxval, maxdelay, failreason = checkcardmatch(reference, candidate, 100)
2441
+ >>> print(f"Max correlation: {maxval}, Delay: {maxdelay}s")
841
2442
  """
842
2443
  thecardfilt = tide_filt.NoncausalFilter(filtertype="cardiac")
843
2444
  trimlength = np.min([len(reference), len(candidate)])
@@ -898,15 +2499,78 @@ def checkcardmatch(reference, candidate, samplerate, refine=True, zeropadding=0,
898
2499
 
899
2500
 
900
2501
  def cardiaccycleaverage(
901
- sourcephases,
902
- destinationphases,
903
- waveform,
904
- procpoints,
905
- congridbins,
906
- gridkernel,
907
- centric,
908
- cyclic=True,
909
- ):
2502
+ sourcephases: NDArray,
2503
+ destinationphases: NDArray,
2504
+ waveform: NDArray,
2505
+ procpoints: int,
2506
+ congridbins: int,
2507
+ gridkernel: str,
2508
+ centric: bool,
2509
+ cache: bool = True,
2510
+ cyclic: bool = True,
2511
+ ) -> NDArray:
2512
+ """
2513
+ Compute the average waveform over a cardiac cycle using phase-based resampling.
2514
+
2515
+ This function performs phase-resolved averaging of a waveform signal over a
2516
+ cardiac cycle. It uses a resampling technique to map source phase values to
2517
+ destination phases, accumulating weighted contributions to produce an averaged
2518
+ waveform. The result is normalized and adjusted to remove artifacts from low
2519
+ weight regions.
2520
+
2521
+ Parameters
2522
+ ----------
2523
+ sourcephases : array-like
2524
+ Array of source phase values (in radians) corresponding to the waveform data.
2525
+ destinationphases : array-like
2526
+ Array of destination phase values (in radians) where the averaged waveform
2527
+ will be computed.
2528
+ waveform : array-like
2529
+ Array of waveform values to be averaged.
2530
+ procpoints : array-like
2531
+ Array of indices indicating which points in `waveform` and `sourcephases`
2532
+ should be processed.
2533
+ congridbins : int
2534
+ Number of bins used in the resampling process.
2535
+ gridkernel : callable
2536
+ Kernel function used for interpolation during resampling.
2537
+ centric : bool
2538
+ If True, phase values are treated as centric (e.g., centered around 0).
2539
+ If False, phase values are treated as cyclic (e.g., 0 to 2π).
2540
+ cache : bool, optional
2541
+ If True, use cached results for repeated computations (default is True).
2542
+ cyclic : bool, optional
2543
+ If True, treat phase values as cyclic (default is True).
2544
+
2545
+ Returns
2546
+ -------
2547
+ tuple of ndarray
2548
+ A tuple containing:
2549
+ - `rawapp_bypoint`: The normalized averaged waveform values for each
2550
+ destination phase.
2551
+ - `weight_bypoint`: The total weight for each destination phase.
2552
+
2553
+ Notes
2554
+ -----
2555
+ The function applies a threshold to weights: only points with weights greater
2556
+ than 1/50th of the maximum weight are considered valid. These points are then
2557
+ normalized and shifted to start from zero.
2558
+
2559
+ Examples
2560
+ --------
2561
+ >>> import numpy as np
2562
+ >>> sourcephases = np.linspace(0, 2*np.pi, 100)
2563
+ >>> destinationphases = np.linspace(0, 2*np.pi, 50)
2564
+ >>> waveform = np.sin(sourcephases)
2565
+ >>> procpoints = np.arange(100)
2566
+ >>> congridbins = 10
2567
+ >>> gridkernel = lambda x: np.exp(-x**2 / 2)
2568
+ >>> centric = False
2569
+ >>> avg_waveform, weights = cardiaccycleaverage(
2570
+ ... sourcephases, destinationphases, waveform, procpoints,
2571
+ ... congridbins, gridkernel, centric
2572
+ ... )
2573
+ """
910
2574
  rawapp_bypoint = np.zeros(len(destinationphases), dtype=np.float64)
911
2575
  weight_bypoint = np.zeros(len(destinationphases), dtype=np.float64)
912
2576
  for t in procpoints:
@@ -916,13 +2580,14 @@ def cardiaccycleaverage(
916
2580
  1.0,
917
2581
  congridbins,
918
2582
  kernel=gridkernel,
2583
+ cache=cache,
919
2584
  cyclic=cyclic,
920
2585
  )
921
2586
  for i in range(len(theindices)):
922
2587
  weight_bypoint[theindices[i]] += theweights[i]
923
2588
  rawapp_bypoint[theindices[i]] += theweights[i] * waveform[t]
924
2589
  rawapp_bypoint = np.where(
925
- weight_bypoint > np.max(weight_bypoint) / 50.0,
2590
+ weight_bypoint > (np.max(weight_bypoint) / 50.0),
926
2591
  np.nan_to_num(rawapp_bypoint / weight_bypoint),
927
2592
  0.0,
928
2593
  )
@@ -933,7 +2598,47 @@ def cardiaccycleaverage(
933
2598
  return rawapp_bypoint, weight_bypoint
934
2599
 
935
2600
 
936
- def circularderivs(timecourse):
2601
+ def circularderivs(timecourse: NDArray) -> tuple[NDArray, float, float]:
2602
+ """
2603
+ Compute circular first derivatives and their extremal values.
2604
+
2605
+ This function calculates the circular first derivative of a time course,
2606
+ which is the difference between consecutive elements with the last element
2607
+ wrapped around to the first. It then returns the maximum and minimum values
2608
+ of these derivatives along with their indices.
2609
+
2610
+ Parameters
2611
+ ----------
2612
+ timecourse : array-like
2613
+ Input time course data as a 1D array or sequence of numerical values.
2614
+
2615
+ Returns
2616
+ -------
2617
+ tuple
2618
+ A tuple containing four elements:
2619
+ - max_derivative : float
2620
+ The maximum value of the circular first derivative
2621
+ - argmax_index : int
2622
+ The index of the maximum derivative value
2623
+ - min_derivative : float
2624
+ The minimum value of the circular first derivative
2625
+ - argmin_index : int
2626
+ The index of the minimum derivative value
2627
+
2628
+ Notes
2629
+ -----
2630
+ The circular first derivative is computed as:
2631
+ ``first_deriv[i] = timecourse[i+1] - timecourse[i]`` for i < n-1,
2632
+ and ``first_deriv[n-1] = timecourse[0] - timecourse[n-1]``.
2633
+
2634
+ Examples
2635
+ --------
2636
+ >>> import numpy as np
2637
+ >>> timecourse = [1, 2, 3, 2, 1]
2638
+ >>> max_val, max_idx, min_val, min_idx = circularderivs(timecourse)
2639
+ >>> print(f"Max derivative: {max_val} at index {max_idx}")
2640
+ >>> print(f"Min derivative: {min_val} at index {min_idx}")
2641
+ """
937
2642
  firstderiv = np.diff(timecourse, append=[timecourse[0]])
938
2643
  return (
939
2644
  np.max(firstderiv),
@@ -943,301 +2648,1303 @@ def circularderivs(timecourse):
943
2648
  )
944
2649
 
945
2650
 
946
- def phaseproject(
947
- datatoproject,
948
- means,
949
- destpoints,
950
- numsteps,
951
- timings,
952
- cardfromfmri_sliceres,
953
- instantaneous_cardiacphase,
954
- thispass,
955
- numpasses,
956
- args,
957
- outputroot,
958
- slicesamplerate,
959
- pleth_sliceres,
960
- mrsamplerate,
961
- projmask_byslice,
962
- cardphasevals,
963
- thetimes,
964
- centric=True,
965
- passstring="",
966
- badpointlist=None,
967
- congridbins=3.0,
968
- gridkernel="kaiser",
969
- ):
970
- xsize, ysize, numslices, timepoints = datatoproject.shape
971
- # construct the destination arrays
972
- tide_util.logmem("before making destination arrays")
973
- app = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64)
974
- app_byslice = app.reshape((xsize * ysize, numslices, destpoints))
975
- cine = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64)
976
- cine_byslice = cine.reshape((xsize * ysize, numslices, destpoints))
977
- rawapp = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64)
978
- rawapp_byslice = rawapp.reshape((xsize * ysize, numslices, destpoints))
979
- corrected_rawapp = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64)
980
- corrected_rawapp_byslice = rawapp.reshape((xsize * ysize, numslices, destpoints))
981
- normapp = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64)
982
- normapp_byslice = normapp.reshape((xsize * ysize, numslices, destpoints))
983
- weights = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64)
984
- weight_byslice = weights.reshape((xsize * ysize, numslices, destpoints))
985
- derivatives = np.zeros((xsize, ysize, numslices, 4), dtype=np.float64)
986
- derivatives_byslice = derivatives.reshape((xsize * ysize, numslices, 4))
987
-
988
- timings.append(["Output arrays allocated" + passstring, time.time(), None, None])
989
-
990
- if centric:
991
- outphases = np.linspace(-np.pi, np.pi, num=destpoints, endpoint=False)
992
- else:
993
- outphases = np.linspace(0.0, 2.0 * np.pi, num=destpoints, endpoint=False)
994
- phasestep = outphases[1] - outphases[0]
995
-
996
- #######################################################################################################
997
- #
998
- # now do the phase projection
999
- #
1000
- #
1001
- datatoproject_byslice = datatoproject.reshape((xsize * ysize, numslices, timepoints))
1002
- means_byslice = means.reshape((xsize * ysize, numslices))
1003
-
1004
- timings.append(["Phase projection to image started" + passstring, time.time(), None, None])
1005
- print("Starting phase projection")
1006
- proctrs = range(timepoints) # proctrs is the list of all fmri trs to be projected
1007
- procpoints = range(
1008
- timepoints * numsteps
1009
- ) # procpoints is the list of all sliceres datapoints to be projected
1010
- if badpointlist is not None:
1011
- censortrs = np.zeros(timepoints, dtype="int")
1012
- censorpoints = np.zeros(timepoints * numsteps, dtype="int")
1013
- censortrs[np.where(badpointlist > 0.0)[0] // numsteps] = 1
1014
- censorpoints[np.where(badpointlist > 0.0)[0]] = 1
1015
- proctrs = np.where(censortrs < 1)[0]
1016
- procpoints = np.where(censorpoints < 1)[0]
1017
-
1018
- # do phase averaging
1019
- app_bypoint = cardiaccycleaverage(
1020
- instantaneous_cardiacphase,
2651
+ def _procOnePhaseProject(slice, sliceargs, **kwargs):
2652
+ """
2653
+ Process a single phase project for fMRI data resampling and averaging.
2654
+
2655
+ This function performs temporal resampling of fMRI data along the phase dimension
2656
+ using a congrid-based interpolation scheme. It updates weight, raw application,
2657
+ and cine data arrays based on the resampled values.
2658
+
2659
+ Parameters
2660
+ ----------
2661
+ slice : int
2662
+ The slice index to process.
2663
+ sliceargs : tuple
2664
+ A tuple containing the following elements:
2665
+ - validlocslist : list of arrays
2666
+ List of valid location indices for each slice.
2667
+ - proctrs : array-like
2668
+ Time indices to process.
2669
+ - demeandata_byslice : ndarray
2670
+ Demeaned fMRI data organized by slice and time.
2671
+ - fmri_data_byslice : ndarray
2672
+ Raw fMRI data organized by slice and time.
2673
+ - outphases : array-like
2674
+ Output phase values for resampling.
2675
+ - cardphasevals : ndarray
2676
+ Cardinality of phase values for each slice and time.
2677
+ - congridbins : int
2678
+ Number of bins for congrid interpolation.
2679
+ - gridkernel : str
2680
+ Interpolation kernel to use.
2681
+ - weights_byslice : ndarray
2682
+ Weight array to be updated.
2683
+ - cine_byslice : ndarray
2684
+ Cine data array to be updated.
2685
+ - destpoints : int
2686
+ Number of destination points.
2687
+ - rawapp_byslice : ndarray
2688
+ Raw application data array to be updated.
2689
+ **kwargs : dict
2690
+ Additional options to override default settings:
2691
+ - cache : bool, optional
2692
+ Whether to use caching in congrid (default: True).
2693
+ - debug : bool, optional
2694
+ Whether to enable debug mode (default: False).
2695
+
2696
+ Returns
2697
+ -------
2698
+ tuple
2699
+ A tuple containing:
2700
+ - slice : int
2701
+ The input slice index.
2702
+ - rawapp_byslice : ndarray
2703
+ Updated raw application data for the slice.
2704
+ - cine_byslice : ndarray
2705
+ Updated cine data for the slice.
2706
+ - weights_byslice : ndarray
2707
+ Updated weights for the slice.
2708
+ - validlocs : array-like
2709
+ Valid location indices for the slice.
2710
+
2711
+ Notes
2712
+ -----
2713
+ This function modifies the input arrays `weights_byslice`, `rawapp_byslice`,
2714
+ and `cine_byslice` in-place. The function assumes that the data has already
2715
+ been preprocessed and organized into slices and time points.
2716
+
2717
+ Examples
2718
+ --------
2719
+ >>> slice_idx = 0
2720
+ >>> args = (validlocslist, proctrs, demeandata_byslice, fmri_data_byslice,
2721
+ ... outphases, cardphasevals, congridbins, gridkernel,
2722
+ ... weights_byslice, cine_byslice, destpoints, rawapp_byslice)
2723
+ >>> result = _procOnePhaseProject(slice_idx, args, cache=False)
2724
+ """
2725
+ options = {
2726
+ "cache": True,
2727
+ "debug": False,
2728
+ }
2729
+ options.update(kwargs)
2730
+ cache = options["cache"]
2731
+ debug = options["debug"]
2732
+ (
2733
+ validlocslist,
2734
+ proctrs,
2735
+ demeandata_byslice,
2736
+ fmri_data_byslice,
1021
2737
  outphases,
1022
- cardfromfmri_sliceres,
1023
- procpoints,
2738
+ cardphasevals,
1024
2739
  congridbins,
1025
2740
  gridkernel,
1026
- centric,
1027
- cyclic=True,
1028
- )
1029
- if thispass == numpasses - 1:
1030
- tide_io.writebidstsv(
1031
- outputroot + "_desc-cardiaccyclefromfmri_timeseries",
1032
- app_bypoint,
1033
- 1.0 / (outphases[1] - outphases[0]),
1034
- starttime=outphases[0],
1035
- columns=["cardiaccyclefromfmri"],
1036
- append=False,
1037
- debug=args.debug,
1038
- )
1039
-
1040
- # now do time averaging
1041
- lookaheadval = int(slicesamplerate / 4.0)
1042
- print("lookaheadval = ", lookaheadval)
1043
- wrappedcardiacphase = tide_math.phasemod(instantaneous_cardiacphase, centric=centric)
1044
- max_peaks, min_peaks = tide_fit.peakdetect(wrappedcardiacphase, lookahead=lookaheadval)
1045
- # start on a maximum
1046
- if max_peaks[0][0] > min_peaks[0][0]:
1047
- min_peaks = min_peaks[1:]
1048
- # work only with pairs
1049
- if len(max_peaks) > len(min_peaks):
1050
- max_peaks = max_peaks[:-1]
1051
-
1052
- zerophaselocs = []
1053
- for idx, peak in enumerate(max_peaks):
1054
- minloc = min_peaks[idx][0]
1055
- maxloc = max_peaks[idx][0]
1056
- minval = min_peaks[idx][1]
1057
- maxval = max_peaks[idx][1]
1058
- if minloc > 0:
1059
- if wrappedcardiacphase[minloc - 1] < wrappedcardiacphase[minloc]:
1060
- minloc -= 1
1061
- minval = wrappedcardiacphase[minloc]
1062
- phasediff = minval - (maxval - 2.0 * np.pi)
1063
- timediff = minloc - maxloc
1064
- zerophaselocs.append(1.0 * minloc - (minval - outphases[0]) * timediff / phasediff)
1065
- # print(idx, [maxloc, maxval], [minloc, minval], phasediff, timediff, zerophaselocs[-1])
1066
- instantaneous_cardiactime = instantaneous_cardiacphase * 0.0
1067
-
1068
- whichpeak = 0
1069
- for t in procpoints:
1070
- if whichpeak < len(zerophaselocs) - 1:
1071
- if t > zerophaselocs[whichpeak + 1]:
1072
- whichpeak += 1
1073
- if t > zerophaselocs[whichpeak]:
1074
- instantaneous_cardiactime[t] = (t - zerophaselocs[whichpeak]) / slicesamplerate
1075
- # print(t, whichpeak, zerophaselocs[whichpeak], instantaneous_cardiactime[t])
1076
- maxtime = (
1077
- np.ceil(
1078
- int(
1079
- 1.02
1080
- * tide_stats.getfracval(instantaneous_cardiactime, 0.98)
1081
- // args.pulsereconstepsize
2741
+ weights_byslice,
2742
+ cine_byslice,
2743
+ destpoints,
2744
+ rawapp_byslice,
2745
+ ) = sliceargs
2746
+ # now smooth the projected data along the time dimension
2747
+ validlocs = validlocslist[slice]
2748
+ if len(validlocs) > 0:
2749
+ for t in proctrs:
2750
+ filteredmr = -demeandata_byslice[validlocs, slice, t]
2751
+ cinemr = fmri_data_byslice[validlocs, slice, t]
2752
+ thevals, theweights, theindices = tide_resample.congrid(
2753
+ outphases,
2754
+ cardphasevals[slice, t],
2755
+ 1.0,
2756
+ congridbins,
2757
+ kernel=gridkernel,
2758
+ cache=cache,
2759
+ cyclic=True,
1082
2760
  )
2761
+ for i in range(len(theindices)):
2762
+ weights_byslice[validlocs, slice, theindices[i]] += theweights[i]
2763
+ rawapp_byslice[validlocs, slice, theindices[i]] += filteredmr
2764
+ cine_byslice[validlocs, slice, theindices[i]] += theweights[i] * cinemr
2765
+ for d in range(destpoints):
2766
+ if weights_byslice[validlocs[0], slice, d] == 0.0:
2767
+ weights_byslice[validlocs, slice, d] = 1.0
2768
+ rawapp_byslice[validlocs, slice, :] = np.nan_to_num(
2769
+ rawapp_byslice[validlocs, slice, :] / weights_byslice[validlocs, slice, :]
1083
2770
  )
1084
- * args.pulsereconstepsize
1085
- )
1086
- outtimes = np.linspace(
1087
- 0.0, maxtime, num=int(maxtime / args.pulsereconstepsize), endpoint=False
2771
+ cine_byslice[validlocs, slice, :] = np.nan_to_num(
2772
+ cine_byslice[validlocs, slice, :] / weights_byslice[validlocs, slice, :]
2773
+ )
2774
+ else:
2775
+ rawapp_byslice[:, slice, :] = 0.0
2776
+ cine_byslice[:, slice, :] = 0.0
2777
+
2778
+ return (
2779
+ slice,
2780
+ rawapp_byslice[:, slice, :],
2781
+ cine_byslice[:, slice, :],
2782
+ weights_byslice[:, slice, :],
2783
+ validlocs,
1088
2784
  )
1089
- atp_bypoint = cardiaccycleaverage(
1090
- instantaneous_cardiactime,
1091
- outtimes,
1092
- cardfromfmri_sliceres,
1093
- procpoints,
1094
- congridbins,
1095
- gridkernel,
1096
- False,
1097
- cyclic=True,
2785
+
2786
+
2787
+ def _packslicedataPhaseProject(slicenum, sliceargs):
2788
+ """
2789
+ Pack slice data for phase projection.
2790
+
2791
+ This function takes a slice number and slice arguments, then returns a
2792
+ flattened list containing all the slice arguments in order.
2793
+
2794
+ Parameters
2795
+ ----------
2796
+ slicenum : int
2797
+ The slice number identifier.
2798
+ sliceargs : list or tuple
2799
+ Collection of slice arguments to be packed into a flat list.
2800
+
2801
+ Returns
2802
+ -------
2803
+ list
2804
+ A list containing all elements from sliceargs in the same order.
2805
+
2806
+ Notes
2807
+ -----
2808
+ This function essentially performs a flattening operation on the slice
2809
+ arguments, converting them into a fixed-length list format.
2810
+
2811
+ Examples
2812
+ --------
2813
+ >>> _packslicedataPhaseProject(0, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
2814
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
2815
+ """
2816
+ return [
2817
+ sliceargs[0],
2818
+ sliceargs[1],
2819
+ sliceargs[2],
2820
+ sliceargs[3],
2821
+ sliceargs[4],
2822
+ sliceargs[5],
2823
+ sliceargs[6],
2824
+ sliceargs[7],
2825
+ sliceargs[8],
2826
+ sliceargs[9],
2827
+ sliceargs[10],
2828
+ sliceargs[11],
2829
+ ]
2830
+
2831
+
2832
+ def _unpackslicedataPhaseProject(retvals, voxelproducts):
2833
+ """
2834
+ Unpack slice data for phase project operation.
2835
+
2836
+ This function assigns sliced data from retvals to corresponding voxelproducts
2837
+ based on index mappings. It performs three simultaneous assignments using
2838
+ slicing operations on 3D arrays.
2839
+
2840
+ Parameters
2841
+ ----------
2842
+ retvals : tuple of array-like
2843
+ A tuple containing 5 elements where:
2844
+ - retvals[0], retvals[1], retvals[2], retvals[3], retvals[4]
2845
+ - retvals[4] is used as row index for slicing
2846
+ - retvals[0] is used as column index for slicing
2847
+ voxelproducts : list of array-like
2848
+ A list of 3 arrays that will be modified in-place with the sliced data.
2849
+ Each array is expected to be 3D and will be indexed using retvals[4] and retvals[0].
2850
+
2851
+ Returns
2852
+ -------
2853
+ None
2854
+ This function modifies voxelproducts in-place and does not return any value.
2855
+
2856
+ Notes
2857
+ -----
2858
+ The function performs three assignments:
2859
+ 1. voxelproducts[0][retvals[4], retvals[0], :] = retvals[1][retvals[4], :]
2860
+ 2. voxelproducts[1][retvals[4], retvals[0], :] = retvals[2][retvals[4], :]
2861
+ 3. voxelproducts[2][retvals[4], retvals[0], :] = retvals[3][retvals[4], :]
2862
+
2863
+ All arrays must be compatible for the specified slicing operations.
2864
+
2865
+ Examples
2866
+ --------
2867
+ >>> retvals = (np.array([0, 1]), np.array([[1, 2], [3, 4]]),
2868
+ ... np.array([[5, 6], [7, 8]]), np.array([[9, 10], [11, 12]]),
2869
+ ... np.array([0, 1]))
2870
+ >>> voxelproducts = [np.zeros((2, 2, 2)), np.zeros((2, 2, 2)), np.zeros((2, 2, 2))]
2871
+ >>> _unpackslicedataPhaseProject(retvals, voxelproducts)
2872
+ """
2873
+ (voxelproducts[0])[retvals[4], retvals[0], :] = (retvals[1])[retvals[4], :]
2874
+ (voxelproducts[1])[retvals[4], retvals[0], :] = (retvals[2])[retvals[4], :]
2875
+ (voxelproducts[2])[retvals[4], retvals[0], :] = (retvals[3])[retvals[4], :]
2876
+
2877
+
2878
+ def preloadcongrid(
2879
+ outphases: NDArray,
2880
+ congridbins: int,
2881
+ gridkernel: str = "kaiser",
2882
+ cyclic: bool = True,
2883
+ debug: bool = False,
2884
+ ) -> None:
2885
+ """
2886
+ Preload congrid interpolation cache for efficient subsequent calls.
2887
+
2888
+ This function preloads the congrid interpolation cache by performing a series
2889
+ of interpolation operations with different phase values. This avoids the
2890
+ computational overhead of cache initialization during subsequent calls to
2891
+ tide_resample.congrid with the same parameters.
2892
+
2893
+ Parameters
2894
+ ----------
2895
+ outphases : array-like
2896
+ Output phase values for the interpolation grid.
2897
+ congridbins : array-like
2898
+ Binning parameters for the congrid interpolation.
2899
+ gridkernel : str, optional
2900
+ Interpolation kernel to use. Default is "kaiser".
2901
+ cyclic : bool, optional
2902
+ Whether to treat the data as cyclic. Default is True.
2903
+ debug : bool, optional
2904
+ Enable debug output. Default is False.
2905
+
2906
+ Returns
2907
+ -------
2908
+ None
2909
+ This function does not return any value.
2910
+
2911
+ Notes
2912
+ -----
2913
+ This function is designed to improve performance when calling tide_resample.congrid
2914
+ multiple times with the same parameters. By preloading the cache with various
2915
+ phase values, subsequent calls will be faster as the cache is already populated.
2916
+
2917
+ Examples
2918
+ --------
2919
+ >>> import numpy as np
2920
+ >>> outphases = np.linspace(0, 2*np.pi, 100)
2921
+ >>> congridbins = [10, 20]
2922
+ >>> preloadcongrid(outphases, congridbins, gridkernel="kaiser", cyclic=True)
2923
+ """
2924
+ outphasestep = outphases[1] - outphases[0]
2925
+ outphasecenter = outphases[int(len(outphases) / 2)]
2926
+ fillargs = outphasestep * (
2927
+ np.linspace(-0.5, 0.5, 10001, endpoint=True, dtype=float) + outphasecenter
1098
2928
  )
1099
- if thispass == numpasses - 1:
1100
- tide_io.writebidstsv(
1101
- outputroot + "_desc-cardpulsefromfmri_timeseries",
1102
- atp_bypoint,
1103
- 1.0 / (outtimes[1] - outtimes[0]),
1104
- starttime=outtimes[0],
1105
- columns=["pulsefromfmri"],
1106
- append=False,
1107
- debug=args.debug,
2929
+ for thearg in fillargs:
2930
+ dummy, dummy, dummy = tide_resample.congrid(
2931
+ outphases,
2932
+ thearg,
2933
+ 1.0,
2934
+ congridbins,
2935
+ kernel=gridkernel,
2936
+ cyclic=cyclic,
2937
+ cache=True,
2938
+ debug=debug,
1108
2939
  )
1109
2940
 
1110
- if not args.verbose:
1111
- print("Phase projecting...")
1112
2941
 
1113
- # make a lowpass filter for the projected data. Limit frequency to 3 cycles per 2pi (1/6th Fs)
1114
- phaseFs = 1.0 / phasestep
1115
- phaseFc = phaseFs / 6.0
1116
- appsmoothingfilter = tide_filt.NoncausalFilter("arb", cyclic=True, padtime=0.0)
1117
- appsmoothingfilter.setfreqs(0.0, 0.0, phaseFc, phaseFc)
2942
+ def phaseprojectpass(
2943
+ numslices,
2944
+ demeandata_byslice,
2945
+ fmri_data_byslice,
2946
+ validlocslist,
2947
+ proctrs,
2948
+ weights_byslice,
2949
+ cine_byslice,
2950
+ rawapp_byslice,
2951
+ outphases,
2952
+ cardphasevals,
2953
+ congridbins,
2954
+ gridkernel,
2955
+ destpoints,
2956
+ mpcode=False,
2957
+ nprocs=1,
2958
+ alwaysmultiproc=False,
2959
+ showprogressbar=True,
2960
+ cache=True,
2961
+ debug=False,
2962
+ ):
2963
+ """
2964
+ Perform phase-encoding projection for fMRI data across slices.
2965
+
2966
+ This function projects fMRI data onto a set of phase values using congrid
2967
+ resampling, accumulating results in `rawapp_byslice` and `cine_byslice` arrays.
2968
+ It supports both single-threaded and multi-processed execution.
2969
+
2970
+ Parameters
2971
+ ----------
2972
+ numslices : int
2973
+ Number of slices to process.
2974
+ demeandata_byslice : ndarray
2975
+ Demeaned fMRI data, shape (nvoxels, nslices, ntr).
2976
+ fmri_data_byslice : ndarray
2977
+ Raw fMRI data, shape (nvoxels, nslices, ntr).
2978
+ validlocslist : list of ndarray
2979
+ List of valid voxel indices for each slice.
2980
+ proctrs : ndarray
2981
+ Timepoints to process.
2982
+ weights_byslice : ndarray
2983
+ Weight array, shape (nvoxels, nslices, ndestpoints).
2984
+ cine_byslice : ndarray
2985
+ Cine data array, shape (nvoxels, nslices, ndestpoints).
2986
+ rawapp_byslice : ndarray
2987
+ Raw application data array, shape (nvoxels, nslices, ndestpoints).
2988
+ outphases : ndarray
2989
+ Output phase values.
2990
+ cardphasevals : ndarray
2991
+ Cardinal phase values for each slice and timepoint, shape (nslices, ntr).
2992
+ congridbins : int
2993
+ Number of bins for congrid resampling.
2994
+ gridkernel : str
2995
+ Kernel to use for congrid resampling.
2996
+ destpoints : int
2997
+ Number of destination points.
2998
+ mpcode : bool, optional
2999
+ If True, use multiprocessing. Default is False.
3000
+ nprocs : int, optional
3001
+ Number of processes to use if `mpcode` is True. Default is 1.
3002
+ alwaysmultiproc : bool, optional
3003
+ If True, always use multiprocessing even for small datasets. Default is False.
3004
+ showprogressbar : bool, optional
3005
+ If True, show progress bar. Default is True.
3006
+ cache : bool, optional
3007
+ If True, enable caching for congrid. Default is True.
3008
+ debug : bool, optional
3009
+ If True, enable debug output. Default is False.
1118
3010
 
1119
- # setup for aliased correlation if we're going to do it
1120
- if args.doaliasedcorrelation and (thispass == numpasses - 1):
1121
- if args.cardiacfilename:
1122
- signal_sliceres = pleth_sliceres
1123
- # signal_stdres = pleth_stdres
1124
- else:
1125
- signal_sliceres = cardfromfmri_sliceres
1126
- # signal_stdres = dlfilteredcard_stdres
1127
- corrsearchvals = (
1128
- np.linspace(0.0, args.aliasedcorrelationwidth, num=args.aliasedcorrelationpts)
1129
- - args.aliasedcorrelationwidth / 2.0
1130
- )
1131
- theAliasedCorrelator = tide_corr.AliasedCorrelator(
1132
- signal_sliceres,
1133
- slicesamplerate,
1134
- numsteps,
1135
- )
1136
- thecorrfunc = np.zeros(
1137
- (xsize, ysize, numslices, args.aliasedcorrelationpts), dtype=np.float64
3011
+ Returns
3012
+ -------
3013
+ None
3014
+ The function modifies `weights_byslice`, `cine_byslice`, and `rawapp_byslice` in-place.
3015
+
3016
+ Notes
3017
+ -----
3018
+ This function is typically used in the context of phase-encoded fMRI analysis.
3019
+ It applies a congrid-based resampling technique to project data onto a specified
3020
+ phase grid, accumulating weighted contributions in the output arrays.
3021
+
3022
+ Examples
3023
+ --------
3024
+ >>> phaseprojectpass(
3025
+ ... numslices=10,
3026
+ ... demeandata_byslice=demean_data,
3027
+ ... fmri_data_byslice=fmri_data,
3028
+ ... validlocslist=valid_locs_list,
3029
+ ... proctrs=tr_list,
3030
+ ... weights_byslice=weights,
3031
+ ... cine_byslice=cine_data,
3032
+ ... rawapp_byslice=rawapp_data,
3033
+ ... outphases=phase_vals,
3034
+ ... cardphasevals=card_phase_vals,
3035
+ ... congridbins=100,
3036
+ ... gridkernel='gaussian',
3037
+ ... destpoints=50,
3038
+ ... mpcode=False,
3039
+ ... nprocs=4,
3040
+ ... showprogressbar=True,
3041
+ ... cache=True,
3042
+ ... debug=False,
3043
+ ... )
3044
+ """
3045
+ if mpcode:
3046
+ inputshape = rawapp_byslice.shape
3047
+ sliceargs = [
3048
+ validlocslist,
3049
+ proctrs,
3050
+ demeandata_byslice,
3051
+ fmri_data_byslice,
3052
+ outphases,
3053
+ cardphasevals,
3054
+ congridbins,
3055
+ gridkernel,
3056
+ weights_byslice,
3057
+ cine_byslice,
3058
+ destpoints,
3059
+ rawapp_byslice,
3060
+ ]
3061
+ slicefunc = _procOnePhaseProject
3062
+ packfunc = _packslicedataPhaseProject
3063
+ unpackfunc = _unpackslicedataPhaseProject
3064
+ slicetargets = [rawapp_byslice, cine_byslice, weights_byslice]
3065
+ slicemask = np.ones_like(rawapp_byslice[0, :, 0])
3066
+
3067
+ slicetotal = tide_genericmultiproc.run_multiproc(
3068
+ slicefunc,
3069
+ packfunc,
3070
+ unpackfunc,
3071
+ sliceargs,
3072
+ slicetargets,
3073
+ inputshape,
3074
+ slicemask,
3075
+ None,
3076
+ nprocs,
3077
+ alwaysmultiproc,
3078
+ showprogressbar,
3079
+ 8,
3080
+ indexaxis=1,
3081
+ procunit="slices",
3082
+ cache=cache,
3083
+ debug=debug,
1138
3084
  )
1139
- thecorrfunc_byslice = thecorrfunc.reshape(
1140
- (xsize * ysize, numslices, args.aliasedcorrelationpts)
3085
+ else:
3086
+ for theslice in tqdm(
3087
+ range(numslices),
3088
+ desc="Slice",
3089
+ unit="slices",
3090
+ disable=(not showprogressbar),
3091
+ ):
3092
+ validlocs = validlocslist[theslice]
3093
+ if len(validlocs) > 0:
3094
+ for t in proctrs:
3095
+ filteredmr = -demeandata_byslice[validlocs, theslice, t]
3096
+ cinemr = fmri_data_byslice[validlocs, theslice, t]
3097
+ thevals, theweights, theindices = tide_resample.congrid(
3098
+ outphases,
3099
+ cardphasevals[theslice, t],
3100
+ 1.0,
3101
+ congridbins,
3102
+ kernel=gridkernel,
3103
+ cyclic=True,
3104
+ cache=cache,
3105
+ debug=debug,
3106
+ )
3107
+ for i in range(len(theindices)):
3108
+ weights_byslice[validlocs, theslice, theindices[i]] += theweights[i]
3109
+ rawapp_byslice[validlocs, theslice, theindices[i]] += filteredmr
3110
+ cine_byslice[validlocs, theslice, theindices[i]] += theweights[i] * cinemr
3111
+ for d in range(destpoints):
3112
+ if weights_byslice[validlocs[0], theslice, d] == 0.0:
3113
+ weights_byslice[validlocs, theslice, d] = 1.0
3114
+ rawapp_byslice[validlocs, theslice, :] = np.nan_to_num(
3115
+ rawapp_byslice[validlocs, theslice, :]
3116
+ / weights_byslice[validlocs, theslice, :]
3117
+ )
3118
+ cine_byslice[validlocs, theslice, :] = np.nan_to_num(
3119
+ cine_byslice[validlocs, theslice, :] / weights_byslice[validlocs, theslice, :]
3120
+ )
3121
+ else:
3122
+ rawapp_byslice[:, theslice, :] = 0.0
3123
+ cine_byslice[:, theslice, :] = 0.0
3124
+
3125
+
3126
+ def _procOneSliceSmoothing(slice, sliceargs, **kwargs):
3127
+ """
3128
+ Apply smoothing filter to a single slice of projected data along time dimension.
3129
+
3130
+ This function processes a single slice of data by applying a smoothing filter
3131
+ to the raw application data and computing circular derivatives for the
3132
+ specified slice. The smoothing is applied only to valid locations within the slice.
3133
+
3134
+ Parameters
3135
+ ----------
3136
+ slice : int
3137
+ The slice index to process.
3138
+ sliceargs : tuple
3139
+ A tuple containing the following elements:
3140
+
3141
+ - validlocslist : list of arrays
3142
+ List of arrays containing valid location indices for each slice
3143
+ - rawapp_byslice : ndarray
3144
+ Array containing raw application data by slice [locations, slices, time_points]
3145
+ - appsmoothingfilter : object
3146
+ Smoothing filter object with an apply method
3147
+ - phaseFs : array-like
3148
+ Frequency values for smoothing filter application
3149
+ - derivatives_byslice : ndarray
3150
+ Array to store computed derivatives [locations, slices, time_points]
3151
+ **kwargs : dict
3152
+ Additional keyword arguments:
3153
+ - debug : bool, optional
3154
+ Enable debug mode (default: False)
3155
+
3156
+ Returns
3157
+ -------
3158
+ tuple
3159
+ A tuple containing:
3160
+
3161
+ - slice : int
3162
+ The input slice index
3163
+ - rawapp_byslice : ndarray
3164
+ Smoothed raw application data for the specified slice [locations, time_points]
3165
+ - derivatives_byslice : ndarray
3166
+ Computed circular derivatives for the specified slice [locations, time_points]
3167
+
3168
+ Notes
3169
+ -----
3170
+ - The function only processes slices with valid locations (len(validlocs) > 0)
3171
+ - Smoothing is applied using the provided smoothing filter's apply method
3172
+ - Circular derivatives are computed using the `circularderivs` function
3173
+ - The function modifies the input arrays in-place
3174
+
3175
+ Examples
3176
+ --------
3177
+ >>> slice_idx = 5
3178
+ >>> sliceargs = (validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice)
3179
+ >>> result = _procOneSliceSmoothing(slice_idx, sliceargs, debug=True)
3180
+ """
3181
+ options = {
3182
+ "debug": False,
3183
+ }
3184
+ options.update(kwargs)
3185
+ debug = options["debug"]
3186
+ (validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice) = sliceargs
3187
+ # now smooth the projected data along the time dimension
3188
+ validlocs = validlocslist[slice]
3189
+ if len(validlocs) > 0:
3190
+ for loc in validlocs:
3191
+ rawapp_byslice[loc, slice, :] = appsmoothingfilter.apply(
3192
+ phaseFs, rawapp_byslice[loc, slice, :]
3193
+ )
3194
+ derivatives_byslice[loc, slice, :] = circularderivs(rawapp_byslice[loc, slice, :])
3195
+ return slice, rawapp_byslice[:, slice, :], derivatives_byslice[:, slice, :]
3196
+
3197
+
3198
+ def _packslicedataSliceSmoothing(slicenum, sliceargs):
3199
+ """Pack slice data for slice smoothing operation.
3200
+
3201
+ Parameters
3202
+ ----------
3203
+ slicenum : int
3204
+ The slice number identifier.
3205
+ sliceargs : list
3206
+ List containing slice arguments with at least 5 elements.
3207
+
3208
+ Returns
3209
+ -------
3210
+ list
3211
+ A list containing the first 5 elements from sliceargs in the same order.
3212
+
3213
+ Notes
3214
+ -----
3215
+ This function extracts the first five elements from the sliceargs parameter
3216
+ and returns them as a new list. It's typically used as part of a slice
3217
+ smoothing pipeline where slice arguments need to be packed for further processing.
3218
+
3219
+ Examples
3220
+ --------
3221
+ >>> _packslicedataSliceSmoothing(1, [10, 20, 30, 40, 50, 60])
3222
+ [10, 20, 30, 40, 50]
3223
+ """
3224
+ return [
3225
+ sliceargs[0],
3226
+ sliceargs[1],
3227
+ sliceargs[2],
3228
+ sliceargs[3],
3229
+ sliceargs[4],
3230
+ ]
3231
+
3232
+
3233
+ def _unpackslicedataSliceSmoothing(retvals, voxelproducts):
3234
+ """
3235
+ Unpack slice data for smoothing operation.
3236
+
3237
+ This function assigns smoothed slice data back to the voxel products array
3238
+ based on the provided retvals structure.
3239
+
3240
+ Parameters
3241
+ ----------
3242
+ retvals : tuple of array-like
3243
+ A tuple containing:
3244
+ - retvals[0] : array-like
3245
+ Index array for slice selection
3246
+ - retvals[1] : array-like
3247
+ First set of smoothed data to assign
3248
+ - retvals[2] : array-like
3249
+ Second set of smoothed data to assign
3250
+ voxelproducts : list of array-like
3251
+ A list containing two array-like objects where:
3252
+ - voxelproducts[0] : array-like
3253
+ First voxel product array to be modified
3254
+ - voxelproducts[1] : array-like
3255
+ Second voxel product array to be modified
3256
+
3257
+ Returns
3258
+ -------
3259
+ None
3260
+ This function modifies the voxelproducts arrays in-place and does not return anything.
3261
+
3262
+ Notes
3263
+ -----
3264
+ The function performs in-place assignment operations on the voxelproducts arrays.
3265
+ The first dimension of voxelproducts arrays is modified using retvals[0] as indices,
3266
+ while the second and third dimensions are directly assigned from retvals[1] and retvals[2].
3267
+
3268
+ Examples
3269
+ --------
3270
+ >>> import numpy as np
3271
+ >>> retvals = (np.array([0, 1, 2]), np.array([[1, 2], [3, 4], [5, 6]]), np.array([[7, 8], [9, 10], [11, 12]]))
3272
+ >>> voxelproducts = [np.zeros((3, 3, 2)), np.zeros((3, 3, 2))]
3273
+ >>> _unpackslicedataSliceSmoothing(retvals, voxelproducts)
3274
+ >>> print(voxelproducts[0])
3275
+ >>> print(voxelproducts[1])
3276
+ """
3277
+ (voxelproducts[0])[:, retvals[0], :] = retvals[1]
3278
+ (voxelproducts[1])[:, retvals[0], :] = retvals[2]
3279
+
3280
+
3281
+ def tcsmoothingpass(
3282
+ numslices,
3283
+ validlocslist,
3284
+ rawapp_byslice,
3285
+ appsmoothingfilter,
3286
+ phaseFs,
3287
+ derivatives_byslice,
3288
+ nprocs=1,
3289
+ alwaysmultiproc=False,
3290
+ showprogressbar=True,
3291
+ debug=False,
3292
+ ):
3293
+ """
3294
+ Apply smoothing to time course data across slices using multiprocessing.
3295
+
3296
+ This function performs smoothing operations on time course data organized by slices,
3297
+ utilizing multiprocessing for improved performance when processing large datasets.
3298
+
3299
+ Parameters
3300
+ ----------
3301
+ numslices : int
3302
+ Number of slices in the dataset
3303
+ validlocslist : list
3304
+ List of valid locations for processing
3305
+ rawapp_byslice : NDArray
3306
+ Raw application data organized by slice
3307
+ appsmoothingfilter : NDArray
3308
+ Smoothing filter to be applied
3309
+ phaseFs : float
3310
+ Phase frequency parameter for smoothing operations
3311
+ derivatives_byslice : NDArray
3312
+ Derivative data organized by slice
3313
+ nprocs : int, optional
3314
+ Number of processors to use for multiprocessing (default is 1)
3315
+ alwaysmultiproc : bool, optional
3316
+ Whether to always use multiprocessing regardless of data size (default is False)
3317
+ showprogressbar : bool, optional
3318
+ Whether to display progress bar during processing (default is True)
3319
+ debug : bool, optional
3320
+ Enable debug mode for additional logging (default is False)
3321
+
3322
+ Returns
3323
+ -------
3324
+ NDArray
3325
+ Processed data after smoothing operations have been applied
3326
+
3327
+ Notes
3328
+ -----
3329
+ This function uses the `tide_genericmultiproc.run_multiproc` utility to distribute
3330
+ the smoothing workload across multiple processors. The function handles data organization
3331
+ and processing for each slice individually, then combines results.
3332
+
3333
+ Examples
3334
+ --------
3335
+ >>> result = tcsmoothingpass(
3336
+ ... numslices=10,
3337
+ ... validlocslist=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
3338
+ ... rawapp_byslice=raw_data,
3339
+ ... appsmoothingfilter=smoothing_filter,
3340
+ ... phaseFs=100.0,
3341
+ ... derivatives_byslice=derivatives,
3342
+ ... nprocs=4
3343
+ ... )
3344
+ """
3345
+ inputshape = rawapp_byslice.shape
3346
+ sliceargs = [validlocslist, rawapp_byslice, appsmoothingfilter, phaseFs, derivatives_byslice]
3347
+ slicefunc = _procOneSliceSmoothing
3348
+ packfunc = _packslicedataSliceSmoothing
3349
+ unpackfunc = _unpackslicedataSliceSmoothing
3350
+ slicetargets = [rawapp_byslice, derivatives_byslice]
3351
+ slicemask = np.ones_like(rawapp_byslice[0, :, 0])
3352
+
3353
+ slicetotal = tide_genericmultiproc.run_multiproc(
3354
+ slicefunc,
3355
+ packfunc,
3356
+ unpackfunc,
3357
+ sliceargs,
3358
+ slicetargets,
3359
+ inputshape,
3360
+ slicemask,
3361
+ None,
3362
+ nprocs,
3363
+ alwaysmultiproc,
3364
+ showprogressbar,
3365
+ 16,
3366
+ indexaxis=1,
3367
+ procunit="slices",
3368
+ debug=debug,
3369
+ )
3370
+
3371
+
3372
+ def phaseproject(
3373
+ input_data,
3374
+ demeandata_byslice,
3375
+ means_byslice,
3376
+ rawapp_byslice,
3377
+ app_byslice,
3378
+ normapp_byslice,
3379
+ weights_byslice,
3380
+ cine_byslice,
3381
+ projmask_byslice,
3382
+ derivatives_byslice,
3383
+ proctrs,
3384
+ thispass,
3385
+ args,
3386
+ sliceoffsets,
3387
+ cardphasevals,
3388
+ outphases,
3389
+ appsmoothingfilter,
3390
+ phaseFs,
3391
+ thecorrfunc_byslice,
3392
+ waveamp_byslice,
3393
+ wavedelay_byslice,
3394
+ wavedelayCOM_byslice,
3395
+ corrected_rawapp_byslice,
3396
+ corrstartloc,
3397
+ correndloc,
3398
+ thealiasedcorrx,
3399
+ theAliasedCorrelator,
3400
+ ):
3401
+ """
3402
+ Perform phase projection and related processing on fMRI data across slices.
3403
+
3404
+ This function performs phase projection on fMRI data, optionally smoothing
3405
+ timecourses, and applying flips based on derivative information. It also
3406
+ computes wavelet-based correlation measures and updates relevant arrays
3407
+ in-place for further processing.
3408
+
3409
+ Parameters
3410
+ ----------
3411
+ input_data : object
3412
+ Input fMRI data container with `getdims()` and `byslice()` methods.
3413
+ demeandata_byslice : array_like
3414
+ Demeaned fMRI data by slice.
3415
+ means_byslice : array_like
3416
+ Mean values by slice for normalization.
3417
+ rawapp_byslice : array_like
3418
+ Raw APP (Arterial Spin Labeling) data by slice.
3419
+ app_byslice : array_like
3420
+ APP data after initial processing.
3421
+ normapp_byslice : array_like
3422
+ Normalized APP data.
3423
+ weights_byslice : array_like
3424
+ Weights by slice for processing.
3425
+ cine_byslice : array_like
3426
+ Cine data by slice.
3427
+ projmask_byslice : array_like
3428
+ Projection mask by slice.
3429
+ derivatives_byslice : array_like
3430
+ Derivative data by slice, used for determining flips.
3431
+ proctrs : array_like
3432
+ Processing timepoints or transformation parameters.
3433
+ thispass : int
3434
+ Current processing pass number.
3435
+ args : argparse.Namespace
3436
+ Command-line arguments controlling processing behavior.
3437
+ sliceoffsets : array_like
3438
+ Slice offset values.
3439
+ cardphasevals : array_like
3440
+ Cardiac phase values.
3441
+ outphases : array_like
3442
+ Output phases.
3443
+ appsmoothingfilter : array_like
3444
+ Smoothing filter for timecourses.
3445
+ phaseFs : float
3446
+ Sampling frequency for phase processing.
3447
+ thecorrfunc_byslice : array_like
3448
+ Correlation function by slice.
3449
+ waveamp_byslice : array_like
3450
+ Wave amplitude by slice.
3451
+ wavedelay_byslice : array_like
3452
+ Wave delay by slice.
3453
+ wavedelayCOM_byslice : array_like
3454
+ Center of mass of wave delay by slice.
3455
+ corrected_rawapp_byslice : array_like
3456
+ Corrected raw APP data by slice.
3457
+ corrstartloc : int
3458
+ Start location for correlation computation.
3459
+ correndloc : int
3460
+ End location for correlation computation.
3461
+ thealiasedcorrx : array_like
3462
+ Aliased correlation x-axis values.
3463
+ theAliasedCorrelator : object
3464
+ Correlator object for aliased correlation computation.
3465
+
3466
+ Returns
3467
+ -------
3468
+ appflips_byslice : array_like
3469
+ Flip values applied to the APP data by slice.
3470
+
3471
+ Notes
3472
+ -----
3473
+ - The function modifies several input arrays in-place.
3474
+ - If `args.smoothapp` is True, smoothing is applied to the raw APP data.
3475
+ - If `args.fliparteries` is True, flips are applied to correct arterial
3476
+ orientation.
3477
+ - If `args.doaliasedcorrelation` is True, aliased correlation is computed
3478
+ and stored in `thecorrfunc_byslice`.
3479
+
3480
+ Examples
3481
+ --------
3482
+ >>> phaseproject(
3483
+ ... input_data, demeandata_byslice, means_byslice, rawapp_byslice,
3484
+ ... app_byslice, normapp_byslice, weights_byslice, cine_byslice,
3485
+ ... projmask_byslice, derivatives_byslice, proctrs, thispass, args,
3486
+ ... sliceoffsets, cardphasevals, outphases, appsmoothingfilter,
3487
+ ... phaseFs, thecorrfunc_byslice, waveamp_byslice, wavedelay_byslice,
3488
+ ... wavedelayCOM_byslice, corrected_rawapp_byslice, corrstartloc,
3489
+ ... correndloc, thealiasedcorrx, theAliasedCorrelator
3490
+ ... )
3491
+ """
3492
+ xsize, ysize, numslices, timepoints = input_data.getdims()
3493
+ fmri_data_byslice = input_data.byslice()
3494
+
3495
+ # first find the validlocs for each slice
3496
+ validlocslist = []
3497
+ if args.verbose:
3498
+ print("Finding validlocs")
3499
+ for theslice in range(numslices):
3500
+ validlocslist.append(np.where(projmask_byslice[:, theslice] > 0)[0])
3501
+
3502
+ # phase project each slice
3503
+ print("Phase projecting")
3504
+ phaseprojectpass(
3505
+ numslices,
3506
+ demeandata_byslice,
3507
+ fmri_data_byslice,
3508
+ validlocslist,
3509
+ proctrs,
3510
+ weights_byslice,
3511
+ cine_byslice,
3512
+ rawapp_byslice,
3513
+ outphases,
3514
+ cardphasevals,
3515
+ args.congridbins,
3516
+ args.gridkernel,
3517
+ args.destpoints,
3518
+ cache=args.congridcache,
3519
+ mpcode=args.mpphaseproject,
3520
+ nprocs=args.nprocs,
3521
+ showprogressbar=args.showprogressbar,
3522
+ )
3523
+
3524
+ # smooth the phase projection, if requested
3525
+ if args.smoothapp:
3526
+ print("Smoothing timecourses")
3527
+ tcsmoothingpass(
3528
+ numslices,
3529
+ validlocslist,
3530
+ rawapp_byslice,
3531
+ appsmoothingfilter,
3532
+ phaseFs,
3533
+ derivatives_byslice,
3534
+ nprocs=args.nprocs,
3535
+ showprogressbar=args.showprogressbar,
1141
3536
  )
1142
- wavedelay = np.zeros((xsize, ysize, numslices), dtype=np.float64)
1143
- wavedelay_byslice = wavedelay.reshape((xsize * ysize, numslices))
1144
- waveamp = np.zeros((xsize, ysize, numslices), dtype=np.float64)
1145
- waveamp_byslice = waveamp.reshape((xsize * ysize, numslices))
1146
3537
 
1147
- # now project the data
3538
+ # now do the flips
3539
+ print("Doing flips")
1148
3540
  for theslice in tqdm(
1149
3541
  range(numslices),
1150
3542
  desc="Slice",
1151
3543
  unit="slices",
3544
+ disable=(not args.showprogressbar),
1152
3545
  ):
1153
- if args.verbose:
1154
- print("Phase projecting for slice", theslice)
1155
- validlocs = np.where(projmask_byslice[:, theslice] > 0)[0]
1156
- # indexlist = range(0, len(cardphasevals[theslice, :]))
3546
+ # now do the flips
3547
+ validlocs = validlocslist[theslice]
1157
3548
  if len(validlocs) > 0:
1158
- for t in proctrs:
1159
- filteredmr = -datatoproject_byslice[validlocs, theslice, t]
1160
- cinemr = (
1161
- datatoproject_byslice[validlocs, theslice, t]
1162
- + means_byslice[validlocs, theslice, t]
1163
- )
1164
- thevals, theweights, theindices = tide_resample.congrid(
1165
- outphases,
1166
- cardphasevals[theslice, t],
1167
- 1.0,
1168
- congridbins,
1169
- kernel=gridkernel,
1170
- cyclic=True,
1171
- )
1172
- for i in range(len(theindices)):
1173
- weight_byslice[validlocs, theslice, theindices[i]] += theweights[i]
1174
- rawapp_byslice[validlocs, theslice, theindices[i]] += (
1175
- theweights[i] * filteredmr
1176
- )
1177
- cine_byslice[validlocs, theslice, theindices[i]] += theweights[i] * cinemr
1178
- for d in range(destpoints):
1179
- if weight_byslice[validlocs[0], theslice, d] == 0.0:
1180
- weight_byslice[validlocs, theslice, d] = 1.0
1181
- rawapp_byslice[validlocs, theslice, :] = np.nan_to_num(
1182
- rawapp_byslice[validlocs, theslice, :] / weight_byslice[validlocs, theslice, :]
3549
+ appflips_byslice = np.where(
3550
+ -derivatives_byslice[:, :, 2] > derivatives_byslice[:, :, 0], -1.0, 1.0
1183
3551
  )
1184
- cine_byslice[validlocs, theslice, :] = np.nan_to_num(
1185
- cine_byslice[validlocs, theslice, :] / weight_byslice[validlocs, theslice, :]
3552
+ timecoursemean = np.mean(rawapp_byslice[validlocs, theslice, :], axis=1).reshape(
3553
+ (-1, 1)
1186
3554
  )
1187
- else:
1188
- rawapp_byslice[:, theslice, :] = 0.0
1189
- cine_byslice[:, theslice, :] = 0.0
1190
-
1191
- # smooth the projected data along the time dimension
1192
- if args.smoothapp:
1193
- for loc in validlocs:
1194
- rawapp_byslice[loc, theslice, :] = appsmoothingfilter.apply(
1195
- phaseFs, rawapp_byslice[loc, theslice, :]
1196
- )
1197
- derivatives_byslice[loc, theslice, :] = circularderivs(
1198
- rawapp_byslice[loc, theslice, :]
1199
- )
1200
- appflips_byslice = np.where(
1201
- -derivatives_byslice[:, :, 2] > derivatives_byslice[:, :, 0], -1.0, 1.0
1202
- )
1203
- timecoursemean = np.mean(rawapp_byslice[validlocs, theslice, :], axis=1).reshape((-1, 1))
1204
- if args.fliparteries:
1205
- corrected_rawapp_byslice[validlocs, theslice, :] = (
1206
- rawapp_byslice[validlocs, theslice, :] - timecoursemean
1207
- ) * appflips_byslice[validlocs, theslice, None] + timecoursemean
1208
- if args.doaliasedcorrelation and (thispass == numpasses - 1):
1209
- for theloc in validlocs:
1210
- thecorrfunc_byslice[theloc, theslice, :] = theAliasedCorrelator.apply(
1211
- -appflips_byslice[theloc, theslice]
1212
- * datatoproject_byslice[theloc, theslice, :],
1213
- -thetimes[theslice][0],
1214
- )
1215
- maxloc = np.argmax(thecorrfunc_byslice[theloc, theslice, :])
1216
- wavedelay_byslice[theloc, theslice] = corrsearchvals[maxloc]
1217
- waveamp_byslice[theloc, theslice] = thecorrfunc_byslice[
1218
- theloc, theslice, maxloc
1219
- ]
1220
- else:
1221
- corrected_rawapp_byslice[validlocs, theslice, :] = rawapp_byslice[
1222
- validlocs, theslice, :
1223
- ]
1224
- if args.doaliasedcorrelation and (thispass == numpasses - 1):
1225
- for theloc in validlocs:
1226
- thecorrfunc_byslice[theloc, theslice, :] = theAliasedCorrelator.apply(
1227
- -datatoproject_byslice[theloc, theslice, :],
1228
- -thetimes[theslice][0],
1229
- )
1230
- maxloc = np.argmax(np.abs(thecorrfunc_byslice[theloc, theslice, :]))
1231
- wavedelay_byslice[theloc, theslice] = corrsearchvals[maxloc]
1232
- waveamp_byslice[theloc, theslice] = thecorrfunc_byslice[
1233
- theloc, theslice, maxloc
1234
- ]
1235
- timecoursemin = np.min(corrected_rawapp_byslice[validlocs, theslice, :], axis=1).reshape(
1236
- (-1, 1)
3555
+ if args.fliparteries:
3556
+ corrected_rawapp_byslice[validlocs, theslice, :] = (
3557
+ rawapp_byslice[validlocs, theslice, :] - timecoursemean
3558
+ ) * appflips_byslice[validlocs, theslice, None] + timecoursemean
3559
+ if args.doaliasedcorrelation and (thispass > 0):
3560
+ for theloc in validlocs:
3561
+ thecorrfunc_byslice[theloc, theslice, :] = theAliasedCorrelator.apply(
3562
+ -appflips_byslice[theloc, theslice]
3563
+ * demeandata_byslice[theloc, theslice, :],
3564
+ int(sliceoffsets[theslice]),
3565
+ )[corrstartloc : correndloc + 1]
3566
+ maxloc = np.argmax(thecorrfunc_byslice[theloc, theslice, :])
3567
+ wavedelay_byslice[theloc, theslice] = (
3568
+ thealiasedcorrx[corrstartloc : correndloc + 1]
3569
+ )[maxloc]
3570
+ waveamp_byslice[theloc, theslice] = np.fabs(
3571
+ thecorrfunc_byslice[theloc, theslice, maxloc]
3572
+ )
3573
+ wavedelayCOM_byslice[theloc, theslice] = theCOM(
3574
+ thealiasedcorrx[corrstartloc : correndloc + 1],
3575
+ np.fabs(thecorrfunc_byslice[theloc, theslice, :]),
3576
+ )
3577
+ else:
3578
+ corrected_rawapp_byslice[validlocs, theslice, :] = rawapp_byslice[
3579
+ validlocs, theslice, :
3580
+ ]
3581
+ if args.doaliasedcorrelation and (thispass > 0):
3582
+ for theloc in validlocs:
3583
+ thecorrfunc_byslice[theloc, theslice, :] = theAliasedCorrelator.apply(
3584
+ -demeandata_byslice[theloc, theslice, :],
3585
+ int(sliceoffsets[theslice]),
3586
+ )[corrstartloc : correndloc + 1]
3587
+ maxloc = np.argmax(np.abs(thecorrfunc_byslice[theloc, theslice, :]))
3588
+ wavedelay_byslice[theloc, theslice] = (
3589
+ thealiasedcorrx[corrstartloc : correndloc + 1]
3590
+ )[maxloc]
3591
+ waveamp_byslice[theloc, theslice] = np.fabs(
3592
+ thecorrfunc_byslice[theloc, theslice, maxloc]
3593
+ )
3594
+ timecoursemin = np.min(
3595
+ corrected_rawapp_byslice[validlocs, theslice, :], axis=1
3596
+ ).reshape((-1, 1))
3597
+ app_byslice[validlocs, theslice, :] = (
3598
+ corrected_rawapp_byslice[validlocs, theslice, :] - timecoursemin
3599
+ )
3600
+ normapp_byslice[validlocs, theslice, :] = np.nan_to_num(
3601
+ app_byslice[validlocs, theslice, :] / means_byslice[validlocs, theslice, None]
3602
+ )
3603
+ return appflips_byslice
3604
+
3605
+
3606
+ def findvessels(
3607
+ app,
3608
+ normapp,
3609
+ validlocs,
3610
+ numspatiallocs,
3611
+ outputroot,
3612
+ unnormvesselmap,
3613
+ destpoints,
3614
+ softvesselfrac,
3615
+ histlen,
3616
+ outputlevel,
3617
+ debug=False,
3618
+ ):
3619
+ """
3620
+ Find vessel thresholds and generate vessel masks from app data.
3621
+
3622
+ This function processes app data to identify vessel thresholds and optionally
3623
+ generates histograms for visualization. It handles both normalized and
3624
+ unnormalized vessel maps based on the input parameters.
3625
+
3626
+ Parameters
3627
+ ----------
3628
+ app : NDArray
3629
+ Raw app data array
3630
+ normapp : NDArray
3631
+ Normalized app data array
3632
+ validlocs : NDArray
3633
+ Array of valid locations for processing
3634
+ numspatiallocs : int
3635
+ Number of spatial locations
3636
+ outputroot : str
3637
+ Root directory path for output files
3638
+ unnormvesselmap : bool
3639
+ Flag indicating whether to use unnormalized vessel map
3640
+ destpoints : int
3641
+ Number of destination points
3642
+ softvesselfrac : float
3643
+ Fractional multiplier for soft vessel threshold
3644
+ histlen : int
3645
+ Length of histogram bins
3646
+ outputlevel : int
3647
+ Level of output generation (0 = no histogram, 1 = histogram only)
3648
+ debug : bool, optional
3649
+ Debug flag for additional logging (default is False)
3650
+
3651
+ Returns
3652
+ -------
3653
+ tuple
3654
+ Tuple containing (hardvesselthresh, softvesselthresh) threshold values
3655
+
3656
+ Notes
3657
+ -----
3658
+ The function performs the following steps:
3659
+ 1. Reshapes app data based on unnormvesselmap flag
3660
+ 2. Extracts valid locations from the reshaped data
3661
+ 3. Generates histogram if outputlevel > 0
3662
+ 4. Calculates hard and soft vessel thresholds based on 98th percentile
3663
+ 5. Prints threshold values to console
3664
+
3665
+ Examples
3666
+ --------
3667
+ >>> hard_thresh, soft_thresh = findvessels(
3668
+ ... app=app_data,
3669
+ ... normapp=norm_app_data,
3670
+ ... validlocs=valid_indices,
3671
+ ... numspatiallocs=100,
3672
+ ... outputroot='/path/to/output',
3673
+ ... unnormvesselmap=True,
3674
+ ... destpoints=50,
3675
+ ... softvesselfrac=0.5,
3676
+ ... histlen=100,
3677
+ ... outputlevel=1
3678
+ ... )
3679
+ """
3680
+ if unnormvesselmap:
3681
+ app2d = app.reshape((numspatiallocs, destpoints))
3682
+ else:
3683
+ app2d = normapp.reshape((numspatiallocs, destpoints))
3684
+ histinput = app2d[validlocs, :].reshape((len(validlocs), destpoints))
3685
+ if outputlevel > 0:
3686
+ namesuffix = "_desc-apppeaks_hist"
3687
+ tide_stats.makeandsavehistogram(
3688
+ histinput,
3689
+ histlen,
3690
+ 0,
3691
+ outputroot + namesuffix,
3692
+ debug=debug,
1237
3693
  )
1238
- app_byslice[validlocs, theslice, :] = (
1239
- corrected_rawapp_byslice[validlocs, theslice, :] - timecoursemin
3694
+
3695
+ # find vessel thresholds
3696
+ tide_util.logmem("before making vessel masks")
3697
+ hardvesselthresh = tide_stats.getfracvals(np.max(histinput, axis=1), [0.98])[0] / 2.0
3698
+ softvesselthresh = softvesselfrac * hardvesselthresh
3699
+ print(
3700
+ "hard, soft vessel thresholds set to",
3701
+ "{:.3f}".format(hardvesselthresh),
3702
+ "{:.3f}".format(softvesselthresh),
3703
+ )
3704
+
3705
+
3706
+ def upsampleimage(input_data, numsteps, sliceoffsets, slicesamplerate, outputroot):
3707
+ """
3708
+ Upsample fMRI data along the temporal and slice dimensions.
3709
+
3710
+ This function takes fMRI data and upsamples it by a factor of `numsteps` along
3711
+ the temporal dimension, and interpolates across slices to align with specified
3712
+ slice offsets. The resulting upsampled data is saved as a NIfTI file.
3713
+
3714
+ Parameters
3715
+ ----------
3716
+ input_data : object
3717
+ Input fMRI data object with attributes: `byvol()`, `timepoints`, `xsize`,
3718
+ `ysize`, `numslices`, and `copyheader()`.
3719
+ numsteps : int
3720
+ Upsampling factor along the temporal dimension.
3721
+ sliceoffsets : array-like of int
3722
+ Slice offset indices indicating where each slice's data should be placed
3723
+ in the upsampled volume.
3724
+ slicesamplerate : float
3725
+ Sampling rate of the slice acquisition (used to set the TR in the output header).
3726
+ outputroot : str
3727
+ Root name for the output NIfTI file (will be suffixed with "_upsampled").
3728
+
3729
+ Returns
3730
+ -------
3731
+ None
3732
+ The function saves the upsampled data to a NIfTI file and does not return any value.
3733
+
3734
+ Notes
3735
+ -----
3736
+ - The function demeanes the input data before upsampling.
3737
+ - Interpolation is performed along the slice direction using linear interpolation.
3738
+ - The output file is saved using `tide_io.savetonifti`.
3739
+
3740
+ Examples
3741
+ --------
3742
+ >>> upsampleimage(fmri_data, numsteps=2, sliceoffsets=[0, 1], slicesamplerate=2.0, outputroot='output')
3743
+ Upsamples the fMRI data by a factor of 2 and saves to 'output_upsampled.nii'.
3744
+ """
3745
+ fmri_data = input_data.byvol()
3746
+ timepoints = input_data.timepoints
3747
+ xsize = input_data.xsize
3748
+ ysize = input_data.ysize
3749
+ numslices = input_data.numslices
3750
+
3751
+ # allocate the image
3752
+ print(f"upsampling fmri data by a factor of {numsteps}")
3753
+ upsampleimage = np.zeros((xsize, ysize, numslices, numsteps * timepoints), dtype=float)
3754
+
3755
+ # demean the raw data
3756
+ meanfmri = fmri_data.mean(axis=1)
3757
+ demeaned_data = fmri_data - meanfmri[:, None]
3758
+
3759
+ # drop in the raw data
3760
+ for theslice in range(numslices):
3761
+ upsampleimage[
3762
+ :, :, theslice, sliceoffsets[theslice] : timepoints * numsteps : numsteps
3763
+ ] = demeaned_data.reshape((xsize, ysize, numslices, timepoints))[:, :, theslice, :]
3764
+
3765
+ upsampleimage_byslice = upsampleimage.reshape(xsize * ysize, numslices, timepoints * numsteps)
3766
+
3767
+ # interpolate along the slice direction
3768
+ thedstlocs = np.linspace(0, numslices, num=len(sliceoffsets), endpoint=False)
3769
+ print(f"len(destlocst), destlocs: {len(thedstlocs)}, {thedstlocs}")
3770
+ for thetimepoint in range(0, timepoints * numsteps):
3771
+ thestep = thetimepoint % numsteps
3772
+ print(f"interpolating step {thestep}")
3773
+ thesrclocs = np.where(sliceoffsets == thestep)[0]
3774
+ print(f"timepoint: {thetimepoint}, sourcelocs: {thesrclocs}")
3775
+ for thexyvoxel in range(xsize * ysize):
3776
+ theinterps = np.interp(
3777
+ thedstlocs,
3778
+ 1.0 * thesrclocs,
3779
+ upsampleimage_byslice[thexyvoxel, thesrclocs, thetimepoint],
3780
+ )
3781
+ upsampleimage_byslice[thexyvoxel, :, thetimepoint] = 1.0 * theinterps
3782
+
3783
+ theheader = input_data.copyheader(
3784
+ numtimepoints=(timepoints * numsteps), tr=(1.0 / slicesamplerate)
3785
+ )
3786
+ tide_io.savetonifti(upsampleimage, theheader, outputroot + "_upsampled")
3787
+ print("upsampling complete")
3788
+
3789
+
3790
+ def wrightmap(
3791
+ input_data,
3792
+ demeandata_byslice,
3793
+ rawapp_byslice,
3794
+ projmask_byslice,
3795
+ outphases,
3796
+ cardphasevals,
3797
+ proctrs,
3798
+ congridbins,
3799
+ gridkernel,
3800
+ destpoints,
3801
+ iterations=100,
3802
+ nprocs=-1,
3803
+ verbose=False,
3804
+ debug=False,
3805
+ ):
3806
+ """
3807
+ Compute a vessel map using Wright's method by performing phase correlation
3808
+ analysis across randomized subsets of timecourses.
3809
+
3810
+ This function implements Wright's method for estimating vessel maps by
3811
+ splitting the timecourse data into two random halves, projecting each half
3812
+ separately, and computing the Pearson correlation between the resulting
3813
+ projections for each voxel and slice. The final map is derived as the mean
3814
+ of these correlations across iterations.
3815
+
3816
+ Parameters
3817
+ ----------
3818
+ input_data : object
3819
+ Input data container with attributes `xsize`, `ysize`, and `numslices`.
3820
+ demeandata_byslice : array_like
3821
+ Demeaned data organized by slice, shape ``(nvoxels, numslices)``.
3822
+ rawapp_byslice : array_like
3823
+ Raw application data by slice, shape ``(nvoxels, numslices)``.
3824
+ projmask_byslice : array_like
3825
+ Projection mask by slice, shape ``(nvoxels, numslices)``.
3826
+ outphases : array_like
3827
+ Output phases, shape ``(nphases,)``.
3828
+ cardphasevals : array_like
3829
+ Cardinal phase values, shape ``(nphases,)``.
3830
+ proctrs : array_like
3831
+ Timecourse indices to be processed, shape ``(ntimepoints,)``.
3832
+ congridbins : array_like
3833
+ Binning information for congrid interpolation.
3834
+ gridkernel : array_like
3835
+ Kernel for grid interpolation.
3836
+ destpoints : array_like
3837
+ Destination points for projection.
3838
+ iterations : int, optional
3839
+ Number of iterations for random splitting (default is 100).
3840
+ nprocs : int, optional
3841
+ Number of processes to use for parallel computation; -1 uses all
3842
+ available cores (default is -1).
3843
+ verbose : bool, optional
3844
+ If True, print progress messages (default is False).
3845
+ debug : bool, optional
3846
+ If True, print additional debug information (default is False).
3847
+
3848
+ Returns
3849
+ -------
3850
+ wrightcorrs : ndarray
3851
+ Computed vessel map with shape ``(xsize, ysize, numslices)``.
3852
+
3853
+ Notes
3854
+ -----
3855
+ This function performs a bootstrap-like procedure where the input timecourse
3856
+ is randomly split into two halves, and phase projections are computed for
3857
+ each half. Pearson correlation is computed between the two projections for
3858
+ each voxel and slice. The result is averaged over all iterations to produce
3859
+ the final vessel map.
3860
+
3861
+ Examples
3862
+ --------
3863
+ >>> wrightcorrs = wrightmap(
3864
+ ... input_data,
3865
+ ... demeandata_byslice,
3866
+ ... rawapp_byslice,
3867
+ ... projmask_byslice,
3868
+ ... outphases,
3869
+ ... cardphasevals,
3870
+ ... proctrs,
3871
+ ... congridbins,
3872
+ ... gridkernel,
3873
+ ... destpoints,
3874
+ ... iterations=50,
3875
+ ... verbose=True
3876
+ ... )
3877
+ """
3878
+ xsize = input_data.xsize
3879
+ ysize = input_data.ysize
3880
+ numslices = input_data.numslices
3881
+ # make a vessel map using Wright's method
3882
+ wrightcorrs_byslice = np.zeros((xsize * ysize, numslices, iterations))
3883
+ # first find the validlocs for each slice
3884
+ validlocslist = []
3885
+ if verbose:
3886
+ print("Finding validlocs")
3887
+ for theslice in range(numslices):
3888
+ validlocslist.append(np.where(projmask_byslice[:, theslice] > 0)[0])
3889
+ for theiteration in range(iterations):
3890
+ print(f"wright iteration: {theiteration + 1} of {iterations}")
3891
+ # split timecourse into two sets
3892
+ scrambledprocs = np.random.permutation(proctrs)
3893
+ proctrs1 = scrambledprocs[: int(len(scrambledprocs) // 2)]
3894
+ proctrs2 = scrambledprocs[int(len(scrambledprocs) // 2) :]
3895
+ if debug:
3896
+ print(f"{proctrs1=}, {proctrs2=}")
3897
+
3898
+ # phase project each slice
3899
+ rawapp_byslice1 = np.zeros_like(rawapp_byslice)
3900
+ cine_byslice1 = np.zeros_like(rawapp_byslice)
3901
+ weights_byslice1 = np.zeros_like(rawapp_byslice)
3902
+ phaseprojectpass(
3903
+ numslices,
3904
+ demeandata_byslice,
3905
+ input_data.byslice(),
3906
+ validlocslist,
3907
+ proctrs1,
3908
+ weights_byslice1,
3909
+ cine_byslice1,
3910
+ rawapp_byslice1,
3911
+ outphases,
3912
+ cardphasevals,
3913
+ congridbins,
3914
+ gridkernel,
3915
+ destpoints,
3916
+ nprocs=nprocs,
3917
+ showprogressbar=False,
1240
3918
  )
1241
- normapp_byslice[validlocs, theslice, :] = np.nan_to_num(
1242
- app_byslice[validlocs, theslice, :] / means_byslice[validlocs, theslice, None]
3919
+ rawapp_byslice2 = np.zeros_like(rawapp_byslice)
3920
+ cine_byslice2 = np.zeros_like(rawapp_byslice)
3921
+ weights_byslice2 = np.zeros_like(rawapp_byslice)
3922
+ phaseprojectpass(
3923
+ numslices,
3924
+ demeandata_byslice,
3925
+ input_data.byslice(),
3926
+ validlocslist,
3927
+ proctrs2,
3928
+ weights_byslice2,
3929
+ cine_byslice2,
3930
+ rawapp_byslice2,
3931
+ outphases,
3932
+ cardphasevals,
3933
+ congridbins,
3934
+ gridkernel,
3935
+ destpoints,
3936
+ nprocs=nprocs,
3937
+ showprogressbar=False,
1243
3938
  )
3939
+ for theslice in range(numslices):
3940
+ for thepoint in validlocslist[theslice]:
3941
+ theresult = pearsonr(
3942
+ rawapp_byslice1[thepoint, theslice, :],
3943
+ rawapp_byslice2[thepoint, theslice, :],
3944
+ )
3945
+ theRvalue = theresult.statistic
3946
+ if debug:
3947
+ print("theRvalue = ", theRvalue)
3948
+ wrightcorrs_byslice[thepoint, theslice, theiteration] = theRvalue
3949
+ wrightcorrs = np.mean(wrightcorrs_byslice, axis=2).reshape(xsize, ysize, numslices)
3950
+ return wrightcorrs