rapidtide 2.9.5-py3-none-any.whl → 3.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (405)
  1. cloud/gmscalc-HCPYA +1 -1
  2. cloud/mount-and-run +2 -0
  3. cloud/rapidtide-HCPYA +3 -3
  4. rapidtide/Colortables.py +538 -38
  5. rapidtide/OrthoImageItem.py +1094 -51
  6. rapidtide/RapidtideDataset.py +1709 -114
  7. rapidtide/__init__.py +0 -8
  8. rapidtide/_version.py +4 -4
  9. rapidtide/calccoherence.py +242 -97
  10. rapidtide/calcnullsimfunc.py +240 -140
  11. rapidtide/calcsimfunc.py +314 -129
  12. rapidtide/correlate.py +1211 -389
  13. rapidtide/data/examples/src/testLD +56 -0
  14. rapidtide/data/examples/src/test_findmaxlag.py +2 -2
  15. rapidtide/data/examples/src/test_mlregressallt.py +32 -17
  16. rapidtide/data/examples/src/testalign +1 -1
  17. rapidtide/data/examples/src/testatlasaverage +35 -7
  18. rapidtide/data/examples/src/testboth +21 -0
  19. rapidtide/data/examples/src/testcifti +11 -0
  20. rapidtide/data/examples/src/testdelayvar +13 -0
  21. rapidtide/data/examples/src/testdlfilt +25 -0
  22. rapidtide/data/examples/src/testfft +35 -0
  23. rapidtide/data/examples/src/testfileorfloat +37 -0
  24. rapidtide/data/examples/src/testfmri +94 -27
  25. rapidtide/data/examples/src/testfuncs +3 -3
  26. rapidtide/data/examples/src/testglmfilt +8 -6
  27. rapidtide/data/examples/src/testhappy +84 -51
  28. rapidtide/data/examples/src/testinitdelay +19 -0
  29. rapidtide/data/examples/src/testmodels +33 -0
  30. rapidtide/data/examples/src/testnewrefine +26 -0
  31. rapidtide/data/examples/src/testnoiseamp +21 -0
  32. rapidtide/data/examples/src/testppgproc +17 -0
  33. rapidtide/data/examples/src/testrefineonly +22 -0
  34. rapidtide/data/examples/src/testretro +26 -13
  35. rapidtide/data/examples/src/testretrolagtcs +16 -0
  36. rapidtide/data/examples/src/testrolloff +11 -0
  37. rapidtide/data/examples/src/testsimdata +45 -28
  38. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  39. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  40. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  41. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  42. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.png +0 -0
  43. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.txt +1 -0
  44. rapidtide/data/models/model_cnn_pytorch_fulldata/model.pth +0 -0
  45. rapidtide/data/models/model_cnn_pytorch_fulldata/model_meta.json +80 -0
  46. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.png +0 -0
  47. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.txt +1 -0
  48. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model.pth +0 -0
  49. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model_meta.json +138 -0
  50. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.png +0 -0
  51. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.txt +1 -0
  52. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model.pth +0 -0
  53. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model_meta.json +128 -0
  54. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.png +0 -0
  55. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.txt +1 -0
  56. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model.pth +0 -0
  57. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model_meta.json +49 -0
  58. rapidtide/data/models/model_revised_tf2/model.keras +0 -0
  59. rapidtide/data/models/{model_serdar → model_revised_tf2}/model_meta.json +1 -1
  60. rapidtide/data/models/model_serdar2_tf2/model.keras +0 -0
  61. rapidtide/data/models/{model_serdar2 → model_serdar2_tf2}/model_meta.json +1 -1
  62. rapidtide/data/models/model_serdar_tf2/model.keras +0 -0
  63. rapidtide/data/models/{model_revised → model_serdar_tf2}/model_meta.json +1 -1
  64. rapidtide/data/reference/HCP1200v2_MTT_2mm.nii.gz +0 -0
  65. rapidtide/data/reference/HCP1200v2_binmask_2mm.nii.gz +0 -0
  66. rapidtide/data/reference/HCP1200v2_csf_2mm.nii.gz +0 -0
  67. rapidtide/data/reference/HCP1200v2_gray_2mm.nii.gz +0 -0
  68. rapidtide/data/reference/HCP1200v2_graylaghist.json +7 -0
  69. rapidtide/data/reference/HCP1200v2_graylaghist.tsv.gz +0 -0
  70. rapidtide/data/reference/HCP1200v2_laghist.json +7 -0
  71. rapidtide/data/reference/HCP1200v2_laghist.tsv.gz +0 -0
  72. rapidtide/data/reference/HCP1200v2_mask_2mm.nii.gz +0 -0
  73. rapidtide/data/reference/HCP1200v2_maxcorr_2mm.nii.gz +0 -0
  74. rapidtide/data/reference/HCP1200v2_maxtime_2mm.nii.gz +0 -0
  75. rapidtide/data/reference/HCP1200v2_maxwidth_2mm.nii.gz +0 -0
  76. rapidtide/data/reference/HCP1200v2_negmask_2mm.nii.gz +0 -0
  77. rapidtide/data/reference/HCP1200v2_timepercentile_2mm.nii.gz +0 -0
  78. rapidtide/data/reference/HCP1200v2_white_2mm.nii.gz +0 -0
  79. rapidtide/data/reference/HCP1200v2_whitelaghist.json +7 -0
  80. rapidtide/data/reference/HCP1200v2_whitelaghist.tsv.gz +0 -0
  81. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2.xml +131 -0
  82. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_regions.txt +60 -0
  83. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_space-MNI152NLin6Asym_2mm.nii.gz +0 -0
  84. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
  85. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
  86. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  87. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL2_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  88. rapidtide/data/reference/MNI152_T1_1mm_Brain_FAST_seg.nii.gz +0 -0
  89. rapidtide/data/reference/MNI152_T1_1mm_Brain_Mask.nii.gz +0 -0
  90. rapidtide/data/reference/MNI152_T1_2mm_Brain_FAST_seg.nii.gz +0 -0
  91. rapidtide/data/reference/MNI152_T1_2mm_Brain_Mask.nii.gz +0 -0
  92. rapidtide/decorators.py +91 -0
  93. rapidtide/dlfilter.py +2553 -414
  94. rapidtide/dlfiltertorch.py +5201 -0
  95. rapidtide/externaltools.py +328 -13
  96. rapidtide/fMRIData_class.py +178 -0
  97. rapidtide/ffttools.py +168 -0
  98. rapidtide/filter.py +2704 -1462
  99. rapidtide/fit.py +2361 -579
  100. rapidtide/genericmultiproc.py +197 -0
  101. rapidtide/happy_supportfuncs.py +3255 -548
  102. rapidtide/helper_classes.py +590 -1181
  103. rapidtide/io.py +2569 -468
  104. rapidtide/linfitfiltpass.py +784 -0
  105. rapidtide/makelaggedtcs.py +267 -97
  106. rapidtide/maskutil.py +555 -25
  107. rapidtide/miscmath.py +867 -137
  108. rapidtide/multiproc.py +217 -44
  109. rapidtide/patchmatch.py +752 -0
  110. rapidtide/peakeval.py +32 -32
  111. rapidtide/ppgproc.py +2205 -0
  112. rapidtide/qualitycheck.py +353 -40
  113. rapidtide/refinedelay.py +854 -0
  114. rapidtide/refineregressor.py +939 -0
  115. rapidtide/resample.py +725 -204
  116. rapidtide/scripts/__init__.py +1 -0
  117. rapidtide/scripts/{adjustoffset → adjustoffset.py} +7 -2
  118. rapidtide/scripts/{aligntcs → aligntcs.py} +7 -2
  119. rapidtide/scripts/{applydlfilter → applydlfilter.py} +7 -2
  120. rapidtide/scripts/applyppgproc.py +28 -0
  121. rapidtide/scripts/{atlasaverage → atlasaverage.py} +7 -2
  122. rapidtide/scripts/{atlastool → atlastool.py} +7 -2
  123. rapidtide/scripts/{calcicc → calcicc.py} +7 -2
  124. rapidtide/scripts/{calctexticc → calctexticc.py} +7 -2
  125. rapidtide/scripts/{calcttest → calcttest.py} +7 -2
  126. rapidtide/scripts/{ccorrica → ccorrica.py} +7 -2
  127. rapidtide/scripts/delayvar.py +28 -0
  128. rapidtide/scripts/{diffrois → diffrois.py} +7 -2
  129. rapidtide/scripts/{endtidalproc → endtidalproc.py} +7 -2
  130. rapidtide/scripts/{fdica → fdica.py} +7 -2
  131. rapidtide/scripts/{filtnifti → filtnifti.py} +7 -2
  132. rapidtide/scripts/{filttc → filttc.py} +7 -2
  133. rapidtide/scripts/{fingerprint → fingerprint.py} +20 -16
  134. rapidtide/scripts/{fixtr → fixtr.py} +7 -2
  135. rapidtide/scripts/{gmscalc → gmscalc.py} +7 -2
  136. rapidtide/scripts/{happy → happy.py} +7 -2
  137. rapidtide/scripts/{happy2std → happy2std.py} +7 -2
  138. rapidtide/scripts/{happywarp → happywarp.py} +8 -4
  139. rapidtide/scripts/{histnifti → histnifti.py} +7 -2
  140. rapidtide/scripts/{histtc → histtc.py} +7 -2
  141. rapidtide/scripts/{glmfilt → linfitfilt.py} +7 -4
  142. rapidtide/scripts/{localflow → localflow.py} +7 -2
  143. rapidtide/scripts/{mergequality → mergequality.py} +7 -2
  144. rapidtide/scripts/{pairproc → pairproc.py} +7 -2
  145. rapidtide/scripts/{pairwisemergenifti → pairwisemergenifti.py} +7 -2
  146. rapidtide/scripts/{physiofreq → physiofreq.py} +7 -2
  147. rapidtide/scripts/{pixelcomp → pixelcomp.py} +7 -2
  148. rapidtide/scripts/{plethquality → plethquality.py} +7 -2
  149. rapidtide/scripts/{polyfitim → polyfitim.py} +7 -2
  150. rapidtide/scripts/{proj2flow → proj2flow.py} +7 -2
  151. rapidtide/scripts/{rankimage → rankimage.py} +7 -2
  152. rapidtide/scripts/{rapidtide → rapidtide.py} +7 -2
  153. rapidtide/scripts/{rapidtide2std → rapidtide2std.py} +7 -2
  154. rapidtide/scripts/{resamplenifti → resamplenifti.py} +7 -2
  155. rapidtide/scripts/{resampletc → resampletc.py} +7 -2
  156. rapidtide/scripts/retrolagtcs.py +28 -0
  157. rapidtide/scripts/retroregress.py +28 -0
  158. rapidtide/scripts/{roisummarize → roisummarize.py} +7 -2
  159. rapidtide/scripts/{runqualitycheck → runqualitycheck.py} +7 -2
  160. rapidtide/scripts/{showarbcorr → showarbcorr.py} +7 -2
  161. rapidtide/scripts/{showhist → showhist.py} +7 -2
  162. rapidtide/scripts/{showstxcorr → showstxcorr.py} +7 -2
  163. rapidtide/scripts/{showtc → showtc.py} +7 -2
  164. rapidtide/scripts/{showxcorr_legacy → showxcorr_legacy.py} +8 -8
  165. rapidtide/scripts/{showxcorrx → showxcorrx.py} +7 -2
  166. rapidtide/scripts/{showxy → showxy.py} +7 -2
  167. rapidtide/scripts/{simdata → simdata.py} +7 -2
  168. rapidtide/scripts/{spatialdecomp → spatialdecomp.py} +7 -2
  169. rapidtide/scripts/{spatialfit → spatialfit.py} +7 -2
  170. rapidtide/scripts/{spatialmi → spatialmi.py} +7 -2
  171. rapidtide/scripts/{spectrogram → spectrogram.py} +7 -2
  172. rapidtide/scripts/stupidramtricks.py +238 -0
  173. rapidtide/scripts/{synthASL → synthASL.py} +7 -2
  174. rapidtide/scripts/{tcfrom2col → tcfrom2col.py} +7 -2
  175. rapidtide/scripts/{tcfrom3col → tcfrom3col.py} +7 -2
  176. rapidtide/scripts/{temporaldecomp → temporaldecomp.py} +7 -2
  177. rapidtide/scripts/{testhrv → testhrv.py} +1 -1
  178. rapidtide/scripts/{threeD → threeD.py} +7 -2
  179. rapidtide/scripts/{tidepool → tidepool.py} +7 -2
  180. rapidtide/scripts/{variabilityizer → variabilityizer.py} +7 -2
  181. rapidtide/simFuncClasses.py +2113 -0
  182. rapidtide/simfuncfit.py +312 -108
  183. rapidtide/stats.py +579 -247
  184. rapidtide/tests/.coveragerc +27 -6
  185. rapidtide-2.9.5.data/scripts/fdica → rapidtide/tests/cleanposttest +4 -6
  186. rapidtide/tests/happycomp +9 -0
  187. rapidtide/tests/resethappytargets +1 -1
  188. rapidtide/tests/resetrapidtidetargets +1 -1
  189. rapidtide/tests/resettargets +1 -1
  190. rapidtide/tests/runlocaltest +3 -3
  191. rapidtide/tests/showkernels +1 -1
  192. rapidtide/tests/test_aliasedcorrelate.py +4 -4
  193. rapidtide/tests/test_aligntcs.py +1 -1
  194. rapidtide/tests/test_calcicc.py +1 -1
  195. rapidtide/tests/test_cleanregressor.py +184 -0
  196. rapidtide/tests/test_congrid.py +70 -81
  197. rapidtide/tests/test_correlate.py +1 -1
  198. rapidtide/tests/test_corrpass.py +4 -4
  199. rapidtide/tests/test_delayestimation.py +54 -59
  200. rapidtide/tests/test_dlfiltertorch.py +437 -0
  201. rapidtide/tests/test_doresample.py +2 -2
  202. rapidtide/tests/test_externaltools.py +69 -0
  203. rapidtide/tests/test_fastresampler.py +9 -5
  204. rapidtide/tests/test_filter.py +96 -57
  205. rapidtide/tests/test_findmaxlag.py +50 -19
  206. rapidtide/tests/test_fullrunhappy_v1.py +15 -10
  207. rapidtide/tests/test_fullrunhappy_v2.py +19 -13
  208. rapidtide/tests/test_fullrunhappy_v3.py +28 -13
  209. rapidtide/tests/test_fullrunhappy_v4.py +30 -11
  210. rapidtide/tests/test_fullrunhappy_v5.py +62 -0
  211. rapidtide/tests/test_fullrunrapidtide_v1.py +61 -7
  212. rapidtide/tests/test_fullrunrapidtide_v2.py +27 -15
  213. rapidtide/tests/test_fullrunrapidtide_v3.py +28 -8
  214. rapidtide/tests/test_fullrunrapidtide_v4.py +16 -8
  215. rapidtide/tests/test_fullrunrapidtide_v5.py +15 -6
  216. rapidtide/tests/test_fullrunrapidtide_v6.py +142 -0
  217. rapidtide/tests/test_fullrunrapidtide_v7.py +114 -0
  218. rapidtide/tests/test_fullrunrapidtide_v8.py +66 -0
  219. rapidtide/tests/test_getparsers.py +158 -0
  220. rapidtide/tests/test_io.py +59 -18
  221. rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +10 -10
  222. rapidtide/tests/test_mi.py +1 -1
  223. rapidtide/tests/test_miscmath.py +1 -1
  224. rapidtide/tests/test_motionregress.py +5 -5
  225. rapidtide/tests/test_nullcorr.py +6 -9
  226. rapidtide/tests/test_padvec.py +216 -0
  227. rapidtide/tests/test_parserfuncs.py +101 -0
  228. rapidtide/tests/test_phaseanalysis.py +1 -1
  229. rapidtide/tests/test_rapidtideparser.py +59 -53
  230. rapidtide/tests/test_refinedelay.py +296 -0
  231. rapidtide/tests/test_runmisc.py +5 -5
  232. rapidtide/tests/test_sharedmem.py +60 -0
  233. rapidtide/tests/test_simroundtrip.py +132 -0
  234. rapidtide/tests/test_simulate.py +1 -1
  235. rapidtide/tests/test_stcorrelate.py +4 -2
  236. rapidtide/tests/test_timeshift.py +2 -2
  237. rapidtide/tests/test_valtoindex.py +1 -1
  238. rapidtide/tests/test_zRapidtideDataset.py +5 -3
  239. rapidtide/tests/utils.py +10 -9
  240. rapidtide/tidepoolTemplate.py +88 -70
  241. rapidtide/tidepoolTemplate.ui +60 -46
  242. rapidtide/tidepoolTemplate_alt.py +88 -53
  243. rapidtide/tidepoolTemplate_alt.ui +62 -52
  244. rapidtide/tidepoolTemplate_alt_qt6.py +921 -0
  245. rapidtide/tidepoolTemplate_big.py +1125 -0
  246. rapidtide/tidepoolTemplate_big.ui +2386 -0
  247. rapidtide/tidepoolTemplate_big_qt6.py +1129 -0
  248. rapidtide/tidepoolTemplate_qt6.py +793 -0
  249. rapidtide/util.py +1389 -148
  250. rapidtide/voxelData.py +1048 -0
  251. rapidtide/wiener.py +138 -25
  252. rapidtide/wiener2.py +114 -8
  253. rapidtide/workflows/adjustoffset.py +107 -5
  254. rapidtide/workflows/aligntcs.py +86 -3
  255. rapidtide/workflows/applydlfilter.py +231 -89
  256. rapidtide/workflows/applyppgproc.py +540 -0
  257. rapidtide/workflows/atlasaverage.py +309 -48
  258. rapidtide/workflows/atlastool.py +130 -9
  259. rapidtide/workflows/calcSimFuncMap.py +490 -0
  260. rapidtide/workflows/calctexticc.py +202 -10
  261. rapidtide/workflows/ccorrica.py +123 -15
  262. rapidtide/workflows/cleanregressor.py +415 -0
  263. rapidtide/workflows/delayvar.py +1268 -0
  264. rapidtide/workflows/diffrois.py +84 -6
  265. rapidtide/workflows/endtidalproc.py +149 -9
  266. rapidtide/workflows/fdica.py +197 -17
  267. rapidtide/workflows/filtnifti.py +71 -4
  268. rapidtide/workflows/filttc.py +76 -5
  269. rapidtide/workflows/fitSimFuncMap.py +578 -0
  270. rapidtide/workflows/fixtr.py +74 -4
  271. rapidtide/workflows/gmscalc.py +116 -6
  272. rapidtide/workflows/happy.py +1242 -480
  273. rapidtide/workflows/happy2std.py +145 -13
  274. rapidtide/workflows/happy_parser.py +277 -59
  275. rapidtide/workflows/histnifti.py +120 -4
  276. rapidtide/workflows/histtc.py +85 -4
  277. rapidtide/workflows/{glmfilt.py → linfitfilt.py} +128 -14
  278. rapidtide/workflows/localflow.py +329 -29
  279. rapidtide/workflows/mergequality.py +80 -4
  280. rapidtide/workflows/niftidecomp.py +323 -19
  281. rapidtide/workflows/niftistats.py +178 -8
  282. rapidtide/workflows/pairproc.py +99 -5
  283. rapidtide/workflows/pairwisemergenifti.py +86 -3
  284. rapidtide/workflows/parser_funcs.py +1488 -56
  285. rapidtide/workflows/physiofreq.py +139 -12
  286. rapidtide/workflows/pixelcomp.py +211 -9
  287. rapidtide/workflows/plethquality.py +105 -23
  288. rapidtide/workflows/polyfitim.py +159 -19
  289. rapidtide/workflows/proj2flow.py +76 -3
  290. rapidtide/workflows/rankimage.py +115 -8
  291. rapidtide/workflows/rapidtide.py +1833 -1919
  292. rapidtide/workflows/rapidtide2std.py +101 -3
  293. rapidtide/workflows/rapidtide_parser.py +607 -372
  294. rapidtide/workflows/refineDelayMap.py +249 -0
  295. rapidtide/workflows/refineRegressor.py +1215 -0
  296. rapidtide/workflows/regressfrommaps.py +308 -0
  297. rapidtide/workflows/resamplenifti.py +86 -4
  298. rapidtide/workflows/resampletc.py +92 -4
  299. rapidtide/workflows/retrolagtcs.py +442 -0
  300. rapidtide/workflows/retroregress.py +1501 -0
  301. rapidtide/workflows/roisummarize.py +176 -7
  302. rapidtide/workflows/runqualitycheck.py +72 -7
  303. rapidtide/workflows/showarbcorr.py +172 -16
  304. rapidtide/workflows/showhist.py +87 -3
  305. rapidtide/workflows/showstxcorr.py +161 -4
  306. rapidtide/workflows/showtc.py +172 -10
  307. rapidtide/workflows/showxcorrx.py +250 -62
  308. rapidtide/workflows/showxy.py +186 -16
  309. rapidtide/workflows/simdata.py +418 -112
  310. rapidtide/workflows/spatialfit.py +83 -8
  311. rapidtide/workflows/spatialmi.py +252 -29
  312. rapidtide/workflows/spectrogram.py +306 -33
  313. rapidtide/workflows/synthASL.py +157 -6
  314. rapidtide/workflows/tcfrom2col.py +77 -3
  315. rapidtide/workflows/tcfrom3col.py +75 -3
  316. rapidtide/workflows/tidepool.py +3829 -666
  317. rapidtide/workflows/utils.py +45 -19
  318. rapidtide/workflows/utils_doc.py +293 -0
  319. rapidtide/workflows/variabilityizer.py +118 -5
  320. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/METADATA +30 -223
  321. rapidtide-3.1.3.dist-info/RECORD +393 -0
  322. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/WHEEL +1 -1
  323. rapidtide-3.1.3.dist-info/entry_points.txt +65 -0
  324. rapidtide-3.1.3.dist-info/top_level.txt +2 -0
  325. rapidtide/calcandfitcorrpairs.py +0 -262
  326. rapidtide/data/examples/src/testoutputsize +0 -45
  327. rapidtide/data/models/model_revised/model.h5 +0 -0
  328. rapidtide/data/models/model_serdar/model.h5 +0 -0
  329. rapidtide/data/models/model_serdar2/model.h5 +0 -0
  330. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm.nii.gz +0 -0
  331. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  332. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm.nii.gz +0 -0
  333. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  334. rapidtide/data/reference/HCP1200_binmask_2mm_2009c_asym.nii.gz +0 -0
  335. rapidtide/data/reference/HCP1200_lag_2mm_2009c_asym.nii.gz +0 -0
  336. rapidtide/data/reference/HCP1200_mask_2mm_2009c_asym.nii.gz +0 -0
  337. rapidtide/data/reference/HCP1200_negmask_2mm_2009c_asym.nii.gz +0 -0
  338. rapidtide/data/reference/HCP1200_sigma_2mm_2009c_asym.nii.gz +0 -0
  339. rapidtide/data/reference/HCP1200_strength_2mm_2009c_asym.nii.gz +0 -0
  340. rapidtide/glmpass.py +0 -434
  341. rapidtide/refine_factored.py +0 -641
  342. rapidtide/scripts/retroglm +0 -23
  343. rapidtide/workflows/glmfrommaps.py +0 -202
  344. rapidtide/workflows/retroglm.py +0 -643
  345. rapidtide-2.9.5.data/scripts/adjustoffset +0 -23
  346. rapidtide-2.9.5.data/scripts/aligntcs +0 -23
  347. rapidtide-2.9.5.data/scripts/applydlfilter +0 -23
  348. rapidtide-2.9.5.data/scripts/atlasaverage +0 -23
  349. rapidtide-2.9.5.data/scripts/atlastool +0 -23
  350. rapidtide-2.9.5.data/scripts/calcicc +0 -22
  351. rapidtide-2.9.5.data/scripts/calctexticc +0 -23
  352. rapidtide-2.9.5.data/scripts/calcttest +0 -22
  353. rapidtide-2.9.5.data/scripts/ccorrica +0 -23
  354. rapidtide-2.9.5.data/scripts/diffrois +0 -23
  355. rapidtide-2.9.5.data/scripts/endtidalproc +0 -23
  356. rapidtide-2.9.5.data/scripts/filtnifti +0 -23
  357. rapidtide-2.9.5.data/scripts/filttc +0 -23
  358. rapidtide-2.9.5.data/scripts/fingerprint +0 -593
  359. rapidtide-2.9.5.data/scripts/fixtr +0 -23
  360. rapidtide-2.9.5.data/scripts/glmfilt +0 -24
  361. rapidtide-2.9.5.data/scripts/gmscalc +0 -22
  362. rapidtide-2.9.5.data/scripts/happy +0 -25
  363. rapidtide-2.9.5.data/scripts/happy2std +0 -23
  364. rapidtide-2.9.5.data/scripts/happywarp +0 -350
  365. rapidtide-2.9.5.data/scripts/histnifti +0 -23
  366. rapidtide-2.9.5.data/scripts/histtc +0 -23
  367. rapidtide-2.9.5.data/scripts/localflow +0 -23
  368. rapidtide-2.9.5.data/scripts/mergequality +0 -23
  369. rapidtide-2.9.5.data/scripts/pairproc +0 -23
  370. rapidtide-2.9.5.data/scripts/pairwisemergenifti +0 -23
  371. rapidtide-2.9.5.data/scripts/physiofreq +0 -23
  372. rapidtide-2.9.5.data/scripts/pixelcomp +0 -23
  373. rapidtide-2.9.5.data/scripts/plethquality +0 -23
  374. rapidtide-2.9.5.data/scripts/polyfitim +0 -23
  375. rapidtide-2.9.5.data/scripts/proj2flow +0 -23
  376. rapidtide-2.9.5.data/scripts/rankimage +0 -23
  377. rapidtide-2.9.5.data/scripts/rapidtide +0 -23
  378. rapidtide-2.9.5.data/scripts/rapidtide2std +0 -23
  379. rapidtide-2.9.5.data/scripts/resamplenifti +0 -23
  380. rapidtide-2.9.5.data/scripts/resampletc +0 -23
  381. rapidtide-2.9.5.data/scripts/retroglm +0 -23
  382. rapidtide-2.9.5.data/scripts/roisummarize +0 -23
  383. rapidtide-2.9.5.data/scripts/runqualitycheck +0 -23
  384. rapidtide-2.9.5.data/scripts/showarbcorr +0 -23
  385. rapidtide-2.9.5.data/scripts/showhist +0 -23
  386. rapidtide-2.9.5.data/scripts/showstxcorr +0 -23
  387. rapidtide-2.9.5.data/scripts/showtc +0 -23
  388. rapidtide-2.9.5.data/scripts/showxcorr_legacy +0 -536
  389. rapidtide-2.9.5.data/scripts/showxcorrx +0 -23
  390. rapidtide-2.9.5.data/scripts/showxy +0 -23
  391. rapidtide-2.9.5.data/scripts/simdata +0 -23
  392. rapidtide-2.9.5.data/scripts/spatialdecomp +0 -23
  393. rapidtide-2.9.5.data/scripts/spatialfit +0 -23
  394. rapidtide-2.9.5.data/scripts/spatialmi +0 -23
  395. rapidtide-2.9.5.data/scripts/spectrogram +0 -23
  396. rapidtide-2.9.5.data/scripts/synthASL +0 -23
  397. rapidtide-2.9.5.data/scripts/tcfrom2col +0 -23
  398. rapidtide-2.9.5.data/scripts/tcfrom3col +0 -23
  399. rapidtide-2.9.5.data/scripts/temporaldecomp +0 -23
  400. rapidtide-2.9.5.data/scripts/threeD +0 -236
  401. rapidtide-2.9.5.data/scripts/tidepool +0 -23
  402. rapidtide-2.9.5.data/scripts/variabilityizer +0 -23
  403. rapidtide-2.9.5.dist-info/RECORD +0 -357
  404. rapidtide-2.9.5.dist-info/top_level.txt +0 -86
  405. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info/licenses}/LICENSE +0 -0
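Wheels are ordinary zip archives, so the file-level summary above can be reproduced locally once both releases are downloaded (e.g. with pip download rapidtide==2.9.5 and pip download rapidtide==3.1.3). A minimal sketch, assuming both wheels sit in the working directory; the wheel filenames are taken from the header above, everything else is illustrative:

# Compare the file lists of the two rapidtide wheels (wheels are zip files).
import zipfile

old = set(zipfile.ZipFile("rapidtide-2.9.5-py3-none-any.whl").namelist())
new = set(zipfile.ZipFile("rapidtide-3.1.3-py3-none-any.whl").namelist())

for name in sorted(new - old):
    print(f"added:   {name}")
for name in sorted(old - new):
    print(f"removed: {name}")
print(f"{len(new & old)} paths present in both wheels")

Note that renamed files (the {old → new} entries in the listing) show up in this sketch as one removal plus one addition; the registry view pairs them for you.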
rapidtide/fit.py CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #
-# Copyright 2016-2024 Blaise Frederick
+# Copyright 2016-2025 Blaise Frederick
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,181 +18,273 @@
 #
 import sys
 import warnings
+from typing import Any, Callable, Optional, Tuple, Union
 
 import matplotlib.pyplot as plt
 import numpy as np
 import scipy as sp
 import scipy.special as sps
-import tqdm
+import statsmodels.api as sm
 from numpy.polynomial import Polynomial
+from numpy.typing import ArrayLike, NDArray
+from scipy import signal
 from scipy.optimize import curve_fit
 from scipy.signal import find_peaks, hilbert
 from scipy.stats import entropy, moment
 from sklearn.linear_model import LinearRegression
 from statsmodels.robust import mad
+from statsmodels.tsa.ar_model import AutoReg, ar_select_order
+from tqdm import tqdm
 
+import rapidtide.miscmath as tide_math
 import rapidtide.util as tide_util
+from rapidtide.decorators import conditionaljit, conditionaljit2
 
 # ---------------------------------------- Global constants -------------------------------------------
 defaultbutterorder = 6
 MAXLINES = 10000000
-donotbeaggressive = True
-
-# ----------------------------------------- Conditional imports ---------------------------------------
-try:
-    from memory_profiler import profile
-
-    memprofilerexists = True
-except ImportError:
-    memprofilerexists = False
-
-try:
-    from numba import jit
-except ImportError:
-    donotusenumba = True
-else:
-    donotusenumba = False
-
-
-def conditionaljit():
-    def resdec(f):
-        if donotusenumba:
-            return f
-        return jit(f, nopython=True)
-
-    return resdec
-
-
-def conditionaljit2():
-    def resdec(f):
-        if donotusenumba or donotbeaggressive:
-            return f
-        return jit(f, nopython=True)
-
-    return resdec
-
-
-def disablenumba():
-    global donotusenumba
-    donotusenumba = True
 
 
 # --------------------------- Fitting functions -------------------------------------------------
-def gaussresidualssk(p, y, x):
-    """
-
-    Parameters
-    ----------
-    p
-    y
-    x
-
-    Returns
-    -------
-
+def gaussskresiduals(p: NDArray, y: NDArray, x: NDArray) -> NDArray:
     """
-    err = y - gausssk_eval(x, p)
-    return err
+    Calculate residuals for skewed Gaussian fit.
 
-
-def gaussskresiduals(p, y, x):
-    """
+    This function computes the residuals (observed values minus fitted values)
+    for a skewed Gaussian model. The residuals are used to assess the quality
+    of the fit and are commonly used in optimization routines.
 
     Parameters
     ----------
-    p
-    y
-    x
+    p : NDArray
+        Skewed Gaussian parameters [amplitude, center, width, skewness]
+    y : NDArray
+        Observed y values
+    x : NDArray
+        x values
 
     Returns
     -------
-
+    residuals : NDArray
+        Residuals (y - fitted values) for the skewed Gaussian model
+
+    Notes
+    -----
+    The function relies on the `gausssk_eval` function to compute the fitted
+    values of the skewed Gaussian model given the parameters and x values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(-5, 5, 100)
+    >>> p = np.array([1.0, 0.0, 1.0, 0.5])  # amplitude, center, width, skewness
+    >>> y = gausssk_eval(x, p) + np.random.normal(0, 0.1, len(x))
+    >>> residuals = gaussskresiduals(p, y, x)
+    >>> print(f"Mean residual: {np.mean(residuals):.6f}")
     """
     return y - gausssk_eval(x, p)
 
 
 @conditionaljit()
-def gaussresiduals(p, y, x):
+def gaussresiduals(p: NDArray, y: NDArray, x: NDArray) -> NDArray:
     """
+    Calculate residuals for Gaussian fit.
+
+    This function computes the residuals (observed values minus fitted values)
+    for a Gaussian function with parameters [amplitude, center, width].
 
     Parameters
     ----------
-    p
-    y
-    x
+    p : NDArray
+        Gaussian parameters [amplitude, center, width]
+    y : NDArray
+        Observed y values
+    x : NDArray
+        x values
 
     Returns
     -------
-
+    NDArray
+        Residuals (y - fitted values) where fitted values are calculated as:
+        y_fit = amplitude * exp(-((x - center) ** 2) / (2 * width ** 2))
+
+    Notes
+    -----
+    The Gaussian function is defined as:
+    f(x) = amplitude * exp(-((x - center) ** 2) / (2 * width ** 2))
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> p = np.array([1.0, 0.0, 0.5])  # amplitude=1.0, center=0.0, width=0.5
+    >>> y = np.array([0.5, 0.8, 1.0, 0.8, 0.5])
+    >>> x = np.linspace(-2, 2, 5)
+    >>> residuals = gaussresiduals(p, y, x)
+    >>> print(residuals)
     """
     return y - p[0] * np.exp(-((x - p[1]) ** 2) / (2.0 * p[2] * p[2]))
 
 
-def trapezoidresiduals(p, y, x, toplength):
+def trapezoidresiduals(p: NDArray, y: NDArray, x: NDArray, toplength: float) -> NDArray:
     """
+    Calculate residuals for trapezoid fit.
+
+    This function computes the residuals (observed values minus fitted values) for a trapezoid
+    function fit. The trapezoid is defined by amplitude, center, and width parameters, with
+    a specified flat top length.
 
     Parameters
     ----------
-    p
-    y
-    x
-    toplength
+    p : NDArray
+        Trapezoid parameters [amplitude, center, width]
+    y : NDArray
+        Observed y values
+    x : NDArray
+        x values
+    toplength : float
+        Length of the flat top of the trapezoid
 
     Returns
     -------
-
+    residuals : NDArray
+        Residuals (y - fitted values)
+
+    Notes
+    -----
+    The function uses `trapezoid_eval_loop` to evaluate the trapezoid function with the
+    given parameters and returns the difference between observed and predicted values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(0, 10, 100)
+    >>> y = trapezoid_eval_loop(x, 2.0, [1.0, 5.0, 3.0]) + np.random.normal(0, 0.1, 100)
+    >>> p = [1.0, 5.0, 3.0]
+    >>> residuals = trapezoidresiduals(p, y, x, 2.0)
     """
     return y - trapezoid_eval_loop(x, toplength, p)
 
 
-def risetimeresiduals(p, y, x):
+def risetimeresiduals(p: NDArray, y: NDArray, x: NDArray) -> NDArray:
     """
+    Calculate residuals for rise time fit.
+
+    This function computes the residuals between observed data and fitted rise time model
+    by subtracting the evaluated model from the observed values.
 
     Parameters
     ----------
-    p
-    y
-    x
+    p : NDArray
+        Rise time parameters [amplitude, start, rise time] where:
+        - amplitude: peak value of the rise time curve
+        - start: starting time of the rise
+        - rise time: time constant for the rise process
+    y : NDArray
+        Observed y values (dependent variable)
+    x : NDArray
+        x values (independent variable, typically time)
 
     Returns
     -------
-
+    residuals : NDArray
+        Residuals (y - fitted values) representing the difference between
+        observed data and model predictions
+
+    Notes
+    -----
+    This function assumes the existence of a `risetime_eval_loop` function that
+    evaluates the rise time model at given x values with parameters p.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> p = np.array([1.0, 0.0, 0.5])
+    >>> y = np.array([0.1, 0.3, 0.7, 0.9])
+    >>> x = np.array([0.0, 0.2, 0.4, 0.6])
+    >>> residuals = risetimeresiduals(p, y, x)
+    >>> print(residuals)
     """
     return y - risetime_eval_loop(x, p)
 
 
-def gausssk_eval(x, p):
+def gausssk_eval(x: NDArray, p: NDArray) -> NDArray:
     """
+    Evaluate a skewed Gaussian function.
+
+    This function computes a skewed Gaussian distribution using the method described
+    by Azzalini and Dacunha (1996) for generating skewed normal distributions.
 
     Parameters
     ----------
-    x
-    p
+    x : NDArray
+        x values at which to evaluate the function
+    p : NDArray
+        Skewed Gaussian parameters [amplitude, center, width, skewness]
+        - amplitude: scaling factor for the peak height
+        - center: location parameter (mean of the underlying normal distribution)
+        - width: scale parameter (standard deviation of the underlying normal distribution)
+        - skewness: skewness parameter (controls the asymmetry of the distribution)
 
     Returns
     -------
-
+    y : NDArray
+        Evaluated skewed Gaussian values
+
+    Notes
+    -----
+    The skewed Gaussian is defined as:
+    f(x) = amplitude * φ((x-center)/width) * Φ(skewness * (x-center)/width)
+    where φ is the standard normal PDF and Φ is the standard normal CDF.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(-5, 5, 100)
+    >>> params = [1.0, 0.0, 1.0, 2.0]  # amplitude, center, width, skewness
+    >>> y = gausssk_eval(x, params)
     """
     t = (x - p[1]) / p[2]
     return p[0] * sp.stats.norm.pdf(t) * sp.stats.norm.cdf(p[3] * t)
 
 
 # @conditionaljit()
-def kaiserbessel_eval(x, p):
+def kaiserbessel_eval(x: NDArray, p: NDArray) -> NDArray:
     """
 
+    Evaluate the Kaiser-Bessel window function.
+
+    This function computes the Kaiser-Bessel window function, which is commonly used in
+    signal processing and medical imaging applications for gridding and convolution operations.
+    The window is defined by parameters alpha (or beta) and tau (or W/2).
+
     Parameters
     ----------
-    x: array-like
-        arguments to the KB function
-    p: array-like
+    x : NDArray
+        Arguments to the KB function, typically representing spatial or frequency coordinates
+    p : NDArray
         The Kaiser-Bessel window parameters [alpha, tau] (wikipedia) or [beta, W/2] (Jackson, J. I., Meyer, C. H.,
         Nishimura, D. G. & Macovski, A. Selection of a convolution function for Fourier inversion using gridding
         [computerised tomography application]. IEEE Trans. Med. Imaging 10, 473–478 (1991))
 
     Returns
     -------
+    NDArray
+        The evaluated Kaiser-Bessel window function values corresponding to input x
+
+    Notes
+    -----
+    The Kaiser-Bessel window is defined as:
+    KB(x) = I0(α√(1-(x/τ)²)) / (τ * I0(α)) for |x| ≤ τ
+    KB(x) = 0 for |x| > τ
+
+    where I0 is the zeroth-order modified Bessel function of the first kind.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(-1, 1, 100)
+    >>> p = np.array([4.0, 0.5])  # alpha=4.0, tau=0.5
+    >>> result = kaiserbessel_eval(x, p)
     """
     normfac = sps.i0(p[0] * np.sqrt(1.0 - np.square((0.0 / p[1])))) / p[1]
     sqrtargs = 1.0 - np.square((x / p[1]))
@@ -205,33 +297,83 @@ def kaiserbessel_eval(x, p):
 
 
 @conditionaljit()
-def gauss_eval(x, p):
+def gauss_eval(
+    x: NDArray[np.floating[Any]], p: NDArray[np.floating[Any]]
+) -> NDArray[np.floating[Any]]:
     """
+    Evaluate a Gaussian function.
+
+    This function computes the values of a Gaussian (normal) distribution
+    at given x points with specified parameters.
 
     Parameters
     ----------
-    x
-    p
+    x : NDArray[np.floating[Any]]
+        x values at which to evaluate the Gaussian function
+    p : NDArray[np.floating[Any]]
+        Gaussian parameters [amplitude, center, width] where:
+        - amplitude: peak height of the Gaussian
+        - center: x-value of the Gaussian center
+        - width: standard deviation of the Gaussian
 
     Returns
     -------
-
+    y : NDArray[np.floating[Any]]
+        Evaluated Gaussian values with the same shape as x
+
+    Notes
+    -----
+    The Gaussian function is defined as:
+    f(x) = amplitude * exp(-((x - center)^2) / (2 * width^2))
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(-5, 5, 100)
+    >>> params = np.array([1.0, 0.0, 1.0])  # amplitude=1, center=0, width=1
+    >>> y = gauss_eval(x, params)
+    >>> print(y.shape)
+    (100,)
     """
     return p[0] * np.exp(-((x - p[1]) ** 2) / (2.0 * p[2] * p[2]))
 
 
-def trapezoid_eval_loop(x, toplength, p):
+def trapezoid_eval_loop(x: NDArray, toplength: float, p: NDArray) -> NDArray:
     """
+    Evaluate a trapezoid function at multiple points using a loop.
+
+    This function evaluates a trapezoid-shaped function at given x values. The trapezoid
+    is defined by its amplitude, center, and total width, with the flat top length
+    specified separately.
 
     Parameters
     ----------
-    x
-    toplength
-    p
+    x : NDArray
+        x values at which to evaluate the function
+    toplength : float
+        Length of the flat top of the trapezoid
+    p : NDArray
+        Trapezoid parameters [amplitude, center, width]
 
     Returns
     -------
-
+    y : NDArray
+        Evaluated trapezoid values
+
+    Notes
+    -----
+    The trapezoid function is defined as:
+    - Zero outside the range [center - width/2, center + width/2]
+    - Linearly increasing from 0 to amplitude in the range [center - width/2, center - width/2 + toplength/2]
+    - Constant at amplitude in the range [center - width/2 + toplength/2, center + width/2 - toplength/2]
+    - Linearly decreasing from amplitude to 0 in the range [center + width/2 - toplength/2, center + width/2]
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(0, 10, 100)
+    >>> p = [1.0, 5.0, 4.0]  # amplitude=1.0, center=5.0, width=4.0
+    >>> result = trapezoid_eval_loop(x, 2.0, p)
     """
     r = np.zeros(len(x), dtype="float64")
     for i in range(0, len(x)):
@@ -239,17 +381,40 @@ def trapezoid_eval_loop(x, toplength, p):
     return r
 
 
-def risetime_eval_loop(x, p):
+def risetime_eval_loop(x: NDArray, p: NDArray) -> NDArray:
     """
+    Evaluate a rise time function.
+
+    This function evaluates a rise time function for a given set of x values and parameters.
+    It iterates through each x value and applies the risetime_eval function to compute
+    the corresponding y values.
 
     Parameters
     ----------
-    x
-    p
+    x : NDArray
+        x values at which to evaluate the function
+    p : NDArray
+        Rise time parameters [amplitude, start, rise time]
 
     Returns
     -------
-
+    y : NDArray
+        Evaluated rise time function values
+
+    Notes
+    -----
+    This function uses a loop-based approach for evaluating the rise time function.
+    For better performance with large arrays, consider using vectorized operations
+    instead of this loop-based implementation.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([0, 1, 2, 3, 4])
+    >>> p = np.array([1.0, 0.0, 1.0])
+    >>> result = risetime_eval_loop(x, p)
+    >>> print(result)
+    [0.         0.63212056 0.86466472 0.95021293 0.98168436]
     """
     r = np.zeros(len(x), dtype="float64")
     for i in range(0, len(x)):
@@ -258,18 +423,51 @@ def risetime_eval_loop(x, p):
     return r
 
 @conditionaljit()
-def trapezoid_eval(x, toplength, p):
+def trapezoid_eval(
+    x: Union[float, NDArray], toplength: float, p: NDArray
+) -> Union[float, NDArray]:
     """
+    Evaluate the trapezoidal function at given points.
+
+    The trapezoidal function is defined as:
+
+    f(x) = A * (1 - exp(-x / tau)) if 0 <= x < L
+
+    f(x) = A * exp(-(x - L) / gamma) if x >= L
+
+    where A, tau, and gamma are parameters, and L is the length of the top plateau.
 
     Parameters
     ----------
-    x
-    toplength
-    p
+    x : float or NDArray
+        The point or vector at which to evaluate the trapezoidal function.
+    toplength : float
+        The length of the top plateau of the trapezoid.
+    p : NDArray
+        A list or tuple of four values [A, tau, gamma, L] where:
+        - A is the amplitude,
+        - tau is the time constant for the rising edge,
+        - gamma is the time constant for the falling edge,
+        - L is the length of the top plateau.
 
     Returns
     -------
-
+    float or NDArray
+        The value of the trapezoidal function at x. Returns a scalar if x is scalar,
+        or an array if x is an array.
+
+    Notes
+    -----
+    This function is vectorized and can handle arrays of input points.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> p = [1.0, 2.0, 3.0, 4.0]  # A=1.0, tau=2.0, gamma=3.0, L=4.0
+    >>> trapezoid_eval(2.0, 4.0, p)
+    0.3934693402873665
+    >>> trapezoid_eval(np.array([1.0, 2.0, 5.0]), 4.0, p)
+    array([0.39346934, 0.63212056, 0.22313016])
     """
     corrx = x - p[0]
     if corrx < 0.0:
@@ -281,17 +479,47 @@ def trapezoid_eval(x, toplength, p):
 
 
 @conditionaljit()
-def risetime_eval(x, p):
+def risetime_eval(
+    x: Union[float, NDArray[np.floating[Any]]], p: NDArray[np.floating[Any]]
+) -> Union[float, NDArray[np.floating[Any]]]:
     """
+    Evaluates the rise time function at a given point.
+
+    The rise time function is defined as:
+
+    f(x) = A * (1 - exp(-x / tau))
+
+    where A and tau are parameters.
 
     Parameters
     ----------
-    x
-    p
+    x : float or NDArray
+        The point at which to evaluate the rise time function.
+    p : NDArray
+        An array of three values [x0, A, tau] where:
+        - x0: offset parameter
+        - A: amplitude parameter
+        - tau: time constant parameter
 
     Returns
     -------
-
+    float or NDArray
+        The value of the rise time function at x. Returns 0.0 if x < x0.
+
+    Notes
+    -----
+    This function is vectorized and can handle arrays of input points.
+    The function implements a shifted exponential rise function commonly used
+    in signal processing and physics applications.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> p = [1.0, 2.0, 0.5]  # x0=1.0, A=2.0, tau=0.5
+    >>> risetime_eval(2.0, p)
+    1.2642411176571153
+    >>> risetime_eval(np.array([0.5, 1.5, 2.5]), p)
+    array([0.        , 0.63212056, 1.26424112])
     """
     corrx = x - p[0]
     if corrx < 0.0:
@@ -301,32 +529,113 @@ def risetime_eval(x, p):
 
 
 def gasboxcar(
-    data,
-    samplerate,
-    firstpeakstart,
-    firstpeakend,
-    secondpeakstart,
-    secondpeakend,
-    risetime=3.0,
-    falltime=3.0,
-):
+    data: NDArray[np.floating[Any]],
+    samplerate: float,
+    firstpeakstart: float,
+    firstpeakend: float,
+    secondpeakstart: float,
+    secondpeakend: float,
+    risetime: float = 3.0,
+    falltime: float = 3.0,
+) -> None:
+    """
+    Apply gas boxcar filtering to the input data.
+
+    This function applies a gas boxcar filtering operation to the provided data array,
+    which is commonly used in gas detection and analysis applications to smooth and
+    enhance specific signal features.
+
+    Parameters
+    ----------
+    data : NDArray
+        Input data array to be filtered
+    samplerate : float
+        Sampling rate of the input data in Hz
+    firstpeakstart : float
+        Start time of the first peak in seconds
+    firstpeakend : float
+        End time of the first peak in seconds
+    secondpeakstart : float
+        Start time of the second peak in seconds
+    secondpeakend : float
+        End time of the second peak in seconds
+    risetime : float, optional
+        Rise time parameter for the boxcar filter in seconds, default is 3.0
+    falltime : float, optional
+        Fall time parameter for the boxcar filter in seconds, default is 3.0
+
+    Returns
+    -------
+    None
+        This function modifies the input data in-place and returns None
+
+    Notes
+    -----
+    The gas boxcar filtering operation is designed to enhance gas detection signals
+    by applying specific filtering parameters based on the peak timing information.
+    The function assumes that the input data is properly formatted and that the
+    time parameters are within the valid range of the data.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(1000)
+    >>> gasboxcar(data, samplerate=100.0, firstpeakstart=10.0,
+    ...           firstpeakend=15.0, secondpeakstart=20.0,
+    ...           secondpeakend=25.0, risetime=2.0, falltime=2.0)
+    """
     return None
 
 
 # generate the polynomial fit timecourse from the coefficients
 @conditionaljit()
-def trendgen(thexvals, thefitcoffs, demean):
+def trendgen(
+    thexvals: NDArray[np.floating[Any]], thefitcoffs: NDArray[np.floating[Any]], demean: bool
+) -> NDArray[np.floating[Any]]:
     """
+    Generate a polynomial trend based on input x-values and coefficients.
+
+    This function constructs a polynomial trend using the provided x-values and
+    a set of polynomial coefficients. The order of the polynomial is determined
+    from the shape of the `thefitcoffs` array. Optionally, a constant term
+    (the highest order coefficient) can be included or excluded from the trend.
 
     Parameters
     ----------
-    thexvals
-    thefitcoffs
-    demean
+    thexvals : NDArray[np.floating[Any]]
+        The x-values (independent variable) at which to evaluate the polynomial trend.
+        Expected to be a numpy array or similar.
+    thefitcoffs : NDArray[np.floating[Any]]
+        A 1D array of polynomial coefficients. The length of this array minus one
+        determines the order of the polynomial. Coefficients are expected to be
+        ordered from the highest power of x down to the constant term (e.g.,
+        [a_n, a_n-1, ..., a_1, a_0] for a polynomial a_n*x^n + ... + a_0).
+    demean : bool
+        If True, the constant term (thefitcoffs[order]) is added to the generated
+        trend. If False, the constant term is excluded, effectively generating
+        a trend that is "demeaned" or centered around zero (assuming the constant
+        term represents the mean or offset).
 
     Returns
     -------
-
+    NDArray[np.floating[Any]]
+        A numpy array containing the calculated polynomial trend, with the same
+        shape as `thexvals`.
+
+    Notes
+    -----
+    This function implicitly assumes that `thexvals` is a numpy array or
+    behaves similarly for element-wise multiplication (`np.multiply`).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.linspace(0, 1, 5)
+    >>> coeffs = np.array([1, 0, 1])  # x^2 + 1
+    >>> trendgen(x, coeffs, demean=True)
+    array([1.    , 1.0625, 1.25  , 1.5625, 2.    ])
+    >>> trendgen(x, coeffs, demean=False)
+    array([-0.    , -0.0625, -0.25  , -0.5625, -1.    ])
     """
     theshape = thefitcoffs.shape
     order = theshape[0] - 1
@@ -342,83 +651,269 @@ def trendgen(thexvals, thefitcoffs, demean):
342
651
 
343
652
 
344
653
  # @conditionaljit()
345
- def detrend(inputdata, order=1, demean=False):
654
+ def detrend(
655
+ inputdata: NDArray[np.floating[Any]], order: int = 1, demean: bool = False
656
+ ) -> NDArray[np.floating[Any]]:
346
657
  """
658
+ Estimates and removes a polynomial trend timecourse.
659
+
660
+ This routine calculates a polynomial defined by a set of coefficients
661
+ at specified time points to create a trend timecourse, and subtracts it
662
+ from the input signal. Optionally, it can remove the mean of the input
663
+ data as well.
347
664
 
348
665
  Parameters
349
666
  ----------
350
- inputdata
351
- order
352
- demean
667
+ inputdata : NDArray[np.floating[Any]]
668
+ A 1D NumPy array of input data from which the trend will be removed.
669
+ order : int, optional
670
+ The order of the polynomial to fit to the data. Default is 1 (linear).
671
+ demean : bool, optional
672
+ If True, the mean of the input data is subtracted before fitting the
673
+ polynomial trend. Default is False.
353
674
 
354
675
  Returns
355
676
  -------
356
-
677
+ NDArray[np.floating[Any]]
678
+ A 1D NumPy array of the detrended data, with the polynomial trend removed.
679
+
680
+ Notes
681
+ -----
682
+ - This function uses `numpy.polynomial.Polynomial.fit` to fit a polynomial
683
+ to the input data and then evaluates it using `trendgen`.
684
+ - If a `RankWarning` is raised during fitting (e.g., due to insufficient
685
+ data or poor conditioning), the function defaults to a zero-order
686
+ polynomial (constant trend).
687
+ - The time points are centered around zero, ranging from -N/2 to N/2,
688
+ where N is the length of the input data.
689
+
690
+ Examples
691
+ --------
692
+ >>> import numpy as np
693
+ >>> data = np.array([1, 2, 3, 4, 5])
694
+ >>> detrended = detrend(data, order=1)
695
+ >>> print(detrended)
696
+ [0. 0. 0. 0. 0.]
357
697
  """
358
698
  thetimepoints = np.arange(0.0, len(inputdata), 1.0) - len(inputdata) / 2.0
359
699
  try:
360
700
  thecoffs = Polynomial.fit(thetimepoints, inputdata, order).convert().coef[::-1]
361
- except np.lib.polynomial.RankWarning:
362
- thecoffs = [0.0, 0.0]
701
+ except np.exceptions.RankWarning:
702
+ thecoffs = np.array([0.0, 0.0])
363
703
  thefittc = trendgen(thetimepoints, thecoffs, demean)
364
704
  return inputdata - thefittc
365
705
 
366
706
 
367
- @conditionaljit()
368
- def findfirstabove(theyvals, thevalue):
707
+ def prewhiten(
708
+ series: NDArray[np.floating[Any]], nlags: Optional[int] = None, debug: bool = False
709
+ ) -> NDArray[np.floating[Any]]:
369
710
  """
711
+ Prewhiten a time series using an AR model estimated via statsmodels.
712
+ The resulting series has the same length as the input.
370
713
 
371
714
  Parameters
372
715
  ----------
373
- theyvals
374
- thevalue
716
+ series : NDArray[np.floating[Any]]
717
+ Input 1D time series data.
718
+ nlags : int, optional
719
+ Order of the autoregressive model. If None, automatically chosen via AIC.
720
+ Default is None.
721
+ debug : bool, optional
722
+ If True, additional debug information may be printed. Default is False.
375
723
 
376
724
  Returns
377
725
  -------
726
+ whitened : NDArray[np.floating[Any]]
727
+ Prewhitened series of the same length as input. The prewhitening removes
728
+ the autoregressive structure from the data, leaving only the residuals.
729
+
730
+ Notes
731
+ -----
732
+ This function fits an AR(p) model to the input series using `statsmodels.tsa.ARIMA`
733
+ and applies the inverse AR filter to prewhiten the data. If `nlags` is not provided,
734
+ the function automatically selects the best model order based on the Akaike Information Criterion (AIC).
735
+
736
+ Examples
737
+ --------
738
+ >>> import numpy as np
739
+ >>> from statsmodels.tsa.arima.model import ARIMA
740
+ >>> series = np.random.randn(100)
741
+ >>> whitened = prewhiten(series)
742
+ >>> print(whitened.shape)
743
+ (100,)
744
+ """
745
+ series = np.asarray(series)
746
+
747
+ # Fit AR(p) model using ARIMA
748
+ if nlags is None:
749
+ best_aic, best_model, best_p = np.inf, None, None
750
+ for p in range(1, min(10, len(series) // 5)):
751
+ try:
752
+ model = sm.tsa.ARIMA(series, order=(p, 0, 0)).fit()
753
+ if model.aic < best_aic:
754
+ best_aic, best_model, best_p = model.aic, model, p
755
+ except Exception:
756
+ continue
757
+ model = best_model
758
+ if model is None:
759
+ raise RuntimeError("Failed to fit any AR model.")
760
+ else:
761
+ model = sm.tsa.ARIMA(series, order=(nlags, 0, 0)).fit()
378
762
 
379
- """
380
- for i in range(0, len(theyvals)):
381
- if theyvals[i] >= thevalue:
382
- return i
383
- return len(theyvals)
763
+ # Extract AR coefficients and apply filter
764
+ ar_params = model.arparams
765
+ b = np.array([1.0]) # numerator (no MA component)
766
+ a = np.r_[1.0, -ar_params] # denominator (AR polynomial)
384
767
 
768
+ # Apply the inverse AR filter (prewhitening)
769
+ whitened = signal.lfilter(b, a, series)
385
770
 
386
- def findtrapezoidfunc(
387
- thexvals,
388
- theyvals,
389
- thetoplength,
390
- initguess=None,
391
- debug=False,
392
- minrise=0.0,
393
- maxrise=200.0,
394
- minfall=0.0,
395
- maxfall=200.0,
396
- minstart=-100.0,
397
- maxstart=100.0,
398
- refine=False,
399
- displayplots=False,
400
- ):
771
+ # return whitened, model
772
+ return whitened
773
+
774
+
775
+ def prewhiten2(
776
+ timecourse: NDArray[np.floating[Any]], nlags: int, debug: bool = False, sel: bool = False
777
+ ) -> NDArray[np.floating[Any]]:
401
778
  """
779
+ Prewhiten a time course using autoregressive modeling.
780
+
781
+ This function applies prewhitening to a time course by fitting an autoregressive
782
+ model and then applying the corresponding filter to remove temporal autocorrelation.
402
783
 
403
784
  Parameters
404
785
  ----------
405
- thexvals
406
- theyvals
407
- thetoplength
408
- initguess
409
- debug
410
- minrise
411
- maxrise
412
- minfall
413
- maxfall
414
- minstart
415
- maxstart
416
- refine
417
- displayplots
786
+ timecourse : NDArray[np.floating[Any]]
787
+ Input time course to be prewhitened, shape (n_times,)
788
+ nlags : int
789
+ Number of lags to use for the autoregressive model
790
+ debug : bool, optional
791
+ If True, print model summary and display diagnostic plots, by default False
792
+ sel : bool, optional
793
+ If True, use automatic lag selection, by default False
418
794
 
419
795
  Returns
420
796
  -------
797
+ NDArray[np.floating[Any]]
798
+ Prewhitened time course with standardized normalization applied
799
+
800
+ Notes
801
+ -----
802
+ The prewhitening process involves:
803
+ 1. Fitting an autoregressive model to the input time course
804
+ 2. Computing filter coefficients from the model parameters
805
+ 3. Applying the filter using scipy.signal.lfilter
806
+ 4. Standardizing the result using tide_math.stdnormalize
807
+
808
+ When `sel=True`, the function uses `ar_select_order` for automatic lag selection
809
+ instead of using the fixed number of lags specified by `nlags`.
810
+
811
+ Examples
812
+ --------
813
+ >>> import numpy as np
814
+ >>> timecourse = np.random.randn(100)
815
+ >>> whitened = prewhiten2(timecourse, nlags=3)
816
+ >>> # With debugging enabled
817
+ >>> whitened = prewhiten2(timecourse, nlags=3, debug=True)
818
+ """
819
+ if not sel:
820
+ ar_model = AutoReg(timecourse, lags=nlags)
821
+ ar_fit = ar_model.fit()
822
+ else:
823
+ ar_model = ar_select_order(timecourse, nlags)
824
+ ar_model.ar_lags
825
+ ar_fit = ar_model.model.fit()
826
+ if debug:
827
+ print(ar_fit.summary())
828
+ fig = plt.figure(figsize=(16, 9))
829
+ fig = ar_fit.plot_diagnostics(fig=fig, lags=nlags)
830
+ plt.show()
831
+ ar_params = ar_fit.params
832
+
833
+ # The prewhitening filter coefficients are 1 for the numerator and
834
+ # (1, -ar_params[1]) for the denominator
835
+ b = [1]
836
+ a = np.insert(-ar_params[1:], 0, 1)
837
+
838
+ # Apply the filter to prewhiten the signal
839
+ return tide_math.stdnormalize(signal.lfilter(b, a, timecourse))
840
+
421
841
 
842
+ def findtrapezoidfunc(
843
+ thexvals: NDArray[np.floating[Any]],
844
+ theyvals: NDArray[np.floating[Any]],
845
+ thetoplength: float,
846
+ initguess: NDArray[np.floating[Any]] | None = None,
847
+ debug: bool = False,
848
+ minrise: float = 0.0,
849
+ maxrise: float = 200.0,
850
+ minfall: float = 0.0,
851
+ maxfall: float = 200.0,
852
+ minstart: float = -100.0,
853
+ maxstart: float = 100.0,
854
+ refine: bool = False,
855
+ displayplots: bool = False,
856
+ ) -> Tuple[float, float, float, float, int]:
857
+ """
858
+ Find the best-fitting trapezoidal function parameters to a data set.
859
+
860
+ This function uses least-squares optimization to fit a trapezoidal function
861
+ defined by `trapezoid_eval` to the input data (`theyvals`), using `thexvals`
862
+ as the independent variable. The shape of the trapezoid is fixed by `thetoplength`.
863
+
864
+ Parameters
865
+ ----------
866
+ thexvals : NDArray[np.floating[Any]]
867
+ Independent variable values (time points) for the data.
868
+ theyvals : NDArray[np.floating[Any]]
869
+ Dependent variable values (signal intensity) corresponding to `thexvals`.
870
+ thetoplength : float
871
+ The length of the top plateau of the trapezoid function.
872
+ initguess : NDArray[np.floating[Any]], optional
873
+ Initial guess for [start, amplitude, risetime, falltime].
874
+ If None, uses defaults based on data statistics.
875
+ debug : bool, optional
876
+ If True, print intermediate values during computation (default: False).
877
+ minrise : float, optional
878
+ Minimum allowed rise time parameter (default: 0.0).
879
+ maxrise : float, optional
880
+ Maximum allowed rise time parameter (default: 200.0).
881
+ minfall : float, optional
882
+ Minimum allowed fall time parameter (default: 0.0).
883
+ maxfall : float, optional
884
+ Maximum allowed fall time parameter (default: 200.0).
885
+ minstart : float, optional
886
+ Minimum allowed start time parameter (default: -100.0).
887
+ maxstart : float, optional
888
+ Maximum allowed start time parameter (default: 100.0).
889
+ refine : bool, optional
890
+ If True, perform additional refinement steps (not implemented in this version).
891
+ displayplots : bool, optional
892
+ If True, display plots during computation (not implemented in this version).
893
+
894
+ Returns
895
+ -------
896
+ tuple of floats
897
+ The fitted parameters [start, amplitude, risetime, falltime] if successful,
898
+ or [0.0, 0.0, 0.0, 0.0] if the solution is outside the valid parameter bounds.
899
+ A fifth value (integer) indicating success (1) or failure (0).
900
+
901
+ Notes
902
+ -----
903
+ The optimization is performed using `scipy.optimize.leastsq` with a residual
904
+ function `trapezoidresiduals`. The function returns a tuple of five elements:
905
+ (start, amplitude, risetime, falltime, success_flag), where success_flag is 1
906
+ if all parameters are within the specified bounds, and 0 otherwise.
907
+
908
+ Examples
909
+ --------
910
+ >>> import numpy as np
911
+ >>> x = np.linspace(0, 10, 100)
912
+ >>> y = trapezoid_eval(x, start=2, amplitude=5, risetime=1, falltime=1, top_length=4)
913
+ >>> y += np.random.normal(0, 0.1, len(y)) # Add noise
914
+ >>> params = findtrapezoidfunc(x, y, thetoplength=4)
915
+ >>> print(params)
916
+ (2.05, 4.98, 1.02, 1.01, 1)
422
917
  """
423
918
  # guess at parameters: risestart, riseamplitude, risetime
424
919
  if initguess is None:
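`trapezoid_eval` and `trapezoidresiduals` are rapidtide internals not shown in this hunk. As an illustration of the same least-squares pattern, here is a hypothetical piecewise-linear trapezoid and residual function wired into scipy.optimize.leastsq; the names and parameterization are assumptions, not the package's API:

    import numpy as np
    from scipy.optimize import leastsq

    def trap(x, start, amp, rise, fall, toplen):
        # piecewise-linear trapezoid: ramp up over 'rise', hold for 'toplen', ramp down over 'fall'
        rise, fall = max(rise, 1e-6), max(fall, 1e-6)   # guard against degenerate ramps
        y = np.zeros_like(x)
        up = (x >= start) & (x < start + rise)
        y[up] = amp * (x[up] - start) / rise
        flat = (x >= start + rise) & (x < start + rise + toplen)
        y[flat] = amp
        down = (x >= start + rise + toplen) & (x < start + rise + toplen + fall)
        y[down] = amp * (1.0 - (x[down] - start - rise - toplen) / fall)
        return y

    def residuals(p, y, x, toplen):
        return y - trap(x, p[0], p[1], p[2], p[3], toplen)

    x = np.linspace(0.0, 20.0, 400)
    y = trap(x, 3.0, 5.0, 1.0, 2.0, 4.0) + np.random.normal(0.0, 0.1, x.size)
    pfit, ier = leastsq(residuals, np.array([2.0, 4.0, 0.5, 1.0]), args=(y, x, 4.0), maxfev=5000)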
@@ -452,35 +947,68 @@ def findtrapezoidfunc(
452
947
 
453
948
 
454
949
  def findrisetimefunc(
455
- thexvals,
456
- theyvals,
457
- initguess=None,
458
- debug=False,
459
- minrise=0.0,
460
- maxrise=200.0,
461
- minstart=-100.0,
462
- maxstart=100.0,
463
- refine=False,
464
- displayplots=False,
465
- ):
466
- """
950
+ thexvals: NDArray[np.floating[Any]],
951
+ theyvals: NDArray[np.floating[Any]],
952
+ initguess: NDArray[np.floating[Any]] | None = None,
953
+ debug: bool = False,
954
+ minrise: float = 0.0,
955
+ maxrise: float = 200.0,
956
+ minstart: float = -100.0,
957
+ maxstart: float = 100.0,
958
+ refine: bool = False,
959
+ displayplots: bool = False,
960
+ ) -> Tuple[float, float, float, int]:
961
+ """
962
+ Find the rise time of a signal by fitting a model to the data.
963
+
964
+ This function fits a rise time model to the provided signal data using least squares
965
+ optimization. It returns the estimated start time, amplitude, and rise time of the signal,
966
+ along with a success flag indicating whether the fit is within specified bounds.
467
967
 
468
968
  Parameters
469
969
  ----------
470
- thexvals
471
- theyvals
472
- initguess
473
- debug
474
- minrise
475
- maxrise
476
- minstart
477
- maxstart
478
- refine
479
- displayplots
970
+ thexvals : NDArray[np.floating[Any]]
971
+ Array of x-axis values (time or independent variable).
972
+ theyvals : NDArray[np.floating[Any]]
973
+ Array of y-axis values (signal or dependent variable).
974
+ initguess : NDArray[np.floating[Any]] | None, optional
975
+ Initial guess for [start_time, amplitude, rise_time]. If None, defaults are used.
976
+ debug : bool, optional
977
+ If True, prints the x and y values during processing (default is False).
978
+ minrise : float, optional
979
+ Minimum allowed rise time (default is 0.0).
980
+ maxrise : float, optional
981
+ Maximum allowed rise time (default is 200.0).
982
+ minstart : float, optional
983
+ Minimum allowed start time (default is -100.0).
984
+ maxstart : float, optional
985
+ Maximum allowed start time (default is 100.0).
986
+ refine : bool, optional
987
+ Placeholder for future refinement logic (default is False).
988
+ displayplots : bool, optional
989
+ Placeholder for future plotting logic (default is False).
480
990
 
481
991
  Returns
482
992
  -------
483
-
993
+ Tuple[float, float, float, int]
994
+ A tuple containing:
995
+ - start_time: Estimated start time of the rise.
996
+ - amplitude: Estimated amplitude of the rise.
997
+ - rise_time: Estimated rise time.
998
+ - success: 1 if the fit is within bounds, 0 otherwise.
999
+
1000
+ Notes
1001
+ -----
1002
+ The function uses `scipy.optimize.leastsq` to perform the fitting. The model being fitted
1003
+ is defined in the `risetimeresiduals` function, which must be defined elsewhere in the code.
1004
+
1005
+ Examples
1006
+ --------
1007
+ >>> import numpy as np
1008
+ >>> x = np.linspace(0, 10, 100)
1009
+ >>> y = np.exp(-x / 2) * np.sin(x)
1010
+ >>> start, amp, rise, success = findrisetimefunc(x, y)
1011
+ >>> print(f"Start: {start}, Amplitude: {amp}, Rise Time: {rise}, Success: {success}")
484
1012
  """
485
1013
  # guess at parameters: risestart, riseamplitude, risetime
486
1014
  if initguess is None:
@@ -508,21 +1036,68 @@ def findrisetimefunc(
508
1036
 
509
1037
 
510
1038
  def territorydecomp(
511
- inputmap, template, atlas, inputmask=None, intercept=True, fitorder=1, debug=False
512
- ):
513
- """
1039
+ inputmap: NDArray[np.floating[Any]],
1040
+ template: NDArray,
1041
+ atlas: NDArray,
1042
+ inputmask: Optional[NDArray] = None,
1043
+ intercept: bool = True,
1044
+ fitorder: int = 1,
1045
+ debug: bool = False,
1046
+ ) -> Tuple[NDArray, NDArray, NDArray]:
1047
+ """
1048
+ Decompose an input map into territories defined by an atlas using polynomial regression.
1049
+
1050
+ This function performs a decomposition of an input map (e.g., a brain image) into
1051
+ distinct regions (territories) as defined by an atlas. For each territory, it fits
1052
+ a polynomial model to the template values and the corresponding data in that region.
1053
+ The resulting coefficients are used to project the model back onto the original map.
514
1054
 
515
1055
  Parameters
516
1056
  ----------
517
- inputmap
518
- atlas
519
- inputmask
520
- fitorder
521
- debug
1057
+ inputmap : NDArray[np.floating[Any]]
1058
+ Input data to be decomposed. Can be 3D or 4D (e.g., time series).
1059
+ template : NDArray
1060
+ Template values corresponding to the spatial locations in `inputmap`.
1061
+ Should have the same shape as `inputmap` (or be broadcastable).
1062
+ atlas : NDArray
1063
+ Atlas defining the territories. Each unique integer value represents a distinct region.
1064
+ Must have the same shape as `inputmap`.
1065
+ inputmask : NDArray, optional
1066
+ Mask to define valid voxels in `inputmap`. If None, all voxels are considered valid.
1067
+ Should have the same shape as `inputmap`.
1068
+ intercept : bool, optional
1069
+ If True, include an intercept term in the polynomial fit (default: True).
1070
+ fitorder : int, optional
1071
+ The order of the polynomial to fit for each territory (default: 1).
1072
+ debug : bool, optional
1073
+ If True, print debugging information during computation (default: False).
522
1074
 
523
1075
  Returns
524
1076
  -------
525
-
1077
+ tuple of NDArray
1078
+ A tuple containing:
1079
+ - fitmap : NDArray
1080
+ The decomposed map with fitted values projected back onto the original spatial locations.
1081
+ - thecoffs : NDArray
1082
+ Array of polynomial coefficients for each territory and map. Shape is (nummaps, numterritories, fitorder+1)
1083
+ if `intercept` is True, or (nummaps, numterritories, fitorder) otherwise.
1084
+ - theR2s : NDArray
1085
+ R-squared values for the fits for each territory and map. Shape is (nummaps, numterritories).
1086
+
1087
+ Notes
1088
+ -----
1089
+ - The function assumes that `inputmap` and `template` are aligned in space.
1090
+ - If `inputmask` is not provided, all voxels are considered valid.
1091
+ - The number of territories is determined by the maximum value in `atlas`.
1092
+ - For each territory, a polynomial regression is performed using the template values as predictors.
1093
+
1094
+ Examples
1095
+ --------
1096
+ >>> import numpy as np
1097
+ >>> inputmap = np.random.rand(10, 10, 10)
1098
+ >>> template = np.random.rand(10, 10, 10)
1099
+ >>> atlas = np.ones((10, 10, 10), dtype=int)
1100
+ >>> fitmap, coeffs, r2s = territorydecomp(inputmap, template, atlas)
526
1101
  """
527
1102
  datadims = len(inputmap.shape)
528
1103
  if datadims > 3:
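A compact sketch of the per-territory fit described above, using a plain numpy first-order polynomial fit in place of rapidtide's mlregress/mlproject machinery:

    import numpy as np

    rng = np.random.default_rng(1)
    template = rng.random((10, 10, 10))
    inputmap = 2.0 * template + 0.5 + 0.05 * rng.standard_normal((10, 10, 10))
    atlas = rng.integers(1, 4, size=(10, 10, 10))    # three territories, labeled 1..3

    fitmap = np.zeros_like(inputmap)
    for label in range(1, int(atlas.max()) + 1):
        voxels = np.where(atlas == label)
        # first-order fit within this territory: inputmap ~ slope * template + intercept
        slope, intercept = np.polyfit(template[voxels], inputmap[voxels], 1)
        fitmap[voxels] = slope * template[voxels] + intercept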
@@ -532,10 +1107,10 @@ def territorydecomp(
532
1107
 
533
1108
  if nummaps > 1:
534
1109
  if inputmask is None:
535
- inputmask = inputmap[:, :, :, 0] * 0.0 + 1.0
1110
+ inputmask = np.ones_like(inputmap[:, :, :, 0])
536
1111
  else:
537
1112
  if inputmask is None:
538
- inputmask = inputmap * 0.0 + 1.0
1113
+ inputmask = np.ones_like(inputmap)
539
1114
 
540
1115
  tempmask = np.where(inputmask > 0.0, 1, 0)
541
1116
  maskdims = len(tempmask.shape)
@@ -544,7 +1119,7 @@ def territorydecomp(
544
1119
  else:
545
1120
  nummasks = 1
546
1121
 
547
- fitmap = inputmap * 0.0
1122
+ fitmap = np.zeros_like(inputmap)
548
1123
 
549
1124
  if intercept:
550
1125
  thecoffs = np.zeros((nummaps, np.max(atlas), fitorder + 1))
@@ -553,7 +1128,7 @@ def territorydecomp(
553
1128
  if debug:
554
1129
  print(f"thecoffs.shape: {thecoffs.shape}")
555
1130
  print(f"intercept: {intercept}, fitorder: {fitorder}")
556
- theRs = np.zeros((nummaps, np.max(atlas)))
1131
+ theR2s = np.zeros((nummaps, np.max(atlas)))
557
1132
  for whichmap in range(nummaps):
558
1133
  if nummaps == 1:
559
1134
  thismap = inputmap
@@ -576,33 +1151,87 @@ def territorydecomp(
576
1151
  evs = []
577
1152
  for order in range(1, fitorder + 1):
578
1153
  evs.append(np.power(template[maskedvoxels], order))
579
- thefit, R = mlregress(evs, thismap[maskedvoxels], intercept=intercept)
1154
+ thefit, R2 = mlregress(
1155
+ np.asarray(evs), thismap[maskedvoxels], intercept=intercept
1156
+ )
580
1157
  thecoffs[whichmap, i - 1, :] = np.asarray(thefit[0]).reshape((-1))
581
- theRs[whichmap, i - 1] = 1.0 * R
1158
+ theR2s[whichmap, i - 1] = 1.0 * R2
582
1159
  thisfit[maskedvoxels] = mlproject(thecoffs[whichmap, i - 1, :], evs, intercept)
583
1160
  else:
584
1161
  thecoffs[whichmap, i - 1, 0] = np.mean(thismap[maskedvoxels])
585
- theRs[whichmap, i - 1] = 1.0
1162
+ theR2s[whichmap, i - 1] = 1.0
586
1163
  thisfit[maskedvoxels] = np.mean(thismap[maskedvoxels])
587
1164
 
588
- return fitmap, thecoffs, theRs
1165
+ return fitmap, thecoffs, theR2s
589
1166
 
590
1167
 
591
1168
  def territorystats(
592
- inputmap, atlas, inputmask=None, entropybins=101, entropyrange=None, debug=False
593
- ):
1169
+ inputmap: NDArray[np.floating[Any]],
1170
+ atlas: NDArray,
1171
+ inputmask: NDArray | None = None,
1172
+ entropybins: int = 101,
1173
+ entropyrange: Tuple[float, float] | None = None,
1174
+ debug: bool = False,
1175
+ ) -> Tuple[NDArray, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]:
594
1176
  """
1177
+ Compute descriptive statistics for regions defined by an atlas within a multi-dimensional input map.
1178
+
1179
+ This function calculates various statistical measures (mean, standard deviation, median, etc.)
1180
+ for each region (territory) defined in the `atlas` array, based on the data in `inputmap`.
1181
+ It supports both single and multi-map inputs, and optionally uses a mask to define valid regions.
595
1182
 
596
1183
  Parameters
597
1184
  ----------
598
- inputmap
599
- atlas
600
- inputmask
601
- debug
1185
+ inputmap : NDArray[np.floating[Any]]
1186
+ Input data array of shape (X, Y, Z) or (X, Y, Z, N), where N is the number of maps.
1187
+ atlas : ndarray
1188
+ Atlas array defining regions of interest, with each region labeled by an integer.
1189
+ Must be the same spatial dimensions as `inputmap`.
1190
+ inputmask : ndarray, optional
1191
+ Boolean or binary mask array of the same shape as `inputmap`. If None, all voxels are considered valid.
1192
+ entropybins : int, default=101
1193
+ Number of bins to use when computing entropy.
1194
+ entropyrange : tuple of float, optional
1195
+ Range (min, max) for histogram binning when computing entropy. If None, uses the full range of data.
1196
+ debug : bool, default=False
1197
+ If True, prints debug information during computation.
602
1198
 
603
1199
  Returns
604
1200
  -------
605
-
1201
+ tuple of ndarray
1202
+ A tuple containing:
1203
+ - statsmap : ndarray
1204
+ Zero-initialized array of the same shape as `inputmap`, used for storing statistics.
1205
+ - themeans : ndarray
1206
+ Array of shape (N, max(atlas)) containing the mean values for each region in each map.
1207
+ - thestds : ndarray
1208
+ Array of shape (N, max(atlas)) containing the standard deviations for each region in each map.
1209
+ - themedians : ndarray
1210
+ Array of shape (N, max(atlas)) containing the median values for each region in each map.
1211
+ - themads : ndarray
1212
+ Array of shape (N, max(atlas)) containing the median absolute deviations for each region in each map.
1213
+ - thevariances : ndarray
1214
+ Array of shape (N, max(atlas)) containing the variance values for each region in each map.
1215
+ theskewnesses : ndarray
1216
+ Array of shape (N, max(atlas)) containing the third central moments (unnormalized skewness) for each region in each map.
1217
+ thekurtoses : ndarray
1218
+ Array of shape (N, max(atlas)) containing the fourth central moments (unnormalized kurtosis) for each region in each map.
1219
+ - theentropies : ndarray
1220
+ Array of shape (N, max(atlas)) containing the entropy values for each region in each map.
1221
+
1222
+ Notes
1223
+ -----
1224
+ - The function supports both 3D and 4D input arrays. For 4D arrays, each map is processed separately.
1225
+ - Entropy is computed using the probability distribution from a histogram of voxel values.
1226
+ - If `inputmask` is not provided, all voxels are considered valid.
1227
+ - The `atlas` labels are expected to start from 1, and regions are indexed accordingly.
1228
+
1229
+ Examples
1230
+ --------
1231
+ >>> import numpy as np
1232
+ >>> inputmap = np.random.rand(10, 10, 10)
1233
+ >>> atlas = np.ones((10, 10, 10), dtype=int)
1234
+ >>> statsmap, means, stds, medians, mads, variances, skewnesses, kurtoses, entropies = territorystats(inputmap, atlas)
606
1235
  """
607
1236
  datadims = len(inputmap.shape)
608
1237
  if datadims > 3:
@@ -612,10 +1241,10 @@ def territorystats(
612
1241
 
613
1242
  if nummaps > 1:
614
1243
  if inputmask is None:
615
- inputmask = inputmap[:, :, :, 0] * 0.0 + 1.0
1244
+ inputmask = np.ones_like(inputmap[:, :, :, 0])
616
1245
  else:
617
1246
  if inputmask is None:
618
- inputmask = inputmap * 0.0 + 1.0
1247
+ inputmask = np.ones_like(inputmap)
619
1248
 
620
1249
  tempmask = np.where(inputmask > 0.0, 1, 0)
621
1250
  maskdims = len(tempmask.shape)
@@ -624,7 +1253,7 @@ def territorystats(
624
1253
  else:
625
1254
  nummasks = 1
626
1255
 
627
- statsmap = inputmap * 0.0
1256
+ statsmap = np.zeros_like(inputmap)
628
1257
 
629
1258
  themeans = np.zeros((nummaps, np.max(atlas)))
630
1259
  thestds = np.zeros((nummaps, np.max(atlas)))
@@ -639,7 +1268,7 @@ def territorystats(
639
1268
  thevoxels = inputmap[np.where(inputmask > 0.0)]
640
1269
  else:
641
1270
  thevoxels = inputmap
642
- entropyrange = [np.min(thevoxels), np.max(thevoxels)]
1271
+ entropyrange = (np.min(thevoxels), np.max(thevoxels))
643
1272
  if debug:
644
1273
  print(f"entropy bins: {entropybins}")
645
1274
  print(f"entropy range: {entropyrange}")
@@ -666,9 +1295,9 @@ def territorystats(
666
1295
  thestds[whichmap, i - 1] = np.std(thismap[maskedvoxels])
667
1296
  themedians[whichmap, i - 1] = np.median(thismap[maskedvoxels])
668
1297
  themads[whichmap, i - 1] = mad(thismap[maskedvoxels])
669
- thevariances[whichmap, i - 1] = moment(thismap[maskedvoxels], moment=2)
670
- theskewnesses[whichmap, i - 1] = moment(thismap[maskedvoxels], moment=3)
671
- thekurtoses[whichmap, i - 1] = moment(thismap[maskedvoxels], moment=4)
1298
+ thevariances[whichmap, i - 1] = moment(thismap[maskedvoxels], order=2)
1299
+ theskewnesses[whichmap, i - 1] = moment(thismap[maskedvoxels], order=3)
1300
+ thekurtoses[whichmap, i - 1] = moment(thismap[maskedvoxels], order=4)
672
1301
  theentropies[whichmap, i - 1] = entropy(
673
1302
  np.histogram(
674
1303
  thismap[maskedvoxels], bins=entropybins, range=entropyrange, density=True
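For reference, the entropy computation follows the call pattern visible above and can be reproduced standalone: a histogram over a fixed range is normalized to a density and passed to scipy.stats.entropy, which renormalizes the bin weights to a probability distribution:

    import numpy as np
    from scipy.stats import entropy

    values = np.random.default_rng(2).random(1000)
    counts, _ = np.histogram(values, bins=101, range=(0.0, 1.0), density=True)
    h = entropy(counts)   # Shannon entropy (natural log) of the binned distribution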
@@ -689,7 +1318,59 @@ def territorystats(
689
1318
 
690
1319
 
691
1320
  @conditionaljit()
692
- def refinepeak_quad(x, y, peakindex, stride=1):
1321
+ def refinepeak_quad(
1322
+ x: NDArray[np.floating[Any]], y: NDArray[np.floating[Any]], peakindex: int, stride: int = 1
1323
+ ) -> Tuple[float, float, float, Optional[bool], bool]:
1324
+ """
1325
+ Refine the location and properties of a peak using quadratic interpolation.
1326
+
1327
+ This function takes a peak index and a set of data points to perform
1328
+ quadratic interpolation around the peak to estimate its precise location,
1329
+ value, and width. It also determines whether the point is a local maximum or minimum.
1330
+
1331
+ Parameters
1332
+ ----------
1333
+ x : NDArray[np.floating[Any]]
1334
+ Independent variable values (e.g., time points).
1335
+ y : NDArray[np.floating[Any]]
1336
+ Dependent variable values (e.g., signal intensity) corresponding to `x`.
1337
+ peakindex : int
1338
+ Index of the peak in the arrays `x` and `y`.
1339
+ stride : int, optional
1340
+ Number of data points to use on either side of the peak for interpolation.
1341
+ Default is 1.
1342
+
1343
+ Returns
1344
+ -------
1345
+ tuple
1346
+ A tuple containing:
1347
+ - peakloc : float
1348
+ The refined location of the peak.
1349
+ - peakval : float
1350
+ The refined value at the peak.
1351
+ - peakwidth : float
1352
+ The estimated width of the peak.
1353
+ - ismax : bool or None
1354
+ True if the point is a local maximum, False if it's a local minimum,
1355
+ and None if the point cannot be determined (e.g., at boundaries).
1356
+ - badfit : bool
1357
+ True if the fit could not be performed due to invalid conditions,
1358
+ such as being at the boundary or having equal values on both sides.
1359
+
1360
+ Notes
1361
+ -----
1362
+ The function uses a quadratic fit to estimate peak properties. It checks for
1363
+ valid conditions before performing the fit, including ensuring that the peak
1364
+ is not at the edge of the data and that it's either a local maximum or minimum.
1365
+
1366
+ Examples
1367
+ --------
1368
+ >>> import numpy as np
1369
+ >>> x = np.linspace(0, 10, 100)
1370
+ >>> y = np.exp(-0.5 * (x - 5)**2) + 0.1 * np.random.random(100)
1371
+ >>> peakloc, peakval, peakwidth, ismax, badfit = refinepeak_quad(x, y, 50, stride=2)
1372
+ >>> print(f"Peak location: {peakloc:.2f}, Peak value: {peakval:.2f}")
1373
+ """
693
1374
  # first make sure this actually is a peak
694
1375
  ismax = None
695
1376
  badfit = False
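The three-point parabolic refinement that functions like this build on can be written compactly. The following is a generic sketch assuming uniform sample spacing, not rapidtide's exact implementation:

    import numpy as np

    def quad_refine(x, y, i):
        # parabola through (x[i-1], y[i-1]), (x[i], y[i]), (x[i+1], y[i+1])
        denom = y[i - 1] - 2.0 * y[i] + y[i + 1]
        if denom == 0.0:
            return x[i], y[i]
        delta = 0.5 * (y[i - 1] - y[i + 1]) / denom
        peakloc = x[i] + delta * (x[1] - x[0])      # assumes uniform spacing
        peakval = y[i] - 0.25 * (y[i - 1] - y[i + 1]) * delta
        return peakloc, peakval

    x = np.linspace(0.0, 10.0, 101)
    y = np.exp(-0.5 * (x - 5.03) ** 2)
    loc, val = quad_refine(x, y, int(np.argmax(y)))   # loc lands near 5.03, between samples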
@@ -720,55 +1401,247 @@ def refinepeak_quad(x, y, peakindex, stride=1):
720
1401
 
721
1402
  @conditionaljit2()
722
1403
  def findmaxlag_gauss(
723
- thexcorr_x,
724
- thexcorr_y,
725
- lagmin,
726
- lagmax,
727
- widthmax,
728
- edgebufferfrac=0.0,
729
- threshval=0.0,
730
- uthreshval=30.0,
731
- debug=False,
732
- tweaklims=True,
733
- zerooutbadfit=True,
734
- refine=False,
735
- maxguess=0.0,
736
- useguess=False,
737
- searchfrac=0.5,
738
- fastgauss=False,
739
- lagmod=1000.0,
740
- enforcethresh=True,
741
- absmaxsigma=1000.0,
742
- absminsigma=0.1,
743
- displayplots=False,
744
- ):
745
- """
1404
+ thexcorr_x: NDArray[np.floating[Any]],
1405
+ thexcorr_y: NDArray[np.floating[Any]],
1406
+ lagmin: float,
1407
+ lagmax: float,
1408
+ widthmax: float,
1409
+ edgebufferfrac: float = 0.0,
1410
+ threshval: float = 0.0,
1411
+ uthreshval: float = 30.0,
1412
+ debug: bool = False,
1413
+ tweaklims: bool = True,
1414
+ zerooutbadfit: bool = True,
1415
+ refine: bool = False,
1416
+ maxguess: float = 0.0,
1417
+ useguess: bool = False,
1418
+ searchfrac: float = 0.5,
1419
+ fastgauss: bool = False,
1420
+ lagmod: float = 1000.0,
1421
+ enforcethresh: bool = True,
1422
+ absmaxsigma: float = 1000.0,
1423
+ absminsigma: float = 0.1,
1424
+ displayplots: bool = False,
1425
+ ) -> Tuple[int, np.float64, np.float64, np.float64, np.uint16, np.uint16, int, int]:
1426
+ """
1427
+ Find the maximum lag in a cross-correlation function by fitting a Gaussian curve to the peak.
1428
+
1429
+ This function locates the peak in a cross-correlation function and optionally fits a Gaussian
1430
+ curve to determine the precise lag time, amplitude, and width. It includes extensive error
1431
+ checking and validation to ensure robust results.
1432
+
1433
+ Parameters
1434
+ ----------
1435
+ thexcorr_x : NDArray[np.floating[Any]]
1436
+ X-axis values (lag times) of the cross-correlation function.
1437
+ thexcorr_y : NDArray[np.floating[Any]]
1438
+ Y-axis values (correlation coefficients) of the cross-correlation function.
1439
+ lagmin : float
1440
+ Minimum allowable lag value in seconds.
1441
+ lagmax : float
1442
+ Maximum allowable lag value in seconds.
1443
+ widthmax : float
1444
+ Maximum allowable width of the Gaussian peak in seconds.
1445
+ edgebufferfrac : float, optional
1446
+ Fraction of array length to exclude from each edge during search. Default is 0.0.
1447
+ threshval : float, optional
1448
+ Minimum correlation threshold for a valid peak. Default is 0.0.
1449
+ uthreshval : float, optional
1450
+ Upper threshold value (currently unused). Default is 30.0.
1451
+ debug : bool, optional
1452
+ Enable debug output showing initial vs final parameter values. Default is False.
1453
+ tweaklims : bool, optional
1454
+ Automatically adjust search limits to avoid edge artifacts. Default is True.
1455
+ zerooutbadfit : bool, optional
1456
+ Set output to zero when fit fails rather than using initial guess. Default is True.
1457
+ refine : bool, optional
1458
+ Perform least-squares refinement of the Gaussian fit. Default is False.
1459
+ maxguess : float, optional
1460
+ Initial guess for maximum lag position. Used when useguess=True. Default is 0.0.
1461
+ useguess : bool, optional
1462
+ Use the provided maxguess instead of finding peak automatically. Default is False.
1463
+ searchfrac : float, optional
1464
+ Fraction of peak height used to determine initial width estimate. Default is 0.5.
1465
+ fastgauss : bool, optional
1466
+ Use fast non-iterative Gaussian fitting (less accurate). Default is False.
1467
+ lagmod : float, optional
1468
+ Modulus for lag values to handle wraparound. Default is 1000.0.
1469
+ enforcethresh : bool, optional
1470
+ Enforce minimum threshold requirements. Default is True.
1471
+ absmaxsigma : float, optional
1472
+ Absolute maximum allowed sigma (width) value. Default is 1000.0.
1473
+ absminsigma : float, optional
1474
+ Absolute minimum allowed sigma (width) value. Default is 0.1.
1475
+ displayplots : bool, optional
1476
+ Show matplotlib plots of data and fitted curve. Default is False.
1477
+
1478
+ Returns
1479
+ -------
1480
+ maxindex : int
1481
+ Array index of the maximum correlation value.
1482
+ maxlag : numpy.float64
1483
+ Time lag at maximum correlation in seconds.
1484
+ maxval : numpy.float64
1485
+ Maximum correlation coefficient value.
1486
+ maxsigma : numpy.float64
1487
+ Width (sigma) of the fitted Gaussian peak.
1488
+ maskval : numpy.uint16
1489
+ Validity mask (1 = valid fit, 0 = invalid fit).
1490
+ failreason : numpy.uint16
1491
+ Bitwise failure reason code. Possible values:
1492
+ - 0x01: Correlation amplitude below threshold
1493
+ - 0x02: Correlation amplitude above maximum (>1.0)
1494
+ - 0x04: Search window too narrow (<3 points)
1495
+ - 0x08: Fitted width exceeds widthmax
1496
+ - 0x10: Fitted lag outside [lagmin, lagmax] range
1497
+ - 0x20: Peak found at edge of search range
1498
+ - 0x40: Fitting procedure failed
1499
+ - 0x80: Initial parameter estimation failed
1500
+ fitstart : int
1501
+ Starting index used for fitting.
1502
+ fitend : int
1503
+ Ending index used for fitting.
1504
+
1505
+ Notes
1506
+ -----
1507
+ - The function assumes cross-correlation data where Y-values represent correlation
1508
+ coefficients (typically in range [-1, 1]).
1509
+ - When refine=False, uses simple peak-finding based on maximum value.
1510
+ - When refine=True, performs least-squares Gaussian fit for sub-bin precision.
1511
+ - All time-related parameters (lagmin, lagmax, widthmax) should be in the same
1512
+ units as thexcorr_x.
1513
+ - The fastgauss option provides faster but less accurate non-iterative fitting.
1514
+
1515
+ Examples
1516
+ --------
1517
+ Basic usage without refinement:
1518
+
1519
+ >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend = \\
1520
+ ... findmaxlag_gauss(lag_times, correlations, -10.0, 10.0, 5.0)
1521
+ >>> if maskval == 1:
1522
+ ... print(f"Peak found at lag: {maxlag:.3f} s, correlation: {maxval:.3f}")
1523
+
1524
+ Advanced usage with refinement:
1525
+
1526
+ >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend = \\
1527
+ ... findmaxlag_gauss(lag_times, correlations, -5.0, 5.0, 2.0,
1528
+ ... refine=True, threshval=0.1, displayplots=True)
1529
+
1530
+ Using an initial guess:
1531
+
1532
+ >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend = \\
1533
+ ... findmaxlag_gauss(lag_times, correlations, -10.0, 10.0, 3.0,
1534
+ ... useguess=True, maxguess=2.5, refine=True)
1535
+ """
746
-
747
- Parameters
748
- ----------
749
- thexcorr_x
750
- thexcorr_y
751
- lagmin
752
- lagmax
753
- widthmax
754
- edgebufferfrac
755
- threshval
756
- uthreshval
757
- debug
758
- tweaklims
759
- zerooutbadfit
760
- refine
761
- maxguess
762
- useguess
763
- searchfrac
764
- fastgauss
765
- lagmod
766
- enforcethresh
767
- displayplots
768
-
769
- Returns
770
- -------
771
-
772
- """
773
1646
  # set initial parameters
774
1647
  # widthmax is in seconds
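The failreason value is a bitmask, so individual causes can be tested with bitwise AND. The flag names below are hypothetical stand-ins for this module's FML_* constants; only the documented bit values are taken from the docstring:

    FML_BADAMPLOW, FML_BADAMPHIGH = 0x01, 0x02
    FML_BADSEARCHWINDOW, FML_BADWIDTH = 0x04, 0x08
    FML_BADLAG, FML_HITEDGE = 0x10, 0x20
    FML_FITFAIL, FML_INITFAIL = 0x40, 0x80

    failreason = FML_BADWIDTH | FML_BADLAG   # e.g. 0x18: width and lag both out of range
    for name, flag in [("BADWIDTH", FML_BADWIDTH), ("BADLAG", FML_BADLAG)]:
        if failreason & flag:
            print(name)                      # prints BADWIDTH and BADLAG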
@@ -788,7 +1661,7 @@ def findmaxlag_gauss(
788
1661
  if tweaklims:
789
1662
  lowerlim = 0
790
1663
  upperlim = numlagbins - 1
791
- while (thexcorr_y[lowerlim + 1] < thexcorr_y[lowerlim]) and (lowerlim + 1) < upperlim:
1664
+ while (lowerlim + 1) <= upperlim and (thexcorr_y[lowerlim + 1] < thexcorr_y[lowerlim]):
792
1665
  lowerlim += 1
793
1666
  while (thexcorr_y[upperlim - 1] < thexcorr_y[upperlim]) and (upperlim - 1) > lowerlim:
794
1667
  upperlim -= 1
@@ -843,7 +1716,9 @@ def findmaxlag_gauss(
843
1716
  if (maxindex - j < lowerlimit) or (j > searchbins):
844
1717
  j -= 1
845
1718
 # This is calculated from first principles, but it's always big by a factor of ~1.4.
846
- # Which makes me think I dropped a factor if sqrt(2). So fix that with a final division
1719
+ # Which makes me think I dropped a factor of sqrt(2). So fix that with a final division.
1720
+ if searchfrac <= 0 or searchfrac >= 1:
1721
+ raise ValueError("searchfrac must be between 0 and 1 (exclusive)")
847
1722
  maxsigma_init = np.float64(
848
1723
  ((i + j + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0)
849
1724
  )
@@ -866,15 +1741,15 @@ def findmaxlag_gauss(
866
1741
  )
867
1742
  if maxsigma_init > widthmax:
868
1743
  failreason += FML_BADWIDTH
869
- maxsigma_init = widthmax
1744
+ maxsigma_init = np.float64(widthmax)
870
1745
  if (maxval_init < threshval) and enforcethresh:
871
1746
  failreason += FML_BADAMPLOW
872
1747
  if maxval_init < 0.0:
873
1748
  failreason += FML_BADAMPLOW
874
- maxval_init = 0.0
1749
+ maxval_init = np.float64(0.0)
875
1750
  if maxval_init > 1.0:
876
1751
  failreason |= FML_BADAMPHIGH
877
- maxval_init = 1.0
1752
+ maxval_init = np.float64(1.0)
878
1753
  if failreason > 0:
879
1754
  maskval = np.uint16(0)
880
1755
  if failreason > 0 and zerooutbadfit:
@@ -899,12 +1774,22 @@ def findmaxlag_gauss(
899
1774
  p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype="float64")
900
1775
 
901
1776
  if fitend - fitstart >= 3:
902
- plsq, dummy = sp.optimize.leastsq(
903
- gaussresiduals, p0, args=(data, X), maxfev=5000
904
- )
905
- maxval = plsq[0]
906
- maxlag = np.fmod((1.0 * plsq[1]), lagmod)
907
- maxsigma = plsq[2]
1777
+ try:
1778
+ plsq, ier = sp.optimize.leastsq(
1779
+ gaussresiduals, p0, args=(data, X), maxfev=5000
1780
+ )
1781
+ if ier not in [1, 2, 3, 4]: # Check for successful convergence
1782
+ maxval = np.float64(0.0)
1783
+ maxlag = np.float64(0.0)
1784
+ maxsigma = np.float64(0.0)
1785
+ else:
1786
+ maxval = plsq[0]
1787
+ maxlag = np.fmod((1.0 * plsq[1]), lagmod)
1788
+ maxsigma = plsq[2]
1789
+ except Exception:
1790
+ maxval = np.float64(0.0)
1791
+ maxlag = np.float64(0.0)
1792
+ maxsigma = np.float64(0.0)
908
1793
  # if maxval > 1.0, fit failed catastrophically, zero out or reset to initial value
909
1794
  # corrected logic for 1.1.6
910
1795
  if (np.fabs(maxval)) > 1.0 or (lagmin > maxlag) or (maxlag > lagmax):
@@ -912,7 +1797,7 @@ def findmaxlag_gauss(
912
1797
  maxval = np.float64(0.0)
913
1798
  maxlag = np.float64(0.0)
914
1799
  maxsigma = np.float64(0.0)
915
- maskval = np.int16(0)
1800
+ maskval = np.uint16(0)
916
1801
  else:
917
1802
  maxval = np.float64(maxval_init)
918
1803
  maxlag = np.float64(maxlag_init)
@@ -922,12 +1807,12 @@ def findmaxlag_gauss(
922
1807
  maxval = np.float64(0.0)
923
1808
  maxlag = np.float64(0.0)
924
1809
  maxsigma = np.float64(0.0)
925
- maskval = np.int16(0)
1810
+ maskval = np.uint16(0)
926
1811
  else:
927
1812
  if maxsigma > absmaxsigma:
928
- maxsigma = absmaxsigma
1813
+ maxsigma = np.float64(absmaxsigma)
929
1814
  else:
930
- maxsigma = absminsigma
1815
+ maxsigma = np.float64(absminsigma)
931
1816
 
932
1817
  else:
933
1818
  maxval = np.float64(maxval_init)
@@ -973,18 +1858,52 @@ def findmaxlag_gauss(
973
1858
 
974
1859
 
975
1860
  @conditionaljit2()
976
- def maxindex_noedge(thexcorr_x, thexcorr_y, bipolar=False):
1861
+ def maxindex_noedge(
1862
+ thexcorr_x: NDArray, thexcorr_y: NDArray, bipolar: bool = False
1863
+ ) -> Tuple[int, float]:
977
1864
  """
1865
+ Find the index of the maximum value in cross-correlation data, avoiding edge effects.
1866
+
1867
+ This function searches for the maximum value in the cross-correlation data while
1868
+ ensuring that the result is not located at the edges of the data array. It handles
1869
+ both unipolar and bipolar cases, returning the index and a flip factor for bipolar
1870
+ cases where the minimum absolute value might be larger than the maximum.
978
1871
 
979
1872
  Parameters
980
1873
  ----------
981
- thexcorr_x
982
- thexcorr_y
983
- bipolar
1874
+ thexcorr_x : NDArray
1875
+ Array containing the x-coordinates of the cross-correlation data
1876
+ thexcorr_y : NDArray
1877
+ Array containing the y-coordinates (cross-correlation values) of the data
1878
+ bipolar : bool, optional
1879
+ If True, considers both positive and negative values when finding the maximum.
1880
+ If False, only considers positive values. Default is False.
984
1881
 
985
1882
  Returns
986
1883
  -------
987
-
1884
+ Tuple[int, float]
1885
+ A tuple containing:
1886
+ - int: The index of the maximum value in the cross-correlation data
1887
+ - float: Flip factor (-1.0 if bipolar case and minimum absolute value is larger,
1888
+ 1.0 otherwise)
1889
+
1890
+ Notes
1891
+ -----
1892
+ The function iteratively adjusts the search range to avoid edge effects by
1893
+ incrementing lowerlim when maxindex is 0, and decrementing upperlim when
1894
+ maxindex equals upperlim. This ensures the returned index is not at the boundaries
1895
+ of the input arrays.
1896
+
1897
+ Examples
1898
+ --------
1899
+ >>> import numpy as np
1900
+ >>> x = np.array([0, 1, 2, 3, 4])
1901
+ >>> y = np.array([0.1, 0.5, 0.8, 0.3, 0.2])
1902
+ >>> index, flip = maxindex_noedge(x, y)
1903
+ >>> print(index)
1904
+ 2
1905
+ >>> print(flip)
1906
+ 1.0
988
1907
  """
989
1908
  lowerlim = 0
990
1909
  upperlim = len(thexcorr_x) - 1
@@ -1013,24 +1932,53 @@ def maxindex_noedge(thexcorr_x, thexcorr_y, bipolar=False):
1013
1932
  return maxindex, flipfac
1014
1933
 
1015
1934
 
1016
- def gaussfitsk(height, loc, width, skewness, xvals, yvals):
1935
+ def gaussfitsk(
1936
+ height: float, loc: float, width: float, skewness: float, xvals: ArrayLike, yvals: ArrayLike
1937
+ ) -> NDArray:
1017
1938
  """
1939
+ Fit a skewed Gaussian function to data using least squares optimization.
1940
+
1941
+ This function performs least squares fitting of a skewed Gaussian model to the
1942
+ provided data points. The model includes parameters for height, location, width,
1943
+ and skewness of the Gaussian distribution.
1018
1944
 
1019
1945
  Parameters
1020
1946
  ----------
1021
- height
1022
- loc
1023
- width
1024
- skewness
1025
- xvals
1026
- yvals
1947
+ height : float
1948
+ The amplitude or height of the Gaussian peak.
1949
+ loc : float
1950
+ The location (mean) of the Gaussian peak.
1951
+ width : float
1952
+ The width (standard deviation) of the Gaussian peak.
1953
+ skewness : float
1954
+ The skewness parameter that controls the asymmetry of the Gaussian.
1955
+ xvals : array-like
1956
+ The x-coordinates of the data points to be fitted.
1957
+ yvals : array-like
1958
+ The y-coordinates of the data points to be fitted.
1027
1959
 
1028
1960
  Returns
1029
1961
  -------
1030
-
1962
+ ndarray
1963
+ Array containing the optimized parameters [height, loc, width, skewness] that
1964
+ best fit the data according to the least squares method.
1965
+
1966
+ Notes
1967
+ -----
1968
+ This function uses `scipy.optimize.leastsq` internally for the optimization
1969
+ process. The fitting is performed using the `gaussskresiduals` residual function
1970
+ which should be defined elsewhere in the codebase.
1971
+
1972
+ Examples
1973
+ --------
1974
+ >>> import numpy as np
1975
+ >>> x = np.linspace(-5, 5, 100)
1976
+ >>> y = gaussfitsk(1.0, 0.0, 1.0, 0.0, x, y_data)
1977
+ >>> print(y)
1978
+ [height_opt, loc_opt, width_opt, skewness_opt]
1031
1979
  """
1032
1980
  plsq, dummy = sp.optimize.leastsq(
1033
- gaussresidualssk,
1981
+ gaussskresiduals,
1034
1982
  np.array([height, loc, width, skewness]),
1035
1983
  args=(yvals, xvals),
1036
1984
  maxfev=5000,
@@ -1038,40 +1986,233 @@ def gaussfitsk(height, loc, width, skewness, xvals, yvals):
1038
1986
  return plsq
1039
1987
 
1040
1988
 
1041
- def gaussfunc(x, height, loc, FWHM):
1989
+ def gaussfunc(x: NDArray, height: float, loc: float, FWHM: float) -> NDArray:
1990
+ """
1991
+ Calculate a Gaussian function.
1992
+
1993
+ This function computes a Gaussian (normal) distribution with specified height,
1994
+ location, and Full Width at Half Maximum (FWHM).
1995
+
1996
+ Parameters
1997
+ ----------
1998
+ x : NDArray
1999
+ Array of values at which to evaluate the Gaussian function.
2000
+ height : float
2001
+ The maximum height of the Gaussian curve.
2002
+ loc : float
2003
+ The location (mean) of the Gaussian curve.
2004
+ FWHM : float
2005
+ The Full Width at Half Maximum of the Gaussian curve.
2006
+
2007
+ Returns
2008
+ -------
2009
+ NDArray
2010
+ Array of Gaussian function values evaluated at x.
2011
+
2012
+ Notes
2013
+ -----
2014
+ The Gaussian function is defined as:
2015
+ f(x) = height * exp(-((x - loc) ** 2) / (2 * (FWHM / 2.355) ** 2))
2016
+
2017
+ The conversion from FWHM to standard deviation (sigma) uses the relationship:
2018
+ sigma = FWHM / (2 * sqrt(2 * log(2))) ≈ FWHM / 2.355
2019
+
2020
+ Examples
2021
+ --------
2022
+ >>> import numpy as np
2023
+ >>> x = np.linspace(-5, 5, 100)
2024
+ >>> y = gaussfunc(x, height=1.0, loc=0.0, FWHM=2.0)
2025
+ >>> print(y.shape)
2026
+ (100,)
2027
+ """
1042
2028
  return height * np.exp(-((x - loc) ** 2) / (2 * (FWHM / 2.355) ** 2))
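The 2.355 in the return line is the rounded FWHM-to-sigma factor quoted in the docstring; it can be checked directly:

    import numpy as np
    print(2.0 * np.sqrt(2.0 * np.log(2.0)))   # 2.3548..., rounded to 2.355 above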
1043
2029
 
1044
2030
 
1045
- def gaussfit2(height, loc, width, xvals, yvals):
2031
+ def gaussfit2(
2032
+ height: float, loc: float, width: float, xvals: NDArray, yvals: NDArray
2033
+ ) -> Tuple[float, float, float]:
2034
+ """
2035
+ Calculate a Gaussian function.
2036
+
2037
+ This function computes a Gaussian (normal) distribution with specified height,
2038
+ location, and Full Width at Half Maximum (FWHM).
2039
+
2040
+ Parameters
2041
+ ----------
2042
+ x : array_like
2043
+ Input values for which to compute the Gaussian function
2044
+ height : float
2045
+ Height (amplitude) of the Gaussian peak
2046
+ loc : float
2047
+ Location (mean) of the Gaussian peak
2048
+ FWHM : float
2049
+ Full Width at Half Maximum of the Gaussian peak
2050
+
2051
+ Returns
2052
+ -------
2053
+ ndarray
2054
+ Array of Gaussian function values computed at input x values
2055
+
2056
+ Notes
2057
+ -----
2058
+ The Gaussian function is computed using the formula:
2059
+ f(x) = height * exp(-((x - loc)^2) / (2 * (FWHM / 2.355)^2))
2060
+
2061
+ The conversion from FWHM to sigma (standard deviation) uses the relationship:
2062
+ sigma = FWHM / 2.355
2063
+
2064
+ Examples
2065
+ --------
2066
+ >>> import numpy as np
2067
+ >>> x = np.linspace(-5, 5, 100)
2068
+ >>> y = gaussfunc(x, height=1.0, loc=0.0, FWHM=2.0)
2069
+ >>> print(y.shape)
2070
+ (100,)
2071
+ """
1046
2072
  popt, pcov = curve_fit(gaussfunc, xvals, yvals, p0=[height, loc, width])
1047
2073
  return popt[0], popt[1], popt[2]
1048
2074
 
1049
2075
 
1050
- def sincfunc(x, height, loc, FWHM, baseline):
2076
+ def sincfunc(x: NDArray, height: float, loc: float, FWHM: float, baseline: float) -> NDArray:
2077
+ """
2078
+ Compute a scaled and shifted sinc function.
2079
+
2080
+ This function evaluates a sinc function with specified height, location,
2081
+ full width at half maximum, and baseline offset. The sinc function is
2082
+ scaled by a factor that relates the FWHM to the sinc function's natural
2083
+ scaling.
2084
+
2085
+ Parameters
2086
+ ----------
2087
+ x : NDArray
2088
+ Input array of values where the function is evaluated.
2089
+ height : float
2090
+ Height of the sinc function peak.
2091
+ loc : float
2092
+ Location (center) of the sinc function peak.
2093
+ FWHM : float
2094
+ Full width at half maximum of the sinc function.
2095
+ baseline : float
2096
+ Baseline offset added to the sinc function values.
2097
+
2098
+ Returns
2099
+ -------
2100
+ NDArray
2101
+ Array of sinc function values with the same shape as input `x`.
2102
+
2103
+ Notes
2104
+ -----
2105
+ The sinc function is defined as sin(πx)/(πx) with the convention that
2106
+ sinc(0) = 1. The scaling factor 3.79098852 is chosen to relate the FWHM
2107
+ to the natural sinc function properties.
2108
+
2109
+ Examples
2110
+ --------
2111
+ >>> import numpy as np
2112
+ >>> x = np.linspace(-5, 5, 100)
2113
+ >>> y = sincfunc(x, height=2.0, loc=0.0, FWHM=1.0, baseline=1.0)
2114
+ >>> print(y.shape)
2115
+ (100,)
2116
+ """
1051
2117
  return height * np.sinc((3.79098852 / (FWHM * np.pi)) * (x - loc)) + baseline
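The magic constant 3.79098852 is twice the argument at which sin(x)/x falls to 1/2, so that the peak has the requested FWHM; a quick check:

    import numpy as np
    half = np.sinc(3.79098852 / (2.0 * np.pi))   # the function half an FWHM from its center
    print(round(float(half), 6))                 # 0.5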
1052
2118
 
1053
2119
 
1054
2120
  # found this sinc fitting routine (and optimization) here:
1055
2121
  # https://stackoverflow.com/questions/49676116/why-cant-scipy-optimize-curve-fit-fit-my-data-using-a-numpy-sinc-function
1056
- def sincfit(height, loc, width, baseline, xvals, yvals):
2122
+ def sincfit(
2123
+ height: float, loc: float, width: float, baseline: float, xvals: NDArray, yvals: NDArray
2124
+ ) -> Tuple[NDArray, NDArray]:
2125
+ """
2126
+ Fit a sinc function to data using non-linear least squares.
2127
+
2128
+ This function wraps `scipy.optimize.curve_fit` with `sincfunc` as the model,
2129
+ using the supplied height, location, width, and baseline as initial guesses.
2130
+
2131
+ Parameters
2132
+ ----------
2133
+ height : float
2134
+ Initial guess for the height of the sinc peak
2135
+ loc : float
2136
+ Initial guess for the location (center) of the sinc peak
2137
+ width : float
2138
+ Initial guess for the FWHM of the sinc peak
2139
+ baseline : float
2140
+ Initial guess for the baseline offset
2141
+ xvals : NDArray
2142
+ The x-coordinates of the data points to be fitted
2143
+ yvals : NDArray
2144
+ The y-coordinates of the data points to be fitted
2145
+
2146
+ Returns
2147
+ -------
2148
+ Tuple[NDArray, NDArray]
2149
+ The optimized parameters [height, loc, FWHM, baseline] and their covariance
2150
+ matrix, as returned by `scipy.optimize.curve_fit`.
2151
+
2152
+ Examples
2153
+ --------
2154
+ >>> import numpy as np
2155
+ >>> x = np.linspace(-5, 5, 100)
2156
+ >>> y = sincfunc(x, height=1.0, loc=0.0, FWHM=2.0, baseline=0.0)
2157
+ >>> popt, pcov = sincfit(1.0, 0.0, 2.0, 0.0, x, y)
+ (100,)
2161
+ """
1057
2162
  popt, pcov = curve_fit(sincfunc, xvals, yvals, p0=[height, loc, width, baseline])
1058
2163
  return popt, pcov
1059
2164
 
1060
2165
 
1061
- def gaussfit(height, loc, width, xvals, yvals):
2166
+ def gaussfit(
2167
+ height: float, loc: float, width: float, xvals: NDArray, yvals: NDArray
2168
+ ) -> Tuple[float, float, float]:
1062
2169
  """
2170
+ Performs a non-linear least squares fit of a Gaussian function to data.
2171
+
2172
+ This routine uses `scipy.optimize.leastsq` to find the optimal parameters
2173
+ (height, location, and width) that best describe a Gaussian curve fitted
2174
+ to the provided `yvals` data against `xvals`. It requires an external
2175
+ `gaussresiduals` function to compute the residuals.
1063
2176
 
1064
2177
  Parameters
1065
2178
  ----------
1066
- height
1067
- loc
1068
- width
1069
- xvals
1070
- yvals
2179
+ height : float
2180
+ Initial guess for the amplitude or peak height of the Gaussian.
2181
+ loc : float
2182
+ Initial guess for the mean (center) of the Gaussian.
2183
+ width : float
2184
+ Initial guess for the standard deviation (width) of the Gaussian.
2185
+ xvals : NDArray
2186
+ The independent variable data points.
2187
+ yvals : NDArray
2188
+ The dependent variable data points to which the Gaussian will be fitted.
1071
2189
 
1072
2190
  Returns
1073
2191
  -------
1074
-
2192
+ tuple of float
2193
+ A tuple containing the fitted parameters:
2194
+ - height: The fitted height (amplitude) of the Gaussian.
2195
+ - loc: The fitted location (mean) of the Gaussian.
2196
+ - width: The fitted width (standard deviation) of the Gaussian.
2197
+
2198
+ Notes
2199
+ -----
2200
+ - This function relies on an external function `gaussresiduals(params, y, x)`
2201
+ which should calculate the difference between the observed `y` values and
2202
+ the Gaussian function evaluated at `x` with the given `params` (height, loc, width).
2203
+ - `scipy.optimize.leastsq` is used for the optimization, which requires
2204
+ `scipy` and `numpy` to be imported (e.g., `import scipy as sp`
2205
+ and `import numpy as np`).
2206
+ - The optimization may fail if initial guesses are too far from the true values
2207
+ or if the data does not well-support a Gaussian fit.
2208
+
2209
+ Examples
2210
+ --------
2211
+ >>> import numpy as np
2212
+ >>> x = np.linspace(-5, 5, 100)
2213
+ >>> y = 2 * np.exp(-0.5 * ((x - 1) / 0.5)**2) + np.random.normal(0, 0.1, 100)
2214
+ >>> height, loc, width = gaussfit(1.0, 0.0, 1.0, x, y)
2215
+ >>> print(f"Fitted height: {height:.2f}, location: {loc:.2f}, width: {width:.2f}")
1075
2216
  """
1076
2217
  plsq, dummy = sp.optimize.leastsq(
1077
2218
  gaussresiduals, np.array([height, loc, width]), args=(yvals, xvals), maxfev=5000
@@ -1079,12 +2220,57 @@ def gaussfit(height, loc, width, xvals, yvals):
1079
2220
  return plsq[0], plsq[1], plsq[2]
1080
2221
 
1081
2222
 
1082
- def gram_schmidt(theregressors, debug=False):
2223
+ def gram_schmidt(theregressors: NDArray, debug: bool = False) -> NDArray:
2224
+ """
2225
+ Performs Gram-Schmidt orthogonalization on a set of vectors.
2226
+
2227
+ This routine takes a set of input vectors (rows of a 2D array) and
2228
+ transforms them into an orthonormal basis using the Gram-Schmidt process.
2229
+ It ensures that the resulting vectors are mutually orthogonal and
2230
+ have a unit norm. Linearly dependent vectors are effectively skipped
2231
+ if their orthogonal component is negligible.
2232
+
2233
+ Parameters
2234
+ ----------
2235
+ theregressors : NDArray
2236
+ A 2D NumPy array where each row represents a vector to be orthogonalized.
2237
+ debug : bool, optional
2238
+ If True, prints debug information about input and output dimensions.
2239
+ Default is False.
2240
+
2241
+ Returns
2242
+ -------
2243
+ NDArray
2244
+ A 2D NumPy array representing the orthonormal basis. Each row is an
2245
+ orthonormal vector. The number of rows may be less than the input if
2246
+ some vectors were linearly dependent.
2247
+
2248
+ Notes
2249
+ -----
2250
+ - The function normalizes each orthogonalized vector to unit length.
2251
+ - A small tolerance (1e-10) is used to check if a vector's orthogonal
2252
+ component is effectively zero, indicating linear dependence.
2253
+ - Requires the `numpy` library for array operations and linear algebra.
2254
+
2255
+ Examples
2256
+ --------
2257
+ >>> import numpy as np
2258
+ >>> vectors = np.array([[2, 1], [3, 4]])
2259
+ >>> basis = gram_schmidt(vectors)
2260
+ >>> print(basis)
2261
+ [[0.89442719 0.4472136 ]
2262
+ [-0.4472136 0.89442719]]
2263
+ """
2264
+
1083
2265
  if debug:
1084
2266
  print("gram_schmidt, input dimensions:", theregressors.shape)
1085
- basis = []
2267
+ basis: list[NDArray] = []
1086
2268
  for i in range(theregressors.shape[0]):
1087
- w = theregressors[i, :] - np.sum(np.dot(theregressors[i, :], b) * b for b in basis)
2269
+ if basis:
2270
+ projections = np.array([np.dot(theregressors[i, :], b) * b for b in basis])
2271
+ w = theregressors[i, :] - np.sum(projections, axis=0)
2272
+ else:
2273
+ w = theregressors[i, :]
1088
2274
  if (np.fabs(w) > 1e-10).any():
1089
2275
  basis.append(w / np.linalg.norm(w))
1090
2276
  outputbasis = np.array(basis)
@@ -1093,8 +2279,54 @@ def gram_schmidt(theregressors, debug=False):
1093
2279
  return outputbasis
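As a quick numerical check on the routine above: for linearly independent inputs the returned basis B should satisfy B @ B.T ≈ I:

    >>> import numpy as np
    >>> basis = gram_schmidt(np.array([[2.0, 1.0], [3.0, 4.0]]))
    >>> np.allclose(basis @ basis.T, np.eye(2))
    True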
1094
2280
 
1095
2281
 
1096
- def mlproject(thefit, theevs, intercept):
1097
- thedest = theevs[0] * 0.0
2282
+ def mlproject(thefit: NDArray, theevs: list, intercept: bool) -> NDArray:
2283
+ """
2284
+ Calculates a linear combination (weighted sum) of explanatory variables.
2285
+
2286
+ This routine computes a predicted output by multiplying a set of
2287
+ explanatory variables by corresponding coefficients and summing the results.
2288
+ It can optionally include an intercept term. This is a common operation
2289
+ in linear regression and other statistical models.
2290
+
2291
+ Parameters
2292
+ ----------
2293
+ thefit : NDArray
2294
+ A 1D array or list of coefficients (weights) to be applied to the
2295
+ explanatory variables. If `intercept` is True, the first element of
2296
+ `thefit` is treated as the intercept.
2297
+ theevs : list of NDArray
2298
+ A list where each element is a 1D NumPy array representing an
2299
+ explanatory variable (feature time series). The length of `theevs`
2300
+ should match the number of non-intercept coefficients in `thefit`.
2301
+ intercept : bool
2302
+ If True, the first element of `thefit` is used as an intercept term,
2303
+ and the remaining elements of `thefit` are applied to `theevs`. If False,
2304
+ no intercept is added, and all elements of `thefit` are applied to
2305
+ `theevs` starting from the first element.
2306
+
2307
+ Returns
2308
+ -------
2309
+ NDArray
2310
+ A 1D NumPy array representing the calculated linear combination.
2311
+ Its length will be the same as the explanatory variables.
2312
+
2313
+ Notes
2314
+ -----
2315
+ The calculation performed is conceptually equivalent to:
2316
+ `output = intercept_term + (coefficient_1 * ev_1) + (coefficient_2 * ev_2) + ...`
2317
+ where `intercept_term` is `thefit[0]` if `intercept` is True, otherwise 0.
2318
+
2319
+ Examples
2320
+ --------
2321
+ >>> import numpy as np
2322
+ >>> thefit = np.array([1.0, 2.0, 3.0])
2323
+ >>> theevs = [np.array([1, 2, 3]), np.array([4, 5, 6])]
2324
+ >>> result = mlproject(thefit, theevs, intercept=True)
2325
+ >>> print(result)
2326
+ [15. 20. 25.]
2327
+ """
2328
+
2329
+ thedest = np.zeros_like(theevs[0])
1098
2330
  if intercept:
1099
2331
  thedest[:] = thefit[0]
1100
2332
  startpt = 1
@@ -1105,29 +2337,107 @@ def mlproject(thefit, theevs, intercept):
1105
2337
  return thedest
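mlproject is the forward counterpart of the regression routines below: fitting and then projecting should reproduce an exactly linear signal. A round-trip sketch, assuming mlregress and mlproject behave as their docstrings describe (non-collinear regressors chosen so the fit is exact):

    >>> import numpy as np
    >>> evs = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 7.0])]
    >>> y = 1.0 + 2.0 * evs[0] + 3.0 * evs[1]
    >>> coffs, R2 = mlregress(np.array(evs), y)
    >>> np.allclose(mlproject(np.asarray(coffs).reshape(-1), evs, True), y)
    True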
1106
2338
 
1107
2339
 
1108
- def mlregress(X, y, intercept=True, debug=False):
2340
+ def olsregress(
2341
+ X: ArrayLike, y: ArrayLike, intercept: bool = True, debug: bool = False
2342
+ ) -> Tuple[NDArray, float]:
1109
2343
  """
2344
+ Perform ordinary least squares regression.
1110
2345
 
1111
2346
  Parameters
1112
2347
  ----------
1113
- x
1114
- y
1115
- intercept
2348
+ X : array-like
2349
+ Independent variables (features) matrix of shape (n_samples, n_features).
2350
+ y : array-like
2351
+ Dependent variable (target) vector of shape (n_samples,).
2352
+ intercept : bool, optional
2353
+ Whether to add a constant term (intercept) to the model. Default is True.
2354
+ debug : bool, optional
2355
+ Whether to enable debug mode. Default is False.
1116
2356
 
1117
2357
  Returns
1118
2358
  -------
2359
+ tuple
2360
+ A tuple containing:
2361
+ - params : ndarray
2362
+ Estimated regression coefficients (including intercept if specified)
2363
+ - R : float
2364
+ Square root of the coefficient of determination (i.e., the multiple correlation coefficient)
2365
+
2366
+ Notes
2367
+ -----
2368
+ This function uses statsmodels OLS regression to fit a linear model.
2369
+ If intercept is True, a constant term is added to the design matrix.
2370
+ The function returns the regression parameters and the square root of R-squared.
2371
+
2372
+ Examples
2373
+ --------
2374
+ >>> import numpy as np
2375
+ >>> X = np.array([[1, 2], [3, 4], [5, 6]])
2376
+ >>> y = np.array([1, 2, 3])
2377
+ >>> params, R = olsregress(X, y)
2378
+ >>> params.shape
2379
+ (3,)
2380
+ """
2381
+ if intercept:
2382
+ X = sm.add_constant(X, prepend=True)
2383
+ model = sm.OLS(y, exog=X)
2384
+ thefit = model.fit()
2385
+ return thefit.params, np.sqrt(thefit.rsquared)
1119
2386
 
2387
+
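For reference, the statsmodels calls wrapped by olsregress can be used directly:

    import numpy as np
    import statsmodels.api as sm

    X = sm.add_constant(np.array([[1.0, 2.0], [3.0, 5.0], [5.0, 6.0]]), prepend=True)
    y = np.array([1.0, 2.0, 3.0])
    fit = sm.OLS(y, X).fit()
    print(fit.params, np.sqrt(fit.rsquared))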
2388
+ # @conditionaljit()
2389
+ def mlregress(
2390
+ X: NDArray[np.floating[Any]],
2391
+ y: NDArray[np.floating[Any]],
2392
+ intercept: bool = True,
2393
+ debug: bool = False,
2394
+ ) -> Tuple[NDArray[np.floating[Any]], float]:
1120
2395
  """
1121
- """Return the coefficients from a multiple linear regression, along with R, the coefficient of determination.
2396
+ Perform multiple linear regression and return coefficients and R-squared value.
1122
2397
 
1123
- x: The independent variables (pxn or nxp).
1124
- y: The dependent variable (1xn or nx1).
1125
- intercept: Specifies whether or not the slope intercept should be considered.
2398
+ This function fits a multiple linear regression model to the input data and
2399
+ returns the regression coefficients (including intercept if specified) along
2400
+ with the coefficient of determination (R-squared).
1126
2401
 
1127
- The routine computes the coefficients (b_0, b_1, ..., b_p) from the data (x,y) under
1128
- the assumption that y = b0 + b_1 * x_1 + b_2 * x_2 + ... + b_p * x_p.
2402
+ Parameters
2403
+ ----------
2404
+ X : NDArray[np.floating[Any]]
2405
+ Input feature matrix of shape (n_samples, n_features) or (n_samples,)
2406
+ If 1D array is provided, it will be treated as a single feature.
2407
+ y : NDArray[np.floating[Any]]
2408
+ Target values of shape (n_samples,) or (n_samples, 1)
2409
+ If 1D array is provided, it will be treated as a single target.
2410
+ intercept : bool, optional
2411
+ Whether to calculate and include intercept term in the model.
2412
+ Default is True.
2413
+ debug : bool, optional
2414
+ If True, print debug information about the input shapes and processing steps.
2415
+ Default is False.
1129
2416
 
1130
- If intercept is False, the routine assumes that b0 = 0 and returns (b_1, b_2, ..., b_p).
2417
+ Returns
2418
+ -------
2419
+ Tuple[NDArray[np.floating[Any]], float]
2420
+ A tuple containing:
2421
+ - coefficients : NDArray[np.floating[Any]], a (1, n_features + 1) matrix where the first
2422
+ element is the intercept (if intercept=True) and subsequent elements
2423
+ are the regression coefficients for each feature
2424
+ - R2 : float, the coefficient of determination (R-squared) of the fitted model
2425
+
2426
+ Notes
2427
+ -----
2428
+ The function automatically handles shape adjustments for input arrays,
2429
+ ensuring that the number of samples in X matches the number of target values in y.
2430
+ If the input X is 1D, it will be converted to 2D. If the shapes don't match initially,
2431
+ the function will attempt to transpose X to match the number of samples in y.
2432
+
2433
+ Examples
2434
+ --------
2435
+ >>> import numpy as np
2436
+ >>> X = np.array([[1, 2], [3, 4], [5, 6]])
2437
+ >>> y = np.array([3, 7, 11])
2438
+ >>> coeffs, r2 = mlregress(X, y)
2439
+ >>> print(f"Coefficients: {coeffs.flatten()}")
2440
+ >>> print(f"R-squared: {r2}")
1131
2441
  """
1132
2442
  if debug:
1133
2443
  print(f"mlregress initial: {X.shape=}, {y.shape=}")
@@ -1154,94 +2464,83 @@ def mlregress(X, y, intercept=True, debug=False):
1154
2464
  reg.fit(X, y)
1155
2465
  coffs = reg.coef_
1156
2466
  theintercept = reg.intercept_
1157
- R = reg.score(X, y)
2467
+ R2 = reg.score(X, y)
1158
2468
  coffs = np.insert(coffs, 0, theintercept, axis=0)
1159
- return np.asmatrix(coffs), R
1160
-
1161
-
1162
- ### I don't remember where this came from. Need to check license
1163
- def mlregress_old(x, y, intercept=True, debug=False):
1164
- """
1165
-
1166
- Parameters
1167
- ----------
1168
- x
1169
- y
1170
- intercept
1171
-
1172
- Returns
1173
- -------
1174
-
1175
- """
1176
- """Return the coefficients from a multiple linear regression, along with R, the coefficient of determination.
1177
-
1178
- x: The independent variables (pxn or nxp).
1179
- y: The dependent variable (1xn or nx1).
1180
- intercept: Specifies whether or not the slope intercept should be considered.
1181
-
1182
- The routine computes the coefficients (b_0, b_1, ..., b_p) from the data (x,y) under
1183
- the assumption that y = b0 + b_1 * x_1 + b_2 * x_2 + ... + b_p * x_p.
1184
-
1185
- If intercept is False, the routine assumes that b0 = 0 and returns (b_1, b_2, ..., b_p).
1186
- """
1187
- if debug:
1188
- print(f"mlregress initial: {x.shape=}, {y.shape=}")
1189
- warnings.filterwarnings("ignore", "invalid*")
1190
- y = np.atleast_1d(y)
1191
- n = y.shape[0]
1192
-
1193
- x = np.atleast_2d(x)
1194
- p, nx = x.shape
1195
-
1196
- if debug:
1197
- print(f"mlregress: {n=}, {p=}, {nx=}")
1198
-
1199
- if nx != n:
1200
- x = x.transpose()
1201
- p, nx = x.shape
1202
- if nx != n:
1203
- raise AttributeError(
1204
- "x and y must have have the same number of samples (%d and %d)" % (nx, n)
1205
- )
1206
- if debug:
1207
- print(f"mlregress final: {x.shape=}, {y.shape=}")
1208
-
1209
- if intercept is True:
1210
- xc = np.vstack((np.ones(n), x))
1211
- beta = np.ones(p + 1)
1212
- else:
1213
- xc = x
1214
- beta = np.ones(p)
1215
-
1216
- solution = np.linalg.lstsq(np.asmatrix(xc).T, np.asmatrix(y).T, rcond=-1)
1217
-
1218
- # Computation of the coefficient of determination.
1219
- Rx = np.atleast_2d(np.corrcoef(x, rowvar=1))
1220
- c = np.corrcoef(x, y, rowvar=1)[-1, :p]
1221
- try:
1222
- R2 = np.dot(np.dot(c, np.linalg.inv(Rx)), c.T)
1223
- except np.linalg.LinAlgError:
1224
- return None, None
1225
- R = np.sqrt(R2)
1226
-
1227
- return np.atleast_1d(solution[0].T), R
2469
+ return np.asmatrix(coffs), R2
1228
2470
 
1229
2471
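
mlregress hands back its coefficients as a (1, n_features + 1) row matrix with the intercept first, which is exactly the layout linfitfilt later indexes as thefit[0, 1 + j]. A hedged sketch of that return convention, assuming the scikit-learn estimator the body appears to use:

    import numpy as np
    from sklearn.linear_model import LinearRegression

    X = np.array([[1.0, 2.0], [3.0, 4.5], [5.0, 5.5], [7.0, 9.0]])
    y = np.array([3.0, 7.5, 10.5, 16.0])

    reg = LinearRegression(fit_intercept=True).fit(X, y)
    coffs = np.insert(reg.coef_, 0, reg.intercept_, axis=0)
    thefit = np.asmatrix(coffs)              # shape (1, n_features + 1)
    print(thefit.shape, round(reg.score(X, y), 3))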
 
1230
2472
  def calcexpandedregressors(
1231
- confounddict, labels=None, start=0, end=-1, deriv=True, order=1, debug=False
1232
- ):
1233
- r"""Calculates various motion related timecourses from motion data dict, and returns an array
2473
+ confounddict: dict,
2474
+ labels: Optional[list] = None,
2475
+ start: int = 0,
2476
+ end: int = -1,
2477
+ deriv: bool = True,
2478
+ order: int = 1,
2479
+ debug: bool = False,
2480
+ ) -> Tuple[NDArray, list]:
2481
+ """
2482
+ Calculate expanded regressors from a dictionary of confound vectors.
2483
+
2484
+ This routine generates a comprehensive set of motion-related regressors by
2485
+ including higher-order polynomial terms and derivatives of the original
2486
+ confound timecourses. It is commonly used in neuroimaging analysis to
2487
+ account for subject movement.
1234
2488
 
1235
2489
  Parameters
1236
2490
  ----------
1237
- confounddict: dict
1238
- A dictionary of the confound vectors
2491
+ confounddict : dict
2492
+ A dictionary where keys are labels (e.g., 'rot_x', 'trans_y') and values
2493
+ are the corresponding 1D time series (NumPy arrays or lists).
2494
+ labels : list, optional
2495
+ A list of specific confound labels from `confounddict` to process. If None,
2496
+ all labels in `confounddict` will be used. Default is None.
2497
+ start : int, optional
2498
+ The starting index (inclusive) for slicing the timecourses. Default is 0.
2499
+ end : int, optional
2500
+ The ending index (exclusive) for slicing the timecourses. If -1, slicing
2501
+ continues to the end of the timecourse. Default is -1.
2502
+ deriv : bool, optional
2503
+ If True, the first derivative of each selected timecourse (and its
2504
+ polynomial expansions) is calculated and included as a regressor.
2505
+ Default is True.
2506
+ order : int, optional
2507
+ The polynomial order for expansion. If `order > 1`, terms like `label^2`,
2508
+ `label^3`, up to `label^order` will be included. Default is 1 (no
2509
+ polynomial expansion).
2510
+ debug : bool, optional
2511
+ If True, prints debug information during processing. Default is False.
1239
2512
 
1240
2513
  Returns
1241
2514
  -------
1242
- motionregressors: array
1243
- All the derivative timecourses to use in a numpy array
1244
-
2515
+ tuple of (NDArray, list)
2516
+ A tuple containing:
2517
+ - outputregressors : NDArray
2518
+ A 2D NumPy array where each row represents a generated regressor
2519
+ (original, polynomial, or derivative) and columns represent time points.
2520
+ - outlabels : list of str
2521
+ A list of strings providing the labels for each row in `outputregressors`,
2522
+ indicating what each regressor represents (e.g., 'rot_x', 'rot_x^2',
2523
+ 'rot_x_deriv').
2524
+
2525
+ Notes
2526
+ -----
2527
+ - The derivatives are calculated using `numpy.gradient`.
2528
+ - The function handles slicing of the timecourses based on `start` and `end`
2529
+ parameters.
2530
+ - The output regressors are concatenated horizontally to form the final
2531
+ `outputregressors` array.
2532
+
2533
+ Examples
2534
+ --------
2535
+ >>> confounddict = {
2536
+ ... 'rot_x': [0.1, 0.2, 0.3],
2537
+ ... 'trans_y': [0.05, 0.1, 0.15]
2538
+ ... }
2539
+ >>> regressors, labels = calcexpandedregressors(confounddict, order=2, deriv=True)
2540
+ >>> print(regressors.shape)
2541
+ (6, 3)
2542
+ >>> print(labels)
2543
+ ['rot_x', 'trans_y', 'rot_x^2', 'trans_y^2', 'rot_x_deriv', 'trans_y_deriv']
1245
2544
  """
1246
2545
  if labels is None:
1247
2546
  localconfounddict = confounddict.copy()
@@ -1291,29 +2590,60 @@ def calcexpandedregressors(
1291
2590
  return outputregressors, outlabels
1292
2591
 
1293
2592
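
The expansion calcexpandedregressors describes (polynomial powers plus np.gradient derivatives of each confound) can be illustrated with a few lines of numpy. This is a conceptual sketch only; the real routine also handles label selection, start/end slicing, and debug output, and its exact row ordering may differ:

    import numpy as np

    confounddict = {"rot_x": np.array([0.1, 0.2, 0.4, 0.7]),
                    "trans_y": np.array([0.05, 0.1, 0.1, 0.2])}
    order, rows, outlabels = 2, [], []
    for name, tc in confounddict.items():
        for power in range(1, order + 1):
            rows.append(tc ** power)
            outlabels.append(name if power == 1 else f"{name}^{power}")
    for name, tc in confounddict.items():
        rows.append(np.gradient(tc))          # derivative regressor
        outlabels.append(f"{name}_deriv")
    outputregressors = np.vstack(rows)
    print(outputregressors.shape, outlabels)  # (6, 4) with 6 labels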
 
1294
- def derivativeglmfilt(thedata, theevs, nderivs=1, debug=False):
1295
- r"""First perform multicomponent expansion on theevs (each ev replaced by itself,
1296
- its square, its cube, etc.). Then perform a glm fit of thedata using the vectors
1297
- in thenewevs and return the result.
2593
+ @conditionaljit()
2594
+ def derivativelinfitfilt(
2595
+ thedata: NDArray, theevs: NDArray, nderivs: int = 1, debug: bool = False
2596
+ ) -> Tuple[NDArray, NDArray, NDArray, float, NDArray]:
2597
+ """
2598
+ Perform multicomponent expansion on explanatory variables and fit the data using linear regression.
2599
+
2600
+ First, each explanatory variable is expanded into multiple components by taking
2601
+ successive derivatives (the original, its first derivative, and so on). Then, a linear
2602
+ fit is performed on the input data using the expanded set of explanatory variables.
1298
2603
 
1299
2604
  Parameters
1300
2605
  ----------
1301
- thedata : 1D numpy array
1302
- Input data of length N to be filtered
1303
- :param thedata:
2606
+ thedata : NDArray
2607
+ Input data of length N to be filtered.
2608
+ theevs : NDArray
2609
+ NxP array of explanatory variables to be fit. If 1D, it is treated as a single
2610
+ explanatory variable.
2611
+ nderivs : int, optional
2612
+ Number of derivative components to compute for each explanatory variable.
2613
+ Default is 1. For each input variable, this creates a sequence of
2614
+ derivatives: original, first derivative, second derivative, etc.
2615
+ debug : bool, optional
2616
+ Flag to toggle debugging output. Default is False.
1304
2617
 
1305
- theevs : 2D numpy array
1306
- NxP array of explanatory variables to be fit
1307
- :param theevs:
1308
-
1309
- nderivs : integer
1310
- Number of components to use for each ev. Each successive component is a
1311
- higher power of the initial ev (initial, square, cube, etc.)
1312
- :param nderivs:
1313
-
1314
- debug: bool
1315
- Flag to toggle debugging output
1316
- :param debug:
2618
+ Returns
2619
+ -------
2620
+ tuple
2621
+ A tuple containing:
2622
+ - filtered : ndarray
2623
+ The filtered version of `thedata` after fitting.
2624
+ - thenewevs : ndarray
2625
+ The expanded set of explanatory variables (original + derivatives).
2626
+ - datatoremove : ndarray
2627
+ The part of the data that was removed during fitting.
2628
+ - R : float
2629
+ The coefficient of determination (R²) of the fit.
2630
+ - coffs : ndarray
2631
+ The coefficients of the linear fit.
2632
+
2633
+ Notes
2634
+ -----
2635
+ This function is useful for filtering data when the underlying signal is expected
2636
+ to have smooth variations, and derivative information can improve the fit.
2637
+ The expansion of each variable into its derivatives allows for better modeling
2638
+ of local trends in the data.
2639
+
2640
+ Examples
2641
+ --------
2642
+ >>> import numpy as np
2644
+ >>> thedata = np.array([1, 2, 3, 4, 5])
2645
+ >>> theevs = np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
2646
+ >>> filtered, expanded_ev, removed, R, coeffs = derivativelinfitfilt(thedata, theevs, nderivs=2)
1317
2647
  """
1318
2648
  if debug:
1319
2649
  print(f"{thedata.shape=}")
@@ -1337,36 +2667,66 @@ def derivativeglmfilt(thedata, theevs, nderivs=1, debug=False):
1337
2667
  if debug:
1338
2668
  print(f"{nderivs=}")
1339
2669
  print(f"{thenewevs.shape=}")
1340
- filtered, datatoremove, R, coffs = glmfilt(thedata, thenewevs, debug=debug)
2670
+ filtered, datatoremove, R, coffs, dummy = linfitfilt(thedata, thenewevs, debug=debug)
1341
2671
  if debug:
1342
2672
  print(f"{R=}")
1343
2673
 
1344
2674
  return filtered, thenewevs, datatoremove, R, coffs
1345
2675
 
1346
2676
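
The derivative expansion at the heart of derivativelinfitfilt can be sketched directly: each column is augmented with successive np.gradient passes before the fit. A minimal illustration (names are illustrative, not the function's internals):

    import numpy as np

    theevs = np.linspace(0.0, 1.0, 20) ** 2   # one explanatory variable
    nderivs = 2
    cols = [theevs]
    for _ in range(nderivs):
        cols.append(np.gradient(cols[-1]))    # next derivative component
    thenewevs = np.column_stack(cols)
    print(thenewevs.shape)                    # (20, nderivs + 1) -> (20, 3)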
 
1347
- def expandedglmfilt(thedata, theevs, ncomps=1, debug=False):
1348
- r"""First perform multicomponent expansion on theevs (each ev replaced by itself,
1349
- its square, its cube, etc.). Then perform a glm fit of thedata using the vectors
1350
- in thenewevs and return the result.
2677
+ @conditionaljit()
2678
+ def expandedlinfitfilt(
2679
+ thedata: NDArray, theevs: NDArray, ncomps: int = 1, debug: bool = False
2680
+ ) -> Tuple[NDArray, NDArray, NDArray, float, NDArray]:
2681
+ """
2682
+ Perform multicomponent expansion on explanatory variables and fit a linear model.
2683
+
2684
+ First, perform multicomponent expansion on the explanatory variables (`theevs`),
2685
+ where each variable is replaced by itself, its square, its cube, etc., up to `ncomps`
2686
+ components. Then, perform a multiple regression fit of `thedata` using the expanded
2687
+ explanatory variables and return the filtered data, the fitted model components,
2688
+ the coefficient of determination, and the coefficients.
1351
2689
 
1352
2690
  Parameters
1353
2691
  ----------
1354
- thedata : 1D numpy array
1355
- Input data of length N to be filtered
1356
- :param thedata:
1357
-
1358
- theevs : 2D numpy array
1359
- NxP array of explanatory variables to be fit
1360
- :param theevs:
1361
-
1362
- ncomps : integer
1363
- Number of components to use for each ev. Each successive component is a
1364
- higher power of the initial ev (initial, square, cube, etc.)
1365
- :param ncomps:
2692
+ thedata : NDArray
2693
+ Input data of length N to be filtered.
2694
+ theevs : NDArray
2695
+ NxP array of explanatory variables to be fit.
2696
+ ncomps : int, optional
2697
+ Number of components to use for each ev. Each successive component is a
2698
+ higher power of the initial ev (initial, square, cube, etc.). Default is 1.
2699
+ debug : bool, optional
2700
+ Flag to toggle debugging output. Default is False.
1366
2701
 
1367
- debug: bool
1368
- Flag to toggle debugging output
1369
- :param debug:
2702
+ Returns
2703
+ -------
2704
+ filtered : ndarray
2705
+ The filtered version of `thedata` after fitting and removing the linear model.
2706
+ thenewevs : ndarray
2707
+ The expanded explanatory variables used in the fit.
2708
+ datatoremove : ndarray
2709
+ The portion of `thedata` that was removed during the fitting process.
2710
+ R : float
2711
+ The coefficient of determination (R-squared) of the linear fit.
2712
+ coffs : ndarray
2713
+ The coefficients of the linear fit.
2714
+
2715
+ Notes
2716
+ -----
2717
+ If `ncomps` is 1, no expansion is performed and `theevs` is used directly.
2718
+ For each column in `theevs`, the expanded columns are created by taking powers
2719
+ of the original column (1st, 2nd, ..., ncomps-th power).
2720
+
2721
+ Examples
2722
+ --------
2723
+ >>> import numpy as np
2725
+ >>> thedata = np.array([1, 2, 3, 4, 5])
2726
+ >>> theevs = np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
2727
+ >>> filtered, expanded_ev, removed, R, coeffs = expandedlinfitfilt(thedata, theevs, ncomps=2)
2728
+ >>> print(filtered)
2729
+ [0. 0. 0. 0. 0.]
1370
2730
  """
1371
2731
  if debug:
1372
2732
  print(f"{thedata.shape=}")
@@ -1390,43 +2750,77 @@ def expandedglmfilt(thedata, theevs, ncomps=1, debug=False):
1390
2750
  if debug:
1391
2751
  print(f"{ncomps=}")
1392
2752
  print(f"{thenewevs.shape=}")
1393
- filtered, datatoremove, R, coffs = glmfilt(thedata, thenewevs, debug=debug)
2753
+ filtered, datatoremove, R, coffs, dummy = linfitfilt(thedata, thenewevs, debug=debug)
1394
2754
  if debug:
1395
2755
  print(f"{R=}")
1396
2756
 
1397
2757
  return filtered, thenewevs, datatoremove, R, coffs
1398
2758
 
1399
2759
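
For comparison, the power expansion performed by expandedlinfitfilt replaces each column with its 1st through ncomps-th powers. A hedged sketch (the column ordering here is an assumption):

    import numpy as np

    theevs = np.array([[1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0]])
    ncomps = 3
    cols = [theevs[:, j] ** p
            for j in range(theevs.shape[1])
            for p in range(1, ncomps + 1)]
    thenewevs = np.column_stack(cols)
    print(thenewevs.shape)                    # (4, 2 * ncomps) -> (4, 6)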
 
1400
- def glmfilt(thedata, theevs, returnintercept=False, debug=False):
1401
- r"""Performs a glm fit of thedata using the vectors in theevs
2760
+ @conditionaljit()
2761
+ def linfitfilt(
2762
+ thedata: NDArray, theevs: NDArray, debug: bool = False
2763
+ ) -> Tuple[NDArray, NDArray, float, NDArray, float]:
2764
+ """
2765
+ Performs a multiple regression fit of thedata using the vectors in theevs
1402
2766
  and returns the result.
1403
2767
 
2768
+ This function fits a linear model to the input data using the explanatory
2769
+ variables provided in `theevs`, then removes the fitted component from the
2770
+ original data to produce a filtered version.
2771
+
1404
2772
  Parameters
1405
2773
  ----------
1406
- thedata : 1D numpy array
1407
- Input data of length N to be filtered
1408
- :param thedata:
2774
+ thedata : NDArray
2775
+ Input data of length N to be filtered.
2776
+ theevs : NDArray
2777
+ NxP array of explanatory variables to be fit. If 1D, treated as a single
2778
+ explanatory variable.
2779
2781
+ debug : bool, optional
2782
+ If True, print debugging information during execution. Default is False.
1409
2783
 
1410
- theevs : 2D numpy array
1411
- NxP array of explanatory variables to be fit
1412
- :param theevs:
1413
-
1414
- debug: bool
1415
- Flag to toggle debugging output
1416
- :param debug:
2784
+ Returns
2785
+ -------
2786
+ filtered : ndarray
2787
+ The filtered data, i.e., the original data with the fitted component removed.
2788
+ datatoremove : ndarray
2789
+ The component of thedata that was removed during filtering.
2790
+ R2 : float
2791
+ The coefficient of determination (R-squared) of the regression.
2792
+ retcoffs : ndarray
2793
+ The regression coefficients (excluding intercept) for each explanatory variable.
2794
+ theintercept : float
2795
+ The intercept term from the regression, always returned as the final element.
2796
+
2797
+ Notes
2798
+ -----
2799
+ This function uses `mlregress` internally to perform the linear regression.
2800
+ The intercept is always included in the model and is always returned as the
2801
+ last element of the tuple.
2802
+
2803
+ Examples
2804
+ --------
2805
+ >>> import numpy as np
2806
+ >>> thedata = np.array([1, 2, 3, 4, 5])
2807
+ >>> theevs = np.array([[1, 1], [1, 2], [1, 3], [1, 4], [1, 5]])
2808
+ >>> filtered, datatoremove, R2, retcoffs, dummy = linfitfilt(thedata, theevs)
2809
+ >>> print(filtered)
2810
+ [0. 0. 0. 0. 0.]
1417
2811
  """
1418
2812
 
1419
2813
  if debug:
1420
2814
  print(f"{thedata.shape=}")
1421
2815
  print(f"{theevs.shape=}")
1422
- thefit, R = mlregress(theevs, thedata, debug=debug)
2816
+ thefit, R2 = mlregress(theevs, thedata, debug=debug)
1423
2817
  retcoffs = np.zeros((thefit.shape[1] - 1), dtype=float)
1424
2818
  if debug:
1425
2819
  print(f"{thefit.shape=}")
1426
2820
  print(f"{thefit=}")
1427
- print(f"{R=}")
2821
+ print(f"{R2=}")
1428
2822
  print(f"{retcoffs.shape=}")
1429
- datatoremove = thedata * 0.0
2823
+ datatoremove = np.zeros_like(thedata)
1430
2824
 
1431
2825
  if theevs.ndim > 1:
1432
2826
  for ev in range(1, thefit.shape[1]):
@@ -1445,42 +2839,63 @@ def glmfilt(thedata, theevs, returnintercept=False, debug=False):
1445
2839
  filtered = thedata - datatoremove
1446
2840
  if debug:
1447
2841
  print(f"{retcoffs=}")
1448
- if returnintercept:
1449
- return filtered, datatoremove, R, retcoffs, theintercept
1450
- else:
1451
- return filtered, datatoremove, R, retcoffs
2842
+ return filtered, datatoremove, R2, retcoffs, theintercept
1452
2843
 
1453
2844
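
The fit-and-remove pattern linfitfilt implements can be reproduced with plain numpy: fit an intercept-plus-EVs design, then subtract only the EV contribution (the intercept stays in the data, matching the loop over thefit[0, 1 + j] above). A self-contained sketch using lstsq in place of mlregress:

    import numpy as np

    rng = np.random.default_rng(1)
    theevs = rng.normal(size=(100, 2))
    thedata = 0.3 + theevs @ np.array([1.0, -2.0]) + 0.05 * rng.normal(size=100)

    design = np.column_stack([np.ones(len(thedata)), theevs])
    beta, *_ = np.linalg.lstsq(design, thedata, rcond=None)
    datatoremove = design[:, 1:] @ beta[1:]   # EV component only, no intercept
    filtered = thedata - datatoremove
    print(round(beta[0], 2), filtered.std() < thedata.std())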
 
1454
- def confoundglm(
1455
- data,
1456
- regressors,
1457
- debug=False,
1458
- showprogressbar=True,
1459
- rt_floatset=np.float64,
1460
- rt_floattype="float64",
1461
- ):
1462
- r"""Filters multiple regressors out of an array of data
2845
+ @conditionaljit()
2846
+ def confoundregress(
2847
+ data: NDArray,
2848
+ regressors: NDArray,
2849
+ debug: bool = False,
2850
+ showprogressbar: bool = True,
2851
+ rt_floattype: np.dtype = np.float64,
2852
+ ) -> Tuple[NDArray, NDArray]:
2853
+ """
2854
+ Filters multiple regressors out of an array of data using linear regression.
2855
+
2856
+ This function removes the effect of nuisance regressors from each voxel's timecourse
2857
+ by fitting a linear model and subtracting the predicted signal.
1463
2858
 
1464
2859
  Parameters
1465
2860
  ----------
1466
2861
  data : 2d numpy array
1467
- A data array. First index is the spatial dimension, second is the time (filtering) dimension.
1468
-
1469
- regressors: 2d numpy array
1470
- The set of regressors to filter out of each timecourse. The first dimension is the regressor number, second is the time (filtering) dimension:
1471
-
1472
- debug : boolean
1473
- Print additional diagnostic information if True
2862
+ A data array where the first index is the spatial dimension (e.g., voxels),
2863
+ and the second index is the time (filtering) dimension.
2864
+ regressors : 2d numpy array
2865
+ The set of regressors to filter out of each timecourse. The first dimension
2866
+ is the regressor number, and the second is the time (filtering) dimension.
2867
+ debug : bool, optional
2868
+ Print additional diagnostic information if True. Default is False.
2869
+ showprogressbar : bool, optional
2870
+ Show progress bar during processing. Default is True.
2871
+ rt_floattype : np.dtype, optional
2872
+ The data type used for floating-point calculations. Default is np.float64.
1474
2873
 
1475
2874
  Returns
1476
2875
  -------
2876
+ filtereddata : 2d numpy array
2877
+ The data with regressors removed, same shape as input `data`.
2878
+ r2value : 1d numpy array
2879
+ The R-squared value for each voxel's regression fit, shape (data.shape[0],).
2880
+
2881
+ Notes
2882
+ -----
2883
+ This function uses `mlregress` internally to perform the linear regression for each voxel.
2884
+ The regressors are applied in the order they appear in the input array.
2885
+
2886
+ Examples
2887
+ --------
2888
+ >>> import numpy as np
2889
+ >>> data = np.random.rand(100, 1000)
2890
+ >>> regressors = np.random.rand(3, 1000)
2891
+ >>> filtered_data, r2_values = confoundregress(data, regressors, debug=True)
1477
2892
  """
1478
2893
  if debug:
1479
2894
  print("data shape:", data.shape)
1480
2895
  print("regressors shape:", regressors.shape)
1481
2896
  datatoremove = np.zeros(data.shape[1], dtype=rt_floattype)
1482
- filtereddata = data * 0.0
1483
- r2value = data[:, 0] * 0.0
2897
+ filtereddata = np.zeros_like(data)
2898
+ r2value = np.zeros_like(data[:, 0])
1484
2899
  for i in tqdm(
1485
2900
  range(data.shape[0]),
1486
2901
  desc="Voxel",
@@ -1488,13 +2903,13 @@ def confoundglm(
1488
2903
  disable=(not showprogressbar),
1489
2904
  ):
1490
2905
  datatoremove *= 0.0
1491
- thefit, R = mlregress(regressors, data[i, :])
2906
+ thefit, R2 = mlregress(regressors, data[i, :])
1492
2907
  if i == 0 and debug:
1493
2908
  print("fit shape:", thefit.shape)
1494
2909
  for j in range(regressors.shape[0]):
1495
- datatoremove += rt_floatset(rt_floatset(thefit[0, 1 + j]) * regressors[j, :])
2910
+ datatoremove += (thefit[0, 1 + j] * regressors[j, :]).astype(rt_floattype)
1496
2911
  filtereddata[i, :] = data[i, :] - datatoremove
1497
- r2value[i] = R * R
2912
+ r2value[i] = R2
1498
2913
  return filtereddata, r2value
1499
2914
 
1500
2915
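
confoundregress applies that same regression voxel by voxel. The following hedged sketch shows the intended semantics with plain numpy (shapes follow the docstring, voxels on the first axis and time on the second); it is not the rapidtide implementation:

    import numpy as np

    rng = np.random.default_rng(2)
    regressors = rng.normal(size=(3, 200))          # 3 nuisance timecourses
    data = rng.normal(size=(10, 200))               # 10 voxel timecourses
    data += np.array([[0.5, -1.0, 2.0]]) @ regressors

    design = np.column_stack([np.ones(200), regressors.T])
    filtereddata = np.empty_like(data)
    r2value = np.empty(data.shape[0])
    for i in range(data.shape[0]):
        beta, *_ = np.linalg.lstsq(design, data[i], rcond=None)
        filtereddata[i] = data[i] - design[:, 1:] @ beta[1:]
        resid = data[i] - design @ beta
        r2value[i] = 1.0 - resid.var() / data[i].var()
    print(filtereddata.shape, r2value.shape)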
 
@@ -1505,18 +2920,67 @@ def confoundglm(
1505
2920
  # You can redistribute it and/or modify it under the terms of the Do What The
1506
2921
  # Fuck You Want To Public License, Version 2, as published by Sam Hocevar. See
1507
2922
  # http://www.wtfpl.net/ for more details.
1508
- def getpeaks(xvals, yvals, xrange=None, bipolar=False, displayplots=False):
2923
+ def getpeaks(
2924
+ xvals: NDArray,
2925
+ yvals: NDArray,
2926
+ xrange: Optional[Tuple[float, float]] = None,
2927
+ bipolar: bool = False,
2928
+ displayplots: bool = False,
2929
+ ) -> list:
2930
+ """
2931
+ Find peaks in y-values within a specified range and optionally display results.
2932
+
2933
+ This function identifies local maxima (and optionally minima) in the input
2934
+ y-values and returns their coordinates along with an offset from the origin.
2935
+ It supports filtering by a range of x-values and can handle both unipolar and
2936
+ bipolar peak detection.
2937
+
2938
+ Parameters
2939
+ ----------
2940
+ xvals : NDArray
2941
+ X-axis values corresponding to the y-values.
2942
+ yvals : NDArray
2943
+ Y-axis values where peaks are to be detected.
2944
+ xrange : tuple of float, optional
2945
+ A tuple (min, max) specifying the range of x-values to consider.
2946
+ If None, the full range is used.
2947
+ bipolar : bool, optional
2948
+ If True, detect both positive and negative peaks (minima and maxima).
2949
+ If False, only detect positive peaks.
2950
+ displayplots : bool, optional
2951
+ If True, display a plot showing the data and detected peaks.
2952
+
2953
+ Returns
2954
+ -------
2955
+ list of lists
2956
+ A list of peaks, each represented as [x_value, y_value, offset_from_origin].
2957
+ The offset is calculated using `tide_util.valtoindex` relative to x=0.
2958
+
2959
+ Notes
2960
+ -----
2961
+ - The function uses `scipy.signal.find_peaks` to detect peaks.
2962
+ - If `bipolar` is True, both positive and negative peaks are included.
2963
+ - The `displayplots` option requires `matplotlib.pyplot` to be imported as `plt`.
2964
+
2965
+ Examples
2966
+ --------
2967
+ >>> x = np.linspace(-10, 10, 100)
2968
+ >>> y = np.sin(x)
2969
+ >>> peaks = getpeaks(x, y, xrange=(-5, 5), bipolar=True)
2970
+ >>> for xpeak, ypeak, offset in peaks:
2971
+ ... print(xpeak, ypeak, offset)
2972
+ """
1509
2973
  peaks, dummy = find_peaks(yvals, height=0)
1510
2974
  if bipolar:
1511
2975
  negpeaks, dummy = find_peaks(-yvals, height=0)
1512
2976
  peaks = np.concatenate((peaks, negpeaks))
1513
2977
  procpeaks = []
1514
2978
  if xrange is None:
1515
- lagmin = xvals[0]
1516
- lagmax = xvals[-1]
2979
+ lagmin = xvals[0] + 0.0
2980
+ lagmax = xvals[-1] + 0.0
1517
2981
  else:
1518
- lagmin = xrange[0]
1519
- lagmax = xrange[1]
2982
+ lagmin = xrange[0] + 0.0
2983
+ lagmax = xrange[1] + 0.0
1520
2984
  originloc = tide_util.valtoindex(xvals, 0.0, discrete=False)
1521
2985
  for thepeak in peaks:
1522
2986
  if lagmin <= xvals[thepeak] <= lagmax:
@@ -1553,19 +3017,44 @@ def getpeaks(xvals, yvals, xrange=None, bipolar=False, displayplots=False):
1553
3017
  return procpeaks
1554
3018
 
1555
3019
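
getpeaks builds on scipy.signal.find_peaks, negating the signal to pick up minima when bipolar=True. A short, hedged usage sketch of that pattern (not the function itself):

    import numpy as np
    from scipy.signal import find_peaks

    x = np.linspace(-10.0, 10.0, 201)
    y = np.sin(x)
    pospeaks, _ = find_peaks(y, height=0)
    negpeaks, _ = find_peaks(-y, height=0)
    peaks = np.concatenate((pospeaks, negpeaks))
    inrange = sorted(p for p in peaks if -5.0 <= x[p] <= 5.0)
    print([round(float(x[p]), 2) for p in inrange])  # extrema inside (-5, 5)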
 
1556
- def parabfit(x_axis, y_axis, peakloc, points):
3020
+ def parabfit(x_axis: NDArray, y_axis: NDArray, peakloc: NDArray, points: int) -> Tuple[float, float]:
1557
3021
  """
3022
+ Fit a parabola to a localized region around a peak and return the peak coordinates.
3023
+
3024
+ This function performs a quadratic curve fitting on a subset of data surrounding
3025
+ a specified peak location. It uses a parabolic model of the form a*(x-tau)^2 + c
3026
+ to estimate the precise peak position and amplitude.
1558
3027
 
1559
3028
  Parameters
1560
3029
  ----------
1561
- x_axis
1562
- y_axis
1563
- peakloc
1564
- peaksize
3030
+ x_axis : NDArray
3031
+ Array of x-axis values (typically time or frequency).
3032
+ y_axis : NDArray
3033
+ Array of y-axis values (typically signal amplitude).
3034
+ peakloc : array-like
3035
+ Nested peak index locations; the routine reads peakloc[0][0] and
+ peakloc[1][0] to set the initial fit scale.
3036
+ points : int
3037
+ Number of points to include in the local fit around the peak.
1565
3038
 
1566
3039
  Returns
1567
3040
  -------
1568
-
3041
+ Tuple[float, float]
3042
+ A tuple containing (x_peak, y_peak) - the fitted peak coordinates.
3043
+
3044
+ Notes
3045
+ -----
3046
+ The function uses a least-squares fitting approach with scipy.optimize.curve_fit.
3047
+ Initial parameter estimates are derived analytically based on the peak location
3048
+ and a distance calculation. The parabolic model assumes the peak has a symmetric
3049
+ quadratic shape.
3050
+
3051
+ Examples
3052
+ --------
3053
+ >>> import numpy as np
3054
+ >>> x = np.linspace(0, 10, 100)
3055
+ >>> y = 2 * (x - 5)**2 + 1
3056
+ >>> peak_x, peak_y = parabfit(x, y, np.array([[45], [55]]), 10)
3057
+ >>> print(f"Peak at x={peak_x:.2f}, y={peak_y:.2f}")
1569
3058
  """
1570
3059
  func = lambda x, a, tau, c: a * ((x - tau) ** 2) + c
1571
3060
  distance = abs(x_axis[peakloc[1][0]] - x_axis[peakloc[0][0]]) / 4
@@ -1590,20 +3079,48 @@ def parabfit(x_axis, y_axis, peakloc, points):
1590
3079
  return x, y
1591
3080
 
1592
3081
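
parabfit refines a discrete peak with a quadratic model. The same idea in closed form, using the standard three-point parabolic interpolation instead of curve_fit (a hedged alternative sketch, not the routine above):

    import numpy as np

    x = np.linspace(0.0, 10.0, 101)
    y = -(x - 4.96) ** 2 + 3.0
    i = int(np.argmax(y))                         # discrete peak index
    denom = y[i - 1] - 2.0 * y[i] + y[i + 1]
    shift = 0.5 * (y[i - 1] - y[i + 1]) / denom   # vertex offset, in samples
    dx = x[1] - x[0]
    peak_x = x[i] + shift * dx
    peak_y = y[i] - 0.25 * (y[i - 1] - y[i + 1]) * shift
    print(round(peak_x, 3), round(peak_y, 3))     # 4.96 3.0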
 
1593
- def _datacheck_peakdetect(x_axis, y_axis):
3082
+ def _datacheck_peakdetect(x_axis: Optional[NDArray], y_axis: NDArray) -> Tuple[NDArray, NDArray]:
1594
3083
  """
3084
+ Validate and convert input arrays for peak detection.
1595
3085
 
1596
3086
  Parameters
1597
3087
  ----------
1598
- x_axis
1599
- y_axis
3088
+ x_axis : NDArray, optional
3089
+ X-axis values. If None, np.arange(len(y_axis)) is used.
3090
+ y_axis : NDArray
3091
+ Y-axis values to be processed.
1600
3092
 
1601
3093
  Returns
1602
3094
  -------
1603
-
3095
+ tuple of ndarray
3096
+ Tuple containing (x_axis, y_axis) as numpy arrays.
3097
+
3098
+ Raises
3099
+ ------
3100
+ ValueError
3101
+ If input vectors y_axis and x_axis have different lengths.
3102
+
3103
+ Notes
3104
+ -----
3105
+ This function ensures that both input arrays are converted to numpy arrays
3106
+ and have matching shapes. If x_axis is None, it defaults to a range
3107
+ corresponding to the length of y_axis.
3108
+
3109
+ Examples
3110
+ --------
3111
+ >>> import numpy as np
3112
+ >>> x, y = _datacheck_peakdetect([1, 2, 3], [4, 5, 6])
3113
+ >>> print(x)
3114
+ [1 2 3]
3115
+ >>> print(y)
3116
+ [4 5 6]
3117
+
3118
+ >>> x, y = _datacheck_peakdetect(None, [4, 5, 6])
3119
+ >>> print(x)
3120
+ [0 1 2]
1604
3121
  """
1605
3122
  if x_axis is None:
1606
- x_axis = range(len(y_axis))
3123
+ x_axis = np.arange(0, len(y_axis))
1607
3124
 
1608
3125
  if np.shape(y_axis) != np.shape(x_axis):
1609
3126
  raise ValueError("Input vectors y_axis and x_axis must have same length")
@@ -1614,43 +3131,58 @@ def _datacheck_peakdetect(x_axis, y_axis):
1614
3131
  return x_axis, y_axis
1615
3132
 
1616
3133
 
1617
- def peakdetect(y_axis, x_axis=None, lookahead=200, delta=0.0):
3134
+ def peakdetect(
3135
+ y_axis: NDArray[np.floating[Any]],
3136
+ x_axis: Optional[NDArray[np.floating[Any]]] = None,
3137
+ lookahead: int = 200,
3138
+ delta: float = 0.0,
3139
+ ) -> list:
1618
3140
  """
1619
- Converted from/based on a MATLAB script at:
1620
- http://billauer.co.il/peakdet.html
1621
-
1622
- function for detecting local maxima and minima in a signal.
1623
- Discovers peaks by searching for values which are surrounded by lower
1624
- or larger values for maxima and minima respectively
1625
-
1626
- keyword arguments:
1627
- y_axis -- A list containing the signal over which to find peaks
3141
+ Detect local maxima and minima in a signal.
1628
3142
 
1629
- x_axis -- A x-axis whose values correspond to the y_axis list and is used
1630
- in the return to specify the position of the peaks. If omitted an
1631
- index of the y_axis is used.
1632
- (default: None)
1633
-
1634
- lookahead -- distance to look ahead from a peak candidate to determine if
1635
- it is the actual peak
1636
- (default: 200)
1637
- '(samples / period) / f' where '4 >= f >= 1.25' might be a good value
1638
-
1639
- delta -- this specifies a minimum difference between a peak and
1640
- the following points, before a peak may be considered a peak. Useful
1641
- to hinder the function from picking up false peaks towards to end of
1642
- the signal. To work well delta should be set to delta >= RMSnoise * 5.
1643
- (default: 0)
1644
- When omitted delta function causes a 20% decrease in speed.
1645
- When used Correctly it can double the speed of the function
3143
+ This function is based on a MATLAB script by Billauer, and identifies peaks
3144
+ by searching for values that are surrounded by lower (for maxima) or larger
3145
+ (for minima) values. It uses a lookahead window to confirm that a candidate
3146
+ is indeed a peak and not noise or jitter.
1646
3147
 
3148
+ Parameters
3149
+ ----------
3150
+ y_axis : NDArray[np.floating[Any]]
3151
+ A list or array containing the signal over which to find peaks.
3152
+ x_axis : NDArray[np.floating[Any]], optional
3153
+ An x-axis whose values correspond to the y_axis list. If omitted,
3154
+ an index of the y_axis is used. Default is None.
3155
+ lookahead : int, optional
3156
+ Distance to look ahead from a peak candidate to determine if it is
3157
+ the actual peak. Default is 200.
3158
+ delta : float, optional
3159
+ Minimum difference between a peak and the following points. If set,
3160
+ this helps avoid false peaks towards the end of the signal. Default is 0.0.
1647
3161
 
1648
- return: two lists [max_peaks, min_peaks] containing the positive and
1649
- negative peaks respectively. Each cell of the lists contains a tuple
1650
- of: (position, peak_value)
1651
- to get the average peak value do: np.mean(max_peaks, 0)[1] on the
1652
- results to unpack one of the lists into x, y coordinates do:
1653
- x, y = zip(*max_peaks)
3162
+ Returns
3163
+ -------
3164
+ list of lists
3165
+ A list containing two sublists: ``[max_peaks, min_peaks]``.
3166
+ Each sublist contains tuples of the form ``(position, peak_value)``.
3167
+ For example, to unpack maxima into x and y coordinates:
3168
+ ``x, y = zip(*max_peaks)``.
3169
+
3170
+ Notes
3171
+ -----
3172
+ - The function assumes that the input signal is sampled at regular intervals.
3173
+ - If ``delta`` is not provided, the function runs slower but may detect more
3174
+ peaks.
3175
+ - When ``delta`` is correctly specified (e.g., as 5 * RMS noise), it can
3176
+ significantly improve performance.
3177
+
3178
+ Examples
3179
+ --------
3180
+ >>> import numpy as np
3181
+ >>> x = np.linspace(0, 10, 100)
3182
+ >>> y = np.sin(x) + 0.5 * np.sin(3 * x)
3183
+ >>> max_peaks, min_peaks = peakdetect(y, x, lookahead=10, delta=0.1)
3184
+ >>> print("Max peaks:", max_peaks)
3185
+ >>> print("Min peaks:", min_peaks)
1654
3186
  """
1655
3187
  max_peaks = []
1656
3188
  min_peaks = []
@@ -1664,7 +3196,7 @@ def peakdetect(y_axis, x_axis=None, lookahead=200, delta=0.0):
1664
3196
  # perform some checks
1665
3197
  if lookahead < 1:
1666
3198
  raise ValueError("Lookahead must be '1' or above in value")
1667
- if not (np.isscalar(delta) and delta >= 0):
3199
+ if not (np.isscalar(delta) and (delta >= 0.0)):
1668
3200
  raise ValueError("delta must be a positive number")
1669
3201
 
1670
3202
  # maxima and minima candidates are temporarily stored in
@@ -1699,7 +3231,7 @@ def peakdetect(y_axis, x_axis=None, lookahead=200, delta=0.0):
1699
3231
  # mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)]
1700
3232
 
1701
3233
  ####look for min####
1702
- if y > mn + delta and mn != -np.inf:
3234
+ if (y > mn + delta) and (mn != -np.inf):
1703
3235
  # Minima peak candidate found
1704
3236
  # look ahead in signal to ensure that this is a peak and not jitter
1705
3237
  if y_axis[index : index + lookahead].min() > mn:
@@ -1729,11 +3261,50 @@ def peakdetect(y_axis, x_axis=None, lookahead=200, delta=0.0):
1729
3261
  return [max_peaks, min_peaks]
1730
3262
 
1731
3263
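
A hedged usage sketch for peakdetect, assuming the function is imported from this module: lookahead confirms that a candidate really is an extremum, and delta (roughly five times the RMS noise) suppresses jitter peaks.

    import numpy as np

    x = np.linspace(0.0, 10.0, 500)
    y = np.sin(x) + 0.02 * np.random.default_rng(3).normal(size=500)

    max_peaks, min_peaks = peakdetect(y, x, lookahead=20, delta=0.1)
    xs, ys = zip(*max_peaks)                 # unpack (position, value) tuples
    print([round(float(v), 2) for v in xs])  # maxima near pi/2 and 5*pi/2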
 
1732
- def ocscreetest(eigenvals, debug=False, displayplots=False):
3264
+ def ocscreetest(eigenvals: NDArray, debug: bool = False, displayplots: bool = False) -> int:
3265
+ """
3266
+ Determine the number of components to retain using the optimal coordinates (OC) scree test.
3267
+
3268
+ This function implements a variant of the scree test for determining the number of significant
3269
+ eigenvalues in a dataset. It uses a linear regression approach to model the eigenvalue decay
3270
+ and identifies the point where the observed eigenvalues fall below the predicted values.
3271
+
3272
+ Parameters
3273
+ ----------
3274
+ eigenvals : NDArray
3275
+ Array of eigenvalues, typically sorted in descending order.
3276
+ debug : bool, optional
3277
+ If True, print intermediate calculations for debugging purposes. Default is False.
3278
+ displayplots : bool, optional
3279
+ If True, display plots of the original eigenvalues, regression coefficients (a and b),
3280
+ and the predicted eigenvalue curve. Default is False.
3281
+
3282
+ Returns
3283
+ -------
3284
+ int
3285
+ The index of the last retained component based on the OCSCREE criterion.
3286
+
3287
+ Notes
3288
+ -----
3289
+ The function performs the following steps:
3290
+ 1. Initialize arrays for regression coefficients 'a' and 'b'.
3291
+ 2. Compute regression coefficients from the eigenvalues.
3292
+ 3. Predict eigenvalues using the regression model.
3293
+ 4. Identify the point where the actual eigenvalues drop below the predicted values.
3294
+ 5. Optionally display diagnostic plots.
3295
+
3296
+ Examples
3297
+ --------
3298
+ >>> import numpy as np
3299
+ >>> eigenvals = np.array([3.5, 2.1, 1.8, 1.2, 0.9, 0.5])
3300
+ >>> result = ocscreetest(eigenvals)
3301
3303
+ """
1733
3304
  num = len(eigenvals)
1734
- a = eigenvals * 0.0
1735
- b = eigenvals * 0.0
1736
- prediction = eigenvals * 0.0
3305
+ a = np.zeros_like(eigenvals)
3306
+ b = np.zeros_like(eigenvals)
3307
+ prediction = np.zeros_like(eigenvals)
1737
3308
  for i in range(num - 3, 1, -1):
1738
3309
  b[i] = (eigenvals[-1] - eigenvals[i + 1]) / (num - 1 - i - 1)
1739
3310
  a[i] = eigenvals[i + 1] - b[i + 1]
@@ -1766,7 +3337,45 @@ def ocscreetest(eigenvals, debug=False, displayplots=False):
1766
3337
  return i
1767
3338
 
1768
3339
 
1769
- def afscreetest(eigenvals, displayplots=False):
3340
+ def afscreetest(eigenvals: NDArray, displayplots: bool = False) -> int:
3341
+ """
3342
+ Detect the number of components to retain using the acceleration factor (AF) scree test.
3343
+
3344
+ This function applies a second derivative analysis to the eigenvalues to identify
3345
+ the point where the rate of change of eigenvalues begins to decrease significantly,
3346
+ which typically indicates the optimal number of components to retain.
3347
+
3348
+ Parameters
3349
+ ----------
3350
+ eigenvals : NDArray
3351
+ Array of eigenvalues, typically from a PCA or similar decomposition.
3352
+ Should be sorted in descending order.
3353
+ displayplots : bool, optional
3354
+ If True, display plots showing the original eigenvalues, first derivative,
3355
+ and second derivative (default is False).
3356
+
3357
+ Returns
3358
+ -------
3359
+ int
3360
+ The index of the optimal number of components, adjusted by subtracting 1
3361
+ from the location of maximum second derivative.
3362
+
3363
+ Notes
3364
+ -----
3365
+ The method works by:
3366
+ 1. Computing the first derivative of eigenvalues
3367
+ 2. Computing the second derivative of the first derivative
3368
+ 3. Finding the maximum of the second derivative
3369
+ 4. Returning the index of this maximum minus 1
3370
+
3371
+ Examples
3372
+ --------
3373
+ >>> import numpy as np
3374
+ >>> eigenvals = np.array([5.0, 3.0, 1.5, 0.8, 0.2])
3375
+ >>> optimal_components = afscreetest(eigenvals)
3376
3378
+ """
1770
3379
  num = len(eigenvals)
1771
3380
  firstderiv = np.gradient(eigenvals, edge_order=2)
1772
3381
  secondderiv = np.gradient(firstderiv, edge_order=2)
@@ -1784,10 +3393,56 @@ def afscreetest(eigenvals, displayplots=False):
1784
3393
  ax3.set_title("Second derivative")
1785
3394
  plt.plot(secondderiv, color="g")
1786
3395
  plt.show()
1787
- return maxaccloc - 1
3396
+ return int(maxaccloc - 1)
3397
+
1788
3398
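
The acceleration-factor computation afscreetest is built on can be sketched in a few lines, assuming maxaccloc is simply the argmax of the second derivative (a plausible reading of the code above, not a verified reproduction):

    import numpy as np

    eigenvals = np.array([5.0, 3.0, 1.5, 0.8, 0.4, 0.2])
    firstderiv = np.gradient(eigenvals, edge_order=2)
    secondderiv = np.gradient(firstderiv, edge_order=2)
    maxaccloc = int(np.argmax(secondderiv))
    print(maxaccloc - 1)                # retained-component estimate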
 
3399
+ def phaseanalysis(
3400
+ firstharmonic: NDArray, displayplots: bool = False
3401
+ ) -> Tuple[NDArray, NDArray, NDArray]:
3402
+ """
3403
+ Perform phase analysis on a signal using analytic signal representation.
1789
3404
 
1790
- def phaseanalysis(firstharmonic, displayplots=False):
3405
+ This function computes the analytic signal of the input signal using the Hilbert transform,
3406
+ and extracts the instantaneous phase and amplitude envelope. Optionally displays plots
3407
+ of the analytic signal, phase, and amplitude.
3408
+
3409
+ Parameters
3410
+ ----------
3411
+ firstharmonic : NDArray
3412
+ Input signal to analyze. Should be a 1D array.
3413
+ displayplots : bool, optional
3414
+ If True, displays plots of the analytic signal, phase, and amplitude.
3415
+ Default is False.
3416
+
3417
+ Returns
3418
+ -------
3419
+ tuple of ndarray
3420
+ A tuple containing:
3421
+ - instantaneous_phase : ndarray
3422
+ The unwrapped instantaneous phase of the signal
3423
+ - amplitude_envelope : ndarray
3424
+ The amplitude envelope of the signal
3425
+ - analytic_signal : ndarray
3426
+ The analytic signal (complex-valued)
3427
+
3428
+ Notes
3429
+ -----
3430
+ The function uses `scipy.signal.hilbert` to compute the analytic signal,
3431
+ which is defined as: :math:`x_a(t) = x(t) + j\\hat{x}(t)` where :math:`\\hat{x}(t)`
3432
+ is the Hilbert transform of :math:`x(t)`.
3433
+
3434
+ The instantaneous phase is computed as the angle of the analytic signal and is
3435
+ unwrapped to remove discontinuities.
3436
+
3437
+ Examples
3438
+ --------
3439
+ >>> import numpy as np
3440
+ >>> from scipy.signal import hilbert
3441
+ >>> signal = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 100))
3442
+ >>> phase, amp, analytic = phaseanalysis(signal)
3443
+ >>> print(f"Phase shape: {phase.shape}")
3444
+ Phase shape: (100,)
3445
+ """
1791
3446
  print("entering phaseanalysis")
1792
3447
  analytic_signal = hilbert(firstharmonic)
1793
3448
  amplitude_envelope = np.abs(analytic_signal)
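
The analytic-signal steps shown so far are self-contained enough to run on their own; a hedged sketch assuming scipy.signal.hilbert:

    import numpy as np
    from scipy.signal import hilbert

    t = np.linspace(0.0, 1.0, 400, endpoint=False)
    firstharmonic = np.sin(2.0 * np.pi * 5.0 * t)

    analytic_signal = hilbert(firstharmonic)
    amplitude_envelope = np.abs(analytic_signal)
    instantaneous_phase = np.unwrap(np.angle(analytic_signal))
    print(instantaneous_phase.shape, amplitude_envelope.shape)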
@@ -1845,28 +3500,119 @@ FML_FITFAIL = (
1845
3500
 
1846
3501
 
1847
3502
  def simfuncpeakfit(
1848
- incorrfunc,
1849
- corrtimeaxis,
1850
- useguess=False,
1851
- maxguess=0.0,
1852
- displayplots=False,
1853
- functype="correlation",
1854
- peakfittype="gauss",
1855
- searchfrac=0.5,
1856
- lagmod=1000.0,
1857
- enforcethresh=True,
1858
- allowhighfitamps=False,
1859
- lagmin=-30.0,
1860
- lagmax=30.0,
1861
- absmaxsigma=1000.0,
1862
- absminsigma=0.25,
1863
- hardlimit=True,
1864
- bipolar=False,
1865
- lthreshval=0.0,
1866
- uthreshval=1.0,
1867
- zerooutbadfit=True,
1868
- debug=False,
1869
- ):
3503
+ incorrfunc: NDArray,
3504
+ corrtimeaxis: NDArray,
3505
+ useguess: bool = False,
3506
+ maxguess: float = 0.0,
3507
+ displayplots: bool = False,
3508
+ functype: str = "correlation",
3509
+ peakfittype: str = "gauss",
3510
+ searchfrac: float = 0.5,
3511
+ lagmod: float = 1000.0,
3512
+ enforcethresh: bool = True,
3513
+ allowhighfitamps: bool = False,
3514
+ lagmin: float = -30.0,
3515
+ lagmax: float = 30.0,
3516
+ absmaxsigma: float = 1000.0,
3517
+ absminsigma: float = 0.25,
3518
+ hardlimit: bool = True,
3519
+ bipolar: bool = False,
3520
+ lthreshval: float = 0.0,
3521
+ uthreshval: float = 1.0,
3522
+ zerooutbadfit: bool = True,
3523
+ debug: bool = False,
3524
+ ) -> Tuple[int, np.float64, np.float64, np.float64, np.uint16, np.uint32, int, int]:
3525
+ """
3526
+ Fit a peak in a correlation or mutual information function.
3527
+
3528
+ This function performs peak fitting on a correlation or mutual information
3529
+ function to extract peak parameters such as location, amplitude, and width.
3530
+ It supports various fitting methods and includes error handling and
3531
+ validation for fit parameters.
3532
+
3533
+ Parameters
3534
+ ----------
3535
+ incorrfunc : NDArray
3536
+ Input correlation or mutual information function values.
3537
+ corrtimeaxis : NDArray
3538
+ Time axis corresponding to the correlation function.
3539
+ useguess : bool, optional
3540
+ If True, use `maxguess` as an initial guess for the peak location.
3541
+ Default is False.
3542
+ maxguess : float, optional
3543
+ Initial guess for the peak location in seconds. Used only if `useguess` is True.
3544
+ Default is 0.0.
3545
+ displayplots : bool, optional
3546
+ If True, display plots of the peak and fit. Default is False.
3547
+ functype : str, optional
3548
+ Type of function to fit. Options are 'correlation', 'mutualinfo', or 'hybrid'.
3549
+ Default is 'correlation'.
3550
+ peakfittype : str, optional
3551
+ Type of peak fitting to perform. Options are 'gauss', 'fastgauss', 'quad',
3552
+ 'fastquad', 'COM', or 'None'. Default is 'gauss'.
3553
+ searchfrac : float, optional
3554
+ Fraction of the peak maximum to define the search range for peak width.
3555
+ Default is 0.5.
3556
+ lagmod : float, optional
3557
+ Modulus for lag values, used to wrap around the lag values.
3558
+ Default is 1000.0.
3559
+ enforcethresh : bool, optional
3560
+ If True, enforce amplitude thresholds. Default is True.
3561
+ allowhighfitamps : bool, optional
3562
+ If True, allow fit amplitudes to exceed 1.0. Default is False.
3563
+ lagmin : float, optional
3564
+ Minimum allowed lag value in seconds. Default is -30.0.
3565
+ lagmax : float, optional
3566
+ Maximum allowed lag value in seconds. Default is 30.0.
3567
+ absmaxsigma : float, optional
3568
+ Maximum allowed sigma value in seconds. Default is 1000.0.
3569
+ absminsigma : float, optional
3570
+ Minimum allowed sigma value in seconds. Default is 0.25.
3571
+ hardlimit : bool, optional
3572
+ If True, enforce hard limits on lag values. Default is True.
3573
+ bipolar : bool, optional
3574
+ If True, allow negative correlation values. Default is False.
3575
+ lthreshval : float, optional
3576
+ Lower threshold for amplitude validation. Default is 0.0.
3577
+ uthreshval : float, optional
3578
+ Upper threshold for amplitude validation. Default is 1.0.
3579
+ zerooutbadfit : bool, optional
3580
+ If True, set fit results to zero if fit fails. Default is True.
3581
+ debug : bool, optional
3582
+ If True, print debug information. Default is False.
3583
+
3584
+ Returns
3585
+ -------
3586
+ tuple of int, float, float, float, int, int, int, int
3587
+ A tuple containing:
3588
+ - maxindex: Index of the peak maximum.
3589
+ - maxlag: Fitted peak lag in seconds.
3590
+ - maxval: Fitted peak amplitude.
3591
+ - maxsigma: Fitted peak width (sigma) in seconds.
3592
+ - maskval: Mask indicating fit success (1 for success, 0 for failure).
3593
+ - failreason: Reason for fit failure (bitmask).
3594
+ - peakstart: Start index of the peak region used for fitting.
3595
+ - peakend: End index of the peak region used for fitting.
3596
+
3597
+ Notes
3598
+ -----
3599
+ - The function automatically handles different types of correlation functions
3600
+ and mutual information functions with appropriate baseline corrections.
3601
+ - Various fitting methods are supported, each with its own strengths and
3602
+ trade-offs in terms of speed and accuracy.
3603
+ - Fit results are validated against physical constraints and thresholds.
3604
+
3605
+ Examples
3606
+ --------
3607
+ >>> import numpy as np
3609
+ >>> # Create sample data
3610
+ >>> t = np.linspace(-50, 50, 1000)
3611
+ >>> corr = np.exp(-0.5 * (t / 2)**2) + 0.1 * np.random.randn(1000)
3612
+ >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, peakstart, peakend = \
3613
+ ... simfuncpeakfit(corr, t, peakfittype='gauss')
3614
+ >>> print(f"Peak lag: {maxlag:.2f} s, Amplitude: {maxval:.2f}, Width: {maxsigma:.2f} s")
3615
+ """
1870
3616
  # check to make sure xcorr_x and xcorr_y match
1871
3617
  if corrtimeaxis is None:
1872
3618
  print("Correlation time axis is not defined - exiting")
@@ -1931,7 +3677,7 @@ def simfuncpeakfit(
1931
3677
  baselinedev = 0.0
1932
3678
  else:
1933
3679
  # for mutual information, there is a nonzero baseline, so we want the difference from that.
1934
- baseline = np.median(corrfunc)
3680
+ baseline = float(np.median(corrfunc))
1935
3681
  baselinedev = mad(corrfunc)
1936
3682
  if debug:
1937
3683
  print("baseline, baselinedev:", baseline, baselinedev)
@@ -1964,8 +3710,8 @@ def simfuncpeakfit(
1964
3710
 
1965
3711
  peakpoints[0] = 0
1966
3712
  peakpoints[-1] = 0
1967
- peakstart = np.max([1, maxindex - 1])
1968
- peakend = np.min([len(corrtimeaxis) - 2, maxindex + 1])
3713
+ peakstart = int(np.max([1, maxindex - 1]))
3714
+ peakend = int(np.min([len(corrtimeaxis) - 2, maxindex + 1]))
1969
3715
  if debug:
1970
3716
  print("initial peakstart, peakend:", peakstart, peakend)
1971
3717
  if functype == "mutualinfo":
@@ -2031,7 +3777,7 @@ def simfuncpeakfit(
2031
3777
  print("bad initial")
2032
3778
  if maxsigma_init > absmaxsigma:
2033
3779
  failreason |= FML_INITWIDTHHIGH
2034
- maxsigma_init = absmaxsigma
3780
+ maxsigma_init = np.float64(absmaxsigma)
2035
3781
  if debug:
2036
3782
  print("bad initial width - too high")
2037
3783
  if peakend - peakstart < 2:
@@ -2084,7 +3830,7 @@ def simfuncpeakfit(
2084
3830
  data = corrfunc[peakstart : peakend + 1]
2085
3831
  maxval = maxval_init
2086
3832
  maxlag = np.sum(X * data) / np.sum(data)
2087
- maxsigma = 10.0
3833
+ maxsigma = np.float64(10.0)
2088
3834
  elif peakfittype == "gauss":
2089
3835
  X = corrtimeaxis[peakstart : peakend + 1] - baseline
2090
3836
  data = corrfunc[peakstart : peakend + 1]
@@ -2094,10 +3840,15 @@ def simfuncpeakfit(
2094
3840
  if debug:
2095
3841
  print("fit input array:", p0)
2096
3842
  try:
2097
- plsq, dummy = sp.optimize.leastsq(gaussresiduals, p0, args=(data, X), maxfev=5000)
2098
- maxval = plsq[0] + baseline
2099
- maxlag = np.fmod((1.0 * plsq[1]), lagmod)
2100
- maxsigma = plsq[2]
3843
+ plsq, ier = sp.optimize.leastsq(gaussresiduals, p0, args=(data, X), maxfev=5000)
3844
+ if ier not in [1, 2, 3, 4]: # Check for successful convergence
3845
+ maxval = np.float64(0.0)
3846
+ maxlag = np.float64(0.0)
3847
+ maxsigma = np.float64(0.0)
3848
+ else:
3849
+ maxval = plsq[0] + baseline
3850
+ maxlag = np.fmod((1.0 * plsq[1]), lagmod)
3851
+ maxsigma = plsq[2]
2101
3852
  except:
2102
3853
  maxval = np.float64(0.0)
2103
3854
  maxlag = np.float64(0.0)
@@ -2130,10 +3881,10 @@ def simfuncpeakfit(
2130
3881
  if debug:
2131
3882
  print("poly coffs:", a, b, c)
2132
3883
  print("maxlag, maxval, maxsigma:", maxlag, maxval, maxsigma)
2133
- except np.lib.polynomial.RankWarning:
2134
- maxlag = 0.0
2135
- maxval = 0.0
2136
- maxsigma = 0.0
3884
+ except np.exceptions.RankWarning:
3885
+ maxlag = np.float64(0.0)
3886
+ maxval = np.float64(0.0)
3887
+ maxsigma = np.float64(0.0)
2137
3888
  if debug:
2138
3889
  print("\n")
2139
3890
  for i in range(len(X)):
@@ -2158,7 +3909,7 @@ def simfuncpeakfit(
2158
3909
  if (functype == "correlation") or (functype == "hybrid"):
2159
3910
  if maxval < lowestcorrcoeff:
2160
3911
  failreason |= FML_FITAMPLOW
2161
- maxval = lowestcorrcoeff
3912
+ maxval = np.float64(lowestcorrcoeff)
2162
3913
  if debug:
2163
3914
  print("bad fit amp: maxval is lower than lower limit")
2164
3915
  fitfail = True
@@ -2195,22 +3946,22 @@ def simfuncpeakfit(
2195
3946
  print("bad lag after refinement")
2196
3947
  if lagmin > maxlag:
2197
3948
  failreason |= FML_FITLAGLOW
2198
- maxlag = lagmin
3949
+ maxlag = np.float64(lagmin)
2199
3950
  else:
2200
3951
  failreason |= FML_FITLAGHIGH
2201
- maxlag = lagmax
3952
+ maxlag = np.float64(lagmax)
2202
3953
  fitfail = True
2203
3954
  if maxsigma > absmaxsigma:
2204
3955
  failreason |= FML_FITWIDTHHIGH
2205
3956
  if debug:
2206
3957
  print("bad width after refinement:", maxsigma, ">", absmaxsigma)
2207
- maxsigma = absmaxsigma
3958
+ maxsigma = np.float64(absmaxsigma)
2208
3959
  fitfail = True
2209
3960
  if maxsigma < absminsigma:
2210
3961
  failreason |= FML_FITWIDTHLOW
2211
3962
  if debug:
2212
3963
  print("bad width after refinement:", maxsigma, "<", absminsigma)
2213
- maxsigma = absminsigma
3964
+ maxsigma = np.float64(absminsigma)
2214
3965
  fitfail = True
2215
3966
  if fitfail:
2216
3967
  if debug:
@@ -2266,16 +4017,47 @@ def simfuncpeakfit(
2266
4017
  )
2267
4018
 
2268
4019
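
The 'gauss' branch of simfuncpeakfit boils down to a least-squares Gaussian fit over a small window around the similarity-function maximum. A hedged, self-contained sketch of that step (a local residual lambda stands in for this module's gaussresiduals helper):

    import numpy as np
    from scipy import optimize

    corrtimeaxis = np.linspace(-30.0, 30.0, 601)
    corrfunc = 0.8 * np.exp(-0.5 * ((corrtimeaxis - 3.0) / 2.0) ** 2)

    maxindex = int(np.argmax(corrfunc))
    X = corrtimeaxis[maxindex - 20 : maxindex + 21]
    data = corrfunc[maxindex - 20 : maxindex + 21]

    residuals = lambda p, y, x: y - p[0] * np.exp(-((x - p[1]) ** 2) / (2.0 * p[2] ** 2))
    p0 = np.array([data.max(), X[np.argmax(data)], 2.0])
    plsq, ier = optimize.leastsq(residuals, p0, args=(data, X), maxfev=5000)
    print(ier in (1, 2, 3, 4), np.round(plsq, 2))   # converged; ~[0.8, 3.0, 2.0]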
 
2269
- def _maxindex_noedge(corrfunc, corrtimeaxis, bipolar=False):
4020
+ def _maxindex_noedge(
4021
+ corrfunc: NDArray, corrtimeaxis: NDArray, bipolar: bool = False
4022
+ ) -> Tuple[int, float]:
2270
4023
  """
4024
+ Find the index of the maximum correlation value, avoiding edge effects.
4025
+
4026
+ This function locates the maximum (or minimum, if bipolar=True) correlation value
4027
+ within the given time axis range, while avoiding edge effects by progressively
4028
+ narrowing the search window.
2271
4029
 
2272
4030
  Parameters
2273
4031
  ----------
2274
- corrfunc
4032
+ corrfunc : NDArray
4033
+ Correlation function values to search for maximum.
4034
+ corrtimeaxis : NDArray
4035
+ Time axis corresponding to the correlation function.
4036
+ bipolar : bool, optional
4037
+ If True, considers both positive and negative correlation values.
4038
+ Default is False.
2275
4039
 
2276
4040
  Returns
2277
4041
  -------
2278
-
4042
+ Tuple[int, float]
4043
+ A tuple containing:
4044
+ - int: Index of the maximum correlation value
4045
+ - float: Flip factor (-1.0 if minimum was selected, 1.0 otherwise)
4046
+
4047
+ Notes
4048
+ -----
4049
+ The function iteratively narrows the search range by excluding edges
4050
+ where the maximum was found. This helps avoid edge effects in correlation
4051
+ analysis. When bipolar=True, the function compares both maximum and minimum
4052
+ absolute values to determine the optimal selection.
4053
+
4054
+ Examples
4055
+ --------
4056
+ >>> corrfunc = np.array([0.1, 0.5, 0.3, 0.8, 0.2])
4057
+ >>> corrtimeaxis = np.array([0, 1, 2, 3, 4])
4058
+ >>> index, flip = _maxindex_noedge(corrfunc, corrtimeaxis)
4059
+ >>> print(index)
4060
+ 3
2279
4061
  """
2280
4062
  lowerlim = 0
2281
4063
  upperlim = len(corrtimeaxis) - 1