rapidtide 2.9.5__py3-none-any.whl → 3.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (405) hide show
  1. cloud/gmscalc-HCPYA +1 -1
  2. cloud/mount-and-run +2 -0
  3. cloud/rapidtide-HCPYA +3 -3
  4. rapidtide/Colortables.py +538 -38
  5. rapidtide/OrthoImageItem.py +1094 -51
  6. rapidtide/RapidtideDataset.py +1709 -114
  7. rapidtide/__init__.py +0 -8
  8. rapidtide/_version.py +4 -4
  9. rapidtide/calccoherence.py +242 -97
  10. rapidtide/calcnullsimfunc.py +240 -140
  11. rapidtide/calcsimfunc.py +314 -129
  12. rapidtide/correlate.py +1211 -389
  13. rapidtide/data/examples/src/testLD +56 -0
  14. rapidtide/data/examples/src/test_findmaxlag.py +2 -2
  15. rapidtide/data/examples/src/test_mlregressallt.py +32 -17
  16. rapidtide/data/examples/src/testalign +1 -1
  17. rapidtide/data/examples/src/testatlasaverage +35 -7
  18. rapidtide/data/examples/src/testboth +21 -0
  19. rapidtide/data/examples/src/testcifti +11 -0
  20. rapidtide/data/examples/src/testdelayvar +13 -0
  21. rapidtide/data/examples/src/testdlfilt +25 -0
  22. rapidtide/data/examples/src/testfft +35 -0
  23. rapidtide/data/examples/src/testfileorfloat +37 -0
  24. rapidtide/data/examples/src/testfmri +94 -27
  25. rapidtide/data/examples/src/testfuncs +3 -3
  26. rapidtide/data/examples/src/testglmfilt +8 -6
  27. rapidtide/data/examples/src/testhappy +84 -51
  28. rapidtide/data/examples/src/testinitdelay +19 -0
  29. rapidtide/data/examples/src/testmodels +33 -0
  30. rapidtide/data/examples/src/testnewrefine +26 -0
  31. rapidtide/data/examples/src/testnoiseamp +21 -0
  32. rapidtide/data/examples/src/testppgproc +17 -0
  33. rapidtide/data/examples/src/testrefineonly +22 -0
  34. rapidtide/data/examples/src/testretro +26 -13
  35. rapidtide/data/examples/src/testretrolagtcs +16 -0
  36. rapidtide/data/examples/src/testrolloff +11 -0
  37. rapidtide/data/examples/src/testsimdata +45 -28
  38. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  39. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  40. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  41. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  42. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.png +0 -0
  43. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.txt +1 -0
  44. rapidtide/data/models/model_cnn_pytorch_fulldata/model.pth +0 -0
  45. rapidtide/data/models/model_cnn_pytorch_fulldata/model_meta.json +80 -0
  46. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.png +0 -0
  47. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.txt +1 -0
  48. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model.pth +0 -0
  49. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model_meta.json +138 -0
  50. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.png +0 -0
  51. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.txt +1 -0
  52. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model.pth +0 -0
  53. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model_meta.json +128 -0
  54. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.png +0 -0
  55. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.txt +1 -0
  56. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model.pth +0 -0
  57. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model_meta.json +49 -0
  58. rapidtide/data/models/model_revised_tf2/model.keras +0 -0
  59. rapidtide/data/models/{model_serdar → model_revised_tf2}/model_meta.json +1 -1
  60. rapidtide/data/models/model_serdar2_tf2/model.keras +0 -0
  61. rapidtide/data/models/{model_serdar2 → model_serdar2_tf2}/model_meta.json +1 -1
  62. rapidtide/data/models/model_serdar_tf2/model.keras +0 -0
  63. rapidtide/data/models/{model_revised → model_serdar_tf2}/model_meta.json +1 -1
  64. rapidtide/data/reference/HCP1200v2_MTT_2mm.nii.gz +0 -0
  65. rapidtide/data/reference/HCP1200v2_binmask_2mm.nii.gz +0 -0
  66. rapidtide/data/reference/HCP1200v2_csf_2mm.nii.gz +0 -0
  67. rapidtide/data/reference/HCP1200v2_gray_2mm.nii.gz +0 -0
  68. rapidtide/data/reference/HCP1200v2_graylaghist.json +7 -0
  69. rapidtide/data/reference/HCP1200v2_graylaghist.tsv.gz +0 -0
  70. rapidtide/data/reference/HCP1200v2_laghist.json +7 -0
  71. rapidtide/data/reference/HCP1200v2_laghist.tsv.gz +0 -0
  72. rapidtide/data/reference/HCP1200v2_mask_2mm.nii.gz +0 -0
  73. rapidtide/data/reference/HCP1200v2_maxcorr_2mm.nii.gz +0 -0
  74. rapidtide/data/reference/HCP1200v2_maxtime_2mm.nii.gz +0 -0
  75. rapidtide/data/reference/HCP1200v2_maxwidth_2mm.nii.gz +0 -0
  76. rapidtide/data/reference/HCP1200v2_negmask_2mm.nii.gz +0 -0
  77. rapidtide/data/reference/HCP1200v2_timepercentile_2mm.nii.gz +0 -0
  78. rapidtide/data/reference/HCP1200v2_white_2mm.nii.gz +0 -0
  79. rapidtide/data/reference/HCP1200v2_whitelaghist.json +7 -0
  80. rapidtide/data/reference/HCP1200v2_whitelaghist.tsv.gz +0 -0
  81. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2.xml +131 -0
  82. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_regions.txt +60 -0
  83. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_space-MNI152NLin6Asym_2mm.nii.gz +0 -0
  84. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
  85. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
  86. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  87. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL2_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  88. rapidtide/data/reference/MNI152_T1_1mm_Brain_FAST_seg.nii.gz +0 -0
  89. rapidtide/data/reference/MNI152_T1_1mm_Brain_Mask.nii.gz +0 -0
  90. rapidtide/data/reference/MNI152_T1_2mm_Brain_FAST_seg.nii.gz +0 -0
  91. rapidtide/data/reference/MNI152_T1_2mm_Brain_Mask.nii.gz +0 -0
  92. rapidtide/decorators.py +91 -0
  93. rapidtide/dlfilter.py +2553 -414
  94. rapidtide/dlfiltertorch.py +5201 -0
  95. rapidtide/externaltools.py +328 -13
  96. rapidtide/fMRIData_class.py +178 -0
  97. rapidtide/ffttools.py +168 -0
  98. rapidtide/filter.py +2704 -1462
  99. rapidtide/fit.py +2361 -579
  100. rapidtide/genericmultiproc.py +197 -0
  101. rapidtide/happy_supportfuncs.py +3255 -548
  102. rapidtide/helper_classes.py +590 -1181
  103. rapidtide/io.py +2569 -468
  104. rapidtide/linfitfiltpass.py +784 -0
  105. rapidtide/makelaggedtcs.py +267 -97
  106. rapidtide/maskutil.py +555 -25
  107. rapidtide/miscmath.py +867 -137
  108. rapidtide/multiproc.py +217 -44
  109. rapidtide/patchmatch.py +752 -0
  110. rapidtide/peakeval.py +32 -32
  111. rapidtide/ppgproc.py +2205 -0
  112. rapidtide/qualitycheck.py +353 -40
  113. rapidtide/refinedelay.py +854 -0
  114. rapidtide/refineregressor.py +939 -0
  115. rapidtide/resample.py +725 -204
  116. rapidtide/scripts/__init__.py +1 -0
  117. rapidtide/scripts/{adjustoffset → adjustoffset.py} +7 -2
  118. rapidtide/scripts/{aligntcs → aligntcs.py} +7 -2
  119. rapidtide/scripts/{applydlfilter → applydlfilter.py} +7 -2
  120. rapidtide/scripts/applyppgproc.py +28 -0
  121. rapidtide/scripts/{atlasaverage → atlasaverage.py} +7 -2
  122. rapidtide/scripts/{atlastool → atlastool.py} +7 -2
  123. rapidtide/scripts/{calcicc → calcicc.py} +7 -2
  124. rapidtide/scripts/{calctexticc → calctexticc.py} +7 -2
  125. rapidtide/scripts/{calcttest → calcttest.py} +7 -2
  126. rapidtide/scripts/{ccorrica → ccorrica.py} +7 -2
  127. rapidtide/scripts/delayvar.py +28 -0
  128. rapidtide/scripts/{diffrois → diffrois.py} +7 -2
  129. rapidtide/scripts/{endtidalproc → endtidalproc.py} +7 -2
  130. rapidtide/scripts/{fdica → fdica.py} +7 -2
  131. rapidtide/scripts/{filtnifti → filtnifti.py} +7 -2
  132. rapidtide/scripts/{filttc → filttc.py} +7 -2
  133. rapidtide/scripts/{fingerprint → fingerprint.py} +20 -16
  134. rapidtide/scripts/{fixtr → fixtr.py} +7 -2
  135. rapidtide/scripts/{gmscalc → gmscalc.py} +7 -2
  136. rapidtide/scripts/{happy → happy.py} +7 -2
  137. rapidtide/scripts/{happy2std → happy2std.py} +7 -2
  138. rapidtide/scripts/{happywarp → happywarp.py} +8 -4
  139. rapidtide/scripts/{histnifti → histnifti.py} +7 -2
  140. rapidtide/scripts/{histtc → histtc.py} +7 -2
  141. rapidtide/scripts/{glmfilt → linfitfilt.py} +7 -4
  142. rapidtide/scripts/{localflow → localflow.py} +7 -2
  143. rapidtide/scripts/{mergequality → mergequality.py} +7 -2
  144. rapidtide/scripts/{pairproc → pairproc.py} +7 -2
  145. rapidtide/scripts/{pairwisemergenifti → pairwisemergenifti.py} +7 -2
  146. rapidtide/scripts/{physiofreq → physiofreq.py} +7 -2
  147. rapidtide/scripts/{pixelcomp → pixelcomp.py} +7 -2
  148. rapidtide/scripts/{plethquality → plethquality.py} +7 -2
  149. rapidtide/scripts/{polyfitim → polyfitim.py} +7 -2
  150. rapidtide/scripts/{proj2flow → proj2flow.py} +7 -2
  151. rapidtide/scripts/{rankimage → rankimage.py} +7 -2
  152. rapidtide/scripts/{rapidtide → rapidtide.py} +7 -2
  153. rapidtide/scripts/{rapidtide2std → rapidtide2std.py} +7 -2
  154. rapidtide/scripts/{resamplenifti → resamplenifti.py} +7 -2
  155. rapidtide/scripts/{resampletc → resampletc.py} +7 -2
  156. rapidtide/scripts/retrolagtcs.py +28 -0
  157. rapidtide/scripts/retroregress.py +28 -0
  158. rapidtide/scripts/{roisummarize → roisummarize.py} +7 -2
  159. rapidtide/scripts/{runqualitycheck → runqualitycheck.py} +7 -2
  160. rapidtide/scripts/{showarbcorr → showarbcorr.py} +7 -2
  161. rapidtide/scripts/{showhist → showhist.py} +7 -2
  162. rapidtide/scripts/{showstxcorr → showstxcorr.py} +7 -2
  163. rapidtide/scripts/{showtc → showtc.py} +7 -2
  164. rapidtide/scripts/{showxcorr_legacy → showxcorr_legacy.py} +8 -8
  165. rapidtide/scripts/{showxcorrx → showxcorrx.py} +7 -2
  166. rapidtide/scripts/{showxy → showxy.py} +7 -2
  167. rapidtide/scripts/{simdata → simdata.py} +7 -2
  168. rapidtide/scripts/{spatialdecomp → spatialdecomp.py} +7 -2
  169. rapidtide/scripts/{spatialfit → spatialfit.py} +7 -2
  170. rapidtide/scripts/{spatialmi → spatialmi.py} +7 -2
  171. rapidtide/scripts/{spectrogram → spectrogram.py} +7 -2
  172. rapidtide/scripts/stupidramtricks.py +238 -0
  173. rapidtide/scripts/{synthASL → synthASL.py} +7 -2
  174. rapidtide/scripts/{tcfrom2col → tcfrom2col.py} +7 -2
  175. rapidtide/scripts/{tcfrom3col → tcfrom3col.py} +7 -2
  176. rapidtide/scripts/{temporaldecomp → temporaldecomp.py} +7 -2
  177. rapidtide/scripts/{testhrv → testhrv.py} +1 -1
  178. rapidtide/scripts/{threeD → threeD.py} +7 -2
  179. rapidtide/scripts/{tidepool → tidepool.py} +7 -2
  180. rapidtide/scripts/{variabilityizer → variabilityizer.py} +7 -2
  181. rapidtide/simFuncClasses.py +2113 -0
  182. rapidtide/simfuncfit.py +312 -108
  183. rapidtide/stats.py +579 -247
  184. rapidtide/tests/.coveragerc +27 -6
  185. rapidtide-2.9.5.data/scripts/fdica → rapidtide/tests/cleanposttest +4 -6
  186. rapidtide/tests/happycomp +9 -0
  187. rapidtide/tests/resethappytargets +1 -1
  188. rapidtide/tests/resetrapidtidetargets +1 -1
  189. rapidtide/tests/resettargets +1 -1
  190. rapidtide/tests/runlocaltest +3 -3
  191. rapidtide/tests/showkernels +1 -1
  192. rapidtide/tests/test_aliasedcorrelate.py +4 -4
  193. rapidtide/tests/test_aligntcs.py +1 -1
  194. rapidtide/tests/test_calcicc.py +1 -1
  195. rapidtide/tests/test_cleanregressor.py +184 -0
  196. rapidtide/tests/test_congrid.py +70 -81
  197. rapidtide/tests/test_correlate.py +1 -1
  198. rapidtide/tests/test_corrpass.py +4 -4
  199. rapidtide/tests/test_delayestimation.py +54 -59
  200. rapidtide/tests/test_dlfiltertorch.py +437 -0
  201. rapidtide/tests/test_doresample.py +2 -2
  202. rapidtide/tests/test_externaltools.py +69 -0
  203. rapidtide/tests/test_fastresampler.py +9 -5
  204. rapidtide/tests/test_filter.py +96 -57
  205. rapidtide/tests/test_findmaxlag.py +50 -19
  206. rapidtide/tests/test_fullrunhappy_v1.py +15 -10
  207. rapidtide/tests/test_fullrunhappy_v2.py +19 -13
  208. rapidtide/tests/test_fullrunhappy_v3.py +28 -13
  209. rapidtide/tests/test_fullrunhappy_v4.py +30 -11
  210. rapidtide/tests/test_fullrunhappy_v5.py +62 -0
  211. rapidtide/tests/test_fullrunrapidtide_v1.py +61 -7
  212. rapidtide/tests/test_fullrunrapidtide_v2.py +27 -15
  213. rapidtide/tests/test_fullrunrapidtide_v3.py +28 -8
  214. rapidtide/tests/test_fullrunrapidtide_v4.py +16 -8
  215. rapidtide/tests/test_fullrunrapidtide_v5.py +15 -6
  216. rapidtide/tests/test_fullrunrapidtide_v6.py +142 -0
  217. rapidtide/tests/test_fullrunrapidtide_v7.py +114 -0
  218. rapidtide/tests/test_fullrunrapidtide_v8.py +66 -0
  219. rapidtide/tests/test_getparsers.py +158 -0
  220. rapidtide/tests/test_io.py +59 -18
  221. rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +10 -10
  222. rapidtide/tests/test_mi.py +1 -1
  223. rapidtide/tests/test_miscmath.py +1 -1
  224. rapidtide/tests/test_motionregress.py +5 -5
  225. rapidtide/tests/test_nullcorr.py +6 -9
  226. rapidtide/tests/test_padvec.py +216 -0
  227. rapidtide/tests/test_parserfuncs.py +101 -0
  228. rapidtide/tests/test_phaseanalysis.py +1 -1
  229. rapidtide/tests/test_rapidtideparser.py +59 -53
  230. rapidtide/tests/test_refinedelay.py +296 -0
  231. rapidtide/tests/test_runmisc.py +5 -5
  232. rapidtide/tests/test_sharedmem.py +60 -0
  233. rapidtide/tests/test_simroundtrip.py +132 -0
  234. rapidtide/tests/test_simulate.py +1 -1
  235. rapidtide/tests/test_stcorrelate.py +4 -2
  236. rapidtide/tests/test_timeshift.py +2 -2
  237. rapidtide/tests/test_valtoindex.py +1 -1
  238. rapidtide/tests/test_zRapidtideDataset.py +5 -3
  239. rapidtide/tests/utils.py +10 -9
  240. rapidtide/tidepoolTemplate.py +88 -70
  241. rapidtide/tidepoolTemplate.ui +60 -46
  242. rapidtide/tidepoolTemplate_alt.py +88 -53
  243. rapidtide/tidepoolTemplate_alt.ui +62 -52
  244. rapidtide/tidepoolTemplate_alt_qt6.py +921 -0
  245. rapidtide/tidepoolTemplate_big.py +1125 -0
  246. rapidtide/tidepoolTemplate_big.ui +2386 -0
  247. rapidtide/tidepoolTemplate_big_qt6.py +1129 -0
  248. rapidtide/tidepoolTemplate_qt6.py +793 -0
  249. rapidtide/util.py +1389 -148
  250. rapidtide/voxelData.py +1048 -0
  251. rapidtide/wiener.py +138 -25
  252. rapidtide/wiener2.py +114 -8
  253. rapidtide/workflows/adjustoffset.py +107 -5
  254. rapidtide/workflows/aligntcs.py +86 -3
  255. rapidtide/workflows/applydlfilter.py +231 -89
  256. rapidtide/workflows/applyppgproc.py +540 -0
  257. rapidtide/workflows/atlasaverage.py +309 -48
  258. rapidtide/workflows/atlastool.py +130 -9
  259. rapidtide/workflows/calcSimFuncMap.py +490 -0
  260. rapidtide/workflows/calctexticc.py +202 -10
  261. rapidtide/workflows/ccorrica.py +123 -15
  262. rapidtide/workflows/cleanregressor.py +415 -0
  263. rapidtide/workflows/delayvar.py +1268 -0
  264. rapidtide/workflows/diffrois.py +84 -6
  265. rapidtide/workflows/endtidalproc.py +149 -9
  266. rapidtide/workflows/fdica.py +197 -17
  267. rapidtide/workflows/filtnifti.py +71 -4
  268. rapidtide/workflows/filttc.py +76 -5
  269. rapidtide/workflows/fitSimFuncMap.py +578 -0
  270. rapidtide/workflows/fixtr.py +74 -4
  271. rapidtide/workflows/gmscalc.py +116 -6
  272. rapidtide/workflows/happy.py +1242 -480
  273. rapidtide/workflows/happy2std.py +145 -13
  274. rapidtide/workflows/happy_parser.py +277 -59
  275. rapidtide/workflows/histnifti.py +120 -4
  276. rapidtide/workflows/histtc.py +85 -4
  277. rapidtide/workflows/{glmfilt.py → linfitfilt.py} +128 -14
  278. rapidtide/workflows/localflow.py +329 -29
  279. rapidtide/workflows/mergequality.py +80 -4
  280. rapidtide/workflows/niftidecomp.py +323 -19
  281. rapidtide/workflows/niftistats.py +178 -8
  282. rapidtide/workflows/pairproc.py +99 -5
  283. rapidtide/workflows/pairwisemergenifti.py +86 -3
  284. rapidtide/workflows/parser_funcs.py +1488 -56
  285. rapidtide/workflows/physiofreq.py +139 -12
  286. rapidtide/workflows/pixelcomp.py +211 -9
  287. rapidtide/workflows/plethquality.py +105 -23
  288. rapidtide/workflows/polyfitim.py +159 -19
  289. rapidtide/workflows/proj2flow.py +76 -3
  290. rapidtide/workflows/rankimage.py +115 -8
  291. rapidtide/workflows/rapidtide.py +1833 -1919
  292. rapidtide/workflows/rapidtide2std.py +101 -3
  293. rapidtide/workflows/rapidtide_parser.py +607 -372
  294. rapidtide/workflows/refineDelayMap.py +249 -0
  295. rapidtide/workflows/refineRegressor.py +1215 -0
  296. rapidtide/workflows/regressfrommaps.py +308 -0
  297. rapidtide/workflows/resamplenifti.py +86 -4
  298. rapidtide/workflows/resampletc.py +92 -4
  299. rapidtide/workflows/retrolagtcs.py +442 -0
  300. rapidtide/workflows/retroregress.py +1501 -0
  301. rapidtide/workflows/roisummarize.py +176 -7
  302. rapidtide/workflows/runqualitycheck.py +72 -7
  303. rapidtide/workflows/showarbcorr.py +172 -16
  304. rapidtide/workflows/showhist.py +87 -3
  305. rapidtide/workflows/showstxcorr.py +161 -4
  306. rapidtide/workflows/showtc.py +172 -10
  307. rapidtide/workflows/showxcorrx.py +250 -62
  308. rapidtide/workflows/showxy.py +186 -16
  309. rapidtide/workflows/simdata.py +418 -112
  310. rapidtide/workflows/spatialfit.py +83 -8
  311. rapidtide/workflows/spatialmi.py +252 -29
  312. rapidtide/workflows/spectrogram.py +306 -33
  313. rapidtide/workflows/synthASL.py +157 -6
  314. rapidtide/workflows/tcfrom2col.py +77 -3
  315. rapidtide/workflows/tcfrom3col.py +75 -3
  316. rapidtide/workflows/tidepool.py +3829 -666
  317. rapidtide/workflows/utils.py +45 -19
  318. rapidtide/workflows/utils_doc.py +293 -0
  319. rapidtide/workflows/variabilityizer.py +118 -5
  320. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/METADATA +30 -223
  321. rapidtide-3.1.3.dist-info/RECORD +393 -0
  322. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/WHEEL +1 -1
  323. rapidtide-3.1.3.dist-info/entry_points.txt +65 -0
  324. rapidtide-3.1.3.dist-info/top_level.txt +2 -0
  325. rapidtide/calcandfitcorrpairs.py +0 -262
  326. rapidtide/data/examples/src/testoutputsize +0 -45
  327. rapidtide/data/models/model_revised/model.h5 +0 -0
  328. rapidtide/data/models/model_serdar/model.h5 +0 -0
  329. rapidtide/data/models/model_serdar2/model.h5 +0 -0
  330. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm.nii.gz +0 -0
  331. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  332. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm.nii.gz +0 -0
  333. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  334. rapidtide/data/reference/HCP1200_binmask_2mm_2009c_asym.nii.gz +0 -0
  335. rapidtide/data/reference/HCP1200_lag_2mm_2009c_asym.nii.gz +0 -0
  336. rapidtide/data/reference/HCP1200_mask_2mm_2009c_asym.nii.gz +0 -0
  337. rapidtide/data/reference/HCP1200_negmask_2mm_2009c_asym.nii.gz +0 -0
  338. rapidtide/data/reference/HCP1200_sigma_2mm_2009c_asym.nii.gz +0 -0
  339. rapidtide/data/reference/HCP1200_strength_2mm_2009c_asym.nii.gz +0 -0
  340. rapidtide/glmpass.py +0 -434
  341. rapidtide/refine_factored.py +0 -641
  342. rapidtide/scripts/retroglm +0 -23
  343. rapidtide/workflows/glmfrommaps.py +0 -202
  344. rapidtide/workflows/retroglm.py +0 -643
  345. rapidtide-2.9.5.data/scripts/adjustoffset +0 -23
  346. rapidtide-2.9.5.data/scripts/aligntcs +0 -23
  347. rapidtide-2.9.5.data/scripts/applydlfilter +0 -23
  348. rapidtide-2.9.5.data/scripts/atlasaverage +0 -23
  349. rapidtide-2.9.5.data/scripts/atlastool +0 -23
  350. rapidtide-2.9.5.data/scripts/calcicc +0 -22
  351. rapidtide-2.9.5.data/scripts/calctexticc +0 -23
  352. rapidtide-2.9.5.data/scripts/calcttest +0 -22
  353. rapidtide-2.9.5.data/scripts/ccorrica +0 -23
  354. rapidtide-2.9.5.data/scripts/diffrois +0 -23
  355. rapidtide-2.9.5.data/scripts/endtidalproc +0 -23
  356. rapidtide-2.9.5.data/scripts/filtnifti +0 -23
  357. rapidtide-2.9.5.data/scripts/filttc +0 -23
  358. rapidtide-2.9.5.data/scripts/fingerprint +0 -593
  359. rapidtide-2.9.5.data/scripts/fixtr +0 -23
  360. rapidtide-2.9.5.data/scripts/glmfilt +0 -24
  361. rapidtide-2.9.5.data/scripts/gmscalc +0 -22
  362. rapidtide-2.9.5.data/scripts/happy +0 -25
  363. rapidtide-2.9.5.data/scripts/happy2std +0 -23
  364. rapidtide-2.9.5.data/scripts/happywarp +0 -350
  365. rapidtide-2.9.5.data/scripts/histnifti +0 -23
  366. rapidtide-2.9.5.data/scripts/histtc +0 -23
  367. rapidtide-2.9.5.data/scripts/localflow +0 -23
  368. rapidtide-2.9.5.data/scripts/mergequality +0 -23
  369. rapidtide-2.9.5.data/scripts/pairproc +0 -23
  370. rapidtide-2.9.5.data/scripts/pairwisemergenifti +0 -23
  371. rapidtide-2.9.5.data/scripts/physiofreq +0 -23
  372. rapidtide-2.9.5.data/scripts/pixelcomp +0 -23
  373. rapidtide-2.9.5.data/scripts/plethquality +0 -23
  374. rapidtide-2.9.5.data/scripts/polyfitim +0 -23
  375. rapidtide-2.9.5.data/scripts/proj2flow +0 -23
  376. rapidtide-2.9.5.data/scripts/rankimage +0 -23
  377. rapidtide-2.9.5.data/scripts/rapidtide +0 -23
  378. rapidtide-2.9.5.data/scripts/rapidtide2std +0 -23
  379. rapidtide-2.9.5.data/scripts/resamplenifti +0 -23
  380. rapidtide-2.9.5.data/scripts/resampletc +0 -23
  381. rapidtide-2.9.5.data/scripts/retroglm +0 -23
  382. rapidtide-2.9.5.data/scripts/roisummarize +0 -23
  383. rapidtide-2.9.5.data/scripts/runqualitycheck +0 -23
  384. rapidtide-2.9.5.data/scripts/showarbcorr +0 -23
  385. rapidtide-2.9.5.data/scripts/showhist +0 -23
  386. rapidtide-2.9.5.data/scripts/showstxcorr +0 -23
  387. rapidtide-2.9.5.data/scripts/showtc +0 -23
  388. rapidtide-2.9.5.data/scripts/showxcorr_legacy +0 -536
  389. rapidtide-2.9.5.data/scripts/showxcorrx +0 -23
  390. rapidtide-2.9.5.data/scripts/showxy +0 -23
  391. rapidtide-2.9.5.data/scripts/simdata +0 -23
  392. rapidtide-2.9.5.data/scripts/spatialdecomp +0 -23
  393. rapidtide-2.9.5.data/scripts/spatialfit +0 -23
  394. rapidtide-2.9.5.data/scripts/spatialmi +0 -23
  395. rapidtide-2.9.5.data/scripts/spectrogram +0 -23
  396. rapidtide-2.9.5.data/scripts/synthASL +0 -23
  397. rapidtide-2.9.5.data/scripts/tcfrom2col +0 -23
  398. rapidtide-2.9.5.data/scripts/tcfrom3col +0 -23
  399. rapidtide-2.9.5.data/scripts/temporaldecomp +0 -23
  400. rapidtide-2.9.5.data/scripts/threeD +0 -236
  401. rapidtide-2.9.5.data/scripts/tidepool +0 -23
  402. rapidtide-2.9.5.data/scripts/variabilityizer +0 -23
  403. rapidtide-2.9.5.dist-info/RECORD +0 -357
  404. rapidtide-2.9.5.dist-info/top_level.txt +0 -86
  405. {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info/licenses}/LICENSE +0 -0
rapidtide/miscmath.py CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
  #
4
- # Copyright 2016-2024 Blaise Frederick
4
+ # Copyright 2016-2025 Blaise Frederick
5
5
  #
6
6
  # Licensed under the Apache License, Version 2.0 (the "License");
7
7
  # you may not use this file except in compliance with the License.
@@ -17,10 +17,14 @@
17
17
  #
18
18
  #
19
19
  import warnings
20
+ from typing import Callable, Optional, Tuple, Union
20
21
 
21
22
  import matplotlib.pyplot as plt
22
23
  import numpy as np
23
24
  from numpy.polynomial import Polynomial
25
+ from numpy.typing import NDArray
26
+
27
+ from rapidtide.decorators import conditionaljit, conditionaljit2
24
28
 
25
29
  with warnings.catch_warnings():
26
30
  warnings.simplefilter("ignore")
@@ -44,77 +48,87 @@ if pyfftwpresent:
44
48
  # ---------------------------------------- Global constants -------------------------------------------
45
49
  defaultbutterorder = 6
46
50
  MAXLINES = 10000000
47
- donotbeaggressive = True
48
-
49
- # ----------------------------------------- Conditional imports ---------------------------------------
50
- try:
51
- from memory_profiler import profile
52
-
53
- memprofilerexists = True
54
- except ImportError:
55
- memprofilerexists = False
56
-
57
- try:
58
- from numba import jit
59
- except ImportError:
60
- donotusenumba = True
61
- else:
62
- donotusenumba = False
63
-
64
-
65
- # ----------------------------------------- Conditional jit handling ----------------------------------
66
- def conditionaljit():
67
- def resdec(f):
68
- if donotusenumba:
69
- return f
70
- return jit(f, nopython=True)
71
-
72
- return resdec
73
-
74
-
75
- def conditionaljit2():
76
- def resdec(f):
77
- if donotusenumba or donotbeaggressive:
78
- return f
79
- return jit(f, nopython=True)
80
-
81
- return resdec
82
-
83
-
84
- def disablenumba():
85
- global donotusenumba
86
- donotusenumba = True
87
51
 
88
52
 
89
53
  # --------------------------- Spectral analysis functions ---------------------------------------
90
- def phase(mcv):
91
- r"""Return phase of complex numbers.
54
+ def phase(mcv: NDArray) -> NDArray:
55
+ """
56
+ Return phase of complex numbers.
92
57
 
93
58
  Parameters
94
59
  ----------
95
- mcv : complex array
96
- A complex vector
60
+ mcv : NDArray
61
+ A complex vector. The input array can be of any shape, but must contain
62
+ complex numbers.
97
63
 
98
64
  Returns
99
65
  -------
100
- phase : float array
101
- The phase angle of the numbers, in radians
102
-
66
+ NDArray
67
+ The phase angle of the numbers, in radians. The return array has the same
68
+ shape as the input array. Phase angles are in the range [-π, π].
69
+
70
+ Notes
71
+ -----
72
+ This function computes the element-wise phase angle of complex numbers using
73
+ the arctan2 function, which correctly handles the quadrant of the angle.
74
+ The phase is computed as atan2(imaginary_part, real_part).
75
+
76
+ Examples
77
+ --------
78
+ >>> import numpy as np
79
+ >>> z = np.array([1+1j, -1+1j, -1-1j, 1-1j])
80
+ >>> phase(z)
81
+ array([ 0.78539816, 2.35619449, -2.35619449, -0.78539816])
82
+
83
+ >>> z = np.array([[1+1j, -1+1j], [-1-1j, 1-1j]])
84
+ >>> phase(z)
85
+ array([[ 0.78539816, 2.35619449],
86
+ [-2.35619449, -0.78539816]])
103
87
  """
104
88
  return np.arctan2(mcv.imag, mcv.real)
105
89
 
106
90
 
107
- def polarfft(invec, samplerate):
91
+ def polarfft(invec: NDArray, samplerate: float) -> Tuple[NDArray, NDArray, NDArray]:
108
92
  """
93
+ Compute polar FFT representation of input signal.
94
+
95
+ This function applies a Hamming window to the input signal, computes the FFT,
96
+ and returns the frequency spectrum, magnitude spectrum, and phase spectrum.
109
97
 
110
98
  Parameters
111
99
  ----------
112
- invec
113
- samplerate
100
+ invec : ndarray
101
+ Input signal vector to be transformed
102
+ samplerate : float
103
+ Sampling rate of the input signal in Hz
114
104
 
115
105
  Returns
116
106
  -------
117
-
107
+ tuple of ndarray
108
+ A tuple containing:
109
+ - freqs : ndarray
110
+ Frequency values corresponding to the spectrum
111
+ - magspec : ndarray
112
+ Magnitude spectrum of the input signal
113
+ - phspec : ndarray
114
+ Phase spectrum of the input signal
115
+
116
+ Notes
117
+ -----
118
+ - If the input vector length is odd, the last element is removed to make it even
119
+ - A Hamming window is applied before FFT computation
120
+ - Only the first half of the FFT result is returned (positive frequencies)
121
+ - The maximum frequency is half the sampling rate (Nyquist frequency)
122
+
123
+ Examples
124
+ --------
125
+ >>> import numpy as np
126
+ >>> from scipy import fftpack
127
+ >>> # Create a test signal
128
+ >>> t = np.linspace(0, 1, 1000)
129
+ >>> signal = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 120 * t)
130
+ >>> freqs, mags, phs = polarfft(signal, 1000.0)
131
+ >>> print(f"Frequency range: {freqs[0]} to {freqs[-1]} Hz")
118
132
  """
119
133
  if np.shape(invec)[0] % 2 == 1:
120
134
  thevec = invec[:-1]
@@ -130,20 +144,84 @@ def polarfft(invec, samplerate):
130
144
  return freqs, magspec, phspec
131
145
 
132
146
 
133
- def complex_cepstrum(x):
147
+ def complex_cepstrum(x: NDArray) -> Tuple[NDArray, NDArray]:
134
148
  """
149
+ Compute the complex cepstrum of a real sequence.
150
+
151
+ The complex cepstrum is the inverse Fourier transform of the logarithm of the
152
+ complex spectrum. It is commonly used in signal processing for analyzing
153
+ periodicities and harmonics in signals.
135
154
 
136
155
  Parameters
137
156
  ----------
138
- x
157
+ x : ndarray
158
+ Real sequence to compute complex cepstrum of.
139
159
 
140
160
  Returns
141
161
  -------
142
-
162
+ ceps : ndarray
163
+ Complex cepstrum of the input sequence.
164
+ ndelay : ndarray
165
+ The number of samples of circular delay added to the input sequence.
166
+
167
+ Notes
168
+ -----
169
+ This implementation follows the approach described in [1]_ and handles
170
+ the unwrapping of the phase to avoid discontinuities in the cepstral
171
+ domain.
172
+
173
+ References
174
+ ----------
175
+ .. [1] M. R. Schroeder, "Periodicity and cepstral analysis," IEEE Transactions
176
+ on Audio and Electroacoustics, vol. 19, no. 3, pp. 233-238, 1971.
177
+
178
+ Examples
179
+ --------
180
+ >>> import numpy as np
181
+ >>> x = np.array([1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0])
182
+ >>> ceps, ndelay = complex_cepstrum(x)
183
+ >>> print(ceps)
184
+ >>> print(ndelay)
143
185
  """
144
186
 
145
187
  # adapted from https://github.com/python-acoustics/python-acoustics/blob/master/acoustics/cepstrum.py
146
- def _unwrap(phase):
188
+ def _unwrap(phase: NDArray) -> Tuple[NDArray, NDArray]:
189
+ """
190
+ Unwrap phase and compute delay correction.
191
+
192
+ This function unwraps a phase array to remove discontinuities and computes
193
+ the necessary delay correction to align the unwrapped phase at the center
194
+ of the array.
195
+
196
+ Parameters
197
+ ----------
198
+ phase : NDArray
199
+ Input phase array with shape (..., samples) where the last dimension
200
+ represents the phase samples to be unwrapped.
201
+
202
+ Returns
203
+ -------
204
+ unwrapped : NDArray
205
+ Unwrapped phase array with the same shape as input phase.
206
+ ndelay : NDArray
207
+ Delay correction array with shape (...,) containing the number of
208
+ π phase jumps to correct for each sample in the batch.
209
+
210
+ Notes
211
+ -----
212
+ The unwrapping process removes discontinuities by adding multiples of 2π
213
+ to eliminate phase jumps greater than π. The delay correction is computed
214
+ by finding the phase at the center sample and adjusting the entire array
215
+ to align this reference point.
216
+
217
+ Examples
218
+ --------
219
+ >>> import numpy as np
220
+ >>> phase = np.array([[0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]])
221
+ >>> unwrapped, ndelay = _unwrap(phase)
222
+ >>> print(unwrapped)
223
+ >>> print(ndelay)
224
+ """
147
225
  samples = phase.shape[-1]
148
226
  unwrapped = np.unwrap(phase)
149
227
  center = (samples + 1) // 2
@@ -161,34 +239,82 @@ def complex_cepstrum(x):
161
239
  return ceps, ndelay
162
240
 
163
241
 
164
def real_cepstrum(x: NDArray) -> NDArray:
    """
    Compute the real cepstrum of a signal.

    The real cepstrum is the inverse Fourier transform of the log magnitude
    spectrum of the input.  It is widely used in speech and audio analysis to
    expose periodic structure (e.g. pitch) in a signal.

    Parameters
    ----------
    x : ndarray
        Input signal of real values.

    Returns
    -------
    ndarray
        Real cepstrum of the input; same shape as ``x``.

    Notes
    -----
    Computed in three steps: forward FFT, log of the spectral magnitude,
    then the real part of the inverse FFT.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0])
    >>> real_cepstrum(x)
    array([ 2.53444207,  0.74508512, -0.23302092, -0.34635144, -0.23302092,
            0.74508512,  2.53444207])
    """
    # adapted from https://github.com/python-acoustics/python-acoustics/blob/master/acoustics/cepstrum.py
    spectrum = fftpack.fft(x)
    log_magnitude = np.log(np.abs(spectrum))
    return fftpack.ifft(log_magnitude).real
177
282
 
178
283
 
179
284
  # --------------------------- miscellaneous math functions -------------------------------------------------
180
- def thederiv(y):
285
+ def thederiv(y: NDArray) -> NDArray:
181
286
  """
287
+ Compute the first derivative of an array using finite differences.
288
+
289
+ This function calculates the derivative of an array `y` using a central difference
290
+ scheme for interior points and forward/backward differences for the first and
291
+ last points respectively.
182
292
 
183
293
  Parameters
184
294
  ----------
185
- y
295
+ y : ndarray
296
+ Input array of values to differentiate. Shape (n,) where n is the number of points.
186
297
 
187
298
  Returns
188
299
  -------
189
-
300
+ ndarray
301
+ Array of same shape as `y` containing the computed derivative values.
302
+
303
+ Notes
304
+ -----
305
+ The derivative is computed using the following scheme:
306
+ - First point: dyc[0] = (y[0] - y[1]) / 2.0
307
+ - Interior points: dyc[i] = (y[i+1] - y[i-1]) / 2.0
308
+ - Last point: dyc[-1] = (y[-1] - y[-2]) / 2.0
309
+
310
+ Examples
311
+ --------
312
+ >>> import numpy as np
313
+ >>> y = np.array([1, 2, 4, 7, 11])
314
+ >>> thederiv(y)
315
+ array([-0.5, 1. , 2. , 3. , 4.5])
190
316
  """
191
- dyc = [0.0] * len(y)
317
+ dyc = np.zeros_like(y)
192
318
  dyc[0] = (y[0] - y[1]) / 2.0
193
319
  for i in range(1, len(y) - 1):
194
320
  dyc[i] = (y[i + 1] - y[i - 1]) / 2.0
@@ -196,16 +322,38 @@ def thederiv(y):
196
322
  return dyc
197
323
 
198
324
 
199
- def primes(n):
325
+ def primes(n: int) -> list:
200
326
  """
327
+ Compute the prime factorization of a positive integer.
328
+
329
+ Returns the prime factors of n in ascending order, including repeated factors.
201
330
 
202
331
  Parameters
203
332
  ----------
204
- n
333
+ n : int
334
+ A positive integer to factorize. Must be greater than 0.
205
335
 
206
336
  Returns
207
337
  -------
338
+ list of int
339
+ A list of prime factors of n in ascending order. If n is 1, returns an empty list.
340
+
341
+ Notes
342
+ -----
343
+ This implementation uses trial division starting from 2, incrementing by 1
344
+ until the square root of n. It is based on a StackOverflow answer and
345
+ efficiently handles repeated prime factors.
346
+
347
+ Examples
348
+ --------
349
+ >>> primes(12)
350
+ [2, 2, 3]
208
351
 
352
+ >>> primes(17)
353
+ [17]
354
+
355
+ >>> primes(1)
356
+ []
209
357
  """
210
358
  # found on stackoverflow: https://stackoverflow.com/questions/16996217/prime-factorization-list
211
359
  primfac = []
@@ -220,31 +368,77 @@ def primes(n):
220
368
  return primfac
221
369
 
222
370
 
223
def largestfac(n: int) -> int:
    """
    Return the largest prime factor of n.

    Parameters
    ----------
    n : int
        Positive integer whose largest prime factor is wanted.

    Returns
    -------
    int
        The largest prime factor of n.

    Notes
    -----
    Delegates to ``primes(n)``, which returns the prime factorization of n
    in ascending order; the final entry of that list is the largest factor.

    Examples
    --------
    >>> largestfac(13)
    13
    >>> largestfac(315)
    7
    """
    factorization = primes(n)
    return factorization[-1]
235
398
 
236
399
 
237
400
  # --------------------------- Normalization functions -------------------------------------------------
238
- def normalize(vector, method="stddev"):
401
+ def normalize(vector: NDArray, method: str = "stddev") -> NDArray:
239
402
  """
403
+ Normalize a vector using the specified normalization method.
240
404
 
241
405
  Parameters
242
406
  ----------
243
- vector
407
+ vector : NDArray
408
+ Input vector to be normalized.
409
+ method : str, default="stddev"
410
+ Normalization method to apply. Options are:
411
+ - "None": Subtract mean from vector
412
+ - "percent": Apply percentage normalization
413
+ - "variance": Apply variance normalization
414
+ - "stddev" or "z": Apply standard deviation normalization (Z-score)
415
+ - "p2p": Apply peak-to-peak normalization
416
+ - "mad": Apply median absolute deviation normalization
244
417
 
245
418
  Returns
246
419
  -------
247
-
420
+ NDArray
421
+ Normalized vector according to the specified method.
422
+
423
+ Raises
424
+ ------
425
+ ValueError
426
+ If an invalid normalization method is specified.
427
+
428
+ Notes
429
+ -----
430
+ This function provides multiple normalization techniques for preprocessing
431
+ data. The default "stddev" method (also available as "z") performs Z-score
432
+ normalization, which centers the data around zero with unit variance.
433
+
434
+ Examples
435
+ --------
436
+ >>> import numpy as np
437
+ >>> vector = np.array([1, 2, 3, 4, 5])
438
+ >>> normalize(vector, "stddev")
439
+ array([-1.41421356, -0.70710678, 0. , 0.70710678, 1.41421356])
440
+ >>> normalize(vector, "None")
441
+ array([-2., -1., 0., 1., 2.])
248
442
  """
249
443
  if method == "None":
250
444
  return vector - np.mean(vector)
@@ -257,26 +451,46 @@ def normalize(vector, method="stddev"):
257
451
  elif method == "p2p":
258
452
  return ppnormalize(vector)
259
453
  elif method == "mad":
260
- return madnormalize(vector)
454
+ return madnormalize(vector)[0]
261
455
  else:
262
456
  raise ValueError("Illegal normalization type")
263
457
 
264
458
 
265
def znormalize(vector: NDArray) -> NDArray:
    """
    Z-score normalize a vector.

    Thin convenience alias for ``stdnormalize``: the vector is demeaned and
    scaled by its standard deviation.

    Parameters
    ----------
    vector : NDArray
        Input vector to be normalized.

    Returns
    -------
    NDArray
        The normalized vector, exactly as returned by ``stdnormalize``.
    """
    return stdnormalize(vector)
461
+
462
+
463
+ def removeoutliers(
464
+ vector: NDArray, zerobad: bool = True, outlierfac: float = 3.0
465
+ ) -> Tuple[NDArray, float, float]:
266
466
  """
467
+ Normalize a vector using standard normalization (z-score normalization).
468
+
469
+ Standard normalization transforms the vector by subtracting the mean and
470
+ dividing by the standard deviation, resulting in a vector with mean=0 and std=1.
267
471
 
268
472
  Parameters
269
473
  ----------
270
- vector
474
+ vector : array-like
475
+ Input vector to be normalized. Should be a 1D array-like object.
271
476
 
272
477
  Returns
273
478
  -------
274
-
479
+ ndarray
480
+ Normalized vector with mean=0 and standard deviation=1.
481
+
482
+ Notes
483
+ -----
484
+ This function is equivalent to calling `stdnormalize(vector)` and performs
485
+ the standard z-score normalization: (x - mean) / std.
486
+
487
+ Examples
488
+ --------
489
+ >>> import numpy as np
490
+ >>> vector = np.array([1, 2, 3, 4, 5])
491
+ >>> znormalize(vector)
492
+ array([-1.41421356, -0.70710678, 0. , 0.70710678, 1.41421356])
275
493
  """
276
- return stdnormalize(vector)
277
-
278
-
279
- def removeoutliers(vector, zerobad=True, outlierfac=3.0):
280
494
  themedian = np.median(vector)
281
495
  sigmad = mad(vector - themedian).astype(np.float64)
282
496
  if zerobad:
@@ -288,42 +502,83 @@ def removeoutliers(vector, zerobad=True, outlierfac=3.0):
288
502
  return cleaneddata, themedian, sigmad
289
503
 
290
504
 
291
def madnormalize(vector: NDArray) -> Tuple[NDArray, float]:
    """
    Normalize a vector by its median absolute deviation (MAD).

    The median is subtracted from the vector, and the result is divided by
    the MAD of the demedianed data when that MAD is positive.

    Parameters
    ----------
    vector : array_like
        Input vector to be normalized.

    Returns
    -------
    tuple of (ndarray, float)
        The normalized (or, if the MAD is not positive, merely demedianed)
        vector, together with the MAD value used as the normalization factor.

    Notes
    -----
    When the MAD is zero or negative, no division is performed and the
    demedianed vector is returned along with that MAD value.

    Examples
    --------
    >>> import numpy as np
    >>> normalized, mad_val = madnormalize(np.array([1, 2, 3, 4, 5]))
    """
    centered = vector - np.median(vector)
    scale = mad(centered).astype(np.float64)
    if scale > 0.0:
        return centered / scale, scale
    return centered, scale
314
548
 
315
549
 
316
550
  @conditionaljit()
317
- def stdnormalize(vector):
551
+ def stdnormalize(vector: NDArray) -> NDArray:
318
552
  """
553
+ Standardize a vector by removing mean and scaling by standard deviation.
319
554
 
320
555
  Parameters
321
556
  ----------
322
- vector
557
+ vector : NDArray
558
+ Input vector to be standardized.
323
559
 
324
560
  Returns
325
561
  -------
326
-
562
+ NDArray
563
+ Standardized vector with zero mean and unit variance. If the input vector
564
+ has zero standard deviation, the demeaned vector is returned unchanged.
565
+
566
+ Notes
567
+ -----
568
+ This function performs standardization (z-score normalization) by:
569
+ 1. Removing the mean from each element (demeaning)
570
+ 2. Dividing by the standard deviation (if non-zero)
571
+
572
+ Examples
573
+ --------
574
+ >>> import numpy as np
575
+ >>> x = np.array([1, 2, 3, 4, 5])
576
+ >>> stdnormalize(x)
577
+ array([-1.41421356, -0.70710678, 0. , 0.70710678, 1.41421356])
578
+
579
+ >>> y = np.array([5, 5, 5, 5])
580
+ >>> stdnormalize(y)
581
+ array([0., 0., 0., 0.])
327
582
  """
328
583
  demeaned = vector - np.mean(vector)
329
584
  sigstd = np.std(demeaned)
@@ -333,16 +588,42 @@ def stdnormalize(vector):
333
588
  return demeaned
334
589
 
335
590
 
336
- def varnormalize(vector):
591
+ def varnormalize(vector: NDArray) -> NDArray:
337
592
  """
593
+ Normalize a vector by subtracting the mean and dividing by variance.
594
+
595
+ This function performs variance normalization on the input vector. It first
596
+ demeanes the vector by subtracting its mean, then divides by the variance
597
+ if it's greater than zero. If the variance is zero (constant vector), the
598
+ demeaned vector is returned unchanged.
338
599
 
339
600
  Parameters
340
601
  ----------
341
- vector
602
+ vector : ndarray
603
+ Input vector to be normalized. Should be a numpy array of numeric values.
342
604
 
343
605
  Returns
344
606
  -------
345
-
607
+ ndarray
608
+ Normalized vector with mean zero and variance one (when input has non-zero variance).
609
+ If input vector has zero variance, returns the demeaned vector.
610
+
611
+ Notes
612
+ -----
613
+ This normalization is similar to standardization but uses variance instead of
614
+ standard deviation for the normalization factor. The function handles edge cases
615
+ where variance is zero by returning the demeaned vector without division.
616
+
617
+ Examples
618
+ --------
619
+ >>> import numpy as np
620
+ >>> vec = np.array([1, 2, 3, 4, 5])
621
+ >>> varnormalize(vec)
622
+ array([-2., -1., 0., 1., 2.])
623
+
624
+ >>> constant_vec = np.array([5, 5, 5, 5])
625
+ >>> varnormalize(constant_vec)
626
+ array([0., 0., 0., 0.])
346
627
  """
347
628
  demeaned = vector - np.mean(vector)
348
629
  sigvar = np.var(demeaned)
@@ -352,16 +633,41 @@ def varnormalize(vector):
352
633
  return demeaned
353
634
 
354
635
 
355
- def pcnormalize(vector):
636
+ def pcnormalize(vector: NDArray) -> NDArray:
356
637
  """
638
+ Normalize a vector using percentage change normalization.
639
+
640
+ This function performs percentage change normalization by dividing each element
641
+ by the mean of the vector and subtracting 1.0.
357
642
 
358
643
  Parameters
359
644
  ----------
360
- vector
645
+ vector : NDArray
646
+ Input vector to be normalized.
361
647
 
362
648
  Returns
363
649
  -------
364
-
650
+ NDArray
651
+ Normalized vector where each element is (vector[i] / mean) - 1.0.
652
+ If the mean is less than or equal to zero, the original vector is returned.
653
+
654
+ Notes
655
+ -----
656
+ The normalization formula is: (vector / mean) - 1.0
657
+ If the mean of the vector is less than or equal to zero, the function returns
658
+ the original vector to avoid division by zero or negative normalization.
659
+
660
+ Examples
661
+ --------
662
+ >>> data = np.array([1, 2, 3, 4, 5])
663
+ >>> normalized = pcnormalize(data)
664
+ >>> print(normalized)
665
+ [-0.6 -0.2 0.2 0.6 1. ]
666
+
667
+ >>> data = np.array([10, 20, 30])
668
+ >>> normalized = pcnormalize(data)
669
+ >>> print(normalized)
670
+ [-0.5 0.5 1.5]
365
671
  """
366
672
  sigmean = np.mean(vector)
367
673
  if sigmean > 0.0:
@@ -370,16 +676,36 @@ def pcnormalize(vector):
370
676
  return vector
371
677
 
372
678
 
373
- def ppnormalize(vector):
679
+ def ppnormalize(vector: NDArray) -> NDArray:
374
680
  """
681
+ Normalize a vector using peak-to-peak normalization.
682
+
683
+ This function performs peak-to-peak normalization by subtracting the mean
684
+ and dividing by the range (max - min) of the demeaned vector.
375
685
 
376
686
  Parameters
377
687
  ----------
378
- vector
688
+ vector : NDArray
689
+ Input vector to be normalized
379
690
 
380
691
  Returns
381
692
  -------
382
-
693
+ NDArray
694
+ Normalized vector with values ranging from -0.5 to 0.5 when the range is non-zero,
695
+ or zero vector when the range is zero
696
+
697
+ Notes
698
+ -----
699
+ The normalization is performed as: (vector - mean) / (max - min)
700
+ If the range (max - min) is zero, the function returns the demeaned vector
701
+ (which will be all zeros) to avoid division by zero.
702
+
703
+ Examples
704
+ --------
705
+ >>> data = np.array([1, 2, 3, 4, 5])
706
+ >>> normalized = ppnormalize(data)
707
+ >>> print(normalized)
708
+ [-0.5 -0.25 0. 0.25 0.5 ]
383
709
  """
384
710
  demeaned = vector - np.mean(vector)
385
711
  sigpp = np.max(demeaned) - np.min(demeaned)
@@ -389,12 +715,61 @@ def ppnormalize(vector):
389
715
  return demeaned
390
716
 
391
717
 
392
- def imagevariance(thedata, thefilter, samplefreq, meannorm=True, debug=False):
718
+ def imagevariance(
719
+ thedata: NDArray,
720
+ thefilter: Optional[tide_filt.NoncausalFilter],
721
+ samplefreq: float,
722
+ meannorm: bool = True,
723
+ debug: bool = False,
724
+ ) -> NDArray:
725
+ """
726
+ Calculate variance of filtered image data, optionally normalized by mean.
727
+
728
+ This function applies a filter to each voxel's time series data and computes
729
+ the variance along the time dimension. The result can be optionally normalized
730
+ by the mean of the original data.
731
+
732
+ Parameters
733
+ ----------
734
+ thedata : NDArray
735
+ Input image data with shape (n_voxels, n_timepoints).
736
+ thefilter : Optional[object]
737
+ Filter object with an 'apply' method that takes (samplefreq, data) as arguments.
738
+ If None, no filtering is applied.
739
+ samplefreq : float
740
+ Sampling frequency used for filter application.
741
+ meannorm : bool, optional
742
+ If True, normalize variance by mean of original data. Default is True.
743
+ debug : bool, optional
744
+ If True, print debug information. Default is False.
745
+
746
+ Returns
747
+ -------
748
+ NDArray
749
+ Array of variance values for each voxel. Shape is (n_voxels,).
750
+
751
+ Notes
752
+ -----
753
+ - NaN values are converted to zero in the final result.
754
+ - When `meannorm=True`, the variance is normalized by the mean of the original data.
755
+ - The filter is applied to each voxel's time series independently.
756
+ - If no filter is provided, the original data is used directly.
757
+
758
+ Examples
759
+ --------
760
+ >>> data = np.random.randn(100, 50)
761
+ >>> filter_obj = SomeFilter()
762
+ >>> variance = imagevariance(data, filter_obj, samplefreq=2.0)
763
+ >>> variance = imagevariance(data, None, samplefreq=2.0, meannorm=False)
764
+ """
393
765
  if debug:
394
766
  print(f"IMAGEVARIANCE: {thedata.shape}, {thefilter}, {samplefreq}")
395
- filteredim = thedata * 0.0
396
- for thevoxel in range(thedata.shape[0]):
397
- filteredim[thevoxel, :] = thefilter.apply(samplefreq, thedata[thevoxel, :])
767
+ filteredim = np.zeros_like(thedata)
768
+ if thefilter is not None:
769
+ for thevoxel in range(thedata.shape[0]):
770
+ filteredim[thevoxel, :] = thefilter.apply(samplefreq, thedata[thevoxel, :])
771
+ else:
772
+ filteredim = thedata
398
773
  if meannorm:
399
774
  return np.nan_to_num(np.var(filteredim, axis=1) / np.mean(thedata, axis=1))
400
775
  else:
@@ -402,19 +777,47 @@ def imagevariance(thedata, thefilter, samplefreq, meannorm=True, debug=False):
402
777
 
403
778
 
404
779
  # @conditionaljit()
405
- def corrnormalize(thedata, detrendorder=1, windowfunc="hamming"):
780
+ def corrnormalize(thedata: NDArray, detrendorder: int = 1, windowfunc: str = "hamming") -> NDArray:
406
781
  """
782
+ Normalize data by detrending and applying a window function, then standardize.
783
+
784
+ This function first detrends the input data, applies a window function if specified,
785
+ and then normalizes the result using standard normalization.
407
786
 
408
787
  Parameters
409
788
  ----------
410
- thedata
411
- detrendorder
412
- windowfunc
789
+ thedata : NDArray
790
+ Input data to be normalized.
791
+ detrendorder : int, optional
792
+ Order of detrending to apply. A value of 0 skips detrending, while values > 0
793
+ apply polynomial detrending (default is 1 for linear detrending).
794
+ windowfunc : str, optional
795
+ Window function to apply (e.g., 'hamming', 'hanning'). Use 'None' to skip
796
+ windowing (default is 'hamming').
413
797
 
414
798
  Returns
415
799
  -------
416
-
800
+ NDArray
801
+ Normalized data array with detrending, windowing (if applicable), and standard
802
+ normalization applied, followed by division by sqrt(n), where n is the length
803
+ of the input data.
804
+
805
+ Notes
806
+ -----
807
+ The normalization process is performed in the following steps:
808
+ 1. Detrend the data using polynomial fitting if `detrendorder` > 0.
809
+ 2. Apply a window function if `windowfunc` is not 'None'.
810
+ 3. Standard normalize the result.
811
+ 4. Divide the normalized result by sqrt(n), where n is the length of the data.
812
+
813
+ Examples
814
+ --------
815
+ >>> import numpy as np
816
+ >>> data = np.random.randn(100)
817
+ >>> normalized = corrnormalize(data)
818
+ >>> normalized = corrnormalize(data, detrendorder=2, windowfunc='hanning')
417
819
  """
820
+
418
821
  # detrend first
419
822
  if detrendorder > 0:
420
823
  intervec = stdnormalize(tide_fit.detrend(thedata, order=detrendorder, demean=True))
@@ -430,37 +833,156 @@ def corrnormalize(thedata, detrendorder=1, windowfunc="hamming"):
430
833
  return stdnormalize(intervec) / np.sqrt(np.shape(thedata)[0])
431
834
 
432
835
 
433
- def rms(vector):
836
def noiseamp(
    vector: NDArray, Fs: float, windowsize: float = 40.0
) -> Tuple[NDArray, NDArray, float, float, float, float]:
    """
    Compute noise amplitude characteristics of a signal over time.

    The squared input is low-pass filtered (non-causal, cutoff 1/windowsize)
    to form an envelope estimate, whose square root gives an RMS timecourse.
    A linear trend fitted to that timecourse yields the start/end amplitudes
    and the percentage change and rate of change over the signal duration.

    Parameters
    ----------
    vector : ndarray
        Input signal (1D array).
    Fs : float
        Sampling frequency of the input signal in Hz.
    windowsize : float, optional
        Smoothing window in seconds; the filter cutoff is 1/windowsize.
        Default is 40.0.

    Returns
    -------
    tuple of (filtrms, thefittc, startamp, endamp, changepct, changerate)
        filtrms : ndarray
            RMS amplitude timecourse of the filtered signal.
        thefittc : ndarray
            Linear trend fitted to the RMS timecourse.
        startamp : float
            Amplitude at the start of the trend fit.
        endamp : float
            Amplitude at the end of the trend fit.
        changepct : float
            Percentage change from startamp to endamp (0.0 if startamp <= 0).
        changerate : float
            Percentage change per second over the signal duration.

    Notes
    -----
    - The input is padded by half its length before filtering and unpadded
      afterward to reduce edge effects.
    - Negative filter outputs are clipped to zero before the square root.
    - If the polynomial fit raises a RankWarning, the trend coefficients
      fall back to [0.0, 0.0].

    Examples
    --------
    >>> import numpy as np
    >>> vector = np.random.randn(1000)
    >>> rms_vals, trend, start, end, pct, rate = noiseamp(vector, 10.0)
    """
    cutoff = 1.0 / windowsize
    padlen = int(len(vector) // 2)
    theenvbpf = tide_filt.NoncausalFilter(filtertype="arb")
    theenvbpf.setfreqs(0.0, 0.0, cutoff, 1.1 * cutoff)
    # Filter the padded, squared signal once.  (A previous revision applied
    # the identical filter expression twice, discarding the first result.)
    filtsq = tide_filt.unpadvec(
        theenvbpf.apply(Fs, tide_filt.padvec(np.square(vector), padlen)), padlen
    )
    # Clip filter ringing below zero so the square root is well-defined.
    filtsq = np.where(filtsq >= 0.0, filtsq, 0.0)
    filtrms = np.sqrt(filtsq)
    thetimepoints = np.arange(0.0, len(filtrms), 1.0) - len(filtrms) / 2.0
    try:
        thecoffs = Polynomial.fit(thetimepoints, filtrms, 1).convert().coef[::-1]
    except np.exceptions.RankWarning:
        thecoffs = np.asarray([0.0, 0.0])
    thefittc = tide_fit.trendgen(thetimepoints, thecoffs, True)
    startamp = thefittc[0]
    endamp = thefittc[-1]
    if startamp > 0.0:
        changepct = 100.0 * (endamp / startamp - 1.0)
    else:
        # Avoid division by a non-positive starting amplitude.
        changepct = 0.0
    runtime = len(vector) / Fs
    changerate = changepct / runtime
    return filtrms, thefittc, startamp, endamp, changepct, changerate
914
+
915
+
916
def rms(vector: NDArray) -> float:
    """
    Return the root mean square (RMS) of a vector.

    The RMS summarizes the magnitude of a varying quantity and is computed
    as ``sqrt(mean(vector ** 2))``.

    Parameters
    ----------
    vector : array_like
        Input values.

    Returns
    -------
    float
        Root mean square of the input.

    Examples
    --------
    >>> import numpy as np
    >>> rms([1, 2, 3, 4])
    2.7386127875258306
    """
    mean_square = np.mean(np.square(vector))
    return np.sqrt(mean_square)
445
947
 
446
948
 
447
- def envdetect(Fs, inputdata, cutoff=0.25, padlen=10):
949
+ def envdetect(Fs: float, inputdata: NDArray, cutoff: float = 0.25, padlen: int = 10) -> NDArray:
448
950
  """
951
+ Compute the envelope of input signal using band-pass filtering.
952
+
953
+ This function calculates the envelope of a signal by first removing the mean,
954
+ taking the absolute value, and then applying a band-pass filter to isolate
955
+ the envelope components. The filtering is performed using a non-causal filter
956
+ to avoid phase distortion.
449
957
 
450
958
  Parameters
451
959
  ----------
452
960
  Fs : float
453
- Sample frequency in Hz.
454
- inputdata : float array
455
- Data to be envelope detected
456
- cutoff : float
457
- Highest possible modulation frequency
961
+ Sampling frequency of the input signal in Hz.
962
+ inputdata : NDArray
963
+ Input signal array to process.
964
+ cutoff : float, optional
965
+ Cutoff frequency for the band-pass filter. Default is 0.25.
966
+ padlen : int, optional
967
+ Padding length used for filtering to avoid edge effects. Default is 10.
458
968
 
459
969
  Returns
460
970
  -------
461
- envelope : float array
462
- The envelope function
463
-
971
+ NDArray
972
+ Envelope of the input signal with the same shape as inputdata.
973
+
974
+ Notes
975
+ -----
976
+ The function uses a non-causal filter (two-pass filtering) which avoids
977
+ phase distortion but requires padding the signal. The filter is set to
978
+ pass frequencies between 0 and cutoff, with a stop band starting at 1.1*cutoff.
979
+
980
+ Examples
981
+ --------
982
+ >>> import numpy as np
983
+ >>> Fs = 100.0
984
+ >>> signal = np.sin(2 * np.pi * 10 * np.linspace(0, 1, Fs))
985
+ >>> envelope = envdetect(Fs, signal)
464
986
  """
465
987
  demeaned = inputdata - np.mean(inputdata)
466
988
  sigabs = abs(demeaned)
@@ -469,20 +991,39 @@ def envdetect(Fs, inputdata, cutoff=0.25, padlen=10):
469
991
  return tide_filt.unpadvec(theenvbpf.apply(Fs, tide_filt.padvec(sigabs, padlen)), padlen)
470
992
 
471
993
 
472
- def phasemod(phase, centric=True):
994
+ def phasemod(phase: NDArray, centric: bool = True) -> NDArray | float:
473
995
  """
996
+ Perform phase modulation with optional centric adjustment.
997
+
998
+ This function applies phase modulation to the input phase array, with an option
999
+ to apply a centric transformation that maps the phase range to [-π, π].
474
1000
 
475
1001
  Parameters
476
1002
  ----------
477
- phase : array-like
478
- An unwrapped phase vector
479
- centric: boolean, optional
480
- Determines whether to do modulo to centric (-np.pi to np.pi) or non-centric (0 to 2 * np.pi) range
1003
+ phase : ndarray
1004
+ Input phase array in radians.
1005
+ centric : bool, optional
1006
+ If True, applies centric transformation to map phase to [-π, π] range.
1007
+ If False, returns phase modulo 2π. Default is True.
481
1008
 
482
1009
  Returns
483
1010
  -------
484
- wrapped : array-like
485
- The phase vector, remapped to the range of +/-np.pi
1011
+ ndarray
1012
+ Modulated phase array with same shape as input.
1013
+
1014
+ Notes
1015
+ -----
1016
+ When `centric=True`, the transformation is equivalent to:
1017
+ `((-phase + π) % (2π) - π) * -1`
1018
+
1019
+ Examples
1020
+ --------
1021
+ >>> import numpy as np
1022
+ >>> phase = np.array([0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi])
1023
+ >>> phasemod(phase)
1024
+ array([ 0. , 1.57079633, 3.14159265, -1.57079633, 0. ])
1025
+ >>> phasemod(phase, centric=False)
1026
+ array([0. , 1.57079633, 3.14159265, 4.71238898, 0. ])
486
1027
  """
487
1028
  if centric:
488
1029
  return ((-phase + np.pi) % (2.0 * np.pi) - np.pi) * -1.0
@@ -490,30 +1031,58 @@ def phasemod(phase, centric=True):
490
1031
  return phase % (2.0 * np.pi)
491
1032
 
492
1033
 
493
- def trendfilt(inputdata, order=3, ndevs=3.0, debug=False):
1034
+ def trendfilt(
1035
+ inputdata: NDArray, order: int = 3, ndevs: float = 3.0, debug: bool = False
1036
+ ) -> NDArray:
494
1037
  """
1038
+ Apply trend filtering to remove polynomial trends and outliers from time series data.
1039
+
1040
+ This function fits a polynomial trend to the input data using least squares,
1041
+ removes the trend to obtain detrended data, and then applies outlier detection
1042
+ using median absolute deviation (MAD) normalization to identify and mask outliers.
495
1043
 
496
1044
  Parameters
497
1045
  ----------
498
- inputdata : array-like
499
- A data vector with a polynomial trend and impulsive noise
1046
+ inputdata : NDArray
1047
+ Input time series data to be filtered.
1048
+ order : int, optional
1049
+ Order of the polynomial trend to fit (default is 3).
1050
+ ndevs : float, optional
1051
+ Number of standard deviations for outlier detection (default is 3.0).
1052
+ debug : bool, optional
1053
+ If True, display debug plots showing the detrended data and outliers (default is False).
500
1054
 
501
1055
  Returns
502
1056
  -------
503
- patched : array-like
504
- The input data with the impulsive noise removed
1057
+ NDArray
1058
+ Filtered time series data with polynomial trend removed and outliers masked as zeros.
1059
+
1060
+ Notes
1061
+ -----
1062
+ The function uses `Polynomial.fit` to fit a polynomial trend and `tide_fit.trendgen`
1063
+ to generate the trend values. Outliers are detected using median absolute deviation
1064
+ normalization and masked by setting them to zero. The original trend is added back
1065
+ to the filtered data to maintain the overall signal structure.
1066
+
1067
+ Examples
1068
+ --------
1069
+ >>> import numpy as np
1070
+ >>> data = np.random.randn(100)
1071
+ >>> filtered_data = trendfilt(data, order=2, ndevs=2.0)
1072
+ >>> # With debug mode enabled
1073
+ >>> filtered_data = trendfilt(data, debug=True)
505
1074
  """
506
1075
  thetimepoints = np.arange(0.0, len(inputdata), 1.0) - len(inputdata) / 2.0
507
1076
  try:
508
1077
  thecoffs = Polynomial.fit(thetimepoints, inputdata, order).convert().coef[::-1]
509
- except np.lib.RankWarning:
510
- thecoffs = [0.0, 0.0]
1078
+ except np.exceptions.RankWarning:
1079
+ thecoffs = np.asarray([0.0, 0.0])
511
1080
  thefittc = tide_fit.trendgen(thetimepoints, thecoffs, True)
512
1081
  detrended = inputdata - thefittc
513
1082
  if debug:
514
1083
  plt.figure()
515
1084
  plt.plot(detrended)
516
- detrended[np.where(np.fabs(madnormalize(detrended)) > ndevs)] = 0.0
1085
+ detrended[np.where(np.fabs(madnormalize(detrended)[0]) > ndevs)] = 0.0
517
1086
  if debug:
518
1087
  plt.plot(detrended)
519
1088
  plt.show()
@@ -523,15 +1092,113 @@ def trendfilt(inputdata, order=3, ndevs=3.0, debug=False):
523
1092
  # found here: https://datascience.stackexchange.com/questions/75733/pca-for-complex-valued-data
524
1093
  class ComplexPCA:
525
1094
  def __init__(self, n_components):
1095
+ """
1096
+ Initialize the PCA model with the specified number of components.
1097
+
1098
+ Parameters
1099
+ ----------
1100
+ n_components : int
1101
+ Number of components to keep.
1102
+
1103
+ Returns
1104
+ -------
1105
+ None
1106
+ Initializes the PCA model with the specified number of components and
1107
+ sets internal attributes to None.
1108
+
1109
+ Notes
1110
+ -----
1111
+ This constructor initializes the PCA model with the specified number of
1112
+ components. The actual computation of principal components is performed
1113
+ during the fit method.
1114
+
1115
+ Examples
1116
+ --------
1117
+ >>> from sklearn.decomposition import PCA
1118
+ >>> pca = PCA(n_components=2)
1119
+ >>> print(pca.n_components)
1120
+ 2
1121
+ """
526
1122
  self.n_components = n_components
527
1123
  self.u = self.s = self.components_ = None
528
1124
  self.mean_ = None
529
1125
 
530
1126
  @property
531
1127
  def explained_variance_ratio_(self):
1128
+ """
1129
+ Return the explained variance ratio.
1130
+
1131
+ This function returns the explained variance ratio stored in the object's
1132
+ `s` attribute, which typically represents the proportion of variance
1133
+ explained by each component in dimensionality reduction techniques.
1134
+
1135
+ Returns
1136
+ -------
1137
+ explained_variance_ratio : array-like
1138
+ The explained variance ratio for each component. Each element
1139
+ represents the fraction of the total variance explained by the
1140
+ corresponding component.
1141
+
1142
+ Notes
1143
+ -----
1144
+ The explained variance ratio is commonly used in Principal Component
1145
+ Analysis (PCA) and similar dimensionality reduction methods to determine
1146
+ the importance of each component and to decide how many components to
1147
+ retain for analysis.
1148
+
1149
+ Examples
1150
+ --------
1151
+ >>> from sklearn.decomposition import PCA
1152
+ >>> pca = PCA()
1153
+ >>> pca.fit(X)
1154
+ >>> ratio = pca.explained_variance_ratio_
1155
+ >>> print(ratio)
1156
+ [0.856, 0.123, 0.021]
1157
+ """
532
1158
  return self.s
533
1159
 
534
1160
  def fit(self, matrix, use_gpu=False):
1161
+ """
1162
+ Fit the model with the given matrix using Singular Value Decomposition.
1163
+
1164
+ This method computes the mean of the input matrix and performs SVD decomposition
1165
+ to obtain the principal components. The decomposition can be performed using
1166
+ either CPU (numpy) or GPU (tensorflow) depending on the use_gpu parameter.
1167
+
1168
+ Parameters
1169
+ ----------
1170
+ matrix : array-like of shape (n_samples, n_features)
1171
+ Input matrix to fit the model on. The matrix should be numeric.
1172
+ use_gpu : bool, default=False
1173
+ If True, use TensorFlow for SVD computation on GPU. If False, use NumPy.
1174
+ Note: TensorFlow is used for GPU computation as PyTorch doesn't handle
1175
+ complex values well.
1176
+
1177
+ Returns
1178
+ -------
1179
+ self : object
1180
+ Returns the instance itself.
1181
+
1182
+ Notes
1183
+ -----
1184
+ - The SVD is performed with `full_matrices=False`, which means the number of
1185
+ components will be min(n_samples, n_features).
1186
+ - For better performance when only a subset of components is needed, consider
1187
+ truncating the SVD to `n_components` instead of computing all components.
1188
+ - The `components_` attribute stores the right singular vectors (principal components).
1189
+ - The `mean_` attribute stores the mean of each feature across samples.
1190
+ - The `s` attribute stores the singular values from the SVD decomposition.
1191
+
1192
+ Examples
1193
+ --------
1194
+ >>> import numpy as np
1195
+ >>> from sklearn.decomposition import PCA
1196
+ >>> X = np.random.rand(100, 10)
1197
+ >>> pca = PCA()
1198
+ >>> pca.fit(X)
1199
+ >>> print(pca.components_.shape)
1200
+ (10, 10)
1201
+ """
535
1202
  self.mean_ = matrix.mean(axis=0)
536
1203
  if use_gpu:
537
1204
  import tensorflow as tf # torch doesn't handle complex values.
@@ -550,10 +1217,73 @@ class ComplexPCA:
550
1217
  # Leave those components as rows of matrix so that it is compatible with Sklearn PCA.
551
1218
 
552
1219
  def transform(self, matrix):
1220
+ """
1221
+ Transform matrix using the fitted components.
1222
+
1223
+ Parameters
1224
+ ----------
1225
+ matrix : array-like of shape (n_samples, n_features)
1226
+ The data to be transformed.
1227
+
1228
+ Returns
1229
+ -------
1230
+ array-like of shape (n_samples, n_components)
1231
+ The transformed data.
1232
+
1233
+ Notes
1234
+ -----
1235
+ This function applies the transformation defined by the fitted components
1236
+ to the input matrix. It subtracts the mean and projects onto the component
1237
+ space.
1238
+
1239
+ Examples
1240
+ --------
1241
+ >>> from sklearn.decomposition import PCA
1242
+ >>> import numpy as np
1243
+ >>> pca = PCA(n_components=2)
1244
+ >>> X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1245
+ >>> pca.fit(X)
1246
+ >>> transformed = pca.transform(X)
1247
+ >>> print(transformed.shape)
1248
+ (3, 2)
1249
+ """
553
1250
  data = matrix - self.mean_
554
1251
  result = data @ self.components_.T
555
1252
  return result
556
1253
 
557
1254
  def inverse_transform(self, matrix):
1255
+ """
1256
+ Apply inverse transformation to the input matrix.
1257
+
1258
+ Parameters
1259
+ ----------
1260
+ matrix : array-like of shape (n_samples, n_components)
1261
+ The transformed data to be inverse transformed.
1262
+
1263
+ Returns
1264
+ -------
1265
+ ndarray of shape (n_samples, n_features)
1266
+ The inverse transformed data in the original feature space.
1267
+
1268
+ Notes
1269
+ -----
1270
+ This function applies the inverse transformation using the stored components
1271
+ and mean values. The transformation is defined as:
1272
+ result = matrix @ conj(self.components_) + self.mean_
1273
+
1274
+ Examples
1275
+ --------
1276
+ >>> from sklearn.decomposition import PCA
1277
+ >>> import numpy as np
1278
+ >>> # Create sample data
1279
+ >>> data = np.random.rand(100, 10)
1280
+ >>> # Fit PCA
1281
+ >>> pca = PCA(n_components=5)
1282
+ >>> transformed = pca.fit_transform(data)
1283
+ >>> # Inverse transform
1284
+ >>> reconstructed = pca.inverse_transform(transformed)
1285
+ >>> print(reconstructed.shape)
1286
+ (100, 10)
1287
+ """
558
1288
  result = matrix @ np.conj(self.components_)
559
1289
  return self.mean_ + result