rapidtide 2.9.6__py3-none-any.whl → 3.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (405)
  1. cloud/gmscalc-HCPYA +1 -1
  2. cloud/mount-and-run +2 -0
  3. cloud/rapidtide-HCPYA +3 -3
  4. rapidtide/Colortables.py +538 -38
  5. rapidtide/OrthoImageItem.py +1094 -51
  6. rapidtide/RapidtideDataset.py +1709 -114
  7. rapidtide/__init__.py +0 -8
  8. rapidtide/_version.py +4 -4
  9. rapidtide/calccoherence.py +242 -97
  10. rapidtide/calcnullsimfunc.py +240 -140
  11. rapidtide/calcsimfunc.py +314 -129
  12. rapidtide/correlate.py +1211 -389
  13. rapidtide/data/examples/src/testLD +56 -0
  14. rapidtide/data/examples/src/test_findmaxlag.py +2 -2
  15. rapidtide/data/examples/src/test_mlregressallt.py +32 -17
  16. rapidtide/data/examples/src/testalign +1 -1
  17. rapidtide/data/examples/src/testatlasaverage +35 -7
  18. rapidtide/data/examples/src/testboth +21 -0
  19. rapidtide/data/examples/src/testcifti +11 -0
  20. rapidtide/data/examples/src/testdelayvar +13 -0
  21. rapidtide/data/examples/src/testdlfilt +25 -0
  22. rapidtide/data/examples/src/testfft +35 -0
  23. rapidtide/data/examples/src/testfileorfloat +37 -0
  24. rapidtide/data/examples/src/testfmri +92 -42
  25. rapidtide/data/examples/src/testfuncs +3 -3
  26. rapidtide/data/examples/src/testglmfilt +8 -6
  27. rapidtide/data/examples/src/testhappy +84 -51
  28. rapidtide/data/examples/src/testinitdelay +19 -0
  29. rapidtide/data/examples/src/testmodels +33 -0
  30. rapidtide/data/examples/src/testnewrefine +26 -0
  31. rapidtide/data/examples/src/testnoiseamp +2 -2
  32. rapidtide/data/examples/src/testppgproc +17 -0
  33. rapidtide/data/examples/src/testrefineonly +22 -0
  34. rapidtide/data/examples/src/testretro +26 -13
  35. rapidtide/data/examples/src/testretrolagtcs +16 -0
  36. rapidtide/data/examples/src/testrolloff +11 -0
  37. rapidtide/data/examples/src/testsimdata +45 -28
  38. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  39. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  40. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  41. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  42. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.png +0 -0
  43. rapidtide/data/models/model_cnn_pytorch_fulldata/loss.txt +1 -0
  44. rapidtide/data/models/model_cnn_pytorch_fulldata/model.pth +0 -0
  45. rapidtide/data/models/model_cnn_pytorch_fulldata/model_meta.json +80 -0
  46. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.png +0 -0
  47. rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.txt +1 -0
  48. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model.pth +0 -0
  49. rapidtide/data/models/model_cnnbp_pytorch_fullldata/model_meta.json +138 -0
  50. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.png +0 -0
  51. rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.txt +1 -0
  52. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model.pth +0 -0
  53. rapidtide/data/models/model_cnnfft_pytorch_fulldata/model_meta.json +128 -0
  54. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.png +0 -0
  55. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.txt +1 -0
  56. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model.pth +0 -0
  57. rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model_meta.json +49 -0
  58. rapidtide/data/models/model_revised_tf2/model.keras +0 -0
  59. rapidtide/data/models/{model_serdar → model_revised_tf2}/model_meta.json +1 -1
  60. rapidtide/data/models/model_serdar2_tf2/model.keras +0 -0
  61. rapidtide/data/models/{model_serdar2 → model_serdar2_tf2}/model_meta.json +1 -1
  62. rapidtide/data/models/model_serdar_tf2/model.keras +0 -0
  63. rapidtide/data/models/{model_revised → model_serdar_tf2}/model_meta.json +1 -1
  64. rapidtide/data/reference/HCP1200v2_MTT_2mm.nii.gz +0 -0
  65. rapidtide/data/reference/HCP1200v2_binmask_2mm.nii.gz +0 -0
  66. rapidtide/data/reference/HCP1200v2_csf_2mm.nii.gz +0 -0
  67. rapidtide/data/reference/HCP1200v2_gray_2mm.nii.gz +0 -0
  68. rapidtide/data/reference/HCP1200v2_graylaghist.json +7 -0
  69. rapidtide/data/reference/HCP1200v2_graylaghist.tsv.gz +0 -0
  70. rapidtide/data/reference/HCP1200v2_laghist.json +7 -0
  71. rapidtide/data/reference/HCP1200v2_laghist.tsv.gz +0 -0
  72. rapidtide/data/reference/HCP1200v2_mask_2mm.nii.gz +0 -0
  73. rapidtide/data/reference/HCP1200v2_maxcorr_2mm.nii.gz +0 -0
  74. rapidtide/data/reference/HCP1200v2_maxtime_2mm.nii.gz +0 -0
  75. rapidtide/data/reference/HCP1200v2_maxwidth_2mm.nii.gz +0 -0
  76. rapidtide/data/reference/HCP1200v2_negmask_2mm.nii.gz +0 -0
  77. rapidtide/data/reference/HCP1200v2_timepercentile_2mm.nii.gz +0 -0
  78. rapidtide/data/reference/HCP1200v2_white_2mm.nii.gz +0 -0
  79. rapidtide/data/reference/HCP1200v2_whitelaghist.json +7 -0
  80. rapidtide/data/reference/HCP1200v2_whitelaghist.tsv.gz +0 -0
  81. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2.xml +131 -0
  82. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_regions.txt +60 -0
  83. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_space-MNI152NLin6Asym_2mm.nii.gz +0 -0
  84. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
  85. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
  86. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  87. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL2_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
  88. rapidtide/data/reference/MNI152_T1_1mm_Brain_FAST_seg.nii.gz +0 -0
  89. rapidtide/data/reference/MNI152_T1_1mm_Brain_Mask.nii.gz +0 -0
  90. rapidtide/data/reference/MNI152_T1_2mm_Brain_FAST_seg.nii.gz +0 -0
  91. rapidtide/data/reference/MNI152_T1_2mm_Brain_Mask.nii.gz +0 -0
  92. rapidtide/decorators.py +91 -0
  93. rapidtide/dlfilter.py +2553 -414
  94. rapidtide/dlfiltertorch.py +5201 -0
  95. rapidtide/externaltools.py +328 -13
  96. rapidtide/fMRIData_class.py +108 -92
  97. rapidtide/ffttools.py +168 -0
  98. rapidtide/filter.py +2704 -1462
  99. rapidtide/fit.py +2361 -579
  100. rapidtide/genericmultiproc.py +197 -0
  101. rapidtide/happy_supportfuncs.py +3255 -548
  102. rapidtide/helper_classes.py +587 -1116
  103. rapidtide/io.py +2569 -468
  104. rapidtide/linfitfiltpass.py +784 -0
  105. rapidtide/makelaggedtcs.py +267 -97
  106. rapidtide/maskutil.py +555 -25
  107. rapidtide/miscmath.py +835 -144
  108. rapidtide/multiproc.py +217 -44
  109. rapidtide/patchmatch.py +752 -0
  110. rapidtide/peakeval.py +32 -32
  111. rapidtide/ppgproc.py +2205 -0
  112. rapidtide/qualitycheck.py +353 -40
  113. rapidtide/refinedelay.py +854 -0
  114. rapidtide/refineregressor.py +939 -0
  115. rapidtide/resample.py +725 -204
  116. rapidtide/scripts/__init__.py +1 -0
  117. rapidtide/scripts/{adjustoffset → adjustoffset.py} +7 -2
  118. rapidtide/scripts/{aligntcs → aligntcs.py} +7 -2
  119. rapidtide/scripts/{applydlfilter → applydlfilter.py} +7 -2
  120. rapidtide/scripts/applyppgproc.py +28 -0
  121. rapidtide/scripts/{atlasaverage → atlasaverage.py} +7 -2
  122. rapidtide/scripts/{atlastool → atlastool.py} +7 -2
  123. rapidtide/scripts/{calcicc → calcicc.py} +7 -2
  124. rapidtide/scripts/{calctexticc → calctexticc.py} +7 -2
  125. rapidtide/scripts/{calcttest → calcttest.py} +7 -2
  126. rapidtide/scripts/{ccorrica → ccorrica.py} +7 -2
  127. rapidtide/scripts/delayvar.py +28 -0
  128. rapidtide/scripts/{diffrois → diffrois.py} +7 -2
  129. rapidtide/scripts/{endtidalproc → endtidalproc.py} +7 -2
  130. rapidtide/scripts/{fdica → fdica.py} +7 -2
  131. rapidtide/scripts/{filtnifti → filtnifti.py} +7 -2
  132. rapidtide/scripts/{filttc → filttc.py} +7 -2
  133. rapidtide/scripts/{fingerprint → fingerprint.py} +20 -16
  134. rapidtide/scripts/{fixtr → fixtr.py} +7 -2
  135. rapidtide/scripts/{gmscalc → gmscalc.py} +7 -2
  136. rapidtide/scripts/{happy → happy.py} +7 -2
  137. rapidtide/scripts/{happy2std → happy2std.py} +7 -2
  138. rapidtide/scripts/{happywarp → happywarp.py} +8 -4
  139. rapidtide/scripts/{histnifti → histnifti.py} +7 -2
  140. rapidtide/scripts/{histtc → histtc.py} +7 -2
  141. rapidtide/scripts/{glmfilt → linfitfilt.py} +7 -4
  142. rapidtide/scripts/{localflow → localflow.py} +7 -2
  143. rapidtide/scripts/{mergequality → mergequality.py} +7 -2
  144. rapidtide/scripts/{pairproc → pairproc.py} +7 -2
  145. rapidtide/scripts/{pairwisemergenifti → pairwisemergenifti.py} +7 -2
  146. rapidtide/scripts/{physiofreq → physiofreq.py} +7 -2
  147. rapidtide/scripts/{pixelcomp → pixelcomp.py} +7 -2
  148. rapidtide/scripts/{plethquality → plethquality.py} +7 -2
  149. rapidtide/scripts/{polyfitim → polyfitim.py} +7 -2
  150. rapidtide/scripts/{proj2flow → proj2flow.py} +7 -2
  151. rapidtide/scripts/{rankimage → rankimage.py} +7 -2
  152. rapidtide/scripts/{rapidtide → rapidtide.py} +7 -2
  153. rapidtide/scripts/{rapidtide2std → rapidtide2std.py} +7 -2
  154. rapidtide/scripts/{resamplenifti → resamplenifti.py} +7 -2
  155. rapidtide/scripts/{resampletc → resampletc.py} +7 -2
  156. rapidtide/scripts/retrolagtcs.py +28 -0
  157. rapidtide/scripts/retroregress.py +28 -0
  158. rapidtide/scripts/{roisummarize → roisummarize.py} +7 -2
  159. rapidtide/scripts/{runqualitycheck → runqualitycheck.py} +7 -2
  160. rapidtide/scripts/{showarbcorr → showarbcorr.py} +7 -2
  161. rapidtide/scripts/{showhist → showhist.py} +7 -2
  162. rapidtide/scripts/{showstxcorr → showstxcorr.py} +7 -2
  163. rapidtide/scripts/{showtc → showtc.py} +7 -2
  164. rapidtide/scripts/{showxcorr_legacy → showxcorr_legacy.py} +8 -8
  165. rapidtide/scripts/{showxcorrx → showxcorrx.py} +7 -2
  166. rapidtide/scripts/{showxy → showxy.py} +7 -2
  167. rapidtide/scripts/{simdata → simdata.py} +7 -2
  168. rapidtide/scripts/{spatialdecomp → spatialdecomp.py} +7 -2
  169. rapidtide/scripts/{spatialfit → spatialfit.py} +7 -2
  170. rapidtide/scripts/{spatialmi → spatialmi.py} +7 -2
  171. rapidtide/scripts/{spectrogram → spectrogram.py} +7 -2
  172. rapidtide/scripts/stupidramtricks.py +238 -0
  173. rapidtide/scripts/{synthASL → synthASL.py} +7 -2
  174. rapidtide/scripts/{tcfrom2col → tcfrom2col.py} +7 -2
  175. rapidtide/scripts/{tcfrom3col → tcfrom3col.py} +7 -2
  176. rapidtide/scripts/{temporaldecomp → temporaldecomp.py} +7 -2
  177. rapidtide/scripts/{testhrv → testhrv.py} +1 -1
  178. rapidtide/scripts/{threeD → threeD.py} +7 -2
  179. rapidtide/scripts/{tidepool → tidepool.py} +7 -2
  180. rapidtide/scripts/{variabilityizer → variabilityizer.py} +7 -2
  181. rapidtide/simFuncClasses.py +2113 -0
  182. rapidtide/simfuncfit.py +312 -108
  183. rapidtide/stats.py +579 -247
  184. rapidtide/tests/.coveragerc +27 -6
  185. rapidtide-2.9.6.data/scripts/fdica → rapidtide/tests/cleanposttest +4 -6
  186. rapidtide/tests/happycomp +9 -0
  187. rapidtide/tests/resethappytargets +1 -1
  188. rapidtide/tests/resetrapidtidetargets +1 -1
  189. rapidtide/tests/resettargets +1 -1
  190. rapidtide/tests/runlocaltest +3 -3
  191. rapidtide/tests/showkernels +1 -1
  192. rapidtide/tests/test_aliasedcorrelate.py +4 -4
  193. rapidtide/tests/test_aligntcs.py +1 -1
  194. rapidtide/tests/test_calcicc.py +1 -1
  195. rapidtide/tests/test_cleanregressor.py +184 -0
  196. rapidtide/tests/test_congrid.py +70 -81
  197. rapidtide/tests/test_correlate.py +1 -1
  198. rapidtide/tests/test_corrpass.py +4 -4
  199. rapidtide/tests/test_delayestimation.py +54 -59
  200. rapidtide/tests/test_dlfiltertorch.py +437 -0
  201. rapidtide/tests/test_doresample.py +2 -2
  202. rapidtide/tests/test_externaltools.py +69 -0
  203. rapidtide/tests/test_fastresampler.py +9 -5
  204. rapidtide/tests/test_filter.py +96 -57
  205. rapidtide/tests/test_findmaxlag.py +50 -19
  206. rapidtide/tests/test_fullrunhappy_v1.py +15 -10
  207. rapidtide/tests/test_fullrunhappy_v2.py +19 -13
  208. rapidtide/tests/test_fullrunhappy_v3.py +28 -13
  209. rapidtide/tests/test_fullrunhappy_v4.py +30 -11
  210. rapidtide/tests/test_fullrunhappy_v5.py +62 -0
  211. rapidtide/tests/test_fullrunrapidtide_v1.py +61 -7
  212. rapidtide/tests/test_fullrunrapidtide_v2.py +26 -14
  213. rapidtide/tests/test_fullrunrapidtide_v3.py +28 -8
  214. rapidtide/tests/test_fullrunrapidtide_v4.py +16 -8
  215. rapidtide/tests/test_fullrunrapidtide_v5.py +15 -6
  216. rapidtide/tests/test_fullrunrapidtide_v6.py +142 -0
  217. rapidtide/tests/test_fullrunrapidtide_v7.py +114 -0
  218. rapidtide/tests/test_fullrunrapidtide_v8.py +66 -0
  219. rapidtide/tests/test_getparsers.py +158 -0
  220. rapidtide/tests/test_io.py +59 -18
  221. rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +10 -10
  222. rapidtide/tests/test_mi.py +1 -1
  223. rapidtide/tests/test_miscmath.py +1 -1
  224. rapidtide/tests/test_motionregress.py +5 -5
  225. rapidtide/tests/test_nullcorr.py +6 -9
  226. rapidtide/tests/test_padvec.py +216 -0
  227. rapidtide/tests/test_parserfuncs.py +101 -0
  228. rapidtide/tests/test_phaseanalysis.py +1 -1
  229. rapidtide/tests/test_rapidtideparser.py +59 -53
  230. rapidtide/tests/test_refinedelay.py +296 -0
  231. rapidtide/tests/test_runmisc.py +5 -5
  232. rapidtide/tests/test_sharedmem.py +60 -0
  233. rapidtide/tests/test_simroundtrip.py +132 -0
  234. rapidtide/tests/test_simulate.py +1 -1
  235. rapidtide/tests/test_stcorrelate.py +4 -2
  236. rapidtide/tests/test_timeshift.py +2 -2
  237. rapidtide/tests/test_valtoindex.py +1 -1
  238. rapidtide/tests/test_zRapidtideDataset.py +5 -3
  239. rapidtide/tests/utils.py +10 -9
  240. rapidtide/tidepoolTemplate.py +88 -70
  241. rapidtide/tidepoolTemplate.ui +60 -46
  242. rapidtide/tidepoolTemplate_alt.py +88 -53
  243. rapidtide/tidepoolTemplate_alt.ui +62 -52
  244. rapidtide/tidepoolTemplate_alt_qt6.py +921 -0
  245. rapidtide/tidepoolTemplate_big.py +1125 -0
  246. rapidtide/tidepoolTemplate_big.ui +2386 -0
  247. rapidtide/tidepoolTemplate_big_qt6.py +1129 -0
  248. rapidtide/tidepoolTemplate_qt6.py +793 -0
  249. rapidtide/util.py +1389 -148
  250. rapidtide/voxelData.py +1048 -0
  251. rapidtide/wiener.py +138 -25
  252. rapidtide/wiener2.py +114 -8
  253. rapidtide/workflows/adjustoffset.py +107 -5
  254. rapidtide/workflows/aligntcs.py +86 -3
  255. rapidtide/workflows/applydlfilter.py +231 -89
  256. rapidtide/workflows/applyppgproc.py +540 -0
  257. rapidtide/workflows/atlasaverage.py +309 -48
  258. rapidtide/workflows/atlastool.py +130 -9
  259. rapidtide/workflows/calcSimFuncMap.py +490 -0
  260. rapidtide/workflows/calctexticc.py +202 -10
  261. rapidtide/workflows/ccorrica.py +123 -15
  262. rapidtide/workflows/cleanregressor.py +415 -0
  263. rapidtide/workflows/delayvar.py +1268 -0
  264. rapidtide/workflows/diffrois.py +84 -6
  265. rapidtide/workflows/endtidalproc.py +149 -9
  266. rapidtide/workflows/fdica.py +197 -17
  267. rapidtide/workflows/filtnifti.py +71 -4
  268. rapidtide/workflows/filttc.py +76 -5
  269. rapidtide/workflows/fitSimFuncMap.py +578 -0
  270. rapidtide/workflows/fixtr.py +74 -4
  271. rapidtide/workflows/gmscalc.py +116 -6
  272. rapidtide/workflows/happy.py +1242 -480
  273. rapidtide/workflows/happy2std.py +145 -13
  274. rapidtide/workflows/happy_parser.py +277 -59
  275. rapidtide/workflows/histnifti.py +120 -4
  276. rapidtide/workflows/histtc.py +85 -4
  277. rapidtide/workflows/{glmfilt.py → linfitfilt.py} +128 -14
  278. rapidtide/workflows/localflow.py +329 -29
  279. rapidtide/workflows/mergequality.py +80 -4
  280. rapidtide/workflows/niftidecomp.py +323 -19
  281. rapidtide/workflows/niftistats.py +178 -8
  282. rapidtide/workflows/pairproc.py +99 -5
  283. rapidtide/workflows/pairwisemergenifti.py +86 -3
  284. rapidtide/workflows/parser_funcs.py +1488 -56
  285. rapidtide/workflows/physiofreq.py +139 -12
  286. rapidtide/workflows/pixelcomp.py +211 -9
  287. rapidtide/workflows/plethquality.py +105 -23
  288. rapidtide/workflows/polyfitim.py +159 -19
  289. rapidtide/workflows/proj2flow.py +76 -3
  290. rapidtide/workflows/rankimage.py +115 -8
  291. rapidtide/workflows/rapidtide.py +1785 -1858
  292. rapidtide/workflows/rapidtide2std.py +101 -3
  293. rapidtide/workflows/rapidtide_parser.py +590 -389
  294. rapidtide/workflows/refineDelayMap.py +249 -0
  295. rapidtide/workflows/refineRegressor.py +1215 -0
  296. rapidtide/workflows/regressfrommaps.py +308 -0
  297. rapidtide/workflows/resamplenifti.py +86 -4
  298. rapidtide/workflows/resampletc.py +92 -4
  299. rapidtide/workflows/retrolagtcs.py +442 -0
  300. rapidtide/workflows/retroregress.py +1501 -0
  301. rapidtide/workflows/roisummarize.py +176 -7
  302. rapidtide/workflows/runqualitycheck.py +72 -7
  303. rapidtide/workflows/showarbcorr.py +172 -16
  304. rapidtide/workflows/showhist.py +87 -3
  305. rapidtide/workflows/showstxcorr.py +161 -4
  306. rapidtide/workflows/showtc.py +172 -10
  307. rapidtide/workflows/showxcorrx.py +250 -62
  308. rapidtide/workflows/showxy.py +186 -16
  309. rapidtide/workflows/simdata.py +418 -112
  310. rapidtide/workflows/spatialfit.py +83 -8
  311. rapidtide/workflows/spatialmi.py +252 -29
  312. rapidtide/workflows/spectrogram.py +306 -33
  313. rapidtide/workflows/synthASL.py +157 -6
  314. rapidtide/workflows/tcfrom2col.py +77 -3
  315. rapidtide/workflows/tcfrom3col.py +75 -3
  316. rapidtide/workflows/tidepool.py +3829 -666
  317. rapidtide/workflows/utils.py +45 -19
  318. rapidtide/workflows/utils_doc.py +293 -0
  319. rapidtide/workflows/variabilityizer.py +118 -5
  320. {rapidtide-2.9.6.dist-info → rapidtide-3.1.3.dist-info}/METADATA +30 -223
  321. rapidtide-3.1.3.dist-info/RECORD +393 -0
  322. {rapidtide-2.9.6.dist-info → rapidtide-3.1.3.dist-info}/WHEEL +1 -1
  323. rapidtide-3.1.3.dist-info/entry_points.txt +65 -0
  324. rapidtide-3.1.3.dist-info/top_level.txt +2 -0
  325. rapidtide/calcandfitcorrpairs.py +0 -262
  326. rapidtide/data/examples/src/testoutputsize +0 -45
  327. rapidtide/data/models/model_revised/model.h5 +0 -0
  328. rapidtide/data/models/model_serdar/model.h5 +0 -0
  329. rapidtide/data/models/model_serdar2/model.h5 +0 -0
  330. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm.nii.gz +0 -0
  331. rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  332. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm.nii.gz +0 -0
  333. rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm_mask.nii.gz +0 -0
  334. rapidtide/data/reference/HCP1200_binmask_2mm_2009c_asym.nii.gz +0 -0
  335. rapidtide/data/reference/HCP1200_lag_2mm_2009c_asym.nii.gz +0 -0
  336. rapidtide/data/reference/HCP1200_mask_2mm_2009c_asym.nii.gz +0 -0
  337. rapidtide/data/reference/HCP1200_negmask_2mm_2009c_asym.nii.gz +0 -0
  338. rapidtide/data/reference/HCP1200_sigma_2mm_2009c_asym.nii.gz +0 -0
  339. rapidtide/data/reference/HCP1200_strength_2mm_2009c_asym.nii.gz +0 -0
  340. rapidtide/glmpass.py +0 -434
  341. rapidtide/refine_factored.py +0 -641
  342. rapidtide/scripts/retroglm +0 -23
  343. rapidtide/workflows/glmfrommaps.py +0 -202
  344. rapidtide/workflows/retroglm.py +0 -643
  345. rapidtide-2.9.6.data/scripts/adjustoffset +0 -23
  346. rapidtide-2.9.6.data/scripts/aligntcs +0 -23
  347. rapidtide-2.9.6.data/scripts/applydlfilter +0 -23
  348. rapidtide-2.9.6.data/scripts/atlasaverage +0 -23
  349. rapidtide-2.9.6.data/scripts/atlastool +0 -23
  350. rapidtide-2.9.6.data/scripts/calcicc +0 -22
  351. rapidtide-2.9.6.data/scripts/calctexticc +0 -23
  352. rapidtide-2.9.6.data/scripts/calcttest +0 -22
  353. rapidtide-2.9.6.data/scripts/ccorrica +0 -23
  354. rapidtide-2.9.6.data/scripts/diffrois +0 -23
  355. rapidtide-2.9.6.data/scripts/endtidalproc +0 -23
  356. rapidtide-2.9.6.data/scripts/filtnifti +0 -23
  357. rapidtide-2.9.6.data/scripts/filttc +0 -23
  358. rapidtide-2.9.6.data/scripts/fingerprint +0 -593
  359. rapidtide-2.9.6.data/scripts/fixtr +0 -23
  360. rapidtide-2.9.6.data/scripts/glmfilt +0 -24
  361. rapidtide-2.9.6.data/scripts/gmscalc +0 -22
  362. rapidtide-2.9.6.data/scripts/happy +0 -25
  363. rapidtide-2.9.6.data/scripts/happy2std +0 -23
  364. rapidtide-2.9.6.data/scripts/happywarp +0 -350
  365. rapidtide-2.9.6.data/scripts/histnifti +0 -23
  366. rapidtide-2.9.6.data/scripts/histtc +0 -23
  367. rapidtide-2.9.6.data/scripts/localflow +0 -23
  368. rapidtide-2.9.6.data/scripts/mergequality +0 -23
  369. rapidtide-2.9.6.data/scripts/pairproc +0 -23
  370. rapidtide-2.9.6.data/scripts/pairwisemergenifti +0 -23
  371. rapidtide-2.9.6.data/scripts/physiofreq +0 -23
  372. rapidtide-2.9.6.data/scripts/pixelcomp +0 -23
  373. rapidtide-2.9.6.data/scripts/plethquality +0 -23
  374. rapidtide-2.9.6.data/scripts/polyfitim +0 -23
  375. rapidtide-2.9.6.data/scripts/proj2flow +0 -23
  376. rapidtide-2.9.6.data/scripts/rankimage +0 -23
  377. rapidtide-2.9.6.data/scripts/rapidtide +0 -23
  378. rapidtide-2.9.6.data/scripts/rapidtide2std +0 -23
  379. rapidtide-2.9.6.data/scripts/resamplenifti +0 -23
  380. rapidtide-2.9.6.data/scripts/resampletc +0 -23
  381. rapidtide-2.9.6.data/scripts/retroglm +0 -23
  382. rapidtide-2.9.6.data/scripts/roisummarize +0 -23
  383. rapidtide-2.9.6.data/scripts/runqualitycheck +0 -23
  384. rapidtide-2.9.6.data/scripts/showarbcorr +0 -23
  385. rapidtide-2.9.6.data/scripts/showhist +0 -23
  386. rapidtide-2.9.6.data/scripts/showstxcorr +0 -23
  387. rapidtide-2.9.6.data/scripts/showtc +0 -23
  388. rapidtide-2.9.6.data/scripts/showxcorr_legacy +0 -536
  389. rapidtide-2.9.6.data/scripts/showxcorrx +0 -23
  390. rapidtide-2.9.6.data/scripts/showxy +0 -23
  391. rapidtide-2.9.6.data/scripts/simdata +0 -23
  392. rapidtide-2.9.6.data/scripts/spatialdecomp +0 -23
  393. rapidtide-2.9.6.data/scripts/spatialfit +0 -23
  394. rapidtide-2.9.6.data/scripts/spatialmi +0 -23
  395. rapidtide-2.9.6.data/scripts/spectrogram +0 -23
  396. rapidtide-2.9.6.data/scripts/synthASL +0 -23
  397. rapidtide-2.9.6.data/scripts/tcfrom2col +0 -23
  398. rapidtide-2.9.6.data/scripts/tcfrom3col +0 -23
  399. rapidtide-2.9.6.data/scripts/temporaldecomp +0 -23
  400. rapidtide-2.9.6.data/scripts/threeD +0 -236
  401. rapidtide-2.9.6.data/scripts/tidepool +0 -23
  402. rapidtide-2.9.6.data/scripts/variabilityizer +0 -23
  403. rapidtide-2.9.6.dist-info/RECORD +0 -359
  404. rapidtide-2.9.6.dist-info/top_level.txt +0 -86
  405. {rapidtide-2.9.6.dist-info → rapidtide-3.1.3.dist-info/licenses}/LICENSE +0 -0
rapidtide/util.py CHANGED
@@ -1,7 +1,7 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*-
  #
- # Copyright 2016-2024 Blaise Frederick
+ # Copyright 2016-2025 Blaise Frederick
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@
  #
  import bisect
  import logging
- import multiprocessing as mp
  import os
  import platform
  import resource
@@ -27,13 +26,24 @@ import subprocess
  import sys
  import time
  from datetime import datetime
+ from multiprocessing import shared_memory
+ from typing import Any, Optional

  import matplotlib.pyplot as plt
  import numpy as np
  import pandas as pd
+ from numpy.typing import NDArray

  import rapidtide._version as tide_versioneer
  import rapidtide.io as tide_io
+ from rapidtide.decorators import getdecoratorvars
+
+ try:
+ import mkl
+
+ mklexists = True
+ except ImportError:
+ mklexists = False

  LGR = logging.getLogger(__name__)
  TimingLGR = logging.getLogger("TIMING")
@@ -41,25 +51,36 @@ MemoryLGR = logging.getLogger("MEMORY")


  # ---------------------------------------- Global constants -------------------------------------------
- defaultbutterorder = 6
- MAXLINES = 10000000
- donotbeaggressive = True
+ defaultbutterorder: int = 6
+ MAXLINES: int = 10000000
+ donotusenumba: bool = False

- # ----------------------------------------- Conditional imports ---------------------------------------
- try:
- from memory_profiler import profile
-
- memprofilerexists = True
- except ImportError:
- memprofilerexists = False

- try:
- from numba import jit
- except ImportError:
+ def disablenumba() -> None:
+ """
+ Set a global variable to disable numba.
+
+ This function sets the global variable `donotusenumba` to `True`, which
+ effectively disables the use of numba in subsequent operations that check
+ this variable.
+
+ Notes
+ -----
+ This function modifies a global variable. The variable `donotusenumba` should
+ be checked by other functions in the codebase to determine whether to use
+ numba or not.
+
+ Examples
+ --------
+ >>> disablenumba()
+ >>> print(donotusenumba)
+ True
+ """
+ global donotusenumba
  donotusenumba = True
- else:
- donotusenumba = False

+
+ # ----------------------------------------- Conditional imports ---------------------------------------
  try:
  import pyfftw
  except ImportError:
@@ -68,13 +89,52 @@ else:
  pyfftwpresent = True


- def checkimports(optiondict):
- if memprofilerexists:
- print("memprofiler exists")
- else:
- print("memprofiler does not exist")
- optiondict["memprofilerexists"] = memprofilerexists
+ def checkimports(optiondict: dict[str, Any]) -> None:
+ """
+ Check availability of optional dependencies and optimization settings.

+ This function verifies the presence of optional packages and optimization
+ settings, printing status messages and updating the provided dictionary with
+ the results. It checks for pyfftw, aggressive optimization flags, and numba
+ usage settings.
+
+ Parameters
+ ----------
+ optiondict : dict[str, Any]
+ Dictionary to be updated with boolean values indicating the status of
+ optional dependencies and optimization settings. The dictionary will be
+ modified in-place with the following keys:
+
+ - "pfftwexists": bool, True if pyfftw is available, False otherwise
+ - "donotbeaggressive": bool, True if aggressive optimization is disabled,
+ False if enabled
+ - "donotusenumba": bool, True if numba usage is disabled, False if numba
+ will be used when available
+
+ Returns
+ -------
+ None
+ This function does not return a value but modifies the input dictionary
+ in-place.
+
+ Notes
+ -----
+ The function relies on global variables:
+ - `pyfftwpresent`: Indicates if pyfftw is available
+ - `donotbeaggressive`: Controls aggressive optimization flag
+ - `donotusenumba`: Controls numba usage flag
+
+ Examples
+ --------
+ >>> options = {}
+ >>> checkimports(options)
+ pfftw does not exist
+ aggressive optimization
+ using numba if present
+ >>> print(options)
+ {'pfftwexists': False, 'donotbeaggressive': False, 'donotusenumba': False}
+ """
+ donotusenumba, donotbeaggressive = getdecoratorvars()
  if pyfftwpresent:
  print("pfftw exists")
  else:
@@ -87,7 +147,6 @@ def checkimports(optiondict):
  print("aggressive optimization")
  optiondict["donotbeaggressive"] = donotbeaggressive

- global donotusenumba
  if donotusenumba:
  print("will not use numba even if present")
  else:
@@ -95,32 +154,164 @@ def checkimports(optiondict):
  optiondict["donotusenumba"] = donotusenumba


- # ----------------------------------------- Conditional jit handling ----------------------------------
- def conditionaljit():
- def resdec(f):
- if donotusenumba:
- return f
- return jit(f, nopython=True)
+ def disablemkl(numprocs: int, debug: bool = False) -> None:
+ """
+ Disable MKL threading for parallel execution.

- return resdec
+ This function configures Intel MKL (Math Kernel Library) to use only a single
+ thread when the number of processes exceeds 1. This is useful for avoiding
+ oversubscription of CPU resources in parallel computing environments.

+ Parameters
+ ----------
+ numprocs : int
+ Number of processes to check against. If greater than 1, MKL threading
+ will be disabled by setting the number of threads to 1.
+ debug : bool, optional
+ If True, prints debug information about the threading configuration
+ (default is False).

- def conditionaljit2():
- def resdec(f):
- if donotusenumba or donotbeaggressive:
- return f
- return jit(f, nopython=True)
+ Returns
+ -------
+ None
+ This function does not return any value.

- return resdec
+ Notes
+ -----
+ This function only has an effect if MKL is available (mklexists is True).
+ The function uses mkl.set_num_threads(1) to disable parallel threading in MKL.

+ Examples
+ --------
+ >>> disablemkl(numprocs=4, debug=True)
+ disablemkl: setting threads to 1

- def disablenumba():
- global donotusenumba
- donotusenumba = True
+ >>> disablemkl(numprocs=1)
+ # No output, no threading changes
+ """
+ if mklexists:
+ if numprocs > 1:
+ if debug:
+ print("disablemkl: setting threads to 1")
+ mkl.set_num_threads(1)
+
+
+ def enablemkl(numthreads: int, debug: bool = False) -> None:
+ """
+ Enable Intel MKL threading with specified number of threads.
+
+ This function configures the Intel MKL (Math Kernel Library) to use the
+ specified number of threads for parallel execution. It only has an effect
+ if MKL is available in the current environment.
+
+ Parameters
+ ----------
+ numthreads : int
+ Number of threads to use for MKL operations. Must be a positive integer.
+ debug : bool, optional
+ If True, print debug information about the thread setting operation.
+ Default is False.
+
+ Returns
+ -------
+ None
+ This function does not return any value.
+
+ Notes
+ -----
+ This function only has an effect if MKL is available (mklexists is True).
+ The function uses mkl.set_num_threads() internally to configure the threading.
+
+ Examples
+ --------
+ >>> enablemkl(4)
+ >>> enablemkl(8, debug=True)
+ """
+ if mklexists:
+ if debug:
+ print(f"enablemkl: setting threads to {numthreads}")
+ mkl.set_num_threads(numthreads)
+
+
+ def configurepyfftw(threads: int = 1, debug: bool = False) -> Optional[str]:
+ if pyfftwpresent:
+ if threads < 1:
+ if os.environ.get("PYFFTW_NUM_THREADS") is not None:
+ pyfftw.config.NUM_THREADS = os.environ.get("PYFFTW_NUM_THREADS")
+ else:
+ pyfftw.config.NUM_THREADS = threads
+
+ if os.environ.get("PYFFTW_PLANNER_EFFORT") is None:
+ pyfftw.config.PLANNER_EFFORT = "FFTW_ESTIMATE"
+
+ # check for wisdom file, load it if it exist
+ wisdomfilename = os.path.join(
+ os.environ.get("HOME"),
+ ".config",
+ f"rapidtide_wisdom_{pyfftw.config.PLANNER_EFFORT}.txt",
+ )
+ if os.path.isfile(wisdomfilename):
+ # load the wisdom
+ # You need to parse the string
+ # For simple cases, eval() can work but is generally not recommended for untrusted input.
+ # For more complex cases, manual parsing or using a library like ast.literal_eval is safer.
+ with open(wisdomfilename, "r") as file:
+ loaded_string = file.read()
+ # Example using eval (use with caution)
+ thewisdom = eval(loaded_string)
+ if debug:
+ print("----------------------Loaded wisdom---------------------------------")
+ print(thewisdom)
+ print("----------------------Loaded wisdom---------------------------------")
+ pyfftw.import_wisdom(thewisdom)
+ print(f"Loaded pyfftw wisdom from {wisdomfilename}")
+ return wisdomfilename
+ else:
+ return None
+
+
+ def savewisdom(wisdomfilename: str, debug: bool = False) -> None:
+ if pyfftwpresent and (wisdomfilename is not None):
+ thewisdom = pyfftw.export_wisdom()
+ makeadir(os.path.split(wisdomfilename)[0])
+
+ if debug:
+ print("----------------------Saved wisdom---------------------------------")
+ print(thewisdom)
+ print("----------------------Saved wisdom---------------------------------")
+
+ # Save the tuple as a string to a text file
+ with open(wisdomfilename, "w") as file:
+ file.write(str(thewisdom))


  # --------------------------- Utility functions -------------------------------------------------
- def findavailablemem():
+ def findavailablemem() -> tuple[int, int]:
+ """
+ Get available memory information from system resources.
+
+ This function retrieves memory information from either cgroup limits or system
+ free memory statistics. It returns a tuple containing the memory limit and
+ swap information, both in bytes.
+
+ Returns
+ -------
+ tuple[int, int]
+ A tuple containing two integers:
+ - First integer: Memory limit in bytes (from cgroup or total memory)
+ - Second integer: Swap available in bytes (from cgroup or swap memory)
+
+ Notes
+ -----
+ The function first checks for cgroup memory limits at "/sys/fs/cgroup/memory/memory.limit_in_bytes".
+ If found, it returns the limit for both values in the tuple. Otherwise, it uses the "free" command
+ to retrieve system memory information, specifically the free memory and swap memory values.
+
+ Examples
+ --------
+ >>> findavailablemem()
+ (8589934592, 2147483648)
+ """
  if os.path.isfile("/sys/fs/cgroup/memory/memory.limit_in_bytes"):
  with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as limit:
  mem = int(limit.read())
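Illustrative usage, not part of the package diff: a minimal sketch of how the MKL and pyfftw helpers added in this hunk might be combined around a parallel section, based only on the signatures shown above. The `tide_util` alias and the literal worker count are placeholders.

    import rapidtide.util as tide_util

    nprocs = 4                                          # illustrative worker count
    wisdomfile = tide_util.configurepyfftw(threads=1)   # may return a path to an existing wisdom file, or None
    tide_util.disablemkl(nprocs)                        # drop MKL to one thread before spawning workers
    # ... run the multiprocessing workload here ...
    tide_util.enablemkl(nprocs)                         # restore MKL threading afterwards
    tide_util.savewisdom(wisdomfile)                    # persist FFTW wisdom if pyfftw is present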
@@ -132,11 +323,125 @@ def findavailablemem():
  return free, swap


- def setmemlimit(memlimit):
+ def checkifincontainer() -> str | None:
+ """
+ Determine if the program is running in a container and identify the container type.
+
+ This function checks environment variables to detect whether the program is running
+ inside a container environment. It specifically looks for indicators of Docker,
+ Singularity, and CircleCI environments. The function returns the container type
+ as a string, or None if running outside any container.
+
+ Returns
+ -------
+ str or None
+ Container type if running in a container, otherwise None. Possible return values:
+ - "Docker": Running in a Docker container (indicated by RUNNING_IN_CONTAINER env var)
+ - "Singularity": Running in a Singularity container (indicated by SINGULARITY_CONTAINER env var)
+ - "CircleCI": Running in CircleCI environment (indicated by CIRCLECI env var)
+ - None: Not running in any container environment
+
+ Notes
+ -----
+ The function prioritizes detection in the following order:
+ 1. Singularity containers (SINGULARITY_CONTAINER env var)
+ 2. Docker containers (RUNNING_IN_CONTAINER env var)
+ 3. CircleCI environment (CIRCLECI env var)
+
+ CircleCI detection takes precedence over other container types, as CircleCI
+ environments may not handle container parameter adjustments properly.
+
+ Examples
+ --------
+ >>> checkifincontainer()
+ 'Docker'
+
+ >>> checkifincontainer()
+ 'Singularity'
+
+ >>> checkifincontainer()
+ None
+ """
+ if os.environ.get("SINGULARITY_CONTAINER") is not None:
+ containertype = "Singularity"
+ elif os.environ.get("RUNNING_IN_CONTAINER") is not None:
+ containertype = "Docker"
+ else:
+ containertype = None
+ if os.environ.get("CIRCLECI") is not None:
+ containertype = "CircleCI"
+ return containertype
+
+
+ def setmemlimit(memlimit: int) -> None:
+ """
+ Set the memory limit for the current process.
+
+ This function sets the virtual memory limit (RLIMIT_AS) for the current process
+ using the resource module. The limit is specified in bytes and applies to both
+ soft and hard limits.
+
+ Parameters
+ ----------
+ memlimit : int
+ The memory limit in bytes. Setting this to -1 will remove the limit.
+ Values should be non-negative integers.
+
+ Returns
+ -------
+ None
+ This function does not return any value.
+
+ Notes
+ -----
+ - This function uses `resource.setrlimit()` with `resource.RLIMIT_AS`
+ - The memory limit is enforced by the operating system
+ - Setting memlimit to -1 removes any existing memory limit
+ - This function may raise `ValueError` or `OSError` if the limit cannot be set
+ - The limit applies to the current process and its children
+
+ Examples
+ --------
+ >>> setmemlimit(1024 * 1024 * 100) # Set limit to 100 MB
+ >>> setmemlimit(-1) # Remove memory limit
+ """
  resource.setrlimit(resource.RLIMIT_AS, (memlimit, memlimit))


- def formatmemamt(meminbytes):
+ def formatmemamt(meminbytes: int) -> str:
+ """
+ Format memory amount in bytes to human readable format.
+
+ Convert a memory size in bytes to a human readable string with appropriate units
+ (B, kB, MB, GB, TB).
+
+ Parameters
+ ----------
+ meminbytes : int
+ Memory amount in bytes to be formatted.
+
+ Returns
+ -------
+ str
+ Formatted memory amount with appropriate unit. The result is rounded to 3
+ decimal places and includes the unit suffix.
+
+ Notes
+ -----
+ The function uses binary units (1024-based) rather than decimal units (1000-based).
+ Units are: B (bytes), kB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes).
+
+ Examples
+ --------
+ >>> formatmemamt(1024)
+ '1.000kB'
+
+ >>> formatmemamt(1048576)
+ '1.000MB'
+
+ >>> formatmemamt(1073741824)
+ '1.000GB'
+ """
  units = ["B", "kB", "MB", "GB", "TB"]
  index = 0
  unitnumber = np.uint64(1)
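Illustrative usage, not part of the package diff: a minimal sketch combining the memory helpers shown in this hunk (findavailablemem, formatmemamt, checkifincontainer, setmemlimit), under the assumption that they are called through a `tide_util` module alias.

    import rapidtide.util as tide_util

    free, swap = tide_util.findavailablemem()     # memory and swap, in bytes
    print(tide_util.formatmemamt(free))           # human-readable, e.g. '15.500GB'
    if tide_util.checkifincontainer() is None:    # only cap memory outside Docker/Singularity/CircleCI
        tide_util.setmemlimit(free)               # apply an RLIMIT_AS cap at the reported amount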
@@ -150,15 +455,91 @@ def formatmemamt(meminbytes):
  return f"{round(meminbytes/unitnumber, 3):.3f}{units[-1]}"


- def logmem(msg=None):
- """Log memory usage with a logging object.
+ def format_bytes(size: float) -> tuple[float, str]:
+ """
+ Convert a size in bytes to a human-readable format with appropriate units.
+
+ Convert a size in bytes to a more readable format by scaling it to the
+ appropriate unit (bytes, kilobytes, megabytes, gigabytes, terabytes).
+
+ Parameters
+ ----------
+ size : float
+ The size in bytes to be converted. Should be a non-negative number.
+
+ Returns
+ -------
+ tuple[float, str]
+ A tuple containing the scaled size (float) and the corresponding unit (str).
+ The unit will be one of: 'bytes', 'kilobytes', 'megabytes', 'gigabytes', 'terabytes'.
+
+ Notes
+ -----
+ This function uses base-2 (binary) units where 1 kilobyte = 1024 bytes.
+ The conversion continues until the size is less than 1024, at which point
+ the appropriate unit is returned.
+
+ Examples
+ --------
+ >>> format_bytes(512)
+ (512.0, 'bytes')
+
+ >>> format_bytes(2048)
+ (2.0, 'kilobytes')
+
+ >>> format_bytes(1048576)
+ (1.0, 'megabytes')
+
+ >>> format_bytes(1073741824)
+ (1.0, 'gigabytes')
+ """
+ # 2**10 = 1024
+ power = 2**10
+ n = 0
+ power_labels = {0: "", 1: "kilo", 2: "mega", 3: "giga", 4: "tera"}
+ while size > power:
+ size /= power
+ n += 1
+ return size, power_labels[n] + "bytes"
+
+
+ def logmem(msg: str | None = None) -> None:
+ """
+ Log memory usage with a logging object.
+
+ This function logs detailed memory usage statistics for the current process
+ and its children, including resident set size (RSS), shared and unshared memory,
+ page faults, and swap usage. On Windows, memory statistics are not available
+ and a placeholder message is logged instead.

  Parameters
  ----------
- msg : str or None, optional
- A message to include in the first column.
- If None, the column headers are logged.
- Default is None.
+ msg : str, optional
+ A message to include in the first column of the logged output.
+ If None, column headers are logged instead. Default is None.
+
+ Returns
+ -------
+ None
+ This function does not return any value; it logs information to a global
+ logger named `MemoryLGR`.
+
+ Notes
+ -----
+ - On Unix-like systems (Linux, macOS), this function uses `resource.getrusage`
+ to retrieve memory usage details.
+ - On Windows, memory statistics are not supported and a placeholder message
+ is logged.
+ - The function maintains internal state (`lastmaxrss_parent`, `lastmaxrss_child`)
+ to compute differences in memory usage between calls.
+
+ Examples
+ --------
+ >>> logmem("Before loop")
+ # Logs memory usage with "Before loop" as the first column
+
+ >>> logmem()
+ # Logs column headers for memory usage statistics
  """
  global lastmaxrss_parent, lastmaxrss_child
  if platform.system() != "Windows":
@@ -214,16 +595,37 @@ def logmem(msg=None):
  MemoryLGR.info("\t".join(outvals))


- def findexecutable(command):
+ def findexecutable(command: str) -> str | None:
  """
+ Locate an executable file in the system PATH.
+
+ This function searches for an executable file with the given name in the
+ system's PATH environment variable. It uses the most appropriate method
+ based on the Python version.

  Parameters
  ----------
- command
+ command : str
+ The name of the executable command to search for.

  Returns
  -------
-
+ str or None
+ The full path to the executable if found, None otherwise.
+
+ Notes
+ -----
+ For Python 3.3 and later, this function uses `shutil.which()` which is the
+ recommended approach. For earlier Python versions, it manually searches
+ through the PATH environment variable and checks execute permissions.
+
+ Examples
+ --------
+ >>> findexecutable('python')
+ '/usr/bin/python'
+
+ >>> findexecutable('nonexistent_command')
+ None
  """
  import shutil

@@ -237,16 +639,39 @@ def findexecutable(command):
  return None


- def isexecutable(command):
+ def isexecutable(command: str) -> bool:
  """
+ Check if a command is executable in the system's PATH.
+
+ This function determines whether a given command can be executed by checking
+ if it exists in the system's PATH and has execute permissions. For Python 3.3+
+ the function uses shutil.which() for cross-platform compatibility, while for
+ older versions it manually checks execute permissions in each PATH directory.

  Parameters
  ----------
- command
+ command : str
+ The name of the command to check for executability.

  Returns
  -------
-
+ bool
+ True if the command is executable, False otherwise.
+
+ Notes
+ -----
+ This function provides cross-platform compatibility by using different
+ approaches depending on the Python version. For Python 3.3 and later,
+ shutil.which() is used which handles platform-specific path searching.
+ For older Python versions, the function manually checks execute permissions
+ in each directory listed in the PATH environment variable.
+
+ Examples
+ --------
+ >>> isexecutable('python')
+ True
+ >>> isexecutable('nonexistent_command')
+ False
  """
  import shutil

@@ -263,7 +688,82 @@ def isexecutable(command):
  )


- def findreferencedir():
+ def makeadir(pathname: str) -> bool:
+ """
+ Create a directory if it doesn't already exist.
+
+ This function attempts to create a directory at the specified path. If the
+ directory already exists, it returns True without raising an error. If the
+ directory cannot be created due to a permissions error or other OS-related
+ issues, it returns False.
+
+ Parameters
+ ----------
+ pathname : str
+ The path of the directory to create. Can be a relative or absolute path.
+
+ Returns
+ -------
+ bool
+ True if the directory exists or was successfully created, False otherwise.
+
+ Notes
+ -----
+ This function uses `os.makedirs()` which creates all intermediate-level
+ directories needed to contain the leaf directory. If the directory already
+ exists, no error is raised.
+
+ Examples
+ --------
+ >>> makeadir('test_directory')
+ True
+
+ >>> makeadir('path/to/new/directory')
+ True
+
+ >>> makeadir('/root/protected_directory')
+ False # Will fail due to insufficient permissions
+ """
+ try:
+ os.makedirs(pathname)
+ except OSError:
+ if os.path.exists(pathname):
+ # We are nearly safe
+ return True
+ else:
+ # There was an error on creation, so make sure we know about it
+ print("ERROR: ", pathname, " does not exist, and could not create it")
+ return False
+ return True
+
+
+ def findreferencedir() -> str:
+ """
+ Find and return the path to the rapidtide reference data directory.
+
+ This function locates the site-packages directory and constructs the path to
+ the rapidtide reference data folder. It searches through all site-packages
+ directories to find the one ending with "site-packages" and then builds
+ the reference directory path relative to that location.
+
+ Returns
+ -------
+ str
+ Absolute path to the rapidtide reference data directory, typically
+ structured as: {site-packages-dir}/rapidtide/data/reference/
+
+ Notes
+ -----
+ This function is designed to work within the rapidtide package environment
+ and assumes that rapidtide is installed in a standard Python site-packages
+ location. The function will return None if no site-packages directory is found.
+
+ Examples
+ --------
+ >>> ref_dir = findreferencedir()
+ >>> print(ref_dir)
+ '/usr/local/lib/python3.8/site-packages/rapidtide/data/reference'
+ """
  # Get the list of directories
  site_packages_dirs = site.getsitepackages()

@@ -283,33 +783,89 @@ def findreferencedir():
  return referencedir


- def savecommandline(theargs, thename):
+ def savecommandline(theargs: list[str], thename: str) -> None:
  """
+ Save command line arguments to a text file.
+
+ This function takes a list of command line arguments and saves them
+ as a single line in a text file with a specified name.

  Parameters
  ----------
- theargs
- thename
+ theargs : list[str]
+ List of command line arguments to be saved
+ thename : str
+ Base name for the output file (without extension)

  Returns
  -------
-
+ None
+ This function does not return any value
+
+ Notes
+ -----
+ The function creates a file named ``{thename}_commandline.txt`` containing
+ the command line arguments joined by spaces on a single line.
+
+ Examples
+ --------
+ >>> savecommandline(['python', 'script.py', '--verbose'], 'myrun')
+ # Creates file 'myrun_commandline.txt' with content: "python script.py --verbose"
  """
  tide_io.writevec([" ".join(theargs)], thename + "_commandline.txt")


- def startendcheck(timepoints, startpoint, endpoint):
+ def startendcheck(timepoints: int, startpoint: int, endpoint: int) -> tuple[int, int]:
  """
+ Validate and adjust start and end points for time series processing.
+
+ This function checks if the provided start and end points are within valid
+ bounds for a time series with the specified number of time points. It handles
+ edge cases by adjusting values to reasonable defaults and raises errors for
+ invalid configurations.

  Parameters
  ----------
- timepoints
- startpoint
- endpoint
+ timepoints : int
+ Total number of time points in the series. Must be positive.
+ startpoint : int
+ Starting index for the time series segment. If negative, set to 0.
+ If greater than timepoints-1, the program exits with an error.
+ endpoint : int
+ Ending index for the time series segment. If -1, set to a large default value.
+ If greater than timepoints-1, set to timepoints-1.

  Returns
  -------
-
+ tuple[int, int]
+ A tuple containing (realstart, realend) where both values are valid
+ indices for the time series. realstart <= realend and both are within
+ the valid range [0, timepoints-1].
+
+ Notes
+ -----
+ - If startpoint is negative, it's automatically set to 0
+ - If endpoint is -1, it's set to 100000000 (large default value)
+ - If endpoint exceeds timepoints-1, it's set to timepoints-1
+ - The function exits with sys.exit() if startpoint >= endpoint or if
+ startpoint exceeds the maximum valid index
+
+ Examples
+ --------
+ >>> startendcheck(10, 2, 5)
+ startpoint set to 2
+ endpoint set to 5
+ (2, 5)
+
+ >>> startendcheck(5, -1, 3)
+ startpoint set to minimum, (0)
+ endpoint set to 3
+ (0, 3)
+
+ >>> startendcheck(5, 2, -1)
+ startpoint set to 2
+ endpoint set to maximum, (4)
+ (2, 4)
  """
  if startpoint > timepoints - 1:
  print("startpoint is too large (maximum is ", timepoints - 1, ")")
@@ -335,33 +891,61 @@ def startendcheck(timepoints, startpoint, endpoint):


  def valtoindex(
- thearray,
- thevalue,
- evenspacing=True,
- discrete=True,
- discretization="round",
- debug=False,
- ):
+ thearray: NDArray,
+ thevalue: float,
+ evenspacing: bool = True,
+ discrete: bool = True,
+ discretization: str = "round",
+ debug: bool = False,
+ ) -> int:
  """
+ Find the index of the closest value in an ordered array to a given value.
+
+ This function computes the index of the element in `thearray` that is closest
+ to `thevalue`. It supports both evenly spaced and unevenly spaced arrays,
+ with options for discrete or continuous index output and different rounding
+ methods.

  Parameters
  ----------
- thearray: array-like
- An ordered list of values (does not need to be equally spaced)
- thevalue: float
- The value to search for in the array
- evenspacing: boolean, optional
+ thearray : array-like
+ An ordered list of values (does not need to be equally spaced).
+ thevalue : float
+ The value to search for in the array.
+ evenspacing : bool, optional
  If True (default), assume data is evenly spaced for faster calculation.
- discrete: boolean, optional
- If True make the index an integer (round by default).
- discretization: string, optional
- Select rounding method - floor, ceiling, or round(default)
+ discrete : bool, optional
+ If True (default), the returned index is an integer.
+ discretization : str, optional
+ Select rounding method when `discrete=True`. Options are:
+ - "round" (default): round to nearest integer
+ - "floor": round down to nearest integer
+ - "ceiling": round up to nearest integer
+ debug : bool, optional
+ If True, print debug information during execution.

  Returns
  -------
- closestidx: int
- The index of the sample in thearray that is closest to val
-
+ int or float
+ The index of the closest value in `thearray` to `thevalue`. If `discrete=False`,
+ the index may be a float.
+
+ Notes
+ -----
+ When `evenspacing=True`, the function assumes uniform spacing between elements
+ and calculates the index using a linear interpolation formula. This is faster
+ than the default method but only accurate for evenly spaced data.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.array([0, 1, 2, 3, 4])
+ >>> valtoindex(arr, 2.3)
+ 2
+ >>> valtoindex(arr, 2.7, discretization="ceil")
+ 3
+ >>> valtoindex(arr, 2.5, evenspacing=False)
+ 2
  """
  if evenspacing:
  limval = np.max([thearray[0], np.min([thearray[-1], thevalue])])
@@ -392,19 +976,43 @@ def valtoindex(
  return int((np.abs(thearray - thevalue)).argmin())


- def progressbar(thisval, end_val, label="Percent", barsize=60):
+ def progressbar(thisval: int, end_val: int, label: str = "Percent", barsize: int = 60) -> None:
  """
+ Display a progress bar in the terminal.
+
+ This function creates a visual progress indicator that updates in place
+ on the terminal. It shows a bar filled according to the progress percentage
+ and displays the percentage value.

  Parameters
  ----------
- thisval
- end_val
- label
- barsize
+ thisval : int
+ Current progress value. Should be less than or equal to ``end_val``.
+ end_val : int
+ Total value representing 100% progress.
+ label : str, optional
+ Label to display before the progress bar (default is "Percent").
+ barsize : int, optional
+ Size of the progress bar in characters (default is 60).

  Returns
  -------
-
+ None
+ This function does not return any value. It prints directly to stdout.
+
+ Notes
+ -----
+ The progress bar updates in place using carriage return (`\\r`) to overwrite
+ the previous output. The function uses ``sys.stdout.flush()`` to ensure
+ immediate display updates.
+
+ Examples
+ --------
+ >>> progressbar(25, 100, "Loading", 30)
+ Loading: [############################## ] 25.00%
+
+ >>> progressbar(50, 50)
+ Percent: [##################################################] 100.00%
  """
  percent = float(thisval) / end_val
  hashes = "#" * int(round(percent * barsize))
@@ -413,18 +1021,40 @@ def progressbar(thisval, end_val, label="Percent", barsize=60):
413
1021
  sys.stdout.flush()
414
1022
 
415
1023
 
416
- def makelaglist(lagstart, lagend, lagstep):
1024
+ def makelaglist(lagstart: float, lagend: float, lagstep: float) -> NDArray:
417
1025
  """
1026
+ Create a list of lag values from start to end with specified step size.
1027
+
1028
+ This function generates an array of evenly spaced lag values starting from
1029
+ `lagstart` up to (and including) `lagend` with increments of `lagstep`.
418
1030
 
419
1031
  Parameters
420
1032
  ----------
421
- lagstart
422
- lagend
423
- lagstep
1033
+ lagstart : float
1034
+ The starting value of the lag sequence.
1035
+ lagend : float
1036
+ The ending value of the lag sequence (inclusive).
1037
+ lagstep : float
1038
+ The step size between consecutive lag values.
424
1039
 
425
1040
  Returns
426
1041
  -------
427
-
1042
+ NDArray
1043
+ Array of lag values from `lagstart` to `lagend` with step size `lagstep`.
1044
+
1045
+ Notes
1046
+ -----
1047
+ The function adjusts the `lagend` value to ensure that the last value in the
1048
+ sequence is exactly `lagend` if it's a valid step from `lagstart`. The actual
1049
+ number of steps is calculated as ``(lagend - lagstart) // lagstep + 1``.
1050
+
1051
+ Examples
1052
+ --------
1053
+ >>> makelaglist(0.0, 1.0, 0.25)
1054
+ array([0.  , 0.25, 0.5 , 0.75, 1.  ])
1055
+
1056
+ >>> makelaglist(1.0, 5.0, 1.5)
1057
+ array([1. , 2.5, 4. ])
428
1058
  """
429
1059
  numsteps = int((lagend - lagstart) // lagstep + 1)
430
1060
  lagend = lagstart + lagstep * (numsteps - 1)
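
A worked example of the step-count arithmetic shown above; only the two visible lines are taken from the package, and the final layout with ``np.linspace`` is an assumption:

    import numpy as np

    def makelaglist_sketch(lagstart, lagend, lagstep):
        numsteps = int((lagend - lagstart) // lagstep + 1)
        lagend = lagstart + lagstep * (numsteps - 1)   # trim the end to a whole step
        return np.linspace(lagstart, lagend, numsteps)

    print(makelaglist_sketch(1.0, 5.0, 1.5))   # [1.  2.5 4. ]  (4.0 is the last whole step below 5.0)
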
@@ -445,27 +1075,79 @@ def makelaglist(lagstart, lagend, lagstep):
445
1075
 
446
1076
 
447
1077
  # ------------------------------------------ Version function ----------------------------------
448
- def version():
1078
+ def version() -> tuple[str, str, str, bool | str]:
449
1079
  """
1080
+ Retrieve version information for the package, including version string,
1081
+ Git SHA, commit date, and dirty status.
1082
+
1083
+ This function attempts to retrieve version information from environment
1084
+ variables when running inside a container. If not in a container, it falls
1085
+ back to using `tide_versioneer.get_versions()` to obtain version details
1086
+ from the Git repository.
450
1087
 
451
1088
  Returns
452
1089
  -------
453
-
1090
+ tuple of (str, str, str, bool or str)
1091
+ A tuple containing:
1092
+ - version (str): The version string, potentially modified for container builds.
1093
+ - sha (str): The Git commit SHA, or "UNKNOWN" if not available.
1094
+ - date (str): The Git commit date, or "UNKNOWN" if not available.
1095
+ - isdirty (bool or str): Indicates whether the working directory is dirty
1096
+ (i.e., has uncommitted changes). Returns `True`, `False`, or `"UNKNOWN"`
1097
+ if the information is not available.
1098
+
1099
+ Notes
1100
+ -----
1101
+ - In containerized environments, version information is expected to be
1102
+ provided via environment variables: `GITVERSION`, `GITDIRECTVERSION`,
1103
+ `GITSHA`, and `GITDATE`.
1104
+ - If the environment variable `RUNNING_IN_CONTAINER` is not set, the function
1105
+ attempts to use `tide_versioneer` to extract version information from the
1106
+ Git repository.
1107
+ - If `tide_versioneer` is not available or fails, the function returns
1108
+ `("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")`.
1109
+
1110
+ Examples
1111
+ --------
1112
+ >>> version()
1113
+ ('1.2.3', 'a1b2c3d', '2023-04-05', False)
454
1114
  """
455
1115
  try:
456
- dummy = os.environ["IS_DOCKER_8395080871"]
1116
+ dummy = os.environ["RUNNING_IN_CONTAINER"]
457
1117
  except KeyError:
458
- isdocker = False
1118
+ iscontainer = False
459
1119
  else:
460
- isdocker = True
1120
+ iscontainer = True
461
1121
 
462
- if isdocker:
1122
+ if iscontainer:
463
1123
  try:
464
1124
  theversion = os.environ["GITVERSION"]
465
1125
  if theversion.find("+") < 0:
466
1126
  theversion = theversion.split(".")[0]
467
1127
  except KeyError:
468
1128
  theversion = "UNKNOWN"
1129
+ try:
1130
+ thedirectversion = os.environ["GITDIRECTVERSION"]
1131
+ directversionparts = thedirectversion.split("-")
1132
+ if len(directversionparts) == 3:
1133
+ thedirectversion = (
1134
+ directversionparts[0]
1135
+ + "."
1136
+ + directversionparts[1]
1137
+ + "+"
1138
+ + directversionparts[2]
1139
+ )
1140
+ isdirty = True
1141
+ elif len(directversionparts) == 2:
1142
+ thedirectversion = directversionparts[0] + "." + directversionparts[1]
1143
+ isdirty = True
1144
+ elif len(directversionparts) == 1:
1145
+ thedirectversion = directversionparts[0]
1146
+ isdirty = False
1147
+ else:
1148
+ pass
1149
+ except KeyError:
1150
+ thedirectversion = "UNKNOWN"
+ isdirty = False
469
1151
  try:
470
1152
  thesha = os.environ["GITSHA"]
471
1153
  except KeyError:
@@ -474,49 +1156,111 @@ def version():
474
1156
  thedate = os.environ["GITDATE"]
475
1157
  except KeyError:
476
1158
  thedate = "UNKNOWN"
477
- isdirty = False
1159
+ if thedirectversion != "UNKNOWN":
1160
+ theversion = thedirectversion
478
1161
  else:
479
1162
  try:
480
1163
  versioninfo = tide_versioneer.get_versions()
481
1164
  except:
482
1165
  return "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
483
-
1166
+ isdirty = versioninfo["dirty"]
1167
+ if isdirty is None:
1168
+ isdirty = "UNKNOWN"
484
1169
  theversion = versioninfo["version"]
485
1170
  if theversion is None:
486
1171
  theversion = "UNKNOWN"
1172
+ else:
1173
+ splitversion = theversion.split("+")
1174
+ if len(splitversion) > 1:
1175
+ resplit = splitversion[1].split(".")
1176
+ if len(resplit) == 3:
1177
+ if resplit[0] == "0":
1178
+ theversion = splitversion[0]
487
1179
  thesha = versioninfo["full-revisionid"]
488
1180
  if thesha is None:
489
1181
  thesha = "UNKNOWN"
490
1182
  thedate = versioninfo["date"]
491
1183
  if thedate is None:
492
1184
  thedate = "UNKNOWN"
493
- isdirty = versioninfo["dirty"]
494
- if isdirty is None:
495
- isdirty = "UNKNOWN"
496
1185
 
497
1186
  return theversion, thesha, thedate, isdirty
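
To make the container branch concrete, the GITDIRECTVERSION rewrite in isolation looks like this (helper name and sample values are illustrative, not from the package): a ``version-distance-sha`` string becomes ``version.distance+sha``, and anything beyond a bare tag is flagged dirty:

    def normalize_directversion(thedirectversion):
        parts = thedirectversion.split("-")
        if len(parts) == 3:
            return parts[0] + "." + parts[1] + "+" + parts[2], True
        if len(parts) == 2:
            return parts[0] + "." + parts[1], True
        return parts[0], False

    print(normalize_directversion("v3.1.3-12-gabc1234"))   # ('v3.1.3.12+gabc1234', True)
    print(normalize_directversion("v3.1.3"))               # ('v3.1.3', False)
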
498
1187
 
499
1188
 
500
1189
  # --------------------------- timing functions -------------------------------------------------
501
- def timefmt(thenumber):
1190
+ def timefmt(thenumber: float) -> str:
502
1191
  """
1192
+ Format a floating-point number as a string with fixed width and 2 decimal places.
503
1193
 
504
1194
  Parameters
505
1195
  ----------
506
- thenumber
1196
+ thenumber : float
1197
+ The numeric value to be formatted as a string.
507
1198
 
508
1199
  Returns
509
1200
  -------
510
- outputlines:
511
- The formatted lines to save to the formatted runtimings file
512
- totaldiff:
513
- The total time from start to finish, in seconds
514
-
1201
+ str
1202
+ A string representation of the input number formatted to 2 decimal places
1203
+ with a minimum width of 10 characters, right-aligned.
1204
+
1205
+ Notes
1206
+ -----
1207
+ The formatting uses "{:10.2f}".format() which ensures:
1208
+ - A minimum field width of 10 characters
1209
+ - 2 decimal places
1210
+ - Right alignment (default for numeric formats)
1211
+
1212
+ Examples
1213
+ --------
1214
+ >>> timefmt(123.456)
1215
+ '    123.46'
1216
+ >>> timefmt(1.234)
1217
+ '      1.23'
1218
+ >>> timefmt(0.0)
1219
+ '      0.00'
515
1220
  """
516
1221
  return "{:10.2f}".format(thenumber)
517
1222
 
518
1223
 
519
- def proctiminglogfile(logfilename, timewidth=10):
1224
+ def proctiminglogfile(logfilename: str, timewidth: int = 10) -> tuple[list[str], float]:
1225
+ """
1226
+ Process a timing log file and return formatted timing information.
1227
+
1228
+ This function reads a timing log file, calculates cumulative and incremental
1229
+ time differences from the start time, and formats the output into a list of
1230
+ strings. If numerical data and units are present in the log, they are used
1231
+ to compute and display processing speeds.
1232
+
1233
+ Parameters
1234
+ ----------
1235
+ logfilename : str
1236
+ Path to the timing log file. The file should be a CSV with columns:
1237
+ 'time', 'description', 'number', 'units'.
1238
+ timewidth : int, optional
1239
+ Width for right-justifying time values in the output (default is 10).
1240
+
1241
+ Returns
1242
+ -------
1243
+ tuple[list[str], float]
1244
+ A tuple containing:
1245
+ - List of formatted timing lines as strings.
1246
+ - Total elapsed time in seconds as a float.
1247
+
1248
+ Notes
1249
+ -----
1250
+ The log file is expected to follow the format:
1251
+ `YYYYMMDDTHHMMSS.ffffff` for timestamps.
1252
+ The function assumes the first row is the starting point for all time calculations.
1253
+
1254
+ Examples
1255
+ --------
1256
+ >>> lines, total_time = proctiminglogfile('timing.log', timewidth=12)
1257
+ >>> for line in lines:
1258
+ ... print(line)
1259
+ Total (s) Diff. (s) Description
1260
+ 0.00 0.00 Start process
1261
+ 5.20 5.20 Load data
1262
+ 12.40 7.20 Process data (1000 items @ 138.89 items/s)
1263
+ """
520
1264
  timingdata = pd.read_csv(
521
1265
  logfilename,
522
1266
  sep=None,
@@ -554,18 +1298,51 @@ def proctiminglogfile(logfilename, timewidth=10):
554
1298
  return outputlines, totaldiff
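
A small sketch of the timestamp arithmetic described in the docstring, using the ``YYYYMMDDTHHMMSS.ffffff`` format from the Notes; the packaged function additionally reads the log with pandas and appends per-step rates:

    from datetime import datetime

    fmt = "%Y%m%dT%H%M%S.%f"
    stamps = ["20230405T120000.000000", "20230405T120005.200000", "20230405T120012.400000"]
    start = datetime.strptime(stamps[0], fmt)
    previous = start
    for stamp in stamps:
        current = datetime.strptime(stamp, fmt)
        total = (current - start).total_seconds()      # time since the first log entry
        step = (current - previous).total_seconds()    # time since the previous entry
        print(f"{total:10.2f}{step:10.2f}")
        previous = current
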
555
1299
 
556
1300
 
557
- def proctiminginfo(thetimings, outputfile="", extraheader=None):
1301
+ def proctiminginfo(
1302
+ thetimings: list[tuple[str, float, float | None, str | None]],
1303
+ outputfile: str = "",
1304
+ extraheader: str | None = None,
1305
+ ) -> None:
558
1306
  """
1307
+ Process and display timing information for program execution.
1308
+
1309
+ This function takes a list of timing events and displays them in a formatted table
1310
+ showing clock time, program time, duration, and event descriptions. Optional
1311
+ output to file and additional header information can also be specified.
559
1312
 
560
1313
  Parameters
561
1314
  ----------
562
- thetimings
563
- outputfile
564
- extraheader
1315
+ thetimings : list of tuple of (str, float, float | None, str | None)
1316
+ List of timing events where each event is a tuple containing:
1317
+ - Event description (str)
1318
+ - Timestamp (float)
1319
+ - Events per second (float or None)
1320
+ - Unit of measurement (str or None)
1321
+ outputfile : str, optional
1322
+ Path to output file for writing timing information (default is "")
1323
+ extraheader : str, optional
1324
+ Additional header text to be printed before timing information (default is None)
565
1325
 
566
1326
  Returns
567
1327
  -------
568
-
1328
+ None
1329
+ This function does not return any value but prints timing information to stdout
1330
+ and optionally writes to a file.
1331
+
1332
+ Notes
1333
+ -----
1334
+ The function formats timestamps using YYYYMMDDTHHMMSS format and calculates
1335
+ durations between consecutive events. If event rate information is provided,
1336
+ it will be displayed in the format "(rate unit/second)".
1337
+
1338
+ Examples
1339
+ --------
1340
+ >>> timings = [
1341
+ ... ("Start", 1640995200.0, None, None),
1342
+ ... ("Process A", 1640995205.5, 100.0, "events"),
1343
+ ... ("End", 1640995210.0, None, None)
1344
+ ... ]
1345
+ >>> proctiminginfo(timings, "timing_output.txt", "Execution Timing Report")
569
1346
  """
570
1347
  theinfolist = []
571
1348
  start = thetimings[0]
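
The tuple layout that ``proctiminginfo`` expects, and the duration/rate arithmetic it documents, can be exercised with a few lines; this is a sketch of the bookkeeping only, without the clock-time formatting or file output:

    timings = [
        ("Start", 1640995200.0, None, None),
        ("Process A", 1640995205.5, 100.0, "events"),
        ("End", 1640995210.0, None, None),
    ]
    start = timings[0]
    previous = start
    for desc, stamp, number, units in timings:
        sincestart = stamp - start[1]
        sinceprev = stamp - previous[1]
        rate = f" ({number / sinceprev:.2f} {units}/second)" if number and sinceprev > 0 else ""
        print(f"{sincestart:10.2f}{sinceprev:10.2f}  {desc}{rate}")
        previous = (desc, stamp, number, units)
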
@@ -599,7 +1376,52 @@ def proctiminginfo(thetimings, outputfile="", extraheader=None):
599
1376
 
600
1377
 
601
1378
  # timecourse functions
602
- def maketcfrom3col(inputdata, timeaxis, outputvector, debug=False):
1379
+ def maketcfrom3col(
1380
+ inputdata: NDArray, timeaxis: NDArray, outputvector: NDArray, debug: bool = False
1381
+ ) -> NDArray:
1382
+ """
1383
+ Create temporal output vector from 3-column input data.
1384
+
1385
+ This function processes input data containing start times, durations, and values,
1386
+ and maps these to an output vector based on a time axis. Each input column defines
1387
+ a time interval [start_time, start_time + duration] that is mapped to the output
1388
+ vector by setting the corresponding elements to the specified value.
1389
+
1390
+ Parameters
1391
+ ----------
1392
+ inputdata : array-like
1393
+ Timing data as an array of shape (3, n), the transpose of a 3-column file, where:
1394
+ - First row: start times
1395
+ - Second row: durations
1396
+ - Third row: values to assign
1397
+ timeaxis : array-like
1398
+ Time axis defining the temporal resolution of the output vector
1399
+ outputvector : array-like
1400
+ Output vector to be populated with values from inputdata
1401
+ debug : bool, optional
1402
+ If True, displays a plot of the output vector (default is False)
1403
+
1404
+ Returns
1405
+ -------
1406
+ ndarray
1407
+ The populated output vector with values assigned according to input intervals
1408
+
1409
+ Notes
1410
+ -----
1411
+ - Intervals are clipped to the bounds of the time axis
1412
+ - Only intervals that overlap with the time axis (0 to max(timeaxis)) are processed
1413
+ - The function modifies the outputvector in-place
1414
+
1415
+ Examples
1416
+ --------
1417
+ >>> import numpy as np
1418
+ >>> timeaxis = np.linspace(0, 10, 11)
1419
+ >>> inputdata = np.array([[1, 3], [2, 1], [5, 10]])
1420
+ >>> outputvector = np.zeros(11)
1421
+ >>> result = maketcfrom3col(inputdata, timeaxis, outputvector)
1422
+ >>> print(result)
1423
+ [0. 5. 5. 10. 10. 0. 0. 0. 0. 0. 0.]
1424
+ """
603
1425
  theshape = np.shape(inputdata)
604
1426
  for idx in range(0, theshape[1]):
605
1427
  starttime = inputdata[0, idx]
@@ -618,7 +1440,53 @@ def maketcfrom3col(inputdata, timeaxis, outputvector, debug=False):
618
1440
  return outputvector
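
The interval filling described above can be sketched for a regularly sampled time axis as follows; inclusive endpoint handling is assumed here to match the docstring example, and the packaged function does the index lookups with its own helpers, clips to the axis, and can plot:

    import numpy as np

    timeaxis = np.linspace(0.0, 10.0, 11)
    outputvector = np.zeros_like(timeaxis)
    # rows: start times, durations, values (the transpose of a 3-column timing file)
    inputdata = np.array([[1.0, 3.0], [2.0, 1.0], [5.0, 10.0]])
    tstep = timeaxis[1] - timeaxis[0]
    for idx in range(inputdata.shape[1]):
        starttime, duration, value = inputdata[:, idx]
        startbin = int(np.round(starttime / tstep))
        endbin = int(np.round((starttime + duration) / tstep))
        outputvector[startbin:endbin + 1] = value
    print(outputvector)   # [ 0.  5.  5. 10. 10.  0.  0.  0.  0.  0.  0.]
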
619
1441
 
620
1442
 
621
- def maketcfrom2col(inputdata, timeaxis, outputvector, debug=False):
1443
+ def maketcfrom2col(
1444
+ inputdata: NDArray, timeaxis: NDArray, outputvector: NDArray, debug: bool = False
1445
+ ) -> NDArray:
1446
+ """
1447
+ Create a temporal output vector from 2-column input data.
1448
+
1449
+ This function processes input data of shape (2, n), where the first row
1450
+ holds time-range boundaries (indices) and the second row holds the values to be assigned to
1451
+ corresponding time intervals in the output vector. The function iterates through
1452
+ the input data and assigns values to contiguous ranges in the output vector.
1453
+
1454
+ Parameters
1455
+ ----------
1456
+ inputdata : NDArray
1457
+ 2D array with shape (2, n) where first row contains start/end time indices
1458
+ and second row contains corresponding values to assign.
1459
+ timeaxis : NDArray
1460
+ 1D array representing time values for plotting (used only in debug mode).
1461
+ outputvector : NDArray
1462
+ 1D array to be populated with values from inputdata. This array is modified
1463
+ in-place and returned.
1464
+ debug : bool, optional
1465
+ If True, enables debug output including range assignments and plots the
1466
+ resulting output vector. Default is False.
1467
+
1468
+ Returns
1469
+ -------
1470
+ NDArray
1471
+ The modified outputvector with values assigned from inputdata.
1472
+
1473
+ Notes
1474
+ -----
1475
+ The function processes inputdata by iterating through columns and assigning
1476
+ values to ranges in outputvector. Each column represents a time interval
1477
+ [start, end) where start is taken from inputdata[0, i-1] and end from
1478
+ inputdata[0, i]. The value assigned is from inputdata[1, i-1].
1479
+
1480
+ Examples
1481
+ --------
1482
+ >>> import numpy as np
1483
+ >>> inputdata = np.array([[0, 5, 10, 15], [1, 2, 3, 4]])
1484
+ >>> timeaxis = np.arange(20)
1485
+ >>> outputvector = np.zeros(20)
1486
+ >>> result = maketcfrom2col(inputdata, timeaxis, outputvector, debug=False)
1487
+ >>> print(result[:15])
1488
+ [1. 1. 1. 1. 1. 2. 2. 2. 2. 2. 3. 3. 3. 3. 3.]
1489
+ """
622
1490
  theshape = np.shape(inputdata)
623
1491
  rangestart = int(inputdata[0, 0])
624
1492
  for i in range(1, theshape[1]):
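
Equivalently, the contiguous-range fill documented for ``maketcfrom2col`` amounts to the loop below, which reuses only the structure visible in this diff:

    import numpy as np

    inputdata = np.array([[0, 5, 10, 15], [1, 2, 3, 4]])
    outputvector = np.zeros(20)
    rangestart = int(inputdata[0, 0])
    for i in range(1, inputdata.shape[1]):
        rangeend = int(inputdata[0, i])
        outputvector[rangestart:rangeend] = inputdata[1, i - 1]   # value held over [start, end)
        rangestart = rangeend
    print(outputvector[:15])   # [1. 1. 1. 1. 1. 2. 2. 2. 2. 2. 3. 3. 3. 3. 3.]
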
@@ -640,7 +1508,62 @@ def maketcfrom2col(inputdata, timeaxis, outputvector, debug=False):
640
1508
 
641
1509
 
642
1510
  # --------------------------- simulation functions ----------------------------------------------
643
- def makeslicetimes(numslices, sliceordertype, tr=1.0, multibandfac=1, debug=False):
1511
+ def makeslicetimes(
1512
+ numslices: int,
1513
+ sliceordertype: str,
1514
+ tr: float = 1.0,
1515
+ multibandfac: int = 1,
1516
+ debug: bool = False,
1517
+ ) -> NDArray | None:
1518
+ """
1519
+ Generate slice timing list for MRI data acquisition based on slice ordering type.
1520
+
1521
+ Parameters
1522
+ ----------
1523
+ numslices : int
1524
+ Number of slices in the volume.
1525
+ sliceordertype : str
1526
+ Type of slice ordering. Valid options are:
1527
+ - 'ascending': slices acquired in ascending order
1528
+ - 'descending': slices acquired in descending order
1529
+ - 'ascending_interleaved': interleaved ascending order
1530
+ - 'descending_interleaved': interleaved descending order
1531
+ - 'ascending_sparkplug': sparkplug ascending order
1532
+ - 'descending_sparkplug': sparkplug descending order
1533
+ - 'ascending_interleaved_siemens': Siemens-style interleaved ascending
1534
+ - 'descending_interleaved_siemens': Siemens-style interleaved descending
1535
+ - 'ascending_interleaved_philips': Philips-style interleaved ascending
1536
+ - 'descending_interleaved_philips': Philips-style interleaved descending
1537
+ tr : float, optional
1538
+ Repetition time in seconds (default is 1.0).
1539
+ multibandfac : int, optional
1540
+ Multiband factor (default is 1).
1541
+ debug : bool, optional
1542
+ If True, print debug information (default is False).
1543
+
1544
+ Returns
1545
+ -------
1546
+ NDArray | None
1547
+ Array of slice times in seconds. Returns None if an error occurs.
1548
+
1549
+ Notes
1550
+ -----
1551
+ The function computes slice acquisition times based on the specified slice order
1552
+ and multiband factor. It supports various slice ordering strategies commonly used
1553
+ in MRI pulse sequences.
1554
+
1555
+ Examples
1556
+ --------
1557
+ >>> times = makeslicetimes(32, 'ascending_interleaved', tr=2.0, multibandfac=2)
+ >>> times.shape
+ (32,)
+
+ >>> times = makeslicetimes(16, 'descending_sparkplug', multibandfac=2)
+ >>> times.shape
+ (16,)
1566
+ """
644
1567
  outlist = np.zeros((numslices), dtype=np.float64)
645
1568
  if (numslices % multibandfac) != 0:
646
1569
  print("ERROR: numslices is not evenly divisible by multband factor")
@@ -714,7 +1637,64 @@ def makeslicetimes(numslices, sliceordertype, tr=1.0, multibandfac=1, debug=Fals
714
1637
 
715
1638
 
716
1639
  # --------------------------- testing functions -------------------------------------------------
717
- def comparemap(map1, map2, mask=None, debug=False):
1640
+ def comparemap(
1641
+ map1: NDArray, map2: NDArray, mask: NDArray | None = None, debug: bool = False
1642
+ ) -> tuple[float, float, float, float, float, float, float, float]:
1643
+ """
1644
+ Compare two arrays (maps) and compute various difference statistics.
1645
+
1646
+ This function computes multiple metrics comparing two input arrays, `map1` and `map2`.
1647
+ It supports optional masking to focus comparisons on specific regions of the arrays.
1648
+ The function handles both 1D and multi-dimensional arrays, with support for different
1649
+ mask dimensions (either matching the map dimensions or one less).
1650
+
1651
+ Parameters
1652
+ ----------
1653
+ map1 : NDArray
1654
+ First input array to compare. Can be 1D or multi-dimensional.
1655
+ map2 : NDArray
1656
+ Second input array to compare. Must have the same shape as `map1`.
1657
+ mask : NDArray, optional
1658
+ A boolean or numeric mask to select valid voxels for comparison.
1659
+ If provided, its shape must either match `map1` or be one dimension smaller.
1660
+ If `None`, all voxels are compared.
1661
+ debug : bool, optional
1662
+ If True, print debug information during execution. Default is False.
1663
+
1664
+ Returns
1665
+ -------
1666
+ tuple of float
1667
+ A tuple containing the following statistics in order:
1668
+ - `mindiff`: Minimum absolute difference between `map1` and `map2`.
1669
+ - `maxdiff`: Maximum absolute difference between `map1` and `map2`.
1670
+ - `meandiff`: Mean absolute difference between `map1` and `map2`.
1671
+ - `mse`: Mean squared error between `map1` and `map2`.
1672
+ - `minreldiff`: Minimum relative difference (relative to `map1`).
1673
+ - `maxreldiff`: Maximum relative difference (relative to `map1`).
1674
+ - `meanreldiff`: Mean relative difference (relative to `map1`).
1675
+ - `relmse`: Mean squared relative error between `map1` and `map2`.
1676
+
1677
+ Notes
1678
+ -----
1679
+ - If `map1` contains zero values, relative differences are set to 0 to avoid division by zero.
1680
+ - When `mask` is provided and has one fewer dimension than `map1`, it is reshaped to match
1681
+ the first dimension of `map1` before comparison.
1682
+ - The function exits with an error if shapes are incompatible or if masks are not valid.
1683
+
1684
+ Examples
1685
+ --------
1686
+ >>> import numpy as np
1687
+ >>> map1 = np.array([1.0, 2.0, 3.0])
1688
+ >>> map2 = np.array([1.1, 2.2, 2.9])
1689
+ >>> result = comparemap(map1, map2)
1690
+ >>> len(result)
1691
+ 8
1692
+
1693
+ >>> mask = np.array([1, 1, 0])
1694
+ >>> result = comparemap(map1, map2, mask=mask)
1695
+ >>> len(result)
1696
+ 8
1697
+ """
718
1698
  ndims = len(map1.shape)
719
1699
  if debug:
720
1700
  print("map has", ndims, "axes")
@@ -780,7 +1760,54 @@ def comparemap(map1, map2, mask=None, debug=False):
780
1760
  return mindiff, maxdiff, meandiff, mse, minreldiff, maxreldiff, meanreldiff, relmse
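
The statistics returned above boil down to a handful of numpy reductions; the sketch below uses one plausible sign convention (differences as ``map2 - map1``, relative to ``map1``, zero voxels excluded from the relative terms) and may not match the packaged implementation exactly:

    import numpy as np

    map1 = np.array([1.0, 2.0, 3.0])
    map2 = np.array([1.1, 2.2, 2.9])
    diff = map2 - map1
    reldiff = np.divide(diff, map1, out=np.zeros_like(diff), where=(map1 != 0.0))
    print(diff.min(), diff.max(), diff.mean(), np.mean(diff ** 2))
    print(reldiff.min(), reldiff.max(), reldiff.mean(), np.mean(reldiff ** 2))
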
781
1761
 
782
1762
 
783
- def comparerapidtideruns(root1, root2, debug=False):
1763
+ def comparerapidtideruns(root1: str, root2: str, debug: bool = False) -> dict[str, Any]:
1764
+ """
1765
+ Compare results from two rapidtide runs by evaluating corresponding maps and timecourses.
1766
+
1767
+ This function compares NIfTI maps and text-based timecourses from two different rapidtide
1768
+ processing runs. It evaluates differences between the corresponding files using various
1769
+ statistical measures such as mean difference, max difference, mean squared error, and
1770
+ relative versions of these metrics.
1771
+
1772
+ Parameters
1773
+ ----------
1774
+ root1 : str
1775
+ The base filename (without extension) for the first rapidtide run.
1776
+ root2 : str
1777
+ The base filename (without extension) for the second rapidtide run.
1778
+ debug : bool, optional
1779
+ If True, print detailed debug information during execution. Default is False.
1780
+
1781
+ Returns
1782
+ -------
1783
+ dict[str, Any]
1784
+ A dictionary containing comparison results for each map and timecourse.
1785
+ Keys are map or timecourse names, and values are dictionaries with the following keys:
1786
+ - 'mindiff': minimum difference
1787
+ - 'maxdiff': maximum difference
1788
+ - 'meandiff': mean difference
1789
+ - 'mse': mean squared error
1790
+ - 'relmindiff': relative minimum difference
1791
+ - 'relmaxdiff': relative maximum difference
1792
+ - 'relmeandiff': relative mean difference
1793
+ - 'relmse': relative mean squared error
1794
+
1795
+ Notes
1796
+ -----
1797
+ - The function assumes that both runs have corresponding mask files named
1798
+ ``<root>_desc-corrfit_mask.nii.gz``.
1799
+ - For each map, the function checks if the corresponding NIfTI files exist and match
1800
+ in spatial dimensions.
1801
+ - For each timecourse, the function reads from JSON files and compares the time series
1802
+ only if both files are present and have matching lengths.
1803
+ - If spatial or temporal dimensions do not match, the function exits with an error.
1804
+
1805
+ Examples
1806
+ --------
1807
+ >>> results = comparerapidtideruns("run1", "run2", debug=True)
1808
+ >>> print(results["maxtime"])
1809
+ {'mindiff': -0.01, 'maxdiff': 0.02, 'meandiff': 0.005, 'mse': 0.0001, ...}
1810
+ """
784
1811
  results = {}
785
1812
  maskname1 = f"{root1}_desc-corrfit_mask.nii.gz"
786
1813
  (
@@ -908,7 +1935,50 @@ def comparerapidtideruns(root1, root2, debug=False):
908
1935
  return results
909
1936
 
910
1937
 
911
- def comparehappyruns(root1, root2, debug=False):
1938
+ def comparehappyruns(root1: str, root2: str, debug: bool = False) -> dict[str, Any]:
1939
+ """
1940
+ Compare results from two happy runs by comparing output maps and timecourses.
1941
+
1942
+ This function compares neuroimaging maps (e.g., app_info, vessels_mask) and
1943
+ cardiac timecourses (e.g., cardfromfmri_25.0Hz.txt) between two datasets
1944
+ identified by their root names. It performs spatial alignment checks and
1945
+ computes various statistical differences between corresponding files.
1946
+
1947
+ Parameters
1948
+ ----------
1949
+ root1 : str
1950
+ Root name of the first dataset (e.g., 'subject01_run1').
1951
+ root2 : str
1952
+ Root name of the second dataset (e.g., 'subject01_run2').
1953
+ debug : bool, optional
1954
+ If True, print debug information during execution. Default is False.
1955
+
1956
+ Returns
1957
+ -------
1958
+ dict[str, Any]
1959
+ A dictionary containing comparison results for each processed map and
1960
+ timecourse. Each entry includes:
1961
+ - mindiff: minimum absolute difference
1962
+ - maxdiff: maximum absolute difference
1963
+ - meandiff: mean absolute difference
1964
+ - mse: mean squared error
1965
+ - relmindiff: relative minimum difference
1966
+ - relmaxdiff: relative maximum difference
1967
+ - relmeandiff: relative mean difference
1968
+ - relmse: relative mean squared error
1969
+
1970
+ Notes
1971
+ -----
1972
+ - The function expects specific file naming conventions for both maps and
1973
+ timecourses.
1974
+ - Spatial dimensions of masks and data must match for comparison to proceed.
1975
+ - If any file is missing or mismatched, the function will exit with an error.
1976
+
1977
+ Examples
1978
+ --------
1979
+ >>> results = comparehappyruns('subject01_run1', 'subject01_run2', debug=True)
1980
+ >>> print(results['app_info']['meandiff'])
1981
+ """
912
1982
  results = {}
913
1983
  if debug:
914
1984
  print("comparehappyruns rootnames:", root1, root2)
@@ -1003,29 +2073,200 @@ def comparehappyruns(root1, root2, debug=False):
1003
2073
 
1004
2074
 
1005
2075
  # shared memory routines
1006
- def numpy2shared(inarray, thetype):
1007
- thesize = inarray.size
1008
- theshape = inarray.shape
1009
- if thetype == np.float64:
1010
- inarray_shared = mp.RawArray("d", inarray.reshape(thesize))
1011
- else:
1012
- inarray_shared = mp.RawArray("f", inarray.reshape(thesize))
1013
- inarray = np.frombuffer(inarray_shared, dtype=thetype, count=thesize)
1014
- inarray.shape = theshape
1015
- return inarray
2076
+ def numpy2shared(
2077
+ inarray: NDArray, theouttype: np.dtype, name: str | None = None
2078
+ ) -> tuple[NDArray, shared_memory.SharedMemory]:
2079
+ """
2080
+ Convert a numpy array to a shared memory array.
1016
2081
 
2082
+ This function creates a shared memory block and copies the data from the input
2083
+ numpy array to the shared memory array. The returned array and shared memory
2084
+ object can be used for inter-process communication or memory sharing.
1017
2085
 
1018
- def allocshared(theshape, thetype):
1019
- thesize = int(1)
1020
- if not isinstance(theshape, (list, tuple)):
1021
- thesize = theshape
1022
- else:
1023
- for element in theshape:
1024
- thesize *= int(element)
1025
- if thetype == np.float64:
1026
- outarray_shared = mp.RawArray("d", thesize)
2086
+ Parameters
2087
+ ----------
2088
+ inarray : NDArray
2089
+ Input numpy array to be converted to shared memory.
2090
+ theouttype : dtype
2091
+ Data type of the output shared memory array.
2092
+ name : str, optional
2093
+ Name hint for the shared memory block; currently unused, since the block is
2094
+ always created anonymously.
2095
+
2096
+ Returns
2097
+ -------
2098
+ tuple[NDArray, multiprocessing.shared_memory.SharedMemory]
2099
+ A tuple containing:
2100
+ - The shared memory array with the same shape as input array
2101
+ - The shared memory object that manages the memory block
2102
+
2103
+ Notes
2104
+ -----
2105
+ The shared memory block is unlinked immediately after creation, so it cannot
2106
+ be attached to by name from other processes; it remains valid for as long as
2107
+ the returned array and SharedMemory object are referenced, and is released
2108
+ once ``shm.close()`` has been called and all references are gone.
2109
+
2110
+ Examples
2111
+ --------
2112
+ >>> import numpy as np
2113
+ >>> from multiprocessing import shared_memory
2114
+ >>> arr = np.array([1, 2, 3, 4, 5])
2115
+ >>> shared_arr, shm = numpy2shared(arr, np.int32)
2116
+ >>> print(shared_arr)
2117
+ [1 2 3 4 5]
2118
+ >>> # Clean up when done
2119
+ >>> del shared_arr  # drop references to the buffer first
2120
+ >>> shm.close()
2121
+ >>> # no unlink() needed; the block was already unlinked inside numpy2shared
2122
+ """
2123
+ # Create a shared memory block to store the array data
2124
+ outnbytes = np.dtype(theouttype).itemsize * inarray.size
2125
+ shm = shared_memory.SharedMemory(name=None, create=True, size=outnbytes)
2126
+ shm.unlink()
2127
+ inarray_shared = np.ndarray(inarray.shape, dtype=theouttype, buffer=shm.buf)
2128
+ np.copyto(inarray_shared, inarray) # Copy data to shared memory array
2129
+ return inarray_shared, shm # Return both the array and the shared memory object
2130
+
2131
+
2132
+ def allocshared(
2133
+ theshape: tuple[int, ...], thetype: np.dtype, name: str | None = None
2134
+ ) -> tuple[NDArray, shared_memory.SharedMemory]:
2135
+ """
2136
+ Allocate shared memory for a numpy array.
2137
+
2138
+ This function creates a shared memory block and returns both the numpy array
2139
+ backed by this shared memory and the shared memory object itself. The array
2140
+ can be accessed from different processes, making it useful for inter-process
2141
+ communication.
2142
+
2143
+ Parameters
2144
+ ----------
2145
+ theshape : tuple of int
2146
+ The shape of the array to be created. Must be a tuple of integers.
2147
+ thetype : type
2148
+ The data type of the array elements. Can be any numpy-compatible dtype.
2149
+ name : str, optional
2150
+ Name hint for the shared memory block; currently unused, since a new anonymous
2151
+ block is always created. Default is None.
2152
+
2153
+ Returns
2154
+ -------
2155
+ tuple[NDArray, shared_memory.SharedMemory]
2156
+ A tuple containing:
2157
+ - The numpy array backed by shared memory
2158
+ - The shared_memory.SharedMemory object
2159
+
2160
+ Notes
2161
+ -----
2162
+ The shared memory block is unlinked at creation, so dropping array references and
2163
+ calling ``shm.close()`` is enough to free the resources. The array can be accessed
2164
+ from multiple processes, but care should be taken to avoid race conditions.
2165
+
2166
+ Examples
2167
+ --------
2168
+ >>> import numpy as np
2169
+ >>> from multiprocessing import shared_memory
2170
+ >>> arr, shm = allocshared((3, 4), np.float64)
2171
+ >>> arr[0, 0] = 42.0
2172
+ >>> print(arr)
2173
+ [[42. 0. 0. 0.]
2174
+ [ 0. 0. 0. 0.]
2175
+ [ 0. 0. 0. 0.]]
2176
+ >>> # Don't forget to clean up
2177
+ >>> shm.close()
2178
+ >>> # no unlink() needed; the block was already unlinked inside allocshared
2179
+ """
2180
+ # Calculate size based on shape
2181
+ thesize = np.prod(theshape)
2182
+ # Determine the data type size
2183
+ dtype_size = np.dtype(thetype).itemsize
2184
+ # Create a shared memory block of the required size
2185
+ shm = shared_memory.SharedMemory(name=None, create=True, size=thesize * dtype_size)
2186
+ shm.unlink()
2187
+ outarray = np.ndarray(theshape, dtype=thetype, buffer=shm.buf)
2188
+ return outarray, shm # Return both the array and the shared memory object
2189
+
2190
+
2191
+ def allocarray(
2192
+ theshape: tuple[int, ...], thetype: np.dtype, shared: bool = False, name: str | None = None
2193
+ ) -> tuple[NDArray, shared_memory.SharedMemory | None]:
2194
+ """
2195
+ Allocate and return a numpy array with specified shape and type.
2196
+
2197
+ Parameters
2198
+ ----------
2199
+ theshape : tuple[int, ...]
2200
+ Shape of the array to be allocated.
2201
+ thetype : type
2202
+ Data type of the array elements.
2203
+ shared : bool, optional
2204
+ If True, allocate the array in shared memory. Default is False.
2205
+ name : str | None, optional
2206
+ Name hint passed through to ``allocshared`` when ``shared=True``; currently unused. Default is None.
2207
+
2208
+ Returns
2209
+ -------
2210
+ tuple[NDArray, shared_memory.SharedMemory | None]
2211
+ A tuple containing:
2212
+ - The allocated numpy array filled with zeros
2213
+ - The shared memory object if shared=True, otherwise None
2214
+
2215
+ Notes
2216
+ -----
2217
+ When ``shared=True``, the function delegates to ``allocshared`` to create
2218
+ a shared memory array. Otherwise, it creates a regular numpy array using
2219
+ ``np.zeros`` with the specified shape and dtype.
2220
+
2221
+ Examples
2222
+ --------
2223
+ >>> import numpy as np
2224
+ >>> arr, shm = allocarray((3, 4), np.float64)
2225
+ >>> print(arr.shape)
2226
+ (3, 4)
2227
+ >>> print(arr.dtype)
2228
+ float64
2229
+
2230
+ >>> # For shared memory allocation
2231
+ >>> arr, shm = allocarray((2, 3), np.int32, shared=True, name="my_array")
2232
+ >>> print(shm is not None)
2233
+ True
2234
+ """
2235
+ if shared:
2236
+ return allocshared(theshape, thetype, name)
1027
2237
  else:
1028
- outarray_shared = mp.RawArray("f", thesize)
1029
- outarray = np.frombuffer(outarray_shared, dtype=thetype, count=thesize)
1030
- outarray.shape = theshape
1031
- return outarray, outarray_shared, theshape
2238
+ return np.zeros(theshape, dtype=thetype), None
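
A hedged sketch of why the shared-memory-backed arrays matter: with a fork-based multiprocessing start method (POSIX only), a child process writes into the same buffer the parent sees, and because the block is unlinked at creation the only cleanup needed is dropping references and calling ``close()``. This mirrors the allocation pattern above but is not taken from the package:

    import multiprocessing as mp
    from multiprocessing import shared_memory

    import numpy as np

    def fill_first(arr):
        arr[0] = 42.0                                # visible to the parent: the mapping is shared

    if __name__ == "__main__":
        mp.set_start_method("fork", force=True)      # fork so the child inherits the mapping
        shm = shared_memory.SharedMemory(create=True, size=4 * np.dtype(np.float64).itemsize)
        shm.unlink()                                 # same create-then-unlink pattern as above
        arr = np.ndarray((4,), dtype=np.float64, buffer=shm.buf)
        arr[:] = 0.0
        worker = mp.Process(target=fill_first, args=(arr,))
        worker.start()
        worker.join()
        print(arr)                                   # [42.  0.  0.  0.]
        del arr
        shm.close()
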
2239
+
2240
+
2241
+ def cleanup_shm(shm: shared_memory.SharedMemory | None) -> None:
2242
+ """
2243
+ Clean up shared memory object.
2244
+
2245
+ Parameters
2246
+ ----------
2247
+ shm : shared_memory.SharedMemory or None
2248
+ Shared memory object to clean up. If None, no action is taken.
2249
+
2250
+ Returns
2251
+ -------
2252
+ None
2253
+ This function does not return any value.
2254
+
2255
+ Notes
2256
+ -----
2257
+ In the current implementation this is a no-op: blocks created by
2258
+ ``numpy2shared`` and ``allocshared`` are unlinked at creation, so they are freed
2259
+ once the last reference to them is closed. The explicit close/unlink calls are kept below, commented out, for reference.
2260
+
2261
+ Examples
2262
+ --------
2263
+ >>> from multiprocessing import shared_memory
2264
+ >>> shm = shared_memory.SharedMemory(create=True, size=1024)
2265
+ >>> cleanup_shm(shm)
2266
+ >>> # currently a no-op; see Notes
2267
+ """
2268
+ # Cleanup
2269
+ pass
2270
+ # if shm is not None:
2271
+ # shm.close()
2272
+ # shm.unlink()