rapidtide-2.9.5-py3-none-any.whl → rapidtide-3.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cloud/gmscalc-HCPYA +1 -1
- cloud/mount-and-run +2 -0
- cloud/rapidtide-HCPYA +3 -3
- rapidtide/Colortables.py +538 -38
- rapidtide/OrthoImageItem.py +1094 -51
- rapidtide/RapidtideDataset.py +1709 -114
- rapidtide/__init__.py +0 -8
- rapidtide/_version.py +4 -4
- rapidtide/calccoherence.py +242 -97
- rapidtide/calcnullsimfunc.py +240 -140
- rapidtide/calcsimfunc.py +314 -129
- rapidtide/correlate.py +1211 -389
- rapidtide/data/examples/src/testLD +56 -0
- rapidtide/data/examples/src/test_findmaxlag.py +2 -2
- rapidtide/data/examples/src/test_mlregressallt.py +32 -17
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testatlasaverage +35 -7
- rapidtide/data/examples/src/testboth +21 -0
- rapidtide/data/examples/src/testcifti +11 -0
- rapidtide/data/examples/src/testdelayvar +13 -0
- rapidtide/data/examples/src/testdlfilt +25 -0
- rapidtide/data/examples/src/testfft +35 -0
- rapidtide/data/examples/src/testfileorfloat +37 -0
- rapidtide/data/examples/src/testfmri +94 -27
- rapidtide/data/examples/src/testfuncs +3 -3
- rapidtide/data/examples/src/testglmfilt +8 -6
- rapidtide/data/examples/src/testhappy +84 -51
- rapidtide/data/examples/src/testinitdelay +19 -0
- rapidtide/data/examples/src/testmodels +33 -0
- rapidtide/data/examples/src/testnewrefine +26 -0
- rapidtide/data/examples/src/testnoiseamp +21 -0
- rapidtide/data/examples/src/testppgproc +17 -0
- rapidtide/data/examples/src/testrefineonly +22 -0
- rapidtide/data/examples/src/testretro +26 -13
- rapidtide/data/examples/src/testretrolagtcs +16 -0
- rapidtide/data/examples/src/testrolloff +11 -0
- rapidtide/data/examples/src/testsimdata +45 -28
- rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
- rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
- rapidtide/data/models/model_cnn_pytorch_fulldata/loss.png +0 -0
- rapidtide/data/models/model_cnn_pytorch_fulldata/loss.txt +1 -0
- rapidtide/data/models/model_cnn_pytorch_fulldata/model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch_fulldata/model_meta.json +80 -0
- rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.png +0 -0
- rapidtide/data/models/model_cnnbp_pytorch_fullldata/loss.txt +1 -0
- rapidtide/data/models/model_cnnbp_pytorch_fullldata/model.pth +0 -0
- rapidtide/data/models/model_cnnbp_pytorch_fullldata/model_meta.json +138 -0
- rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.png +0 -0
- rapidtide/data/models/model_cnnfft_pytorch_fulldata/loss.txt +1 -0
- rapidtide/data/models/model_cnnfft_pytorch_fulldata/model.pth +0 -0
- rapidtide/data/models/model_cnnfft_pytorch_fulldata/model_meta.json +128 -0
- rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.png +0 -0
- rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/loss.txt +1 -0
- rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model.pth +0 -0
- rapidtide/data/models/model_ppgattention_pytorch_w128_fulldata/model_meta.json +49 -0
- rapidtide/data/models/model_revised_tf2/model.keras +0 -0
- rapidtide/data/models/{model_serdar → model_revised_tf2}/model_meta.json +1 -1
- rapidtide/data/models/model_serdar2_tf2/model.keras +0 -0
- rapidtide/data/models/{model_serdar2 → model_serdar2_tf2}/model_meta.json +1 -1
- rapidtide/data/models/model_serdar_tf2/model.keras +0 -0
- rapidtide/data/models/{model_revised → model_serdar_tf2}/model_meta.json +1 -1
- rapidtide/data/reference/HCP1200v2_MTT_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_binmask_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_csf_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_gray_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_graylaghist.json +7 -0
- rapidtide/data/reference/HCP1200v2_graylaghist.tsv.gz +0 -0
- rapidtide/data/reference/HCP1200v2_laghist.json +7 -0
- rapidtide/data/reference/HCP1200v2_laghist.tsv.gz +0 -0
- rapidtide/data/reference/HCP1200v2_mask_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_maxcorr_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_maxtime_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_maxwidth_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_negmask_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_timepercentile_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_white_2mm.nii.gz +0 -0
- rapidtide/data/reference/HCP1200v2_whitelaghist.json +7 -0
- rapidtide/data/reference/HCP1200v2_whitelaghist.tsv.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2.xml +131 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_regions.txt +60 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1-seg2_space-MNI152NLin6Asym_2mm.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL2_space-MNI152NLin6Asym_2mm_mask.nii.gz +0 -0
- rapidtide/data/reference/MNI152_T1_1mm_Brain_FAST_seg.nii.gz +0 -0
- rapidtide/data/reference/MNI152_T1_1mm_Brain_Mask.nii.gz +0 -0
- rapidtide/data/reference/MNI152_T1_2mm_Brain_FAST_seg.nii.gz +0 -0
- rapidtide/data/reference/MNI152_T1_2mm_Brain_Mask.nii.gz +0 -0
- rapidtide/decorators.py +91 -0
- rapidtide/dlfilter.py +2553 -414
- rapidtide/dlfiltertorch.py +5201 -0
- rapidtide/externaltools.py +328 -13
- rapidtide/fMRIData_class.py +178 -0
- rapidtide/ffttools.py +168 -0
- rapidtide/filter.py +2704 -1462
- rapidtide/fit.py +2361 -579
- rapidtide/genericmultiproc.py +197 -0
- rapidtide/happy_supportfuncs.py +3255 -548
- rapidtide/helper_classes.py +590 -1181
- rapidtide/io.py +2569 -468
- rapidtide/linfitfiltpass.py +784 -0
- rapidtide/makelaggedtcs.py +267 -97
- rapidtide/maskutil.py +555 -25
- rapidtide/miscmath.py +867 -137
- rapidtide/multiproc.py +217 -44
- rapidtide/patchmatch.py +752 -0
- rapidtide/peakeval.py +32 -32
- rapidtide/ppgproc.py +2205 -0
- rapidtide/qualitycheck.py +353 -40
- rapidtide/refinedelay.py +854 -0
- rapidtide/refineregressor.py +939 -0
- rapidtide/resample.py +725 -204
- rapidtide/scripts/__init__.py +1 -0
- rapidtide/scripts/{adjustoffset → adjustoffset.py} +7 -2
- rapidtide/scripts/{aligntcs → aligntcs.py} +7 -2
- rapidtide/scripts/{applydlfilter → applydlfilter.py} +7 -2
- rapidtide/scripts/applyppgproc.py +28 -0
- rapidtide/scripts/{atlasaverage → atlasaverage.py} +7 -2
- rapidtide/scripts/{atlastool → atlastool.py} +7 -2
- rapidtide/scripts/{calcicc → calcicc.py} +7 -2
- rapidtide/scripts/{calctexticc → calctexticc.py} +7 -2
- rapidtide/scripts/{calcttest → calcttest.py} +7 -2
- rapidtide/scripts/{ccorrica → ccorrica.py} +7 -2
- rapidtide/scripts/delayvar.py +28 -0
- rapidtide/scripts/{diffrois → diffrois.py} +7 -2
- rapidtide/scripts/{endtidalproc → endtidalproc.py} +7 -2
- rapidtide/scripts/{fdica → fdica.py} +7 -2
- rapidtide/scripts/{filtnifti → filtnifti.py} +7 -2
- rapidtide/scripts/{filttc → filttc.py} +7 -2
- rapidtide/scripts/{fingerprint → fingerprint.py} +20 -16
- rapidtide/scripts/{fixtr → fixtr.py} +7 -2
- rapidtide/scripts/{gmscalc → gmscalc.py} +7 -2
- rapidtide/scripts/{happy → happy.py} +7 -2
- rapidtide/scripts/{happy2std → happy2std.py} +7 -2
- rapidtide/scripts/{happywarp → happywarp.py} +8 -4
- rapidtide/scripts/{histnifti → histnifti.py} +7 -2
- rapidtide/scripts/{histtc → histtc.py} +7 -2
- rapidtide/scripts/{glmfilt → linfitfilt.py} +7 -4
- rapidtide/scripts/{localflow → localflow.py} +7 -2
- rapidtide/scripts/{mergequality → mergequality.py} +7 -2
- rapidtide/scripts/{pairproc → pairproc.py} +7 -2
- rapidtide/scripts/{pairwisemergenifti → pairwisemergenifti.py} +7 -2
- rapidtide/scripts/{physiofreq → physiofreq.py} +7 -2
- rapidtide/scripts/{pixelcomp → pixelcomp.py} +7 -2
- rapidtide/scripts/{plethquality → plethquality.py} +7 -2
- rapidtide/scripts/{polyfitim → polyfitim.py} +7 -2
- rapidtide/scripts/{proj2flow → proj2flow.py} +7 -2
- rapidtide/scripts/{rankimage → rankimage.py} +7 -2
- rapidtide/scripts/{rapidtide → rapidtide.py} +7 -2
- rapidtide/scripts/{rapidtide2std → rapidtide2std.py} +7 -2
- rapidtide/scripts/{resamplenifti → resamplenifti.py} +7 -2
- rapidtide/scripts/{resampletc → resampletc.py} +7 -2
- rapidtide/scripts/retrolagtcs.py +28 -0
- rapidtide/scripts/retroregress.py +28 -0
- rapidtide/scripts/{roisummarize → roisummarize.py} +7 -2
- rapidtide/scripts/{runqualitycheck → runqualitycheck.py} +7 -2
- rapidtide/scripts/{showarbcorr → showarbcorr.py} +7 -2
- rapidtide/scripts/{showhist → showhist.py} +7 -2
- rapidtide/scripts/{showstxcorr → showstxcorr.py} +7 -2
- rapidtide/scripts/{showtc → showtc.py} +7 -2
- rapidtide/scripts/{showxcorr_legacy → showxcorr_legacy.py} +8 -8
- rapidtide/scripts/{showxcorrx → showxcorrx.py} +7 -2
- rapidtide/scripts/{showxy → showxy.py} +7 -2
- rapidtide/scripts/{simdata → simdata.py} +7 -2
- rapidtide/scripts/{spatialdecomp → spatialdecomp.py} +7 -2
- rapidtide/scripts/{spatialfit → spatialfit.py} +7 -2
- rapidtide/scripts/{spatialmi → spatialmi.py} +7 -2
- rapidtide/scripts/{spectrogram → spectrogram.py} +7 -2
- rapidtide/scripts/stupidramtricks.py +238 -0
- rapidtide/scripts/{synthASL → synthASL.py} +7 -2
- rapidtide/scripts/{tcfrom2col → tcfrom2col.py} +7 -2
- rapidtide/scripts/{tcfrom3col → tcfrom3col.py} +7 -2
- rapidtide/scripts/{temporaldecomp → temporaldecomp.py} +7 -2
- rapidtide/scripts/{testhrv → testhrv.py} +1 -1
- rapidtide/scripts/{threeD → threeD.py} +7 -2
- rapidtide/scripts/{tidepool → tidepool.py} +7 -2
- rapidtide/scripts/{variabilityizer → variabilityizer.py} +7 -2
- rapidtide/simFuncClasses.py +2113 -0
- rapidtide/simfuncfit.py +312 -108
- rapidtide/stats.py +579 -247
- rapidtide/tests/.coveragerc +27 -6
- rapidtide-2.9.5.data/scripts/fdica → rapidtide/tests/cleanposttest +4 -6
- rapidtide/tests/happycomp +9 -0
- rapidtide/tests/resethappytargets +1 -1
- rapidtide/tests/resetrapidtidetargets +1 -1
- rapidtide/tests/resettargets +1 -1
- rapidtide/tests/runlocaltest +3 -3
- rapidtide/tests/showkernels +1 -1
- rapidtide/tests/test_aliasedcorrelate.py +4 -4
- rapidtide/tests/test_aligntcs.py +1 -1
- rapidtide/tests/test_calcicc.py +1 -1
- rapidtide/tests/test_cleanregressor.py +184 -0
- rapidtide/tests/test_congrid.py +70 -81
- rapidtide/tests/test_correlate.py +1 -1
- rapidtide/tests/test_corrpass.py +4 -4
- rapidtide/tests/test_delayestimation.py +54 -59
- rapidtide/tests/test_dlfiltertorch.py +437 -0
- rapidtide/tests/test_doresample.py +2 -2
- rapidtide/tests/test_externaltools.py +69 -0
- rapidtide/tests/test_fastresampler.py +9 -5
- rapidtide/tests/test_filter.py +96 -57
- rapidtide/tests/test_findmaxlag.py +50 -19
- rapidtide/tests/test_fullrunhappy_v1.py +15 -10
- rapidtide/tests/test_fullrunhappy_v2.py +19 -13
- rapidtide/tests/test_fullrunhappy_v3.py +28 -13
- rapidtide/tests/test_fullrunhappy_v4.py +30 -11
- rapidtide/tests/test_fullrunhappy_v5.py +62 -0
- rapidtide/tests/test_fullrunrapidtide_v1.py +61 -7
- rapidtide/tests/test_fullrunrapidtide_v2.py +27 -15
- rapidtide/tests/test_fullrunrapidtide_v3.py +28 -8
- rapidtide/tests/test_fullrunrapidtide_v4.py +16 -8
- rapidtide/tests/test_fullrunrapidtide_v5.py +15 -6
- rapidtide/tests/test_fullrunrapidtide_v6.py +142 -0
- rapidtide/tests/test_fullrunrapidtide_v7.py +114 -0
- rapidtide/tests/test_fullrunrapidtide_v8.py +66 -0
- rapidtide/tests/test_getparsers.py +158 -0
- rapidtide/tests/test_io.py +59 -18
- rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +10 -10
- rapidtide/tests/test_mi.py +1 -1
- rapidtide/tests/test_miscmath.py +1 -1
- rapidtide/tests/test_motionregress.py +5 -5
- rapidtide/tests/test_nullcorr.py +6 -9
- rapidtide/tests/test_padvec.py +216 -0
- rapidtide/tests/test_parserfuncs.py +101 -0
- rapidtide/tests/test_phaseanalysis.py +1 -1
- rapidtide/tests/test_rapidtideparser.py +59 -53
- rapidtide/tests/test_refinedelay.py +296 -0
- rapidtide/tests/test_runmisc.py +5 -5
- rapidtide/tests/test_sharedmem.py +60 -0
- rapidtide/tests/test_simroundtrip.py +132 -0
- rapidtide/tests/test_simulate.py +1 -1
- rapidtide/tests/test_stcorrelate.py +4 -2
- rapidtide/tests/test_timeshift.py +2 -2
- rapidtide/tests/test_valtoindex.py +1 -1
- rapidtide/tests/test_zRapidtideDataset.py +5 -3
- rapidtide/tests/utils.py +10 -9
- rapidtide/tidepoolTemplate.py +88 -70
- rapidtide/tidepoolTemplate.ui +60 -46
- rapidtide/tidepoolTemplate_alt.py +88 -53
- rapidtide/tidepoolTemplate_alt.ui +62 -52
- rapidtide/tidepoolTemplate_alt_qt6.py +921 -0
- rapidtide/tidepoolTemplate_big.py +1125 -0
- rapidtide/tidepoolTemplate_big.ui +2386 -0
- rapidtide/tidepoolTemplate_big_qt6.py +1129 -0
- rapidtide/tidepoolTemplate_qt6.py +793 -0
- rapidtide/util.py +1389 -148
- rapidtide/voxelData.py +1048 -0
- rapidtide/wiener.py +138 -25
- rapidtide/wiener2.py +114 -8
- rapidtide/workflows/adjustoffset.py +107 -5
- rapidtide/workflows/aligntcs.py +86 -3
- rapidtide/workflows/applydlfilter.py +231 -89
- rapidtide/workflows/applyppgproc.py +540 -0
- rapidtide/workflows/atlasaverage.py +309 -48
- rapidtide/workflows/atlastool.py +130 -9
- rapidtide/workflows/calcSimFuncMap.py +490 -0
- rapidtide/workflows/calctexticc.py +202 -10
- rapidtide/workflows/ccorrica.py +123 -15
- rapidtide/workflows/cleanregressor.py +415 -0
- rapidtide/workflows/delayvar.py +1268 -0
- rapidtide/workflows/diffrois.py +84 -6
- rapidtide/workflows/endtidalproc.py +149 -9
- rapidtide/workflows/fdica.py +197 -17
- rapidtide/workflows/filtnifti.py +71 -4
- rapidtide/workflows/filttc.py +76 -5
- rapidtide/workflows/fitSimFuncMap.py +578 -0
- rapidtide/workflows/fixtr.py +74 -4
- rapidtide/workflows/gmscalc.py +116 -6
- rapidtide/workflows/happy.py +1242 -480
- rapidtide/workflows/happy2std.py +145 -13
- rapidtide/workflows/happy_parser.py +277 -59
- rapidtide/workflows/histnifti.py +120 -4
- rapidtide/workflows/histtc.py +85 -4
- rapidtide/workflows/{glmfilt.py → linfitfilt.py} +128 -14
- rapidtide/workflows/localflow.py +329 -29
- rapidtide/workflows/mergequality.py +80 -4
- rapidtide/workflows/niftidecomp.py +323 -19
- rapidtide/workflows/niftistats.py +178 -8
- rapidtide/workflows/pairproc.py +99 -5
- rapidtide/workflows/pairwisemergenifti.py +86 -3
- rapidtide/workflows/parser_funcs.py +1488 -56
- rapidtide/workflows/physiofreq.py +139 -12
- rapidtide/workflows/pixelcomp.py +211 -9
- rapidtide/workflows/plethquality.py +105 -23
- rapidtide/workflows/polyfitim.py +159 -19
- rapidtide/workflows/proj2flow.py +76 -3
- rapidtide/workflows/rankimage.py +115 -8
- rapidtide/workflows/rapidtide.py +1833 -1919
- rapidtide/workflows/rapidtide2std.py +101 -3
- rapidtide/workflows/rapidtide_parser.py +607 -372
- rapidtide/workflows/refineDelayMap.py +249 -0
- rapidtide/workflows/refineRegressor.py +1215 -0
- rapidtide/workflows/regressfrommaps.py +308 -0
- rapidtide/workflows/resamplenifti.py +86 -4
- rapidtide/workflows/resampletc.py +92 -4
- rapidtide/workflows/retrolagtcs.py +442 -0
- rapidtide/workflows/retroregress.py +1501 -0
- rapidtide/workflows/roisummarize.py +176 -7
- rapidtide/workflows/runqualitycheck.py +72 -7
- rapidtide/workflows/showarbcorr.py +172 -16
- rapidtide/workflows/showhist.py +87 -3
- rapidtide/workflows/showstxcorr.py +161 -4
- rapidtide/workflows/showtc.py +172 -10
- rapidtide/workflows/showxcorrx.py +250 -62
- rapidtide/workflows/showxy.py +186 -16
- rapidtide/workflows/simdata.py +418 -112
- rapidtide/workflows/spatialfit.py +83 -8
- rapidtide/workflows/spatialmi.py +252 -29
- rapidtide/workflows/spectrogram.py +306 -33
- rapidtide/workflows/synthASL.py +157 -6
- rapidtide/workflows/tcfrom2col.py +77 -3
- rapidtide/workflows/tcfrom3col.py +75 -3
- rapidtide/workflows/tidepool.py +3829 -666
- rapidtide/workflows/utils.py +45 -19
- rapidtide/workflows/utils_doc.py +293 -0
- rapidtide/workflows/variabilityizer.py +118 -5
- {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/METADATA +30 -223
- rapidtide-3.1.3.dist-info/RECORD +393 -0
- {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info}/WHEEL +1 -1
- rapidtide-3.1.3.dist-info/entry_points.txt +65 -0
- rapidtide-3.1.3.dist-info/top_level.txt +2 -0
- rapidtide/calcandfitcorrpairs.py +0 -262
- rapidtide/data/examples/src/testoutputsize +0 -45
- rapidtide/data/models/model_revised/model.h5 +0 -0
- rapidtide/data/models/model_serdar/model.h5 +0 -0
- rapidtide/data/models/model_serdar2/model.h5 +0 -0
- rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm.nii.gz +0 -0
- rapidtide/data/reference/ASPECTS_nlin_asym_09c_2mm_mask.nii.gz +0 -0
- rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm.nii.gz +0 -0
- rapidtide/data/reference/ATTbasedFlowTerritories_split_nlin_asym_09c_2mm_mask.nii.gz +0 -0
- rapidtide/data/reference/HCP1200_binmask_2mm_2009c_asym.nii.gz +0 -0
- rapidtide/data/reference/HCP1200_lag_2mm_2009c_asym.nii.gz +0 -0
- rapidtide/data/reference/HCP1200_mask_2mm_2009c_asym.nii.gz +0 -0
- rapidtide/data/reference/HCP1200_negmask_2mm_2009c_asym.nii.gz +0 -0
- rapidtide/data/reference/HCP1200_sigma_2mm_2009c_asym.nii.gz +0 -0
- rapidtide/data/reference/HCP1200_strength_2mm_2009c_asym.nii.gz +0 -0
- rapidtide/glmpass.py +0 -434
- rapidtide/refine_factored.py +0 -641
- rapidtide/scripts/retroglm +0 -23
- rapidtide/workflows/glmfrommaps.py +0 -202
- rapidtide/workflows/retroglm.py +0 -643
- rapidtide-2.9.5.data/scripts/adjustoffset +0 -23
- rapidtide-2.9.5.data/scripts/aligntcs +0 -23
- rapidtide-2.9.5.data/scripts/applydlfilter +0 -23
- rapidtide-2.9.5.data/scripts/atlasaverage +0 -23
- rapidtide-2.9.5.data/scripts/atlastool +0 -23
- rapidtide-2.9.5.data/scripts/calcicc +0 -22
- rapidtide-2.9.5.data/scripts/calctexticc +0 -23
- rapidtide-2.9.5.data/scripts/calcttest +0 -22
- rapidtide-2.9.5.data/scripts/ccorrica +0 -23
- rapidtide-2.9.5.data/scripts/diffrois +0 -23
- rapidtide-2.9.5.data/scripts/endtidalproc +0 -23
- rapidtide-2.9.5.data/scripts/filtnifti +0 -23
- rapidtide-2.9.5.data/scripts/filttc +0 -23
- rapidtide-2.9.5.data/scripts/fingerprint +0 -593
- rapidtide-2.9.5.data/scripts/fixtr +0 -23
- rapidtide-2.9.5.data/scripts/glmfilt +0 -24
- rapidtide-2.9.5.data/scripts/gmscalc +0 -22
- rapidtide-2.9.5.data/scripts/happy +0 -25
- rapidtide-2.9.5.data/scripts/happy2std +0 -23
- rapidtide-2.9.5.data/scripts/happywarp +0 -350
- rapidtide-2.9.5.data/scripts/histnifti +0 -23
- rapidtide-2.9.5.data/scripts/histtc +0 -23
- rapidtide-2.9.5.data/scripts/localflow +0 -23
- rapidtide-2.9.5.data/scripts/mergequality +0 -23
- rapidtide-2.9.5.data/scripts/pairproc +0 -23
- rapidtide-2.9.5.data/scripts/pairwisemergenifti +0 -23
- rapidtide-2.9.5.data/scripts/physiofreq +0 -23
- rapidtide-2.9.5.data/scripts/pixelcomp +0 -23
- rapidtide-2.9.5.data/scripts/plethquality +0 -23
- rapidtide-2.9.5.data/scripts/polyfitim +0 -23
- rapidtide-2.9.5.data/scripts/proj2flow +0 -23
- rapidtide-2.9.5.data/scripts/rankimage +0 -23
- rapidtide-2.9.5.data/scripts/rapidtide +0 -23
- rapidtide-2.9.5.data/scripts/rapidtide2std +0 -23
- rapidtide-2.9.5.data/scripts/resamplenifti +0 -23
- rapidtide-2.9.5.data/scripts/resampletc +0 -23
- rapidtide-2.9.5.data/scripts/retroglm +0 -23
- rapidtide-2.9.5.data/scripts/roisummarize +0 -23
- rapidtide-2.9.5.data/scripts/runqualitycheck +0 -23
- rapidtide-2.9.5.data/scripts/showarbcorr +0 -23
- rapidtide-2.9.5.data/scripts/showhist +0 -23
- rapidtide-2.9.5.data/scripts/showstxcorr +0 -23
- rapidtide-2.9.5.data/scripts/showtc +0 -23
- rapidtide-2.9.5.data/scripts/showxcorr_legacy +0 -536
- rapidtide-2.9.5.data/scripts/showxcorrx +0 -23
- rapidtide-2.9.5.data/scripts/showxy +0 -23
- rapidtide-2.9.5.data/scripts/simdata +0 -23
- rapidtide-2.9.5.data/scripts/spatialdecomp +0 -23
- rapidtide-2.9.5.data/scripts/spatialfit +0 -23
- rapidtide-2.9.5.data/scripts/spatialmi +0 -23
- rapidtide-2.9.5.data/scripts/spectrogram +0 -23
- rapidtide-2.9.5.data/scripts/synthASL +0 -23
- rapidtide-2.9.5.data/scripts/tcfrom2col +0 -23
- rapidtide-2.9.5.data/scripts/tcfrom3col +0 -23
- rapidtide-2.9.5.data/scripts/temporaldecomp +0 -23
- rapidtide-2.9.5.data/scripts/threeD +0 -236
- rapidtide-2.9.5.data/scripts/tidepool +0 -23
- rapidtide-2.9.5.data/scripts/variabilityizer +0 -23
- rapidtide-2.9.5.dist-info/RECORD +0 -357
- rapidtide-2.9.5.dist-info/top_level.txt +0 -86
- {rapidtide-2.9.5.dist-info → rapidtide-3.1.3.dist-info/licenses}/LICENSE +0 -0
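
A pattern worth noting in the listing above: every `rapidtide-2.9.5.data/scripts/*` stub is removed, every `rapidtide/scripts/{name → name.py}` entry becomes a real importable module, and the new wheel adds `rapidtide-3.1.3.dist-info/entry_points.txt`. Together these indicate the command-line tools moved from legacy data-directory scripts to console-script entry points. A hypothetical sketch of what such an `entry_points.txt` stanza looks like (the exact target callables are not visible in this diff and are illustrative only):

[console_scripts]
rapidtide = rapidtide.scripts.rapidtide:entrypoint
happy = rapidtide.scripts.happy:entrypoint
tidepool = rapidtide.scripts.tidepool:entrypoint

With entry points, pip generates the platform-appropriate launchers at install time, which is why the per-tool wrapper scripts that 2.9.5 shipped could be dropped.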
rapidtide/io.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
#!/usr/bin/env python
|
|
2
2
|
# -*- coding: utf-8 -*-
|
|
3
3
|
#
|
|
4
|
-
# Copyright 2016-
|
|
4
|
+
# Copyright 2016-2025 Blaise Frederick
|
|
5
5
|
#
|
|
6
6
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
7
7
|
# you may not use this file except in compliance with the License.
|
|
@@ -22,29 +22,56 @@ import operator as op
|
|
|
22
22
|
import os
|
|
23
23
|
import platform
|
|
24
24
|
import sys
|
|
25
|
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
|
25
26
|
|
|
26
27
|
import nibabel as nib
|
|
27
28
|
import numpy as np
|
|
28
29
|
import pandas as pd
|
|
30
|
+
from numpy.typing import NDArray
|
|
31
|
+
|
|
32
|
+
from rapidtide.tests.utils import mse
|
|
29
33
|
|
|
30
34
|
|
|
31
35
|
# ---------------------------------------- NIFTI file manipulation ---------------------------
|
|
32
|
-
def readfromnifti(
|
|
33
|
-
|
|
36
|
+
def readfromnifti(
|
|
37
|
+
inputfile: str, headeronly: bool = False
|
|
38
|
+
) -> Tuple[Any, Optional[NDArray], Any, NDArray, NDArray]:
|
|
39
|
+
"""
|
|
40
|
+
Open a nifti file and read in the various important parts
|
|
34
41
|
|
|
35
42
|
Parameters
|
|
36
43
|
----------
|
|
37
44
|
inputfile : str
|
|
38
|
-
The name of the nifti file.
|
|
45
|
+
The name of the nifti file. Can be provided with or without file extension
|
|
46
|
+
(.nii or .nii.gz).
|
|
47
|
+
headeronly : bool, optional
|
|
48
|
+
If True, only read the header without loading data. Default is False.
|
|
39
49
|
|
|
40
50
|
Returns
|
|
41
51
|
-------
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
52
|
+
tuple
|
|
53
|
+
A tuple containing:
|
|
54
|
+
|
|
55
|
+
- nim : nifti image structure
|
|
56
|
+
- nim_data : array-like or None
|
|
57
|
+
The image data if headeronly=False, None otherwise
|
|
58
|
+
- nim_hdr : nifti header
|
|
59
|
+
The header information copied from the nifti image
|
|
60
|
+
- thedims : int array
|
|
61
|
+
The dimensions from the nifti header
|
|
62
|
+
- thesizes : float array
|
|
63
|
+
The pixel dimensions from the nifti header
|
|
64
|
+
|
|
65
|
+
Notes
|
|
66
|
+
-----
|
|
67
|
+
This function automatically detects the file extension (.nii or .nii.gz) if
|
|
68
|
+
not provided in the inputfile parameter. If neither .nii nor .nii.gz extension
|
|
69
|
+
is found, it will look for the file with these extensions in order.
|
|
70
|
+
|
|
71
|
+
Examples
|
|
72
|
+
--------
|
|
73
|
+
>>> nim, data, hdr, dims, sizes = readfromnifti('my_image')
|
|
74
|
+
>>> nim, data, hdr, dims, sizes = readfromnifti('my_image.nii.gz', headeronly=True)
|
|
48
75
|
"""
|
|
49
76
|
if os.path.isfile(inputfile):
|
|
50
77
|
inputfilename = inputfile
|
|
@@ -55,20 +82,28 @@ def readfromnifti(inputfile):
|
|
|
55
82
|
else:
|
|
56
83
|
raise FileNotFoundError(f"nifti file {inputfile} does not exist")
|
|
57
84
|
nim = nib.load(inputfilename)
|
|
58
|
-
|
|
85
|
+
if headeronly:
|
|
86
|
+
nim_data = None
|
|
87
|
+
else:
|
|
88
|
+
nim_data = nim.get_fdata()
|
|
59
89
|
nim_hdr = nim.header.copy()
|
|
60
90
|
thedims = nim_hdr["dim"].copy()
|
|
61
91
|
thesizes = nim_hdr["pixdim"].copy()
|
|
62
92
|
return nim, nim_data, nim_hdr, thedims, thesizes
|
|
63
93
|
|
|
64
94
|
|
|
65
|
-
def readfromcifti(
|
|
66
|
-
|
|
95
|
+
def readfromcifti(
|
|
96
|
+
inputfile: str, debug: bool = False
|
|
97
|
+
) -> Tuple[Any, Any, NDArray, Any, NDArray, NDArray, Optional[float]]:
|
|
98
|
+
"""
|
|
99
|
+
Open a cifti file and read in the various important parts
|
|
67
100
|
|
|
68
101
|
Parameters
|
|
69
102
|
----------
|
|
70
103
|
inputfile : str
|
|
71
104
|
The name of the cifti file.
|
|
105
|
+
debug : bool, optional
|
|
106
|
+
Enable debug output. Default is False
|
|
72
107
|
|
|
73
108
|
Returns
|
|
74
109
|
-------
|
|
@@ -105,7 +140,52 @@ def readfromcifti(inputfile, debug=False):
|
|
|
105
140
|
return cifti, cifti_hdr, nifti_data, nifti_hdr, thedims, thesizes, timestep
|
|
106
141
|
|
|
107
142
|
|
|
108
|
-
def getciftitr(cifti_hdr):
|
|
143
|
+
def getciftitr(cifti_hdr: Any) -> Tuple[float, float]:
|
|
144
|
+
"""
|
|
145
|
+
Extract the TR (repetition time) from a CIFTI header.
|
|
146
|
+
|
|
147
|
+
This function extracts timing information from a CIFTI header, specifically
|
|
148
|
+
the time between timepoints (TR) and the start time of the first timepoint.
|
|
149
|
+
It searches for a SeriesAxis in the CIFTI header matrix to extract this
|
|
150
|
+
information.
|
|
151
|
+
|
|
152
|
+
Parameters
|
|
153
|
+
----------
|
|
154
|
+
cifti_hdr : Any
|
|
155
|
+
The CIFTI header object containing timing information. This should be
|
|
156
|
+
a valid CIFTI header that supports the matrix.mapped_indices and
|
|
157
|
+
matrix.get_axis methods.
|
|
158
|
+
|
|
159
|
+
Returns
|
|
160
|
+
-------
|
|
161
|
+
tuple of (float, float)
|
|
162
|
+
A tuple containing:
|
|
163
|
+
- timestep : float
|
|
164
|
+
The TR (time between timepoints) in seconds
|
|
165
|
+
- starttime : float
|
|
166
|
+
The start time of the first timepoint in seconds
|
|
167
|
+
|
|
168
|
+
Raises
|
|
169
|
+
------
|
|
170
|
+
SystemExit
|
|
171
|
+
If no SeriesAxis is found in the CIFTI header, the function will
|
|
172
|
+
print an error message and exit the program.
|
|
173
|
+
|
|
174
|
+
Notes
|
|
175
|
+
-----
|
|
176
|
+
The function specifically looks for a SeriesAxis in the CIFTI header's
|
|
177
|
+
matrix. If multiple SeriesAxes exist, only the first one encountered
|
|
178
|
+
will be used. The timing information is extracted using the get_element()
|
|
179
|
+
method on the SeriesAxis object.
|
|
180
|
+
|
|
181
|
+
Examples
|
|
182
|
+
--------
|
|
183
|
+
>>> import nibabel as nib
|
|
184
|
+
>>> cifti_hdr = nib.load('file.cifti').header
|
|
185
|
+
>>> tr, start_time = getciftitr(cifti_hdr)
|
|
186
|
+
>>> print(f"TR: {tr} seconds, Start time: {start_time} seconds")
|
|
187
|
+
TR: 0.8 seconds, Start time: 0.0 seconds
|
|
188
|
+
"""
|
|
109
189
|
seriesaxis = None
|
|
110
190
|
for theaxis in cifti_hdr.matrix.mapped_indices:
|
|
111
191
|
if isinstance(cifti_hdr.matrix.get_axis(theaxis), nib.cifti2.SeriesAxis):
|
|
@@ -121,40 +201,136 @@ def getciftitr(cifti_hdr):
|
|
|
121
201
|
|
|
122
202
|
|
|
123
203
|
# dims are the array dimensions along each axis
|
|
124
|
-
def parseniftidims(thedims):
|
|
125
|
-
|
|
204
|
+
def parseniftidims(thedims: NDArray) -> Tuple[int, int, int, int]:
|
|
205
|
+
"""
|
|
206
|
+
Split the dims array into individual elements
|
|
207
|
+
|
|
208
|
+
This function extracts the dimension sizes from a NIfTI dimensions array,
|
|
209
|
+
returning the number of points along each spatial and temporal dimension.
|
|
126
210
|
|
|
127
211
|
Parameters
|
|
128
212
|
----------
|
|
129
|
-
thedims : int
|
|
130
|
-
The
|
|
213
|
+
thedims : NDArray of int
|
|
214
|
+
The NIfTI dimensions structure, where:
|
|
215
|
+
- thedims[0] contains the data type
|
|
216
|
+
- thedims[1] contains the number of points along x-axis (nx)
|
|
217
|
+
- thedims[2] contains the number of points along y-axis (ny)
|
|
218
|
+
- thedims[3] contains the number of points along z-axis (nz)
|
|
219
|
+
- thedims[4] contains the number of points along t-axis (nt)
|
|
131
220
|
|
|
132
221
|
Returns
|
|
133
222
|
-------
|
|
134
|
-
nx
|
|
135
|
-
Number of points along
|
|
223
|
+
nx : int
|
|
224
|
+
Number of points along the x-axis
|
|
225
|
+
ny : int
|
|
226
|
+
Number of points along the y-axis
|
|
227
|
+
nz : int
|
|
228
|
+
Number of points along the z-axis
|
|
229
|
+
nt : int
|
|
230
|
+
Number of points along the t-axis (time)
|
|
231
|
+
|
|
232
|
+
Notes
|
|
233
|
+
-----
|
|
234
|
+
The input array is expected to be a NIfTI dimensions array with at least 5 elements.
|
|
235
|
+
This function assumes the standard NIfTI dimension ordering where dimensions 1-4
|
|
236
|
+
correspond to spatial x, y, z, and temporal t dimensions respectively.
|
|
237
|
+
|
|
238
|
+
Examples
|
|
239
|
+
--------
|
|
240
|
+
>>> import numpy as np
|
|
241
|
+
>>> dims = np.array([0, 64, 64, 32, 100, 1, 1, 1])
|
|
242
|
+
>>> nx, ny, nz, nt = parseniftidims(dims)
|
|
243
|
+
>>> print(f"Dimensions: {nx} x {ny} x {nz} x {nt}")
|
|
244
|
+
Dimensions: 64 x 64 x 32 x 100
|
|
136
245
|
"""
|
|
137
|
-
return thedims[1], thedims[2], thedims[3], thedims[4]
|
|
246
|
+
return int(thedims[1]), int(thedims[2]), int(thedims[3]), int(thedims[4])
|
|
138
247
|
|
|
139
248
|
|
|
140
249
|
# sizes are the mapping between voxels and physical coordinates
|
|
141
|
-
def parseniftisizes(thesizes):
|
|
142
|
-
|
|
250
|
+
def parseniftisizes(thesizes: NDArray) -> Tuple[float, float, float, float]:
|
|
251
|
+
"""
|
|
252
|
+
Split the size array into individual elements
|
|
253
|
+
|
|
254
|
+
This function extracts voxel size information from a NIfTI header structure
|
|
255
|
+
and returns the scaling factors for spatial dimensions (x, y, z) and time (t).
|
|
143
256
|
|
|
144
257
|
Parameters
|
|
145
258
|
----------
|
|
146
|
-
thesizes : float
|
|
147
|
-
The
|
|
259
|
+
thesizes : NDArray of float
|
|
260
|
+
The NIfTI voxel size structure containing scaling information.
|
|
261
|
+
Expected to be an array where indices 1-4 correspond to
|
|
262
|
+
x, y, z, and t scaling factors respectively.
|
|
148
263
|
|
|
149
264
|
Returns
|
|
150
265
|
-------
|
|
151
|
-
dimx
|
|
152
|
-
Scaling from voxel number to physical coordinates
|
|
266
|
+
dimx : float
|
|
267
|
+
Scaling factor from voxel number to physical coordinates in x dimension
|
|
268
|
+
dimy : float
|
|
269
|
+
Scaling factor from voxel number to physical coordinates in y dimension
|
|
270
|
+
dimz : float
|
|
271
|
+
Scaling factor from voxel number to physical coordinates in z dimension
|
|
272
|
+
dimt : float
|
|
273
|
+
Scaling factor from voxel number to physical coordinates in t dimension
|
|
274
|
+
|
|
275
|
+
Notes
|
|
276
|
+
-----
|
|
277
|
+
The function assumes the input array follows the NIfTI standard where:
|
|
278
|
+
- Index 0: unused or padding
|
|
279
|
+
- Index 1: x-dimension scaling
|
|
280
|
+
- Index 2: y-dimension scaling
|
|
281
|
+
- Index 3: z-dimension scaling
|
|
282
|
+
- Index 4: t-dimension scaling
|
|
283
|
+
|
|
284
|
+
Examples
|
|
285
|
+
--------
|
|
286
|
+
>>> import numpy as np
|
|
287
|
+
>>> sizes = np.array([0.0, 2.0, 2.0, 2.0, 1.0])
|
|
288
|
+
>>> x, y, z, t = parseniftisizes(sizes)
|
|
289
|
+
>>> print(x, y, z, t)
|
|
290
|
+
2.0 2.0 2.0 1.0
|
|
153
291
|
"""
|
|
154
292
|
return thesizes[1], thesizes[2], thesizes[3], thesizes[4]
|
|
155
293
|
|
|
156
294
|
|
|
157
|
-
def dumparraytonifti(thearray, filename):
|
|
295
|
+
def dumparraytonifti(thearray: NDArray, filename: str) -> None:
|
|
296
|
+
"""
|
|
297
|
+
Save a numpy array to a NIFTI file with an identity affine transform.
|
|
298
|
+
|
|
299
|
+
This function saves a numpy array to a NIFTI file format with an identity
|
|
300
|
+
affine transformation matrix. The resulting NIFTI file will have unit
|
|
301
|
+
spacing and no rotation or translation.
|
|
302
|
+
|
|
303
|
+
Parameters
|
|
304
|
+
----------
|
|
305
|
+
thearray : NDArray
|
|
306
|
+
The data array to save. Can be 2D, 3D, or 4D array representing
|
|
307
|
+
medical imaging data or other volumetric data.
|
|
308
|
+
filename : str
|
|
309
|
+
The output filename (without extension). The function will append
|
|
310
|
+
'.nii' or '.nii.gz' extension based on the nibabel library's
|
|
311
|
+
default behavior.
|
|
312
|
+
|
|
313
|
+
Returns
|
|
314
|
+
-------
|
|
315
|
+
None
|
|
316
|
+
This function does not return any value. It saves the array to disk
|
|
317
|
+
as a NIFTI file.
|
|
318
|
+
|
|
319
|
+
Notes
|
|
320
|
+
-----
|
|
321
|
+
- The function uses an identity affine matrix with dimensions 4x4
|
|
322
|
+
- The affine matrix represents unit spacing with no rotation or translation
|
|
323
|
+
- This is useful for simple data storage without spatial information
|
|
324
|
+
- The function relies on the `savetonifti` helper function for the actual
|
|
325
|
+
NIFTI file writing operation
|
|
326
|
+
|
|
327
|
+
Examples
|
|
328
|
+
--------
|
|
329
|
+
>>> import numpy as np
|
|
330
|
+
>>> data = np.random.rand(64, 64, 64)
|
|
331
|
+
>>> dumparraytonifti(data, 'my_data')
|
|
332
|
+
>>> # Creates 'my_data.nii' file with identity affine transform
|
|
333
|
+
"""
|
|
158
334
|
outputaffine = np.zeros((4, 4), dtype=float)
|
|
159
335
|
for i in range(4):
|
|
160
336
|
outputaffine[i, i] = 1.0
|
|
@@ -163,8 +339,9 @@ def dumparraytonifti(thearray, filename):
|
|
|
163
339
|
savetonifti(thearray, outputheader, filename)
|
|
164
340
|
|
|
165
341
|
|
|
166
|
-
def savetonifti(thearray, theheader, thename, debug=False):
|
|
167
|
-
|
|
342
|
+
def savetonifti(thearray: NDArray, theheader: Any, thename: str, debug: bool = False) -> None:
|
|
343
|
+
"""
|
|
344
|
+
Save a data array out to a nifti file
|
|
168
345
|
|
|
169
346
|
Parameters
|
|
170
347
|
----------
|
|
@@ -174,10 +351,12 @@ def savetonifti(thearray, theheader, thename, debug=False):
|
|
|
174
351
|
A valid nifti header
|
|
175
352
|
thename : str
|
|
176
353
|
The name of the nifti file to save
|
|
354
|
+
debug : bool, optional
|
|
355
|
+
Enable debug output. Default is False
|
|
177
356
|
|
|
178
357
|
Returns
|
|
179
358
|
-------
|
|
180
|
-
|
|
359
|
+
None
|
|
181
360
|
"""
|
|
182
361
|
outputaffine = theheader.get_best_affine()
|
|
183
362
|
qaffine, qcode = theheader.get_qform(coded=True)
|
|
@@ -248,82 +427,350 @@ def savetonifti(thearray, theheader, thename, debug=False):
|
|
|
248
427
|
output_nifti = None
|
|
249
428
|
|
|
250
429
|
|
|
251
|
-
def niftifromarray(data):
|
|
430
|
+
def niftifromarray(data: NDArray) -> Any:
|
|
431
|
+
"""
|
|
432
|
+
Create a NIFTI image object from a numpy array with identity affine.
|
|
433
|
+
|
|
434
|
+
This function converts a numpy array into a NIFTI image object using an identity
|
|
435
|
+
affine transformation matrix. The resulting image has no spatial transformation
|
|
436
|
+
applied, meaning the voxel coordinates directly correspond to the array indices.
|
|
437
|
+
|
|
438
|
+
Parameters
|
|
439
|
+
----------
|
|
440
|
+
data : NDArray
|
|
441
|
+
The data array to convert to NIFTI format. Can be 2D, 3D, or 4D array
|
|
442
|
+
representing image data with arbitrary data types.
|
|
443
|
+
|
|
444
|
+
Returns
|
|
445
|
+
-------
|
|
446
|
+
nibabel.Nifti1Image
|
|
447
|
+
The NIFTI image object with identity affine matrix. The returned object
|
|
448
|
+
can be saved to disk using nibabel's save functionality.
|
|
449
|
+
|
|
450
|
+
Notes
|
|
451
|
+
-----
|
|
452
|
+
- The affine matrix is set to identity (4x4), which means no spatial
|
|
453
|
+
transformation is applied
|
|
454
|
+
- This function is useful for creating NIFTI images from processed data
|
|
455
|
+
that doesn't require spatial registration
|
|
456
|
+
- The data array is copied into the NIFTI image object
|
|
457
|
+
|
|
458
|
+
Examples
|
|
459
|
+
--------
|
|
460
|
+
>>> import numpy as np
|
|
461
|
+
>>> data = np.random.rand(64, 64, 32)
|
|
462
|
+
>>> img = niftifromarray(data)
|
|
463
|
+
>>> print(img.shape)
|
|
464
|
+
(64, 64, 32)
|
|
465
|
+
>>> print(img.affine)
|
|
466
|
+
[[1. 0. 0. 0.]
|
|
467
|
+
[0. 1. 0. 0.]
|
|
468
|
+
[0. 0. 1. 0.]
|
|
469
|
+
[0. 0. 0. 1.]]
|
|
470
|
+
"""
|
|
252
471
|
return nib.Nifti1Image(data, affine=np.eye(4))
|
|
253
472
|
|
|
254
473
|
|
|
255
|
-
def niftihdrfromarray(data):
|
|
474
|
+
def niftihdrfromarray(data: NDArray) -> Any:
|
|
475
|
+
"""
|
|
476
|
+
Create a NIFTI header from a numpy array with identity affine.
|
|
477
|
+
|
|
478
|
+
This function creates a NIFTI header object from a numpy array by constructing
|
|
479
|
+
a minimal NIFTI image with an identity affine matrix and extracting its header.
|
|
480
|
+
The resulting header contains basic NIFTI metadata but no spatial transformation
|
|
481
|
+
information beyond the identity matrix.
|
|
482
|
+
|
|
483
|
+
Parameters
|
|
484
|
+
----------
|
|
485
|
+
data : NDArray
|
|
486
|
+
The data array to create a header for. The array can be of any shape and
|
|
487
|
+
data type, but should typically represent medical imaging data.
|
|
488
|
+
|
|
489
|
+
Returns
|
|
490
|
+
-------
|
|
491
|
+
nibabel.Nifti1Header
|
|
492
|
+
The NIFTI header object containing metadata for the input data array.
|
|
493
|
+
|
|
494
|
+
Notes
|
|
495
|
+
-----
|
|
496
|
+
The returned header is a copy of the header from a NIFTI image with identity
|
|
497
|
+
affine matrix. This is useful for creating NIFTI headers without requiring
|
|
498
|
+
full NIFTI image files or spatial transformation information.
|
|
499
|
+
|
|
500
|
+
Examples
|
|
501
|
+
--------
|
|
502
|
+
>>> import numpy as np
|
|
503
|
+
>>> data = np.random.rand(64, 64, 64)
|
|
504
|
+
>>> header = niftihdrfromarray(data)
|
|
505
|
+
>>> print(header)
|
|
506
|
+
<nibabel.nifti1.Nifti1Header object at 0x...>
|
|
507
|
+
"""
|
|
256
508
|
return nib.Nifti1Image(data, affine=np.eye(4)).header.copy()
|
|
257
509
|
|
|
258
510
|
|
|
259
|
-
def
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
511
|
+
def makedestarray(
|
|
512
|
+
destshape: Union[Tuple, NDArray],
|
|
513
|
+
filetype: str = "nifti",
|
|
514
|
+
rt_floattype: np.dtype = np.dtype(np.float64),
|
|
515
|
+
) -> Tuple[NDArray, int]:
|
|
516
|
+
"""
|
|
517
|
+
Create a destination array for output data based on file type and shape.
|
|
518
|
+
|
|
519
|
+
Parameters
|
|
520
|
+
----------
|
|
521
|
+
destshape : tuple or numpy array
|
|
522
|
+
Shape specification for the output array. For 'nifti' files, this is expected
|
|
523
|
+
to be a 3D or 4D shape; for 'cifti', it is expected to be a 2D or 3D shape
|
|
524
|
+
where the last dimension corresponds to spatial data and the second-to-last
|
|
525
|
+
to time; for 'text', it is expected to be a 1D or 2D shape.
|
|
526
|
+
filetype : str, optional
|
|
527
|
+
Type of output file. Must be one of 'nifti', 'cifti', or 'text'. Default is 'nifti'.
|
|
528
|
+
rt_floattype : np.dtype, optional
|
|
529
|
+
Data type for the output array. Default is 'np.float64'.
|
|
530
|
+
|
|
531
|
+
Returns
|
|
532
|
+
-------
|
|
533
|
+
outmaparray : numpy array
|
|
534
|
+
Pre-allocated output array with appropriate shape and dtype. The shape depends
|
|
535
|
+
on the `filetype` and `destshape`:
|
|
536
|
+
- For 'nifti': 1D array if 3D input, 2D array if 4D input.
|
|
537
|
+
- For 'cifti': 1D or 2D array depending on time dimension.
|
|
538
|
+
- For 'text': 1D or 2D array depending on time dimension.
|
|
539
|
+
internalspaceshape : int
|
|
540
|
+
The flattened spatial dimension size used to determine the shape of the output array.
|
|
541
|
+
|
|
542
|
+
Notes
|
|
543
|
+
-----
|
|
544
|
+
This function handles different file types by interpreting the input `destshape`
|
|
545
|
+
differently:
|
|
546
|
+
- For 'nifti', the spatial dimensions are multiplied together to form the
|
|
547
|
+
`internalspaceshape`, and the time dimension is inferred from the fourth
|
|
548
|
+
axis if present.
|
|
549
|
+
- For 'cifti', the last dimension is treated as spatial, and the second-to-last
|
|
550
|
+
as temporal if it exceeds 1.
|
|
551
|
+
- For 'text', the first dimension is treated as spatial, and the second as time.
|
|
552
|
+
|
|
553
|
+
Examples
|
|
554
|
+
--------
|
|
555
|
+
>>> import numpy as np
|
|
556
|
+
>>> from typing import Tuple, Union
|
|
557
|
+
>>> makedestarray((64, 64, 32), filetype="nifti")
|
|
558
|
+
(array([0., 0., ..., 0.]), 2097152)
|
|
559
|
+
|
|
560
|
+
>>> makedestarray((100, 50), filetype="text")
|
|
561
|
+
(array([0., 0., ..., 0.]), 100)
|
|
562
|
+
|
|
563
|
+
>>> makedestarray((100, 50, 20), filetype="cifti")
|
|
564
|
+
(array([[0., 0., ..., 0.], ..., [0., 0., ..., 0.]]), 20)
|
|
565
|
+
"""
|
|
566
|
+
if filetype == "text":
|
|
274
567
|
try:
|
|
275
568
|
internalspaceshape = destshape[0]
|
|
276
569
|
timedim = destshape[1]
|
|
277
|
-
spaceonly = False
|
|
278
570
|
except TypeError:
|
|
279
571
|
internalspaceshape = destshape
|
|
280
|
-
|
|
572
|
+
timedim = None
|
|
573
|
+
elif filetype == "cifti":
|
|
574
|
+
spaceindex = len(destshape) - 1
|
|
575
|
+
timeindex = spaceindex - 1
|
|
576
|
+
internalspaceshape = destshape[spaceindex]
|
|
577
|
+
if destshape[timeindex] > 1:
|
|
578
|
+
timedim = destshape[timeindex]
|
|
579
|
+
else:
|
|
580
|
+
timedim = None
|
|
281
581
|
else:
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
internalspaceshape = destshape[spaceindex]
|
|
286
|
-
if destshape[timeindex] > 1:
|
|
287
|
-
spaceonly = False
|
|
288
|
-
timedim = destshape[timeindex]
|
|
289
|
-
isseries = True
|
|
290
|
-
else:
|
|
291
|
-
spaceonly = True
|
|
292
|
-
isseries = False
|
|
582
|
+
internalspaceshape = int(destshape[0]) * int(destshape[1]) * int(destshape[2])
|
|
583
|
+
if len(destshape) == 3:
|
|
584
|
+
timedim = None
|
|
293
585
|
else:
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
spaceonly = True
|
|
297
|
-
else:
|
|
298
|
-
spaceonly = False
|
|
299
|
-
timedim = destshape[3]
|
|
300
|
-
if spaceonly:
|
|
586
|
+
timedim = destshape[3]
|
|
587
|
+
if timedim is None:
|
|
301
588
|
outmaparray = np.zeros(internalspaceshape, dtype=rt_floattype)
|
|
302
589
|
else:
|
|
303
590
|
outmaparray = np.zeros((internalspaceshape, timedim), dtype=rt_floattype)
|
|
591
|
+
return outmaparray, internalspaceshape
|
|
592
|
+
|
|
593
|
+
|
|
594
|
+
def populatemap(
|
|
595
|
+
themap: NDArray,
|
|
596
|
+
internalspaceshape: int,
|
|
597
|
+
validvoxels: Optional[NDArray],
|
|
598
|
+
outmaparray: NDArray,
|
|
599
|
+
debug: bool = False,
|
|
600
|
+
) -> NDArray:
|
|
601
|
+
"""
|
|
602
|
+
Populate an output array with data from a map, handling valid voxel masking.
|
|
603
|
+
|
|
604
|
+
This function populates an output array with data from a source map, optionally
|
|
605
|
+
masking invalid voxels. It supports both 1D and 2D output arrays.
|
|
606
|
+
|
|
607
|
+
Parameters
|
|
608
|
+
----------
|
|
609
|
+
themap : NDArray
|
|
610
|
+
The source data to populate into the output array. Shape is either
|
|
611
|
+
``(internalspaceshape,)`` for 1D or ``(internalspaceshape, N)`` for 2D.
|
|
612
|
+
internalspaceshape : int
|
|
613
|
+
The total spatial dimension size, used to determine the expected shape
|
|
614
|
+
of the input map and the output array.
|
|
615
|
+
validvoxels : NDArray or None
|
|
616
|
+
Indices of valid voxels to populate. If None, all voxels are populated.
|
|
617
|
+
Shape should be ``(M,)`` where M is the number of valid voxels.
|
|
618
|
+
outmaparray : NDArray
|
|
619
|
+
The destination array to populate. Shape should be either ``(internalspaceshape,)``
|
|
620
|
+
for 1D or ``(internalspaceshape, N)`` for 2D.
|
|
621
|
+
debug : bool, optional
|
|
622
|
+
Enable debug output. Default is False.
|
|
623
|
+
|
|
624
|
+
Returns
|
|
625
|
+
-------
|
|
626
|
+
NDArray
|
|
627
|
+
The populated output array with the same shape as `outmaparray`.
|
|
628
|
+
|
|
629
|
+
Notes
|
|
630
|
+
-----
|
|
631
|
+
- If `validvoxels` is provided, only the specified voxels are updated.
|
|
632
|
+
- The function modifies `outmaparray` in-place and returns it.
|
|
633
|
+
- For 2D arrays, the second dimension is preserved in the output.
|
|
634
|
+
|
|
635
|
+
Examples
|
|
636
|
+
--------
|
|
637
|
+
>>> import numpy as np
|
|
638
|
+
>>> themap = np.array([1, 2, 3, 4])
|
|
639
|
+
>>> outmaparray = np.zeros(4)
|
|
640
|
+
>>> validvoxels = np.array([0, 2])
|
|
641
|
+
>>> result = populatemap(themap, 4, validvoxels, outmaparray)
|
|
642
|
+
>>> print(result)
|
|
643
|
+
[1. 0. 3. 0.]
|
|
644
|
+
|
|
645
|
+
>>> outmaparray = np.zeros((4, 2))
|
|
646
|
+
>>> result = populatemap(themap.reshape((4, 1)), 4, None, outmaparray)
|
|
647
|
+
>>> print(result)
|
|
648
|
+
[[1.]
|
|
649
|
+
[2.]
|
|
650
|
+
[3.]
|
|
651
|
+
[4.]]
|
|
652
|
+
"""
|
|
653
|
+
if len(outmaparray.shape) == 1:
|
|
654
|
+
outmaparray[:] = 0.0
|
|
655
|
+
if validvoxels is not None:
|
|
656
|
+
outmaparray[validvoxels] = themap[:].reshape((np.shape(validvoxels)[0]))
|
|
657
|
+
else:
|
|
658
|
+
outmaparray = themap[:].reshape((internalspaceshape))
|
|
659
|
+
else:
|
|
660
|
+
outmaparray[:, :] = 0.0
|
|
661
|
+
if validvoxels is not None:
|
|
662
|
+
outmaparray[validvoxels, :] = themap[:, :].reshape(
|
|
663
|
+
(np.shape(validvoxels)[0], outmaparray.shape[1])
|
|
664
|
+
)
|
|
665
|
+
else:
|
|
666
|
+
outmaparray = themap[:, :].reshape((internalspaceshape, outmaparray.shape[1]))
|
|
667
|
+
if debug:
|
|
668
|
+
print(f"populatemap: output array shape is {outmaparray.shape}")
|
|
669
|
+
return outmaparray
|
|
670
|
+
|
|
671
|
+
|
|
672
|
+
def savemaplist(
|
|
673
|
+
outputname: str,
|
|
674
|
+
maplist: List[Tuple],
|
|
675
|
+
validvoxels: Optional[NDArray],
|
|
676
|
+
destshape: Union[Tuple, NDArray],
|
|
677
|
+
theheader: Any,
|
|
678
|
+
bidsbasedict: Dict[str, Any],
|
|
679
|
+
filetype: str = "nifti",
|
|
680
|
+
rt_floattype: np.dtype = np.dtype(np.float64),
|
|
681
|
+
cifti_hdr: Optional[Any] = None,
|
|
682
|
+
savejson: bool = True,
|
|
683
|
+
debug: bool = False,
|
|
684
|
+
) -> None:
|
|
685
|
+
"""
|
|
686
|
+
Save a list of data maps to files with appropriate BIDS metadata.
|
|
687
|
+
|
|
688
|
+
This function saves a list of data maps to output files (NIfTI, CIFTI, or text)
|
|
689
|
+
using the specified file type and includes BIDS-compliant metadata in JSON sidecars.
|
|
690
|
+
It supports mapping data into a destination array, handling valid voxels, and
|
|
691
|
+
writing out the final files with appropriate naming and headers.
|
|
692
|
+
|
|
693
|
+
Parameters
|
|
694
|
+
----------
|
|
695
|
+
outputname : str
|
|
696
|
+
Base name for output files (without extension).
|
|
697
|
+
maplist : list of tuples
|
|
698
|
+
List of (data, suffix, maptype, unit, description) tuples to save.
|
|
699
|
+
Each tuple corresponds to one map to be saved.
|
|
700
|
+
validvoxels : numpy array or None
|
|
701
|
+
Indices of valid voxels in the data. If None, all voxels are considered valid.
|
|
702
|
+
destshape : tuple or numpy array
|
|
703
|
+
Shape of the destination array into which data will be mapped.
|
|
704
|
+
theheader : nifti/cifti header
|
|
705
|
+
Header object for the output files (NIfTI or CIFTI).
|
|
706
|
+
bidsbasedict : dict
|
|
707
|
+
Base BIDS metadata to include in JSON sidecars.
|
|
708
|
+
filetype : str, optional
|
|
709
|
+
Output file type ('nifti', 'cifti', or 'text'). Default is 'nifti'.
|
|
710
|
+
rt_floattype : str, optional
|
|
711
|
+
Data type for output arrays. Default is 'float64'.
|
|
712
|
+
cifti_hdr : cifti header or None, optional
|
|
713
|
+
CIFTI header if filetype is 'cifti'. Default is None.
|
|
714
|
+
savejson : bool, optional
|
|
715
|
+
Whether to save JSON sidecar files. Default is True.
|
|
716
|
+
debug : bool, optional
|
|
717
|
+
Enable debug output. Default is False.
|
|
718
|
+
|
|
719
|
+
Returns
|
|
720
|
+
-------
|
|
721
|
+
None
|
|
722
|
+
This function does not return any value; it writes files to disk.
|
|
723
|
+
|
|
724
|
+
Notes
|
|
725
|
+
-----
|
|
726
|
+
- For CIFTI files, if the data is a series (multi-dimensional), it is saved with
|
|
727
|
+
the provided names; otherwise, it uses temporal offset and step information.
|
|
728
|
+
- The function uses `makedestarray` to prepare the output array and `populatemap`
|
|
729
|
+
to copy data into the array based on valid voxels.
|
|
730
|
+
- If `savejson` is True, a JSON file is created for each map with metadata
|
|
731
|
+
including unit and description.
|
|
732
|
+
|
|
733
|
+
Examples
|
|
734
|
+
--------
|
|
735
|
+
>>> savemaplist(
|
|
736
|
+
... outputname="sub-01_task-rest",
|
|
737
|
+
... maplist=[
|
|
738
|
+
... (data1, "stat", "stat", "z", "Statistical map"),
|
|
739
|
+
... (data2, "mask", "mask", None, "Binary mask"),
|
|
740
|
+
... ],
|
|
741
|
+
... validvoxels=valid_indices,
|
|
742
|
+
... destshape=(100, 100, 100),
|
|
743
|
+
... theheader=nifti_header,
|
|
744
|
+
... bidsbasedict={"Dataset": "MyDataset"},
|
|
745
|
+
... filetype="nifti",
|
|
746
|
+
... savejson=True,
|
|
747
|
+
... )
|
|
748
|
+
"""
|
|
749
|
+
outmaparray, internalspaceshape = makedestarray(
|
|
750
|
+
destshape,
|
|
751
|
+
filetype=filetype,
|
|
752
|
+
rt_floattype=rt_floattype,
|
|
753
|
+
)
|
|
754
|
+
if debug:
|
|
755
|
+
print("maplist:")
|
|
756
|
+
print(maplist)
|
|
304
757
|
for themap, mapsuffix, maptype, theunit, thedescription in maplist:
|
|
305
|
-
#
|
|
758
|
+
# copy the data into the output array, remapping if warranted
|
|
306
759
|
if debug:
|
|
760
|
+
print(f"processing map {mapsuffix}")
|
|
307
761
|
if validvoxels is None:
|
|
308
|
-
print(f"savemaplist: saving {mapsuffix}
|
|
762
|
+
print(f"savemaplist: saving {mapsuffix} of shape {themap.shape} to {destshape}")
|
|
309
763
|
else:
|
|
310
764
|
print(
|
|
311
|
-
f"savemaplist: saving {mapsuffix}
|
|
312
|
-
)
|
|
313
|
-
if spaceonly:
|
|
314
|
-
outmaparray[:] = 0.0
|
|
315
|
-
if validvoxels is not None:
|
|
316
|
-
outmaparray[validvoxels] = themap[:].reshape((np.shape(validvoxels)[0]))
|
|
317
|
-
else:
|
|
318
|
-
outmaparray = themap[:].reshape((internalspaceshape))
|
|
319
|
-
else:
|
|
320
|
-
outmaparray[:, :] = 0.0
|
|
321
|
-
if validvoxels is not None:
|
|
322
|
-
outmaparray[validvoxels, :] = themap[:, :].reshape(
|
|
323
|
-
(np.shape(validvoxels)[0], timedim)
|
|
765
|
+
f"savemaplist: saving {mapsuffix} of shape {themap.shape} to {destshape} from {np.shape(validvoxels)[0]} valid voxels"
|
|
324
766
|
)
|
|
325
|
-
|
|
326
|
-
|
|
767
|
+
outmaparray = populatemap(
|
|
768
|
+
themap,
|
|
769
|
+
internalspaceshape,
|
|
770
|
+
validvoxels,
|
|
771
|
+
outmaparray.astype(themap.dtype),
|
|
772
|
+
debug=False,
|
|
773
|
+
)
|
|
327
774
|
|
|
328
775
|
# actually write out the data
|
|
329
776
|
bidsdict = bidsbasedict.copy()
|
|
@@ -331,7 +778,7 @@ def savemaplist(
|
|
|
331
778
|
bidsdict["Units"] = theunit
|
|
332
779
|
if thedescription is not None:
|
|
333
780
|
bidsdict["Description"] = thedescription
|
|
334
|
-
if
|
|
781
|
+
if filetype == "text":
|
|
335
782
|
writenpvecs(
|
|
336
783
|
outmaparray.reshape(destshape),
|
|
337
784
|
f"{outputname}_{mapsuffix}.txt",
|
|
@@ -340,9 +787,10 @@ def savemaplist(
|
|
|
340
787
|
savename = f"{outputname}_desc-{mapsuffix}_{maptype}"
|
|
341
788
|
if savejson:
|
|
342
789
|
writedicttojson(bidsdict, savename + ".json")
|
|
343
|
-
if
|
|
790
|
+
if filetype == "nifti":
|
|
344
791
|
savetonifti(outmaparray.reshape(destshape), theheader, savename)
|
|
345
792
|
else:
|
|
793
|
+
isseries = len(outmaparray.shape) != 1
|
|
346
794
|
if isseries:
|
|
347
795
|
savetocifti(
|
|
348
796
|
outmaparray,
|
|
@@ -365,40 +813,66 @@ def savemaplist(
|
|
|
365
813
|
|
|
366
814
|
|
|
367
815
|
def savetocifti(
|
|
368
|
-
thearray,
|
|
369
|
-
theciftiheader,
|
|
370
|
-
theniftiheader,
|
|
371
|
-
thename,
|
|
372
|
-
isseries=False,
|
|
373
|
-
names=["placeholder"],
|
|
374
|
-
start=0.0,
|
|
375
|
-
step=1.0,
|
|
376
|
-
debug=False,
|
|
377
|
-
):
|
|
378
|
-
|
|
816
|
+
thearray: NDArray,
|
|
817
|
+
theciftiheader: Any,
|
|
818
|
+
theniftiheader: Any,
|
|
819
|
+
thename: str,
|
|
820
|
+
isseries: bool = False,
|
|
821
|
+
names: List[str] = ["placeholder"],
|
|
822
|
+
start: float = 0.0,
|
|
823
|
+
step: float = 1.0,
|
|
824
|
+
debug: bool = False,
|
|
825
|
+
) -> None:
|
|
826
|
+
"""
|
|
827
|
+
Save a data array out to a CIFTI file.
|
|
828
|
+
|
|
829
|
+
This function saves a given data array to a CIFTI file (either dense or parcellated,
|
|
830
|
+
scalar or series) based on the provided headers and parameters.
|
|
379
831
|
|
|
380
832
|
Parameters
|
|
381
833
|
----------
|
|
382
834
|
thearray : array-like
|
|
383
|
-
The data array to
|
|
835
|
+
The data array to be saved. The shape is expected to be (n_timepoints, n_vertices)
|
|
836
|
+
or (n_vertices,) for scalar data.
|
|
384
837
|
theciftiheader : cifti header
|
|
385
|
-
A valid
|
|
838
|
+
A valid CIFTI header object containing axis information, including BrainModelAxis
|
|
839
|
+
or ParcelsAxis.
|
|
386
840
|
theniftiheader : nifti header
|
|
387
|
-
A valid
|
|
841
|
+
A valid NIfTI header object to be used for setting the intent of the output file.
|
|
388
842
|
thename : str
|
|
389
|
-
The name of the
|
|
390
|
-
isseries: bool
|
|
391
|
-
True
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
843
|
+
The base name of the output CIFTI file (without extension).
|
|
844
|
+
isseries : bool, optional
|
|
845
|
+
If True, the output will be a time series file (dtseries or ptseries).
|
|
846
|
+
If False, it will be a scalar file (dscalar or pscalar). Default is False.
|
|
847
|
+
names : list of str, optional
|
|
848
|
+
Names for scalar maps when `isseries` is False. Default is ['placeholder'].
|
|
849
|
+
start : float, optional
|
|
850
|
+
Start time in seconds for the time series. Default is 0.0.
|
|
851
|
+
step : float, optional
|
|
852
|
+
Time step in seconds for the time series. Default is 1.0.
|
|
853
|
+
debug : bool, optional
|
|
854
|
+
If True, print debugging information. Default is False.
|
|
398
855
|
|
|
399
856
|
Returns
|
|
400
857
|
-------
|
|
401
|
-
|
|
858
|
+
None
|
|
859
|
+
This function does not return anything; it saves the file to disk.
|
|
860
|
+
|
|
861
|
+
Notes
|
|
862
|
+
-----
|
|
863
|
+
The function automatically detects whether the input CIFTI header contains a
|
|
864
|
+
BrainModelAxis or a ParcelsAxis and builds the appropriate output structure.
|
|
865
|
+
The correct CIFTI file extension (e.g., .dtseries.nii, .dscalar.nii) is appended
|
|
866
|
+
to the output filename based on the `isseries` and parcellation flags.
|
|
867
|
+
|
|
868
|
+
Examples
|
|
869
|
+
--------
|
|
870
|
+
>>> import numpy as np
|
|
871
|
+
>>> import nibabel as nib
|
|
872
|
+
>>> data = np.random.rand(100, 50)
|
|
873
|
+
>>> cifti_header = nib.load('input.cifti').header
|
|
874
|
+
>>> nifti_header = nib.load('input.nii').header
|
|
875
|
+
>>> savetocifti(data, cifti_header, nifti_header, 'output', isseries=True)
|
|
402
876
|
"""
|
|
403
877
|
if debug:
|
|
404
878
|
print("savetocifti:", thename)
|
|
@@ -494,19 +968,38 @@ def savetocifti(
|
|
|
494
968
|
nib.cifti2.save(img, thename + suffix)
|
|
495
969
|
|
|
496
970
|
|
|
497
|
-
def checkifnifti(filename):
|
|
498
|
-
|
|
971
|
+
def checkifnifti(filename: str) -> bool:
|
|
972
|
+
"""
|
|
973
|
+
Check to see if a file name is a valid nifti name.
|
|
974
|
+
|
|
975
|
+
This function determines whether a given filename has a valid NIfTI file extension.
|
|
976
|
+
NIfTI files typically have extensions ".nii" or ".nii.gz" for compressed files.
|
|
499
977
|
|
|
500
978
|
Parameters
|
|
501
979
|
----------
|
|
502
980
|
filename : str
|
|
503
|
-
The file name
|
|
981
|
+
The file name to check for valid NIfTI extension.
|
|
504
982
|
|
|
505
983
|
Returns
|
|
506
984
|
-------
|
|
507
|
-
|
|
508
|
-
True if
|
|
509
|
-
|
|
985
|
+
bool
|
|
986
|
+
True if the filename ends with ".nii" or ".nii.gz", False otherwise.
|
|
987
|
+
|
|
988
|
+
Notes
|
|
989
|
+
-----
|
|
990
|
+
This function only checks the file extension and does not verify if the file actually exists
|
|
991
|
+
or contains valid NIfTI data. It performs a simple string matching operation.
|
|
992
|
+
|
|
993
|
+
Examples
|
|
994
|
+
--------
|
|
995
|
+
>>> checkifnifti("image.nii")
|
|
996
|
+
True
|
|
997
|
+
>>> checkifnifti("data.nii.gz")
|
|
998
|
+
True
|
|
999
|
+
>>> checkifnifti("scan.json")
|
|
1000
|
+
False
|
|
1001
|
+
>>> checkifnifti("volume.nii.gz")
|
|
1002
|
+
True
|
|
510
1003
|
"""
|
|
511
1004
|
if filename.endswith(".nii") or filename.endswith(".nii.gz"):
|
|
512
1005
|
return True
|
|
@@ -514,22 +1007,44 @@ def checkifnifti(filename):
|
|
|
514
1007
|
return False
|
|
515
1008
|
|
|
516
1009
|
|
|
-def niftisplitext(filename):
-
+def niftisplitext(filename: str) -> Tuple[str, str]:
+    """
+    Split nifti filename into name base and extension.
+
+    This function splits a NIfTI filename into its base name and extension components.
+    It handles NIfTI files that may have double extensions (e.g., '.nii.gz') by properly
+    combining the extensions.

     Parameters
     ----------
     filename : str
-        The file name
+        The NIfTI file name to split, which may contain double extensions like '.nii.gz'

     Returns
     -------
-
-
-
-
-
-
+    tuple[str, str]
+        A tuple containing:
+        - name : str
+            Base name of the NIfTI file (without extension)
+        - ext : str
+            Extension of the NIfTI file (including any additional extensions)
+
+    Notes
+    -----
+    This function is specifically designed for NIfTI files which commonly have
+    double extensions (e.g., '.nii.gz', '.nii.bz2'). It properly handles these
+    cases by combining the two extension components.
+
+    Examples
+    --------
+    >>> niftisplitext('image.nii.gz')
+    ('image', '.nii.gz')
+
+    >>> niftisplitext('data.nii')
+    ('data', '.nii')
+
+    >>> niftisplitext('volume.nii.bz2')
+    ('volume', '.nii.bz2')
     """
     firstsplit = os.path.splitext(filename)
     secondsplit = os.path.splitext(firstsplit[0])

@@ -539,41 +1054,137 @@ def niftisplitext(filename):
     return firstsplit[0], firstsplit[1]

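The two-stage split is just `os.path.splitext` applied twice, re-joining a second extension only when one exists; a standalone sketch of the behavior shown in the Examples above:

```python
import os

def split_nifti_name(filename):
    # First split peels '.gz'; a second split peels '.nii' if present,
    # so 'image.nii.gz' -> ('image', '.nii.gz').
    base, ext = os.path.splitext(filename)
    base2, ext2 = os.path.splitext(base)
    if ext2:
        return base2, ext2 + ext
    return base, ext

print(split_nifti_name("image.nii.gz"))  # ('image', '.nii.gz')
print(split_nifti_name("data.nii"))      # ('data', '.nii')
```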
-def niftisplit(inputfile, outputroot, axis=3):
+def niftisplit(inputfile: str, outputroot: str, axis: int = 3) -> None:
+    """
+    Split a NIFTI file along a specified axis into separate files.
+
+    This function splits a NIFTI image along a given axis into multiple
+    individual NIFTI files, each corresponding to a slice along that axis.
+    The output files are named using the provided root name with zero-padded
+    slice indices.
+
+    Parameters
+    ----------
+    inputfile : str
+        Path to the input NIFTI file to be split.
+    outputroot : str
+        Base name for the output files. Each output file will be named
+        ``outputroot + str(i).zfill(4)`` where ``i`` is the slice index.
+    axis : int, optional
+        Axis along which to split the NIFTI file. Valid values are 0-4,
+        corresponding to the dimensions of the NIFTI file. Default is 3,
+        which corresponds to the time axis in 4D or 5D NIFTI files.
+
+    Returns
+    -------
+    None
+        This function does not return any value. It writes the split slices
+        as separate NIFTI files to disk.
+
+    Notes
+    -----
+    - The function supports both 4D and 5D NIFTI files.
+    - The header information is preserved for each output slice, with the
+      dimension along the split axis set to 1.
+    - Slice indices in the output file names are zero-padded to four digits
+      (e.g., ``0000``, ``0001``, etc.).
+
+    Examples
+    --------
+    >>> niftisplit('input.nii.gz', 'slice_', axis=2)
+    Splits the input NIFTI file along the third axis (axis=2) and saves
+    the resulting slices as ``slice_0000.nii.gz``, ``slice_0001.nii.gz``, etc.
+    """
     infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
     theheader = copy.deepcopy(infile_hdr)
     numpoints = infiledims[axis + 1]
     print(infiledims)
     theheader["dim"][axis + 1] = 1
-
-
-    if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if infile_data is not None:
+        for i in range(numpoints):
+            if infiledims[0] == 5:
+                if axis == 0:
+                    thisslice = infile_data[i : i + 1, :, :, :, :]
+                elif axis == 1:
+                    thisslice = infile_data[:, i : i + 1, :, :, :]
+                elif axis == 2:
+                    thisslice = infile_data[:, :, i : i + 1, :, :]
+                elif axis == 3:
+                    thisslice = infile_data[:, :, :, i : i + 1, :]
+                elif axis == 4:
+                    thisslice = infile_data[:, :, :, :, i : i + 1]
+                else:
+                    raise ValueError("illegal axis")
+            elif infiledims[0] == 4:
+                if axis == 0:
+                    thisslice = infile_data[i : i + 1, :, :, :]
+                elif axis == 1:
+                    thisslice = infile_data[:, i : i + 1, :, :]
+                elif axis == 2:
+                    thisslice = infile_data[:, :, i : i + 1, :]
+                elif axis == 3:
+                    thisslice = infile_data[:, :, :, i : i + 1]
+                else:
+                    raise ValueError("illegal axis")
+            savetonifti(thisslice, theheader, outputroot + str(i).zfill(4))
+    else:
+        raise ValueError("file contains no data!")

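The per-axis `if`/`elif` ladder in the new body can also be written generically with `slice` objects; a minimal sketch of the same extraction step (standalone, using a random array rather than a NIfTI file):

```python
import numpy as np

def take_slab(data, i, axis):
    # Build an index tuple equivalent to data[..., i:i+1, ...] along `axis`,
    # keeping the dimension so the output still has the same rank.
    index = [slice(None)] * data.ndim
    index[axis] = slice(i, i + 1)
    return data[tuple(index)]

vol = np.random.rand(4, 5, 6, 10)   # stand-in for a 4D NIfTI array
slab = take_slab(vol, 3, axis=3)    # one timepoint, shape (4, 5, 6, 1)
print(slab.shape)
```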
-def niftimerge(
+def niftimerge(
+    inputlist: List[str],
+    outputname: str,
+    writetodisk: bool = True,
+    axis: int = 3,
+    returndata: bool = False,
+    debug: bool = False,
+) -> Optional[Tuple[NDArray, Any]]:
+    """
+    Merge multiple NIFTI files along a specified axis.
+
+    This function reads a list of NIFTI files, concatenates their data along a
+    specified axis, and optionally writes the result to a new NIFTI file. It can
+    also return the merged data and header for further processing.
+
+    Parameters
+    ----------
+    inputlist : list of str
+        List of input NIFTI file paths to merge.
+    outputname : str
+        Path for the merged output NIFTI file.
+    writetodisk : bool, optional
+        If True, write the merged data to disk. Default is True.
+    axis : int, optional
+        Axis along which to concatenate the data (0-4). Default is 3, which
+        corresponds to the time axis. The dimension of the output along this
+        axis will be the number of input files.
+    returndata : bool, optional
+        If True, return the merged data array and header. Default is False.
+    debug : bool, optional
+        If True, print debug information during execution. Default is False.
+
+    Returns
+    -------
+    tuple of (NDArray, Any) or None
+        If `returndata` is True, returns a tuple of:
+        - `output_data`: The merged NIFTI data as a numpy array.
+        - `infile_hdr`: The header from the last input file.
+        If `returndata` is False, returns None.
+
+    Notes
+    -----
+    - The function assumes all input files have compatible dimensions except
+      along the concatenation axis.
+    - If the input file has 3D dimensions, it is reshaped to 4D before concatenation.
+    - The output NIFTI header is updated to reflect the new dimension along the
+      concatenation axis.
+
+    Examples
+    --------
+    >>> input_files = ['file1.nii', 'file2.nii', 'file3.nii']
+    >>> niftimerge(input_files, 'merged.nii', axis=3, writetodisk=True)
+    >>> data, header = niftimerge(input_files, 'merged.nii', returndata=True)
+    """
     inputdata = []
     for thefile in inputlist:
         if debug:

@@ -592,9 +1203,44 @@ def niftimerge(inputlist, outputname, writetodisk=True, axis=3, returndata=False
     savetonifti(output_data, theheader, outputname)
     if returndata:
         return output_data, infile_hdr
+    else:
+        return None

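Conceptually the merge is a plain concatenation of the per-file arrays; a minimal sketch of that step with synthetic arrays (no file I/O; the shapes are illustrative):

```python
import numpy as np

# Three single-timepoint "volumes", as niftimerge would read them from disk.
vols = [np.random.rand(4, 4, 4, 1) for _ in range(3)]

# Stacking along axis 3 recovers a 4D series whose time length is the
# number of input files, matching the docstring above.
merged = np.concatenate(vols, axis=3)
print(merged.shape)  # (4, 4, 4, 3)
```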
-def niftiroi(inputfile, outputfile, startpt, numpoints):
+def niftiroi(inputfile: str, outputfile: str, startpt: int, numpoints: int) -> None:
+    """
+    Extract a region of interest (ROI) from a NIFTI file along the time axis.
+
+    This function extracts a specified number of timepoints from a NIFTI file starting
+    at a given timepoint index. The extracted data is saved to a new NIFTI file.
+
+    Parameters
+    ----------
+    inputfile : str
+        Path to the input NIFTI file
+    outputfile : str
+        Path for the output ROI file
+    startpt : int
+        Starting timepoint index (0-based)
+    numpoints : int
+        Number of timepoints to extract
+
+    Returns
+    -------
+    None
+        This function does not return any value but saves the extracted ROI to the specified output file.
+
+    Notes
+    -----
+    The function handles both 4D and 5D NIFTI files. For 5D files, the function preserves
+    the fifth dimension in the output. The time dimension is reduced according to the
+    specified number of points.
+
+    Examples
+    --------
+    >>> niftiroi('input.nii', 'output.nii', 10, 50)
+    Extracts timepoints 10-59 from input.nii and saves to output.nii
+    """
     print(inputfile, outputfile, startpt, numpoints)
     infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
     theheader = copy.deepcopy(infile_hdr)

@@ -606,19 +1252,41 @@ def niftiroi(inputfile, outputfile, startpt, numpoints):
     savetonifti(output_data, theheader, outputfile)

-def checkifcifti(filename, debug=False):
-
+def checkifcifti(filename: str, debug: bool = False) -> bool:
+    """
+    Check to see if the specified file is CIFTI format
+
+    This function determines whether a given neuroimaging file is in CIFTI (Connectivity Information Format)
+    by examining the file's header information. CIFTI files have specific intent codes that distinguish them
+    from other neuroimaging formats like NIFTI.

     Parameters
     ----------
     filename : str
-        The file
+        The path to the file to be checked for CIFTI format
+    debug : bool, optional
+        Enable debug output to see intermediate processing information. Default is False

     Returns
     -------
-
-    True if the file header indicates this is a CIFTI file
-
+    bool
+        True if the file header indicates this is a CIFTI file (intent code between 3000 and 3099),
+        False otherwise
+
+    Notes
+    -----
+    CIFTI files are identified by their intent code, which should be in the range [3000, 3100) for valid
+    CIFTI format files. This function uses nibabel to load the file and examine its NIfTI header properties.
+
+    Examples
+    --------
+    >>> is_cifti = checkifcifti('my_data.nii.gz')
+    >>> print(is_cifti)
+    True
+
+    >>> is_cifti = checkifcifti('my_data.nii.gz', debug=True)
+    >>> print(is_cifti)
+    True
     """
     theimg = nib.load(filename)
     thedict = vars(theimg)

@@ -635,19 +1303,36 @@ def checkifcifti(filename, debug=False):
     return False

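A standalone sketch of the intent-code test described in the Notes, assuming nibabel's `Cifti2Image` exposes the wrapping NIfTI-2 header as `.nifti_header` while plain NIfTI images use `.header` (the 3000-3099 range and the file name are taken from the docstring above):

```python
import nibabel as nib

def looks_like_cifti(filename):
    img = nib.load(filename)  # Cifti2Image for CIFTI input, NiftiXImage otherwise
    # Fall back to the plain NIfTI header when there is no .nifti_header.
    hdr = getattr(img, "nifti_header", img.header)
    intent = int(hdr["intent_code"])
    return 3000 <= intent < 3100

# looks_like_cifti("my_data.nii.gz")  # placeholder path
```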
-def checkiftext(filename):
-
+def checkiftext(filename: str) -> bool:
+    """
+    Check to see if the specified filename ends in '.txt'
+
+    This function determines whether a given filename has a '.txt' extension
+    by checking if the string ends with the specified suffix.

     Parameters
     ----------
     filename : str
-        The file name
+        The file name to check for '.txt' extension

     Returns
     -------
-
-    True if filename ends with '.txt'
-
+    bool
+        True if filename ends with '.txt', False otherwise
+
+    Notes
+    -----
+    This function performs a case-sensitive check. For case-insensitive
+    checking, convert the filename to lowercase before calling this function.
+
+    Examples
+    --------
+    >>> checkiftext("document.txt")
+    True
+    >>> checkiftext("image.jpg")
+    False
+    >>> checkiftext("notes.TXT")
+    False
     """
     if filename.endswith(".txt"):
         return True

@@ -655,19 +1340,41 @@ def checkiftext(filename):
     return False

-def getniftiroot(filename):
-
+def getniftiroot(filename: str) -> str:
+    """
+    Strip a nifti filename down to the root with no extensions.
+
+    This function removes NIfTI file extensions (.nii or .nii.gz) from a filename,
+    returning only the root name without any extensions.

     Parameters
     ----------
     filename : str
-        The
+        The NIfTI filename to strip of extensions

     Returns
     -------
-
-    The
+    str
+        The filename without NIfTI extensions (.nii or .nii.gz)

+    Notes
+    -----
+    This function only removes the standard NIfTI extensions (.nii and .nii.gz).
+    For filenames without these extensions, the original filename is returned unchanged.
+
+    Examples
+    --------
+    >>> getniftiroot("sub-01_task-rest_bold.nii")
+    'sub-01_task-rest_bold'
+
+    >>> getniftiroot("anatomical.nii.gz")
+    'anatomical'
+
+    >>> getniftiroot("image.nii.gz")
+    'image'
+
+    >>> getniftiroot("data.txt")
+    'data.txt'
     """
     if filename.endswith(".nii"):
         return filename[:-4]

@@ -677,21 +1384,39 @@ def getniftiroot(filename):
     return filename

-def fmriheaderinfo(niftifilename):
-
+def fmriheaderinfo(niftifilename: str) -> Tuple[NDArray, NDArray]:
+    """
+    Retrieve the header information from a nifti file.
+
+    This function extracts repetition time and timepoints information from a NIfTI file header.
+    The repetition time is returned in seconds, and the number of timepoints is extracted
+    from the header dimensions.

     Parameters
     ----------
     niftifilename : str
-        The name of the
+        The name of the NIfTI file to read header information from.

     Returns
     -------
-
-
-
-
-
+    tuple of (NDArray, NDArray)
+        A tuple containing:
+        - thesizes : NDArray
+            The header pixdim array; entry 4 holds the repetition time, in seconds
+        - thedims : NDArray
+            The header dim array; entry 4 holds the number of points along the time axis
+
+    Notes
+    -----
+    The function uses nibabel to load the NIfTI file and extracts header information
+    from the 'dim' and 'pixdim' fields. If the time unit is specified as milliseconds,
+    the repetition time is converted to seconds.
+
+    Examples
+    --------
+    >>> thesizes, thedims = fmriheaderinfo('subject_01.nii.gz')
+    >>> print(f"Repetition time: {thesizes[4]} seconds")
+    >>> print(f"Number of timepoints: {thedims[4]}")
     """
     nim = nib.load(niftifilename)
     hdr = nim.header.copy()

@@ -702,8 +1427,9 @@ def fmriheaderinfo(niftifilename):
     return thesizes, thedims

-def fmritimeinfo(niftifilename):
-
+def fmritimeinfo(niftifilename: str) -> Tuple[float, int]:
+    """
+    Retrieve the repetition time and number of timepoints from a nifti file

     Parameters
     ----------

@@ -717,6 +1443,18 @@ def fmritimeinfo(niftifilename):
     timepoints : int
         The number of points along the time axis

+    Notes
+    -----
+    This function extracts the repetition time (TR) and number of timepoints from
+    the NIfTI file header. The repetition time is extracted from the pixdim[4] field
+    and converted to seconds if necessary. The number of timepoints is extracted
+    from the dim[4] field.
+
+    Examples
+    --------
+    >>> tr, timepoints = fmritimeinfo('sub-01_task-rest_bold.nii.gz')
+    >>> print(f"Repetition time: {tr}s, Timepoints: {timepoints}")
+    Repetition time: 2.0s, Timepoints: 240
     """
     nim = nib.load(niftifilename)
     hdr = nim.header.copy()

@@ -730,8 +1468,9 @@ def fmritimeinfo(niftifilename):
     return tr, timepoints

|
-
def checkspacematch(hdr1, hdr2, tolerance=1.0e-3):
|
|
734
|
-
|
|
1471
|
+
def checkspacematch(hdr1: Any, hdr2: Any, tolerance: float = 1.0e-3) -> bool:
|
|
1472
|
+
"""
|
|
1473
|
+
Check the headers of two nifti files to determine if they cover the same volume at the same resolution (within tolerance)
|
|
735
1474
|
|
|
736
1475
|
Parameters
|
|
737
1476
|
----------
|
|
@@ -739,35 +1478,74 @@ def checkspacematch(hdr1, hdr2, tolerance=1.0e-3):
|
|
|
739
1478
|
The header of the first file
|
|
740
1479
|
hdr2 : nifti header structure
|
|
741
1480
|
The header of the second file
|
|
1481
|
+
tolerance : float, optional
|
|
1482
|
+
Tolerance for comparison. Default is 1.0e-3
|
|
742
1483
|
|
|
743
1484
|
Returns
|
|
744
1485
|
-------
|
|
745
|
-
|
|
1486
|
+
bool
|
|
746
1487
|
True if the spatial dimensions and resolutions of the two files match.
|
|
747
1488
|
|
|
1489
|
+
Notes
|
|
1490
|
+
-----
|
|
1491
|
+
This function performs two checks:
|
|
1492
|
+
1. Dimension matching using `checkspaceresmatch` on pixel dimensions (`pixdim`)
|
|
1493
|
+
2. Spatial dimension matching using `checkspacedimmatch` on array dimensions (`dim`)
|
|
1494
|
+
|
|
1495
|
+
Examples
|
|
1496
|
+
--------
|
|
1497
|
+
>>> import nibabel as nib
|
|
1498
|
+
>>> img1 = nib.load('file1.nii.gz')
|
|
1499
|
+
>>> img2 = nib.load('file2.nii.gz')
|
|
1500
|
+
>>> checkspacematch(img1.header, img2.header)
|
|
1501
|
+
True
|
|
748
1502
|
"""
|
|
749
1503
|
dimmatch = checkspaceresmatch(hdr1["pixdim"], hdr2["pixdim"], tolerance=tolerance)
|
|
750
1504
|
resmatch = checkspacedimmatch(hdr1["dim"], hdr2["dim"])
|
|
751
1505
|
return dimmatch and resmatch
|
|
752
1506
|
|
|
753
1507
|
|
|
754
|
-
def checkspaceresmatch(sizes1, sizes2, tolerance=1.0e-3):
|
|
755
|
-
|
|
1508
|
+
def checkspaceresmatch(sizes1: NDArray, sizes2: NDArray, tolerance: float = 1.0e-3) -> bool:
|
|
1509
|
+
"""
|
|
1510
|
+
Check the spatial pixdims of two nifti files to determine if they have the same resolution (within tolerance)
|
|
756
1511
|
|
|
757
1512
|
Parameters
|
|
758
1513
|
----------
|
|
759
|
-
sizes1 :
|
|
760
|
-
The size array from the first nifti file
|
|
761
|
-
sizes2 :
|
|
762
|
-
The size array from the second nifti file
|
|
763
|
-
tolerance: float
|
|
764
|
-
The fractional difference that is permissible between the two sizes that will still match
|
|
1514
|
+
sizes1 : array_like
|
|
1515
|
+
The size array from the first nifti file, typically containing spatial dimensions and pixel sizes
|
|
1516
|
+
sizes2 : array_like
|
|
1517
|
+
The size array from the second nifti file, typically containing spatial dimensions and pixel sizes
|
|
1518
|
+
tolerance : float, optional
|
|
1519
|
+
The fractional difference that is permissible between the two sizes that will still match,
|
|
1520
|
+
default is 1.0e-3 (0.1%)
|
|
765
1521
|
|
|
766
1522
|
Returns
|
|
767
1523
|
-------
|
|
768
|
-
|
|
769
|
-
True if the spatial resolutions of the two files match
|
|
770
|
-
|
|
1524
|
+
bool
|
|
1525
|
+
True if the spatial resolutions of the two files match within the specified tolerance,
|
|
1526
|
+
False otherwise
|
|
1527
|
+
|
|
1528
|
+
Notes
|
|
1529
|
+
-----
|
|
1530
|
+
This function compares the spatial dimensions (indices 1-3) of two nifti file size arrays.
|
|
1531
|
+
The comparison is performed using fractional difference: |sizes1[i] - sizes2[i]| / sizes1[i].
|
|
1532
|
+
Only dimensions 1-3 are compared (typically x, y, z spatial dimensions).
|
|
1533
|
+
The function returns False immediately upon finding any dimension that exceeds the tolerance.
|
|
1534
|
+
|
|
1535
|
+
Examples
|
|
1536
|
+
--------
|
|
1537
|
+
>>> import numpy as np
|
|
1538
|
+
>>> sizes1 = np.array([1.0, 2.0, 2.0, 2.0])
|
|
1539
|
+
>>> sizes2 = np.array([1.0, 2.0005, 2.0005, 2.0005])
|
|
1540
|
+
>>> checkspaceresmatch(sizes1, sizes2, tolerance=1e-3)
|
|
1541
|
+
True
|
|
1542
|
+
|
|
1543
|
+
>>> sizes1 = np.array([1.0, 2.0, 2.0, 2.0])
|
|
1544
|
+
>>> sizes2 = np.array([1.0, 2.5, 2.5, 2.5])
|
|
1545
|
+
>>> checkspaceresmatch(sizes1, sizes2, tolerance=1e-3)
|
|
1546
|
+
File spatial resolutions do not match within tolerance of 0.001
|
|
1547
|
+
size of dimension 1: 2.0 != 2.5 (0.25 difference)
|
|
1548
|
+
False
|
|
771
1549
|
"""
|
|
772
1550
|
for i in range(1, 4):
|
|
773
1551
|
fracdiff = np.fabs(sizes1[i] - sizes2[i]) / sizes1[i]
|
|
@@ -775,24 +1553,50 @@ def checkspaceresmatch(sizes1, sizes2, tolerance=1.0e-3):
|
|
|
775
1553
|
print(f"File spatial resolutions do not match within tolerance of {tolerance}")
|
|
776
1554
|
print(f"\tsize of dimension {i}: {sizes1[i]} != {sizes2[i]} ({fracdiff} difference)")
|
|
777
1555
|
return False
|
|
778
|
-
|
|
779
|
-
return True
|
|
1556
|
+
return True
|
|
780
1557
|
|
|
781
1558
|
|
|
782
|
-
def checkspacedimmatch(dims1, dims2, verbose=False):
|
|
783
|
-
|
|
1559
|
+
def checkspacedimmatch(dims1: NDArray, dims2: NDArray, verbose: bool = False) -> bool:
|
|
1560
|
+
"""
|
|
1561
|
+
Check the dimension arrays of two nifti files to determine if they cover the same number of voxels in each dimension.
|
|
784
1562
|
|
|
785
1563
|
Parameters
|
|
786
1564
|
----------
|
|
787
|
-
dims1 :
|
|
788
|
-
The dimension array from the first nifti file
|
|
789
|
-
|
|
790
|
-
|
|
1565
|
+
dims1 : NDArray
|
|
1566
|
+
The dimension array from the first nifti file. Should contain spatial dimensions
|
|
1567
|
+
(typically the first dimension is the number of time points, and dimensions 1-3
|
|
1568
|
+
represent x, y, z spatial dimensions).
|
|
1569
|
+
dims2 : NDArray
|
|
1570
|
+
The dimension array from the second nifti file. Should contain spatial dimensions
|
|
1571
|
+
(typically the first dimension is the number of time points, and dimensions 1-3
|
|
1572
|
+
represent x, y, z spatial dimensions).
|
|
1573
|
+
verbose : bool, optional
|
|
1574
|
+
Enable verbose output. Default is False. When True, prints detailed information
|
|
1575
|
+
about dimension mismatches.
|
|
791
1576
|
|
|
792
1577
|
Returns
|
|
793
1578
|
-------
|
|
794
|
-
|
|
795
|
-
True if the spatial dimensions of the two files match.
|
|
1579
|
+
bool
|
|
1580
|
+
True if the spatial dimensions (dimensions 1-3) of the two files match.
|
|
1581
|
+
False if any of the spatial dimensions differ between the files.
|
|
1582
|
+
|
|
1583
|
+
Notes
|
|
1584
|
+
-----
|
|
1585
|
+
This function compares dimensions 1 through 3 (inclusive) of the two dimension arrays,
|
|
1586
|
+
which typically represent the spatial dimensions (x, y, z) of the nifti files.
|
|
1587
|
+
The first dimension is usually the number of time points and is not compared.
|
|
1588
|
+
|
|
1589
|
+
Examples
|
|
1590
|
+
--------
|
|
1591
|
+
>>> import numpy as np
|
|
1592
|
+
>>> dims1 = np.array([10, 64, 64, 32])
|
|
1593
|
+
>>> dims2 = np.array([10, 64, 64, 32])
|
|
1594
|
+
>>> checkspacedimmatch(dims1, dims2)
|
|
1595
|
+
True
|
|
1596
|
+
|
|
1597
|
+
>>> dims3 = np.array([10, 64, 64, 33])
|
|
1598
|
+
>>> checkspacedimmatch(dims1, dims3)
|
|
1599
|
+
False
|
|
796
1600
|
"""
|
|
797
1601
|
for i in range(1, 4):
|
|
798
1602
|
if dims1[i] != dims2[i]:
|
|
@@ -800,29 +1604,57 @@ def checkspacedimmatch(dims1, dims2, verbose=False):
|
|
|
800
1604
|
print("File spatial voxels do not match")
|
|
801
1605
|
print("dimension ", i, ":", dims1[i], "!=", dims2[i])
|
|
802
1606
|
return False
|
|
803
|
-
|
|
804
|
-
return True
|
|
1607
|
+
return True
|
|
805
1608
|
|
|
806
1609
|
|
|
-def checktimematch(
-
+def checktimematch(
+    dims1: NDArray,
+    dims2: NDArray,
+    numskip1: int = 0,
+    numskip2: int = 0,
+    verbose: bool = False,
+) -> bool:
+    """
+    Check the dimensions of two nifti files to determine if they cover the same number of timepoints.
+
+    This function compares the time dimensions of two NIfTI files after accounting for skipped timepoints
+    at the beginning of each file. It is commonly used to verify temporal consistency between paired
+    NIfTI datasets.

     Parameters
     ----------
-    dims1 :
-        The dimension array from the first
-    dims2 :
-        The dimension array from the second
+    dims1 : NDArray
+        The dimension array from the first NIfTI file. The time dimension is expected to be at index 4.
+    dims2 : NDArray
+        The dimension array from the second NIfTI file. The time dimension is expected to be at index 4.
     numskip1 : int, optional
-        Number of timepoints skipped at the beginning of file 1
+        Number of timepoints skipped at the beginning of file 1. Default is 0.
     numskip2 : int, optional
-        Number of timepoints skipped at the beginning of file 2
+        Number of timepoints skipped at the beginning of file 2. Default is 0.
+    verbose : bool, optional
+        Enable verbose output. If True, prints detailed information about the comparison.
+        Default is False.

     Returns
     -------
-
-    True if the time
-
+    bool
+        True if the effective time dimensions of the two files match after accounting for skipped
+        timepoints, False otherwise.
+
+    Notes
+    -----
+    The function assumes that the time dimension is stored at index 4 of the dimension arrays.
+    This is typical for NIfTI files where dimensions are ordered as [x, y, z, t, ...].
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dims1 = np.array([64, 64, 32, 1, 100, 1])
+    >>> dims2 = np.array([64, 64, 32, 1, 95, 1])
+    >>> checktimematch(dims1, dims2, numskip1=5, numskip2=0)
+    True
+    >>> checktimematch(dims1, dims2, numskip1=3, numskip2=0)
+    False
     """
     if (dims1[4] - numskip1) != (dims2[4] - numskip2):
         if verbose:

@@ -845,20 +1677,183 @@ def checktimematch(dims1, dims2, numskip1=0, numskip2=0, verbose=False):
     return True

+def checkdatamatch(
+    data1: NDArray,
+    data2: NDArray,
+    absthresh: float = 1e-12,
+    msethresh: float = 1e-12,
+    debug: bool = False,
+) -> Tuple[bool, bool]:
+    """
+    Check if two data arrays match within specified tolerances.
+
+    This function compares two numpy arrays using both mean squared error (MSE) and
+    maximum absolute difference metrics to determine if they match within given thresholds.
+
+    Parameters
+    ----------
+    data1 : NDArray
+        First data array to compare
+    data2 : NDArray
+        Second data array to compare
+    absthresh : float, optional
+        Absolute difference threshold. Default is 1e-12
+    msethresh : float, optional
+        Mean squared error threshold. Default is 1e-12
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    tuple of (bool, bool)
+        msematch : bool
+            True if mean squared error is below msethresh threshold
+        absmatch : bool
+            True if maximum absolute difference is below absthresh threshold
+
+    Notes
+    -----
+    The function uses the package's `mse` helper for the mean squared error calculation
+    and `np.max(np.fabs(data1 - data2))` for the maximum absolute difference.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data1 = np.array([1.0, 2.0, 3.0])
+    >>> data2 = np.array([1.000000000001, 2.000000000001, 3.000000000001])
+    >>> checkdatamatch(data1, data2)
+    (True, True)
+
+    >>> checkdatamatch(data1, data2, absthresh=1e-15)
+    (True, False)
+    """
+    msediff = mse(data1, data2)
+    absdiff = np.max(np.fabs(data1 - data2))
+    if debug:
+        print(f"msediff {msediff}, absdiff {absdiff}")
+    return msediff < msethresh, absdiff < absthresh

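A self-contained sketch of the two metrics (here `mse` is written out with numpy, as a stand-in for the package helper used above):

```python
import numpy as np

def mse(data1, data2):
    # Mean squared error, standing in for the package helper.
    return np.mean(np.square(data1 - data2))

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-13

print(mse(a, b) < 1e-12)               # True: MSE is on the order of 1e-26
print(np.max(np.fabs(a - b)) < 1e-12)  # True: max deviation is ~1e-13
```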
+def checkniftifilematch(
+    filename1: str,
+    filename2: str,
+    absthresh: float = 1e-12,
+    msethresh: float = 1e-12,
+    spacetolerance: float = 1e-3,
+    debug: bool = False,
+) -> bool:
+    """
+    Check if two NIFTI files match in dimensions, resolution, and data values.
+
+    This function compares two NIFTI files for spatial compatibility and data
+    equivalence. It verifies that the files have matching spatial dimensions,
+    resolution, time dimensions, and that their voxel data values are within
+    specified tolerances.
+
+    Parameters
+    ----------
+    filename1 : str
+        Path to the first NIFTI file to be compared.
+    filename2 : str
+        Path to the second NIFTI file to be compared.
+    absthresh : float, optional
+        Absolute difference threshold for voxel-wise data comparison.
+        If any voxel differs by more than this value, the files are considered
+        not to match. Default is 1e-12.
+    msethresh : float, optional
+        Mean squared error threshold for data comparison. If the MSE between
+        the data arrays exceeds this value, the files are considered not to match.
+        Default is 1e-12.
+    spacetolerance : float, optional
+        Tolerance for comparing spatial dimensions and resolution between files.
+        Default is 1e-3.
+    debug : bool, optional
+        If True, enables debug output to assist in troubleshooting.
+        Default is False.
+
+    Returns
+    -------
+    bool
+        True if all checks (spatial, temporal, and data) pass within the specified
+        tolerances; False otherwise.
+
+    Notes
+    -----
+    The function internally calls several helper functions:
+    - `readfromnifti`: Reads NIFTI file metadata and data.
+    - `checkspacematch`: Compares spatial dimensions and resolution.
+    - `checktimematch`: Compares time dimensions.
+    - `checkdatamatch`: Compares data values using MSE and absolute difference.
+
+    Examples
+    --------
+    >>> match = checkniftifilematch('file1.nii', 'file2.nii')
+    >>> print(match)
+    True
+
+    >>> match = checkniftifilematch('file1.nii', 'file2.nii', absthresh=1e-10)
+    >>> print(match)
+    False
+    """
+    im1, im1_data, im1_hdr, im1_dims, im1_sizes = readfromnifti(filename1)
+    im2, im2_data, im2_hdr, im2_dims, im2_sizes = readfromnifti(filename2)
+    spacematch = checkspacematch(im1_hdr, im2_hdr, tolerance=spacetolerance)
+    if not spacematch:
+        print(
+            f"file spatial dimensions or resolution do not match within tolerance {spacetolerance}"
+        )
+        return False
+    timematch = checktimematch(im1_dims, im2_dims)
+    if not timematch:
+        print("file time dimensions do not match")
+        return False
+    msedatamatch, absdatamatch = checkdatamatch(
+        im1_data,
+        im2_data,
+        absthresh=absthresh,
+        msethresh=msethresh,
+        debug=debug,
+    )
+    if not msedatamatch:
+        print(f"file data mse does not match within tolerance {msethresh}")
+        return False
+    if not absdatamatch:
+        print(f"files differ by at least {absthresh} in at least one voxel")
+        return False
+    return True

1826
|
# --------------------------- non-NIFTI file I/O functions ------------------------------------------
|
|
849
|
-
def checkifparfile(filename):
|
|
850
|
-
|
|
1827
|
+
def checkifparfile(filename: str) -> bool:
|
|
1828
|
+
"""
|
|
1829
|
+
Checks to see if a file is an FSL style motion parameter file
|
|
1830
|
+
|
|
1831
|
+
This function determines whether a given filename corresponds to an FSL-style
|
|
1832
|
+
motion parameter file by checking if it ends with the '.par' extension.
|
|
851
1833
|
|
|
852
1834
|
Parameters
|
|
853
1835
|
----------
|
|
854
1836
|
filename : str
|
|
855
|
-
The name of the file in question.
|
|
1837
|
+
The name of the file in question, including the file extension.
|
|
856
1838
|
|
|
857
1839
|
Returns
|
|
858
1840
|
-------
|
|
859
|
-
|
|
860
|
-
True if filename ends
|
|
861
|
-
|
|
1841
|
+
bool
|
|
1842
|
+
True if the filename ends with '.par', False otherwise.
|
|
1843
|
+
|
|
1844
|
+
Notes
|
|
1845
|
+
-----
|
|
1846
|
+
FSL (FMRIB Software Library) motion parameter files typically have the '.par'
|
|
1847
|
+
extension and contain motion correction parameters for neuroimaging data.
|
|
1848
|
+
|
|
1849
|
+
Examples
|
|
1850
|
+
--------
|
|
1851
|
+
>>> checkifparfile("subject1.par")
|
|
1852
|
+
True
|
|
1853
|
+
>>> checkifparfile("subject1.txt")
|
|
1854
|
+
False
|
|
1855
|
+
>>> checkifparfile("motion.par")
|
|
1856
|
+
True
|
|
862
1857
|
"""
|
|
863
1858
|
if filename.endswith(".par"):
|
|
864
1859
|
return True
|
|
@@ -866,7 +1861,42 @@ def checkifparfile(filename):
|
|
|
866
1861
|
return False
|
|
867
1862
|
|
|
868
1863
|
|
|
-def readconfounds(filename, debug=False):
+def readconfounds(filename: str, debug: bool = False) -> Dict[str, NDArray]:
+    """
+    Read confound regressors from a text file.
+
+    This function reads confound regressors from a text file and returns them as a dictionary
+    mapping confound names to timecourse arrays. The function handles both structured column
+    names and automatically generated names for cases where column information is missing.
+
+    Parameters
+    ----------
+    filename : str
+        Path to the confounds file
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    dict of str to NDArray
+        Dictionary mapping confound names to timecourse arrays. Each key is a confound name
+        and each value is a 1D numpy array containing the timecourse data for that confound.
+
+    Notes
+    -----
+    The function internally calls `readvectorsfromtextfile` to parse the input file, which
+    returns metadata including sample rate, start time, column names, and the actual data.
+    If column names are not present in the file, automatically generated names are created
+    in the format 'confound_000', 'confound_001', etc.
+
+    Examples
+    --------
+    >>> confounds = readconfounds('confounds.txt')
+    >>> print(confounds.keys())
+    dict_keys(['motion_000', 'motion_001', 'motion_002', 'scrubbing'])
+    >>> print(confounds['motion_000'].shape)
+    (1000,)
+    """
     (
         thesamplerate,
         thestarttime,

@@ -885,19 +1915,46 @@ def readconfounds(filename, debug=False):
     return theconfounddict

-def readparfile(filename):
-
+def readparfile(filename: str) -> Dict[str, NDArray]:
+    """
+    Read motion parameters from an FSL-style .par file.
+
+    This function reads motion parameters from FSL-style .par files and returns
+    them as a dictionary with timecourses keyed by parameter names.

     Parameters
     ----------
     filename : str
-        The name of the file
+        The name of the FSL-style .par file to read. This file should contain
+        motion parameters in the standard FSL format with 6 columns representing
+        translation (X, Y, Z) and rotation (RotX, RotY, RotZ) parameters.

     Returns
     -------
-
-
-
+    dict of NDArray
+        Dictionary containing the motion parameters as timecourses. Keys are:
+        - 'X': translation along x-axis
+        - 'Y': translation along y-axis
+        - 'Z': translation along z-axis
+        - 'RotX': rotation around x-axis
+        - 'RotY': rotation around y-axis
+        - 'RotZ': rotation around z-axis
+        Each value is a 1D numpy array containing the timecourse for that parameter.
+
+    Notes
+    -----
+    The .par file format expected by this function is the standard FSL format
+    where each row represents a timepoint and each column represents a motion
+    parameter. The function assumes the file contains exactly 6 columns in the
+    order: X, Y, Z, RotX, RotY, RotZ.
+
+    Examples
+    --------
+    >>> motion_data = readparfile('motion.par')
+    >>> print(motion_data.keys())
+    dict_keys(['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ'])
+    >>> print(motion_data['X'].shape)
+    (100,)  # assuming 100 timepoints
     """
     labels = ["X", "Y", "Z", "RotX", "RotY", "RotZ"]
     motiontimeseries = readvecs(filename)

@@ -907,8 +1964,9 @@ def readparfile(filename):
     return motiondict

-def readmotion(filename, tr=1.0, colspec=None):
-
+def readmotion(filename: str, tr: float = 1.0, colspec: Optional[str] = None) -> Dict[str, Any]:
+    """
+    Read motion regressors from a file (.par, .tsv, or other text format).

     Parameters
     ----------

@@ -1048,25 +2106,41 @@ def readmotion(filename, tr=1.0, colspec=None):
     return motiondict

-def sliceinfo(slicetimes, tr):
-
+def sliceinfo(slicetimes: NDArray, tr: float) -> Tuple[int, float, NDArray]:
+    """
+    Find out what slicetimes we have, their spacing, and which timepoint each slice occurs at. This assumes
     uniform slice time spacing, but supports any slice acquisition order and multiband acquisitions.

     Parameters
     ----------
     slicetimes : 1d float array
         List of all the slicetimes relative to the start of the TR
-    tr: float
+    tr : float
         The TR of the acquisition

     Returns
     -------
     numsteps : int
         The number of unique slicetimes in the list
-    stepsize: float
+    stepsize : float
         The stepsize in seconds between subsequent slice acquisitions
-    sliceoffsets: 1d int array
+    sliceoffsets : 1d int array
         Which acquisition time each slice was acquired at
+
+    Notes
+    -----
+    This function assumes uniform slice time spacing and works with any slice acquisition order
+    and multiband acquisitions. The function determines the minimum time step between slices
+    and maps each slice to its corresponding timepoint within the TR.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> slicetimes = np.array([0.0, 0.1, 0.2, 0.3])
+    >>> tr = 1.0
+    >>> numsteps, stepsize, sliceoffsets = sliceinfo(slicetimes, tr)
+    >>> print(numsteps, stepsize, sliceoffsets)
+    4 0.1 [0 1 2 3]
     """
     sortedtimes = np.sort(slicetimes)
     diffs = sortedtimes[1:] - sortedtimes[0:-1]

@@ -1076,7 +2150,49 @@ def sliceinfo(slicetimes, tr):
     return numsteps, minstep, sliceoffsets

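For a multiband acquisition the same slice time appears on several slices; a standalone sketch of the mapping the docstring describes (pure numpy, independent of rapidtide's exact implementation):

```python
import numpy as np

# Multiband factor 2: eight slices, four unique acquisition times.
slicetimes = np.array([0.0, 0.2, 0.4, 0.6, 0.0, 0.2, 0.4, 0.6])

uniquetimes = np.unique(slicetimes)       # [0.0, 0.2, 0.4, 0.6]
numsteps = len(uniquetimes)               # 4
stepsize = np.min(np.diff(uniquetimes))   # ~0.2 s between acquisitions
sliceoffsets = np.round(slicetimes / stepsize).astype(int)

print(numsteps, stepsize, sliceoffsets)   # 4 ~0.2 [0 1 2 3 0 1 2 3]
```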
-def getslicetimesfromfile(slicetimename):
+def getslicetimesfromfile(slicetimename: str) -> Tuple[NDArray, bool, bool]:
+    """
+    Read slice timing information from a file.
+
+    This function reads slice timing data from either a JSON file (BIDS sidecar format)
+    or a text file containing slice timing values. It returns the slice times along
+    with metadata indicating how the data was processed.
+
+    Parameters
+    ----------
+    slicetimename : str
+        Path to the slice timing file. Can be either a JSON file (BIDS sidecar format)
+        or a text file containing slice timing values.
+
+    Returns
+    -------
+    tuple of (NDArray, bool, bool)
+        A tuple containing:
+        - slicetimes : NDArray
+            Array of slice timing values as floats
+        - normalizedtotr : bool
+            True if the slice times were normalized to TR (time resolution),
+            False if they were read directly from a JSON file
+        - fileisbidsjson : bool
+            True if the input file was a BIDS JSON sidecar file,
+            False if it was a text file
+
+    Notes
+    -----
+    - For JSON files, the function expects a "SliceTiming" key in the JSON dictionary
+    - For text files, the function uses readvec() to parse the slice timing values
+    - If a JSON file doesn't contain the required "SliceTiming" key, the function
+      prints an error message and exits the program
+    - Slice timing values are converted to float64 dtype for precision
+
+    Examples
+    --------
+    >>> slicetimes, normalized, is_bids = getslicetimesfromfile("sub-01_task-rest_bold.json")
+    >>> print(slicetimes)
+    [0.0, 0.1, 0.2, 0.3, 0.4]
+    >>> print(normalized, is_bids)
+    (False, True)
+    """
     filebase, extension = os.path.splitext(slicetimename)
     if extension == ".json":
         jsoninfodict = readdictfromjson(slicetimename)

@@ -1086,28 +2202,52 @@ def getslicetimesfromfile(slicetimename):
             for idx, thetime in enumerate(slicetimelist):
                 slicetimes[idx] = float(thetime)
             normalizedtotr = False
+            fileisbidsjson = True
         except KeyError:
             print(slicetimename, "is not a valid BIDS sidecar file")
             sys.exit()
     else:
         slicetimes = readvec(slicetimename)
         normalizedtotr = True
-
+        fileisbidsjson = False
+    return slicetimes, normalizedtotr, fileisbidsjson

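A minimal sketch of the BIDS-sidecar branch using only the standard library (a toy sidecar is written first so the snippet is self-contained; the key name "SliceTiming" comes from the BIDS specification):

```python
import json

import numpy as np

# Write a toy BIDS bold sidecar to read back.
with open("demo_bold.json", "w") as fp:
    json.dump({"RepetitionTime": 2.0, "SliceTiming": [0.0, 0.1, 0.2, 0.3, 0.4]}, fp)

with open("demo_bold.json") as fp:
    sidecar = json.load(fp)

slicetimes = np.asarray(sidecar["SliceTiming"], dtype=np.float64)
print(slicetimes)  # [0.  0.1 0.2 0.3 0.4], seconds relative to the TR start
```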
-def readbidssidecar(inputfilename):
-
+def readbidssidecar(inputfilename: str) -> Dict[str, Any]:
+    """
+    Read key value pairs out of a BIDS sidecar file
+
+    This function reads JSON sidecar files commonly used in BIDS (Brain Imaging Data Structure)
+    datasets and returns the key-value pairs as a dictionary.

     Parameters
     ----------
     inputfilename : str
-        The name of the sidecar file (with extension)
+        The name of the sidecar file (with extension). The function will automatically
+        look for a corresponding .json file with the same base name.

     Returns
     -------
-
-
-
+    dict
+        A dictionary containing the key-value pairs from the JSON sidecar file.
+        Returns an empty dictionary if the sidecar file does not exist.
+
+    Notes
+    -----
+    The function expects the sidecar file to have the same base name as the input file
+    but with a .json extension. For example, if inputfilename is "sub-01_task-rest_bold.nii.gz",
+    the function will look for "sub-01_task-rest_bold.json".
+
+    Examples
+    --------
+    >>> sidecar_data = readbidssidecar("sub-01_task-rest_bold.nii.gz")
+    >>> print(sidecar_data['RepetitionTime'])
+    2.0
+
+    >>> sidecar_data = readbidssidecar("nonexistent_file.nii.gz")
+    sidecar file does not exist
+    >>> print(sidecar_data)
+    {}
     """
     thefileroot, theext = os.path.splitext(inputfilename)
     if os.path.exists(thefileroot + ".json"):

@@ -1119,16 +2259,48 @@ def readbidssidecar(inputfilename):
     return {}

-def writedicttojson(thedict, thefilename):
-
+def writedicttojson(thedict: Dict[str, Any], thefilename: str) -> None:
+    """
+    Write key-value pairs to a JSON file with proper numpy type handling.
+
+    This function writes a dictionary to a JSON file, automatically converting
+    numpy data types to their Python equivalents to ensure proper JSON serialization.

     Parameters
     ----------
-    thedict : dict
-
+    thedict : dict[str, Any]
+        Dictionary containing key-value pairs to be written to JSON file
     thefilename : str
-
+        Path and name of the output JSON file (including extension)

+    Returns
+    -------
+    None
+        This function does not return any value
+
+    Notes
+    -----
+    The function automatically converts numpy data types:
+    - numpy.integer → Python int
+    - numpy.floating → Python float
+    - NDArray → Python list
+
+    The output JSON file will be formatted with:
+    - Sorted keys
+    - 4-space indentation
+    - Comma-separated values without spaces
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = {
+    ...     'name': 'John',
+    ...     'age': np.int32(30),
+    ...     'score': np.float64(95.5),
+    ...     'values': np.array([1, 2, 3, 4])
+    ... }
+    >>> writedicttojson(data, 'output.json')
+    >>> # Creates output.json with properly formatted data
     """
     thisdict = {}
     for key in thedict:

@@ -1146,19 +2318,41 @@ def writedicttojson(thedict, thefilename):
     )

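The numpy-to-Python conversion the Notes describe can also be done with a `json.dump` `default` hook; a standalone sketch of that pattern (not rapidtide's implementation, which converts key by key):

```python
import json

import numpy as np

def numpy_default(obj):
    # Fallback converter handed to json.dumps for types it cannot serialize.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    raise TypeError(f"{type(obj)} is not JSON serializable")

data = {"age": np.int32(30), "score": np.float64(95.5), "values": np.arange(4)}
print(json.dumps(data, default=numpy_default, sort_keys=True, indent=4,
                 separators=(",", ": ")))
```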
-def readdictfromjson(inputfilename):
-
+def readdictfromjson(inputfilename: str) -> Dict[str, Any]:
+    """
+    Read key value pairs out of a json file.
+
+    This function reads a JSON file and returns its contents as a dictionary.
+    The function automatically appends the ".json" extension to the input filename
+    if it's not already present.

     Parameters
     ----------
     inputfilename : str
-        The name of the json file (with extension)
+        The name of the json file (with or without extension). If the extension
+        is not provided, ".json" will be appended automatically.

     Returns
     -------
-
-
-
+    dict[str, Any]
+        A dictionary containing the key-value pairs from the JSON file. Returns
+        an empty dictionary if the specified file does not exist.
+
+    Notes
+    -----
+    - The function checks for the existence of the file before attempting to read it
+    - If the input filename doesn't have a ".json" extension, it will be automatically added
+    - If the file doesn't exist, a message will be printed and an empty dictionary returned
+
+    Examples
+    --------
+    >>> data = readdictfromjson("config")
+    >>> print(data)
+    {'key1': 'value1', 'key2': 'value2'}
+
+    >>> data = readdictfromjson("data.json")
+    >>> print(data)
+    {'name': 'John', 'age': 30}
     """
     thefileroot, theext = os.path.splitext(inputfilename)
     if os.path.exists(thefileroot + ".json"):

@@ -1170,27 +2364,53 @@ def readdictfromjson(inputfilename):
     return {}

-def readlabelledtsv(inputfilename, compressed=False):
-
+def readlabelledtsv(inputfilename: str, compressed: bool = False) -> Dict[str, NDArray]:
+    """
+    Read time series out of an fmriprep confounds tsv file

     Parameters
     ----------
     inputfilename : str
-        The root name of the tsv (
+        The root name of the tsv file (without extension)
+    compressed : bool, optional
+        If True, reads from a gzipped tsv file (.tsv.gz), otherwise reads from
+        a regular tsv file (.tsv). Default is False.

     Returns
     -------
-
-
-
-
-
+    dict of str to NDArray
+        Dictionary containing all the timecourses in the file, keyed by the
+        column names from the first row of the tsv file. Each value is a
+        numpy array containing the time series data for that column.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the specified tsv file (with appropriate extension) does not exist.
+
+    Notes
+    -----
+    - NaN values in the input file are replaced with 0.0
+    - The function supports both compressed (.tsv.gz) and uncompressed (.tsv) files
+
+    Examples
+    --------
+    >>> confounds = readlabelledtsv("sub-01_task-rest_bold_confounds")
+    >>> print(confounds.keys())
+    dict_keys(['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'])
+    >>> print(confounds['trans_x'].shape)
+    (100,)
     """
     confounddict = {}
     if compressed:
         theext = ".tsv.gz"
     else:
         theext = ".tsv"
+
+    if not os.path.isfile(inputfilename + theext):
+        raise FileNotFoundError(f"Labelled tsv file {inputfilename + theext} does not exist")
+
     df = pd.read_csv(inputfilename + theext, sep="\t", quotechar='"')

     # replace nans with 0

@@ -1201,23 +2421,53 @@ def readlabelledtsv(inputfilename, compressed=False):
     return confounddict

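A standalone sketch of the read-and-clean step with pandas (a synthetic confounds file is written first so the snippet runs on its own):

```python
import numpy as np
import pandas as pd

# Write a tiny fmriprep-style confounds tsv to read back.
pd.DataFrame({"trans_x": [0.1, np.nan, 0.3], "rot_x": [0.0, 0.01, np.nan]}).to_csv(
    "confounds_demo.tsv", sep="\t", index=False
)

df = pd.read_csv("confounds_demo.tsv", sep="\t", quotechar='"')
df = df.fillna(0.0)  # NaNs become 0.0, as the Notes above describe

confounddict = {name: df[name].to_numpy() for name in df.columns}
print(confounddict["trans_x"])  # [0.1 0.  0.3]
```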
1204
|
-
def readcsv(inputfilename, debug=False):
|
|
1205
|
-
|
|
2424
|
+
def readcsv(inputfilename: str, debug: bool = False) -> Dict[str, NDArray]:
|
|
2425
|
+
"""
|
|
2426
|
+
Read time series out of an unlabelled csv file.
|
|
2427
|
+
|
|
2428
|
+
This function reads a CSV file and returns a dictionary of time series,
|
|
2429
|
+
where keys are column names (or generated names if no header is present)
|
|
2430
|
+
and values are NumPy arrays of the corresponding time series data.
|
|
1206
2431
|
|
|
1207
2432
|
Parameters
|
|
1208
2433
|
----------
|
|
1209
2434
|
inputfilename : str
|
|
1210
|
-
The root name of the
|
|
2435
|
+
The root name of the CSV file (without the '.csv' extension).
|
|
2436
|
+
debug : bool, optional
|
|
2437
|
+
If True, prints debug information about whether a header line is detected,
|
|
2438
|
+
by default False.
|
|
1211
2439
|
|
|
1212
2440
|
Returns
|
|
1213
2441
|
-------
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
2442
|
+
dict of str to NDArray
|
|
2443
|
+
A dictionary where keys are column names (or generated names like "col0", "col1", etc.)
|
|
2444
|
+
and values are NumPy arrays containing the time series data. If the file does not exist
|
|
2445
|
+
or is invalid, an empty dictionary is returned.
|
|
2446
|
+
|
|
2447
|
+
Notes
|
|
2448
|
+
-----
|
|
2449
|
+
- If the first column of the CSV contains non-numeric values, it is assumed to be a header.
|
|
2450
|
+
- If the first column is numeric, it is treated as part of the data, and columns are
|
|
2451
|
+
named "col0", "col1", etc.
|
|
2452
|
+
- NaN values in the CSV are replaced with 0.0.
|
|
2453
|
+
- If the file does not exist or cannot be read, a FileNotFoundError is raised.
|
|
2454
|
+
|
|
2455
|
+
Examples
|
|
2456
|
+
--------
|
|
2457
|
+
>>> data = readcsv("timeseries_data")
|
|
2458
|
+
>>> print(data.keys())
|
|
2459
|
+
['col0', 'col1', 'col2']
|
|
2460
|
+
>>> print(data['col0'])
|
|
2461
|
+
[1.0, 2.0, 3.0, 4.0]
|
|
2462
|
+
|
|
2463
|
+
>>> data = readcsv("labeled_data", debug=True)
|
|
2464
|
+
there is a header line
|
|
2465
|
+
>>> print(data.keys())
|
|
2466
|
+
['time', 'signal1', 'signal2']
|
|
1220
2467
|
"""
|
|
2468
|
+
if not os.path.isfile(inputfilename + ".csv"):
|
|
2469
|
+
raise FileNotFoundError(f"csv file {inputfilename}.csv does not exist")
|
|
2470
|
+
|
|
1221
2471
|
timeseriesdict = {}
|
|
1222
2472
|
|
|
1223
2473
|
# Read the data in initially with no header
|
|
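The header auto-detection described in the Notes can be pictured with a short sketch (an illustration of the idea only, not the package's code):

import pandas as pd

# If the first cell of row 0 fails float conversion, row 0 is a header;
# otherwise synthesize "col0", "col1", ... names for the columns.
df = pd.read_csv("timeseries_data.csv", header=None)
try:
    float(df.iloc[0, 0])
    names = [f"col{i}" for i in range(len(df.columns))]
except ValueError:
    names = list(df.iloc[0])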
@@ -1252,23 +2502,47 @@ def readcsv(inputfilename, debug=False):
     return timeseriesdict


-def readfslmat(inputfilename, debug=False):
-
+def readfslmat(inputfilename: str, debug: bool = False) -> Dict[str, NDArray]:
+    """
+    Read time series out of an FSL design.mat file

     Parameters
     ----------
     inputfilename : str
-        The root name of the
+        The root name of the .mat file (no extension)
+    debug : bool, optional
+        If True, print the DataFrame contents for debugging purposes. Default is False

     Returns
     -------
-
-
-
-
-
-
+    dict of NDArray
+        Dictionary containing all the timecourses in the file, keyed by column names.
+        If the first row exists, it is used as keys; otherwise, keys are generated.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the specified FSL mat file does not exist
+
+    Notes
+    -----
+    This function reads FSL design.mat files and extracts time series data. The function
+    skips the first 5 rows of the file (assumed to be header information) and treats
+    subsequent rows as time series data. The column names are generated using the
+    `makecolname` helper function.
+
+    Examples
+    --------
+    >>> timeseries = readfslmat("design")
+    >>> print(timeseries.keys())
+    dict_keys(['col_00', 'col_01', 'col_02'])
+    >>> print(timeseries['col_00'])
+    [0.1 0.2 0.3 0.4]
     """
+    if not os.path.isfile(inputfilename + ".mat"):
+        raise FileNotFoundError(f"FSL mat file {inputfilename}.mat does not exist")
+
     timeseriesdict = {}

     # Read the data in with no header
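The design.mat layout those Notes assume can be sketched as follows (illustrative only; the five skipped rows are FSL header lines such as /NumWaves, /NumPoints, and /Matrix, and the package's actual reader is not shown in this hunk):

import numpy as np

# Skip the FSL header block, then read the regressor matrix.
designmatrix = np.loadtxt("design.mat", skiprows=5)
print(designmatrix.shape)  # (n_timepoints, n_regressors)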
@@ -1284,7 +2558,51 @@ def readfslmat(inputfilename, debug=False):
     return timeseriesdict


-def readoptionsfile(inputfileroot):
+def readoptionsfile(inputfileroot: str) -> Dict[str, Any]:
+    """
+    Read rapidtide run options from a JSON or TXT configuration file.
+
+    This function attempts to read rapidtide run options from a file with the given root name,
+    checking for `.json` and `.txt` extensions in that order. If neither file exists,
+    a `FileNotFoundError` is raised. The function also handles backward compatibility
+    for older options files by filling in default filter limits based on the `filtertype`.
+
+    Parameters
+    ----------
+    inputfileroot : str
+        The base name of the options file (without extension). The function will
+        first look for `inputfileroot.json`, then `inputfileroot.txt`.
+
+    Returns
+    -------
+    Dict[str, Any]
+        A dictionary containing the run options. The dictionary includes keys such as
+        `filtertype`, `lowerstop`, `lowerpass`, `upperpass`, and `upperstop`, depending
+        on the file content and filter type.
+
+    Raises
+    ------
+    FileNotFoundError
+        If neither `inputfileroot.json` nor `inputfileroot.txt` exists.
+
+    Notes
+    -----
+    For backward compatibility, older options files without a `lowerpass` key are updated
+    with default limits (lowerstop, lowerpass, upperpass, upperstop) based on the `filtertype`:
+
+    - "None": All limits set to 0.0 or -1.0
+    - "vlf": 0.0, 0.0, 0.009, 0.010
+    - "lfo": 0.009, 0.010, 0.15, 0.20
+    - "resp": 0.15, 0.20, 0.4, 0.5
+    - "card": 0.4, 0.5, 2.5, 3.0
+    - "arb": Uses values from `arb_lowerstop`, `arb_lower`, `arb_upper`, `arb_upperstop`
+
+    Examples
+    --------
+    >>> options = readoptionsfile("myfilter")
+    >>> print(options["filtertype"])
+    vlf
+    """
     if os.path.isfile(inputfileroot + ".json"):
         # options saved as json
         thedict = readdictfromjson(inputfileroot + ".json")
@@ -1292,8 +2610,7 @@ def readoptionsfile(inputfileroot):
         # options saved as text
         thedict = readdict(inputfileroot + ".txt")
     else:
-
-        return {}
+        raise FileNotFoundError(f"options file {inputfileroot}(.json/.txt) does not exist")

     # correct behavior for older options files
     try:
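A sketch of the backward-compatibility fill-in those Notes describe, built from the table above (illustrative; `patchfilterlimits` is a hypothetical name, and `thedict` stands for the options dictionary being repaired):

FILTERDEFAULTS = {
    "vlf": (0.0, 0.0, 0.009, 0.010),
    "lfo": (0.009, 0.010, 0.15, 0.20),
    "resp": (0.15, 0.20, 0.4, 0.5),
    "card": (0.4, 0.5, 2.5, 3.0),
}

def patchfilterlimits(thedict):
    # Older options files lack explicit band limits; derive them from filtertype.
    if "lowerpass" not in thedict and thedict.get("filtertype") in FILTERDEFAULTS:
        (
            thedict["lowerstop"],
            thedict["lowerpass"],
            thedict["upperpass"],
            thedict["upperstop"],
        ) = FILTERDEFAULTS[thedict["filtertype"]]
    return thedict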
@@ -1339,45 +2656,138 @@ def readoptionsfile(inputfileroot):
     return thedict


-def makecolname(colnum, startcol):
+def makecolname(colnum: int, startcol: int) -> str:
+    """
+    Generate a column name in the format 'col_##' where ## is a zero-padded number.
+
+    This function creates standardized column names by adding a starting offset to
+    a column number and formatting it with zero-padding to ensure consistent
+    two-digit representation.
+
+    Parameters
+    ----------
+    colnum : int
+        The base column number to be used in the name generation.
+    startcol : int
+        The starting column offset to be added to colnum.
+
+    Returns
+    -------
+    str
+        A column name in the format 'col_##' where ## represents the zero-padded
+        sum of colnum and startcol.
+
+    Notes
+    -----
+    The resulting number is zero-padded to always have at least two digits.
+    For example, if colnum=5 and startcol=10, the result will be 'col_15'.
+    If colnum=1 and startcol=2, the result will be 'col_03'.
+
+    Examples
+    --------
+    >>> makecolname(0, 0)
+    'col_00'
+
+    >>> makecolname(5, 10)
+    'col_15'
+
+    >>> makecolname(1, 2)
+    'col_03'
+    """
     return f"col_{str(colnum + startcol).zfill(2)}"


 def writebidstsv(
-    outputfileroot,
-    data,
-    samplerate,
-    extraheaderinfo=None,
-    compressed=True,
-    columns=None,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    :
-
-
-    :
-
-
-    :
-
+    outputfileroot: str,
+    data: NDArray,
+    samplerate: float,
+    extraheaderinfo: Optional[Dict[str, Any]] = None,
+    compressed: bool = True,
+    columns: Optional[List[str]] = None,
+    xaxislabel: str = "time",
+    yaxislabel: str = "arbitrary value",
+    starttime: float = 0.0,
+    append: bool = False,
+    samplerate_tolerance: float = 1e-6,
+    starttime_tolerance: float = 1e-6,
+    colsinjson: bool = True,
+    colsintsv: bool = False,
+    omitjson: bool = False,
+    debug: bool = False,
+) -> None:
+    """
+    Write physiological or stimulation data to a BIDS-compatible TSV file with optional JSON sidecar.
+
+    This function writes time series data to a TSV file following BIDS conventions for physiological
+    (``_physio``) and stimulation (``_stim``) data. It supports optional compression, appending to
+    existing files, and includes metadata in a corresponding JSON file.
+
+    Parameters
+    ----------
+    outputfileroot : str
+        Root name of the output files (without extension). The function will write
+        ``<outputfileroot>.tsv`` or ``<outputfileroot>.tsv.gz`` and ``<outputfileroot>.json``.
+    data : NDArray
+        Time series data to be written. If 1D, it will be reshaped to (1, n_timesteps).
+        Shape should be (n_channels, n_timesteps).
+    samplerate : float
+        Sampling frequency of the data in Hz.
+    extraheaderinfo : dict, optional
+        Additional key-value pairs to include in the JSON sidecar file.
+    compressed : bool, default=True
+        If True, compress the TSV file using gzip (.tsv.gz). If False, write uncompressed (.tsv).
+    columns : list of str, optional
+        Column names for the TSV file. If None, default names are generated using
+        ``makecolname``.
+    xaxislabel : str, default="time"
+        Label for the x-axis in the JSON sidecar.
+    yaxislabel : str, default="arbitrary value"
+        Label for the y-axis in the JSON sidecar.
+    starttime : float, default=0.0
+        Start time of the recording in seconds.
+    append : bool, default=False
+        If True, append data to an existing file. The function checks compatibility of
+        sampling rate, start time, and number of columns.
+    samplerate_tolerance : float, default=1e-6
+        Tolerance for comparing sampling rates when appending data.
+    starttime_tolerance : float, default=1e-6
+        Tolerance for comparing start times when appending data.
+    colsinjson : bool, default=True
+        If True, include the column names in the JSON file under the "Columns" key.
+    colsintsv : bool, default=False
+        If True, write column headers in the TSV file. BIDS convention requires no headers.
+    omitjson : bool, default=False
+        If True, do not write the JSON sidecar file.
+    debug : bool, default=False
+        If True, print debug information during execution.
+
+    Returns
+    -------
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - BIDS-compliant TSV files require:
+        1. Compression (.tsv.gz)
+        2. Presence of "SamplingFrequency", "StartTime", and "Columns" in the JSON file
+        3. No column headers in the TSV file
+        4. File name ending in "_physio" or "_stim"
+    - If ``append=True``, the function will validate compatibility of sampling rate, start time,
+      and number of columns with the existing file.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.random.rand(2, 1000)
+    >>> writebidstsv("sub-01_task-rest_physio", data, samplerate=100.0)
+    >>> # Writes:
+    >>> # sub-01_task-rest_physio.tsv.gz
+    >>> # sub-01_task-rest_physio.json
+
+    See Also
+    --------
+    readbidstsv : Read BIDS physiological or stimulation data from TSV and JSON files.
     """
     if debug:
         print("entering writebidstsv:")
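A usage sketch for the expanded signature above (hypothetical data and file names; the rapidtide.io module path is assumed):

import numpy as np
from rapidtide.io import writebidstsv  # module path assumed

t = np.linspace(0.0, 40.0, 1000, endpoint=False)
slow = np.sin(2.0 * np.pi * 0.1 * t)
fast = np.cos(2.0 * np.pi * 1.0 * t)

# Initial write creates sub-01_task-rest_physio.tsv.gz and its .json sidecar.
writebidstsv("sub-01_task-rest_physio", np.vstack((slow, fast)), 25.0,
             columns=["slow", "fast"])

# Append a third channel; rate, start time, and length must match the sidecar.
writebidstsv("sub-01_task-rest_physio", slow * fast, 25.0,
             columns=["product"], append=True)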
@@ -1386,6 +2796,8 @@ def writebidstsv(
         print("\tsamplerate:", samplerate)
         print("\tcompressed:", compressed)
         print("\tcolumns:", columns)
+        print("\txaxislabel:", xaxislabel)
+        print("\tyaxislabel:", yaxislabel)
         print("\tstarttime:", starttime)
         print("\tappend:", append)
     if len(data.shape) == 1:
@@ -1395,8 +2807,12 @@ def writebidstsv(
     else:
         reshapeddata = data
     if append:
-        insamplerate, instarttime, incolumns, indata, incompressed, incolsource =
-
+        insamplerate, instarttime, incolumns, indata, incompressed, incolsource, inextrainfo = (
+            readbidstsv(
+                outputfileroot + ".json",
+                neednotexist=True,
+                debug=debug,
+            )
         )
         if debug:
             print("appending")
@@ -1420,8 +2836,8 @@ def writebidstsv(
             )
             compressed = incompressed
             if (
-                (insamplerate
-                and (instarttime
+                np.fabs(insamplerate - samplerate) < samplerate_tolerance
+                and np.fabs(instarttime - starttime) < starttime_tolerance
                 and reshapeddata.shape[1] == indata.shape[1]
             ):
                 startcol = len(incolumns)
@@ -1434,6 +2850,7 @@ def writebidstsv(
                 sys.exit()
     else:
         startcol = 0
+        inextrainfo = None

     if columns is None:
         columns = []
@@ -1462,6 +2879,8 @@ def writebidstsv(
     headerdict = {}
     headerdict["SamplingFrequency"] = float(samplerate)
     headerdict["StartTime"] = float(starttime)
+    headerdict["XAxisLabel"] = xaxislabel
+    headerdict["YAxisLabel"] = yaxislabel
     if colsinjson:
         if startcol == 0:
             headerdict["Columns"] = columns
@@ -1470,6 +2889,14 @@ def writebidstsv(
     if extraheaderinfo is not None:
         for key in extraheaderinfo:
             headerdict[key] = extraheaderinfo[key]
+    if inextrainfo is not None:
+        for key in inextrainfo:
+            headerdict[key] = inextrainfo[key]
+
+    if debug:
+        print(f"{extraheaderinfo=}")
+        print(f"{inextrainfo=}")
+        print(f"{headerdict=}")

     if not omitjson:
         with open(outputfileroot + ".json", "wb") as fp:
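After the merging above, the sidecar dictionary holds roughly the following before being serialized (illustrative values continuing the earlier example; extra keys from extraheaderinfo and the reread inextrainfo ride along):

headerdict = {
    "SamplingFrequency": 25.0,
    "StartTime": 0.0,
    "XAxisLabel": "time",
    "YAxisLabel": "arbitrary value",
    "Columns": ["slow", "fast", "product"],
    # ...plus any keys supplied via extraheaderinfo or recovered as inextrainfo
}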
@@ -1480,36 +2907,58 @@ def writebidstsv(
             )


-def readvectorsfromtextfile(
-
+def readvectorsfromtextfile(
+    fullfilespec: str, onecol: bool = False, debug: bool = False
+) -> Tuple[Optional[float], Optional[float], Optional[List[str]], NDArray, Optional[bool], str]:
+    """
+    Read time series data from a text-based file (TSV, CSV, MAT, or BIDS-style TSV).
+
+    This function reads timecourse data from various file formats, including plain TSV,
+    gzipped TSV (.tsv.gz), CSV, and BIDS-style continuous data files (.tsv with associated .json).
+    It automatically detects the file type and parses the data accordingly.

     Parameters
     ----------
     fullfilespec : str
-
-
-
-
-
-
-
+        Path to the input file, optionally followed by a column specification
+        (e.g., ``"data.tsv:0-5"``). For TSV/CSV files, the column specification can
+        be a comma-separated list of column names or integer indices. For BIDS-style
+        TSV files, it should be a comma-separated list of column names.
+    onecol : bool, optional
+        If True, require that the specification select exactly one column of data.
+        Default is False.
+    debug : bool, optional
+        If True, prints additional debugging information. Default is False.

     Returns
     -------
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    samplerate : float
+        Sample rate in Hz. None if not knowable.
+    starttime : float
+        Time of first point, in seconds. None if not knowable.
+    columns : str array
+        Names of the timecourses contained in the file. None if not knowable.
+    data : 2D numpy array
+        Timecourses from the file.
+    compressed : bool
+        True if time data is gzipped (as in a .tsv.gz file).
+    filetype : str
+        One of "text", "csv", "plaintsv", "bidscontinuous".
+
+    Notes
+    -----
+    - If the file does not exist or is not valid, all return values are None.
+    - For BIDS-style TSV files, the associated .json sidecar file is used to determine
+      sample rate and start time.
+    - For plain TSV files, column names are read from the header row.
+    - If ``onecol`` is True, only a single column may be selected.
+
+    Examples
+    --------
+    >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv")
+    >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv:0-3")
+    >>> samplerate, starttime, columns, data, compressed, filetype = readvectorsfromtextfile("data.tsv", onecol=True)
+    """

     thefilename, colspec = parsefilespec(fullfilespec)
     thefileroot, theext = os.path.splitext(thefilename)
@@ -1564,12 +3013,13 @@ def readvectorsfromtextfile(fullfilespec, onecol=False, debug=False):
                 colspectouse = makecolname(int(colspec), 0)
             except ValueError:
                 colspectouse = colspec
-        thesamplerate, thestarttime, thecolumns, thedata, compressed, colsource =
-            thefilename, colspec=colspectouse, debug=debug
+        thesamplerate, thestarttime, thecolumns, thedata, compressed, colsource, extrainfo = (
+            readbidstsv(thefilename, colspec=colspectouse, debug=debug)
         )
         if thedata is None:
             raise ValueError(f"specified column {colspectouse} does not exist")
         if onecol and thedata.shape[0] > 1:
+            print(f"{onecol=}, {thedata.shape=}, {colspec=}, {colspectouse=}")
             raise ValueError("specify a single column from", thefilename)
     elif filetype == "plaintsv":
         thedatadict = readlabelledtsv(thefileroot, compressed=compressed)
@@ -1641,31 +3091,79 @@ def readvectorsfromtextfile(fullfilespec, onecol=False, debug=False):
     return thesamplerate, thestarttime, thecolumns, thedata, compressed, filetype


-def readbidstsv(
-
+def readbidstsv(
+    inputfilename: str,
+    colspec: Optional[str] = None,
+    warn: bool = True,
+    neednotexist: bool = False,
+    debug: bool = False,
+) -> Tuple[
+    float,
+    float,
+    Optional[List[str]],
+    Optional[NDArray],
+    Optional[bool],
+    Optional[str],
+    Optional[dict],
+]:
+    """
+    Read BIDS-compatible TSV data file with associated JSON metadata.
+
+    This function reads a TSV file (optionally gzipped) and its corresponding JSON
+    metadata file to extract timecourse data, sample rate, start time, and column names.
+    It supports both compressed (.tsv.gz) and uncompressed (.tsv) TSV files.

     Parameters
     ----------
     inputfilename : str
-        The root name of the
-    colspec:
-        A comma
-    debug : bool
-
+        The root name of the TSV and accompanying JSON file (without extension).
+    colspec : str, optional
+        A comma-separated list of column names to return. If None, all columns are returned.
+    debug : bool, optional
+        If True, print additional debugging information. Default is False.
+    warn : bool, optional
+        If True, print warnings for missing metadata fields. Default is True.
+    neednotexist : bool, optional
+        If True, return None values instead of raising an exception if files do not exist.
+        Default is False.

     Returns
     -------
+    tuple of (samplerate, starttime, columns, data, is_compressed, columnsource, extrainfo)
     samplerate : float
-        Sample rate in Hz
+        Sample rate in Hz.
     starttime : float
-        Time of first point
-    columns : str
-        Names of the timecourses contained in the file
-    data :
-
-
-
-
+        Time of first point in seconds.
+    columns : list of str
+        Names of the timecourses contained in the file.
+    data : NDArray, optional
+        2D array of timecourses from the file. Returns None if file does not exist or is invalid.
+    is_compressed : bool
+        Indicates whether the TSV file was gzipped.
+    columnsource : str
+        Source of column names: either 'json' or 'tsv'.
+    extrainfo : dict
+        Dictionary of any additional (non-required) keys found in the .json file.
+
+    Notes
+    -----
+    - If the TSV file does not exist or is not valid, all return values are None when
+      ``neednotexist`` is True; otherwise a FileNotFoundError is raised.
+    - If the JSON metadata file is missing required fields (SamplingFrequency, StartTime, Columns),
+      default values are used and warnings are issued if `warn=True`.
+    - The function handles both gzipped and uncompressed TSV files.
+    - If a header line is found in the TSV file, it is skipped and a warning is issued.
+
+    Examples
+    --------
+    >>> samplerate, starttime, columns, data, is_compressed, source, extrainfo = readbidstsv('sub-01_task-rest')
+    >>> print(f"Sample rate: {samplerate} Hz")
+    Sample rate: 10.0 Hz
+
+    >>> samplerate, starttime, columns, data, is_compressed, source, extrainfo = readbidstsv(
+    ...     'sub-01_task-rest', colspec='column1,column2'
+    ... )
+    >>> print(f"Selected columns: {columns}")
+    Selected columns: ['column1', 'column2']
     """
     thefileroot, theext = os.path.splitext(inputfilename)
     if theext == ".gz":
@@ -1717,6 +3215,10 @@ def readbidstsv(inputfilename, colspec=None, warn=True, debug=False):
             )
         else:
             columnsource = "json"
+        extrainfo = {}
+        for key in d:
+            if not key in ["SamplingFrequency", "StartTime", "Columns"]:
+                extrainfo[key] = d[key]
         if os.path.exists(thefileroot + ".tsv.gz"):
             compression = "gzip"
             theextension = ".tsv.gz"
@@ -1779,6 +3281,7 @@ def readbidstsv(inputfilename, colspec=None, warn=True, debug=False):
                 (compression == "gzip"),
                 warn,
                 headerlinefound,
+                extrainfo,
             )

         # select a subset of columns if they were specified
@@ -1790,6 +3293,7 @@ def readbidstsv(inputfilename, colspec=None, warn=True, debug=False):
                 np.transpose(df.to_numpy()),
                 (compression == "gzip"),
                 columnsource,
+                extrainfo,
             )
         else:
             collist = colspec.split(",")
@@ -1797,7 +3301,7 @@ def readbidstsv(inputfilename, colspec=None, warn=True, debug=False):
                 selectedcols = df[collist]
             except KeyError:
                 print("specified column list cannot be found in", inputfilename)
-                return [None, None, None, None, None, None]
+                return [None, None, None, None, None, None, None]
             columns = list(selectedcols.columns.values)
             return (
                 samplerate,
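The extrainfo collection added at the top of this hunk is equivalent to a dictionary comprehension over the sidecar dict `d` (an equivalent restatement for clarity, not a proposed change):

# Every sidecar key other than the three required BIDS fields is passed
# through to the caller as "extra" info.
extrainfo = {
    key: value
    for key, value in d.items()
    if key not in ("SamplingFrequency", "StartTime", "Columns")
}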
@@ -1806,27 +3310,76 @@ def readbidstsv(inputfilename, colspec=None, warn=True, debug=False):
                 np.transpose(selectedcols.to_numpy()),
                 (compression == "gzip"),
                 columnsource,
+                extrainfo,
             )
     else:
-
-
+        if neednotexist:
+            return [None, None, None, None, None, None, None]
+        else:
+            raise FileNotFoundError(f"file pair {thefileroot}(.json/.tsv[.gz]) does not exist")


-def readcolfrombidstsv(
-
+def readcolfrombidstsv(
+    inputfilename: str,
+    columnnum: Optional[int] = 0,
+    columnname: Optional[str] = None,
+    neednotexist: bool = False,
+    debug: bool = False,
+) -> Tuple[Optional[float], Optional[float], Optional[NDArray]]:
+    """
+    Read a specific column from a BIDS TSV file.
+
+    Extracts a single column of data from a BIDS TSV file, either by column name
+    or by column index. The function handles both compressed and uncompressed files.

     Parameters
     ----------
-    inputfilename
-
-    columnname
+    inputfilename : str
+        Path to the input BIDS TSV file (can be .tsv or .tsv.gz)
+    columnname : str, optional
+        Name of the column to extract. If specified, ``columnnum`` is ignored.
+        Default is None.
+    columnnum : int, optional
+        Index of the column to extract (0-based). Ignored if ``columnname`` is specified.
+        Default is 0.
+    neednotexist : bool, optional
+        If True, the function will not raise an error if the file does not exist.
+        Default is False.
+    debug : bool, optional
+        Enable debug output. Default is False.

     Returns
     -------
-
+    tuple
+        A tuple containing:
+
+        - samplerate : float or None
+            Sampling rate extracted from the file, or None if no valid data found
+        - starttime : float or None
+            Start time extracted from the file, or None if no valid data found
+        - data : NDArray or None
+            The extracted column data as a 1D array, or None if no valid data found
+
+    Notes
+    -----
+    - If both ``columnname`` and ``columnnum`` are specified, ``columnname`` takes precedence
+    - Column indices are 0-based
+    - The function handles both compressed (.tsv.gz) and uncompressed (.tsv) files
+    - Returns None for all values if no valid data is found
+
+    Examples
+    --------
+    >>> # Read first column by index
+    >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnnum=0)
+
+    >>> # Read column by name
+    >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnname='reaction_time')
+
+    >>> # Read column with debug output
+    >>> samplerate, starttime, data = readcolfrombidstsv('data.tsv', columnname='rt', debug=True)
     """
-    samplerate, starttime, columns, data, compressed, colsource = readbidstsv(
-        inputfilename, debug=debug
+    samplerate, starttime, columns, data, compressed, colsource, extrainfo = readbidstsv(
+        inputfilename, neednotexist=neednotexist, debug=debug
     )
     if data is None:
         print("no valid datafile found")
@@ -1853,12 +3406,64 @@ def readcolfrombidstsv(inputfilename, columnnum=0, columnname=None, debug=False)
     return samplerate, starttime, data[columnnum, :]


-def parsefilespec(filespec, debug=False):
+def parsefilespec(filespec: str, debug: bool = False) -> Tuple[str, Optional[str]]:
+    """
+    Parse a file specification string into filename and column specification.
+
+    This function splits a file specification string using ':' as the delimiter.
+    On Windows platforms, it handles special cases where the second character
+    is ':' (e.g., "C:file.txt") by treating the first two parts as the filename.
+
+    Parameters
+    ----------
+    filespec : str
+        The file specification string to parse. Expected format is
+        "filename[:column_specification]".
+    debug : bool, optional
+        If True, print debug information during execution. Default is False.
+
+    Returns
+    -------
+    tuple[str, str or None]
+        A tuple containing:
+
+        - thefilename : str
+            The parsed filename part of the specification
+        - thecolspec : str or None
+            The parsed column specification, or None if not provided
+
+    Raises
+    ------
+    ValueError
+        If the file specification is malformed (e.g., too many parts when
+        special case handling is not applicable).
+
+    Notes
+    -----
+    On Windows systems, this function correctly handles drive letter specifications
+    such as "C:file.txt" by rejoining the first two elements of the split ("C" and
+    "file.txt") as the filename part.
+
+    Examples
+    --------
+    >>> parsefilespec("data.csv")
+    ('data.csv', None)
+
+    >>> parsefilespec("data.csv:1,3,5")
+    ('data.csv', '1,3,5')
+
+    >>> parsefilespec("C:file.txt:col1")
+    ('C:file.txt', 'col1')
+    """
     inputlist = filespec.split(":")
     if debug:
         print(f"PARSEFILESPEC: input string >>>{filespec}<<<")
         print(f"PARSEFILESPEC: platform is {platform.system()}")
-
+
+    specialcase = False
+    if len(inputlist) > 1:
+        if filespec[1] == ":" and platform.system() == "Windows":
+            specialcase = True
+    if specialcase:
         thefilename = ":".join([inputlist[0], inputlist[1]])
         if len(inputlist) == 3:
             thecolspec = inputlist[2]
@@ -1883,7 +3488,47 @@ def parsefilespec(filespec, debug=False):
     return thefilename, thecolspec


-def unique(list1):
+def unique(list1: List[Any]) -> List[Any]:
+    """
+    Return the unique elements of a list, preserving their order of first appearance.
+
+    Parameters
+    ----------
+    list1 : list
+        The input list, which may contain duplicate elements.
+
+    Returns
+    -------
+    list
+        A list containing each distinct element of ``list1`` exactly once, in
+        the order in which it first appears.
+
+    Examples
+    --------
+    >>> unique([1, 2, 2, 3, 1])
+    [1, 2, 3]
+    """
     # initialize a null list
     unique_list = []

@@ -1895,7 +3540,57 @@ def unique(list1):
     return unique_list


-def colspectolist(colspec, debug=False):
+def colspectolist(colspec: Optional[str], debug: bool = False) -> Optional[List[int]]:
+    """
+    Convert a column specification string into a sorted list of integers.
+
+    This function parses a column specification string that may contain
+    individual integers, ranges (e.g., "1-5"), or predefined macros (e.g.,
+    "APARC_GRAY"). It expands macros into their corresponding ranges and
+    returns a sorted list of unique integers.
+
+    Parameters
+    ----------
+    colspec : str or None
+        A column specification string. Can include:
+
+        - Individual integers (e.g., "1", "10")
+        - Ranges (e.g., "1-5")
+        - Predefined macros (e.g., "APARC_GRAY")
+
+        If None, the function prints an error and returns None.
+    debug : bool, optional
+        If True, enables debug output showing processing steps. Default is False.
+
+    Returns
+    -------
+    list of int or None
+        A sorted list of unique integers corresponding to the column
+        specification. Returns None if an error occurs during processing.
+
+    Notes
+    -----
+    Predefined macros:
+
+    - APARC_SUBCORTGRAY: 8-13,17-20,26-28,47-56,58-60,96,97
+    - APARC_CORTGRAY: 1000-1035,2000-2035
+    - APARC_GRAY: 8-13,17-20,26-28,47-56,58-60,96,97,1000-1035,2000-2035
+    - APARC_WHITE: 2,7,41,46,177,219,3000-3035,4000-4035,5001,5002
+    - APARC_CSF: 4,5,14,15,24,31,43,44,63,72
+    - APARC_ALLBUTCSF: 2,7-13,17-20,26-28,41,46-56,58-60,96,97,177,219,1000-1035,2000-2035,3000-3035,4000-4035,5001,5002
+    - SSEG_GRAY: 3,8,10-13,16-18,26,42,47,49-54,58
+    - SSEG_WHITE: 2,7,41,46
+    - SSEG_CSF: 4,5,14,15,24,43,44
+
+    Examples
+    --------
+    >>> colspectolist("1-3,5,7-9")
+    [1, 2, 3, 5, 7, 8, 9]
+
+    >>> colspectolist("APARC_GRAY")
+    [8, 9, 10, 11, 12, 13, 17, 18, 19, 20, 26, 27, 28, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 58, 59, 60, 96, 97, 1000, 1001, ..., 2035]
+
+    >>> colspectolist(None)
+    COLSPECTOLIST: no range specification - exiting
+    None
+    """
     if colspec is None:
         print("COLSPECTOLIST: no range specification - exiting")
         return None
@@ -1903,6 +3598,46 @@ def colspectolist(colspec, debug=False):
     theranges = colspec.split(",")

     def safeint(s):
+        """
+        Safely convert a single range-specification token to an integer.
+
+        Parameters
+        ----------
+        s : str, int, or float
+            The value to convert to an integer.
+
+        Returns
+        -------
+        int
+            The integer value of ``s``, if the conversion succeeds.
+
+        Examples
+        --------
+        >>> safeint("42")
+        42
+
+        >>> safeint(3.14)
+        3
+        """
         try:
             int(s)
             return int(s)
@@ -1915,10 +3650,15 @@ def colspectolist(colspec, debug=False):
         ("APARC_SUBCORTGRAY", "8-13,17-20,26-28,47-56,58-60,96,97"),
         ("APARC_CORTGRAY", "1000-1035,2000-2035"),
         ("APARC_GRAY", "8-13,17-20,26-28,47-56,58-60,96,97,1000-1035,2000-2035"),
-        ("APARC_WHITE", "2,7,41,46,177,219"),
-        ("
+        ("APARC_WHITE", "2,7,41,46,177,219,3000-3035,4000-4035,5001,5002"),
+        ("APARC_CSF", "4,5,14,15,24,31,43,44,63,72"),
+        (
+            "APARC_ALLBUTCSF",
+            "2,7-13,17-20,26-28,41,46-56,58-60,96,97,177,219,1000-1035,2000-2035,3000-3035,4000-4035,5001,5002",
+        ),
         ("SSEG_GRAY", "3,8,10-13,16-18,26,42,47,49-54,58"),
         ("SSEG_WHITE", "2,7,41,46"),
+        ("SSEG_CSF", "4,5,14,15,24,43,44"),
     )
     preprocessedranges = []
     for thisrange in theranges:
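How a macro token is consumed can be sketched like this (an illustration; the real expansion loop over `preprocessedranges` is elided from this hunk, and `expandmacros` is a hypothetical helper):

MACROS = {
    "SSEG_WHITE": "2,7,41,46",
    "SSEG_CSF": "4,5,14,15,24,43,44",
}

def expandmacros(colspec):
    # Replace any macro name in a column specification with its range string.
    for name, expansion in MACROS.items():
        colspec = colspec.replace(name, expansion)
    return colspec

print(expandmacros("SSEG_CSF,100"))  # -> "4,5,14,15,24,43,44,100"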
@@ -1955,7 +3695,43 @@ def colspectolist(colspec, debug=False):
     return unique(sorted(collist))


-def processnamespec(
+def processnamespec(
+    maskspec: str, spectext1: str, spectext2: str, debug: bool = False
+) -> Tuple[str, Optional[List[int]]]:
+    """
+    Parse a file specification and extract filename and column specifications.
+
+    This function takes a file specification string and parses it to separate the filename
+    from any column specification. The column specification is converted into a list of
+    column indices for further processing.
+
+    Parameters
+    ----------
+    maskspec : str
+        Input file specification string containing filename and optional column specification
+    spectext1 : str
+        Descriptive text for the specification, used in informational output
+    spectext2 : str
+        Additional descriptive text for the specification, used in informational output
+    debug : bool, optional
+        Enable debug output. Default is False
+
+    Returns
+    -------
+    filename : str
+        Parsed filename
+    collist : list of int or None
+        List of column indices, or None if no column spec provided
+
+    Notes
+    -----
+    The function uses `parsefilespec` to split the input string and `colspectolist` to
+    convert column specifications into lists of integers.
+
+    Examples
+    --------
+    >>> processnamespec("data.txt:1,3,5", "", "")
+    ('data.txt', [1, 3, 5])
+
+    >>> processnamespec("data.txt", "", "")
+    ('data.txt', None)
+    """
     thename, colspec = parsefilespec(maskspec)
     if colspec is not None:
         thevals = colspectolist(colspec)
@@ -1966,16 +3742,57 @@ def processnamespec(maskspec, spectext1, spectext2, debug=False):
     return thename, thevals


-def readcolfromtextfile(inputfilespec):
-
+def readcolfromtextfile(inputfilespec: str) -> NDArray:
+    """
+    Read a single timecourse from a column of a text file.
+
+    The file specification may carry a column selector (e.g., ``"data.txt:2"``)
+    naming which column to return.

     Parameters
     ----------
-    inputfilename
-
+    inputfilespec : str
+        The name of the text file, optionally followed by a column specification
+        (e.g., ``"data.txt:3"``).

     Returns
     -------
+    NDArray
+        A 1D numpy array containing the selected column of data.
     """
     inputfilename, colspec = parsefilespec(inputfilespec)
     if inputfilename is None:
@@ -1990,42 +3807,54 @@ def readcolfromtextfile(inputfilespec):
         return inputdata[:, 0]


-def readvecs(
-
+def readvecs(
+    inputfilename: str,
+    colspec: Optional[str] = None,
+    numskip: int = 0,
+    debug: bool = False,
+    thedtype: np.dtype = np.dtype(np.float64),
+) -> NDArray:
+    """
+    Read vectors from a text file and return them as a transposed numpy array.

     Parameters
     ----------
-    inputfilename
+    inputfilename : str
+        The name of the text file to read data from.
+    colspec : str, optional
+        A string specifying which columns to read. If None, all columns in the first
+        line are read. Default is None.
+    numskip : int, optional
+        Number of lines to skip at the beginning of the file. If 0, the function
+        attempts to auto-detect if the first line contains headers. Default is 0.
+    thedtype : np.dtype, optional
+        The data type to convert the read values to. Default is ``np.dtype(np.float64)``.
+    debug : bool, optional
+        If True, print debug information including input parameters and processing
+        details. Default is False.

     Returns
     -------
-
+    NDArray
+        A 2D numpy array containing the file contents, transposed so that each
+        row of the result holds one column (vector) from the file.
+
+    Notes
+    -----
+    - The function assumes that the input file contains numeric data separated by
+      whitespace.
+    - If `colspec` is not provided, all columns from the first line are read.
+    - If `numskip` is 0, the function attempts to detect whether the first line
+      contains headers by trying to convert the first element to a float.
+    - The function raises a `ValueError` if any requested column index is out of
+      bounds.
+
+    Examples
+    --------
+    >>> data = readvecs('data.txt')
+    >>> data = readvecs('data.txt', colspec='1-3', numskip=1)
+    >>> data = readvecs('data.txt', colspec='0,2,4', thedtype=np.dtype(np.int32))
     """
-    """if False:
-        dataarray = pd.read_table(inputfilename, sep=None, header=None)
-        if colspec is None:
-            collist = range(len(dataarray.columns))
-        else:
-            collist = colspectolist(colspec, debug=debug)
-        if debug:
-            print("using collist:", collist)
-        if len(collist) > len(dataarray.columns):
-            print("READVECS: too many columns requested - exiting")
-            sys.exit()
-        if max(collist) > len(dataarray.columns) - 1:
-            print("READVECS: requested column number", max(collist), "too large - exiting")
-            sys.exit()
-        numvals = len(dataarray[numskip:])
-        numvecs = len(collist)
-        inputvec = np.zeros((numvecs, numvals), dtype="float64")
-        outcol = 0
-        if debug:
-            print(f"numvals = {numvals}, numvecs = {numvecs}")
-        for vecnum in collist:
-            inputvec[outcol, :] = dataarray[vecnum][numskip:]
-            outcol += 1
-        return 1.0 * inputvec[:, 0:numvals]
-    else:"""
     if debug:
         print(f"inputfilename: {inputfilename}")
         print(f"colspec: {colspec}")
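Note the annotated signature above: `thedtype` is now an `np.dtype` instance, and later in this diff the per-token conversion becomes `thedtype.type(...)`. A bare Python type such as `float` therefore no longer works. A minimal sketch of a call under the new API (module path assumed):

import numpy as np
from rapidtide.io import readvecs  # module path assumed

# np.dtype(np.int32).type is np.int32, which is callable on string tokens;
# a bare `int` has no .type attribute and would fail under the new code.
data = readvecs("data.txt", colspec="0,2,4", thedtype=np.dtype(np.int32))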
@@ -2053,25 +3882,52 @@ def readvecs(inputfilename, colspec=None, numskip=0, debug=False, thedtype=float
             thetokens = line.split()
             thisvec = []
             for vecnum in collist:
-                thisvec.append(thedtype(thetokens[vecnum]))
+                thisvec.append(thedtype.type(thetokens[vecnum]))
             inputvec.append(thisvec)
     theoutarray = np.transpose(np.asarray(inputvec, dtype=thedtype))
     return theoutarray


-def readvec(inputfilename, numskip=0):
-
+def readvec(inputfilename: str, numskip: int = 0) -> NDArray:
+    """
+    Read a timecourse from a text file, one value per line.

     Parameters
     ----------
     inputfilename : str
-
+        Path to the input text file.
+    numskip : int, optional
+        Number of lines to skip at the beginning of the file. Default is 0.

     Returns
     -------
-
-
-
+    NDArray
+        A 1D numpy array of float values read from the file.
+
+    Examples
+    --------
+    >>> timecourse = readvec('timecourse.txt')
+    >>> timecourse = readvec('timecourse.txt', numskip=1)
     """
     inputvec = []
     with open(inputfilename, "r") as thefile:
@@ -2082,7 +3938,57 @@ def readvec(inputfilename, numskip=0):
     return np.asarray(inputvec, dtype=float)


-def readtc(
+def readtc(
+    inputfilename: str,
+    colnum: Optional[int] = None,
+    colname: Optional[str] = None,
+    debug: bool = False,
+) -> Tuple[NDArray, Optional[float], Optional[float]]:
+    """
+    Read timecourse data from a file, supporting BIDS TSV and other formats.
+
+    This function reads timecourse data from a file, with support for BIDS TSV files
+    and generic multi-column text files. For BIDS TSV files, a column name or number
+    must be specified. For other file types, column selection is limited to numeric indices.
+
+    Parameters
+    ----------
+    inputfilename : str
+        Path to the input file to read. Can be a BIDS TSV file (`.tsv`) or a generic
+        text file with multiple columns.
+    colname : str or None, optional
+        Column name to read from a BIDS TSV file. Required if the file is a BIDS TSV
+        and `colnum` is not specified. Default is None.
+    colnum : int or None, optional
+        Column number to read from a BIDS TSV file or a generic multi-column file.
+        Required for generic files when `colname` is not specified. Default is None.
+    debug : bool, optional
+        Enable debug output to print intermediate information. Default is False.
+
+    Returns
+    -------
+    timecourse : NDArray
+        The timecourse data as a 1D numpy array.
+    inputfreq : float or None
+        Sampling frequency (Hz) if available from the file metadata. Default is None.
+    inputstart : float or None
+        Start time (seconds) if available from the file metadata. Default is None.
+
+    Notes
+    -----
+    - For BIDS TSV files (`.tsv`), the function reads the specified column using
+      `readcolfrombidstsv`, which extracts metadata such as sampling frequency and
+      start time.
+    - For generic text files, the function transposes the data and selects the
+      specified column if `colnum` is provided.
+    - If the input file is a `.json` file, it is assumed to contain metadata for
+      a BIDS TSV file and is processed accordingly.
+
+    Examples
+    --------
+    >>> timecourse, freq, start = readtc('data.tsv', colname='signal')
+    >>> timecourse, freq, start = readtc('data.txt', colnum=0, debug=True)
+    """
     # check file type
     filebase, extension = os.path.splitext(inputfilename)
     inputfreq = None
@@ -2114,16 +4020,47 @@ def readtc(inputfilename, colnum=None, colname=None, debug=False):
     return timecourse, inputfreq, inputstart


-def readlabels(inputfilename):
-
+def readlabels(inputfilename: str) -> List[str]:
+    """
+    Read a list of labels from a text file, one label per line.

     Parameters
     ----------
-
+    inputfilename : str
+        Path to the input text file containing one label per line.

     Returns
     -------
-
+    list of str
+        The labels read from the file, in file order.
+
+    Examples
+    --------
+    >>> labels = readlabels('regionnames.txt')
     """
     inputvec = []
     with open(inputfilename, "r") as thefile:
@@ -2133,22 +4070,42 @@ def readlabels(inputfilename):
     return inputvec


-def writedict(thedict, outputfile, lineend="", machinereadable=False):
-
-
+def writedict(
+    thedict: Dict[str, Any], outputfile: str, lineend: str = "", machinereadable: bool = False
+) -> None:
+    """
+    Write a dictionary to a text file with customizable line endings and formatting.

     Parameters
     ----------
     thedict : dict
-
+        Dictionary containing key-value pairs to be written to file
     outputfile : str
-
-    lineend :
-        Line ending style to use
+        Path to the output file where dictionary will be written
+    lineend : str, optional
+        Line ending style to use ('mac', 'win', 'linux'), default is 'linux'
+    machinereadable : bool, optional
+        If True, write in machine-readable JSON-like format with quotes around keys,
+        default is False

     Returns
     -------
-
+    None
+        Function writes to file but does not return any value
+
+    Notes
+    -----
+    - For 'mac' line endings, uses carriage return (`\\r`)
+    - For 'win' line endings, uses carriage return + line feed (`\\r\\n`)
+    - For 'linux' line endings, uses line feed (`\\n`)
+    - When `machinereadable=True`, keys are quoted and formatted with tab separators
+    - When `machinereadable=False`, keys are written without quotes
+
+    Examples
+    --------
+    >>> my_dict = {'name': 'John', 'age': 30}
+    >>> writedict(my_dict, 'output.txt', lineend='linux', machinereadable=False)
+    >>> writedict(my_dict, 'output.json', lineend='win', machinereadable=True)
     """
     if lineend == "mac":
         thelineending = "\r"
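The Notes above pin down the machinereadable format only loosely: quoted keys, tab separators, and (per the FILE.writelines("}" + thelineending) context line visible in the next hunk) a surrounding brace pair. A hedged sketch of a writer with those semantics, under the assumption that the description is accurate; writedict_sketch is a hypothetical stand-in, not rapidtide's implementation:

    from typing import Any, Dict

    def writedict_sketch(thedict: Dict[str, Any], outputfile: str,
                         lineend: str = "\n", machinereadable: bool = False) -> None:
        # Illustrative stand-in for writedict; the shipped code may differ.
        # newline="" stops Python from translating the explicit line ending.
        with open(outputfile, "w", newline="") as f:
            if machinereadable:
                f.write("{" + lineend)
                for key, value in thedict.items():
                    # Quoted keys with tab separators, per the Notes section.
                    f.write(f'\t"{key}":\t{value}' + lineend)
                f.write("}" + lineend)
            else:
                # Human-readable: one unquoted key-value pair per line.
                for key, value in thedict.items():
                    f.write(f"{key}:\t{value}" + lineend)

    writedict_sketch({"name": "John", "age": 30}, "output.txt")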
@@ -2174,19 +4131,42 @@ def writedict(thedict, outputfile, lineend="", machinereadable=False):
     FILE.writelines("}" + thelineending)


-def readdict(inputfilename):
-
+def readdict(inputfilename: str) -> Dict[str, Any]:
+    """
+    Read a dictionary from a text file.
+
+    Read a dictionary from a text file where each line contains a key followed by one or more values.
+    The key is the first element of each line (with the trailing character removed), and the values
+    are the remaining elements on that line.

     Parameters
     ----------
     inputfilename : str
-        The name of the
+        The name of the input file to read the dictionary from.

     Returns
     -------
-
-
-
+    dict
+        A dictionary where keys are the first element of each line (with last character removed)
+        and values are the remaining elements. If a line contains only one value, that value is
+        returned as a string rather than a list. If the file does not exist, an empty dictionary
+        is returned.
+
+    Notes
+    -----
+    - The function assumes that the input file exists and is properly formatted
+    - Keys are processed by removing the last character from the first field
+    - Values are stored as lists unless there's only one value, in which case it's stored as a string
+    - If the file does not exist, a message is printed and an empty dictionary is returned
+
+    Examples
+    --------
+    >>> # Assuming a file 'data.txt' with content:
+    >>> # key1 val1 val2 val3
+    >>> # key2 val4
+    >>> result = readdict('data.txt')
+    >>> print(result)
+    {'key': ['val1', 'val2', 'val3'], 'key2': 'val4'}
     """
     if os.path.exists(inputfilename):
         thedict = {}
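The documented semantics are concrete enough to mirror directly: the first whitespace-delimited field minus its trailing character (typically a ':') becomes the key, a lone remaining value is stored as a string, multiple values as a list, and a missing file yields a printed message and an empty dict. A sketch under exactly those assumptions; readdict_sketch is illustrative, not the packaged code:

    import os
    from typing import Any, Dict

    def readdict_sketch(inputfilename: str) -> Dict[str, Any]:
        if not os.path.exists(inputfilename):
            print(inputfilename, "does not exist")
            return {}
        thedict: Dict[str, Any] = {}
        with open(inputfilename, "r") as f:
            for line in f:
                fields = line.split()
                if not fields:
                    continue
                key = fields[0][:-1]  # strip the trailing character, e.g. ':'
                values = fields[1:]
                # A single value is stored as a string, several as a list.
                thedict[key] = values[0] if len(values) == 1 else values
        return thedict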
@@ -2204,20 +4184,39 @@ def readdict(inputfilename):
         return {}


-def writevec(thevec, outputfile, lineend=""):
-
+def writevec(thevec: NDArray, outputfile: str, lineend: str = "") -> None:
+    """
+    Write a vector to a text file, one value per line.
+
     Parameters
     ----------
     thevec : 1D numpy or python array
-        The array to write.
+        The array to write. Must be a 1D array-like object.
     outputfile : str
-        The name of the output file
-    lineend : {
+        The name of the output file to write to.
+    lineend : {'mac', 'win', 'linux'}, optional
         Line ending style to use. Default is 'linux'.
+        - 'mac': Use Mac line endings (\r)
+        - 'win': Use Windows line endings (\r\n)
+        - 'linux': Use Linux line endings (\n)

     Returns
     -------
-
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    The function opens the output file in binary mode for all line ending types except
+    when an invalid lineend value is provided, in which case it opens in text mode
+    with default line endings.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> vec = np.array([1, 2, 3, 4, 5])
+    >>> writevec(vec, 'output.txt')
+    >>> writevec(vec, 'output_win.txt', lineend='win')
     """
     if lineend == "mac":
         thelineending = "\r"
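The Notes call out a subtle point worth illustrating: recognized lineend values open the file in binary mode so the explicit ending is written verbatim, while anything else falls back to text mode with the platform default. A sketch of that behavior; writevec_sketch is a hypothetical stand-in for the real function:

    import numpy as np

    def writevec_sketch(thevec, outputfile: str, lineend: str = "") -> None:
        endings = {"mac": "\r", "win": "\r\n", "linux": "\n"}
        if lineend in endings:
            # Binary mode: Python will not translate the explicit line ending.
            with open(outputfile, "wb") as f:
                for value in thevec:
                    f.write((str(value) + endings[lineend]).encode())
        else:
            # Unrecognized style: text mode, platform-default newlines.
            with open(outputfile, "w") as f:
                for value in thevec:
                    f.write(str(value) + "\n")

    writevec_sketch(np.array([1, 2, 3, 4, 5]), "output_win.txt", lineend="win")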
@@ -2237,16 +4236,75 @@ def writevec(thevec, outputfile, lineend=""):


 def writevectorstotextfile(
-    thevecs,
-    outputfile,
-    samplerate=1.0,
-    starttime=0.0,
-    columns=None,
-    compressed=True,
-    filetype="text",
-    lineend="",
-    debug=False,
-):
+    thevecs: NDArray,
+    outputfile: str,
+    samplerate: float = 1.0,
+    starttime: float = 0.0,
+    columns: Optional[List[str]] = None,
+    compressed: bool = True,
+    filetype: str = "text",
+    lineend: str = "",
+    debug: bool = False,
+) -> None:
+    """
+    Write vectors to a text file in various formats.
+
+    This function writes data vectors to a text file, supporting multiple output formats
+    including plain text, CSV, BIDS continuous data, and plain TSV. The format is determined
+    by the `filetype` parameter. It supports optional headers, line ending styles, and
+    compression for BIDS formats.
+
+    Parameters
+    ----------
+    thevecs : NDArray
+        Data vectors to write. Should be a 2D array where each row is a vector.
+    outputfile : str
+        Output file path. The extension determines the file format if not explicitly specified.
+    samplerate : float, optional
+        Sampling rate in Hz. Default is 1.0. Used in BIDS formats.
+    starttime : float, optional
+        Start time in seconds. Default is 0.0. Used in BIDS formats.
+    columns : list of str, optional
+        Column names for the output file. If None, no headers are written.
+    compressed : bool, optional
+        Whether to compress the output file (for BIDS formats). Default is True.
+    filetype : str, optional
+        Output format. Options are:
+        - 'text': Plain text with space-separated values
+        - 'csv': Comma-separated values
+        - 'bidscontinuous': BIDS continuous data format (TSV with JSON sidecar)
+        - 'plaintsv': Plain TSV format without JSON sidecar
+        Default is 'text'.
+    lineend : str, optional
+        Line ending style. Options are:
+        - 'mac' (``\r``)
+        - 'win' (``\r\n``)
+        - 'linux' (``\n``)
+        - '' (system default)
+        Default is ''.
+    debug : bool, optional
+        Enable debug output. Default is False.
+
+    Returns
+    -------
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - For BIDS formats, the function uses `writebidstsv` internally and splits the
+      output filename using `niftisplitext`.
+    - The `columns` parameter is only used when writing headers.
+    - The `lineend` parameter controls how newlines are written to the file.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> writevectorstotextfile(data, "output.txt", filetype="text")
+    >>> writevectorstotextfile(data, "output.csv", filetype="csv", columns=["A", "B", "C"])
+    >>> writevectorstotextfile(data, "output.tsv", filetype="bidscontinuous", samplerate=100.0)
+    """
     if filetype == "text":
         writenpvecs(thevecs, outputfile, headers=columns, lineend=lineend)
     elif filetype == "csv":
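The surviving context lines show that the function body is a thin dispatcher on filetype over per-format writers. A schematic of that control flow for the delimiter-based formats only; the BIDS branches are left out here because they depend on writebidstsv and niftisplitext, which this hunk does not show. write_vectors_sketch and its delimiter table are illustrative, not rapidtide's code:

    from typing import List, Optional
    import numpy as np

    def write_vectors_sketch(thevecs: np.ndarray, outputfile: str,
                             filetype: str = "text",
                             columns: Optional[List[str]] = None) -> None:
        # One vector per row in, one sample per line out.
        seps = {"text": " ", "csv": ",", "plaintsv": "\t"}
        if filetype not in seps:
            raise ValueError(f"unsupported filetype in this sketch: {filetype}")
        sep = seps[filetype]
        with open(outputfile, "w") as f:
            if columns is not None:
                f.write(sep.join(columns) + "\n")
            for sample in np.atleast_2d(thevecs).T:
                f.write(sep.join(str(v) for v in sample) + "\n")

    write_vectors_sketch(np.array([[1, 2, 3], [4, 5, 6]]), "output.csv",
                         filetype="csv", columns=["A", "B"])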
@@ -2284,21 +4342,64 @@ def writevectorstotextfile(


 # rewritten to guarantee file closure, combines writenpvec and writenpvecs
-def writenpvecs(
-
+def writenpvecs(
+    thevecs: NDArray,
+    outputfile: str,
+    ascsv: bool = False,
+    headers: Optional[List[str]] = None,
+    altmethod: bool = True,
+    lineend: str = "",
+) -> None:
+    """
+    Write out a two dimensional numpy array to a text file.
+
+    This function writes a numpy array to a text file, with options for
+    CSV-style output, custom headers, and line ending styles.

     Parameters
     ----------
-    thevecs:
-
+    thevecs : NDArray
+        A 1D or 2D numpy array containing the data to be written. If 1D,
+        the array is written as a single column. If 2D, each column is
+        written as a separate line in the output file.
     outputfile : str
-        The
-
-
+        The path to the output file where the data will be written.
+    ascsv : bool, optional
+        If True, use comma as the separator; otherwise, use tab. Default is False.
+    headers : list of str, optional
+        A list of header strings to write at the beginning of the file.
+        If provided, the number of headers must match the number of columns
+        in the data (for 2D arrays) or 1 (for 1D arrays).
+    altmethod : bool, optional
+        If True, use an optimized method for writing 2D data. If False,
+        use a nested loop approach. Default is True.
+    lineend : str, optional
+        Line ending style to use. Options are 'mac' (\r), 'win' (\r\n),
+        'linux' (\n), or empty string (uses system default). Default is 'linux'.

     Returns
     -------
-
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    - For 2D arrays, data is written column-wise.
+    - When `altmethod` is True, the function uses vectorized operations
+      for better performance.
+    - If `headers` are provided, they are written as the first line
+      in the file, separated by the chosen delimiter.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> writenpvecs(data, 'output.txt')
+    # Writes data as tab-separated columns to 'output.txt'
+
+    >>> headers = ['Col1', 'Col2', 'Col3']
+    >>> writenpvecs(data, 'output.csv', ascsv=True, headers=headers)
+    # Writes CSV-formatted data with headers to 'output.csv'
     """
     theshape = np.shape(thevecs)
     if lineend == "mac":
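The altmethod flag contrasts a vectorized write path with a plain nested loop. A sketch of the two strategies under the documented layout (each array column becomes one output line); writenpvecs_sketch is illustrative, not the packaged implementation:

    import numpy as np

    def writenpvecs_sketch(thevecs: np.ndarray, outputfile: str,
                           ascsv: bool = False, altmethod: bool = True) -> None:
        sep = "," if ascsv else "\t"
        data = np.atleast_2d(thevecs)
        with open(outputfile, "w") as f:
            if altmethod:
                # Vectorized: assemble each output line in one pass.
                lines = [sep.join(map(str, data[:, i])) for i in range(data.shape[1])]
                f.write("\n".join(lines) + "\n")
            else:
                # Nested loops: element-by-element writes, slower but equivalent.
                for i in range(data.shape[1]):
                    for j in range(data.shape[0]):
                        if j > 0:
                            f.write(sep)
                        f.write(str(data[j, i]))
                    f.write("\n")

    writenpvecs_sketch(np.array([[1, 2, 3], [4, 5, 6]]), "output.txt")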