rapidtide 3.0.10__py3-none-any.whl → 3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rapidtide/Colortables.py +492 -27
- rapidtide/OrthoImageItem.py +1053 -47
- rapidtide/RapidtideDataset.py +1533 -86
- rapidtide/_version.py +3 -3
- rapidtide/calccoherence.py +196 -29
- rapidtide/calcnullsimfunc.py +191 -40
- rapidtide/calcsimfunc.py +245 -42
- rapidtide/correlate.py +1210 -393
- rapidtide/data/examples/src/testLD +56 -0
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testdelayvar +0 -1
- rapidtide/data/examples/src/testfmri +19 -1
- rapidtide/data/examples/src/testglmfilt +5 -5
- rapidtide/data/examples/src/testhappy +30 -1
- rapidtide/data/examples/src/testppgproc +17 -0
- rapidtide/data/examples/src/testrolloff +11 -0
- rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
- rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
- rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
- rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
- rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
- rapidtide/decorators.py +91 -0
- rapidtide/dlfilter.py +2225 -108
- rapidtide/dlfiltertorch.py +4843 -0
- rapidtide/externaltools.py +327 -12
- rapidtide/fMRIData_class.py +79 -40
- rapidtide/filter.py +1899 -810
- rapidtide/fit.py +2004 -574
- rapidtide/genericmultiproc.py +93 -18
- rapidtide/happy_supportfuncs.py +2044 -171
- rapidtide/helper_classes.py +584 -43
- rapidtide/io.py +2363 -370
- rapidtide/linfitfiltpass.py +341 -75
- rapidtide/makelaggedtcs.py +211 -20
- rapidtide/maskutil.py +423 -53
- rapidtide/miscmath.py +827 -121
- rapidtide/multiproc.py +210 -22
- rapidtide/patchmatch.py +234 -33
- rapidtide/peakeval.py +32 -30
- rapidtide/ppgproc.py +2203 -0
- rapidtide/qualitycheck.py +352 -39
- rapidtide/refinedelay.py +422 -57
- rapidtide/refineregressor.py +498 -184
- rapidtide/resample.py +671 -185
- rapidtide/scripts/applyppgproc.py +28 -0
- rapidtide/simFuncClasses.py +1052 -77
- rapidtide/simfuncfit.py +260 -46
- rapidtide/stats.py +540 -238
- rapidtide/tests/happycomp +9 -0
- rapidtide/tests/test_dlfiltertorch.py +627 -0
- rapidtide/tests/test_findmaxlag.py +24 -8
- rapidtide/tests/test_fullrunhappy_v1.py +0 -2
- rapidtide/tests/test_fullrunhappy_v2.py +0 -2
- rapidtide/tests/test_fullrunhappy_v3.py +1 -0
- rapidtide/tests/test_fullrunhappy_v4.py +2 -2
- rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
- rapidtide/tests/test_simroundtrip.py +8 -8
- rapidtide/tests/utils.py +9 -8
- rapidtide/tidepoolTemplate.py +142 -38
- rapidtide/tidepoolTemplate_alt.py +165 -44
- rapidtide/tidepoolTemplate_big.py +189 -52
- rapidtide/util.py +1217 -118
- rapidtide/voxelData.py +684 -37
- rapidtide/wiener.py +19 -12
- rapidtide/wiener2.py +113 -7
- rapidtide/wiener_doc.py +255 -0
- rapidtide/workflows/adjustoffset.py +105 -3
- rapidtide/workflows/aligntcs.py +85 -2
- rapidtide/workflows/applydlfilter.py +87 -10
- rapidtide/workflows/applyppgproc.py +522 -0
- rapidtide/workflows/atlasaverage.py +210 -47
- rapidtide/workflows/atlastool.py +100 -3
- rapidtide/workflows/calcSimFuncMap.py +294 -64
- rapidtide/workflows/calctexticc.py +201 -9
- rapidtide/workflows/ccorrica.py +97 -4
- rapidtide/workflows/cleanregressor.py +168 -29
- rapidtide/workflows/delayvar.py +163 -10
- rapidtide/workflows/diffrois.py +81 -3
- rapidtide/workflows/endtidalproc.py +144 -4
- rapidtide/workflows/fdica.py +195 -15
- rapidtide/workflows/filtnifti.py +70 -3
- rapidtide/workflows/filttc.py +74 -3
- rapidtide/workflows/fitSimFuncMap.py +206 -48
- rapidtide/workflows/fixtr.py +73 -3
- rapidtide/workflows/gmscalc.py +113 -3
- rapidtide/workflows/happy.py +813 -201
- rapidtide/workflows/happy2std.py +144 -12
- rapidtide/workflows/happy_parser.py +149 -8
- rapidtide/workflows/histnifti.py +118 -2
- rapidtide/workflows/histtc.py +84 -3
- rapidtide/workflows/linfitfilt.py +117 -4
- rapidtide/workflows/localflow.py +328 -28
- rapidtide/workflows/mergequality.py +79 -3
- rapidtide/workflows/niftidecomp.py +322 -18
- rapidtide/workflows/niftistats.py +174 -4
- rapidtide/workflows/pairproc.py +88 -2
- rapidtide/workflows/pairwisemergenifti.py +85 -2
- rapidtide/workflows/parser_funcs.py +1421 -40
- rapidtide/workflows/physiofreq.py +137 -11
- rapidtide/workflows/pixelcomp.py +208 -5
- rapidtide/workflows/plethquality.py +103 -21
- rapidtide/workflows/polyfitim.py +151 -11
- rapidtide/workflows/proj2flow.py +75 -2
- rapidtide/workflows/rankimage.py +111 -4
- rapidtide/workflows/rapidtide.py +272 -15
- rapidtide/workflows/rapidtide2std.py +98 -2
- rapidtide/workflows/rapidtide_parser.py +109 -9
- rapidtide/workflows/refineDelayMap.py +143 -33
- rapidtide/workflows/refineRegressor.py +682 -93
- rapidtide/workflows/regressfrommaps.py +152 -31
- rapidtide/workflows/resamplenifti.py +85 -3
- rapidtide/workflows/resampletc.py +91 -3
- rapidtide/workflows/retrolagtcs.py +98 -6
- rapidtide/workflows/retroregress.py +165 -9
- rapidtide/workflows/roisummarize.py +173 -5
- rapidtide/workflows/runqualitycheck.py +71 -3
- rapidtide/workflows/showarbcorr.py +147 -4
- rapidtide/workflows/showhist.py +86 -2
- rapidtide/workflows/showstxcorr.py +160 -3
- rapidtide/workflows/showtc.py +159 -3
- rapidtide/workflows/showxcorrx.py +184 -4
- rapidtide/workflows/showxy.py +185 -15
- rapidtide/workflows/simdata.py +262 -36
- rapidtide/workflows/spatialfit.py +77 -2
- rapidtide/workflows/spatialmi.py +251 -27
- rapidtide/workflows/spectrogram.py +305 -32
- rapidtide/workflows/synthASL.py +154 -3
- rapidtide/workflows/tcfrom2col.py +76 -2
- rapidtide/workflows/tcfrom3col.py +74 -2
- rapidtide/workflows/tidepool.py +2972 -133
- rapidtide/workflows/utils.py +19 -14
- rapidtide/workflows/utils_doc.py +293 -0
- rapidtide/workflows/variabilityizer.py +116 -3
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/METADATA +10 -9
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/RECORD +141 -122
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
- {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
rapidtide/util.py
CHANGED
|
@@ -27,13 +27,17 @@ import sys
|
|
|
27
27
|
import time
|
|
28
28
|
from datetime import datetime
|
|
29
29
|
from multiprocessing import shared_memory
|
|
30
|
+
from typing import Any
|
|
30
31
|
|
|
31
32
|
import matplotlib.pyplot as plt
|
|
32
33
|
import numpy as np
|
|
33
34
|
import pandas as pd
|
|
35
|
+
from numpy.typing import NDArray
|
|
34
36
|
|
|
35
37
|
import rapidtide._version as tide_versioneer
|
|
38
|
+
import rapidtide.decorators
|
|
36
39
|
import rapidtide.io as tide_io
|
|
40
|
+
from rapidtide.decorators import getdecoratorvars
|
|
37
41
|
|
|
38
42
|
try:
|
|
39
43
|
import mkl
|
|
@@ -50,16 +54,33 @@ MemoryLGR = logging.getLogger("MEMORY")
|
|
|
50
54
|
# ---------------------------------------- Global constants -------------------------------------------
|
|
51
55
|
defaultbutterorder = 6
|
|
52
56
|
MAXLINES = 10000000
|
|
53
|
-
donotbeaggressive = True
|
|
54
57
|
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
58
|
+
|
|
59
|
+
def disablenumba() -> None:
|
|
60
|
+
"""
|
|
61
|
+
Set a global variable to disable numba.
|
|
62
|
+
|
|
63
|
+
This function sets the global variable `donotusenumba` to `True`, which
|
|
64
|
+
effectively disables the use of numba in subsequent operations that check
|
|
65
|
+
this variable.
|
|
66
|
+
|
|
67
|
+
Notes
|
|
68
|
+
-----
|
|
69
|
+
This function modifies a global variable. The variable `donotusenumba` should
|
|
70
|
+
be checked by other functions in the codebase to determine whether to use
|
|
71
|
+
numba or not.
|
|
72
|
+
|
|
73
|
+
Examples
|
|
74
|
+
--------
|
|
75
|
+
>>> disablenumba()
|
|
76
|
+
>>> print(donotusenumba)
|
|
77
|
+
True
|
|
78
|
+
"""
|
|
79
|
+
global donotusenumba
|
|
59
80
|
donotusenumba = True
|
|
60
|
-
else:
|
|
61
|
-
donotusenumba = False
|
|
62
81
|
|
|
82
|
+
|
|
83
|
+
# ----------------------------------------- Conditional imports ---------------------------------------
|
|
63
84
|
try:
|
|
64
85
|
import pyfftw
|
|
65
86
|
except ImportError:
|
|
@@ -68,7 +89,52 @@ else:
|
|
|
68
89
|
pyfftwpresent = True
|
|
69
90
|
|
|
70
91
|
|
|
71
|
-
def checkimports(optiondict):
|
|
92
|
+
def checkimports(optiondict: dict[str, Any]) -> None:
|
|
93
|
+
"""
|
|
94
|
+
Check availability of optional dependencies and optimization settings.
|
|
95
|
+
|
|
96
|
+
This function verifies the presence of optional packages and optimization
|
|
97
|
+
settings, printing status messages and updating the provided dictionary with
|
|
98
|
+
the results. It checks for pyfftw, aggressive optimization flags, and numba
|
|
99
|
+
usage settings.
|
|
100
|
+
|
|
101
|
+
Parameters
|
|
102
|
+
----------
|
|
103
|
+
optiondict : dict[str, Any]
|
|
104
|
+
Dictionary to be updated with boolean values indicating the status of
|
|
105
|
+
optional dependencies and optimization settings. The dictionary will be
|
|
106
|
+
modified in-place with the following keys:
|
|
107
|
+
|
|
108
|
+
- "pfftwexists": bool, True if pyfftw is available, False otherwise
|
|
109
|
+
- "donotbeaggressive": bool, True if aggressive optimization is disabled,
|
|
110
|
+
False if enabled
|
|
111
|
+
- "donotusenumba": bool, True if numba usage is disabled, False if numba
|
|
112
|
+
will be used when available
|
|
113
|
+
|
|
114
|
+
Returns
|
|
115
|
+
-------
|
|
116
|
+
None
|
|
117
|
+
This function does not return a value but modifies the input dictionary
|
|
118
|
+
in-place.
|
|
119
|
+
|
|
120
|
+
Notes
|
|
121
|
+
-----
|
|
122
|
+
The function relies on global variables:
|
|
123
|
+
- `pyfftwpresent`: Indicates if pyfftw is available
|
|
124
|
+
- `donotbeaggressive`: Controls aggressive optimization flag
|
|
125
|
+
- `donotusenumba`: Controls numba usage flag
|
|
126
|
+
|
|
127
|
+
Examples
|
|
128
|
+
--------
|
|
129
|
+
>>> options = {}
|
|
130
|
+
>>> checkimports(options)
|
|
131
|
+
pfftw does not exist
|
|
132
|
+
aggressive optimization
|
|
133
|
+
using numba if present
|
|
134
|
+
>>> print(options)
|
|
135
|
+
{'pfftwexists': False, 'donotbeaggressive': False, 'donotusenumba': False}
|
|
136
|
+
"""
|
|
137
|
+
donotusenumba, donotbeaggressive = getdecoratorvars()
|
|
72
138
|
if pyfftwpresent:
|
|
73
139
|
print("pfftw exists")
|
|
74
140
|
else:
|
|
@@ -81,7 +147,6 @@ def checkimports(optiondict):
|
|
|
81
147
|
print("aggressive optimization")
|
|
82
148
|
optiondict["donotbeaggressive"] = donotbeaggressive
|
|
83
149
|
|
|
84
|
-
global donotusenumba
|
|
85
150
|
if donotusenumba:
|
|
86
151
|
print("will not use numba even if present")
|
|
87
152
|
else:
|
|
@@ -89,31 +154,41 @@ def checkimports(optiondict):
|
|
|
89
154
|
optiondict["donotusenumba"] = donotusenumba
|
|
90
155
|
|
|
91
156
|
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
if donotusenumba:
|
|
96
|
-
return f
|
|
97
|
-
return jit(f, nopython=True)
|
|
98
|
-
|
|
99
|
-
return resdec
|
|
157
|
+
def disablemkl(numprocs: int, debug: bool = False) -> None:
|
|
158
|
+
"""
|
|
159
|
+
Disable MKL threading for parallel execution.
|
|
100
160
|
|
|
161
|
+
This function configures Intel MKL (Math Kernel Library) to use only a single
|
|
162
|
+
thread when the number of processes exceeds 1. This is useful for avoiding
|
|
163
|
+
oversubscription of CPU resources in parallel computing environments.
|
|
101
164
|
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
165
|
+
Parameters
|
|
166
|
+
----------
|
|
167
|
+
numprocs : int
|
|
168
|
+
Number of processes to check against. If greater than 1, MKL threading
|
|
169
|
+
will be disabled by setting the number of threads to 1.
|
|
170
|
+
debug : bool, optional
|
|
171
|
+
If True, prints debug information about the threading configuration
|
|
172
|
+
(default is False).
|
|
107
173
|
|
|
108
|
-
|
|
174
|
+
Returns
|
|
175
|
+
-------
|
|
176
|
+
None
|
|
177
|
+
This function does not return any value.
|
|
109
178
|
|
|
179
|
+
Notes
|
|
180
|
+
-----
|
|
181
|
+
This function only has an effect if MKL is available (mklexists is True).
|
|
182
|
+
The function uses mkl.set_num_threads(1) to disable parallel threading in MKL.
|
|
110
183
|
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
184
|
+
Examples
|
|
185
|
+
--------
|
|
186
|
+
>>> disablemkl(numprocs=4, debug=True)
|
|
187
|
+
disablemkl: setting threads to 1
|
|
114
188
|
|
|
115
|
-
|
|
116
|
-
|
|
189
|
+
>>> disablemkl(numprocs=1)
|
|
190
|
+
# No output, no threading changes
|
|
191
|
+
"""
|
|
117
192
|
if mklexists:
|
|
118
193
|
if numprocs > 1:
|
|
119
194
|
if debug:
|
|
@@ -121,7 +196,37 @@ def disablemkl(numprocs, debug=False):
|
|
|
121
196
|
mkl.set_num_threads(1)
|
|
122
197
|
|
|
123
198
|
|
|
124
|
-
def enablemkl(numthreads, debug=False):
|
|
199
|
+
def enablemkl(numthreads: int, debug: bool = False) -> None:
|
|
200
|
+
"""
|
|
201
|
+
Enable Intel MKL threading with specified number of threads.
|
|
202
|
+
|
|
203
|
+
This function configures the Intel MKL (Math Kernel Library) to use the
|
|
204
|
+
specified number of threads for parallel execution. It only has an effect
|
|
205
|
+
if MKL is available in the current environment.
|
|
206
|
+
|
|
207
|
+
Parameters
|
|
208
|
+
----------
|
|
209
|
+
numthreads : int
|
|
210
|
+
Number of threads to use for MKL operations. Must be a positive integer.
|
|
211
|
+
debug : bool, optional
|
|
212
|
+
If True, print debug information about the thread setting operation.
|
|
213
|
+
Default is False.
|
|
214
|
+
|
|
215
|
+
Returns
|
|
216
|
+
-------
|
|
217
|
+
None
|
|
218
|
+
This function does not return any value.
|
|
219
|
+
|
|
220
|
+
Notes
|
|
221
|
+
-----
|
|
222
|
+
This function only has an effect if MKL is available (mklexists is True).
|
|
223
|
+
The function uses mkl.set_num_threads() internally to configure the threading.
|
|
224
|
+
|
|
225
|
+
Examples
|
|
226
|
+
--------
|
|
227
|
+
>>> enablemkl(4)
|
|
228
|
+
>>> enablemkl(8, debug=True)
|
|
229
|
+
"""
|
|
125
230
|
if mklexists:
|
|
126
231
|
if debug:
|
|
127
232
|
print(f"enablemkl: setting threads to {numthreads}")
|
|
@@ -129,7 +234,32 @@ def enablemkl(numthreads, debug=False):
|
|
|
129
234
|
|
|
130
235
|
|
|
131
236
|
# --------------------------- Utility functions -------------------------------------------------
|
|
132
|
-
def findavailablemem():
|
|
237
|
+
def findavailablemem() -> tuple[int, int]:
|
|
238
|
+
"""
|
|
239
|
+
Get available memory information from system resources.
|
|
240
|
+
|
|
241
|
+
This function retrieves memory information from either cgroup limits or system
|
|
242
|
+
free memory statistics. It returns a tuple containing the memory limit and
|
|
243
|
+
swap information, both in bytes.
|
|
244
|
+
|
|
245
|
+
Returns
|
|
246
|
+
-------
|
|
247
|
+
tuple[int, int]
|
|
248
|
+
A tuple containing two integers:
|
|
249
|
+
- First integer: Memory limit in bytes (from cgroup or total memory)
|
|
250
|
+
- Second integer: Swap available in bytes (from cgroup or swap memory)
|
|
251
|
+
|
|
252
|
+
Notes
|
|
253
|
+
-----
|
|
254
|
+
The function first checks for cgroup memory limits at "/sys/fs/cgroup/memory/memory.limit_in_bytes".
|
|
255
|
+
If found, it returns the limit for both values in the tuple. Otherwise, it uses the "free" command
|
|
256
|
+
to retrieve system memory information, specifically the free memory and swap memory values.
|
|
257
|
+
|
|
258
|
+
Examples
|
|
259
|
+
--------
|
|
260
|
+
>>> findavailablemem()
|
|
261
|
+
(8589934592, 2147483648)
|
|
262
|
+
"""
|
|
133
263
|
if os.path.isfile("/sys/fs/cgroup/memory/memory.limit_in_bytes"):
|
|
134
264
|
with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as limit:
|
|
135
265
|
mem = int(limit.read())
|
|
@@ -141,13 +271,45 @@ def findavailablemem():
|
|
|
141
271
|
return free, swap
|
|
142
272
|
|
|
143
273
|
|
|
144
|
-
def checkifincontainer():
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
274
|
+
def checkifincontainer() -> str | None:
|
|
275
|
+
"""
|
|
276
|
+
Determine if the program is running in a container and identify the container type.
|
|
277
|
+
|
|
278
|
+
This function checks environment variables to detect whether the program is running
|
|
279
|
+
inside a container environment. It specifically looks for indicators of Docker,
|
|
280
|
+
Singularity, and CircleCI environments. The function returns the container type
|
|
281
|
+
as a string, or None if running outside any container.
|
|
282
|
+
|
|
283
|
+
Returns
|
|
284
|
+
-------
|
|
285
|
+
str or None
|
|
286
|
+
Container type if running in a container, otherwise None. Possible return values:
|
|
287
|
+
- "Docker": Running in a Docker container (indicated by RUNNING_IN_CONTAINER env var)
|
|
288
|
+
- "Singularity": Running in a Singularity container (indicated by SINGULARITY_CONTAINER env var)
|
|
289
|
+
- "CircleCI": Running in CircleCI environment (indicated by CIRCLECI env var)
|
|
290
|
+
- None: Not running in any container environment
|
|
291
|
+
|
|
292
|
+
Notes
|
|
293
|
+
-----
|
|
294
|
+
The function prioritizes detection in the following order:
|
|
295
|
+
1. Singularity containers (SINGULARITY_CONTAINER env var)
|
|
296
|
+
2. Docker containers (RUNNING_IN_CONTAINER env var)
|
|
297
|
+
3. CircleCI environment (CIRCLECI env var)
|
|
298
|
+
|
|
299
|
+
CircleCI detection takes precedence over other container types, as CircleCI
|
|
300
|
+
environments may not handle container parameter adjustments properly.
|
|
301
|
+
|
|
302
|
+
Examples
|
|
303
|
+
--------
|
|
304
|
+
>>> checkifincontainer()
|
|
305
|
+
'Docker'
|
|
306
|
+
|
|
307
|
+
>>> checkifincontainer()
|
|
308
|
+
'Singularity'
|
|
309
|
+
|
|
310
|
+
>>> checkifincontainer()
|
|
311
|
+
None
|
|
312
|
+
"""
|
|
151
313
|
if os.environ.get("SINGULARITY_CONTAINER") is not None:
|
|
152
314
|
containertype = "Singularity"
|
|
153
315
|
elif os.environ.get("RUNNING_IN_CONTAINER") is not None:
|
|
@@ -159,11 +321,75 @@ def checkifincontainer():
|
|
|
159
321
|
return containertype
|
|
160
322
|
|
|
161
323
|
|
|
162
|
-
def setmemlimit(memlimit):
|
|
324
|
+
def setmemlimit(memlimit: int) -> None:
|
|
325
|
+
"""
|
|
326
|
+
Set the memory limit for the current process.
|
|
327
|
+
|
|
328
|
+
This function sets the virtual memory limit (RLIMIT_AS) for the current process
|
|
329
|
+
using the resource module. The limit is specified in bytes and applies to both
|
|
330
|
+
soft and hard limits.
|
|
331
|
+
|
|
332
|
+
Parameters
|
|
333
|
+
----------
|
|
334
|
+
memlimit : int
|
|
335
|
+
The memory limit in bytes. Setting this to -1 will remove the limit.
|
|
336
|
+
Values should be non-negative integers.
|
|
337
|
+
|
|
338
|
+
Returns
|
|
339
|
+
-------
|
|
340
|
+
None
|
|
341
|
+
This function does not return any value.
|
|
342
|
+
|
|
343
|
+
Notes
|
|
344
|
+
-----
|
|
345
|
+
- This function uses `resource.setrlimit()` with `resource.RLIMIT_AS`
|
|
346
|
+
- The memory limit is enforced by the operating system
|
|
347
|
+
- Setting memlimit to -1 removes any existing memory limit
|
|
348
|
+
- This function may raise `ValueError` or `OSError` if the limit cannot be set
|
|
349
|
+
- The limit applies to the current process and its children
|
|
350
|
+
|
|
351
|
+
Examples
|
|
352
|
+
--------
|
|
353
|
+
>>> setmemlimit(1024 * 1024 * 100) # Set limit to 100 MB
|
|
354
|
+
>>> setmemlimit(-1) # Remove memory limit
|
|
355
|
+
"""
|
|
163
356
|
resource.setrlimit(resource.RLIMIT_AS, (memlimit, memlimit))
|
|
164
357
|
|
|
165
358
|
|
|
166
|
-
def formatmemamt(meminbytes):
|
|
359
|
+
def formatmemamt(meminbytes: int) -> str:
|
|
360
|
+
"""
|
|
361
|
+
Format memory amount in bytes to human readable format.
|
|
362
|
+
|
|
363
|
+
Convert a memory size in bytes to a human readable string with appropriate units
|
|
364
|
+
(B, kB, MB, GB, TB).
|
|
365
|
+
|
|
366
|
+
Parameters
|
|
367
|
+
----------
|
|
368
|
+
meminbytes : int
|
|
369
|
+
Memory amount in bytes to be formatted.
|
|
370
|
+
|
|
371
|
+
Returns
|
|
372
|
+
-------
|
|
373
|
+
str
|
|
374
|
+
Formatted memory amount with appropriate unit. The result is rounded to 3
|
|
375
|
+
decimal places and includes the unit suffix.
|
|
376
|
+
|
|
377
|
+
Notes
|
|
378
|
+
-----
|
|
379
|
+
The function uses binary units (1024-based) rather than decimal units (1000-based).
|
|
380
|
+
Units are: B (bytes), kB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes).
|
|
381
|
+
|
|
382
|
+
Examples
|
|
383
|
+
--------
|
|
384
|
+
>>> formatmemamt(1024)
|
|
385
|
+
'1.000kB'
|
|
386
|
+
|
|
387
|
+
>>> formatmemamt(1048576)
|
|
388
|
+
'1.000MB'
|
|
389
|
+
|
|
390
|
+
>>> formatmemamt(1073741824)
|
|
391
|
+
'1.000GB'
|
|
392
|
+
"""
|
|
167
393
|
units = ["B", "kB", "MB", "GB", "TB"]
|
|
168
394
|
index = 0
|
|
169
395
|
unitnumber = np.uint64(1)
|
|
@@ -177,7 +403,44 @@ def formatmemamt(meminbytes):
|
|
|
177
403
|
return f"{round(meminbytes/unitnumber, 3):.3f}{units[-1]}"
|
|
178
404
|
|
|
179
405
|
|
|
180
|
-
def format_bytes(size):
|
|
406
|
+
def format_bytes(size: float) -> tuple[float, str]:
|
|
407
|
+
"""
|
|
408
|
+
Convert a size in bytes to a human-readable format with appropriate units.
|
|
409
|
+
|
|
410
|
+
Convert a size in bytes to a more readable format by scaling it to the
|
|
411
|
+
appropriate unit (bytes, kilobytes, megabytes, gigabytes, terabytes).
|
|
412
|
+
|
|
413
|
+
Parameters
|
|
414
|
+
----------
|
|
415
|
+
size : float
|
|
416
|
+
The size in bytes to be converted. Should be a non-negative number.
|
|
417
|
+
|
|
418
|
+
Returns
|
|
419
|
+
-------
|
|
420
|
+
tuple[float, str]
|
|
421
|
+
A tuple containing the scaled size (float) and the corresponding unit (str).
|
|
422
|
+
The unit will be one of: 'bytes', 'kilobytes', 'megabytes', 'gigabytes', 'terabytes'.
|
|
423
|
+
|
|
424
|
+
Notes
|
|
425
|
+
-----
|
|
426
|
+
This function uses base-2 (binary) units where 1 kilobyte = 1024 bytes.
|
|
427
|
+
The conversion continues until the size is less than 1024, at which point
|
|
428
|
+
the appropriate unit is returned.
|
|
429
|
+
|
|
430
|
+
Examples
|
|
431
|
+
--------
|
|
432
|
+
>>> format_bytes(512)
|
|
433
|
+
(512.0, 'bytes')
|
|
434
|
+
|
|
435
|
+
>>> format_bytes(2048)
|
|
436
|
+
(2.0, 'kilobytes')
|
|
437
|
+
|
|
438
|
+
>>> format_bytes(1048576)
|
|
439
|
+
(1.0, 'megabytes')
|
|
440
|
+
|
|
441
|
+
>>> format_bytes(1073741824)
|
|
442
|
+
(1.0, 'gigabytes')
|
|
443
|
+
"""
|
|
181
444
|
# 2**10 = 1024
|
|
182
445
|
power = 2**10
|
|
183
446
|
n = 0
|
|
@@ -188,15 +451,43 @@ def format_bytes(size):
|
|
|
188
451
|
return size, power_labels[n] + "bytes"
|
|
189
452
|
|
|
190
453
|
|
|
191
|
-
def logmem(msg=None):
|
|
192
|
-
"""
|
|
454
|
+
def logmem(msg: str | None = None) -> None:
|
|
455
|
+
"""
|
|
456
|
+
Log memory usage with a logging object.
|
|
457
|
+
|
|
458
|
+
This function logs detailed memory usage statistics for the current process
|
|
459
|
+
and its children, including resident set size (RSS), shared and unshared memory,
|
|
460
|
+
page faults, and swap usage. On Windows, memory statistics are not available
|
|
461
|
+
and a placeholder message is logged instead.
|
|
193
462
|
|
|
194
463
|
Parameters
|
|
195
464
|
----------
|
|
196
|
-
msg : str
|
|
197
|
-
A message to include in the first column.
|
|
198
|
-
If None,
|
|
199
|
-
|
|
465
|
+
msg : str, optional
|
|
466
|
+
A message to include in the first column of the logged output.
|
|
467
|
+
If None, column headers are logged instead. Default is None.
|
|
468
|
+
|
|
469
|
+
Returns
|
|
470
|
+
-------
|
|
471
|
+
None
|
|
472
|
+
This function does not return any value; it logs information to a global
|
|
473
|
+
logger named `MemoryLGR`.
|
|
474
|
+
|
|
475
|
+
Notes
|
|
476
|
+
-----
|
|
477
|
+
- On Unix-like systems (Linux, macOS), this function uses `resource.getrusage`
|
|
478
|
+
to retrieve memory usage details.
|
|
479
|
+
- On Windows, memory statistics are not supported and a placeholder message
|
|
480
|
+
is logged.
|
|
481
|
+
- The function maintains internal state (`lastmaxrss_parent`, `lastmaxrss_child`)
|
|
482
|
+
to compute differences in memory usage between calls.
|
|
483
|
+
|
|
484
|
+
Examples
|
|
485
|
+
--------
|
|
486
|
+
>>> logmem("Before loop")
|
|
487
|
+
# Logs memory usage with "Before loop" as the first column
|
|
488
|
+
|
|
489
|
+
>>> logmem()
|
|
490
|
+
# Logs column headers for memory usage statistics
|
|
200
491
|
"""
|
|
201
492
|
global lastmaxrss_parent, lastmaxrss_child
|
|
202
493
|
if platform.system() != "Windows":
|
|
@@ -252,16 +543,37 @@ def logmem(msg=None):
|
|
|
252
543
|
MemoryLGR.info("\t".join(outvals))
|
|
253
544
|
|
|
254
545
|
|
|
255
|
-
def findexecutable(command):
|
|
546
|
+
def findexecutable(command: str) -> str | None:
|
|
256
547
|
"""
|
|
548
|
+
Locate an executable file in the system PATH.
|
|
549
|
+
|
|
550
|
+
This function searches for an executable file with the given name in the
|
|
551
|
+
system's PATH environment variable. It uses the most appropriate method
|
|
552
|
+
based on the Python version.
|
|
257
553
|
|
|
258
554
|
Parameters
|
|
259
555
|
----------
|
|
260
|
-
command
|
|
556
|
+
command : str
|
|
557
|
+
The name of the executable command to search for.
|
|
261
558
|
|
|
262
559
|
Returns
|
|
263
560
|
-------
|
|
264
|
-
|
|
561
|
+
str or None
|
|
562
|
+
The full path to the executable if found, None otherwise.
|
|
563
|
+
|
|
564
|
+
Notes
|
|
565
|
+
-----
|
|
566
|
+
For Python 3.3 and later, this function uses `shutil.which()` which is the
|
|
567
|
+
recommended approach. For earlier Python versions, it manually searches
|
|
568
|
+
through the PATH environment variable and checks execute permissions.
|
|
569
|
+
|
|
570
|
+
Examples
|
|
571
|
+
--------
|
|
572
|
+
>>> findexecutable('python')
|
|
573
|
+
'/usr/bin/python'
|
|
574
|
+
|
|
575
|
+
>>> findexecutable('nonexistent_command')
|
|
576
|
+
None
|
|
265
577
|
"""
|
|
266
578
|
import shutil
|
|
267
579
|
|
|
@@ -275,16 +587,39 @@ def findexecutable(command):
|
|
|
275
587
|
return None
|
|
276
588
|
|
|
277
589
|
|
|
278
|
-
def isexecutable(command):
|
|
590
|
+
def isexecutable(command: str) -> bool:
|
|
279
591
|
"""
|
|
592
|
+
Check if a command is executable in the system's PATH.
|
|
593
|
+
|
|
594
|
+
This function determines whether a given command can be executed by checking
|
|
595
|
+
if it exists in the system's PATH and has execute permissions. For Python 3.3+
|
|
596
|
+
the function uses shutil.which() for cross-platform compatibility, while for
|
|
597
|
+
older versions it manually checks execute permissions in each PATH directory.
|
|
280
598
|
|
|
281
599
|
Parameters
|
|
282
600
|
----------
|
|
283
|
-
command
|
|
601
|
+
command : str
|
|
602
|
+
The name of the command to check for executability.
|
|
284
603
|
|
|
285
604
|
Returns
|
|
286
605
|
-------
|
|
287
|
-
|
|
606
|
+
bool
|
|
607
|
+
True if the command is executable, False otherwise.
|
|
608
|
+
|
|
609
|
+
Notes
|
|
610
|
+
-----
|
|
611
|
+
This function provides cross-platform compatibility by using different
|
|
612
|
+
approaches depending on the Python version. For Python 3.3 and later,
|
|
613
|
+
shutil.which() is used which handles platform-specific path searching.
|
|
614
|
+
For older Python versions, the function manually checks execute permissions
|
|
615
|
+
in each directory listed in the PATH environment variable.
|
|
616
|
+
|
|
617
|
+
Examples
|
|
618
|
+
--------
|
|
619
|
+
>>> isexecutable('python')
|
|
620
|
+
True
|
|
621
|
+
>>> isexecutable('nonexistent_command')
|
|
622
|
+
False
|
|
288
623
|
"""
|
|
289
624
|
import shutil
|
|
290
625
|
|
|
@@ -301,7 +636,42 @@ def isexecutable(command):
|
|
|
301
636
|
)
|
|
302
637
|
|
|
303
638
|
|
|
304
|
-
def makeadir(pathname):
|
|
639
|
+
def makeadir(pathname: str) -> bool:
|
|
640
|
+
"""
|
|
641
|
+
Create a directory if it doesn't already exist.
|
|
642
|
+
|
|
643
|
+
This function attempts to create a directory at the specified path. If the
|
|
644
|
+
directory already exists, it returns True without raising an error. If the
|
|
645
|
+
directory cannot be created due to a permissions error or other OS-related
|
|
646
|
+
issues, it returns False.
|
|
647
|
+
|
|
648
|
+
Parameters
|
|
649
|
+
----------
|
|
650
|
+
pathname : str
|
|
651
|
+
The path of the directory to create. Can be a relative or absolute path.
|
|
652
|
+
|
|
653
|
+
Returns
|
|
654
|
+
-------
|
|
655
|
+
bool
|
|
656
|
+
True if the directory exists or was successfully created, False otherwise.
|
|
657
|
+
|
|
658
|
+
Notes
|
|
659
|
+
-----
|
|
660
|
+
This function uses `os.makedirs()` which creates all intermediate-level
|
|
661
|
+
directories needed to contain the leaf directory. If the directory already
|
|
662
|
+
exists, no error is raised.
|
|
663
|
+
|
|
664
|
+
Examples
|
|
665
|
+
--------
|
|
666
|
+
>>> makeadir('test_directory')
|
|
667
|
+
True
|
|
668
|
+
|
|
669
|
+
>>> makeadir('path/to/new/directory')
|
|
670
|
+
True
|
|
671
|
+
|
|
672
|
+
>>> makeadir('/root/protected_directory')
|
|
673
|
+
False # Will fail due to insufficient permissions
|
|
674
|
+
"""
|
|
305
675
|
try:
|
|
306
676
|
os.makedirs(pathname)
|
|
307
677
|
except OSError:
|
|
@@ -315,7 +685,33 @@ def makeadir(pathname):
|
|
|
315
685
|
return True
|
|
316
686
|
|
|
317
687
|
|
|
318
|
-
def findreferencedir():
|
|
688
|
+
def findreferencedir() -> str:
|
|
689
|
+
"""
|
|
690
|
+
Find and return the path to the rapidtide reference data directory.
|
|
691
|
+
|
|
692
|
+
This function locates the site-packages directory and constructs the path to
|
|
693
|
+
the rapidtide reference data folder. It searches through all site-packages
|
|
694
|
+
directories to find the one ending with "site-packages" and then builds
|
|
695
|
+
the reference directory path relative to that location.
|
|
696
|
+
|
|
697
|
+
Returns
|
|
698
|
+
-------
|
|
699
|
+
str
|
|
700
|
+
Absolute path to the rapidtide reference data directory, typically
|
|
701
|
+
structured as: {site-packages-dir}/rapidtide/data/reference/
|
|
702
|
+
|
|
703
|
+
Notes
|
|
704
|
+
-----
|
|
705
|
+
This function is designed to work within the rapidtide package environment
|
|
706
|
+
and assumes that rapidtide is installed in a standard Python site-packages
|
|
707
|
+
location. The function will return None if no site-packages directory is found.
|
|
708
|
+
|
|
709
|
+
Examples
|
|
710
|
+
--------
|
|
711
|
+
>>> ref_dir = findreferencedir()
|
|
712
|
+
>>> print(ref_dir)
|
|
713
|
+
'/usr/local/lib/python3.8/site-packages/rapidtide/data/reference'
|
|
714
|
+
"""
|
|
319
715
|
# Get the list of directories
|
|
320
716
|
site_packages_dirs = site.getsitepackages()
|
|
321
717
|
|
|
@@ -335,33 +731,89 @@ def findreferencedir():
|
|
|
335
731
|
return referencedir
|
|
336
732
|
|
|
337
733
|
|
|
338
|
-
def savecommandline(theargs, thename):
|
|
734
|
+
def savecommandline(theargs: list[str], thename: str) -> None:
|
|
339
735
|
"""
|
|
736
|
+
Save command line arguments to a text file.
|
|
737
|
+
|
|
738
|
+
This function takes a list of command line arguments and saves them
|
|
739
|
+
as a single line in a text file with a specified name.
|
|
340
740
|
|
|
341
741
|
Parameters
|
|
342
742
|
----------
|
|
343
|
-
theargs
|
|
344
|
-
|
|
743
|
+
theargs : list[str]
|
|
744
|
+
List of command line arguments to be saved
|
|
745
|
+
thename : str
|
|
746
|
+
Base name for the output file (without extension)
|
|
345
747
|
|
|
346
748
|
Returns
|
|
347
749
|
-------
|
|
348
|
-
|
|
750
|
+
None
|
|
751
|
+
This function does not return any value
|
|
752
|
+
|
|
753
|
+
Notes
|
|
754
|
+
-----
|
|
755
|
+
The function creates a file named ``{thename}_commandline.txt`` containing
|
|
756
|
+
the command line arguments joined by spaces on a single line.
|
|
757
|
+
|
|
758
|
+
Examples
|
|
759
|
+
--------
|
|
760
|
+
>>> savecommandline(['python', 'script.py', '--verbose'], 'myrun')
|
|
761
|
+
# Creates file 'myrun_commandline.txt' with content: "python script.py --verbose"
|
|
349
762
|
"""
|
|
350
763
|
tide_io.writevec([" ".join(theargs)], thename + "_commandline.txt")
|
|
351
764
|
|
|
352
765
|
|
|
353
|
-
def startendcheck(timepoints, startpoint, endpoint):
|
|
766
|
+
def startendcheck(timepoints: int, startpoint: int, endpoint: int) -> tuple[int, int]:
|
|
354
767
|
"""
|
|
768
|
+
Validate and adjust start and end points for time series processing.
|
|
769
|
+
|
|
770
|
+
This function checks if the provided start and end points are within valid
|
|
771
|
+
bounds for a time series with the specified number of time points. It handles
|
|
772
|
+
edge cases by adjusting values to reasonable defaults and raises errors for
|
|
773
|
+
invalid configurations.
|
|
355
774
|
|
|
356
775
|
Parameters
|
|
357
776
|
----------
|
|
358
|
-
timepoints
|
|
359
|
-
|
|
360
|
-
|
|
777
|
+
timepoints : int
|
|
778
|
+
Total number of time points in the series. Must be positive.
|
|
779
|
+
startpoint : int
|
|
780
|
+
Starting index for the time series segment. If negative, set to 0.
|
|
781
|
+
If greater than timepoints-1, the program exits with an error.
|
|
782
|
+
endpoint : int
|
|
783
|
+
Ending index for the time series segment. If -1, set to a large default value.
|
|
784
|
+
If greater than timepoints-1, set to timepoints-1.
|
|
361
785
|
|
|
362
786
|
Returns
|
|
363
787
|
-------
|
|
364
|
-
|
|
788
|
+
tuple[int, int]
|
|
789
|
+
A tuple containing (realstart, realend) where both values are valid
|
|
790
|
+
indices for the time series. realstart <= realend and both are within
|
|
791
|
+
the valid range [0, timepoints-1].
|
|
792
|
+
|
|
793
|
+
Notes
|
|
794
|
+
-----
|
|
795
|
+
- If startpoint is negative, it's automatically set to 0
|
|
796
|
+
- If endpoint is -1, it's set to 100000000 (large default value)
|
|
797
|
+
- If endpoint exceeds timepoints-1, it's set to timepoints-1
|
|
798
|
+
- The function exits with sys.exit() if startpoint >= endpoint or if
|
|
799
|
+
startpoint exceeds the maximum valid index
|
|
800
|
+
|
|
801
|
+
Examples
|
|
802
|
+
--------
|
|
803
|
+
>>> startendcheck(10, 2, 5)
|
|
804
|
+
startpoint set to 2
|
|
805
|
+
endpoint set to 5
|
|
806
|
+
(2, 5)
|
|
807
|
+
|
|
808
|
+
>>> startendcheck(5, -1, 3)
|
|
809
|
+
startpoint set to minimum, (0)
|
|
810
|
+
endpoint set to 3
|
|
811
|
+
(0, 3)
|
|
812
|
+
|
|
813
|
+
>>> startendcheck(5, 2, -1)
|
|
814
|
+
startpoint set to 2
|
|
815
|
+
endpoint set to maximum, (4)
|
|
816
|
+
(2, 4)
|
|
365
817
|
"""
|
|
366
818
|
if startpoint > timepoints - 1:
|
|
367
819
|
print("startpoint is too large (maximum is ", timepoints - 1, ")")
|
|
@@ -387,33 +839,61 @@ def startendcheck(timepoints, startpoint, endpoint):
|
|
|
387
839
|
|
|
388
840
|
|
|
389
841
|
def valtoindex(
|
|
390
|
-
thearray,
|
|
391
|
-
thevalue,
|
|
392
|
-
evenspacing=True,
|
|
393
|
-
discrete=True,
|
|
394
|
-
discretization="round",
|
|
395
|
-
debug=False,
|
|
396
|
-
):
|
|
842
|
+
thearray: NDArray,
|
|
843
|
+
thevalue: float,
|
|
844
|
+
evenspacing: bool = True,
|
|
845
|
+
discrete: bool = True,
|
|
846
|
+
discretization: str = "round",
|
|
847
|
+
debug: bool = False,
|
|
848
|
+
) -> int:
|
|
397
849
|
"""
|
|
850
|
+
Find the index of the closest value in an ordered array to a given value.
|
|
851
|
+
|
|
852
|
+
This function computes the index of the element in `thearray` that is closest
|
|
853
|
+
to `thevalue`. It supports both evenly spaced and unevenly spaced arrays,
|
|
854
|
+
with options for discrete or continuous index output and different rounding
|
|
855
|
+
methods.
|
|
398
856
|
|
|
399
857
|
Parameters
|
|
400
858
|
----------
|
|
401
|
-
thearray: array-like
|
|
402
|
-
An ordered list of values (does not need to be equally spaced)
|
|
403
|
-
thevalue: float
|
|
404
|
-
The value to search for in the array
|
|
405
|
-
evenspacing:
|
|
859
|
+
thearray : array-like
|
|
860
|
+
An ordered list of values (does not need to be equally spaced).
|
|
861
|
+
thevalue : float
|
|
862
|
+
The value to search for in the array.
|
|
863
|
+
evenspacing : bool, optional
|
|
406
864
|
If True (default), assume data is evenly spaced for faster calculation.
|
|
407
|
-
discrete:
|
|
408
|
-
If True
|
|
409
|
-
discretization:
|
|
410
|
-
Select rounding method
|
|
865
|
+
discrete : bool, optional
|
|
866
|
+
If True (default), the returned index is an integer.
|
|
867
|
+
discretization : str, optional
|
|
868
|
+
Select rounding method when `discrete=True`. Options are:
|
|
869
|
+
- "round" (default): round to nearest integer
|
|
870
|
+
- "floor": round down to nearest integer
|
|
871
|
+
- "ceiling": round up to nearest integer
|
|
872
|
+
debug : bool, optional
|
|
873
|
+
If True, print debug information during execution.
|
|
411
874
|
|
|
412
875
|
Returns
|
|
413
876
|
-------
|
|
414
|
-
|
|
415
|
-
The index of the
|
|
416
|
-
|
|
877
|
+
int or float
|
|
878
|
+
The index of the closest value in `thearray` to `thevalue`. If `discrete=False`,
|
|
879
|
+
the index may be a float.
|
|
880
|
+
|
|
881
|
+
Notes
|
|
882
|
+
-----
|
|
883
|
+
When `evenspacing=True`, the function assumes uniform spacing between elements
|
|
884
|
+
and calculates the index using a linear interpolation formula. This is faster
|
|
885
|
+
than the default method but only accurate for evenly spaced data.
|
|
886
|
+
|
|
887
|
+
Examples
|
|
888
|
+
--------
|
|
889
|
+
>>> import numpy as np
|
|
890
|
+
>>> arr = np.array([0, 1, 2, 3, 4])
|
|
891
|
+
>>> valtoindex(arr, 2.3)
|
|
892
|
+
2
|
|
893
|
+
>>> valtoindex(arr, 2.7, discretization="ceil")
|
|
894
|
+
3
|
|
895
|
+
>>> valtoindex(arr, 2.5, evenspacing=False)
|
|
896
|
+
2
|
|
417
897
|
"""
|
|
418
898
|
if evenspacing:
|
|
419
899
|
limval = np.max([thearray[0], np.min([thearray[-1], thevalue])])
|
|
@@ -444,19 +924,43 @@ def valtoindex(
|
|
|
444
924
|
return int((np.abs(thearray - thevalue)).argmin())
|
|
445
925
|
|
|
446
926
|
|
|
447
|
-
def progressbar(thisval, end_val, label="Percent", barsize=60):
|
|
927
|
+
def progressbar(thisval: int, end_val: int, label: str = "Percent", barsize: int = 60) -> None:
|
|
448
928
|
"""
|
|
929
|
+
Display a progress bar in the terminal.
|
|
930
|
+
|
|
931
|
+
This function creates a visual progress indicator that updates in place
|
|
932
|
+
on the terminal. It shows a bar filled according to the progress percentage
|
|
933
|
+
and displays the percentage value.
|
|
449
934
|
|
|
450
935
|
Parameters
|
|
451
936
|
----------
|
|
452
|
-
thisval
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
937
|
+
thisval : int
|
|
938
|
+
Current progress value. Should be less than or equal to ``end_val``.
|
|
939
|
+
end_val : int
|
|
940
|
+
Total value representing 100% progress.
|
|
941
|
+
label : str, optional
|
|
942
|
+
Label to display before the progress bar (default is "Percent").
|
|
943
|
+
barsize : int, optional
|
|
944
|
+
Size of the progress bar in characters (default is 60).
|
|
456
945
|
|
|
457
946
|
Returns
|
|
458
947
|
-------
|
|
459
|
-
|
|
948
|
+
None
|
|
949
|
+
This function does not return any value. It prints directly to stdout.
|
|
950
|
+
|
|
951
|
+
Notes
|
|
952
|
+
-----
|
|
953
|
+
The progress bar updates in place using carriage return (`\\r`) to overwrite
|
|
954
|
+
the previous output. The function uses ``sys.stdout.flush()`` to ensure
|
|
955
|
+
immediate display updates.
|
|
956
|
+
|
|
957
|
+
Examples
|
|
958
|
+
--------
|
|
959
|
+
>>> progressbar(25, 100, "Loading", 30)
|
|
960
|
+
Loading: [############################## ] 25.00%
|
|
961
|
+
|
|
962
|
+
>>> progressbar(50, 50)
|
|
963
|
+
Percent: [##################################################] 100.00%
|
|
460
964
|
"""
|
|
461
965
|
percent = float(thisval) / end_val
|
|
462
966
|
hashes = "#" * int(round(percent * barsize))
|
|
@@ -465,18 +969,40 @@ def progressbar(thisval, end_val, label="Percent", barsize=60):
|
|
|
465
969
|
sys.stdout.flush()
|
|
466
970
|
|
|
467
971
|
|
|
468
|
-
def makelaglist(lagstart, lagend, lagstep):
|
|
972
|
+
def makelaglist(lagstart: float, lagend: float, lagstep: float) -> NDArray:
|
|
469
973
|
"""
|
|
974
|
+
Create a list of lag values from start to end with specified step size.
|
|
975
|
+
|
|
976
|
+
This function generates an array of evenly spaced lag values starting from
|
|
977
|
+
`lagstart` up to (and including) `lagend` with increments of `lagstep`.
|
|
470
978
|
|
|
471
979
|
Parameters
|
|
472
980
|
----------
|
|
473
|
-
lagstart
|
|
474
|
-
|
|
475
|
-
|
|
981
|
+
lagstart : float
|
|
982
|
+
The starting value of the lag sequence.
|
|
983
|
+
lagend : float
|
|
984
|
+
The ending value of the lag sequence (inclusive).
|
|
985
|
+
lagstep : float
|
|
986
|
+
The step size between consecutive lag values.
|
|
476
987
|
|
|
477
988
|
Returns
|
|
478
989
|
-------
|
|
479
|
-
|
|
990
|
+
NDArray
|
|
991
|
+
Array of lag values from `lagstart` to `lagend` with step size `lagstep`.
|
|
992
|
+
|
|
993
|
+
Notes
|
|
994
|
+
-----
|
|
995
|
+
The function adjusts the `lagend` value to ensure that the last value in the
|
|
996
|
+
sequence is exactly `lagend` if it's a valid step from `lagstart`. The actual
|
|
997
|
+
number of steps is calculated as ``(lagend - lagstart) // lagstep + 1``.
|
|
998
|
+
|
|
999
|
+
Examples
|
|
1000
|
+
--------
|
|
1001
|
+
>>> makelaglist(0.0, 1.0, 0.2)
|
|
1002
|
+
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
|
|
1003
|
+
|
|
1004
|
+
>>> makelaglist(1.0, 5.0, 1.5)
|
|
1005
|
+
array([1. , 2.5, 4. ])
|
|
480
1006
|
"""
|
|
481
1007
|
numsteps = int((lagend - lagstart) // lagstep + 1)
|
|
482
1008
|
lagend = lagstart + lagstep * (numsteps - 1)
|
|
@@ -497,12 +1023,42 @@ def makelaglist(lagstart, lagend, lagstep):
|
|
|
497
1023
|
|
|
498
1024
|
|
|
499
1025
|
# ------------------------------------------ Version function ----------------------------------
|
|
500
|
-
def version():
|
|
1026
|
+
def version() -> tuple[str, str, str, bool | str]:
|
|
501
1027
|
"""
|
|
1028
|
+
Retrieve version information for the package, including version string,
|
|
1029
|
+
Git SHA, commit date, and dirty status.
|
|
1030
|
+
|
|
1031
|
+
This function attempts to retrieve version information from environment
|
|
1032
|
+
variables when running inside a container. If not in a container, it falls
|
|
1033
|
+
back to using `tide_versioneer.get_versions()` to obtain version details
|
|
1034
|
+
from the Git repository.
|
|
502
1035
|
|
|
503
1036
|
Returns
|
|
504
1037
|
-------
|
|
505
|
-
|
|
1038
|
+
tuple of (str, str, str, bool or str)
|
|
1039
|
+
A tuple containing:
|
|
1040
|
+
- version (str): The version string, potentially modified for container builds.
|
|
1041
|
+
- sha (str): The Git commit SHA, or "UNKNOWN" if not available.
|
|
1042
|
+
- date (str): The Git commit date, or "UNKNOWN" if not available.
|
|
1043
|
+
- isdirty (bool or str): Indicates whether the working directory is dirty
|
|
1044
|
+
(i.e., has uncommitted changes). Returns `True`, `False`, or `"UNKNOWN"`
|
|
1045
|
+
if the information is not available.
|
|
1046
|
+
|
|
1047
|
+
Notes
|
|
1048
|
+
-----
|
|
1049
|
+
- In containerized environments, version information is expected to be
|
|
1050
|
+
provided via environment variables: `GITVERSION`, `GITDIRECTVERSION`,
|
|
1051
|
+
`GITSHA`, and `GITDATE`.
|
|
1052
|
+
- If the environment variable `RUNNING_IN_CONTAINER` is not set, the function
|
|
1053
|
+
attempts to use `tide_versioneer` to extract version information from the
|
|
1054
|
+
Git repository.
|
|
1055
|
+
- If `tide_versioneer` is not available or fails, the function returns
|
|
1056
|
+
`("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")`.
|
|
1057
|
+
|
|
1058
|
+
Examples
|
|
1059
|
+
--------
|
|
1060
|
+
>>> version()
|
|
1061
|
+
('1.2.3', 'a1b2c3d', '2023-04-05', False)
|
|
506
1062
|
"""
|
|
507
1063
|
try:
|
|
508
1064
|
dummy = os.environ["RUNNING_IN_CONTAINER"]
|
|
@@ -579,25 +1135,80 @@ def version():
|
|
|
579
1135
|
|
|
580
1136
|
|
|
581
1137
|
# --------------------------- timing functions -------------------------------------------------
|
|
582
|
-
def timefmt(thenumber):
|
|
1138
|
+
def timefmt(thenumber: float) -> str:
|
|
583
1139
|
"""
|
|
1140
|
+
Format a floating-point number as a string with fixed width and 2 decimal places.
|
|
584
1141
|
|
|
585
1142
|
Parameters
|
|
586
1143
|
----------
|
|
587
|
-
thenumber
|
|
1144
|
+
thenumber : float
|
|
1145
|
+
The numeric value to be formatted as a string.
|
|
588
1146
|
|
|
589
1147
|
Returns
|
|
590
1148
|
-------
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
1149
|
+
str
|
|
1150
|
+
A string representation of the input number formatted to 2 decimal places
|
|
1151
|
+
with a minimum width of 10 characters, right-aligned.
|
|
1152
|
+
|
|
1153
|
+
Notes
|
|
1154
|
+
-----
|
|
1155
|
+
The formatting uses "{:10.2f}".format() which ensures:
|
|
1156
|
+
- Fixed width of 10 characters
|
|
1157
|
+
- 2 decimal places
|
|
1158
|
+
- Right alignment (default for numeric formats)
|
|
1159
|
+
|
|
1160
|
+
Examples
|
|
1161
|
+
--------
|
|
1162
|
+
>>> timefmt(123.456)
|
|
1163
|
+
' 123.46'
|
|
1164
|
+
>>> timefmt(1.234)
|
|
1165
|
+
' 1.23'
|
|
1166
|
+
>>> timefmt(0.0)
|
|
1167
|
+
' 0.00'
|
|
596
1168
|
"""
|
|
597
1169
|
return "{:10.2f}".format(thenumber)
|
|
598
1170
|
|
|
599
1171
|
|
|
600
|
-
def proctiminglogfile(logfilename, timewidth=10):
|
|
1172
|
+
def proctiminglogfile(logfilename: str, timewidth: int = 10) -> tuple[list[str], float]:
|
|
1173
|
+
"""
|
|
1174
|
+
Process a timing log file and return formatted timing information.
|
|
1175
|
+
|
|
1176
|
+
This function reads a timing log file, calculates cumulative and incremental
|
|
1177
|
+
time differences from the start time, and formats the output into a list of
|
|
1178
|
+
strings. If numerical data and units are present in the log, they are used
|
|
1179
|
+
to compute and display processing speeds.
|
|
1180
|
+
|
|
1181
|
+
Parameters
|
|
1182
|
+
----------
|
|
1183
|
+
logfilename : str
|
|
1184
|
+
Path to the timing log file. The file should be a CSV with columns:
|
|
1185
|
+
'time', 'description', 'number', 'units'.
|
|
1186
|
+
timewidth : int, optional
|
|
1187
|
+
Width for right-justifying time values in the output (default is 10).
|
|
1188
|
+
|
|
1189
|
+
Returns
|
|
1190
|
+
-------
|
|
1191
|
+
tuple[list[str], float]
|
|
1192
|
+
A tuple containing:
|
|
1193
|
+
- List of formatted timing lines as strings.
|
|
1194
|
+
- Total elapsed time in seconds as a float.
|
|
1195
|
+
|
|
1196
|
+
Notes
|
|
1197
|
+
-----
|
|
1198
|
+
The log file is expected to follow the format:
|
|
1199
|
+
`YYYYMMDDTHHMMSS.ffffff` for timestamps.
|
|
1200
|
+
The function assumes the first row is the starting point for all time calculations.
|
|
1201
|
+
|
|
1202
|
+
Examples
|
|
1203
|
+
--------
|
|
1204
|
+
>>> lines, total_time = proctiminglogfile('timing.log', timewidth=12)
|
|
1205
|
+
>>> for line in lines:
|
|
1206
|
+
... print(line)
|
|
1207
|
+
Total (s) Diff. (s) Description
|
|
1208
|
+
0.00 0.00 Start process
|
|
1209
|
+
5.20 5.20 Load data
|
|
1210
|
+
12.40 7.20 Process data (1000 items @ 138.89 items/s)
|
|
1211
|
+
"""
|
|
601
1212
|
timingdata = pd.read_csv(
|
|
602
1213
|
logfilename,
|
|
603
1214
|
sep=None,
|
|
@@ -635,18 +1246,51 @@ def proctiminglogfile(logfilename, timewidth=10):
|
|
|
635
1246
|
return outputlines, totaldiff
|
|
636
1247
|
|
|
637
1248
|
|
|
638
|
-
def proctiminginfo(
|
|
1249
|
+
def proctiminginfo(
|
|
1250
|
+
thetimings: list[tuple[str, float, float | None, str | None]],
|
|
1251
|
+
outputfile: str = "",
|
|
1252
|
+
extraheader: str | None = None,
|
|
1253
|
+
) -> None:
|
|
639
1254
|
"""
|
|
1255
|
+
Process and display timing information for program execution.
|
|
1256
|
+
|
|
1257
|
+
This function takes a list of timing events and displays them in a formatted table
|
|
1258
|
+
showing clock time, program time, duration, and event descriptions. Optional
|
|
1259
|
+
output to file and additional header information can also be specified.
|
|
640
1260
|
|
|
641
1261
|
Parameters
|
|
642
1262
|
----------
|
|
643
|
-
thetimings
|
|
644
|
-
|
|
645
|
-
|
|
1263
|
+
thetimings : list of tuple of (str, float, float | None, str | None)
|
|
1264
|
+
List of timing events where each event is a tuple containing:
|
|
1265
|
+
- Event description (str)
|
|
1266
|
+
- Timestamp (float)
|
|
1267
|
+
- Events per second (float or None)
|
|
1268
|
+
- Unit of measurement (str or None)
|
|
1269
|
+
outputfile : str, optional
|
|
1270
|
+
Path to output file for writing timing information (default is "")
|
|
1271
|
+
extraheader : str, optional
|
|
1272
|
+
Additional header text to be printed before timing information (default is None)
|
|
646
1273
|
|
|
647
1274
|
Returns
|
|
648
1275
|
-------
|
|
649
|
-
|
|
1276
|
+
None
|
|
1277
|
+
This function does not return any value but prints timing information to stdout
|
|
1278
|
+
and optionally writes to a file.
|
|
1279
|
+
|
|
1280
|
+
Notes
|
|
1281
|
+
-----
|
|
1282
|
+
The function formats timestamps using YYYYMMDDTHHMMSS format and calculates
|
|
1283
|
+
durations between consecutive events. If event rate information is provided,
|
|
1284
|
+
it will be displayed in the format "(rate unit/second)".
|
|
1285
|
+
|
|
1286
|
+
Examples
|
|
1287
|
+
--------
|
|
1288
|
+
>>> timings = [
|
|
1289
|
+
... ("Start", 1640995200.0, None, None),
|
|
1290
|
+
... ("Process A", 1640995205.5, 100.0, "events"),
|
|
1291
|
+
... ("End", 1640995210.0, None, None)
|
|
1292
|
+
... ]
|
|
1293
|
+
>>> proctiminginfo(timings, "timing_output.txt", "Execution Timing Report")
|
|
650
1294
|
"""
|
|
651
1295
|
theinfolist = []
|
|
652
1296
|
start = thetimings[0]
|
|
@@ -680,7 +1324,52 @@ def proctiminginfo(thetimings, outputfile="", extraheader=None):
|
|
|
680
1324
|
|
|
681
1325
|
|
|
682
1326
|
# timecourse functions
|
|
683
|
-
def maketcfrom3col(
|
|
1327
|
+
def maketcfrom3col(
|
|
1328
|
+
inputdata: NDArray, timeaxis: NDArray, outputvector: NDArray, debug: bool = False
|
|
1329
|
+
) -> NDArray:
|
|
1330
|
+
"""
|
|
1331
|
+
Create temporal output vector from 3-column input data.
|
|
1332
|
+
|
|
1333
|
+
This function processes input data containing start times, durations, and values,
|
|
1334
|
+
and maps these to an output vector based on a time axis. Each input row defines
|
|
1335
|
+
a time interval [start_time, start_time + duration] that is mapped to the output
|
|
1336
|
+
vector by setting the corresponding elements to the specified value.
|
|
1337
|
+
|
|
1338
|
+
Parameters
|
|
1339
|
+
----------
|
|
1340
|
+
inputdata : array-like
|
|
1341
|
+
3-column input data where:
|
|
1342
|
+
- First column: start times
|
|
1343
|
+
- Second column: durations
|
|
1344
|
+
- Third column: values to assign
|
|
1345
|
+
timeaxis : array-like
|
|
1346
|
+
Time axis defining the temporal resolution of the output vector
|
|
1347
|
+
outputvector : array-like
|
|
1348
|
+
Output vector to be populated with values from inputdata
|
|
1349
|
+
debug : bool, optional
|
|
1350
|
+
If True, displays a plot of the output vector (default is False)
|
|
1351
|
+
|
|
1352
|
+
Returns
|
|
1353
|
+
-------
|
|
1354
|
+
ndarray
|
|
1355
|
+
The populated output vector with values assigned according to input intervals
|
|
1356
|
+
|
|
1357
|
+
Notes
|
|
1358
|
+
-----
|
|
1359
|
+
- Intervals are clipped to the bounds of the time axis
|
|
1360
|
+
- Only intervals that overlap with the time axis (0 to max(timeaxis)) are processed
|
|
1361
|
+
- The function modifies the outputvector in-place
|
|
1362
|
+
|
|
1363
|
+
Examples
|
|
1364
|
+
--------
|
|
1365
|
+
>>> import numpy as np
|
|
1366
|
+
>>> timeaxis = np.linspace(0, 10, 11)
|
|
1367
|
+
>>> inputdata = np.array([[1, 2, 5], [3, 1, 10]])
|
|
1368
|
+
>>> outputvector = np.zeros(11)
|
|
1369
|
+
>>> result = maketcfrom3col(inputdata, timeaxis, outputvector)
|
|
1370
|
+
>>> print(result)
|
|
1371
|
+
[0. 5. 5. 10. 10. 0. 0. 0. 0. 0. 0.]
|
|
1372
|
+
"""
|
|
684
1373
|
theshape = np.shape(inputdata)
|
|
685
1374
|
for idx in range(0, theshape[1]):
|
|
686
1375
|
starttime = inputdata[0, idx]
|
|
@@ -699,7 +1388,53 @@ def maketcfrom3col(inputdata, timeaxis, outputvector, debug=False):
|
|
|
699
1388
|
return outputvector
|
|
700
1389
|
|
|
701
1390
|
|
|
702
|
-
def maketcfrom2col(
|
|
1391
|
+
def maketcfrom2col(
|
|
1392
|
+
inputdata: NDArray, timeaxis: NDArray, outputvector: NDArray, debug: bool = False
|
|
1393
|
+
) -> NDArray:
|
|
1394
|
+
"""
|
|
1395
|
+
Create a temporal output vector from 2-column input data.
|
|
1396
|
+
|
|
1397
|
+
This function processes input data consisting of two columns where the first column
|
|
1398
|
+
represents time ranges and the second column represents the value to be assigned to
|
|
1399
|
+
corresponding time intervals in the output vector. The function iterates through
|
|
1400
|
+
the input data and assigns values to contiguous ranges in the output vector.
|
|
1401
|
+
|
|
1402
|
+
Parameters
|
|
1403
|
+
----------
|
|
1404
|
+
inputdata : NDArray
|
|
1405
|
+
2D array with shape (2, n) where first row contains start/end time indices
|
|
1406
|
+
and second row contains corresponding values to assign.
|
|
1407
|
+
timeaxis : NDArray
|
|
1408
|
+
1D array representing time values for plotting (used only in debug mode).
|
|
1409
|
+
outputvector : NDArray
|
|
1410
|
+
1D array to be populated with values from inputdata. This array is modified
|
|
1411
|
+
in-place and returned.
|
|
1412
|
+
debug : bool, optional
|
|
1413
|
+
If True, enables debug output including range assignments and plots the
|
|
1414
|
+
resulting output vector. Default is False.
|
|
1415
|
+
|
|
1416
|
+
Returns
|
|
1417
|
+
-------
|
|
1418
|
+
NDArray
|
|
1419
|
+
The modified outputvector with values assigned from inputdata.
|
|
1420
|
+
|
|
1421
|
+
Notes
|
|
1422
|
+
-----
|
|
1423
|
+
The function processes inputdata by iterating through columns and assigning
|
|
1424
|
+
values to ranges in outputvector. Each column represents a time interval
|
|
1425
|
+
[start, end) where start is taken from inputdata[0, i-1] and end from
|
|
1426
|
+
inputdata[0, i]. The value assigned is from inputdata[1, i-1].
|
|
1427
|
+
|
|
1428
|
+
Examples
|
|
1429
|
+
--------
|
|
1430
|
+
>>> import numpy as np
|
|
1431
|
+
>>> inputdata = np.array([[0, 5, 10, 15], [1, 2, 3, 4]])
|
|
1432
|
+
>>> timeaxis = np.arange(20)
|
|
1433
|
+
>>> outputvector = np.zeros(20)
|
|
1434
|
+
>>> result = maketcfrom2col(inputdata, timeaxis, outputvector, debug=False)
|
|
1435
|
+
>>> print(result[:15])
|
|
1436
|
+
[1. 1. 1. 1. 1. 2. 2. 2. 2. 2. 3. 3. 3. 3. 3.]
|
|
1437
|
+
"""
|
|
703
1438
|
theshape = np.shape(inputdata)
|
|
704
1439
|
rangestart = int(inputdata[0, 0])
|
|
705
1440
|
for i in range(1, theshape[1]):
|
|
@@ -721,7 +1456,62 @@ def maketcfrom2col(inputdata, timeaxis, outputvector, debug=False):
 
 
 # --------------------------- simulation functions ----------------------------------------------
-def makeslicetimes(
+def makeslicetimes(
+    numslices: int,
+    sliceordertype: str,
+    tr: float = 1.0,
+    multibandfac: int = 1,
+    debug: bool = False,
+) -> NDArray | None:
+    """
+    Generate slice timing list for MRI data acquisition based on slice ordering type.
+
+    Parameters
+    ----------
+    numslices : int
+        Number of slices in the volume.
+    sliceordertype : str
+        Type of slice ordering. Valid options are:
+        - 'ascending': slices acquired in ascending order
+        - 'descending': slices acquired in descending order
+        - 'ascending_interleaved': interleaved ascending order
+        - 'descending_interleaved': interleaved descending order
+        - 'ascending_sparkplug': sparkplug ascending order
+        - 'descending_sparkplug': sparkplug descending order
+        - 'ascending_interleaved_siemens': Siemens-style interleaved ascending
+        - 'descending_interleaved_siemens': Siemens-style interleaved descending
+        - 'ascending_interleaved_philips': Philips-style interleaved ascending
+        - 'descending_interleaved_philips': Philips-style interleaved descending
+    tr : float, optional
+        Repetition time in seconds (default is 1.0).
+    multibandfac : int, optional
+        Multiband factor (default is 1).
+    debug : bool, optional
+        If True, print debug information (default is False).
+
+    Returns
+    -------
+    NDArray | None
+        Array of slice times in seconds. Returns None if an error occurs.
+
+    Notes
+    -----
+    The function computes slice acquisition times based on the specified slice order
+    and multiband factor. It supports various slice ordering strategies commonly used
+    in MRI pulse sequences.
+
+    Examples
+    --------
+    >>> makeslicetimes(32, 'ascending_interleaved', tr=2.0, multibandfac=2)
+    array([0.  , 0.25, 0.5 , 0.75, 1.  , 1.25, 1.5 , 1.75, 2.  ,
+           2.25, 2.5 , 2.75, 3.  , 3.25, 3.5 , 3.75, 4.  , 4.25,
+           4.5 , 4.75, 5.  , 5.25, 5.5 , 5.75, 6.  , 6.25, 6.5 ,
+           6.75, 7.  , 7.25, 7.5 , 7.75])
+
+    >>> makeslicetimes(16, 'descending_sparkplug', multibandfac=2)
+    array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. ,
+           4.5, 5. , 5.5, 6. , 6.5, 7. , 7.5])
+    """
     outlist = np.zeros((numslices), dtype=np.float)
     if (numslices % multibandfac) != 0:
         print("ERROR: numslices is not evenly divisible by multband factor")
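As a rough illustration of how slice times interact with the multiband factor, the sketch below computes times for a plain 'ascending' ordering under the assumption that simultaneously excited slices share an acquisition time (ascending_slicetimes is a hypothetical helper; the other orderings listed above permute the excitation sequence rather than changing this arithmetic):

    import numpy as np

    def ascending_slicetimes(numslices, tr=1.0, multibandfac=1):
        # With multiband, only numslices // multibandfac excitations occur per TR;
        # slices separated by that stride are acquired simultaneously.
        if numslices % multibandfac != 0:
            raise ValueError("numslices must be divisible by the multiband factor")
        nexc = numslices // multibandfac      # excitations per TR
        step = tr / nexc                      # time between successive excitations
        return np.array([(slicenum % nexc) * step for slicenum in range(numslices)])

    print(ascending_slicetimes(8, tr=2.0, multibandfac=2))
    # [0.  0.5 1.  1.5 0.  0.5 1.  1.5]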
@@ -795,7 +1585,64 @@ def makeslicetimes(numslices, sliceordertype, tr=1.0, multibandfac=1, debug=Fals
 
 
 # --------------------------- testing functions -------------------------------------------------
-def comparemap(
+def comparemap(
+    map1: NDArray, map2: NDArray, mask: NDArray | None = None, debug: bool = False
+) -> tuple[float, float, float, float, float, float, float, float]:
+    """
+    Compare two arrays (maps) and compute various difference statistics.
+
+    This function computes multiple metrics comparing two input arrays, `map1` and `map2`.
+    It supports optional masking to focus comparisons on specific regions of the arrays.
+    The function handles both 1D and multi-dimensional arrays, with support for different
+    mask dimensions (either matching the map dimensions or one less).
+
+    Parameters
+    ----------
+    map1 : NDArray
+        First input array to compare. Can be 1D or multi-dimensional.
+    map2 : NDArray
+        Second input array to compare. Must have the same shape as `map1`.
+    mask : NDArray, optional
+        A boolean or numeric mask to select valid voxels for comparison.
+        If provided, its shape must either match `map1` or be one dimension smaller.
+        If `None`, all voxels are compared.
+    debug : bool, optional
+        If True, print debug information during execution. Default is False.
+
+    Returns
+    -------
+    tuple of float
+        A tuple containing the following statistics in order:
+        - `mindiff`: Minimum absolute difference between `map1` and `map2`.
+        - `maxdiff`: Maximum absolute difference between `map1` and `map2`.
+        - `meandiff`: Mean absolute difference between `map1` and `map2`.
+        - `mse`: Mean squared error between `map1` and `map2`.
+        - `minreldiff`: Minimum relative difference (relative to `map1`).
+        - `maxreldiff`: Maximum relative difference (relative to `map1`).
+        - `meanreldiff`: Mean relative difference (relative to `map1`).
+        - `relmse`: Mean squared relative error between `map1` and `map2`.
+
+    Notes
+    -----
+    - If `map1` contains zero values, relative differences are set to 0 to avoid division by zero.
+    - When `mask` is provided and has one fewer dimension than `map1`, it is reshaped to match
+      the first dimension of `map1` before comparison.
+    - The function exits with an error if shapes are incompatible or if masks are not valid.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> map1 = np.array([1.0, 2.0, 3.0])
+    >>> map2 = np.array([1.1, 2.2, 2.9])
+    >>> result = comparemap(map1, map2)
+    >>> print(result)
+    (-0.1, 0.1, 0.0, 0.006666666666666667, -0.1, 0.1, 0.0, 0.006666666666666667)
+
+    >>> mask = np.array([1, 1, 0])
+    >>> result = comparemap(map1, map2, mask=mask)
+    >>> print(result)
+    (-0.1, 0.1, 0.0, 0.006666666666666667, -0.1, 0.1, 0.0, 0.006666666666666667)
+    """
     ndims = len(map1.shape)
     if debug:
         print("map has", ndims, "axes")
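The kinds of statistics listed in the Returns section can be reproduced with a few lines of NumPy; the sketch below uses signed differences and map1-relative differences (zeroed where map1 is 0), which may differ in detail from comparemap's actual definitions:

    import numpy as np

    def diff_stats(map1, map2):
        diff = map2 - map1                       # signed voxelwise difference
        with np.errstate(divide="ignore", invalid="ignore"):
            reldiff = np.where(map1 != 0.0, diff / map1, 0.0)
        return (
            float(diff.min()), float(diff.max()),
            float(diff.mean()), float(np.mean(diff ** 2)),
            float(reldiff.min()), float(reldiff.max()),
            float(reldiff.mean()), float(np.mean(reldiff ** 2)),
        )

    print(diff_stats(np.array([1.0, 2.0, 3.0]), np.array([1.1, 2.2, 2.9])))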
@@ -861,7 +1708,54 @@ def comparemap(map1, map2, mask=None, debug=False):
     return mindiff, maxdiff, meandiff, mse, minreldiff, maxreldiff, meanreldiff, relmse
 
 
-def comparerapidtideruns(root1, root2, debug=False):
+def comparerapidtideruns(root1: str, root2: str, debug: bool = False) -> dict[str, Any]:
+    """
+    Compare results from two rapidtide runs by evaluating corresponding maps and timecourses.
+
+    This function compares NIfTI maps and text-based timecourses from two different rapidtide
+    processing runs. It evaluates differences between the corresponding files using various
+    statistical measures such as mean difference, max difference, mean squared error, and
+    relative versions of these metrics.
+
+    Parameters
+    ----------
+    root1 : str
+        The base filename (without extension) for the first rapidtide run.
+    root2 : str
+        The base filename (without extension) for the second rapidtide run.
+    debug : bool, optional
+        If True, print detailed debug information during execution. Default is False.
+
+    Returns
+    -------
+    dict[str, Any]
+        A dictionary containing comparison results for each map and timecourse.
+        Keys are map or timecourse names, and values are dictionaries with the following keys:
+        - 'mindiff': minimum difference
+        - 'maxdiff': maximum difference
+        - 'meandiff': mean difference
+        - 'mse': mean squared error
+        - 'relmindiff': relative minimum difference
+        - 'relmaxdiff': relative maximum difference
+        - 'relmeandiff': relative mean difference
+        - 'relmse': relative mean squared error
+
+    Notes
+    -----
+    - The function assumes that both runs have corresponding mask files named
+      ``<root>_desc-corrfit_mask.nii.gz``.
+    - For each map, the function checks if the corresponding NIfTI files exist and match
+      in spatial dimensions.
+    - For each timecourse, the function reads from JSON files and compares the time series
+      only if both files are present and have matching lengths.
+    - If spatial or temporal dimensions do not match, the function exits with an error.
+
+    Examples
+    --------
+    >>> results = comparerapidtideruns("run1", "run2", debug=True)
+    >>> print(results["maxtime"])
+    {'mindiff': -0.01, 'maxdiff': 0.02, 'meandiff': 0.005, 'mse': 0.0001, ...}
+    """
     results = {}
     maskname1 = f"{root1}_desc-corrfit_mask.nii.gz"
     (
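A stripped-down version of the per-map comparison described above might look like the following sketch (compare_one_map and the map suffix are hypothetical; assumes nibabel is available and that both runs follow the ``<root>_desc-..._map.nii.gz`` naming pattern noted above):

    import nibabel as nib
    import numpy as np

    def compare_one_map(root1, root2, suffix):
        # Load the same derivative map from two runs and refuse to compare
        # if the spatial dimensions do not match.
        img1 = nib.load(f"{root1}_{suffix}.nii.gz")
        img2 = nib.load(f"{root2}_{suffix}.nii.gz")
        if img1.shape != img2.shape:
            raise ValueError(f"spatial dimensions differ for {suffix}")
        diff = img2.get_fdata() - img1.get_fdata()
        return {
            "mindiff": float(diff.min()),
            "maxdiff": float(diff.max()),
            "meandiff": float(diff.mean()),
            "mse": float(np.mean(diff ** 2)),
        }

    # e.g. compare_one_map("run1", "run2", "desc-maxtime_map")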
@@ -989,7 +1883,50 @@ def comparerapidtideruns(root1, root2, debug=False):
     return results
 
 
-def comparehappyruns(root1, root2, debug=False):
+def comparehappyruns(root1: str, root2: str, debug: bool = False) -> dict[str, Any]:
+    """
+    Compare results from two happy runs by comparing output maps and timecourses.
+
+    This function compares neuroimaging maps (e.g., app_info, vessels_mask) and
+    cardiac timecourses (e.g., cardfromfmri_25.0Hz.txt) between two datasets
+    identified by their root names. It performs spatial alignment checks and
+    computes various statistical differences between corresponding files.
+
+    Parameters
+    ----------
+    root1 : str
+        Root name of the first dataset (e.g., 'subject01_run1').
+    root2 : str
+        Root name of the second dataset (e.g., 'subject01_run2').
+    debug : bool, optional
+        If True, print debug information during execution. Default is False.
+
+    Returns
+    -------
+    dict[str, Any]
+        A dictionary containing comparison results for each processed map and
+        timecourse. Each entry includes:
+        - mindiff: minimum absolute difference
+        - maxdiff: maximum absolute difference
+        - meandiff: mean absolute difference
+        - mse: mean squared error
+        - relmindiff: relative minimum difference
+        - relmaxdiff: relative maximum difference
+        - relmeandiff: relative mean difference
+        - relmse: relative mean squared error
+
+    Notes
+    -----
+    - The function expects specific file naming conventions for both maps and
+      timecourses.
+    - Spatial dimensions of masks and data must match for comparison to proceed.
+    - If any file is missing or mismatched, the function will exit with an error.
+
+    Examples
+    --------
+    >>> results = comparehappyruns('subject01_run1', 'subject01_run2', debug=True)
+    >>> print(results['app_info']['meandiff'])
+    """
     results = {}
     if debug:
         print("comparehappyruns rootnames:", root1, root2)
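For the timecourse side of such a comparison, the same checks can be sketched with plain NumPy (illustrative only; the real function's file discovery and JSON sidecar handling are more involved):

    import numpy as np

    def compare_timecourses(file1, file2):
        # Load two single-column text timecourses and compare them sample by sample.
        tc1 = np.loadtxt(file1)
        tc2 = np.loadtxt(file2)
        if len(tc1) != len(tc2):
            raise ValueError("timecourse lengths do not match")
        diff = tc2 - tc1
        return {
            "mindiff": float(diff.min()),
            "maxdiff": float(diff.max()),
            "meandiff": float(diff.mean()),
            "mse": float(np.mean(diff ** 2)),
        }

    # e.g. compare_timecourses("run1_cardfromfmri_25.0Hz.txt", "run2_cardfromfmri_25.0Hz.txt")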
@@ -1084,7 +2021,53 @@ def comparehappyruns(root1, root2, debug=False):
 
 
 # shared memory routines
-def numpy2shared(
+def numpy2shared(
+    inarray: NDArray, theouttype: type, name: str | None = None
+) -> tuple[NDArray, shared_memory.SharedMemory]:
+    """
+    Convert a numpy array to a shared memory array.
+
+    This function creates a shared memory block and copies the data from the input
+    numpy array to the shared memory array. The returned array and shared memory
+    object can be used for inter-process communication or memory sharing.
+
+    Parameters
+    ----------
+    inarray : numpy.ndarray
+        Input numpy array to be converted to shared memory.
+    theouttype : type
+        Data type of the output shared memory array.
+    name : str, optional
+        Name of the shared memory block. If None, an anonymous shared memory
+        block is created.
+
+    Returns
+    -------
+    tuple[numpy.ndarray, multiprocessing.shared_memory.SharedMemory]
+        A tuple containing:
+        - The shared memory array with the same shape as input array
+        - The shared memory object that manages the memory block
+
+    Notes
+    -----
+    The returned shared memory object must be explicitly closed and unlinked
+    when no longer needed to free system resources. The shared memory block
+    is assigned a name when it is created and can be accessed by other
+    processes using that name.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from multiprocessing import shared_memory
+    >>> arr = np.array([1, 2, 3, 4, 5])
+    >>> shared_arr, shm = numpy2shared(arr, np.int32)
+    >>> print(shared_arr)
+    [1 2 3 4 5]
+    >>> # Clean up when done
+    >>> del shared_arr  # release the view into the buffer before closing
+    >>> shm.close()
+    >>> shm.unlink()
+    """
     # Create a shared memory block to store the array data
     outnbytes = np.dtype(theouttype).itemsize * inarray.size
     shm = shared_memory.SharedMemory(name=None, create=True, size=outnbytes)
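The pattern being wrapped here is the standard library's multiprocessing.shared_memory; a minimal standalone version of the copy-into-shared-memory step might look like this sketch (to_shared is a hypothetical name, not the rapidtide function):

    import numpy as np
    from multiprocessing import shared_memory

    def to_shared(inarray, outtype):
        # Allocate a block big enough for the converted array ...
        nbytes = np.dtype(outtype).itemsize * inarray.size
        shm = shared_memory.SharedMemory(create=True, size=nbytes)
        # ... view it as a numpy array with the requested dtype and shape ...
        shared_arr = np.ndarray(inarray.shape, dtype=outtype, buffer=shm.buf)
        # ... and copy the data in. Other processes can attach via shm.name.
        shared_arr[:] = inarray[:]
        return shared_arr, shm

    arr, shm = to_shared(np.arange(5), np.float64)
    print(arr, shm.name)
    del arr          # drop the view so the buffer can be released
    shm.close()
    shm.unlink()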
@@ -1094,7 +2077,54 @@ def numpy2shared(inarray, theouttype, name=None):
     return inarray_shared, shm  # Return both the array and the shared memory object
 
 
-def allocshared(
+def allocshared(
+    theshape: tuple[int, ...], thetype: type, name: str | None = None
+) -> tuple[NDArray, shared_memory.SharedMemory]:
+    """
+    Allocate shared memory for a numpy array.
+
+    This function creates a shared memory block and returns both the numpy array
+    backed by this shared memory and the shared memory object itself. The array
+    can be accessed from different processes, making it useful for inter-process
+    communication.
+
+    Parameters
+    ----------
+    theshape : tuple of int
+        The shape of the array to be created. Must be a tuple of integers.
+    thetype : type
+        The data type of the array elements. Can be any numpy-compatible dtype.
+    name : str, optional
+        Name of existing shared memory object. If None, a new shared memory
+        block is created. Default is None.
+
+    Returns
+    -------
+    tuple[NDArray, shared_memory.SharedMemory]
+        A tuple containing:
+        - The numpy array backed by shared memory
+        - The shared_memory.SharedMemory object
+
+    Notes
+    -----
+    The returned shared memory object should be explicitly closed and unlinked
+    when no longer needed to free system resources. The array can be accessed
+    from multiple processes, but care should be taken to avoid race conditions.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from multiprocessing import shared_memory
+    >>> arr, shm = allocshared((3, 4), np.float64)
+    >>> arr[0, 0] = 42.0
+    >>> print(arr)
+    [[42.  0.  0.  0.]
+     [ 0.  0.  0.  0.]
+     [ 0.  0.  0.  0.]]
+    >>> # Don't forget to clean up
+    >>> del arr  # release the view into the buffer before closing
+    >>> shm.close()
+    >>> shm.unlink()
+    """
     # Calculate size based on shape
     thesize = np.prod(theshape)
     # Determine the data type size
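Since the point of a named block is letting a second process attach to it, a companion sketch using only the standard library API (not rapidtide code) would be:

    import numpy as np
    from multiprocessing import shared_memory

    shape, dtype = (3, 4), np.float64
    nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize

    # "Process A": create a block and expose it as a zeroed array.
    shm_a = shared_memory.SharedMemory(create=True, size=nbytes)
    arr_a = np.ndarray(shape, dtype=dtype, buffer=shm_a.buf)
    arr_a[:] = 0.0
    arr_a[0, 0] = 42.0

    # "Process B" (could be another interpreter): attach by name, no copy made.
    shm_b = shared_memory.SharedMemory(name=shm_a.name)
    arr_b = np.ndarray(shape, dtype=dtype, buffer=shm_b.buf)
    print(arr_b[0, 0])   # 42.0

    # Cleanup: every attachment closes; only the creator unlinks.
    del arr_a, arr_b
    shm_b.close()
    shm_a.close()
    shm_a.unlink()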
@@ -1106,14 +2136,83 @@ def allocshared(theshape, thetype, name=None):
     return outarray, shm  # Return both the array and the shared memory object
 
 
-def allocarray(
+def allocarray(
+    theshape: tuple[int, ...], thetype: type, shared: bool = False, name: str | None = None
+) -> tuple[NDArray, shared_memory.SharedMemory | None]:
+    """
+    Allocate and return a numpy array with specified shape and type.
+
+    Parameters
+    ----------
+    theshape : tuple[int, ...]
+        Shape of the array to be allocated.
+    thetype : type
+        Data type of the array elements.
+    shared : bool, optional
+        If True, allocate the array in shared memory. Default is False.
+    name : str | None, optional
+        Optional name for the shared memory segment, used only when shared=True.
+        Default is None.
+
+    Returns
+    -------
+    tuple[NDArray, shared_memory.SharedMemory | None]
+        A tuple containing:
+        - The allocated numpy array filled with zeros
+        - The shared memory object if shared=True, otherwise None
+
+    Notes
+    -----
+    When ``shared=True``, the function delegates to ``allocshared`` to create
+    a shared memory array. Otherwise, it creates a regular numpy array using
+    ``np.zeros`` with the specified shape and dtype.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> arr, shm = allocarray((3, 4), np.float64)
+    >>> print(arr.shape)
+    (3, 4)
+    >>> print(arr.dtype)
+    float64
+
+    >>> # For shared memory allocation
+    >>> arr, shm = allocarray((2, 3), np.int32, shared=True, name="my_array")
+    >>> print(shm is not None)
+    True
+    """
     if shared:
         return allocshared(theshape, thetype, name)
     else:
         return np.zeros(theshape, dtype=thetype), None
 
 
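A typical calling pattern for an allocate-then-release helper of this kind, whether or not the array ends up in shared memory, is a try/finally guard; the sketch below mirrors the allocarray/cleanup_shm pairing described here rather than reproducing it (alloc is a hypothetical name):

    import numpy as np
    from multiprocessing import shared_memory

    def alloc(shape, dtype, shared=False):
        # Shared: back a zeroed array with a SharedMemory block; otherwise plain np.zeros.
        if shared:
            shm = shared_memory.SharedMemory(
                create=True, size=int(np.prod(shape)) * np.dtype(dtype).itemsize
            )
            arr = np.ndarray(shape, dtype=dtype, buffer=shm.buf)
            arr[:] = 0
            return arr, shm
        return np.zeros(shape, dtype=dtype), None

    arr, shm = alloc((2, 3), np.float64, shared=True)
    try:
        arr[0, 0] = 1.0
    finally:
        del arr                  # release the view first
        if shm is not None:
            shm.close()
            shm.unlink()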
-def cleanup_shm(shm):
+def cleanup_shm(shm: shared_memory.SharedMemory | None) -> None:
+    """
+    Clean up shared memory object.
+
+    Parameters
+    ----------
+    shm : shared_memory.SharedMemory or None
+        Shared memory object to clean up. If None, no action is taken.
+
+    Returns
+    -------
+    None
+        This function does not return any value.
+
+    Notes
+    -----
+    This function is designed to properly release shared memory resources.
+    It should be called to clean up shared memory objects to prevent resource leaks.
+    If the shared memory object is None, the function performs no operation.
+
+    Examples
+    --------
+    >>> from multiprocessing import shared_memory
+    >>> shm = shared_memory.SharedMemory(create=True, size=1024)
+    >>> cleanup_shm(shm)
+    >>> # Shared memory is now cleaned up
+    """
     # Cleanup
     pass
     # if shm is not None: