rapidtide-3.0a12-py3-none-any.whl → rapidtide-3.0a14-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cloud/gmscalc-HCPYA +1 -1
- cloud/rapidtide-HCPYA +3 -3
- rapidtide/Colortables.py +10 -10
- rapidtide/DerivativeDelay.py +213 -0
- rapidtide/{Refiner.py → RegressorRefiner.py} +1 -1
- rapidtide/__init__.py +2 -1
- rapidtide/_version.py +1 -1
- rapidtide/data/examples/src/test_mlregressallt.py +32 -17
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testboth +1 -1
- rapidtide/data/examples/src/testcifti +11 -0
- rapidtide/data/examples/src/testdelayvar +14 -0
- rapidtide/data/examples/src/testfmri +1 -0
- rapidtide/data/examples/src/testglmfilt +8 -6
- rapidtide/data/examples/src/testhappy +1 -1
- rapidtide/data/examples/src/testnewrefine +11 -11
- rapidtide/data/examples/src/testnoiseamp +2 -2
- rapidtide/data/examples/src/testretro +16 -7
- rapidtide/data/examples/src/testretrolagtcs +1 -1
- rapidtide/dlfilter.py +0 -1
- rapidtide/fit.py +41 -9
- rapidtide/happy_supportfuncs.py +5 -0
- rapidtide/io.py +13 -2
- rapidtide/{glmpass.py → linfitfiltpass.py} +23 -19
- rapidtide/makelaggedtcs.py +8 -5
- rapidtide/multiproc.py +8 -11
- rapidtide/refinedelay.py +234 -109
- rapidtide/resample.py +3 -0
- rapidtide/scripts/{retroglm.py → delayvar.py} +2 -2
- rapidtide/scripts/{glmfilt.py → linfitfilt.py} +2 -2
- rapidtide/scripts/retroregress.py +28 -0
- rapidtide/scripts/stupidramtricks.py +9 -7
- rapidtide/simfuncfit.py +1 -1
- rapidtide/tests/cleanposttest +21 -0
- rapidtide/tests/test_delayestimation.py +3 -3
- rapidtide/tests/test_fastresampler.py +1 -2
- rapidtide/tests/test_fullrunhappy_v1.py +14 -6
- rapidtide/tests/test_fullrunhappy_v2.py +17 -9
- rapidtide/tests/test_fullrunhappy_v3.py +16 -8
- rapidtide/tests/test_fullrunhappy_v4.py +16 -8
- rapidtide/tests/test_fullrunhappy_v5.py +14 -6
- rapidtide/tests/test_fullrunrapidtide_v1.py +20 -12
- rapidtide/tests/test_fullrunrapidtide_v2.py +21 -13
- rapidtide/tests/test_fullrunrapidtide_v3.py +15 -7
- rapidtide/tests/test_fullrunrapidtide_v4.py +14 -7
- rapidtide/tests/test_fullrunrapidtide_v5.py +13 -5
- rapidtide/tests/test_fullrunrapidtide_v6.py +34 -26
- rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +9 -9
- rapidtide/tests/test_motionregress.py +3 -3
- rapidtide/tests/test_refinedelay.py +14 -12
- rapidtide/tidepoolTemplate_alt_qt6.py +172 -45
- rapidtide/tidepoolTemplate_big_qt6.py +196 -53
- rapidtide/tidepoolTemplate_qt6.py +150 -39
- rapidtide/workflows/atlasaverage.py +40 -12
- rapidtide/workflows/delayvar.py +1136 -0
- rapidtide/workflows/happy.py +37 -11
- rapidtide/workflows/happy_parser.py +4 -4
- rapidtide/workflows/{glmfilt.py → linfitfilt.py} +4 -4
- rapidtide/workflows/rapidtide.py +246 -178
- rapidtide/workflows/rapidtide_parser.py +116 -101
- rapidtide/workflows/{glmfrommaps.py → regressfrommaps.py} +30 -26
- rapidtide/workflows/retrolagtcs.py +13 -12
- rapidtide/workflows/{retroglm.py → retroregress.py} +182 -141
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/METADATA +3 -2
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/RECORD +69 -64
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/WHEEL +1 -1
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/entry_points.txt +3 -2
- rapidtide/data/examples/src/testoutputsize +0 -45
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info/licenses}/LICENSE +0 -0
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/top_level.txt +0 -0
rapidtide/fit.py
CHANGED

@@ -23,6 +23,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 import scipy as sp
 import scipy.special as sps
+import statsmodels.api as sm
 import tqdm
 from numpy.polynomial import Polynomial
 from scipy.optimize import curve_fit

@@ -1105,6 +1106,37 @@ def mlproject(thefit, theevs, intercept):
     return thedest


+def olsregress(X, y, intercept=True, debug=False):
+    """
+
+    Parameters
+    ----------
+    X
+    y
+    intercept
+
+    Returns
+    -------
+
+    """
+    """Return the coefficients from a multiple linear regression, along with R, the coefficient of determination.
+
+    X: The independent variables (nxp).
+    y: The dependent variable (1xn or nx1).
+    intercept: Specifies whether or not the slope intercept should be considered.
+
+    The routine computes the coefficients (b_0, b_1, ..., b_p) from the data (x,y) under
+    the assumption that y = b0 + b_1 * x_1 + b_2 * x_2 + ... + b_p * x_p.
+
+    If intercept is False, the routine assumes that b0 = 0 and returns (b_1, b_2, ..., b_p).
+    """
+    if intercept:
+        X = sm.add_constant(X, prepend=True)
+    model = sm.OLS(y, exog=X)
+    thefit = model.fit()
+    return thefit.params, np.sqrt(thefit.rsquared)
+
+
 def mlregress(X, y, intercept=True, debug=False):
     """

@@ -1223,9 +1255,9 @@ def calcexpandedregressors(
     return outputregressors, outlabels


-def derivativeglmfilt(thedata, theevs, nderivs=1, debug=False):
+def derivativelinfitfilt(thedata, theevs, nderivs=1, debug=False):
     r"""First perform multicomponent expansion on theevs (each ev replaced by itself,
-    its square, its cube, etc.). Then perform a …
+    its square, its cube, etc.). Then perform a linear fit of thedata using the vectors
     in thenewevs and return the result.

     Parameters

@@ -1269,16 +1301,16 @@ def derivativeglmfilt(thedata, theevs, nderivs=1, debug=False):
     if debug:
         print(f"{nderivs=}")
         print(f"{thenewevs.shape=}")
-    filtered, datatoremove, R, coffs = …
+    filtered, datatoremove, R, coffs = linfitfilt(thedata, thenewevs, debug=debug)
     if debug:
         print(f"{R=}")

     return filtered, thenewevs, datatoremove, R, coffs


-def expandedglmfilt(thedata, theevs, ncomps=1, debug=False):
+def expandedlinfitfilt(thedata, theevs, ncomps=1, debug=False):
     r"""First perform multicomponent expansion on theevs (each ev replaced by itself,
-    its square, its cube, etc.). Then perform a …
+    its square, its cube, etc.). Then perform a multiple regression fit of thedata using the vectors
     in thenewevs and return the result.

     Parameters

@@ -1322,15 +1354,15 @@ def expandedglmfilt(thedata, theevs, ncomps=1, debug=False):
     if debug:
         print(f"{ncomps=}")
         print(f"{thenewevs.shape=}")
-    filtered, datatoremove, R, coffs = …
+    filtered, datatoremove, R, coffs = linfitfilt(thedata, thenewevs, debug=debug)
     if debug:
         print(f"{R=}")

     return filtered, thenewevs, datatoremove, R, coffs


-def glmfilt(thedata, theevs, returnintercept=False, debug=False):
-    r"""Performs a …
+def linfitfilt(thedata, theevs, returnintercept=False, debug=False):
+    r"""Performs a multiple regression fit of thedata using the vectors in theevs
     and returns the result.

     Parameters

@@ -1383,7 +1415,7 @@ def glmfilt(thedata, theevs, returnintercept=False, debug=False):
     return filtered, datatoremove, R2, retcoffs


-def …
+def confoundregress(
     data,
     regressors,
     debug=False,
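The new olsregress helper wraps statsmodels OLS and returns the fit coefficients together with R (the square root of R²). A minimal usage sketch, assuming a small synthetic design matrix (the data below are made up for illustration, not taken from the package):

import numpy as np
import statsmodels.api as sm


def olsregress_sketch(X, y, intercept=True):
    # mirror of the new helper: optionally prepend a constant column,
    # fit ordinary least squares, and return (params, R)
    if intercept:
        X = sm.add_constant(X, prepend=True)
    thefit = sm.OLS(y, exog=X).fit()
    return thefit.params, np.sqrt(thefit.rsquared)


# synthetic example: y = 1.0 + 2.0 * x1 - 0.5 * x2 + noise
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
y = 1.0 + 2.0 * X[:, 0] - 0.5 * X[:, 1] + 0.1 * rng.normal(size=100)
coeffs, R = olsregress_sketch(X, y)
print(coeffs)  # approximately [1.0, 2.0, -0.5]
print(R)       # close to 1.0 for this low-noise example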
rapidtide/happy_supportfuncs.py
CHANGED

@@ -290,6 +290,11 @@ def cardiacfromimage(
     )


+def theCOM(X, data):
+    # return the center of mass
+    return np.sum(X * data) / np.sum(data)
+
+
 def savgolsmooth(data, smoothlen=101, polyorder=3):
     return savgol_filter(data, smoothlen, polyorder)

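theCOM computes an intensity-weighted center of mass along a coordinate axis. A quick sketch of how it behaves, with made-up numbers:

import numpy as np

def theCOM(X, data):
    # return the center of mass
    return np.sum(X * data) / np.sum(data)

# example: weights peaked around index 3
X = np.arange(6, dtype=float)                     # coordinates 0..5
data = np.array([0.0, 1.0, 2.0, 4.0, 2.0, 1.0])   # weights
print(theCOM(X, data))                            # 3.0, the weighted mean position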
rapidtide/io.py
CHANGED

@@ -1426,6 +1426,8 @@ def writebidstsv(
     extraheaderinfo=None,
     compressed=True,
     columns=None,
+    xaxislabel="time",
+    yaxislabel="arbitrary value",
     starttime=0.0,
     append=False,
     colsinjson=True,

@@ -1447,6 +1449,8 @@ def writebidstsv(
     :param samplerate:
     :param compressed:
     :param columns:
+    :param xaxislabel:
+    :param yaxislabel:
     :param starttime:
     :param append:
     :param colsinjson:

@@ -1462,6 +1466,8 @@ def writebidstsv(
         print("\tsamplerate:", samplerate)
         print("\tcompressed:", compressed)
         print("\tcolumns:", columns)
+        print("\txaxislabel:", xaxislabel)
+        print("\tyaxislabel:", yaxislabel)
         print("\tstarttime:", starttime)
         print("\tappend:", append)
     if len(data.shape) == 1:

@@ -1538,6 +1544,8 @@ def writebidstsv(
     headerdict = {}
     headerdict["SamplingFrequency"] = float(samplerate)
     headerdict["StartTime"] = float(starttime)
+    headerdict["XAxisLabel"] = xaxislabel
+    headerdict["YAxisLabel"] = yaxislabel
     if colsinjson:
         if startcol == 0:
             headerdict["Columns"] = columns

@@ -1991,8 +1999,11 @@ def colspectolist(colspec, debug=False):
         ("APARC_SUBCORTGRAY", "8-13,17-20,26-28,47-56,58-60,96,97"),
         ("APARC_CORTGRAY", "1000-1035,2000-2035"),
         ("APARC_GRAY", "8-13,17-20,26-28,47-56,58-60,96,97,1000-1035,2000-2035"),
-        ("APARC_WHITE", "2,7,41,46,177,219"),
-        (
+        ("APARC_WHITE", "2,7,41,46,177,219,3000-3035,4000-4035,5001,5002"),
+        (
+            "APARC_ALLBUTCSF",
+            "2,7-13,17-20,26-28,41,46-56,58-60,96,97,177,219,1000-1035,2000-2035,3000-3035,4000-4035,5001,5002",
+        ),
         ("SSEG_GRAY", "3,8,10-13,16-18,26,42,47,49-54,58"),
         ("SSEG_WHITE", "2,7,41,46"),
     )
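With the new xaxislabel/yaxislabel arguments, writebidstsv records axis labels in the JSON sidecar alongside the sampling frequency and start time. A sketch of the resulting header dictionary (keys taken from the diff; the surrounding values are illustrative only):

import json

# values writebidstsv would receive (illustrative)
samplerate = 1.0 / 0.72
starttime = 0.0
xaxislabel = "time"
yaxislabel = "arbitrary value"
columns = ["cardiac", "respiration"]

headerdict = {}
headerdict["SamplingFrequency"] = float(samplerate)
headerdict["StartTime"] = float(starttime)
headerdict["XAxisLabel"] = xaxislabel
headerdict["YAxisLabel"] = yaxislabel
headerdict["Columns"] = columns

print(json.dumps(headerdict, indent=4))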
rapidtide/{glmpass.py → linfitfiltpass.py}
CHANGED

@@ -26,7 +26,9 @@ import rapidtide.miscmath as tide_math
 import rapidtide.multiproc as tide_multiproc


-def _procOneGLMItem(vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"):
+def _procOneRegressionFitItem(
+    vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"
+):
     # NOTE: if theevs is 2D, dimension 0 is number of points, dimension 1 is number of evs
     thefit, R2 = tide_fit.mlregress(theevs, thedata)
     if theevs.ndim > 1:

@@ -75,7 +77,7 @@ def _procOneGLMItem(vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="
     )


-def glmpass(
+def linfitfiltpass(
     numprocitems,
     fmri_data,
     threshval,

@@ -89,12 +91,13 @@ def glmpass(
     filtereddata,
     nprocs=1,
     alwaysmultiproc=False,
-    …
+    confoundregress=False,
     procbyvoxel=True,
     showprogressbar=True,
     mp_chunksize=1000,
     rt_floatset=np.float64,
     rt_floattype="float64",
+    verbose=True,
     debug=False,
 ):
     inputshape = np.shape(fmri_data)

@@ -132,9 +135,9 @@ def glmpass(

     # process and send the data
     if procbyvoxel:
-        if …
+        if confoundregress:
             outQ.put(
-                …
+                _procOneRegressionFitItem(
                     val,
                     theevs,
                     fmri_data[val, :],

@@ -144,7 +147,7 @@ def glmpass(
             )
         else:
             outQ.put(
-                …
+                _procOneRegressionFitItem(
                     val,
                     theevs[val, :],
                     fmri_data[val, :],

@@ -153,9 +156,9 @@ def glmpass(
                 )
             )
     else:
-        if …
+        if confoundregress:
             outQ.put(
-                …
+                _procOneRegressionFitItem(
                     val,
                     theevs,
                     fmri_data[:, val],

@@ -165,7 +168,7 @@ def glmpass(
             )
         else:
             outQ.put(
-                …
+                _procOneRegressionFitItem(
                     val,
                     theevs[:, val],
                     fmri_data[:, val],

@@ -182,6 +185,7 @@ def glmpass(
         GLM_consumer,
         inputshape,
         themask,
+        verbose=verbose,
         nprocs=nprocs,
         procbyvoxel=procbyvoxel,
         showprogressbar=showprogressbar,

@@ -191,7 +195,7 @@ def glmpass(
     # unpack the data
     itemstotal = 0
     if procbyvoxel:
-        if …
+        if confoundregress:
             for voxel in data_out:
                 r2value[voxel[0]] = voxel[3]
                 filtereddata[voxel[0], :] = voxel[7]

@@ -211,7 +215,7 @@ def glmpass(
                 filtereddata[voxel[0], :] = voxel[7]
                 itemstotal += 1
     else:
-        if …
+        if confoundregress:
             for timepoint in data_out:
                 r2value[timepoint[0]] = timepoint[3]
                 filtereddata[:, timepoint[0]] = timepoint[7]

@@ -243,7 +247,7 @@ def glmpass(
         ):
             thedata = fmri_data[vox, :].copy()
             if (themask is None) or (themask[vox] > 0):
-                if …
+                if confoundregress:
                     (
                         dummy,
                         dummy,

@@ -253,7 +257,7 @@ def glmpass(
                         dummy,
                         dummy,
                         filtereddata[vox, :],
-                    ) = …
+                    ) = _procOneRegressionFitItem(
                         vox,
                         theevs,
                         thedata,

@@ -270,7 +274,7 @@ def glmpass(
                         fitNorm[vox],
                         datatoremove[vox, :],
                         filtereddata[vox, :],
-                    ) = …
+                    ) = _procOneRegressionFitItem(
                         vox,
                         theevs[vox, :],
                         thedata,

@@ -287,7 +291,7 @@ def glmpass(
         ):
             thedata = fmri_data[:, timepoint].copy()
             if (themask is None) or (themask[timepoint] > 0):
-                if …
+                if confoundregress:
                     (
                         dummy,
                         dummy,

@@ -297,7 +301,7 @@ def glmpass(
                         dummy,
                         dummy,
                         filtereddata[:, timepoint],
-                    ) = …
+                    ) = _procOneRegressionFitItem(
                         timepoint,
                         theevs,
                         thedata,

@@ -314,7 +318,7 @@ def glmpass(
                         fitNorm[timepoint],
                         datatoremove[:, timepoint],
                         filtereddata[:, timepoint],
-                    ) = …
+                    ) = _procOneRegressionFitItem(
                         timepoint,
                         theevs[:, timepoint],
                         thedata,

@@ -425,7 +429,7 @@ def confoundregress(
     numprocitems = thedataarray.shape[0]
     filtereddata = thedataarray * 0.0
     r2value = np.zeros(numprocitems)
-    numfiltered = …
+    numfiltered = linfitfiltpass(
         numprocitems,
         thedataarray,
         None,

@@ -437,7 +441,7 @@ def confoundregress(
         None,
         None,
         filtereddata,
-        …
+        confoundregress=True,
         nprocs=nprocs,
         showprogressbar=showprogressbar,
         procbyvoxel=True,
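glmpass is now linfitfiltpass, and the per-item worker is _procOneRegressionFitItem: each voxel (or timepoint) is fit against the regressors with mlregress and the fitted component is removed. A conceptual, single-voxel sketch of that regress-and-remove step in plain numpy (an illustration of the idea, not the rapidtide implementation):

import numpy as np

def regress_and_remove(theevs, thedata):
    # fit thedata (n timepoints) to the columns of theevs (n x p) plus an intercept,
    # then subtract the fitted component to get the filtered residual
    X = np.column_stack([np.ones(len(thedata)), theevs])
    coeffs, *_ = np.linalg.lstsq(X, thedata, rcond=None)
    datatoremove = X @ coeffs
    filtered = thedata - datatoremove
    ss_res = np.sum(filtered ** 2)
    ss_tot = np.sum((thedata - thedata.mean()) ** 2)
    r2 = 1.0 - ss_res / ss_tot
    return coeffs, r2, datatoremove, filtered

# toy data: one voxel, one regressor
rng = np.random.default_rng(1)
ev = np.sin(np.linspace(0.0, 6.28, 200))
voxel = 3.0 * ev + rng.normal(scale=0.2, size=200)
coeffs, r2, removed, cleaned = regress_and_remove(ev[:, np.newaxis], voxel)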
rapidtide/makelaggedtcs.py
CHANGED

@@ -24,8 +24,6 @@ from tqdm import tqdm

 import rapidtide.multiproc as tide_multiproc

-LGR = logging.getLogger("GENERAL")
-

 def _procOneVoxelMakelagtc(
     vox,

@@ -51,6 +49,7 @@ def makelaggedtcs(
     lagmask,
     lagtimes,
     lagtc,
+    LGR=None,
     nprocs=1,
     alwaysmultiproc=False,
     showprogressbar=True,

@@ -90,6 +89,7 @@ def makelaggedtcs(
         makelagtc_consumer,
         inputshape,
         lagmask,
+        verbose=(LGR is not None),
         nprocs=nprocs,
         showprogressbar=showprogressbar,
         chunksize=chunksize,

@@ -127,13 +127,16 @@ def makelaggedtcs(
         )
         volumetotal += 1

-    LGR…
+    if LGR is not None:
+        LGR.info(f"\nLagged timecourses created for {volumetotal} voxels")

     # garbage collect
     uncollected = gc.collect()
     if uncollected != 0:
-        LGR…
+        if LGR is not None:
+            LGR.info(f"garbage collected - unable to collect {uncollected} objects")
     else:
-        LGR…
+        if LGR is not None:
+            LGR.info("garbage collected")

     return volumetotal
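makelaggedtcs no longer creates its own module-level logger; the caller passes LGR in (or leaves it as None to stay quiet), and every log call is guarded. A minimal sketch of that optional-logger pattern (the function and values here are hypothetical):

import logging

def do_work(n, LGR=None):
    # ... do the real work ...
    total = n  # placeholder for the number of voxels processed
    if LGR is not None:
        LGR.info(f"Lagged timecourses created for {total} voxels")
    return total

# quiet by default
do_work(100)

# verbose when the caller supplies a logger
logging.basicConfig(level=logging.INFO)
do_work(100, LGR=logging.getLogger("GENERAL"))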
rapidtide/multiproc.py
CHANGED

@@ -77,8 +77,8 @@ def _process_data(data_in, inQ, outQ, showprogressbar=True, reportstep=1000, chu
                 pbar.update(1)
             if numreturned > remainder - 1:
                 break
-    …
-    …
+    if showprogressbar:
+        print()

     return data_out


@@ -88,6 +88,7 @@ def run_multiproc(
     inputshape,
     maskarray,
     nprocs=1,
+    verbose=True,
     procbyvoxel=True,
     showprogressbar=True,
     chunksize=1000,

@@ -105,13 +106,6 @@ def run_multiproc(
         workers = [ctx.Process(target=consumerfunc, args=(inQ, outQ)) for i in range(n_workers)]
         # signal.signal(signal.SIGINT, original_sigint_handler)
     else:
-        """# try adding this magic incantation to get coverage to record multiprocessing properly
-        # This fails for python 3.8 and above
-        try:
-            from pytest_cov.embed import cleanup
-        except ImportError:
-            cleanup = None
-        """
         cleanup = None  # just disable this for now
         inQ = mp.Queue()
         outQ = mp.Queue()

@@ -141,7 +135,8 @@ def run_multiproc(
             data_in.append(d)
         elif maskarray[d] > 0.5:
             data_in.append(d)
-    …
+    if verbose:
+        print("processing", len(data_in), procunit + " with", n_workers, "processes")
     data_out = _process_data(
         data_in, inQ, outQ, showprogressbar=showprogressbar, chunksize=chunksize
     )

@@ -162,6 +157,7 @@ def run_multithread(
     consumerfunc,
     inputshape,
     maskarray,
+    verbose=True,
     nprocs=1,
     procbyvoxel=True,
     showprogressbar=True,

@@ -197,7 +193,8 @@ def run_multithread(
             data_in.append(d)
         elif maskarray[d] > 0:
             data_in.append(d)
-    …
+    if verbose:
+        print("processing", len(data_in), procunit + " with", n_workers, "threads")
     data_out = _process_data(
         data_in, inQ, outQ, showprogressbar=showprogressbar, chunksize=chunksize
     )
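run_multiproc and run_multithread gain a verbose flag so callers such as linfitfiltpass and makelaggedtcs can silence the "processing N voxels with M processes" message. A tiny sketch of the verbose-gated queue fan-out pattern these routines use, with hypothetical worker and consumer names:

import multiprocessing as mp

def square_consumer(inQ, outQ):
    # hypothetical consumer: read items until the None sentinel, write results
    while True:
        item = inQ.get()
        if item is None:
            break
        outQ.put((item, item * item))

def run_fanout(indices, nprocs=2, verbose=True):
    inQ, outQ = mp.Queue(), mp.Queue()
    workers = [mp.Process(target=square_consumer, args=(inQ, outQ)) for _ in range(nprocs)]
    for w in workers:
        w.start()
    if verbose:
        print("processing", len(indices), "items with", nprocs, "processes")
    for idx in indices:
        inQ.put(idx)
    for _ in workers:
        inQ.put(None)  # one sentinel per worker
    results = [outQ.get() for _ in indices]
    for w in workers:
        w.join()
    return dict(results)

if __name__ == "__main__":
    print(run_fanout(list(range(10)), nprocs=2, verbose=True))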