rapidtide 3.0a12-py3-none-any.whl → 3.0a13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cloud/gmscalc-HCPYA +1 -1
- cloud/rapidtide-HCPYA +3 -3
- rapidtide/Colortables.py +10 -10
- rapidtide/DerivativeDelay.py +211 -0
- rapidtide/{Refiner.py → RegressorRefiner.py} +1 -1
- rapidtide/__init__.py +2 -1
- rapidtide/_version.py +1 -1
- rapidtide/data/examples/src/test_mlregressallt.py +32 -17
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testboth +1 -1
- rapidtide/data/examples/src/testcifti +11 -0
- rapidtide/data/examples/src/testdelayvar +13 -0
- rapidtide/data/examples/src/testfmri +1 -0
- rapidtide/data/examples/src/testglmfilt +8 -6
- rapidtide/data/examples/src/testhappy +1 -1
- rapidtide/data/examples/src/testnewrefine +11 -11
- rapidtide/data/examples/src/testnoiseamp +2 -2
- rapidtide/data/examples/src/testretro +16 -7
- rapidtide/data/examples/src/testretrolagtcs +1 -1
- rapidtide/dlfilter.py +0 -1
- rapidtide/fit.py +41 -9
- rapidtide/happy_supportfuncs.py +5 -0
- rapidtide/io.py +13 -2
- rapidtide/{glmpass.py → linfitfiltpass.py} +21 -19
- rapidtide/refinedelay.py +96 -58
- rapidtide/resample.py +3 -0
- rapidtide/scripts/{retroglm.py → delayvar.py} +2 -2
- rapidtide/scripts/{glmfilt.py → linfitfilt.py} +2 -2
- rapidtide/scripts/retroregress.py +28 -0
- rapidtide/scripts/stupidramtricks.py +9 -7
- rapidtide/simfuncfit.py +1 -1
- rapidtide/tests/cleanposttest +21 -0
- rapidtide/tests/test_delayestimation.py +3 -3
- rapidtide/tests/test_fastresampler.py +1 -2
- rapidtide/tests/test_fullrunhappy_v1.py +1 -1
- rapidtide/tests/test_fullrunhappy_v2.py +1 -1
- rapidtide/tests/test_fullrunrapidtide_v1.py +2 -2
- rapidtide/tests/test_fullrunrapidtide_v3.py +1 -1
- rapidtide/tests/test_fullrunrapidtide_v5.py +1 -1
- rapidtide/tests/test_fullrunrapidtide_v6.py +11 -11
- rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +9 -9
- rapidtide/tests/test_motionregress.py +3 -3
- rapidtide/tests/test_refinedelay.py +12 -12
- rapidtide/tidepoolTemplate_alt_qt6.py +172 -45
- rapidtide/tidepoolTemplate_big_qt6.py +196 -53
- rapidtide/tidepoolTemplate_qt6.py +150 -39
- rapidtide/workflows/delayvar.py +1048 -0
- rapidtide/workflows/happy.py +37 -11
- rapidtide/workflows/happy_parser.py +4 -4
- rapidtide/workflows/{glmfilt.py → linfitfilt.py} +4 -4
- rapidtide/workflows/rapidtide.py +235 -171
- rapidtide/workflows/rapidtide_parser.py +103 -86
- rapidtide/workflows/{glmfrommaps.py → regressfrommaps.py} +28 -26
- rapidtide/workflows/retrolagtcs.py +12 -12
- rapidtide/workflows/{retroglm.py → retroregress.py} +158 -141
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/METADATA +3 -2
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/RECORD +61 -56
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/WHEEL +1 -1
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/entry_points.txt +3 -2
- rapidtide/data/examples/src/testoutputsize +0 -45
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info/licenses}/LICENSE +0 -0
- {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/top_level.txt +0 -0
rapidtide/fit.py
CHANGED
@@ -23,6 +23,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 import scipy as sp
 import scipy.special as sps
+import statsmodels.api as sm
 import tqdm
 from numpy.polynomial import Polynomial
 from scipy.optimize import curve_fit
@@ -1105,6 +1106,37 @@ def mlproject(thefit, theevs, intercept):
     return thedest


+def olsregress(X, y, intercept=True, debug=False):
+    """
+
+    Parameters
+    ----------
+    X
+    y
+    intercept
+
+    Returns
+    -------
+
+    """
+    """Return the coefficients from a multiple linear regression, along with R, the coefficient of determination.
+
+    X: The independent variables (nxp).
+    y: The dependent variable (1xn or nx1).
+    intercept: Specifies whether or not the slope intercept should be considered.
+
+    The routine computes the coefficients (b_0, b_1, ..., b_p) from the data (x,y) under
+    the assumption that y = b0 + b_1 * x_1 + b_2 * x_2 + ... + b_p * x_p.
+
+    If intercept is False, the routine assumes that b0 = 0 and returns (b_1, b_2, ..., b_p).
+    """
+    if intercept:
+        X = sm.add_constant(X, prepend=True)
+    model = sm.OLS(y, exog=X)
+    thefit = model.fit()
+    return thefit.params, np.sqrt(thefit.rsquared)
+
+
 def mlregress(X, y, intercept=True, debug=False):
     """

@@ -1223,9 +1255,9 @@ def calcexpandedregressors(
     return outputregressors, outlabels


-def derivativeglmfilt(thedata, theevs, nderivs=1, debug=False):
+def derivativelinfitfilt(thedata, theevs, nderivs=1, debug=False):
     r"""First perform multicomponent expansion on theevs (each ev replaced by itself,
-    its square, its cube, etc.). Then perform a
+    its square, its cube, etc.). Then perform a linear fit of thedata using the vectors
     in thenewevs and return the result.

     Parameters
@@ -1269,16 +1301,16 @@ def derivativeglmfilt(thedata, theevs, nderivs=1, debug=False):
     if debug:
         print(f"{nderivs=}")
         print(f"{thenewevs.shape=}")
-    filtered, datatoremove, R, coffs =
+    filtered, datatoremove, R, coffs = linfitfilt(thedata, thenewevs, debug=debug)
     if debug:
         print(f"{R=}")

     return filtered, thenewevs, datatoremove, R, coffs


-def expandedglmfilt(thedata, theevs, ncomps=1, debug=False):
+def expandedlinfitfilt(thedata, theevs, ncomps=1, debug=False):
     r"""First perform multicomponent expansion on theevs (each ev replaced by itself,
-    its square, its cube, etc.). Then perform a
+    its square, its cube, etc.). Then perform a multiple regression fit of thedata using the vectors
     in thenewevs and return the result.

     Parameters
@@ -1322,15 +1354,15 @@ def expandedglmfilt(thedata, theevs, ncomps=1, debug=False):
     if debug:
         print(f"{ncomps=}")
         print(f"{thenewevs.shape=}")
-    filtered, datatoremove, R, coffs =
+    filtered, datatoremove, R, coffs = linfitfilt(thedata, thenewevs, debug=debug)
     if debug:
         print(f"{R=}")

     return filtered, thenewevs, datatoremove, R, coffs


-def glmfilt(thedata, theevs, returnintercept=False, debug=False):
-    r"""Performs a
+def linfitfilt(thedata, theevs, returnintercept=False, debug=False):
+    r"""Performs a multiple regression fit of thedata using the vectors in theevs
     and returns the result.

     Parameters
@@ -1383,7 +1415,7 @@ def glmfilt(thedata, theevs, returnintercept=False, debug=False):
     return filtered, datatoremove, R2, retcoffs


-def
+def confoundregress(
     data,
     regressors,
     debug=False,
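Editor's note: a minimal usage sketch (not part of the package diff) of the new olsregress helper added above, trimmed to drop the unused debug argument; it assumes numpy and statsmodels are installed.

# Exercise olsregress on noiseless synthetic data with known coefficients.
import numpy as np
import statsmodels.api as sm

def olsregress(X, y, intercept=True):
    # mirror of the helper added to rapidtide/fit.py in 3.0a13 (debug arg omitted)
    if intercept:
        X = sm.add_constant(X, prepend=True)
    thefit = sm.OLS(y, exog=X).fit()
    return thefit.params, np.sqrt(thefit.rsquared)

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))            # two explanatory variables
y = 1.5 + 2.0 * X[:, 0] - 0.5 * X[:, 1]  # known coefficients, no noise
coeffs, R = olsregress(X, y)
print(coeffs)  # ~[1.5, 2.0, -0.5]; intercept comes first because prepend=True
print(R)       # ~1.0 for noiseless data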
rapidtide/happy_supportfuncs.py
CHANGED
@@ -290,6 +290,11 @@ def cardiacfromimage(
     )


+def theCOM(X, data):
+    # return the center of mass
+    return np.sum(X * data) / np.sum(data)
+
+
 def savgolsmooth(data, smoothlen=101, polyorder=3):
     return savgol_filter(data, smoothlen, polyorder)

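Editor's note: a quick numerical check (not from the package) of the theCOM helper added above; it is simply a weighted mean of X with weights given by data.

import numpy as np

def theCOM(X, data):
    # return the center of mass
    return np.sum(X * data) / np.sum(data)

bins = np.arange(5)                  # positions 0..4
counts = np.array([0, 1, 3, 1, 0])   # weights symmetric around position 2
print(theCOM(bins, counts))          # 2.0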
rapidtide/io.py
CHANGED
@@ -1426,6 +1426,8 @@ def writebidstsv(
     extraheaderinfo=None,
     compressed=True,
     columns=None,
+    xaxislabel="time",
+    yaxislabel="arbitrary value",
     starttime=0.0,
     append=False,
     colsinjson=True,
@@ -1447,6 +1449,8 @@ def writebidstsv(
     :param samplerate:
     :param compressed:
     :param columns:
+    :param xaxislabel:
+    :param yaxislabel:
     :param starttime:
     :param append:
     :param colsinjson:
@@ -1462,6 +1466,8 @@ def writebidstsv(
         print("\tsamplerate:", samplerate)
         print("\tcompressed:", compressed)
         print("\tcolumns:", columns)
+        print("\txaxislabel:", xaxislabel)
+        print("\tyaxislabel:", yaxislabel)
         print("\tstarttime:", starttime)
         print("\tappend:", append)
     if len(data.shape) == 1:
@@ -1538,6 +1544,8 @@ def writebidstsv(
     headerdict = {}
     headerdict["SamplingFrequency"] = float(samplerate)
     headerdict["StartTime"] = float(starttime)
+    headerdict["XAxisLabel"] = xaxislabel
+    headerdict["YAxisLabel"] = yaxislabel
     if colsinjson:
         if startcol == 0:
             headerdict["Columns"] = columns
@@ -1991,8 +1999,11 @@ def colspectolist(colspec, debug=False):
        ("APARC_SUBCORTGRAY", "8-13,17-20,26-28,47-56,58-60,96,97"),
        ("APARC_CORTGRAY", "1000-1035,2000-2035"),
        ("APARC_GRAY", "8-13,17-20,26-28,47-56,58-60,96,97,1000-1035,2000-2035"),
-       ("APARC_WHITE", "2,7,41,46,177,219"),
-       (
+       ("APARC_WHITE", "2,7,41,46,177,219,3000-3035,4000-4035,5001,5002"),
+       (
+           "APARC_ALLBUTCSF",
+           "2,7-13,17-20,26-28,41,46-56,58-60,96,97,177,219,1000-1035,2000-2035,3000-3035,4000-4035,5001,5002",
+       ),
        ("SSEG_GRAY", "3,8,10-13,16-18,26,42,47,49-54,58"),
        ("SSEG_WHITE", "2,7,41,46"),
     )
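Editor's note: a small sketch (not package code) of the BIDS-style sidecar fields writebidstsv now emits, assuming the new defaults shown in this diff (xaxislabel="time", yaxislabel="arbitrary value").

import json

samplerate, starttime = 12.5, 0.0
headerdict = {
    "SamplingFrequency": float(samplerate),
    "StartTime": float(starttime),
    "XAxisLabel": "time",             # new in 3.0a13
    "YAxisLabel": "arbitrary value",  # new in 3.0a13
    "Columns": ["delay"],
}
print(json.dumps(headerdict, indent=4))  # contents of the .json sidecar next to the .tsv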
rapidtide/{glmpass.py → linfitfiltpass.py}
RENAMED
@@ -26,7 +26,9 @@ import rapidtide.miscmath as tide_math
 import rapidtide.multiproc as tide_multiproc


-def _procOneGLMItem(vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"):
+def _procOneRegressionFitItem(
+    vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"
+):
     # NOTE: if theevs is 2D, dimension 0 is number of points, dimension 1 is number of evs
     thefit, R2 = tide_fit.mlregress(theevs, thedata)
     if theevs.ndim > 1:
@@ -75,7 +77,7 @@ def _procOneGLMItem(vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"):
     )


-def
+def linfitfiltpass(
     numprocitems,
     fmri_data,
     threshval,
@@ -89,7 +91,7 @@ def glmpass(
     filtereddata,
     nprocs=1,
     alwaysmultiproc=False,
-
+    confoundregress=False,
     procbyvoxel=True,
     showprogressbar=True,
     mp_chunksize=1000,
@@ -132,9 +134,9 @@ def glmpass(

             # process and send the data
             if procbyvoxel:
-                if
+                if confoundregress:
                     outQ.put(
-
+                        _procOneRegressionFitItem(
                             val,
                             theevs,
                             fmri_data[val, :],
@@ -144,7 +146,7 @@ def glmpass(
                     )
                 else:
                     outQ.put(
-
+                        _procOneRegressionFitItem(
                             val,
                             theevs[val, :],
                             fmri_data[val, :],
@@ -153,9 +155,9 @@ def glmpass(
                         )
                     )
             else:
-                if
+                if confoundregress:
                     outQ.put(
-
+                        _procOneRegressionFitItem(
                             val,
                             theevs,
                             fmri_data[:, val],
@@ -165,7 +167,7 @@ def glmpass(
                     )
                 else:
                     outQ.put(
-
+                        _procOneRegressionFitItem(
                             val,
                             theevs[:, val],
                             fmri_data[:, val],
@@ -191,7 +193,7 @@ def glmpass(
     # unpack the data
     itemstotal = 0
     if procbyvoxel:
-        if
+        if confoundregress:
            for voxel in data_out:
                r2value[voxel[0]] = voxel[3]
                filtereddata[voxel[0], :] = voxel[7]
@@ -211,7 +213,7 @@ def glmpass(
                filtereddata[voxel[0], :] = voxel[7]
                itemstotal += 1
     else:
-        if
+        if confoundregress:
            for timepoint in data_out:
                r2value[timepoint[0]] = timepoint[3]
                filtereddata[:, timepoint[0]] = timepoint[7]
@@ -243,7 +245,7 @@ def glmpass(
         ):
             thedata = fmri_data[vox, :].copy()
             if (themask is None) or (themask[vox] > 0):
-                if
+                if confoundregress:
                     (
                         dummy,
                         dummy,
@@ -253,7 +255,7 @@ def glmpass(
                         dummy,
                         dummy,
                         filtereddata[vox, :],
-                    ) =
+                    ) = _procOneRegressionFitItem(
                         vox,
                         theevs,
                         thedata,
@@ -270,7 +272,7 @@ def glmpass(
                        fitNorm[vox],
                        datatoremove[vox, :],
                        filtereddata[vox, :],
-                    ) =
+                    ) = _procOneRegressionFitItem(
                        vox,
                        theevs[vox, :],
                        thedata,
@@ -287,7 +289,7 @@ def glmpass(
         ):
             thedata = fmri_data[:, timepoint].copy()
             if (themask is None) or (themask[timepoint] > 0):
-                if
+                if confoundregress:
                     (
                         dummy,
                         dummy,
@@ -297,7 +299,7 @@ def glmpass(
                         dummy,
                         dummy,
                         filtereddata[:, timepoint],
-                    ) =
+                    ) = _procOneRegressionFitItem(
                         timepoint,
                         theevs,
                         thedata,
@@ -314,7 +316,7 @@ def glmpass(
                        fitNorm[timepoint],
                        datatoremove[:, timepoint],
                        filtereddata[:, timepoint],
-                    ) =
+                    ) = _procOneRegressionFitItem(
                        timepoint,
                        theevs[:, timepoint],
                        thedata,
@@ -425,7 +427,7 @@ def confoundregress(
     numprocitems = thedataarray.shape[0]
     filtereddata = thedataarray * 0.0
     r2value = np.zeros(numprocitems)
-    numfiltered =
+    numfiltered = linfitfiltpass(
         numprocitems,
         thedataarray,
         None,
@@ -437,7 +439,7 @@ def confoundregress(
         None,
         None,
         filtereddata,
-
+        confoundregress=True,
         nprocs=nprocs,
         showprogressbar=showprogressbar,
         procbyvoxel=True,
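Editor's note: a minimal sketch (editor's illustration, not the package code) of what one linfitfiltpass work item does per voxel: regress the explanatory vectors (EVs) out of a timecourse and keep the residual as the "filtered" data.

import numpy as np

def fit_and_filter(theevs, thedata):
    # least-squares fit of thedata on theevs (with intercept), then remove the fit
    A = np.column_stack([np.ones(len(thedata)), theevs])
    coffs, *_ = np.linalg.lstsq(A, thedata, rcond=None)
    datatoremove = A @ coffs
    return thedata - datatoremove, datatoremove, coffs

t = np.linspace(0.0, 10.0, 101)
ev = np.sin(t)
voxel = 3.0 * ev + 0.1 * np.cos(5 * t)   # signal plus a component the EV misses
filtered, removed, coffs = fit_and_filter(ev, voxel)
print(coffs)  # intercept approximately 0, slope approximately 3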
rapidtide/refinedelay.py
CHANGED
@@ -24,7 +24,7 @@ from statsmodels.robust import mad

 import rapidtide.filter as tide_filt
 import rapidtide.io as tide_io
-import rapidtide.workflows.
+import rapidtide.workflows.regressfrommaps as tide_regressfrommaps

 global ratiotooffsetfunc, maplimits

@@ -40,12 +40,14 @@ def trainratiotooffset(
     timeaxis,
     outputname,
     outputlevel,
+    trainwidth=0.0,
+    trainstep=0.5,
     mindelay=-3.0,
     maxdelay=3.0,
     numpoints=501,
     smoothpts=3,
     edgepad=5,
-
+    regressderivs=1,
     debug=False,
 ):
     global ratiotooffsetfunc, maplimits
@@ -55,11 +57,13 @@ def trainratiotooffset(
         lagtcgenerator.info(prefix="\t")
         print("\ttimeaxis:", timeaxis)
         print("\toutputname:", outputname)
+        print("\ttrainwidth:", trainwidth)
+        print("\ttrainstep:", trainstep)
         print("\tmindelay:", mindelay)
         print("\tmaxdelay:", maxdelay)
         print("\tsmoothpts:", smoothpts)
         print("\tedgepad:", edgepad)
-        print("\
+        print("\tregressderivs:", regressderivs)
         print("\tlagtcgenerator:", lagtcgenerator)
     # make a delay map
     delaystep = (maxdelay - mindelay) / (numpoints - 1)
@@ -78,16 +82,13 @@ def trainratiotooffset(
         print(f"{maxdelay=}")
         print("lagtimes=", lagtimes)

-    #
+    # set up for getratioderivs call
+    rt_floattype = "float64"
     internalvalidfmrishape = (numpoints + 2 * edgepad, timeaxis.shape[0])
     fmridata = np.zeros(internalvalidfmrishape, dtype=float)
     fmrimask = np.ones(numpoints + 2 * edgepad, dtype=float)
     validvoxels = np.where(fmrimask > 0)[0]
-
-        fmridata[i, :] = lagtcgenerator.yfromx(timeaxis - lagtimes[i])
-
-    rt_floattype = "float64"
-    glmmean = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
+    sLFOfitmean = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
     rvalue = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
     r2value = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
     fitNorm = np.zeros((numpoints + 2 * edgepad, 2), dtype=rt_floattype)
@@ -97,10 +98,10 @@ def trainratiotooffset(
     filtereddata = np.zeros(internalvalidfmrishape, dtype=rt_floattype)
     sampletime = timeaxis[1] - timeaxis[0]
     optiondict = {
-        "
-        "
+        "regressfiltthreshval": 0.0,
+        "saveminimumsLFOfiltfiles": False,
         "nprocs_makelaggedtcs": 1,
-        "
+        "nprocs_regressionfilt": 1,
         "mp_chunksize": 1000,
         "showprogressbar": False,
         "alwaysmultiproc": False,
@@ -110,7 +111,27 @@ def trainratiotooffset(
         "textio": False,
     }

-
+    if trainwidth > 0.0:
+        numsteps = int(trainwidth / trainstep)
+        numsteps += 1 - numsteps % 2  # force numsteps to be odd
+        numsteps = np.max((numsteps, 3))  # ensure at least 1 positive and 1 negative step
+        trainoffsets = (
+            np.linspace(0, numsteps * trainstep, numsteps, endpoint=True)
+            - (numsteps // 2) * trainstep
+        )
+    else:
+        trainoffsets = np.array([0.0], dtype=float)
+    if debug:
+        print("trainoffsets:", trainoffsets)
+
+    for i in range(len(trainoffsets)):
+        pass
+
+    # now make synthetic fMRI data
+    for i in range(numpoints + 2 * edgepad):
+        fmridata[i, :] = lagtcgenerator.yfromx(timeaxis - lagtimes[i])
+
+    regressderivratios, regressrvalues = getderivratios(
         fmridata,
         validvoxels,
         timeaxis,
@@ -120,7 +141,7 @@ def trainratiotooffset(
         "glm",
         "refinedelaytest",
         sampletime,
-
+        sLFOfitmean,
         rvalue,
         r2value,
         fitNorm[:, :2],
@@ -131,41 +152,41 @@ def trainratiotooffset(
         None,
         None,
         optiondict,
-
+        regressderivs=regressderivs,
         debug=debug,
     )
     if debug:
         print("before trimming")
-        print(f"{
+        print(f"{regressderivratios.shape=}")
         print(f"{lagtimes.shape=}")
-    if
-
-            smooth(tide_filt.padvec(
+    if regressderivs == 1:
+        smoothregressderivratios = tide_filt.unpadvec(
+            smooth(tide_filt.padvec(regressderivratios, padlen=20, padtype="constant"), smoothpts),
             padlen=20,
         )
-
-
+        regressderivratios = regressderivratios[edgepad:-edgepad]
+        smoothregressderivratios = smoothregressderivratios[edgepad:-edgepad]
     else:
-
-        for i in range(
-
+        smoothregressderivratios = np.zeros_like(regressderivratios)
+        for i in range(regressderivs):
+            smoothregressderivratios[i, :] = tide_filt.unpadvec(
                 smooth(
-                    tide_filt.padvec(
+                    tide_filt.padvec(regressderivratios[i, :], padlen=20, padtype="constant"),
                     smoothpts,
                 ),
                 padlen=20,
             )
-
-
+        regressderivratios = regressderivratios[:, edgepad:-edgepad]
+        smoothregressderivratios = smoothregressderivratios[:, edgepad:-edgepad]
     lagtimes = lagtimes[edgepad:-edgepad]
     if debug:
         print("after trimming")
-        print(f"{
-        print(f"{
+        print(f"{regressderivratios.shape=}")
+        print(f"{smoothregressderivratios.shape=}")
         print(f"{lagtimes.shape=}")

     # make sure the mapping function is legal
-    xaxis =
+    xaxis = smoothregressderivratios[::-1]
     yaxis = lagtimes[::-1]
     midpoint = int(len(xaxis) // 2)
     lowerlim = midpoint + 0
@@ -187,7 +208,13 @@ def trainratiotooffset(
         1.0 / (resampaxis[1] - resampaxis[0]),
         starttime=resampaxis[0],
         columns=["delay"],
-        extraheaderinfo={
+        extraheaderinfo={
+            "Description": "The function mapping derivative ratio to delay",
+            "minratio": f"{resampaxis[0]}",
+            "maxratio": f"{resampaxis[-1]}",
+        },
+        xaxislabel="coefficientratio",
+        yaxislabel="time",
         append=False,
     )

@@ -240,7 +267,7 @@ def getderivratios(
     mode,
     outputname,
     oversamptr,
-
+    sLFOfitmean,
     rvalue,
     r2value,
     fitNorm,
@@ -251,25 +278,34 @@ def getderivratios(
     LGR,
     TimingLGR,
     optiondict,
-
+    regressderivs=1,
+    starttr=None,
+    endtr=None,
     debug=False,
 ):
+    if starttr is None:
+        starttr = 0
+    if endtr is None:
+        endtr = fmri_data_valid.shape[1]
     if debug:
         print("getderivratios")
         print(f"{fitNorm.shape=}")
         print(f"{fitcoeff.shape=}")
-        print(f"{
+        print(f"{regressderivs=}")
+        print(f"{starttr=}")
+        print(f"{endtr=}")
-
+
-
+    voxelsprocessed_regressionfilt, regressorset, evset = tide_regressfrommaps.regressfrommaps(
+        fmri_data_valid[:, starttr:endtr],
         validvoxels,
-        initial_fmri_x,
+        initial_fmri_x[starttr:endtr],
         lagtimes,
         fitmask,
         genlagtc,
         mode,
         outputname,
         oversamptr,
-
+        sLFOfitmean,
         rvalue,
         r2value,
         fitNorm,
@@ -279,11 +315,11 @@ def getderivratios(
         filtereddata,
         LGR,
         TimingLGR,
-        optiondict["
-        optiondict["
+        optiondict["regressfiltthreshval"],
+        optiondict["saveminimumsLFOfiltfiles"],
         nprocs_makelaggedtcs=optiondict["nprocs_makelaggedtcs"],
-
-
+        nprocs_regressionfilt=optiondict["nprocs_regressionfilt"],
+        regressderivs=regressderivs,
         mp_chunksize=optiondict["mp_chunksize"],
         showprogressbar=optiondict["showprogressbar"],
         alwaysmultiproc=optiondict["alwaysmultiproc"],
@@ -292,19 +328,19 @@ def getderivratios(
     )

     # calculate the ratio of the first derivative to the main regressor
-    if
-
+    if regressderivs == 1:
+        regressderivratios = np.nan_to_num(fitcoeff[:, 1] / fitcoeff[:, 0])
     else:
         numvoxels = fitcoeff.shape[0]
-
-        for i in range(
-
+        regressderivratios = np.zeros((regressderivs, numvoxels), dtype=np.float64)
+        for i in range(regressderivs):
+            regressderivratios[i, :] = np.nan_to_num(fitcoeff[:, i + 1] / fitcoeff[:, 0])

-    return
+    return regressderivratios, rvalue


 def filterderivratios(
-
+    regressderivratios,
     nativespaceshape,
     validvoxels,
     thedims,
@@ -323,32 +359,34 @@ def filterderivratios(
     print(f"\t{nativespaceshape=}")

     # filter the ratio to find weird values
-    themad = mad(
-    print(f"MAD of
+    themad = mad(regressderivratios).astype(np.float64)
+    print(f"MAD of regression fit derivative ratios = {themad}")
     outmaparray, internalspaceshape = tide_io.makedestarray(
         nativespaceshape,
         textio=textio,
         fileiscifti=fileiscifti,
         rt_floattype=rt_floattype,
     )
-
-
+    mappedregressderivratios = tide_io.populatemap(
+        regressderivratios,
         internalspaceshape,
         validvoxels,
         outmaparray,
         debug=debug,
     )
     if textio or fileiscifti:
-        medfilt =
-        filteredarray =
+        medfilt = regressderivratios
+        filteredarray = regressderivratios
     else:
         if debug:
-            print(f"{
+            print(f"{regressderivratios.shape=}, {mappedregressderivratios.shape=}")
         medfilt = median_filter(
-
+            mappedregressderivratios.reshape(nativespaceshape), size=(3, 3, 3)
         ).reshape(internalspaceshape)[validvoxels]
         filteredarray = np.where(
-            np.fabs(
+            np.fabs(regressderivratios - medfilt) > patchthresh * themad,
+            medfilt,
+            regressderivratios,
         )
     if gausssigma > 0:
         mappedfilteredarray = tide_io.populatemap(
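Editor's note: an illustration (not package code) of why the derivative/main coefficient ratio used by refinedelay encodes a small delay. For y(t) = s(t - d), a first-order Taylor expansion gives y ≈ s - d * s', so fitting y on [s, s'] yields a ratio of roughly -d; rapidtide trains an explicit mapping rather than assuming this linear relation.

import numpy as np

t = np.linspace(0.0, 100.0, 2001)
s = np.sin(2 * np.pi * 0.05 * t)          # slow, sLFO-like regressor
sprime = np.gradient(s, t)                # its time derivative
d = 0.4                                   # true delay in seconds
y = np.sin(2 * np.pi * 0.05 * (t - d))    # delayed copy of the regressor

A = np.column_stack([s, sprime])
coeffs, *_ = np.linalg.lstsq(A, y, rcond=None)
print(coeffs[1] / coeffs[0])              # approximately -0.4 for small delays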
rapidtide/resample.py
CHANGED
@@ -328,6 +328,9 @@ class FastResampler:
             pl.legend(("input", "hires"))
             pl.show()

+    def getdata(self):
+        return self.timeaxis, self.timecourse, self.hires_x, self.hires_y, 1.0 / self.initstep
+
     def info(self, prefix=""):
         print(f"{prefix}{self.timeaxis=}")
         print(f"{prefix}{self.timecourse=}")
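Editor's note: a hypothetical stand-in class (not from the package) showing the accessor pattern the new FastResampler.getdata follows: hand back the low-resolution and upsampled copies of a timecourse plus the upsampled sampling frequency (1.0 / initstep) in one tuple.

import numpy as np

class TinyResampler:
    def __init__(self, timeaxis, timecourse, upsampleratio=10):
        self.timeaxis = timeaxis
        self.timecourse = timecourse
        self.initstep = (timeaxis[1] - timeaxis[0]) / upsampleratio
        self.hires_x = np.arange(timeaxis[0], timeaxis[-1], self.initstep)
        self.hires_y = np.interp(self.hires_x, timeaxis, timecourse)

    def getdata(self):
        # same return tuple ordering as FastResampler.getdata in this diff
        return self.timeaxis, self.timecourse, self.hires_x, self.hires_y, 1.0 / self.initstep

t = np.linspace(0.0, 10.0, 11)
axis, tc, hx, hy, hires_freq = TinyResampler(t, np.sin(t)).getdata()
print(hires_freq)  # 10 samples per second for this toy example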
rapidtide/scripts/{retroglm.py → delayvar.py}
RENAMED
@@ -17,11 +17,11 @@
 #
 #
 import rapidtide.workflows.parser_funcs as pf
-import rapidtide.workflows.
+import rapidtide.workflows.delayvar as theworkflow


 def entrypoint():
-    pf.generic_init(theworkflow._get_parser, theworkflow.
+    pf.generic_init(theworkflow._get_parser, theworkflow.delayvar)


 if __name__ == "__main__":
rapidtide/scripts/{glmfilt.py → linfitfilt.py}
RENAMED
@@ -16,11 +16,11 @@
 # limitations under the License.
 #
 #
-import rapidtide.workflows.
+import rapidtide.workflows.linfitfilt as linfitfilt_workflow


 def entrypoint():
-
+    linfitfilt_workflow.main()


 if __name__ == "__main__":