rapidtide 3.0a11-py3-none-any.whl → 3.0a13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cloud/gmscalc-HCPYA +1 -1
- cloud/rapidtide-HCPYA +3 -3
- rapidtide/Colortables.py +10 -10
- rapidtide/DerivativeDelay.py +211 -0
- rapidtide/RegressorRefiner.py +464 -0
- rapidtide/__init__.py +2 -1
- rapidtide/_version.py +1 -1
- rapidtide/data/examples/src/test_mlregressallt.py +32 -17
- rapidtide/data/examples/src/testalign +1 -1
- rapidtide/data/examples/src/testboth +1 -1
- rapidtide/data/examples/src/testcifti +11 -0
- rapidtide/data/examples/src/testdelayvar +13 -0
- rapidtide/data/examples/src/testfmri +3 -124
- rapidtide/data/examples/src/testglmfilt +8 -6
- rapidtide/data/examples/src/testhappy +1 -1
- rapidtide/data/examples/src/testinitdelay +19 -0
- rapidtide/data/examples/src/testnewrefine +49 -0
- rapidtide/data/examples/src/testnoiseamp +2 -2
- rapidtide/data/examples/src/testrefineonly +22 -0
- rapidtide/data/examples/src/testretro +16 -7
- rapidtide/data/examples/src/testretrolagtcs +1 -1
- rapidtide/dlfilter.py +0 -1
- rapidtide/fit.py +41 -9
- rapidtide/happy_supportfuncs.py +5 -0
- rapidtide/io.py +13 -2
- rapidtide/{glmpass.py → linfitfiltpass.py} +29 -20
- rapidtide/refinedelay.py +133 -55
- rapidtide/refineregressor.py +38 -24
- rapidtide/resample.py +3 -0
- rapidtide/scripts/{retroglm.py → delayvar.py} +2 -2
- rapidtide/scripts/{glmfilt.py → linfitfilt.py} +2 -2
- rapidtide/scripts/retroregress.py +28 -0
- rapidtide/scripts/stupidramtricks.py +9 -7
- rapidtide/simfuncfit.py +1 -1
- rapidtide/tests/cleanposttest +21 -0
- rapidtide/tests/test_delayestimation.py +3 -3
- rapidtide/tests/test_fastresampler.py +1 -2
- rapidtide/tests/test_fullrunhappy_v1.py +1 -1
- rapidtide/tests/test_fullrunhappy_v2.py +1 -1
- rapidtide/tests/test_fullrunrapidtide_v1.py +2 -2
- rapidtide/tests/test_fullrunrapidtide_v3.py +1 -1
- rapidtide/tests/test_fullrunrapidtide_v5.py +1 -1
- rapidtide/tests/test_fullrunrapidtide_v6.py +11 -11
- rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +9 -9
- rapidtide/tests/test_motionregress.py +3 -3
- rapidtide/tests/test_refinedelay.py +12 -12
- rapidtide/tidepoolTemplate.py +1 -0
- rapidtide/tidepoolTemplate.ui +1 -0
- rapidtide/tidepoolTemplate_alt.py +5 -4
- rapidtide/tidepoolTemplate_alt.ui +3 -2
- rapidtide/tidepoolTemplate_alt_qt6.py +177 -49
- rapidtide/tidepoolTemplate_big.py +1 -0
- rapidtide/tidepoolTemplate_big.ui +1 -0
- rapidtide/tidepoolTemplate_big_qt6.py +197 -53
- rapidtide/tidepoolTemplate_qt6.py +151 -39
- rapidtide/workflows/delayvar.py +1048 -0
- rapidtide/workflows/happy.py +37 -11
- rapidtide/workflows/happy_parser.py +4 -4
- rapidtide/workflows/{glmfilt.py → linfitfilt.py} +4 -4
- rapidtide/workflows/parser_funcs.py +10 -2
- rapidtide/workflows/rapidtide.py +388 -452
- rapidtide/workflows/rapidtide_parser.py +129 -90
- rapidtide/workflows/{glmfrommaps.py → regressfrommaps.py} +28 -26
- rapidtide/workflows/retrolagtcs.py +12 -12
- rapidtide/workflows/{retroglm.py → retroregress.py} +243 -141
- rapidtide/workflows/tidepool.py +2 -2
- {rapidtide-3.0a11.dist-info → rapidtide-3.0a13.dist-info}/METADATA +3 -2
- {rapidtide-3.0a11.dist-info → rapidtide-3.0a13.dist-info}/RECORD +72 -63
- {rapidtide-3.0a11.dist-info → rapidtide-3.0a13.dist-info}/WHEEL +1 -1
- {rapidtide-3.0a11.dist-info → rapidtide-3.0a13.dist-info}/entry_points.txt +3 -2
- rapidtide/data/examples/src/testoutputsize +0 -45
- {rapidtide-3.0a11.dist-info → rapidtide-3.0a13.dist-info/licenses}/LICENSE +0 -0
- {rapidtide-3.0a11.dist-info → rapidtide-3.0a13.dist-info}/top_level.txt +0 -0
rapidtide/{glmpass.py → linfitfiltpass.py}

@@ -17,6 +17,7 @@
 #
 #
 import numpy as np
+from scipy.special import factorial
 from tqdm import tqdm

 import rapidtide.filter as tide_filt
@@ -25,7 +26,9 @@ import rapidtide.miscmath as tide_math
 import rapidtide.multiproc as tide_multiproc


-def _procOneGLMItem(vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"):
+def _procOneRegressionFitItem(
+    vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="float64"
+):
     # NOTE: if theevs is 2D, dimension 0 is number of points, dimension 1 is number of evs
     thefit, R2 = tide_fit.mlregress(theevs, thedata)
     if theevs.ndim > 1:
@@ -74,7 +77,7 @@ def _procOneGLMItem(vox, theevs, thedata, rt_floatset=np.float64, rt_floattype="
     )


-def glmpass(
+def linfitfiltpass(
     numprocitems,
     fmri_data,
     threshval,
@@ -88,7 +91,7 @@ def glmpass(
     filtereddata,
     nprocs=1,
     alwaysmultiproc=False,
-
+    confoundregress=False,
     procbyvoxel=True,
     showprogressbar=True,
     mp_chunksize=1000,
@@ -131,9 +134,9 @@ def glmpass(

         # process and send the data
         if procbyvoxel:
-            if
+            if confoundregress:
                 outQ.put(
-                    _procOneGLMItem(
+                    _procOneRegressionFitItem(
                         val,
                         theevs,
                         fmri_data[val, :],
@@ -143,7 +146,7 @@ def glmpass(
                 )
             else:
                 outQ.put(
-                    _procOneGLMItem(
+                    _procOneRegressionFitItem(
                         val,
                         theevs[val, :],
                         fmri_data[val, :],
@@ -152,9 +155,9 @@ def glmpass(
                     )
                 )
         else:
-            if
+            if confoundregress:
                 outQ.put(
-                    _procOneGLMItem(
+                    _procOneRegressionFitItem(
                         val,
                         theevs,
                         fmri_data[:, val],
@@ -164,7 +167,7 @@ def glmpass(
                 )
             else:
                 outQ.put(
-                    _procOneGLMItem(
+                    _procOneRegressionFitItem(
                         val,
                         theevs[:, val],
                         fmri_data[:, val],
@@ -190,7 +193,7 @@ def glmpass(
     # unpack the data
     itemstotal = 0
     if procbyvoxel:
-        if
+        if confoundregress:
            for voxel in data_out:
                r2value[voxel[0]] = voxel[3]
                filtereddata[voxel[0], :] = voxel[7]
@@ -210,7 +213,7 @@ def glmpass(
                filtereddata[voxel[0], :] = voxel[7]
                itemstotal += 1
    else:
-        if
+        if confoundregress:
            for timepoint in data_out:
                r2value[timepoint[0]] = timepoint[3]
                filtereddata[:, timepoint[0]] = timepoint[7]
@@ -242,7 +245,7 @@ def glmpass(
        ):
            thedata = fmri_data[vox, :].copy()
            if (themask is None) or (themask[vox] > 0):
-                if
+                if confoundregress:
                    (
                        dummy,
                        dummy,
@@ -252,7 +255,7 @@ def glmpass(
                        dummy,
                        dummy,
                        filtereddata[vox, :],
-                    ) = _procOneGLMItem(
+                    ) = _procOneRegressionFitItem(
                        vox,
                        theevs,
                        thedata,
@@ -269,7 +272,7 @@ def glmpass(
                        fitNorm[vox],
                        datatoremove[vox, :],
                        filtereddata[vox, :],
-                    ) = _procOneGLMItem(
+                    ) = _procOneRegressionFitItem(
                        vox,
                        theevs[vox, :],
                        thedata,
@@ -286,7 +289,7 @@ def glmpass(
        ):
            thedata = fmri_data[:, timepoint].copy()
            if (themask is None) or (themask[timepoint] > 0):
-                if
+                if confoundregress:
                    (
                        dummy,
                        dummy,
@@ -296,7 +299,7 @@ def glmpass(
                        dummy,
                        dummy,
                        filtereddata[:, timepoint],
-                    ) = _procOneGLMItem(
+                    ) = _procOneRegressionFitItem(
                        timepoint,
                        theevs,
                        thedata,
@@ -313,7 +316,7 @@ def glmpass(
                        fitNorm[timepoint],
                        datatoremove[:, timepoint],
                        filtereddata[:, timepoint],
-                    ) = _procOneGLMItem(
+                    ) = _procOneRegressionFitItem(
                        timepoint,
                        theevs[:, timepoint],
                        thedata,
@@ -350,11 +353,17 @@ def makevoxelspecificderivs(theevs, nderivs=1, debug=False):
    if nderivs == 0:
        thenewevs = theevs
    else:
+        taylorcoffs = np.zeros((nderivs + 1), dtype=np.float64)
+        taylorcoffs[0] = 1.0
        thenewevs = np.zeros((theevs.shape[0], theevs.shape[1], nderivs + 1), dtype=float)
+        for i in range(1, nderivs + 1):
+            taylorcoffs[i] = 1.0 / factorial(i)
        for thevoxel in range(0, theevs.shape[0]):
            thenewevs[thevoxel, :, 0] = theevs[thevoxel, :] * 1.0
            for i in range(1, nderivs + 1):
-                thenewevs[thevoxel, :, i] = np.gradient(
+                thenewevs[thevoxel, :, i] = taylorcoffs[i] * np.gradient(
+                    thenewevs[thevoxel, :, i - 1]
+                )
    if debug:
        print(f"{nderivs=}")
        print(f"{thenewevs.shape=}")
@@ -418,7 +427,7 @@ def confoundregress(
    numprocitems = thedataarray.shape[0]
    filtereddata = thedataarray * 0.0
    r2value = np.zeros(numprocitems)
-    numfiltered = glmpass(
+    numfiltered = linfitfiltpass(
        numprocitems,
        thedataarray,
        None,
@@ -430,7 +439,7 @@ def confoundregress(
        None,
        None,
        filtereddata,
-
+        confoundregress=True,
        nprocs=nprocs,
        showprogressbar=showprogressbar,
        procbyvoxel=True,
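The substantive algorithmic change in linfitfiltpass.py is in makevoxelspecificderivs(): each voxel-specific derivative regressor is now scaled by 1/i! (hence the new scipy.special.factorial import), so the fitted coefficients line up with the terms of a Taylor expansion of a time-shifted regressor. A minimal standalone sketch of that idea follows; the test signal, sampling step, and delay are invented for illustration, and this is not rapidtide code:

```python
import numpy as np
from scipy.special import factorial

# Build a regressor plus Taylor-scaled derivative EVs, then fit a delayed copy.
dt = 0.5                                 # sample spacing in seconds (arbitrary)
t = np.arange(0.0, 300.0, dt)
signal = np.sin(2.0 * np.pi * 0.05 * t) + 0.5 * np.sin(2.0 * np.pi * 0.02 * t)
tau = 0.7                                # true delay to recover
delayed = np.interp(t - tau, t, signal)  # shifted copy of the regressor

nderivs = 2
evs = np.zeros((len(t), nderivs + 1))
evs[:, 0] = signal
deriv = signal.copy()
for i in range(1, nderivs + 1):
    deriv = np.gradient(deriv, dt)       # i-th numerical derivative
    evs[:, i] = deriv / factorial(i)     # scale by the Taylor coefficient 1/i!

coeffs, _, _, _ = np.linalg.lstsq(evs, delayed, rcond=None)
# s(t - tau) ~ s(t) - tau*s'(t) + (tau**2/2!)*s''(t) - ..., so with the 1/i!
# folded into the EVs the first coefficient ratio approximates -tau:
print(coeffs[1] / coeffs[0])             # roughly -0.7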
rapidtide/refinedelay.py
CHANGED
@@ -17,14 +17,14 @@
 #
 #
 import numpy as np
+import numpy.polynomial.polynomial as poly
 from scipy.interpolate import CubicSpline, UnivariateSpline
 from scipy.ndimage import median_filter
 from statsmodels.robust import mad

 import rapidtide.filter as tide_filt
 import rapidtide.io as tide_io
-import rapidtide.workflows.
-
+import rapidtide.workflows.regressfrommaps as tide_regressfrommaps

 global ratiotooffsetfunc, maplimits

@@ -40,11 +40,14 @@ def trainratiotooffset(
     timeaxis,
     outputname,
     outputlevel,
+    trainwidth=0.0,
+    trainstep=0.5,
     mindelay=-3.0,
     maxdelay=3.0,
     numpoints=501,
     smoothpts=3,
     edgepad=5,
+    regressderivs=1,
     debug=False,
 ):
     global ratiotooffsetfunc, maplimits
@@ -54,10 +57,13 @@ def trainratiotooffset(
         lagtcgenerator.info(prefix="\t")
         print("\ttimeaxis:", timeaxis)
         print("\toutputname:", outputname)
+        print("\ttrainwidth:", trainwidth)
+        print("\ttrainstep:", trainstep)
         print("\tmindelay:", mindelay)
         print("\tmaxdelay:", maxdelay)
         print("\tsmoothpts:", smoothpts)
         print("\tedgepad:", edgepad)
+        print("\tregressderivs:", regressderivs)
         print("\tlagtcgenerator:", lagtcgenerator)
     # make a delay map
     delaystep = (maxdelay - mindelay) / (numpoints - 1)
@@ -76,16 +82,13 @@ def trainratiotooffset(
         print(f"{maxdelay=}")
         print("lagtimes=", lagtimes)

-    #
+    # set up for getratioderivs call
+    rt_floattype = "float64"
     internalvalidfmrishape = (numpoints + 2 * edgepad, timeaxis.shape[0])
     fmridata = np.zeros(internalvalidfmrishape, dtype=float)
     fmrimask = np.ones(numpoints + 2 * edgepad, dtype=float)
     validvoxels = np.where(fmrimask > 0)[0]
-
-        fmridata[i, :] = lagtcgenerator.yfromx(timeaxis - lagtimes[i])
-
-    rt_floattype = "float64"
-    glmmean = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
+    sLFOfitmean = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
     rvalue = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
     r2value = np.zeros(numpoints + 2 * edgepad, dtype=rt_floattype)
     fitNorm = np.zeros((numpoints + 2 * edgepad, 2), dtype=rt_floattype)
@@ -95,10 +98,10 @@ def trainratiotooffset(
     filtereddata = np.zeros(internalvalidfmrishape, dtype=rt_floattype)
     sampletime = timeaxis[1] - timeaxis[0]
     optiondict = {
-        "
-        "
+        "regressfiltthreshval": 0.0,
+        "saveminimumsLFOfiltfiles": False,
         "nprocs_makelaggedtcs": 1,
-        "
+        "nprocs_regressionfilt": 1,
        "mp_chunksize": 1000,
        "showprogressbar": False,
        "alwaysmultiproc": False,
@@ -108,7 +111,27 @@ def trainratiotooffset(
        "textio": False,
    }

-
+    if trainwidth > 0.0:
+        numsteps = int(trainwidth / trainstep)
+        numsteps += 1 - numsteps % 2  # force numsteps to be odd
+        numsteps = np.max((numsteps, 3))  # ensure at least 1 positive and 1 negative step
+        trainoffsets = (
+            np.linspace(0, numsteps * trainstep, numsteps, endpoint=True)
+            - (numsteps // 2) * trainstep
+        )
+    else:
+        trainoffsets = np.array([0.0], dtype=float)
+    if debug:
+        print("trainoffsets:", trainoffsets)
+
+    for i in range(len(trainoffsets)):
+        pass
+
+    # now make synthetic fMRI data
+    for i in range(numpoints + 2 * edgepad):
+        fmridata[i, :] = lagtcgenerator.yfromx(timeaxis - lagtimes[i])
+
+    regressderivratios, regressrvalues = getderivratios(
        fmridata,
        validvoxels,
        timeaxis,
@@ -118,7 +141,7 @@ def trainratiotooffset(
        "glm",
        "refinedelaytest",
        sampletime,
-
+        sLFOfitmean,
        rvalue,
        r2value,
        fitNorm[:, :2],
@@ -129,27 +152,41 @@ def trainratiotooffset(
        None,
        None,
        optiondict,
+        regressderivs=regressderivs,
        debug=debug,
    )
    if debug:
        print("before trimming")
-        print(f"{
+        print(f"{regressderivratios.shape=}")
        print(f"{lagtimes.shape=}")
-
-
-
-
-
-
+    if regressderivs == 1:
+        smoothregressderivratios = tide_filt.unpadvec(
+            smooth(tide_filt.padvec(regressderivratios, padlen=20, padtype="constant"), smoothpts),
+            padlen=20,
+        )
+        regressderivratios = regressderivratios[edgepad:-edgepad]
+        smoothregressderivratios = smoothregressderivratios[edgepad:-edgepad]
+    else:
+        smoothregressderivratios = np.zeros_like(regressderivratios)
+        for i in range(regressderivs):
+            smoothregressderivratios[i, :] = tide_filt.unpadvec(
+                smooth(
+                    tide_filt.padvec(regressderivratios[i, :], padlen=20, padtype="constant"),
+                    smoothpts,
+                ),
+                padlen=20,
+            )
+        regressderivratios = regressderivratios[:, edgepad:-edgepad]
+        smoothregressderivratios = smoothregressderivratios[:, edgepad:-edgepad]
    lagtimes = lagtimes[edgepad:-edgepad]
    if debug:
        print("after trimming")
-        print(f"{
-        print(f"{
+        print(f"{regressderivratios.shape=}")
+        print(f"{smoothregressderivratios.shape=}")
        print(f"{lagtimes.shape=}")

    # make sure the mapping function is legal
-    xaxis =
+    xaxis = smoothregressderivratios[::-1]
    yaxis = lagtimes[::-1]
    midpoint = int(len(xaxis) // 2)
    lowerlim = midpoint + 0
@@ -164,16 +201,6 @@ def trainratiotooffset(
    maplimits = (xaxis[0], xaxis[-1])

    if outputlevel != "min":
-        """tide_io.writebidstsv(
-            f"{outputname}_desc-ratiotodelaymapping_timeseries",
-            np.stack((xaxis, yaxis)),
-            1.0,
-            columns=["smoothglmderivratio", "delay"],
-            extraheaderinfo={
-                "Description": "The ratio of sLFO derivative to the sLFO, and the corresponding delay offset"
-            },
-            append=False,
-        )"""
        resampaxis = np.linspace(xaxis[0], xaxis[-1], num=len(xaxis), endpoint=True)
        tide_io.writebidstsv(
            f"{outputname}_desc-ratiotodelayfunc_timeseries",
@@ -182,8 +209,12 @@ def trainratiotooffset(
            starttime=resampaxis[0],
            columns=["delay"],
            extraheaderinfo={
-                "Description": "The function mapping derivative ratio to delay"
+                "Description": "The function mapping derivative ratio to delay",
+                "minratio": f"{resampaxis[0]}",
+                "maxratio": f"{resampaxis[-1]}",
            },
+            xaxislabel="coefficientratio",
+            yaxislabel="time",
            append=False,
        )

@@ -198,6 +229,34 @@ def ratiotodelay(theratio):
    return ratiotooffsetfunc(theratio)


+def coffstodelay(thecoffs, mindelay=-3.0, maxdelay=3.0, debug=False):
+    justaone = np.array([1.0], dtype=thecoffs.dtype)
+    allcoffs = np.concatenate((justaone, thecoffs))
+    theroots = (poly.Polynomial(allcoffs, domain=(mindelay, maxdelay))).roots()
+    if theroots is None:
+        return 0.0
+    elif len(theroots) == 1:
+        return theroots[0].real
+    else:
+        candidates = []
+        for i in range(len(theroots)):
+            if np.isreal(theroots[i]) and (mindelay <= theroots[i] <= maxdelay):
+                if debug:
+                    print(f"keeping root {i} ({theroots[i]})")
+                candidates.append(theroots[i].real)
+            else:
+                if debug:
+                    print(f"discarding root {i} ({theroots[i]})")
+        else:
+            pass
+        if len(candidates) > 0:
+            chosen = candidates[np.argmin(np.fabs(np.array(candidates)))].real
+            if debug:
+                print(f"{theroots=}, {candidates=}, {chosen=}")
+            return chosen
+        return 0.0
+
+
 def getderivratios(
    fmri_data_valid,
    validvoxels,
@@ -208,7 +267,7 @@ def getderivratios(
    mode,
    outputname,
    oversamptr,
-
+    sLFOfitmean,
    rvalue,
    r2value,
    fitNorm,
@@ -219,23 +278,34 @@ def getderivratios(
    LGR,
    TimingLGR,
    optiondict,
+    regressderivs=1,
+    starttr=None,
+    endtr=None,
    debug=False,
 ):
+    if starttr is None:
+        starttr = 0
+    if endtr is None:
+        endtr = fmri_data_valid.shape[1]
    if debug:
        print("getderivratios")
        print(f"{fitNorm.shape=}")
        print(f"{fitcoeff.shape=}")
-
-
+        print(f"{regressderivs=}")
+        print(f"{starttr=}")
+        print(f"{endtr=}")
+
+    voxelsprocessed_regressionfilt, regressorset, evset = tide_regressfrommaps.regressfrommaps(
+        fmri_data_valid[:, starttr:endtr],
        validvoxels,
-        initial_fmri_x,
+        initial_fmri_x[starttr:endtr],
        lagtimes,
        fitmask,
        genlagtc,
        mode,
        outputname,
        oversamptr,
-
+        sLFOfitmean,
        rvalue,
        r2value,
        fitNorm,
@@ -245,11 +315,11 @@ def getderivratios(
        filtereddata,
        LGR,
        TimingLGR,
-        optiondict["
-        optiondict["
+        optiondict["regressfiltthreshval"],
+        optiondict["saveminimumsLFOfiltfiles"],
        nprocs_makelaggedtcs=optiondict["nprocs_makelaggedtcs"],
-
-
+        nprocs_regressionfilt=optiondict["nprocs_regressionfilt"],
+        regressderivs=regressderivs,
        mp_chunksize=optiondict["mp_chunksize"],
        showprogressbar=optiondict["showprogressbar"],
        alwaysmultiproc=optiondict["alwaysmultiproc"],
@@ -258,13 +328,19 @@ def getderivratios(
    )

    # calculate the ratio of the first derivative to the main regressor
-
+    if regressderivs == 1:
+        regressderivratios = np.nan_to_num(fitcoeff[:, 1] / fitcoeff[:, 0])
+    else:
+        numvoxels = fitcoeff.shape[0]
+        regressderivratios = np.zeros((regressderivs, numvoxels), dtype=np.float64)
+        for i in range(regressderivs):
+            regressderivratios[i, :] = np.nan_to_num(fitcoeff[:, i + 1] / fitcoeff[:, 0])

-    return
+    return regressderivratios, rvalue


 def filterderivratios(
-
+    regressderivratios,
    nativespaceshape,
    validvoxels,
    thedims,
@@ -283,32 +359,34 @@ def filterderivratios(
        print(f"\t{nativespaceshape=}")

    # filter the ratio to find weird values
-    themad = mad(
-    print(f"MAD of
+    themad = mad(regressderivratios).astype(np.float64)
+    print(f"MAD of regression fit derivative ratios = {themad}")
    outmaparray, internalspaceshape = tide_io.makedestarray(
        nativespaceshape,
        textio=textio,
        fileiscifti=fileiscifti,
        rt_floattype=rt_floattype,
    )
-
-
+    mappedregressderivratios = tide_io.populatemap(
+        regressderivratios,
        internalspaceshape,
        validvoxels,
        outmaparray,
        debug=debug,
    )
    if textio or fileiscifti:
-        medfilt =
-        filteredarray =
+        medfilt = regressderivratios
+        filteredarray = regressderivratios
    else:
        if debug:
-            print(f"{
+            print(f"{regressderivratios.shape=}, {mappedregressderivratios.shape=}")
        medfilt = median_filter(
-
+            mappedregressderivratios.reshape(nativespaceshape), size=(3, 3, 3)
        ).reshape(internalspaceshape)[validvoxels]
        filteredarray = np.where(
-            np.fabs(
+            np.fabs(regressderivratios - medfilt) > patchthresh * themad,
+            medfilt,
+            regressderivratios,
        )
        if gausssigma > 0:
            mappedfilteredarray = tide_io.populatemap(
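trainratiotooffset() builds its ratio-to-delay lookup by synthesizing copies of the probe regressor at a grid of known delays (lagtcgenerator.yfromx(timeaxis - lagtimes[i])), running getderivratios() on them, smoothing the resulting ratios, and treating the (ratio, delay) pairs as an invertible curve for ratiotodelay(). A rough standalone sketch of that calibration loop, with plain numpy standing in for rapidtide's lag generator and regression machinery (all signals and numbers are illustrative):

```python
import numpy as np

dt = 0.5
t = np.arange(0.0, 300.0, dt)
regressor = np.sin(2.0 * np.pi * 0.05 * t) + 0.5 * np.sin(2.0 * np.pi * 0.02 * t)
deriv = np.gradient(regressor, dt)
evs = np.column_stack((regressor, deriv))     # [regressor, first derivative]

lagtimes = np.linspace(-3.0, 3.0, 501)        # grid of known delays to train on
ratios = np.zeros_like(lagtimes)
for i, lag in enumerate(lagtimes):
    lagged = np.interp(t - lag, t, regressor)            # stand-in for yfromx()
    coeffs, _, _, _ = np.linalg.lstsq(evs, lagged, rcond=None)
    ratios[i] = coeffs[1] / coeffs[0]                     # same ratio as getderivratios()

# The ratio is (close to) monotonic in delay over this range, so interpolating
# delay as a function of ratio gives the ratiotodelay() lookup.
order = np.argsort(ratios)
estimated_delay = np.interp(-0.8, ratios[order], lagtimes[order])
```

The new coffstodelay() extends this to regressderivs > 1 by packing the coefficient ratios into a polynomial (with a leading 1) and keeping the smallest-magnitude real root that falls inside [mindelay, maxdelay].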
rapidtide/refineregressor.py
CHANGED
@@ -151,6 +151,9 @@ def alignvoxels(
     """
     inputshape = np.shape(fmridata)
     volumetotal = np.sum(lagmask)
+    if debug:
+        print("alignvoxels: {inputshape}")
+        print("volumetotal: {volumetotal}")

     # timeshift the valid voxels
     if nprocs > 1 or alwaysmultiproc:
@@ -253,6 +256,7 @@ def makerefinemask(
     bipolar=False,
     includemask=None,
     excludemask=None,
+    fixdelay=False,
     debug=False,
     rt_floatset=np.float64,
     rt_floattype="float64",
@@ -321,35 +325,45 @@ def makerefinemask(
         LGR.info(f"setting ampthresh to the {-100.0 * ampthresh}th percentile ({theampthresh})")
     else:
         theampthresh = ampthresh
+    if debug:
+        print(f"makerefinemask: {theampthresh=}")
     if bipolar:
         ampmask = np.where(np.fabs(lagstrengths) >= theampthresh, np.int16(1), np.int16(0))
     else:
         ampmask = np.where(lagstrengths >= theampthresh, np.int16(1), np.int16(0))
-    if
-        delaymask =
-            (lagtimes - offsettime) > lagminthresh,
-            np.int16(1),
-            np.int16(0),
-        ) * np.where(
-            (lagtimes - offsettime) < lagmaxthresh,
-            np.int16(1),
-            np.int16(0),
-        )
-    elif lagmaskside == "lower":
-        delaymask = np.where(
-            (lagtimes - offsettime) < -lagminthresh,
-            np.int16(1),
-            np.int16(0),
-        ) * np.where(
-            (lagtimes - offsettime) > -lagmaxthresh,
-            np.int16(1),
-            np.int16(0),
-        )
+    if fixdelay:
+        delaymask = lagmask + 0
     else:
-
-
-
-
+        if lagmaskside == "upper":
+            delaymask = np.where(
+                (lagtimes - offsettime) > lagminthresh,
+                np.int16(1),
+                np.int16(0),
+            ) * np.where(
+                (lagtimes - offsettime) < lagmaxthresh,
+                np.int16(1),
+                np.int16(0),
+            )
+        elif lagmaskside == "lower":
+            delaymask = np.where(
+                (lagtimes - offsettime) < -lagminthresh,
+                np.int16(1),
+                np.int16(0),
+            ) * np.where(
+                (lagtimes - offsettime) > -lagmaxthresh,
+                np.int16(1),
+                np.int16(0),
+            )
+        else:
+            abslag = abs(lagtimes - offsettime)
+            delaymask = np.where(abslag > lagminthresh, np.int16(1), np.int16(0)) * np.where(
+                abslag < lagmaxthresh, np.int16(1), np.int16(0)
+            )
+    if debug:
+        print(f"makerefinemask: {lagmaskside=}")
+        print(f"makerefinemask: {lagminthresh=}")
+        print(f"makerefinemask: {lagmaxthresh=}")
+        print(f"makerefinemask: {offsettime=}")
     sigmamask = np.where(lagsigma < sigmathresh, np.int16(1), np.int16(0))
     locationmask = lagmask + 0
     if includemask is not None:
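In makerefinemask(), the new fixdelay branch skips the delay test entirely (delaymask is just a copy of lagmask), while the upper, lower, and two-sided cases are window tests on the offset-corrected lag map. A compact numpy restatement of those three windows (the example values are made up):

```python
import numpy as np

lagtimes = np.array([-2.5, -0.8, 0.1, 0.9, 2.7])      # toy lag map, in seconds
offsettime, lagminthresh, lagmaxthresh = 0.0, 0.5, 2.0
shifted = lagtimes - offsettime

upper = ((shifted > lagminthresh) & (shifted < lagmaxthresh)).astype(np.int16)
lower = ((shifted < -lagminthresh) & (shifted > -lagmaxthresh)).astype(np.int16)
both = ((np.fabs(shifted) > lagminthresh) & (np.fabs(shifted) < lagmaxthresh)).astype(np.int16)
# with fixdelay=True the window test is skipped and delaymask is simply lagmask + 0
```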
rapidtide/resample.py
CHANGED
@@ -328,6 +328,9 @@ class FastResampler:
             pl.legend(("input", "hires"))
             pl.show()

+    def getdata(self):
+        return self.timeaxis, self.timecourse, self.hires_x, self.hires_y, 1.0 / self.initstep
+
     def info(self, prefix=""):
         print(f"{prefix}{self.timeaxis=}")
         print(f"{prefix}{self.timecourse=}")
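The new FastResampler.getdata() accessor hands back the stored time axis and timecourse together with the internal high-resolution copies and the upsampled sampling frequency (1.0 / initstep). A usage sketch; the constructor arguments shown are an assumption for illustration, not taken from this diff:

```python
import numpy as np
from rapidtide.resample import FastResampler

tr = 1.5
timeaxis = np.arange(0.0, 300.0, tr)
timecourse = np.sin(2.0 * np.pi * 0.05 * timeaxis)

# assumed constructor call: a time axis and a timecourse sampled on it
resampler = FastResampler(timeaxis, timecourse)

orig_x, orig_y, hires_x, hires_y, hires_fs = resampler.getdata()
shifted = resampler.yfromx(timeaxis - 0.7)  # the timecourse delayed by 0.7 s, as in yfromx() above
```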
rapidtide/scripts/{retroglm.py → delayvar.py}

@@ -17,11 +17,11 @@
 #
 #
 import rapidtide.workflows.parser_funcs as pf
-import rapidtide.workflows.
+import rapidtide.workflows.delayvar as theworkflow


 def entrypoint():
-    pf.generic_init(theworkflow._get_parser, theworkflow.
+    pf.generic_init(theworkflow._get_parser, theworkflow.delayvar)


 if __name__ == "__main__":
rapidtide/scripts/{glmfilt.py → linfitfilt.py}

@@ -16,11 +16,11 @@
 # limitations under the License.
 #
 #
-import rapidtide.workflows.
+import rapidtide.workflows.linfitfilt as linfitfilt_workflow


 def entrypoint():
-
+    linfitfilt_workflow.main()


 if __name__ == "__main__":
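Both renamed console-script shims follow the same pattern: hand the workflow's parser factory and main function to pf.generic_init(), which presumably parses the command line and dispatches. The stand-in below is only a guess at that behavior for illustration; the real helper lives in rapidtide.workflows.parser_funcs and may differ:

```python
import sys

def generic_init_sketch(get_parser, workflow):
    # Hypothetical stand-in for parser_funcs.generic_init: build the argument
    # parser, parse sys.argv, and hand the result to the workflow function.
    args = get_parser().parse_args(sys.argv[1:])
    workflow(args)
```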