vbi-0.1.3-cp310-cp310-manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vbi/__init__.py +37 -0
- vbi/_version.py +17 -0
- vbi/dataset/__init__.py +0 -0
- vbi/dataset/connectivity_84/centers.txt +84 -0
- vbi/dataset/connectivity_84/centres.txt +84 -0
- vbi/dataset/connectivity_84/cortical.txt +84 -0
- vbi/dataset/connectivity_84/tract_lengths.txt +84 -0
- vbi/dataset/connectivity_84/weights.txt +84 -0
- vbi/dataset/connectivity_88/Aud_88.txt +88 -0
- vbi/dataset/connectivity_88/Bold.npz +0 -0
- vbi/dataset/connectivity_88/Labels.txt +17 -0
- vbi/dataset/connectivity_88/Region_labels.txt +88 -0
- vbi/dataset/connectivity_88/tract_lengths.txt +88 -0
- vbi/dataset/connectivity_88/weights.txt +88 -0
- vbi/feature_extraction/__init__.py +1 -0
- vbi/feature_extraction/calc_features.py +293 -0
- vbi/feature_extraction/features.json +535 -0
- vbi/feature_extraction/features.py +2124 -0
- vbi/feature_extraction/features_settings.py +374 -0
- vbi/feature_extraction/features_utils.py +1357 -0
- vbi/feature_extraction/infodynamics.jar +0 -0
- vbi/feature_extraction/utility.py +507 -0
- vbi/inference.py +98 -0
- vbi/models/__init__.py +0 -0
- vbi/models/cpp/__init__.py +0 -0
- vbi/models/cpp/_src/__init__.py +0 -0
- vbi/models/cpp/_src/__pycache__/mpr_sde.cpython-310.pyc +0 -0
- vbi/models/cpp/_src/_do.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_jr_sdde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_jr_sde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_km_sde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_mpr_sde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_vep.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_wc_ode.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/bold.hpp +303 -0
- vbi/models/cpp/_src/do.hpp +167 -0
- vbi/models/cpp/_src/do.i +17 -0
- vbi/models/cpp/_src/do.py +467 -0
- vbi/models/cpp/_src/do_wrap.cxx +12811 -0
- vbi/models/cpp/_src/jr_sdde.hpp +352 -0
- vbi/models/cpp/_src/jr_sdde.i +19 -0
- vbi/models/cpp/_src/jr_sdde.py +688 -0
- vbi/models/cpp/_src/jr_sdde_wrap.cxx +18718 -0
- vbi/models/cpp/_src/jr_sde.hpp +264 -0
- vbi/models/cpp/_src/jr_sde.i +17 -0
- vbi/models/cpp/_src/jr_sde.py +470 -0
- vbi/models/cpp/_src/jr_sde_wrap.cxx +13406 -0
- vbi/models/cpp/_src/km_sde.hpp +158 -0
- vbi/models/cpp/_src/km_sde.i +19 -0
- vbi/models/cpp/_src/km_sde.py +671 -0
- vbi/models/cpp/_src/km_sde_wrap.cxx +17367 -0
- vbi/models/cpp/_src/makefile +52 -0
- vbi/models/cpp/_src/mpr_sde.hpp +327 -0
- vbi/models/cpp/_src/mpr_sde.i +19 -0
- vbi/models/cpp/_src/mpr_sde.py +711 -0
- vbi/models/cpp/_src/mpr_sde_wrap.cxx +18618 -0
- vbi/models/cpp/_src/utility.hpp +307 -0
- vbi/models/cpp/_src/vep.hpp +171 -0
- vbi/models/cpp/_src/vep.i +16 -0
- vbi/models/cpp/_src/vep.py +464 -0
- vbi/models/cpp/_src/vep_wrap.cxx +12968 -0
- vbi/models/cpp/_src/wc_ode.hpp +294 -0
- vbi/models/cpp/_src/wc_ode.i +19 -0
- vbi/models/cpp/_src/wc_ode.py +686 -0
- vbi/models/cpp/_src/wc_ode_wrap.cxx +24263 -0
- vbi/models/cpp/damp_oscillator.py +143 -0
- vbi/models/cpp/jansen_rit.py +543 -0
- vbi/models/cpp/km.py +187 -0
- vbi/models/cpp/mpr.py +289 -0
- vbi/models/cpp/vep.py +150 -0
- vbi/models/cpp/wc.py +216 -0
- vbi/models/cupy/__init__.py +0 -0
- vbi/models/cupy/bold.py +111 -0
- vbi/models/cupy/ghb.py +284 -0
- vbi/models/cupy/jansen_rit.py +473 -0
- vbi/models/cupy/km.py +224 -0
- vbi/models/cupy/mpr.py +475 -0
- vbi/models/cupy/mpr_modified_bold.py +12 -0
- vbi/models/cupy/utils.py +184 -0
- vbi/models/numba/__init__.py +0 -0
- vbi/models/numba/_ww_EI.py +444 -0
- vbi/models/numba/damp_oscillator.py +162 -0
- vbi/models/numba/ghb.py +208 -0
- vbi/models/numba/mpr.py +383 -0
- vbi/models/pytorch/__init__.py +0 -0
- vbi/models/pytorch/data/default_parameters.npz +0 -0
- vbi/models/pytorch/data/input/ROI_sim.mat +0 -0
- vbi/models/pytorch/data/input/fc_test.csv +68 -0
- vbi/models/pytorch/data/input/fc_train.csv +68 -0
- vbi/models/pytorch/data/input/fc_vali.csv +68 -0
- vbi/models/pytorch/data/input/fcd_test.mat +0 -0
- vbi/models/pytorch/data/input/fcd_test_high_window.mat +0 -0
- vbi/models/pytorch/data/input/fcd_test_low_window.mat +0 -0
- vbi/models/pytorch/data/input/fcd_train.mat +0 -0
- vbi/models/pytorch/data/input/fcd_vali.mat +0 -0
- vbi/models/pytorch/data/input/myelin.csv +68 -0
- vbi/models/pytorch/data/input/rsfc_gradient.csv +68 -0
- vbi/models/pytorch/data/input/run_label_testset.mat +0 -0
- vbi/models/pytorch/data/input/sc_test.csv +68 -0
- vbi/models/pytorch/data/input/sc_train.csv +68 -0
- vbi/models/pytorch/data/input/sc_vali.csv +68 -0
- vbi/models/pytorch/data/obs_kong0.npz +0 -0
- vbi/models/pytorch/ww_sde_kong.py +570 -0
- vbi/models/tvbk/__init__.py +9 -0
- vbi/models/tvbk/tvbk_wrapper.py +166 -0
- vbi/models/tvbk/utils.py +72 -0
- vbi/papers/__init__.py +0 -0
- vbi/papers/pavlides_pcb_2015/pavlides.py +211 -0
- vbi/tests/__init__.py +0 -0
- vbi/tests/_test_mpr_nb.py +36 -0
- vbi/tests/test_features.py +355 -0
- vbi/tests/test_ghb_cupy.py +90 -0
- vbi/tests/test_mpr_cupy.py +49 -0
- vbi/tests/test_mpr_numba.py +84 -0
- vbi/tests/test_suite.py +19 -0
- vbi/utils.py +402 -0
- vbi-0.1.3.dist-info/METADATA +166 -0
- vbi-0.1.3.dist-info/RECORD +121 -0
- vbi-0.1.3.dist-info/WHEEL +5 -0
- vbi-0.1.3.dist-info/licenses/LICENSE +201 -0
- vbi-0.1.3.dist-info/top_level.txt +1 -0
vbi/models/pytorch/ww_sde_kong.py
@@ -0,0 +1,570 @@
from os.path import join
from copy import copy
from tqdm import tqdm
import numpy as np
import torch
import time
import math
import csv
import vbi
import gc


class BoldParams:
    def __init__(self, par={}):

        self._par = self.get_params()
        self.valid_parameters = list(self._par.keys())
        self.check_parameters(par)
        self._par.update(par)

    def check_parameters(self, par: dict):
        for key in par.keys():
            if key not in self.valid_parameters:
                raise ValueError(f"Invalid parameter {key:s} provided.")

    def get_params(self):

        p_constant = 0.34
        k_1 = 4.3 * 28.265 * 3 * 0.0331 * p_constant
        k_2 = 0.47 * 110 * 0.0331 * p_constant
        par = {
            "beta": 0.65,
            "gamma": 0.41,
            "tau": 0.98,
            "alpha": 0.33,
            "v_0": 0.02,
            "p_constant": p_constant,
            "k_1": k_1,
            "k_2": k_2,
            "k_3": 0.53,
        }

        return par
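
A minimal usage sketch (illustration only, not part of the packaged file): BoldParams simply validates user overrides against the default hemodynamic (BOLD) parameters and merges them, assuming the wheel is installed and importable as vbi:

    from vbi.models.pytorch.ww_sde_kong import BoldParams

    bp = BoldParams(par={"tau": 1.0})       # override one default
    print(bp._par["tau"], bp._par["k_3"])   # 1.0 0.53
    # BoldParams(par={"taus": 1.0})         # would raise ValueError: Invalid parameter taus provided.
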
class WW_SDE_KONG:

    def __init__(self, par={}, bold_par={}, path=None):

        if path is None:
            path = join(vbi.__file__)
            path = path.replace("__init__.py", "")
            path = join(path, "models/pytorch/data")
        self.input_path = path
        self._par = self.get_default_params()
        self.valid_parameters = list(self._par.keys())
        self.check_parameters(par)
        self._par.update(par)

        for item in self._par.keys():
            setattr(self, item, self._par[item])

        BP = BoldParams(bold_par)
        self.bp = BP._par

        self.myelin_data, self.gradient_data = self.get_myelin_gradient()
        self.nn = self.n_node = self.myelin_data.shape[0]
        # dim = self.n_node * 3 + 1

    def get_default_params(self):

        engine = "cpu"
        self.device = "cuda" if engine == "gpu" else "cpu"

        data = np.load(join(self.input_path, "default_parameters.npz"))
        self.weights = self.get_sc(device=self.device)

        nn = self.weights.shape[0]
        inp = data["input_para"][:, 0]
        w = inp[:nn]
        I0 = inp[nn : 2 * nn]
        g_true = inp[2 * nn]
        s = inp[2 * nn + 1 :]

        return {
            "G": g_true,
            "J": 0.2609,
            "w": w,
            "s": s,
            "I0": I0,
            "a": 270.0,
            "b": 108.0,
            "d": 0.154,
            "tau_s": 0.1,
            "gamma_s": 0.641,
            "t_end": 5.0 * 60.0,
            "t_cut": 2.0 * 60.0,
            "tr": 0.72,
            "dt": 0.01,
            "n_sim": 1,
            "weights": self.weights,
            "engine": engine,
            "device": self.device,
            "dtype": torch.float64,
        }

    def f_mfm(self, y_t):

        x = (
            self.J * self.w * y_t
            + self.J * self.G * torch.mm(self.weights, y_t)
            + self.I0
        )
        # Population firing rate
        H = (self.a * x - self.b) / (1 - torch.exp(-self.d * (self.a * x - self.b)))
        # Synaptic activity
        dy = -1 / self.tau_s * y_t + self.gamma_s * (1 - y_t) * H
        return dy
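
Reading f_mfm above with y_t as the synaptic gating variable S and weights as the structural connectivity C, the per-node drift appears to be the reduced Wong-Wang (dynamic mean-field) form; written out from the code:

    x_i = J w_i S_i + J G \sum_j C_{ij} S_j + I_{0,i}, \qquad
    H(x_i) = \frac{a x_i - b}{1 - e^{-d (a x_i - b)}}, \qquad
    \frac{dS_i}{dt} = -\frac{S_i}{\tau_s} + \gamma_s (1 - S_i) H(x_i)

run() below adds the stochastic term with an Euler-Maruyama step, S <- S + dS dt + sigma dW, where the noise amplitude is derived from the `s` parameter.
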
    def f_rfMRI(self, y_t, F):
        """
        This function implements the hemodynamic model.

        Parameters
        ----------
        y_t: torch.Tensor
            N*M matrix representing the synaptic gating variable;
            N is the number of ROI,
            M is the number of candidate parameter sets
        F: torch.Tensor
            Hemodynamic activity variables

        Returns
        -------
        dF: torch.Tensor
            Derivatives of hemodynamic activity variables
        """
        beta = self.bp["beta"]
        gamma = self.bp["gamma"]
        tau = self.bp["tau"]
        alpha = self.bp["alpha"]
        p_constant = self.bp["p_constant"]
        n_nodes = y_t.shape[0]
        n_set = y_t.shape[1]

        # Calculate derivatives
        if self.engine == "gpu":
            dF = torch.zeros((n_nodes, n_set, 4), dtype=self.dtype).cuda()
        else:
            dF = torch.zeros((n_nodes, n_set, 4), dtype=self.dtype)

        dF[:, :, 0] = y_t - beta * F[:, :, 0] - gamma * (F[:, :, 1] - 1)
        dF[:, :, 1] = F[:, :, 0]
        dF[:, :, 2] = 1 / tau * (F[:, :, 1] - F[:, :, 2] ** (1 / alpha))
        dF[:, :, 3] = (
            1
            / tau
            * (
                F[:, :, 1] / p_constant * (1 - (1 - p_constant) ** (1 / F[:, :, 1]))
                - F[:, :, 3] / F[:, :, 2] * F[:, :, 2] ** (1 / alpha)
            )
        )
        return dF
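
In the order used later in run() (z_t, f_t, v_t, q_t), the four state variables in F are conventionally read as the vasodilatory signal z, inflow f, blood volume v and deoxyhemoglobin content q, and the derivatives computed above match the standard Balloon-Windkessel equations; transcribed from the code:

    \dot z = S - \beta z - \gamma (f - 1), \qquad
    \dot f = z, \qquad
    \dot v = \frac{1}{\tau}\left(f - v^{1/\alpha}\right), \qquad
    \dot q = \frac{1}{\tau}\left(\frac{f}{\rho}\left(1 - (1-\rho)^{1/f}\right) - \frac{q}{v} v^{1/\alpha}\right)

with rho = p_constant. run() then reads out the BOLD signal as 100 / rho * v_0 * (k_1 (1 - q) + k_2 (1 - q / v) + k_3 (1 - v)).
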
    def run(self):
        """
        Generate the simulated BOLD signal using the mean-field model
        and the hemodynamic model.
        Each parameter set can be simulated multiple times to obtain a
        stable result.
        """

        nn = self.nn
        ns = self.n_sim
        dt = self.dt
        n_dup = 1
        dtype = self.dtype
        engine = self.engine
        dt_tensor = torch.tensor(self.dt, dtype=dtype)

        self.prepare_input()

        p_constant = self.bp["p_constant"]
        v_0 = self.bp["v_0"]
        k_1 = self.bp["k_1"]
        k_2 = self.bp["k_2"]
        k_3 = self.bp["k_3"]

        if self.engine == "gpu":
            k_p = torch.arange(0.0, self.t_end + dt, dt).cuda()
        else:
            k_p = torch.arange(0.0, self.t_end + dt, dt)

        nt_samples = k_p.shape[0]
        device = "cuda" if engine == "gpu" else "cpu"
        y_t = torch.zeros((nn, ns), dtype=dtype, device=device)
        d_y = torch.zeros((nn, ns), dtype=dtype, device=device)
        f_mat = torch.ones((nn, ns, 4), dtype=dtype, device=device)
        z_t = torch.zeros((nn, ns), dtype=dtype, device=device)
        f_t = torch.ones((nn, ns), dtype=dtype, device=device)
        v_t = torch.ones((nn, ns), dtype=dtype, device=device)
        q_t = torch.ones((nn, ns), dtype=dtype, device=device)

        f_mat[:, :, 0] = z_t
        y_t[:, :] = 0.001
        # Wiener process
        w_coef = self.s / math.sqrt(0.001)
        if w_coef.shape[0] == 1:
            w_coef = w_coef.repeat(nn, 1)

        d_w = torch.sqrt(dt_tensor) * torch.randn(
            n_dup, nn, nt_samples + 1000, dtype=dtype, device=device
        )

        y_bold = torch.zeros(
            (nn, ns, int(nt_samples / (self.tr / dt) + 1)),
            dtype=torch.float32,
            device=device,
        )

        # Warm up
        for i in range(1000):
            d_y = self.f_mfm(y_t)
            noise_level = (
                d_w[:, :, i].repeat(1, 1, ns).contiguous().view(-1, nn)
            )  # repeat the noise level for all simulations (ns) at one time step
            y_t = y_t + d_y * dt + w_coef * torch.transpose(noise_level, 0, 1)

        # Main body: calculation
        count = 0
        for i in tqdm(range(nt_samples)):
            d_y = self.f_mfm(y_t)
            noise_level = d_w[:, :, i + 1000].repeat(1, 1, ns).contiguous().view(-1, nn)
            y_t = y_t + d_y * dt + w_coef * torch.transpose(noise_level, 0, 1)
            d_f = self.f_rfMRI(y_t, f_mat)
            f_mat = f_mat + d_f * dt
            z_t, f_t, v_t, q_t = torch.chunk(f_mat, 4, dim=2)
            y_bold_temp = (
                100
                / p_constant
                * v_0
                * (k_1 * (1 - q_t) + k_2 * (1 - q_t / v_t) + k_3 * (1 - v_t))
            )

            y_bold[:, :, count] = y_bold_temp[:, :, 0]
            count = count + ((i + 1) % (self.tr / dt) == 0) * 1

        # Downsampling
        cut_index = int(self.t_cut / self.tr)

        t = k_p[cut_index + 1 : y_bold.shape[2]]
        if t.is_cuda:
            t = t.cpu().numpy()

        if engine == "gpu":
            y_bold_cpu = y_bold.cpu()
            y_bold_cpu = y_bold_cpu[:, :, cut_index + 1 : y_bold.shape[2]]
            del y_bold
            torch.cuda.empty_cache()
            gc.collect()
            return {"t": t, "x": y_bold_cpu.numpy()}

        return {
            "t": t,
            "x": y_bold[:, :, cut_index + 1 : y_bold.shape[2]].numpy(),
        }
    def get_sc(self, device: str = "cpu", dtype=torch.float64):
        sc_mat_raw = csv_matrix_read(join(self.input_path, "input", "sc_train.csv"))
        sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
        sc_mat = torch.from_numpy(sc_mat).type(dtype)

        if device == "cuda":
            sc_mat = sc_mat.cuda()
        return sc_mat

    def get_myelin_gradient(self):
        myelin_data = csv_matrix_read(join(self.input_path, "input", "myelin.csv"))
        myelin_data = myelin_data[:, 0]
        gradient_data = csv_matrix_read(
            join(self.input_path, "input", "rsfc_gradient.csv")
        )
        gradient_data = gradient_data[:, 0]
        return myelin_data, gradient_data

    def check_parameters(self, par: dict):
        for key in par.keys():
            if key not in self.valid_parameters:
                raise ValueError(f"Invalid parameter {key:s} provided.")

    def prepare_input(self):

        if isinstance(self.weights, np.ndarray):
            self.weights = torch.from_numpy(self.weights)
        if self.weights.dtype != self.dtype:
            self.weights = self.weights.type(self.dtype)

        if (self.engine == "gpu") and (not is_cuda(self.weights)):
            self.weights = self.weights.cuda()

        self.w = to_vector_2d(
            self.w, self.nn, self.n_sim, dtype=self.dtype, engine=self.engine
        )

        self.I0 = to_vector_2d(
            self.I0, self.nn, self.n_sim, dtype=self.dtype, engine=self.engine
        )
        self.s = to_vector_2d(
            self.s, self.nn, self.n_sim, dtype=self.dtype, engine=self.engine
        )
        self.G = to_vector(self.G, self.n_sim, dtype=self.dtype, engine=self.engine)
        self.J = to_vector(self.J, self.n_sim, dtype=self.dtype, engine=self.engine)
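
A minimal end-to-end sketch (illustration only, not code shipped in the wheel): the constructor loads its defaults from the bundled default_parameters.npz and input/*.csv files listed above, and run() returns the down-sampled BOLD time series. The shortened t_end/t_cut below are only so the sketch finishes quickly; everything else keeps the defaults from get_default_params().

    from vbi.models.pytorch.ww_sde_kong import WW_SDE_KONG

    model = WW_SDE_KONG(par={"t_end": 60.0, "t_cut": 20.0, "n_sim": 2})
    out = model.run()          # {"t": time points, "x": BOLD array}
    print(out["x"].shape)      # (n_node, n_sim, n_volumes)
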
def to_vector(x, ns, dtype=torch.float64, engine="cpu"):
    """
    Converts the input `x` to a tensor of specified size and type.

    Parameters
    ------------------
    x : array-like or torch.Tensor
        The input data to be converted to a tensor.
    ns : int
        The size to which the tensor should be repeated if `x` is a single element.
    dtype : torch.dtype, optional
        The desired data type of the tensor. Default is `torch.float64`.
    engine : str, optional
        The computation engine to use, either `"cpu"` or `"gpu"`. Default is `"cpu"`.

    Returns
    ------------------
    torch.Tensor
        The converted tensor with the specified size and type.

    Raises
    ------------------
    AssertionError
        If the size of `x` is not 1 and does not match `ns`.
    """

    if not isinstance(x, torch.Tensor):
        x = torch.tensor(x, dtype=dtype)
    if x.ndim == 0:  # scalar
        x = x.repeat(1, ns)
    elif x.ndim == 1:
        assert x.size(0) == ns, f"input size must be 1 or {ns}"
        x = x.view(1, ns)
    if engine == "gpu":
        x = x.cuda()
    return x


def to_vector_2d(x, nn, ns, dtype=torch.float64, engine="cpu"):
    """
    Convert input `x` to a tensor of specified size and type.

    Parameters
    ------------------
    x : array-like or torch.Tensor
        The input data to be converted to a tensor.
    nn : int
        The number of nodes.
    ns : int
        The number of simulations.
    dtype : torch.dtype, optional
        The desired data type of the tensor. Default is `torch.float64`.
    engine : str, optional
        The computation engine to use, either `"cpu"` or `"gpu"`. Default is `"cpu"`.

    Returns
    ------------------
    torch.Tensor
        The converted tensor with the specified size and type.
    """

    if not isinstance(x, torch.Tensor):
        x = torch.tensor(x, dtype=dtype)
    if x.ndim == 0:  # scalar
        x = x.repeat(nn, ns)
    elif x.ndim == 1:
        assert x.size(0) == nn, f"input size must be 1 or {nn}"
        x = x.view(nn, 1).repeat(1, ns)

    if engine == "gpu":
        x = x.cuda()
    return x
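
These two helpers only normalize shapes for broadcasting inside the model: scalars are tiled across simulations (and across nodes in the 2-D variant), while 1-D inputs are length-checked. A small illustration of the expected shapes (68 is just an example node count):

    import torch
    from vbi.models.pytorch.ww_sde_kong import to_vector, to_vector_2d

    print(to_vector(0.26, ns=4).shape)                # torch.Size([1, 4])
    print(to_vector_2d(0.5, nn=68, ns=4).shape)       # torch.Size([68, 4])
    print(to_vector_2d(torch.rand(68), 68, 4).shape)  # torch.Size([68, 4])
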
# =============================================================================
### Functions for heterogeneous parameter estimation corresponding to:
### Kong et al., 2021. Nature Communications, 12(1), p.6373.


def csv_matrix_read(filename):
    """
    Read a CSV file into a numpy array.
    Args:
        filename: input csv file
    Returns:
        out_array: output numpy array
    """

    csv_file = open(filename, "r")
    read_handle = csv.reader(csv_file)
    out_list = []
    R = 0
    for row in read_handle:
        out_list.append([])
        for col in row:
            out_list[R].append(float(col))
        R = R + 1
    out_array = np.array(out_list)
    csv_file.close()
    return out_array


def is_cuda(tensor: torch.Tensor):
    if not isinstance(tensor, torch.Tensor):
        print("not a tensor")
        return False
    return tensor.is_cuda
def get_ranges():
    return {
        "w": np.array([[0.3, 0.7], [-0.3, -0.04], [0.0, 0.06]]),
        "i": np.array([[0.2, 0.32], [-0.006, 0.02], [-0.02, -0.003]]),
        "g": np.array([1.0, 10.0]),
        "s": np.array([[0.004, 0.006], [-0.001, 0.001], [0.0, 0.0008]]),
    }


def get_prior_limits():

    ranges = get_ranges()
    prior_min = np.hstack(
        [ranges["w"][:, 0], ranges["i"][:, 0], ranges["g"][0], ranges["s"][:, 0]]
    )
    prior_max = np.hstack(
        [ranges["w"][:, 1], ranges["i"][:, 1], ranges["g"][1], ranges["s"][:, 1]]
    )

    return prior_min, prior_max


def sample_prior(My, Gr):
    rand = np.random.uniform
    ranges = get_ranges()
    wrange = ranges["w"]
    irange = ranges["i"]
    grange = ranges["g"]
    srange = ranges["s"]

    f = lambda x, y, z: x * My + y * Gr + z

    abc_w = [rand(*wrange[0]), rand(*wrange[1]), rand(*wrange[2])]
    abc_i = [rand(*irange[0]), rand(*irange[1]), rand(*irange[2])]
    abc_s = [rand(*srange[0]), rand(*srange[1]), rand(*srange[2])]
    g = rand(*grange)

    w = f(*abc_w)
    i = f(*abc_i)
    s = f(*abc_s)

    return w, i, s, g, abc_w, abc_i, abc_s
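
As the lambda inside sample_prior(My, Gr) makes explicit, node-wise heterogeneity is restricted to an affine function of the myelin and RSFC-gradient maps, e.g. for the recurrent weight

    w_i = a_w \mathrm{My}_i + b_w \mathrm{Gr}_i + c_w

with analogous (a, b, c) triplets for the external input and the noise amplitude, plus a single global coupling G; get_prior_limits() stacks the coefficient bounds from get_ranges() into flat prior_min / prior_max vectors in that same order.
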
def get_search_range(nn):
    """
    Returns the search range for the given number of nodes.
    """
    wrange = [0, 1]
    irange = [0, 0.5]
    grange = [1, 10]
    srange = [0.0005, 0.01]

    d = 3 * nn + 1
    search_range = np.zeros((d, 2))
    search_range[:nn, :] = wrange
    search_range[nn : 2 * nn, :] = irange
    search_range[2 * nn, :] = grange
    search_range[2 * nn + 1 :, :] = srange

    return search_range


def check_range(w, ii, g, s):
    aw, bw, cw = w
    ai, bi, ci = ii
    asig, bsig, csig = s

    wrange = np.array([[0.3, 0.7], [-0.3, -0.04], [0.0, 0.06]])
    irange = np.array([[0.2, 0.32], [-0.006, 0.02], [-0.02, -0.003]])
    srange = np.array([[0.004, 0.006], [-0.001, 0.0001], [0.0, 0.0008]])
    grange = np.array([5, 7])

    if aw < wrange[0, 0] or aw > wrange[0, 1]:
        return False
    if bw < wrange[1, 0] or bw > wrange[1, 1]:
        return False
    if cw < wrange[2, 0] or cw > wrange[2, 1]:
        return False
    if ai < irange[0, 0] or ai > irange[0, 1]:
        return False
    if bi < irange[1, 0] or bi > irange[1, 1]:
        return False
    if ci < irange[2, 0] or ci > irange[2, 1]:
        return False
    if asig < srange[0, 0] or asig > srange[0, 1]:
        return False
    if bsig < srange[1, 0] or bsig > srange[1, 1]:
        return False
    if csig < srange[2, 0] or csig > srange[2, 1]:
        return False
    if g < grange[0] or g > grange[1]:
        return False
    return True
def get_cmatrix(myelin_data, gradient_data):

    nn = myelin_data.shape[0]
    return np.vstack((myelin_data, gradient_data, np.ones(nn))).T


def get_invcc(cmatrix):
    invcc = np.linalg.inv(cmatrix.T @ cmatrix) @ cmatrix.T
    return invcc


def sample_prior(search_range, dim, cmatrix, invcc):
    init_para = np.zeros(dim)

    init_para = (
        np.random.uniform(0, 1, dim) * (search_range[:, 1] - search_range[:, 0])
        + search_range[:, 0]
    )
    nn = cmatrix.shape[0]
    w = invcc @ init_para[:nn]
    i = invcc @ init_para[nn : 2 * nn]
    g = init_para[2 * nn]
    s = invcc @ init_para[2 * nn + 1 :]
    return init_para, w, i, g, s


def get_init(myelin_data, gradient_data, highest_order, init_para):
    """
    Calculate the initial parameterized coefficients.
    """

    n_node = myelin_data.shape[0]
    amatrix = np.zeros((n_node, highest_order + 1))
    bmatrix = np.zeros((n_node, highest_order + 1))
    for i in range(highest_order + 1):
        amatrix[:, i] = myelin_data ** (i)
        bmatrix[:, i] = gradient_data ** (i)
    cmatrix = np.hstack((amatrix, bmatrix[:, 1 : highest_order + 1]))
    para = np.linalg.inv(cmatrix.T @ cmatrix) @ cmatrix.T @ init_para
    return para, cmatrix


def make_input_para(theta, cmatrix, dim):
    input_para = np.zeros(dim)
    nn = cmatrix.shape[0]
    input_para[:nn] = cmatrix @ theta[:3]
    input_para[nn : nn * 2] = cmatrix @ theta[3:6]
    input_para[nn * 2] = theta[6]
    input_para[nn * 2 + 1 :] = cmatrix @ theta[7:]
    return input_para