vbi 0.1.3__cp310-cp310-manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vbi/__init__.py +37 -0
- vbi/_version.py +17 -0
- vbi/dataset/__init__.py +0 -0
- vbi/dataset/connectivity_84/centers.txt +84 -0
- vbi/dataset/connectivity_84/centres.txt +84 -0
- vbi/dataset/connectivity_84/cortical.txt +84 -0
- vbi/dataset/connectivity_84/tract_lengths.txt +84 -0
- vbi/dataset/connectivity_84/weights.txt +84 -0
- vbi/dataset/connectivity_88/Aud_88.txt +88 -0
- vbi/dataset/connectivity_88/Bold.npz +0 -0
- vbi/dataset/connectivity_88/Labels.txt +17 -0
- vbi/dataset/connectivity_88/Region_labels.txt +88 -0
- vbi/dataset/connectivity_88/tract_lengths.txt +88 -0
- vbi/dataset/connectivity_88/weights.txt +88 -0
- vbi/feature_extraction/__init__.py +1 -0
- vbi/feature_extraction/calc_features.py +293 -0
- vbi/feature_extraction/features.json +535 -0
- vbi/feature_extraction/features.py +2124 -0
- vbi/feature_extraction/features_settings.py +374 -0
- vbi/feature_extraction/features_utils.py +1357 -0
- vbi/feature_extraction/infodynamics.jar +0 -0
- vbi/feature_extraction/utility.py +507 -0
- vbi/inference.py +98 -0
- vbi/models/__init__.py +0 -0
- vbi/models/cpp/__init__.py +0 -0
- vbi/models/cpp/_src/__init__.py +0 -0
- vbi/models/cpp/_src/__pycache__/mpr_sde.cpython-310.pyc +0 -0
- vbi/models/cpp/_src/_do.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_jr_sdde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_jr_sde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_km_sde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_mpr_sde.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_vep.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/_wc_ode.cpython-310-x86_64-linux-gnu.so +0 -0
- vbi/models/cpp/_src/bold.hpp +303 -0
- vbi/models/cpp/_src/do.hpp +167 -0
- vbi/models/cpp/_src/do.i +17 -0
- vbi/models/cpp/_src/do.py +467 -0
- vbi/models/cpp/_src/do_wrap.cxx +12811 -0
- vbi/models/cpp/_src/jr_sdde.hpp +352 -0
- vbi/models/cpp/_src/jr_sdde.i +19 -0
- vbi/models/cpp/_src/jr_sdde.py +688 -0
- vbi/models/cpp/_src/jr_sdde_wrap.cxx +18718 -0
- vbi/models/cpp/_src/jr_sde.hpp +264 -0
- vbi/models/cpp/_src/jr_sde.i +17 -0
- vbi/models/cpp/_src/jr_sde.py +470 -0
- vbi/models/cpp/_src/jr_sde_wrap.cxx +13406 -0
- vbi/models/cpp/_src/km_sde.hpp +158 -0
- vbi/models/cpp/_src/km_sde.i +19 -0
- vbi/models/cpp/_src/km_sde.py +671 -0
- vbi/models/cpp/_src/km_sde_wrap.cxx +17367 -0
- vbi/models/cpp/_src/makefile +52 -0
- vbi/models/cpp/_src/mpr_sde.hpp +327 -0
- vbi/models/cpp/_src/mpr_sde.i +19 -0
- vbi/models/cpp/_src/mpr_sde.py +711 -0
- vbi/models/cpp/_src/mpr_sde_wrap.cxx +18618 -0
- vbi/models/cpp/_src/utility.hpp +307 -0
- vbi/models/cpp/_src/vep.hpp +171 -0
- vbi/models/cpp/_src/vep.i +16 -0
- vbi/models/cpp/_src/vep.py +464 -0
- vbi/models/cpp/_src/vep_wrap.cxx +12968 -0
- vbi/models/cpp/_src/wc_ode.hpp +294 -0
- vbi/models/cpp/_src/wc_ode.i +19 -0
- vbi/models/cpp/_src/wc_ode.py +686 -0
- vbi/models/cpp/_src/wc_ode_wrap.cxx +24263 -0
- vbi/models/cpp/damp_oscillator.py +143 -0
- vbi/models/cpp/jansen_rit.py +543 -0
- vbi/models/cpp/km.py +187 -0
- vbi/models/cpp/mpr.py +289 -0
- vbi/models/cpp/vep.py +150 -0
- vbi/models/cpp/wc.py +216 -0
- vbi/models/cupy/__init__.py +0 -0
- vbi/models/cupy/bold.py +111 -0
- vbi/models/cupy/ghb.py +284 -0
- vbi/models/cupy/jansen_rit.py +473 -0
- vbi/models/cupy/km.py +224 -0
- vbi/models/cupy/mpr.py +475 -0
- vbi/models/cupy/mpr_modified_bold.py +12 -0
- vbi/models/cupy/utils.py +184 -0
- vbi/models/numba/__init__.py +0 -0
- vbi/models/numba/_ww_EI.py +444 -0
- vbi/models/numba/damp_oscillator.py +162 -0
- vbi/models/numba/ghb.py +208 -0
- vbi/models/numba/mpr.py +383 -0
- vbi/models/pytorch/__init__.py +0 -0
- vbi/models/pytorch/data/default_parameters.npz +0 -0
- vbi/models/pytorch/data/input/ROI_sim.mat +0 -0
- vbi/models/pytorch/data/input/fc_test.csv +68 -0
- vbi/models/pytorch/data/input/fc_train.csv +68 -0
- vbi/models/pytorch/data/input/fc_vali.csv +68 -0
- vbi/models/pytorch/data/input/fcd_test.mat +0 -0
- vbi/models/pytorch/data/input/fcd_test_high_window.mat +0 -0
- vbi/models/pytorch/data/input/fcd_test_low_window.mat +0 -0
- vbi/models/pytorch/data/input/fcd_train.mat +0 -0
- vbi/models/pytorch/data/input/fcd_vali.mat +0 -0
- vbi/models/pytorch/data/input/myelin.csv +68 -0
- vbi/models/pytorch/data/input/rsfc_gradient.csv +68 -0
- vbi/models/pytorch/data/input/run_label_testset.mat +0 -0
- vbi/models/pytorch/data/input/sc_test.csv +68 -0
- vbi/models/pytorch/data/input/sc_train.csv +68 -0
- vbi/models/pytorch/data/input/sc_vali.csv +68 -0
- vbi/models/pytorch/data/obs_kong0.npz +0 -0
- vbi/models/pytorch/ww_sde_kong.py +570 -0
- vbi/models/tvbk/__init__.py +9 -0
- vbi/models/tvbk/tvbk_wrapper.py +166 -0
- vbi/models/tvbk/utils.py +72 -0
- vbi/papers/__init__.py +0 -0
- vbi/papers/pavlides_pcb_2015/pavlides.py +211 -0
- vbi/tests/__init__.py +0 -0
- vbi/tests/_test_mpr_nb.py +36 -0
- vbi/tests/test_features.py +355 -0
- vbi/tests/test_ghb_cupy.py +90 -0
- vbi/tests/test_mpr_cupy.py +49 -0
- vbi/tests/test_mpr_numba.py +84 -0
- vbi/tests/test_suite.py +19 -0
- vbi/utils.py +402 -0
- vbi-0.1.3.dist-info/METADATA +166 -0
- vbi-0.1.3.dist-info/RECORD +121 -0
- vbi-0.1.3.dist-info/WHEEL +5 -0
- vbi-0.1.3.dist-info/licenses/LICENSE +201 -0
- vbi-0.1.3.dist-info/top_level.txt +1 -0
vbi/utils.py
ADDED
@@ -0,0 +1,402 @@
import os
import re
import time
import torch
import numpy as np

from rich import box
from rich.table import Table
from rich.console import Console

from os.path import join
from scipy.stats import gaussian_kde
from sbi.analysis.plot import _get_default_opts, _update, ensure_numpy

from torch import Tensor
from typing import Union

try:
    import nbformat
    from nbconvert import PythonExporter
except ImportError:  # optional dependency, only needed by j2p()
    pass

def timer(func):
    """
    Decorator to measure elapsed time.

    Parameters
    ----------
    func : function
        Function to be decorated.
    """

    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        display_time(end - start, message="{:s}".format(func.__name__))
        return result

    return wrapper


def display_time(time, message=""):
    """
    Display elapsed time in hours, minutes, seconds.

    Parameters
    ----------
    time : float
        Elapsed time in seconds.
    """

    hour = int(time / 3600)
    minute = (int(time % 3600)) // 60
    second = time - (3600.0 * hour + 60.0 * minute)
    print(
        "{:s} Done in {:d} hours {:d} minutes {:09.6f} seconds".format(
            message, hour, minute, second
        )
    )
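
# Illustrative usage (a sketch, not part of the original module): `timer`
# wraps any function and reports its runtime through `display_time`.
#
# >>> @timer
# ... def work(n):
# ...     return sum(range(n))
# >>> work(10_000_000)
# work Done in 0 hours 0 minutes 00.123456 seconds   (timing varies)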


class LoadSample(object):
    def __init__(self, nn=84) -> None:
        self.root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.nn = nn

    def get_weights(self, normalize=True):
        nn = self.nn
        SC_name = join(
            self.root_dir, "vbi", "dataset", f"connectivity_{nn}", "weights.txt"
        )
        SC = np.loadtxt(SC_name)
        np.fill_diagonal(SC, 0.0)
        if normalize:
            SC /= SC.max()
        SC[SC < 0] = 0.0
        return SC

    def get_lengths(self):
        nn = self.nn
        tract_lengths_name = join(
            self.root_dir, "vbi", "dataset", f"connectivity_{nn}", "tract_lengths.txt"
        )
        tract_lengths = np.loadtxt(tract_lengths_name)
        return tract_lengths

    def get_bold(self):
        nn = self.nn
        bold_name = join(
            self.root_dir, "vbi", "dataset", f"connectivity_{nn}", "Bold.npz"
        )
        bold = np.load(bold_name)["Bold"]
        return bold.T
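
# Usage sketch with the 84-region dataset bundled under vbi/dataset
# (per the file listing, Bold.npz ships only with the 88-region set, so
# use nn=88 for get_bold):
#
# >>> loader = LoadSample(nn=84)
# >>> SC = loader.get_weights()    # (84, 84) connectivity, normalized to max 1
# >>> tl = loader.get_lengths()    # (84, 84) tract lengths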


def get_limits(samples, limits=None):

    if not isinstance(samples, list):
        samples = ensure_numpy(samples)
        samples = [samples]
    else:
        for i, sample_pack in enumerate(samples):
            samples[i] = ensure_numpy(samples[i])

    # Dimensionality of the problem.
    dim = samples[0].shape[1]

    if limits == [] or limits is None:
        limits = []
        for d in range(dim):
            min_val = +np.inf
            max_val = -np.inf
            for sample in samples:
                min_ = sample[:, d].min()
                min_val = min_ if min_ < min_val else min_val
                max_ = sample[:, d].max()
                max_val = max_ if max_ > max_val else max_val
            limits.append([min_val, max_val])
    else:
        if len(limits) == 1:
            limits = [limits[0] for _ in range(dim)]
    limits = torch.as_tensor(limits)

    return limits
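
# Sketch: per-parameter [min, max] bounds, e.g. for plotting axes.
#
# >>> s = torch.randn(500, 3)
# >>> get_limits(s).shape          # torch.Size([3, 2])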


def posterior_peaks(samples, return_dict=False, **kwargs):

    opts = _get_default_opts()
    opts = _update(opts, kwargs)

    limits = get_limits(samples)
    samples = samples.numpy()
    n, dim = samples.shape

    try:
        labels = opts["labels"]
    except KeyError:
        labels = range(dim)

    peaks = {}
    if labels is None:
        labels = range(dim)
    for i in range(dim):
        peaks[labels[i]] = 0

    for row in range(dim):
        density = gaussian_kde(samples[:, row], bw_method=opts["kde_diag"]["bw_method"])
        xs = np.linspace(limits[row, 0], limits[row, 1], opts["kde_diag"]["bins"])
        ys = density(xs)

        peaks[labels[row]] = xs[ys.argmax()]

    if return_dict:
        return peaks
    else:
        return list(peaks.values())
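
# Sketch of intended use (assuming `posterior` is a trained sbi posterior
# and `x_obs` the observed features; names are illustrative):
#
# >>> samples = posterior.sample((10_000,), x=x_obs)  # Tensor [n_samples, n_params]
# >>> posterior_peaks(samples)  # peak of each 1D marginal KDE, one value per parameter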


def p2j(modulePath):
    """Convert a python script to a jupyter notebook (requires the `p2j` CLI)."""
    os.system(f"p2j -o {modulePath}")


def j2p(notebookPath, modulePath=None):
    """
    Convert a jupyter notebook to a python module.

    >>> j2p("sample.ipynb", "sample.py")

    """

    with open(notebookPath) as fh:
        nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT)

    exporter = PythonExporter()
    source, meta = exporter.from_notebook_node(nb)

    # remove lines starting with `# In[` from the source
    source = re.sub(r"^# In\[[0-9 ]*\]:\n", "", source, flags=re.MULTILINE)

    # collapse runs of blank lines into a single blank line
    source = re.sub(r"\n{2,}", "\n\n", source)

    if modulePath is None:
        modulePath = notebookPath.replace(".ipynb", ".py")

    with open(modulePath, "w+") as fh:
        fh.writelines(source)
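
# Round-trip sketch (j2p needs nbformat/nbconvert; p2j shells out to the
# `p2j` command-line tool, which must be installed separately):
#
# >>> j2p("analysis.ipynb")   # writes analysis.py next to the notebook
# >>> p2j("analysis.py")      # regenerates a notebook via the p2j CLI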


def posterior_shrinkage(
    prior_samples: Union[Tensor, np.ndarray], post_samples: Union[Tensor, np.ndarray]
) -> Tensor:
    """
    Calculate the posterior shrinkage, quantifying how much
    the posterior distribution contracts from the initial
    prior distribution.
    References:
    https://arxiv.org/abs/1803.08393

    Parameters
    ----------
    prior_samples : array-like or torch.Tensor [n_samples, n_params]
        Samples from the prior distribution.
    post_samples : array-like or torch.Tensor [n_samples, n_params]
        Samples from the posterior distribution.

    Returns
    -------
    shrinkage : torch.Tensor [n_params]
        The posterior shrinkage.
    """

    if len(prior_samples) == 0 or len(post_samples) == 0:
        raise ValueError("Input samples are empty")

    if not isinstance(prior_samples, torch.Tensor):
        prior_samples = torch.tensor(prior_samples, dtype=torch.float32)
    if not isinstance(post_samples, torch.Tensor):
        post_samples = torch.tensor(post_samples, dtype=torch.float32)

    if prior_samples.ndim == 1:
        prior_samples = prior_samples[:, None]
    if post_samples.ndim == 1:
        post_samples = post_samples[:, None]

    prior_std = torch.std(prior_samples, dim=0)
    post_std = torch.std(post_samples, dim=0)

    return 1 - (post_std / prior_std) ** 2
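
# Worked toy example: a prior std of 1.0 contracting to a posterior std of
# 0.1 gives shrinkage 1 - (0.1 / 1.0) ** 2 = 0.99, i.e. the data were highly
# informative for that parameter (values near 1 mean strong contraction).
#
# >>> prior = torch.randn(1000, 1)         # std ~ 1.0
# >>> post = 0.1 * torch.randn(1000, 1)    # std ~ 0.1
# >>> posterior_shrinkage(prior, post)     # tensor([~0.99])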


def posterior_zscore(
    true_theta: Union[Tensor, np.ndarray, float],
    post_samples: Union[Tensor, np.ndarray],
):
    """
    Calculate the posterior z-score, quantifying how much the posterior
    distribution of a parameter encompasses its true value.
    References:
    https://arxiv.org/abs/1803.08393

    Parameters
    ----------
    true_theta : float, array-like or torch.Tensor [n_params]
        The true value of the parameters.
    post_samples : array-like or torch.Tensor [n_samples, n_params]
        Samples from the posterior distributions.

    Returns
    -------
    z : Tensor [n_params]
        The z-score of the posterior distributions.
    """

    if len(post_samples) == 0:
        raise ValueError("Input samples are empty")

    if not isinstance(true_theta, torch.Tensor):
        true_theta = torch.tensor(true_theta, dtype=torch.float32)
    if not isinstance(post_samples, torch.Tensor):
        post_samples = torch.tensor(post_samples, dtype=torch.float32)

    true_theta = torch.atleast_1d(true_theta)
    if post_samples.ndim == 1:
        post_samples = post_samples[:, None]

    post_mean = torch.mean(post_samples, dim=0)
    post_std = torch.std(post_samples, dim=0)

    return torch.abs((post_mean - true_theta) / post_std)
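
# Worked toy example: a posterior with mean 0.9 and std 0.05 around a true
# value of 1.0 gives z = |0.9 - 1.0| / 0.05 = 2.0, i.e. the truth lies two
# posterior standard deviations from the posterior mean.
#
# >>> post = 0.9 + 0.05 * torch.randn(1000)
# >>> posterior_zscore(1.0, post)          # tensor([~2.0])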


def set_diag(A: np.ndarray, k: int = 0, value: float = 0.0):
    """
    Set the diagonals of the given matrix, from -k to +k, to the given value.

    Parameters
    ----------
    A : np.ndarray
        Input matrix.
    k : int
        Number of diagonals on each side of the main diagonal.
    value : float
        Value to be set.

    Returns
    -------
    A : np.ndarray
        Matrix with the 2k + 1 central diagonals set to value.
    """

    assert len(A.shape) == 2
    n = A.shape[0]
    assert k < n
    for i in range(-k, k + 1):
        mask = np.diag(np.ones(n - abs(i)), i)
        idx = np.where(mask)
        A[idx] = value
    return A
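
# Example: zero the main diagonal and the first off-diagonals of a 4x4 matrix.
#
# >>> set_diag(np.ones((4, 4)), k=1, value=0.0)
# array([[0., 0., 1., 1.],
#        [0., 0., 0., 1.],
#        [1., 0., 0., 0.],
#        [1., 1., 0., 0.]])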


def test_imports():
    """Check required dependencies, print versions, and warn if unavailable."""
    console = Console()
    table = Table(title="Dependency Check", box=box.SIMPLE_HEAVY)
    table.add_column("Package", style="bold cyan")
    table.add_column("Version", style="bold green")
    table.add_column("Status", style="bold yellow")

    dependencies = [
        ("vbi", "vbi"),
        ("numpy", "numpy"),
        ("scipy", "scipy"),
        ("matplotlib", "matplotlib"),
        ("sbi", "sbi"),
        ("torch", "torch"),
        ("cupy", "cupy"),
    ]

    for name, module in dependencies:
        try:
            pkg = __import__(module)
            version = pkg.__version__
            status = "✅ Available"
        except ImportError:
            version = "-"
            status = "❌ Not Found"

        table.add_row(name, version, status)

    console.print(table)

    # Additional GPU checks
    try:
        import torch
        console.print(f"[bold blue]Torch GPU available:[/bold blue] {torch.cuda.is_available()}")
        console.print(f"[bold blue]Torch device count:[/bold blue] {torch.cuda.device_count()}")
        console.print(f"[bold blue]Torch CUDA version:[/bold blue] {torch.version.cuda}")  # CUDA version used by PyTorch
    except ImportError:
        pass

    try:
        import cupy
        console.print(f"[bold blue]CuPy GPU available:[/bold blue] {cupy.cuda.is_available()}")
        console.print(f"[bold blue]CuPy device count:[/bold blue] {cupy.cuda.runtime.getDeviceCount()}")
        info = get_cuda_info()
        if isinstance(info, dict):
            print(f"CUDA Version: {info['cuda_version']}")
            print(f"Device Name: {info['device_name']}")
            print(f"Total Memory: {info['total_memory']:.2f} GB")
            print(f"Compute Capability: {info['compute_capability']}")
    except ImportError:
        pass


def get_cuda_info():
    """
    Get CUDA version and device information using CuPy.

    Returns:
        dict: Dictionary containing CUDA version and device information
    """
    try:
        # Import inside the try block so the ImportError handler below is reachable.
        import cupy as cp

        # Get the CUDA runtime version (encoded as 1000 * major + 10 * minor)
        cuda_version = cp.cuda.runtime.runtimeGetVersion()
        major = cuda_version // 1000
        minor = (cuda_version % 1000) // 10

        # Get device info
        device = cp.cuda.runtime.getDeviceProperties(0)

        return {
            'cuda_version': f"{major}.{minor}",
            'device_name': device['name'].decode(),
            'total_memory': device['totalGlobalMem'] / (1024**3),  # Convert to GB
            'compute_capability': f"{device['major']}.{device['minor']}"
        }
    except ImportError:
        return "CuPy is not installed"
    except Exception as e:
        return f"Error getting CUDA information: {str(e)}"
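
# Usage sketch: the return value doubles as the error channel, so callers
# should check for a dict before indexing (as test_imports() does above):
#
# >>> info = get_cuda_info()
# >>> if isinstance(info, dict):
# ...     print(info["cuda_version"], info["device_name"])
# ... else:
# ...     print(info)   # human-readable error string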


# def tests():
#     from vbi.tests.test_suite import tests
#     tests()

vbi-0.1.3.dist-info/METADATA
ADDED
@@ -0,0 +1,166 @@
Metadata-Version: 2.4
Name: vbi
Version: 0.1.3
Summary: Virtual brain inference.
Author-email: Abolfazl Ziaeemehr <a.ziaeemehr@gmail.com>, Meysam Hashemi <meysam.hashemi@gmail.com>, Marmaduke Woodman <marmaduke.woodman@gmail.com>
License: MIT
Project-URL: homepage, https://ziaeemehr.github.io/vbi_paper/
Project-URL: repository, https://github.com/Ziaeemehr/vbi_paper
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Scientific/Engineering :: Information Analysis
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: wheel
Requires-Dist: numpy
Requires-Dist: scipy
Requires-Dist: numba
Requires-Dist: h5py
Requires-Dist: pandas
Requires-Dist: networkx
Requires-Dist: nbconvert
Requires-Dist: matplotlib
Requires-Dist: setuptools-scm
Requires-Dist: tqdm
Requires-Dist: sbi
Requires-Dist: torch
Requires-Dist: parameterized
Requires-Dist: scikit-learn
Requires-Dist: pycatch22
Requires-Dist: pytest
Requires-Dist: swig
Requires-Dist: rich
Provides-Extra: dev
Provides-Extra: docs
Requires-Dist: sphinx; extra == "docs"
Requires-Dist: numpydoc; extra == "docs"
Requires-Dist: nbformat; extra == "docs"
Requires-Dist: nbsphinx; extra == "docs"
Requires-Dist: cloud_sptheme; extra == "docs"
Requires-Dist: sphinx_bootstrap_theme; extra == "docs"
Provides-Extra: cu
Requires-Dist: cupy; extra == "cu"
Provides-Extra: all
Requires-Dist: pytest; extra == "all"
Requires-Dist: sphinx; extra == "all"
Requires-Dist: numpydoc; extra == "all"
Requires-Dist: nbformat; extra == "all"
Requires-Dist: nbsphinx; extra == "all"
Requires-Dist: cloud_sptheme; extra == "all"
Requires-Dist: sphinx_bootstrap_theme; extra == "all"
Requires-Dist: cupy; extra == "all"
Dynamic: license-file

[Tests](https://github.com/ins-amu/vbi/actions/workflows/tests.yml)
[Documentation](https://vbi.readthedocs.io/latest/)
[DOI](https://doi.org/10.5281/zenodo.14795543)
[Docker image](https://github.com/ins-amu/vbi/actions/workflows/docker-image.yml)
<!-- [Binder](https://mybinder.org/v2/gh/ins-amu/vbi/main?labpath=docs/examples/intro.ipynb) -->


# VBI: Virtual Brain Inference
---
[Getting Started](https://github.com/ins-amu/vbi/tree/main/docs/examples) |
[Documentation](https://vbi.readthedocs.io/latest/)

<p align="center">
<img src="https://github.com/Ziaeemehr/vbi_paper/blob/main/vbi_log.png" width="250">
</p>


## Installation

```bash
conda create --name vbi python=3.10
conda activate vbi
git clone https://github.com/ins-amu/vbi.git
cd vbi
pip install .

# pip install -e .[all,dev,docs]
```

## Using Docker

To use the Docker image, you can pull it from the GitHub Container Registry and run it as follows:

```bash
# Get it without building anything locally
# without GPU
docker run --rm -it -p 8888:8888 ghcr.io/ins-amu/vbi:main

# with GPU
docker run --gpus all --rm -it -p 8888:8888 ghcr.io/ins-amu/vbi:main


# or build it locally:
docker build -t vbi-project .                       # build
docker run --gpus all -it -p 8888:8888 vbi-project  # use with gpu
```

- Quick check:

```python

import vbi
vbi.tests()
vbi.test_imports()


# Dependency Check
#
# Package      Version       Status
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# vbi          v0.1.3        ✅ Available
# numpy        1.24.4        ✅ Available
# scipy        1.10.1        ✅ Available
# matplotlib   3.7.5         ✅ Available
# sbi          0.22.0        ✅ Available
# torch        2.4.1+cu121   ✅ Available
# cupy         12.3.0        ✅ Available
#
# Torch GPU available: True
# Torch device count: 1
# Torch CUDA version: 12.1
# CuPy GPU available: True
# CuPy device count: 1
# CUDA Version: 11.8
# Device Name: NVIDIA RTX A5000
# Total Memory: 23.68 GB
# Compute Capability: 8.6

```


## Feedback and Contributions

We welcome contributions to the VBI project! If you have suggestions, bug reports, or feature requests, please open an issue on our [GitHub repository](https://github.com/ins-amu/vbi/issues). To contribute code, fork the repository, create a new branch for your feature or bugfix, and submit a pull request. Make sure to follow our coding standards and include tests for your changes. For detailed guidelines, please refer to our [CONTRIBUTING.md](https://github.com/ins-amu/vbi/blob/main/CONTRIBUTING.md) file. Thank you for helping us improve VBI!


## Citation

```bibtex
@article{VBI,
  author = {Ziaeemehr, Abolfazl and Woodman, Marmaduke and Domide, Lia and Petkoski, Spase and Jirsa, Viktor and Hashemi, Meysam},
  title = {Virtual Brain Inference (VBI): A flexible and integrative toolkit for efficient probabilistic inference on virtual brain models},
  journal = {bioRxiv},
  year = {2025},
  doi = {10.1101/2025.01.21.633922},
  url = {https://doi.org/10.1101/2025.01.21.633922},
  abstract = {Network neuroscience has proven essential for understanding the principles and mechanisms underlying complex brain (dys)function and cognition. In this context, whole-brain network modeling--also known as virtual brain modeling--combines computational models of brain dynamics (placed at each network node) with individual brain imaging data (to coordinate and connect the nodes), advancing our understanding of the complex dynamics of the brain and its neurobiological underpinnings. However, there remains a critical need for automated model inversion tools to estimate control (bifurcation) parameters at large scales and across neuroimaging modalities, given their varying spatio-temporal resolutions. This study aims to address this gap by introducing a flexible and integrative toolkit for efficient Bayesian inference on virtual brain models, called Virtual Brain Inference (VBI). This open-source toolkit provides fast simulations, taxonomy of feature extraction, efficient data storage and loading, and probabilistic machine learning algorithms, enabling biophysically interpretable inference from non-invasive and invasive recordings. Through in-silico testing, we demonstrate the accuracy and reliability of inference for commonly used whole-brain network models and their associated neuroimaging data. VBI shows potential to improve hypothesis evaluation in network neuroscience through uncertainty quantification, and contribute to advances in precision medicine by enhancing the predictive power of virtual brain models.}
}
```

This research has received funding from:

- EU's Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreements:
  - No. 101147319 (EBRAINS 2.0 Project)
  - No. 101137289 (Virtual Brain Twin Project)
  - No. 101057429 (project environMENTAL)
- Government grant managed by the Agence Nationale de la Recherche:
  - Reference ANR-22-PESN-0012 (France 2030 program)

The funders had no role in study design, data collection and analysis, decision to publish, or preparation of the manuscript.