pyNIBS 0.2024.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyNIBS-0.2024.8.dist-info/LICENSE +623 -0
- pyNIBS-0.2024.8.dist-info/METADATA +723 -0
- pyNIBS-0.2024.8.dist-info/RECORD +107 -0
- pyNIBS-0.2024.8.dist-info/WHEEL +5 -0
- pyNIBS-0.2024.8.dist-info/top_level.txt +1 -0
- pynibs/__init__.py +34 -0
- pynibs/coil.py +1367 -0
- pynibs/congruence/__init__.py +15 -0
- pynibs/congruence/congruence.py +1108 -0
- pynibs/congruence/ext_metrics.py +257 -0
- pynibs/congruence/stimulation_threshold.py +318 -0
- pynibs/data/configuration_exp0.yaml +59 -0
- pynibs/data/configuration_linear_MEP.yaml +61 -0
- pynibs/data/configuration_linear_RT.yaml +61 -0
- pynibs/data/configuration_sigmoid4.yaml +68 -0
- pynibs/data/network mapping configuration/configuration guide.md +238 -0
- pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +42 -0
- pynibs/data/network mapping configuration/configuration_for_testing.yaml +43 -0
- pynibs/data/network mapping configuration/configuration_modelTMS.yaml +43 -0
- pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +43 -0
- pynibs/data/network mapping configuration/output_documentation.md +185 -0
- pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +77 -0
- pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +1281 -0
- pynibs/expio/Mep.py +1518 -0
- pynibs/expio/__init__.py +8 -0
- pynibs/expio/brainsight.py +979 -0
- pynibs/expio/brainvis.py +71 -0
- pynibs/expio/cobot.py +239 -0
- pynibs/expio/exp.py +1876 -0
- pynibs/expio/fit_funs.py +287 -0
- pynibs/expio/localite.py +1987 -0
- pynibs/expio/signal_ced.py +51 -0
- pynibs/expio/visor.py +624 -0
- pynibs/freesurfer.py +502 -0
- pynibs/hdf5_io/__init__.py +10 -0
- pynibs/hdf5_io/hdf5_io.py +1857 -0
- pynibs/hdf5_io/xdmf.py +1542 -0
- pynibs/mesh/__init__.py +3 -0
- pynibs/mesh/mesh_struct.py +1394 -0
- pynibs/mesh/transformations.py +866 -0
- pynibs/mesh/utils.py +1103 -0
- pynibs/models/_TMS.py +211 -0
- pynibs/models/__init__.py +0 -0
- pynibs/muap.py +392 -0
- pynibs/neuron/__init__.py +2 -0
- pynibs/neuron/neuron_regression.py +284 -0
- pynibs/neuron/util.py +58 -0
- pynibs/optimization/__init__.py +5 -0
- pynibs/optimization/multichannel.py +278 -0
- pynibs/optimization/opt_mep.py +152 -0
- pynibs/optimization/optimization.py +1445 -0
- pynibs/optimization/workhorses.py +698 -0
- pynibs/pckg/__init__.py +0 -0
- pynibs/pckg/biosig/biosig4c++-1.9.5.src_fixed.tar.gz +0 -0
- pynibs/pckg/libeep/__init__.py +0 -0
- pynibs/pckg/libeep/pyeep.so +0 -0
- pynibs/regression/__init__.py +11 -0
- pynibs/regression/dual_node_detection.py +2375 -0
- pynibs/regression/regression.py +2984 -0
- pynibs/regression/score_types.py +0 -0
- pynibs/roi/__init__.py +2 -0
- pynibs/roi/roi.py +895 -0
- pynibs/roi/roi_structs.py +1233 -0
- pynibs/subject.py +1009 -0
- pynibs/tensor_scaling.py +144 -0
- pynibs/tests/data/InstrumentMarker20200225163611937.xml +19 -0
- pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +14 -0
- pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +6373 -0
- pynibs/tests/data/Xdmf.dtd +89 -0
- pynibs/tests/data/brainsight_niiImage_nifticoord.txt +145 -0
- pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +1434 -0
- pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +47 -0
- pynibs/tests/data/create_subject_testsub.py +332 -0
- pynibs/tests/data/data.hdf5 +0 -0
- pynibs/tests/data/geo.hdf5 +0 -0
- pynibs/tests/test_coil.py +474 -0
- pynibs/tests/test_elements2nodes.py +100 -0
- pynibs/tests/test_hdf5_io/test_xdmf.py +61 -0
- pynibs/tests/test_mesh_transformations.py +123 -0
- pynibs/tests/test_mesh_utils.py +143 -0
- pynibs/tests/test_nnav_imports.py +101 -0
- pynibs/tests/test_quality_measures.py +117 -0
- pynibs/tests/test_regressdata.py +289 -0
- pynibs/tests/test_roi.py +17 -0
- pynibs/tests/test_rotations.py +86 -0
- pynibs/tests/test_subject.py +71 -0
- pynibs/tests/test_util.py +24 -0
- pynibs/tms_pulse.py +34 -0
- pynibs/util/__init__.py +4 -0
- pynibs/util/dosing.py +233 -0
- pynibs/util/quality_measures.py +562 -0
- pynibs/util/rotations.py +340 -0
- pynibs/util/simnibs.py +763 -0
- pynibs/util/util.py +727 -0
- pynibs/visualization/__init__.py +2 -0
- pynibs/visualization/para.py +4372 -0
- pynibs/visualization/plot_2D.py +137 -0
- pynibs/visualization/render_3D.py +347 -0
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import warnings
|
|
3
|
+
import numpy as np
|
|
4
|
+
from sklearn.neighbors import KernelDensity
|
|
5
|
+
|
|
6
|
+
import pynibs
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def rsd_inverse_workhorse(elm_idx_list, mep, e):
    """
    Worker function for RSD inverse computation after Bungert et al. (2017) [1]_, call from
    :py:class:`multiprocessing.Pool`.
    Evaluates the RSD inverse for ``e = (E_mag, E_norm and/or E_tan)`` for the given zaps and elements.
    Parallelization happens over the element indices (``elm_idx_list``).

    Parameters
    ----------
    elm_idx_list : np.ndarray
        (chunksize) List of element indices, the congruence factor is computed for
    mep: list of :py:class:`~pynibs.expio.Mep`
        (n_cond) List of fitted Mep object instances for all conditions.
    e: list of list of np.ndarray of float
        [n_cond][n_datasets][n_elm] Tuple of ``n_datasets`` of the electric field to compute the congruence factor for,
        e.g. ``(e_mag, e_norm, e_tan)``.
        Each dataset is a list over all conditions containing the electric field component of interest

        * ``len(e) = n_cond``
        * ``len(e[0]) = n_comp`` (e.g: ``e_mag = e[0])``)

    Returns
    -------
    rsd_inv : np.ndarray of float
        (n_roi, n_datasets) RSD inverse in each element specified in ``elm_idx_list`` and for each input dataset.

    Notes
    -----
    .. [1] Bungert, A., Antunes, A., Espenhahn, S., & Thielscher, A. (2016).
       Where does TMS stimulate the motor cortex? Combining electrophysiological measurements and realistic field
       estimates to reveal the affected cortex position. Cerebral Cortex, 27(11), 5083-5094.
    """
    # ROI element indices as an integer array for fancy indexing
    roi_idx = np.array(elm_idx_list).astype(int)

    # (1, n_cond) row vector of motor thresholds, one per condition
    thresholds = np.array([[cond_fit.mt for cond_fit in mep]])

    # (n_cond, n_datasets, n_elm) field array; requires rectangular input, as before
    field = np.array(e)
    n_datasets = len(e[0])

    rsd_inv = np.empty((len(elm_idx_list), n_datasets))

    for ds in range(n_datasets):
        # (n_roi, n_cond): field in ROI elements, scaled by the per-condition motor threshold
        scaled = field[:, ds, roi_idx].transpose() * thresholds

        # relative standard deviation across conditions, inverted (1 = perfectly congruent)
        rsd_inv[:, ds] = 1 - np.std(scaled, axis=1) / np.mean(scaled, axis=1)

    return rsd_inv
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def dvs_likelihood(params, x, y, verbose=True, normalize=False, bounds=None):
    """
    Compute the negative log10-likelihood of observed data under a dual-variability-source (DVS) sigmoid model.

    The model jitters the stimulation intensities with Gaussian noise (``sigma_x``), maps them through
    ``pynibs.expio.fit_funs.sigmoid`` and adds Gaussian output noise (``sigma_y``). The resulting response
    distribution is estimated per intensity bin via a Gaussian kernel density estimate, and the observed
    data points are scored against it.

    Parameters
    ----------
    params : array-like of float
        (2 + n_p) Model parameters: ``params[0]`` = sigma_x, ``params[1]`` = sigma_y, the remaining entries
        are the sigmoid parameters.
    x : np.ndarray of float
        Observed stimulation intensities.
    y : np.ndarray of float
        Observed responses corresponding to ``x``.
    verbose : bool, default: True
        Print the likelihood, the parameters, and the elapsed time after evaluation.
    normalize : bool, default: False
        If True, ``params`` are given in [0, 1] and are denormalized to ``bounds`` before evaluation.
    bounds : list of tuple of float, optional
        (2 + n_p) Per-parameter (lower, upper) bounds used for denormalization.
        Defaults to ``[(1, 2), (1, 2)]``.

    Returns
    -------
    float
        Negative log10-likelihood of the observed data under the model (suitable for minimization).

    Notes
    -----
    The evaluation is stochastic (it draws fresh Gaussian noise on every call), so repeated calls with
    identical arguments return slightly different values.
    """
    start = time.time()

    # avoid a shared mutable default argument; materialize the documented default here instead
    if bounds is None:
        bounds = [(1, 2), (1, 2)]

    # extract parameters: the first two entries are the noise standard deviations,
    # the rest are the sigmoid parameters
    p = np.zeros(len(params) - 2)

    for i, p_ in enumerate(params):
        if i == 0:
            sigma_x = p_
        elif i == 1:
            sigma_y = p_
        else:
            p[i - 2] = p_

    # denormalize parameters from [0, 1] to bounds
    if normalize:
        sigma_x = sigma_x * (bounds[0][1] - bounds[0][0]) + bounds[0][0]
        sigma_y = sigma_y * (bounds[1][1] - bounds[1][0]) + bounds[1][0]

        for i, p_ in enumerate(p):
            p[i] = p[i] * (bounds[i + 2][1] - bounds[i + 2][0]) + bounds[i + 2][0]

    # noise levels cannot be negative; clip at zero
    if sigma_x < 0:
        sigma_x = 0

    if sigma_y < 0:
        sigma_y = 0

    # determine posterior of DVS model with test data
    x_pre = np.linspace(np.min(x), np.max(x), 200000)
    x_post = x_pre + np.random.normal(loc=0., scale=sigma_x, size=len(x_pre))
    y_post = pynibs.expio.fit_funs.sigmoid(x_post, p=p) + np.random.normal(loc=0., scale=sigma_y, size=len(x_pre))

    # bin data
    n_bins = 50
    dx_bins = (np.max(x_pre) - np.min(x_pre)) / n_bins
    x_bins_loc = np.linspace(np.min(x_pre) + dx_bins / 2, np.max(x_pre) - dx_bins / 2, n_bins)

    # determine probabilities of observations
    kde = KernelDensity(bandwidth=0.01, kernel='gaussian')

    # per-bin probability densities of the observed data points
    densities = []

    for i in range(n_bins):
        mask = np.logical_and(x_pre >= (x_bins_loc[i] - dx_bins / 2), x_pre < (x_bins_loc[i] + dx_bins / 2))
        mask_data = np.logical_and(x >= (x_bins_loc[i] - dx_bins / 2), x < (x_bins_loc[i] + dx_bins / 2))

        # skip bins that contain no observed data
        if np.sum(mask_data) == 0:
            continue

        # determine kernel density estimate
        try:
            kde_bins = kde.fit(y_post[mask][:, np.newaxis])
        except ValueError:
            warnings.warn("kde.fit(y_post[mask][:, np.newaxis]) yield NaN ... skipping bin")
            continue

        # get probability densities at data
        kde_y_post_bins = np.exp(kde_bins.score_samples(y[mask_data][:, np.newaxis]))

        densities.append(kde_y_post_bins)

    densities = np.concatenate(densities)

    # mask out zero probabilities to keep the logarithm finite
    densities[densities == 0] = 1e-100

    # determine log likelihood
    log_likelihood = np.sum(np.log10(densities))

    stop = time.time()

    if verbose:
        parameter_str = [f"p[{i_p}]={p_:.5f}" for i_p, p_ in enumerate(p)]
        print(f"Likelihood: {log_likelihood:.1f} / sigma_x={sigma_x:.2f}, sigma_y={sigma_y:.2f} " +
              ", ".join(parameter_str) + f"({stop - start:.2f} sec)")

    return -log_likelihood
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def e_focal_workhorse(elm_idx_list, e):
    """
    Worker function to determine the site of stimulation after Aonuma et al. (2018) [1]_,
    call from :py:class:`multiprocessing.Pool`.
    Determines the site of stimulation for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements by
    multiplying the per-condition electric fields element-wise.
    Parallelization happens over the element indices (``elm_idx_list``).

    Parameters
    ----------
    elm_idx_list : np.ndarray
        (chunksize) List of element indices, the congruence factor is computed for
    e: list of list of np.ndarray of float
        [n_cond][n_datasets][n_elm] Tuple of ``n_datasets`` of the electric field to compute the congruence factor for,
        e.g. ``(e_mag, e_norm, e_tan)``.
        Each dataset is a list over all conditions containing the electric field component of interest

        * ``len(e) = n_cond``
        * ``len(e[0]) = n_comp`` (e.g: ``e_mag = e[0])``)

    Returns
    -------
    e_focal : np.ndarray of float
        (n_roi, n_datasets) Focal electric field in each element specified in ``elm_idx_list`` and for each input.

    Notes
    -----
    .. [1] Aonuma, S., Gomez-Tames, J., Laakso, I., Hirata, A., Takakura, T., Tamura, M., & Muragaki, Y. (2018).
       A high-resolution computational localization method for transcranial magnetic stimulation mapping.
       NeuroImage, 172, 85-93.
    """
    n_datasets = len(e[0])

    # running product over conditions, initialized with ones
    e_focal = np.ones((len(elm_idx_list), n_datasets))

    for ds in range(n_datasets):
        for cond_fields in e:
            e_focal[:, ds] *= cond_fields[ds][elm_idx_list]

    return e_focal
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def e_cog_workhorse(elm_idx_list, mep, mep_params, e):
    """
    Worker function for electric field center of gravity (e_cog) computation after Opitz et al. (2013) [1]_
    - call from :py:class:`multiprocessing.Pool`. Evaluates the e_cog for ``e = (E_mag, E_norm and/or E_tan)``
    for the given zaps and elements. The electric field is weighted by the mean MEP amplitude (turning point of
    the sigmoid) and summed up. Parallelization happens over the element indices (``elm_idx_list``).

    Parameters
    ----------
    elm_idx_list : np.ndarray
        (chunksize) List of element indices, the congruence factor is computed for.
    mep : list of :py:class:`~pynibs.expio.Mep`
        (n_cond) List of fitted Mep object instances for all conditions.
    mep_params : np.ndarray of float
        (n_mep_params_total) List of all mep parameters of curve fits used to calculate the MEP, accumulated into
        one array.

        * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``, ``mep_#2_para_#1``,
          ``mep_#2_para_#1``, ...]

    e : list of list of np.ndarray of float
        [n_cond][n_datasets][n_elm] Tuple of n_datasets of the electric field to compute the congruence factor for,
        e.g. ``(e_mag, e_norm, e_tan)``.
        Each dataset is a list over all conditions containing the electric field component of interest

        * e.g.: ``len(e) = n_cond``
        * ``len(e[0]) = n_comp`` (e.g: ``e_mag = e[0])``)

    Returns
    -------
    e_cog : np.ndarray of float
        (n_roi, n_datasets) Weighted electric field sum in each element specified in ``elm_idx_list`` and for each
        input dataset.

    Notes
    -----
    .. [1] Opitz, A., Legon, W., Rowlands, A., Bickel, W. K., Paulus, W., & Tyler, W. J. (2013).
       Physiological observations validate finite element models for estimating subject-specific electric field
       distributions induced by transcranial magnetic stimulation of the human motor cortex. Neuroimage, 81, 253-264.
    """
    roi_idx = np.array(elm_idx_list).astype(int)
    flat_params = np.array(mep_params).flatten()
    n_conditions = len(mep)
    n_datasets = len(e[0])

    intensity_at_mean = np.empty((1, n_conditions))
    mep_at_mean = np.empty((1, n_conditions))

    # split the flat parameter vector into per-condition chunks and extract the weights
    offset = 0
    for c, cond_fit in enumerate(mep):
        chunk = flat_params[offset:(offset + cond_fit.popt.size)]
        offset += cond_fit.popt.size

        # stimulator intensity in [A/us] for mean MEP amplitude, i.e. turning point of pynibs.sigmoid
        # (1st parameter of the sigmoid)
        intensity_at_mean[0, c] = cond_fit.popt[0]

        # mean MEP amplitude (function value at 1st parameter of pynibs.sigmoid)
        mep_at_mean[0, c] = cond_fit.eval(chunk[0], chunk)

    field = np.array(e)
    weights = intensity_at_mean * mep_at_mean
    e_cog = np.empty((len(elm_idx_list), n_datasets))

    for ds in range(n_datasets):
        e_mat = field[:, ds, roi_idx].transpose()
        e_cog[:, ds] = np.sum(e_mat * weights, axis=1)

    return e_cog
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
import pynibs
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def stimulation_threshold(elm_idx_list, mep, mep_params, n_samples, e, c_factor_percentile=95, mep_threshold=0.5,
                          c_factor=None, c_function=None, t_function=None):
    """
    Computes the stimulation threshold in terms of the electric field in [V/m]. The threshold is defined as the
    electric field value where the mep exceeds mep_threshold. The average value is taken over all mep curves in each
    condition and over an area where the congruence factor exceeds ``c_factor_percentile``.

    Parameters
    ----------
    elm_idx_list : np.ndarray
        (chunksize) List of element indices, the congruence factor is computed for.
    mep : list of Mep object instances
        (n_cond) List of fitted :py:class:`~pynibs.expio.Mep` object instances for all conditions.
    mep_params : np.ndarray of float [n_mep_params_total]
        List of all mep parameters of curve fits used to calculate the MEP (accumulated into one array)

        * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``, ``mep_#2_para_#1``, ``mep_#2_para_#1``, ...]

    n_samples : int
        Number of data points to generate discrete mep and e curves.
    e : list of list of np.ndarray of float
        [n_cond][n_datasets][n_elm] Tuple of ``n_datasets`` of the electric field to compute the congruence factor for,
        e.g. ``(e_mag, e_norm, e_tan)``.
        Each dataset is a list over all conditions containing the electric field component of interest

        * e.g.: ``len(e) = n_cond``
        * ``len(e[0]) = n_comp`` (e.g: ``e_mag = e[0])``)
    c_factor_percentile : float
        Percentile of the c_factor taken into account for the threshold evaluation. Only c_factors are considered
        exceeding this.
    mep_threshold : float
        MEP value in [mV], which has to be exceeded for threshold definition.
    c_factor : np.ndarray of float, optional
        (n_roi, n_datasets) Congruence factor in each element specified in elm_idx_list and for each input dataset.
        If None (default) or all-zero, it is computed with ``c_function``.
    c_function : function
        Defines the function to use during c_gpc to calculate the congruence factor.

        * congruence_factor_curveshift_workhorse: determines the average curve shift
        * congruence_factor_curveshift_workhorse_stretch_correction: determines the average curve shift
        * congruence_factor_curveshift_workhorse_stretch_correction_variance: determines the average curve shift
        * congruence_factor_variance_workhorse: evaluates the variance of the shifting and stretching parameters

    t_function : function
        Defines the function to determine the stimulation_threshold.

        * stimulation_threshold_mean_mep_threshold: uses mep_threshold to determine the corresponding e_threshold over
          all conditions and takes the average values as the stimulation threshold
        * stimulation_threshold_pynibs.sigmoid: Fits a new pynibs.sigmoid using all datapoints in the mep-vs-E space and
          evaluates the threshold from the turning point or the intersection of the derivative in the crossing point
          with the e-axis

    Returns
    -------
    stim_threshold_avg: float
        Average stimulation threshold in [V/m] where c_factor is greater than ``c_factor_percentile``.
    stim_threshold_std: float
        Standard deviation of the stimulation threshold (``np.nan`` for ``intensity_thresh``).

    Raises
    ------
    NotImplementedError
        If ``t_function`` is not one of the supported threshold functions.
    """
    n_datasets = len(e[0])
    n_conditions = len(mep)
    mep_params = np.array(mep_params).flatten()

    # rearrange mep parameters to individual conditions
    mep_params_cond = []
    start_idx = 0

    for i_cond in range(n_conditions):
        mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
        start_idx = start_idx + mep[i_cond].popt.size

    # calculate mep curves per condition
    mep_curve = []
    intensities = []

    for i_cond in range(n_conditions):
        intensities.append(np.linspace(mep[i_cond].x_limits[0], mep[i_cond].x_limits[1], n_samples))
        mep_curve.append(mep[i_cond].eval(intensities[-1], mep_params_cond[i_cond]))

    # determine congruence factor, if not provided
    # bugfix: guard against the default c_factor=None before calling .any(),
    # which previously raised AttributeError when no c_factor was passed
    if c_factor is None or not c_factor.any():
        if c_function == pynibs.congruence.cf_curveshift_workhorse or \
                c_function == pynibs.congruence.cf_curveshift_workhorse_stretch_correction or \
                c_function == pynibs.congruence.cf_curveshift_workhorse_stretch_correction_variance:
            c_factor = c_function(elm_idx_list,
                                  mep=mep,
                                  mep_params=mep_params,
                                  n_samples=n_samples,
                                  e=e)

        elif c_function == pynibs.congruence.cf_variance_workhorse:
            c_factor = c_function(elm_idx_list,
                                  mep=mep,
                                  mep_params=mep_params,
                                  e=e)

    # determine elements where the congruence factor exceeds c_factor_percentile
    elm_idx = []
    c_factor_percentile_value = []

    for i_data in range(n_datasets):
        # NaN entries are excluded from the percentile computation
        c_factor_percentile_value.append(np.percentile(c_factor[np.logical_not(np.isnan(c_factor[:, i_data])), i_data],
                                                       c_factor_percentile))
        elm_idx.append(np.where(c_factor[:, i_data] > c_factor_percentile_value[i_data])[0])

    # dispatch to the requested threshold function
    if t_function == mean_mep_threshold:
        stim_threshold_avg, stim_threshold_std = \
            mean_mep_threshold(elm_idx=elm_idx,
                               mep_curve=mep_curve,
                               intensities=intensities,
                               e=e,
                               mep_threshold=mep_threshold)

    elif t_function == sigmoid_thresh:
        stim_threshold_avg, stim_threshold_std = \
            sigmoid_thresh(elm_idx=elm_idx,
                           mep_curve=mep_curve,
                           intensities=intensities,
                           e=e,
                           mep_threshold=mep_threshold)

    elif t_function == intensity_thresh:
        stim_threshold_avg = \
            intensity_thresh(mep_curve=mep_curve,
                             intensities=intensities,
                             mep_threshold=mep_threshold)
        # intensity_thresh yields a single condition value; no spread available
        stim_threshold_std = np.nan

    else:
        raise NotImplementedError('Provided t_function not implemented yet!')

    return stim_threshold_avg, stim_threshold_std
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def mean_mep_threshold(elm_idx, mep_curve, intensities, e, mep_threshold):
    """
    Determines the stimulation threshold by calculating the average electric field over all conditions, where the
    mep curves exceed the value of mep_threshold (in [mV]).

    Parameters
    ----------
    elm_idx : list of np.ndarray of int
        [n_datasets](n_elements) Element indices where the congruence factor exceeds a certain percentile,
        defined during the call of :py:meth:`stimulation_threshold`.
    mep_curve : list of np.ndarray of float
        [n_conditions](n_samples) MEP curve values for every condition.
    intensities : list of np.ndarray of float
        [n_conditions](n_samples) To the MEP values corresponding stimulator intensities in [A/us].
    e : list of list of np.ndarray of float
        [n_cond][n_datasets][n_elm] Tuple of n_datasets of the electric field to compute the congruence factor for,
        e.g. ``(e_mag, e_norm, e_tan)``.
        Each dataset is a list over all conditions containing the electric field component of interest

        * e.g.: ``len(e) = n_cond``
        * ``len(e[0]) = n_comp`` (e.g: ``e_mag = e[0])``)

    mep_threshold : float
        MEP value in [mV], which has to be exceeded for threshold definition.

    Returns
    -------
    stim_threshold_avg : list of float
        [n_datasets] Average stimulation threshold in [V/m] where c_factor is greater than c_factor_percentile.
    stim_threshold_std : list of float
        [n_datasets] Standard deviation of the stimulation threshold over elements and conditions.
    """
    n_conditions = len(mep_curve)
    n_datasets = len(e[0])

    # per-dataset (n_elements, n_conditions) arrays of thresholds; NaN marks "not determined"
    stim_threshold_cond = [np.full((elm_idx[i_data].size, n_conditions), np.nan) for i_data in range(n_datasets)]
    stim_threshold_avg = [np.nan for _ in range(n_datasets)]
    stim_threshold_std = [np.nan for _ in range(n_datasets)]

    for i_cond in range(n_conditions):
        # first sample where this condition's MEP curve exceeds the threshold
        e_threshold_idx = np.where(mep_curve[i_cond] > mep_threshold)[0]

        # bugfix: use .size instead of .any() — .any() is False for the index array [0],
        # i.e. the condition was silently skipped when the curve exceeded the threshold at sample 0
        if e_threshold_idx.size > 0:
            for i_data in range(n_datasets):
                # electric field (per unit intensity) scaled by the threshold intensity
                stim_threshold_cond[i_data][:, i_cond] = e[i_cond][i_data][elm_idx[i_data]] * \
                                                         intensities[i_cond][e_threshold_idx[0]]

    for i_data in range(n_datasets):
        # average/spread over all valid (non-NaN) element/condition thresholds
        valid = stim_threshold_cond[i_data][np.logical_not(np.isnan(stim_threshold_cond[i_data]))]
        stim_threshold_avg[i_data] = np.mean(valid)
        stim_threshold_std[i_data] = np.std(valid)

    return stim_threshold_avg, stim_threshold_std
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def sigmoid_thresh(elm_idx, mep_curve, intensities, e, mep_threshold):
    """
    Determines the stimulation threshold by calculating an equivalent :py:class:`pynibs.expio.Mep.sigmoid`
    over all conditions. The stimulation threshold is the electric field value where the mep curves exceed the value of
    mep_threshold (in [mV]).

    Parameters
    ----------
    elm_idx : list of np.ndarray of int
        [n_datasets](n_elements) Element indices where the congruence factor exceeds a certain percentile,
        defined during the call of :py:meth:`stimulation_threshold`.
    mep_curve : list of np.ndarray of float
        [n_conditions](n_samples) MEP curve values for every condition.
    intensities : list of np.ndarray of float
        [n_conditions](n_samples) To the MEP values corresponding stimulator intensities in [A/us].
    e : list of list of np.ndarray of float
        [n_cond][n_datasets][n_elm] Tuple of n_datasets of the electric field to compute the congruence factor for,
        e.g. ``(e_mag, e_norm, e_tan)``.
        Each dataset is a list over all conditions containing the electric field component of interest

        * e.g.: ``len(e) = n_cond``
        * ``len(e[0]) = n_comp`` (e.g: ``e_mag = e[0])``)

    mep_threshold : float
        MEP value in [mV], which has to be exceeded for threshold definition

    Returns
    -------
    stim_threshold_avg : list of float
        [n_datasets] Average stimulation threshold in [V/m] where c_factor is greater than c_factor_percentile.
    stim_threshold_std : list of float
        [n_datasets] Standard deviation of the stimulation threshold over elements.
    """
    n_conditions = len(mep_curve)
    n_datasets = len(e[0])
    stim_threshold_elm = [[] for _ in range(n_datasets)]
    stim_threshold_avg = [[] for _ in range(n_datasets)]
    stim_threshold_std = [[] for _ in range(n_datasets)]

    # accumulate all data values in one array
    mep_curve_all = np.hstack(mep_curve)

    for i_data in range(n_datasets):
        print(('Evaluating stimulation threshold for dataset {}/{}'.format(i_data + 1, n_datasets)))
        n_elms = len(elm_idx[i_data])
        stim_threshold_elm[i_data] = np.zeros(n_elms) * np.nan

        for i_elm, elm in enumerate(elm_idx[i_data]):
            print((' > Element {}/{}'.format(i_elm, n_elms)))

            # accumulate all data values in one array
            e_all = []

            for i_cond in range(n_conditions):
                e_all.append(e[i_cond][i_data][elm] * intensities[i_cond])
            e_all = np.hstack(e_all)

            # fit data to function
            mep = pynibs.Mep(intensities=e_all, mep=mep_curve_all, intensity_min_threshold=0, mep_min_threshold=0)
            mep.fit = mep.run_fit_multistart(pynibs.expio.fit_funs.sigmoid,
                                             x=e_all,
                                             y=mep_curve_all,
                                             p0=[70, 0.6, 2],
                                             constraints=None,
                                             verbose=False,
                                             n_multistart=20)

            # read out optimal function parameters from best fit
            try:
                for p in ['x0', 'r', 'amp']:
                    mep.popt.append(mep.fit.best_values[p])

                mep.popt = np.asarray(mep.popt)
                mep.cvar = np.asarray(mep.fit.covar)
                mep.pstd = np.sqrt(np.diag(mep.cvar))
                mep.fun = pynibs.expio.fit_funs.sigmoid

                # determine stimulation threshold from the fitted curve
                e_fit = np.linspace(np.min(e_all), np.max(e_all), 200)
                mep_fit = mep.eval_opt(e_fit)
                e_threshold_idx = np.where(mep_fit > mep_threshold)[0]

                # bugfix: use .size instead of .any() — .any() is False for the index array [0],
                # i.e. a fit exceeding the threshold from the first sample was silently discarded
                if e_threshold_idx.size > 0:
                    stim_threshold_elm[i_data][i_elm] = e_fit[e_threshold_idx[0]]

            except (AttributeError, ValueError):
                print(' > Warning: pynibs.sigmoid in element could not be fitted!')
                stim_threshold_elm[i_data][i_elm] = np.nan

        # determine mean threshold over all elements (ignoring failed fits)
        stim_threshold_avg[i_data] = np.mean(stim_threshold_elm[i_data]
                                             [np.logical_not(np.isnan(stim_threshold_elm[i_data]))])
        stim_threshold_std[i_data] = np.std(stim_threshold_elm[i_data]
                                            [np.logical_not(np.isnan(stim_threshold_elm[i_data]))])

    return stim_threshold_avg, stim_threshold_std
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def intensity_thresh(mep_curve, intensities, mep_threshold):
    """
    Determines the stimulation threshold of one particular condition (usually the most sensitive e.g. M1-45). The
    stimulation threshold is the stimulator intensity value in [A/us] where the mep curves exceed the value of
    mep_threshold (in [mV]).

    Parameters
    ----------
    mep_curve: list [1] of np.ndarray of float [n_samples]
        MEP curve values for every conditions
    intensities: list [1] of np.ndarray of float [n_samples]
        To the MEP values corresponding stimulator intensities in [A/us]
    mep_threshold: float
        MEP value in [mV], which has to be exceeded for threshold definition

    Returns
    -------
    stim_threshold: float
        Stimulator intensity in [A/us] at which the MEP curve first exceeds ``mep_threshold``;
        ``np.nan`` if the threshold is never exceeded.
    """
    stim_threshold = np.nan

    # first sample where the MEP curve exceeds the threshold
    i_threshold_idx = np.where(mep_curve[0] > mep_threshold)[0]

    # bugfix: use .size instead of .any() — .any() is False for the index array [0],
    # i.e. a curve exceeding the threshold from the first sample previously returned NaN
    if i_threshold_idx.size > 0:
        stim_threshold = intensities[0][i_threshold_idx[0]]

    return stim_threshold
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
########################################################################################################################
|
|
2
|
+
# This configuration file can be used to set values in pynibs.Element() that influence the regression. #
|
|
3
|
+
########################################################################################################################
|
|
4
|
+
#
|
|
5
|
+
#
|
|
6
|
+
############################################## General regression concept ##############################################
|
|
7
|
+
# While trying to find the best fit, many possible regression functions are calculated and the best one is picked.
|
|
8
|
+
# Usually, that is the one that explains the most noise, meaning that the data points have a minimal distance to the
|
|
9
|
+
# regression function. The function exp0 always looks like the following term, with varying coefficients x_0 and r:
|
|
10
|
+
## y = e^{r(x-x_0)}
|
|
11
|
+
## (r : Slope parameter (steepness))
|
|
12
|
+
## (x0 : Horizontal shift along the abscissa)
|
|
13
|
+
|
|
14
|
+
# The usual approach is to start with a function described by some initial values (init_vals) for x_0 and r and try out
|
|
15
|
+
# many other values within the value limits (limits), minimizing the distance between regression function and real data.
|
|
16
|
+
# During refitting, different initial values (calculated within the random_vals_init_range) are used to see whether a
|
|
17
|
+
# better result can be achieved.
|
|
18
|
+
|
|
19
|
+
############################################### Picking suitable values ################################################
|
|
20
|
+
# Initial values for the regression coefficients
|
|
21
|
+
init_vals:
|
|
22
|
+
r: 0.1
|
|
23
|
+
x0: 10
|
|
24
|
+
# Strategy: Pick the values you may expect the result to have, or that are not far-fetched. Picking reasonable initial
|
|
25
|
+
# values can speed up the fitting procedure.
|
|
26
|
+
|
|
27
|
+
# Values that limit all possible regression coefficients:
|
|
28
|
+
limits:
|
|
29
|
+
r:
|
|
30
|
+
- 1.0e-12
|
|
31
|
+
- 100
|
|
32
|
+
x0:
|
|
33
|
+
- 0
|
|
34
|
+
- 1000
|
|
35
|
+
# Strategy: Rather wide range recommended, since a few points could have very extreme values and therefore be
|
|
36
|
+
# approximated by a function very different from the expected values. E.g. a multiple of the presented data range.
|
|
37
|
+
# (Note: This should not be used to factor out outliers, since an approximation will still be calculated, but with too
|
|
38
|
+
# narrow limits it will just be a very bad one.)
|
|
39
|
+
|
|
40
|
+
# Value range for the calculation of new initial values for refits:
|
|
41
|
+
random_vals_init_range:
|
|
42
|
+
r:
|
|
43
|
+
- 0
|
|
44
|
+
- 0.2
|
|
45
|
+
x0:
|
|
46
|
+
- 0
|
|
47
|
+
- 10
|
|
48
|
+
# Strategy: During refitting, new initial values are calculated by picking a random number between these lower and upper
|
|
49
|
+
# bounds, so the range should be a lot smaller than 'limits' and somewhat symmetrical around the 'init_vals'.
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
# Example usage:
|
|
53
|
+
##
|
|
54
|
+
## configfile = "configuration_exp0.yaml"
|
|
55
|
+
## with open(configfile, "r") as yamlfile:
|
|
56
|
+
## config = yaml.load(yamlfile, Loader=yaml.FullLoader)
|
|
57
|
+
##
|
|
58
|
+
## pynibs.regress_data(...,
|
|
59
|
+
## **config)
|