pyNIBS 0.2024.8-py3-none-any.whl → 0.2026.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pynibs/__init__.py +26 -14
- pynibs/coil/__init__.py +6 -0
- pynibs/{coil.py → coil/coil.py} +213 -543
- pynibs/coil/export.py +508 -0
- pynibs/congruence/__init__.py +4 -1
- pynibs/congruence/congruence.py +37 -45
- pynibs/congruence/ext_metrics.py +40 -11
- pynibs/congruence/stimulation_threshold.py +1 -2
- pynibs/expio/Mep.py +120 -370
- pynibs/expio/__init__.py +10 -0
- pynibs/expio/brainsight.py +34 -37
- pynibs/expio/cobot.py +25 -25
- pynibs/expio/exp.py +10 -7
- pynibs/expio/fit_funs.py +3 -0
- pynibs/expio/invesalius.py +70 -0
- pynibs/expio/localite.py +190 -91
- pynibs/expio/neurone.py +139 -0
- pynibs/expio/signal_ced.py +345 -2
- pynibs/expio/visor.py +16 -15
- pynibs/freesurfer.py +34 -33
- pynibs/hdf5_io/hdf5_io.py +149 -132
- pynibs/hdf5_io/xdmf.py +35 -31
- pynibs/mesh/__init__.py +1 -1
- pynibs/mesh/mesh_struct.py +77 -92
- pynibs/mesh/transformations.py +121 -21
- pynibs/mesh/utils.py +191 -99
- pynibs/models/_TMS.py +2 -1
- pynibs/muap.py +1 -2
- pynibs/neuron/__init__.py +10 -0
- pynibs/neuron/models/mep.py +566 -0
- pynibs/neuron/neuron_regression.py +98 -8
- pynibs/optimization/__init__.py +12 -2
- pynibs/optimization/{optimization.py → coil_opt.py} +157 -133
- pynibs/optimization/multichannel.py +1174 -24
- pynibs/optimization/workhorses.py +7 -8
- pynibs/regression/__init__.py +4 -2
- pynibs/regression/dual_node_detection.py +229 -219
- pynibs/regression/regression.py +92 -61
- pynibs/roi/__init__.py +4 -1
- pynibs/roi/roi_structs.py +19 -21
- pynibs/roi/{roi.py → roi_utils.py} +56 -33
- pynibs/subject.py +24 -14
- pynibs/util/__init__.py +20 -4
- pynibs/util/dosing.py +4 -5
- pynibs/util/quality_measures.py +39 -38
- pynibs/util/rotations.py +116 -9
- pynibs/util/{simnibs.py → simnibs_io.py} +29 -19
- pynibs/util/{util.py → utils.py} +20 -22
- pynibs/visualization/para.py +4 -4
- pynibs/visualization/render_3D.py +4 -4
- pynibs-0.2026.1.dist-info/METADATA +105 -0
- pynibs-0.2026.1.dist-info/RECORD +69 -0
- {pyNIBS-0.2024.8.dist-info → pynibs-0.2026.1.dist-info}/WHEEL +1 -1
- pyNIBS-0.2024.8.dist-info/METADATA +0 -723
- pyNIBS-0.2024.8.dist-info/RECORD +0 -107
- pynibs/data/configuration_exp0.yaml +0 -59
- pynibs/data/configuration_linear_MEP.yaml +0 -61
- pynibs/data/configuration_linear_RT.yaml +0 -61
- pynibs/data/configuration_sigmoid4.yaml +0 -68
- pynibs/data/network mapping configuration/configuration guide.md +0 -238
- pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +0 -42
- pynibs/data/network mapping configuration/configuration_for_testing.yaml +0 -43
- pynibs/data/network mapping configuration/configuration_modelTMS.yaml +0 -43
- pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +0 -43
- pynibs/data/network mapping configuration/output_documentation.md +0 -185
- pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +0 -77
- pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +0 -1281
- pynibs/tests/data/InstrumentMarker20200225163611937.xml +0 -19
- pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +0 -14
- pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +0 -6373
- pynibs/tests/data/Xdmf.dtd +0 -89
- pynibs/tests/data/brainsight_niiImage_nifticoord.txt +0 -145
- pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +0 -1434
- pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +0 -47
- pynibs/tests/data/create_subject_testsub.py +0 -332
- pynibs/tests/data/data.hdf5 +0 -0
- pynibs/tests/data/geo.hdf5 +0 -0
- pynibs/tests/test_coil.py +0 -474
- pynibs/tests/test_elements2nodes.py +0 -100
- pynibs/tests/test_hdf5_io/test_xdmf.py +0 -61
- pynibs/tests/test_mesh_transformations.py +0 -123
- pynibs/tests/test_mesh_utils.py +0 -143
- pynibs/tests/test_nnav_imports.py +0 -101
- pynibs/tests/test_quality_measures.py +0 -117
- pynibs/tests/test_regressdata.py +0 -289
- pynibs/tests/test_roi.py +0 -17
- pynibs/tests/test_rotations.py +0 -86
- pynibs/tests/test_subject.py +0 -71
- pynibs/tests/test_util.py +0 -24
- /pynibs/{regression/score_types.py → neuron/models/m1_montbrio.py} +0 -0
- {pyNIBS-0.2024.8.dist-info → pynibs-0.2026.1.dist-info/licenses}/LICENSE +0 -0
- {pyNIBS-0.2024.8.dist-info → pynibs-0.2026.1.dist-info}/top_level.txt +0 -0
pynibs/optimization/multichannel.py

@@ -1,11 +1,27 @@
 """
-Functions to optimize single
+Functions to optimize single channel currents for multichannel TMS arrays.
 """
+import h5py
+import math
+import tqdm
+import scipy
+import warnings
+import itertools
 import numpy as np
-
+import pandas as pd
+from functools import partial
+from scipy.optimize import minimize, basinhopping
+from tqdm.contrib.concurrent import process_map  # or thread_map
+try:
+    import terminalplot
+except ImportError:
+    pass


-def get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt=None, opt_target='elms'):
+def get_score_raw(x, e, n_stim, n_ele, n_channel,
+                  x_opt=None, opt_target='elms', fun=None, fun_args=None, res_scale=.001,
+                  metric='mean',
+                  verbose=False):
     """
     Compute score for e-efield cross correlations.
     Non-normalized score is returned, so you need to do sth like
@@ -28,6 +44,14 @@ def get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt=None, opt_target='elms')
         (n_pre_opt,) Previously optimized channel currents.
     opt_target : str, default: 'elms'
         Optimization target. 'elms' for optimizing decorrelations of elements, 'stims' for stimulations.
+    fun : function, optional
+        Function to scale the e-field.
+    fun_args : tuple, optional
+        Arguments for the function.
+    res_scale : float, default: 1
+        Scaling factor for the result.
+    verbose : book, default: False
+        Print out progress.

     Returns
     -------
@@ -48,7 +72,6 @@ def get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt=None, opt_target='elms')
     # # e_.shape = (n_ele=3,)
     # # np.reshape(e_, (n_ele, 3)).shape = (n_ele,3)
     # # a.shape = (n_ele,)
-
     if e.ndim == 2:
         e_mag = np.vstack([np.linalg.norm(np.reshape(e_, (n_ele, 3)), axis=1) for e_ in e_vec.T]).T
     elif e.ndim == 3:
@@ -57,17 +80,115 @@ def get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt=None, opt_target='elms')
         raise ValueError
     # e_mag.shape = (n_ele, n_zaps)

+    # scale e_mag if necessary
+    if fun is not None:
+        if fun_args is None:
+            fun_args = {}
+        e_mag = fun(e_mag, **fun_args)
+
     # determine average correlation coefficient
     # r_avg = 1/((n_ele**2-n_ele)/2) * np.sum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+
     if opt_target == 'elms':
-        r_sum = np.nansum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+        # r_sum = np.nansum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+        # r_sum = np.sum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+        # a = np.corrcoef(e_mag)
+        # np.mean(a)
+        # np.mean(np.triu(a, k=0))
+        # c = np.tril(a, k=-1)
+        # np.tril(a, k=-1).sum() == np.triu(a, k=1).sum()
+        # np.mean(np.tril(a, k=0))
+        if metric == "mean":
+            r_sum = np.sum(np.triu(np.corrcoef(e_mag), k=1))
+        elif metric =="max":
+            r_sum = np.max(np.triu(np.abs(np.corrcoef(e_mag)), k=1))
+        else:
+            raise ValueError
+
+        if np.isnan(r_sum):
+            r_sum = n_ele**2
     elif opt_target == 'stims':
-        r_sum = np.nansum(np.abs(np.triu(np.corrcoef(e_mag.T), k=1)))
+        # r_sum = np.nansum(np.abs(np.triu(np.corrcoef(e_mag.T), k=1)))
+        # r_sum = np.sum(np.abs(np.triu(np.corrcoef(e_mag.T), k=1)))
+        if metric == "mean":
+            r_sum = np.sum(np.triu(np.corrcoef(e_mag.T), k=1))
+        elif metric == "max":
+            r_sum = np.max(np.triu(np.abs(np.corrcoef(e_mag.T)), k=1))
+        else:
+            raise ValueError
+        if np.isnan(r_sum):
+            r_sum = n_stim**2
     else:
         raise ValueError("opt_target has to be 'elms' or 'stims'")

-
+    if verbose and np.random.randint(0,1000) > 998:
+        bins=200
+        y_hist, x_hist = np.histogram(e_mag, bins=bins)
+        # y_hist, x_hist = np.histogram(x, bins=bins)
+        terminalplot.plot(list(x_hist), list(y_hist), rows=50, columns=80)
+        # currents_all_zaps_channels = np.reshape(x, (n_channel, n_stim))
+        # didt_constr_res = (np.sum(currents_all_zaps_channels ** 2, axis=0) < 2*0.8**2).sum() - n_stim
+        didt_constr_res = strain_constr(x, 0.64, 5, 20)
+        # print(f"")
+        print(f"didt_constr: {didt_constr_res}, score: {r_sum / res_scale}")
+    return r_sum / res_scale
+

+def strain_constr(x, max_strain, n_channel, n_stim):
+    """
+    Compute mechanical strain on 5 channel planar mTMS coil
+    """
+    # Ensure weights is a numpy array and take the absolute value
+    currents = np.reshape(x, (n_stim, n_channel)).T
+    pulse_strain = calculate_strain(currents)
+
+    return -(pulse_strain > max_strain).sum()
+
+
+def strain_constr_min(x, max_strain, n_channel, n_stim):
+    """
+    Compute mechanical strain on 5 channel planar mTMS coil
+    """
+    # Ensure weights is a numpy array and take the absolute value
+    currents = np.reshape(x, (n_stim, n_channel)).T
+    pulse_strain = calculate_strain(currents)
+
+    return -(pulse_strain < max_strain).sum()
+
+
+def calculate_strain(currents):
+    """
+    Computes planar mTMS strain from single channel currents.
+
+    Written by Mikael Laine, 2025.
+
+    Parameters
+    ----------
+    currents : list
+
+    Returns
+    -------
+    pulse_strain : float
+
+    """
+    # Ensure weights is a numpy array and take the absolute value
+    currents = np.abs(np.array(currents))
+
+    # Calculate coil 5 strain
+    term1_5 = np.sqrt(0.9 * currents[0] ** 2 + currents[1] ** 2) * 0.2
+    term2_5 = np.sqrt(0.9 * currents[2] ** 2 + currents[3] ** 2)
+    coil_5_strain = (term1_5 + term2_5) * currents[4]
+
+    # Calculate coil 1 strain
+    term1_1 = 0.9 * currents[1]
+    term2_1 = 0.7 * np.sqrt(currents[2] ** 2 + 0.9 * currents[3] ** 2)
+    term3_1 = 0.2 * currents[4]
+    coil_1_strain = (term1_1 + term2_1 + term3_1) * currents[0]
+
+    # Find the maximum of the two strain values
+    pulse_strain = np.maximum(coil_5_strain, coil_1_strain)
+
+    return pulse_strain

 def get_score_raw_single_channel(x, e, x_opt=None):
     """
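The rewritten scoring above replaces the fixed `nansum` of absolute correlations with a `metric` switch ('mean' sums the upper triangle of the correlation matrix, 'max' takes the worst pairwise value), adds an optional scaling `fun`, and introduces the `strain_constr`/`calculate_strain` helpers for the 5-channel planar mTMS coil. The standalone sketch below is illustrative only (not package code; all names are local to the sketch) and shows what the two metrics compute on a toy |E| matrix, plus the normalization that `get_score` applies later.

```python
# Illustrative sketch: 'mean' vs. 'max' decorrelation score on a toy |E| matrix.
import numpy as np

rng = np.random.default_rng(0)
e_mag = rng.random((6, 20))                 # hypothetical |E|, shape (n_ele, n_zaps)

r = np.corrcoef(e_mag)                      # element-wise correlation over stimulations
upper = np.triu(r, k=1)                     # each element pair counted once

score_mean_like = np.sum(upper)                         # 'mean' branch: summed upper triangle
score_max_like = np.max(np.triu(np.abs(r), k=1))        # 'max' branch: worst pairwise |r|

n_ele = e_mag.shape[0]
normalized = score_mean_like / ((n_ele ** 2 - n_ele) / 2)  # as done later in get_score
print(score_mean_like, score_max_like, normalized)
```

For the strain constraint, the flat current vector is first reshaped to (n_stim, n_channel) and transposed before `calculate_strain` combines the per-channel magnitudes.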
@@ -105,7 +226,9 @@ def get_score_raw_single_channel(x, e, x_opt=None):


 def optimize_currents(e, n_stim, currents_prev=None, seed=None,
-                      maxiter=200, method='SLSQP', opt_target='elms',
+                      maxiter=200, basiniter=50, method='SLSQP', opt_target='elms',
+                      fun=None, fun_args=None, bounds=None, x0=None, constraints=None,
+                      verbose=False):
     """
     Optimize the currents for a multichannel TMS array by minimizing e-fields cross-correlation.

@@ -123,10 +246,20 @@ def optimize_currents(e, n_stim, currents_prev=None, seed=None,
         Max iterations of the optimization.
     method : str, default: 'SLSQP'
         Optimization method.
-    verbose : bool, default: False
-        Print additional information.
     opt_target : str, default: 'elms'
         Optimization target. 'elms' for optimizing decorrelations of elements, 'stims' for stimulations.
+    fun : function, optional
+        Function to scale the e-field.
+    fun_args : tuple, optional
+        Arguments for the fun function.
+    bounds : tuple, optional
+        Bounds of the optimization problem.
+    x0 : np.ndarray of float, optional
+        Initial guess for the optimization problem.
+    constraints : list of dict, optional
+        Constraints for the optimization problem.
+    verbose : bool, default: False
+        Print additional information.

     Returns
     -------
@@ -134,6 +267,7 @@ def optimize_currents(e, n_stim, currents_prev=None, seed=None,
         (n_channels, n_stims) The optimized currents to drive the multichannel array.
     score : float
         Final score of the solution.
+    res
     """
     if e.ndim == 2:
         n_channel = e.shape[1]
@@ -150,24 +284,51 @@ def optimize_currents(e, n_stim, currents_prev=None, seed=None,
         n_stim_opt = n_stim - currents_prev.shape[1]

         if n_stim_opt <= 0:
-            raise ValueError("N_stim has to be larger
+            raise ValueError(f"N_stim({n_stim}) has to be larger "
+                             f"than already optimized optimal values ({currents_prev.shape[1]}).")

     # initial guess for currents for all channels * stimulations
     if seed is not None:
         np.random.seed(seed)
-    x0
-
+    if x0 is None:
+        x0 = (np.random.rand(n_channel * n_stim_opt) * 2) - 1
+
     if verbose:
         print(f"n_ele: {n_ele}, n_channels: {n_channel}, n_stims: {n_stim}")

+    print(len(x0))
+    if bounds is None:
+        bounds = [(-1, 1) for _ in range(len(x0))]
+    else:
+        assert len(bounds) == len(x0)
+
+    res_scale = 1
+    metric = 'mean'
     # optimization algorithm
-
-
-
-
-
-
-
+    minimizer_kwargs = {'args': (e, n_stim, n_ele, n_channel, currents_prev, opt_target, fun, fun_args, res_scale, metric),
+                        'method': method,
+                        'options': {'disp': False, 'maxiter': maxiter},
+                        'bounds': bounds,
+                        'tol': 1e-4,
+                        'constraints': constraints}
+
+    # def print_fun(x, f, accepted):
+    #     print("at minimum %.4f accepted %d" % (f, int(accepted)))
+
+    print_fun = None
+
+    res = basinhopping(get_score_raw, x0, minimizer_kwargs=minimizer_kwargs,
+                       niter=basiniter, disp=True, callback=print_fun,
+                       T=.25, interval=10, stepsize=.1, target_accept_rate=.5)
+    print(res.fun, res.success, res.message)
+    # res = minimize(get_score_raw,
+    #                args=(e, n_stim, n_ele, n_channel, currents_prev, opt_target, fun, fun_args, res_scale),
+    #                x0=x0,
+    #                method=method,
+    #                options={'disp': True, 'maxiter': maxiter},
+    #                bounds=bounds,
+    #                tol=1e-6,
+    #                constraints=constraints)
     # print(res.fun, res.success, res.message)

     if currents_prev is None:
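`optimize_currents` now wraps the local SLSQP solve in `scipy.optimize.basinhopping`, passing the previous `minimize` arguments through `minimizer_kwargs` and exposing `basiniter`, `bounds`, `x0`, and `constraints`. The toy sketch below reproduces the same basinhopping-around-SLSQP pattern; the objective and all names are illustrative stand-ins, not the package's scoring function.

```python
# Minimal standalone sketch of basinhopping with SLSQP as the local minimizer.
import numpy as np
from scipy.optimize import basinhopping

def toy_score(x):
    # stand-in for get_score_raw: penalize correlated "currents" of two channels
    x = x.reshape(2, -1)
    return np.abs(np.corrcoef(x)[0, 1])

x0 = np.random.rand(2 * 20) * 2 - 1
minimizer_kwargs = {"method": "SLSQP",
                    "bounds": [(-1, 1)] * x0.size,
                    "options": {"maxiter": 200}}
res = basinhopping(toy_score, x0, minimizer_kwargs=minimizer_kwargs,
                   niter=10, stepsize=0.1)
print(res.fun)
```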
@@ -175,9 +336,9 @@ def optimize_currents(e, n_stim, currents_prev=None, seed=None,
     else:
         currents = np.hstack((currents_prev, np.reshape(res.x, (n_channel, n_stim - currents_prev.shape[1]))))
     if opt_target == 'elms':
-        score = 1 / ((n_ele ** 2 - n_ele) / 2) * res.fun
+        score = 1 / ((n_ele ** 2 - n_ele) / 2) * res.fun * res_scale
     elif opt_target == 'stims':
-        score = 1 / ((n_stim ** 2 - n_stim) / 2) * res.fun
+        score = 1 / ((n_stim ** 2 - n_stim) / 2) * res.fun * res_scale
     else:
         raise ValueError("opt_target has to be 'elms' or 'stims'")

@@ -250,7 +411,7 @@ def optimize_currents_single_channel(e, n_stim, currents_prev=None, seed=None,
     return res.x, score


-def get_score(x, e, n_stim, n_ele, n_channel, x_opt=None):
+def get_score(x, e, n_stim, n_ele, n_channel, x_opt=None, opt_target='elms', fun=None, fun_args=None):
     """
     Normalize the score by the number of elements.

@@ -274,5 +435,994 @@ def get_score(x, e, n_stim, n_ele, n_channel, x_opt=None):
     score : float
         The normalized score.
     """
-    score_raw = get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt)
+    score_raw = get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt, opt_target=opt_target, fun=fun, fun_args=fun_args)
     return 1 / ((n_ele ** 2 - n_ele) / 2) * score_raw
+
+
+def get_current_init(n_channel, n_stim, bounds=None, only_pos=None, seed=None):
+    """
+    Get initial guess for the optimization problem.
+
+    Example:
+    --------
+    # To set channels 3 and 4 to init values within (-.7, -1) and (0.7, 1):
+    get_current_init(n_channels=5, n_stim=150,
+                     bounds = [(1,-1), (1,-1), (.7,1), (.7,-1), (1,-1)],
+                     only_pos = [True, True, False, False])
+
+    Parameters
+    ----------
+    n_channel : int
+        Number of channels.
+    n_stim : int
+        Number of stimulations.
+    bounds : List of tuples
+        Bounds of the optimization problem.
+    only_pos : List of bool
+        If True, only positive values are used from within bounds.
+
+    Returns
+    -------
+    x0 : np.ndarray of float
+        Initial guess for the optimization problem.
+    """
+    if bounds is None:
+        bounds = [(1, -1) for _ in range(n_channel)]
+    if only_pos is None:
+        only_pos = [False for _ in range(n_channel)]
+    assert len(bounds) == n_channel
+    assert len(only_pos) == n_channel
+    x0 = (np.random.rand(n_channel * n_stim) * 2) - 1
+    for i in range(n_channel):
+        a = np.random.rand(n_stim)
+        max_bound, min_bound = max(bounds[i]), min(bounds[i])
+        x0[n_stim * i: n_stim * (i + 1)] = (((a - a.min()) * (max_bound - min_bound)) / (a.max() - a.min()) + min_bound)
+        if not only_pos[i]:
+            x0[n_stim * i: n_stim * (i + 1)] *= np.random.choice([-1, 1], n_stim)
+    return x0
+
+
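`get_current_init` rescales per-channel random starting values into user-given bounds and can restrict selected channels to positive values. The standalone sketch below mirrors that behaviour for illustration; all names are local to the sketch, not package code.

```python
# Illustrative re-implementation of the per-channel init idea for five channels.
import numpy as np

n_channel, n_stim = 5, 10
bounds = [(1, -1), (1, -1), (.7, 1), (.7, -1), (1, -1)]
only_pos = [True, True, False, False, True]

x0 = np.empty(n_channel * n_stim)
for i in range(n_channel):
    a = np.random.rand(n_stim)
    hi, lo = max(bounds[i]), min(bounds[i])
    chan = (a - a.min()) * (hi - lo) / (a.max() - a.min()) + lo   # rescale into [lo, hi]
    if not only_pos[i]:
        chan *= np.random.choice([-1, 1], n_stim)                 # random sign flips
    x0[n_stim * i: n_stim * (i + 1)] = chan

x0_per_channel = x0.reshape(n_channel, n_stim)
print(x0_per_channel.min(axis=1), x0_per_channel.max(axis=1))
```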
+def constraint_min_current(x, n_stim, channels=(2, 3), min_current=0.7, frac=0.95, factor=100., verbose=False):
+    """
+    This function constrains the currents to be above a certain threshold.
+    Should be used with the 'SLSQP' method of scipy.optimize.minimize as an inequality constraint.
+
+    Parameters
+    ----------
+    x : np.ndarray of float
+        (n_channel * n_stim_opt, ) Vector to scale channels for each.
+    n_stim : int
+        Number of stimulations to compute score for.
+    channels : tuple of int, default: (2, 3)
+        Channels to constrain.
+    min_current : float, default: 0.7
+        Minimum current.
+    frac : float, default: 0.95
+        Fraction of channels that must be within the bounds.
+    factor : float, default: 100.
+        Factor to scale the constraint.
+    verbose : bool, default: False
+        Print additional information
+
+    Returns
+    -------
+    res : float
+        >=0 if the constraint is met, <0 otherwise
+    """
+    np.random.seed(n_stim)
+    res = (x[n_stim * channels[0]: n_stim * (channels[0] + 1)] ** 2 > min_current ** 2)
+    for i in range(1, len(channels)):
+        res += (x[n_stim * channels[i]: n_stim * (channels[i] + 1)] ** 2 > min_current ** 2)
+    res = (res.sum() - n_stim * frac) / factor
+
+    # res = (((x[n_stim * 2: n_stim * 3] ** 2 > 0.5) + (
+    #         x[n_stim * 3: n_stim * 4] ** 2 > 0.5)).sum() - n_stim * 0.95) / 100.
+
+    if verbose:
+        print(f"Bad_currents: {res}")
+    return res
+
+
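`constraint_min_current` is meant to be registered as an SLSQP inequality constraint (it returns a non-negative value when enough of the selected channels exceed `min_current`). The sketch below wires a simplified stand-in of that constraint into `scipy.optimize.minimize`; the objective and the stand-in function are illustrative, not package code.

```python
# Sketch: passing a count-based inequality constraint to SLSQP via a dict.
import numpy as np
from functools import partial
from scipy.optimize import minimize

n_stim, n_channel = 10, 5

def stand_in_constraint(x, n_stim, channels=(2, 3), min_current=0.7, frac=0.95, factor=100.):
    cnt = np.zeros(n_stim)
    for c in channels:
        cnt += x[n_stim * c: n_stim * (c + 1)] ** 2 > min_current ** 2
    return (cnt.sum() - n_stim * frac) / factor   # >= 0 when enough currents are large

cons = [{'type': 'ineq', 'fun': partial(stand_in_constraint, n_stim=n_stim)}]
x0 = np.random.rand(n_channel * n_stim) * 2 - 1
res = minimize(lambda x: np.sum(x ** 2), x0, method='SLSQP',
               bounds=[(-1, 1)] * x0.size, constraints=cons)
print(res.success, stand_in_constraint(res.x, n_stim))
```

Like the package constraint, the stand-in counts thresholded currents, so it is piecewise constant; SLSQP treats it heuristically rather than through exact gradients.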
+def calc_ll(l):
+    """
+    Calculate matrix multiplication of Lead-filed * transpose(Lead-field).
+
+    Parameters
+    ----------
+    l : np.ndarray
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+
+    Returns
+    -------
+    LL : np.ndarray
+        [n_coils, n_coils] Resulting square matrix LL.
+    """
+    return l.T @ l
+
+
+def calc_e(l, j):
+    """
+    Calculate E-field. Multiplication of lead-field and coil currents.
+
+    Parameters
+    ----------
+    l : np.ndarray
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+    j : np.ndarray
+        [n_coils]
+
+    Returns
+    -------
+    E : np.ndarray of dimenstion
+        [n_elm, 3] Resulting e-field for all elements in ROI.
+    """
+    n_k = int(len(l) / 3)  # cound of elements in ROI
+    e = l @ j
+    return e.reshape((n_k, 3))
+
+
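`calc_ll` and `calc_e` assume the flattened lead-field layout described in the docstrings (and again under `foc_opt` below): one column per channel, with the x, y, z components of each element stacked as consecutive rows. The standalone sketch checks that `l @ j` reproduces the superposition of the per-channel unit-current fields; the data and names are illustrative.

```python
# Standalone sketch of the flattened lead-field layout and the two helpers' math.
import numpy as np

n_elm, n_coils = 4, 3
rng = np.random.default_rng(1)
E_per_channel = [rng.random((n_elm, 3)) for _ in range(n_coils)]   # hypothetical unit-current fields

l = np.vstack([E.flatten() for E in E_per_channel]).T   # shape (n_elm*3, n_coils)
ll = l.T @ l                                            # what calc_ll returns

j = np.array([1.0, -0.5, 0.2])                          # hypothetical channel currents
e = (l @ j).reshape(n_elm, 3)                           # what calc_e returns
assert np.allclose(e, sum(ji * Ei for ji, Ei in zip(j, E_per_channel)))
```

The same `np.vstack([...]).T` construction is spelled out in the `foc_opt` docstring, so building `l` this way keeps the two helpers consistent.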
+def goal(phi, l, ll, target_stim=80, u=0, s=0):
+    """
+    Goal Function for focality optimization.
+
+    Parameters
+    ----------
+    phi : np.ndarray
+        [n_coils] Angles of hyperellipsoid cylinder.
+    l: np.ndarray of float
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+    ll : np.ndarray of float
+        [n_coils, n_coils] Result of matrix multiplication of lead-field matrix * transpose(lead-field matrix)
+    target_stim : int, default: 80
+        Desired target stimulation. scaling value for unit current.
+    u : np.ndarray of float
+        [n_coils, n_coils] Left singular vectors of lead-field matrix.
+    s : np.ndarray of float
+        [n_coils] Singular values of lead-field matrix.
+
+    Returns
+    -------
+    eps : int
+        Result value of goal function for given phi.
+    J : np.ndarray of dimension
+        [n_coils] Resulting currents for given phi.
+    """
+    # Count of Coils
+    n_c = len(l[0])
+    n_elm = l.shape[0] / 100
+    # phi /= 1000
+    # Local lead-field: lead-field at target position k
+    # start =
+    # end =
+    # phi *= np.pi
+    # Check for very small singular values and avoid division by zero
+    s[s < 1e-10] = 1e-10
+    # reciprocal of sqrt to normalize singular values. transforms them into set of scaling factors
+    e = 1. / np.sqrt((np.array(s[0:3])))
+
+    # hyperellipsoid cylinder
+    x = np.zeros(n_c)
+    # for i in range(n_c):
+    #     if i == 0:
+    x[0] = e[0] * math.cos(phi[0])
+    #     elif i == 1:
+    x[1] = e[1] * math.sin(phi[0]) * math.cos(phi[1])
+    #     elif i == 2:
+    x[2] = e[2] * math.sin(phi[0]) * math.sin(phi[1])
+    #     else:
+    for i in range(3, n_c):
+        x[i] = phi[i - 1]
+
+    j = u @ x
+    # eps =
+    # print(f"GOAL phi: {np.round(phi,4)}: error: {np.round(eps/n_elm, 4)}, j:{j}")
+    j = j * target_stim
+    return get_focality_score(j, ll) / n_elm, j
+
+
+def get_focality_score(j, ll):
+    """
+    Calculate the focality score.
+    """
+    # Calculate the focality score
+    eps = j @ ll @ j.T
+    return eps
+
+
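`goal` maps the optimization variables onto a hyperellipsoid cylinder (spherical angles scaled by the inverse square roots of the local singular values for the first three coordinates, pass-through for the rest) and scores the resulting currents with the quadratic form from `get_focality_score`. A condensed standalone sketch of that mapping, using a synthetic lead field and illustrative names only:

```python
# Sketch of the hyperellipsoid parameterization and the j' @ LL @ j focality score.
import math
import numpy as np

n_coils = 5
rng = np.random.default_rng(2)
l = rng.random((12 * 3, n_coils))        # hypothetical flattened lead field
ll = l.T @ l

l_k = l[0:3, :]                          # local lead field of some target element
u, s, _ = np.linalg.svd(l_k.T @ l_k + np.eye(n_coils) * 1e-9)
s[s < 1e-10] = 1e-10
scale = 1. / np.sqrt(s[0:3])

phi = rng.random(n_coils) * np.pi        # optimization variables
x = np.zeros(n_coils)
x[0] = scale[0] * math.cos(phi[0])
x[1] = scale[1] * math.sin(phi[0]) * math.cos(phi[1])
x[2] = scale[2] * math.sin(phi[0]) * math.sin(phi[1])
x[3:] = phi[2:n_coils - 1]               # remaining coordinates passed through
j = u @ x                                # candidate currents
print(j @ ll @ j.T)                      # focality score (smaller = more focal)
```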
+def foc_opt(l, ll, k, target_stim=.1, current_constraint=None, verbose=False):
+    """
+    Focality optimization routine. Minimizing goal function value via gradient descend of scipy.minimize().
+    The goal function is the squared local lead-field matrix at target position k.
+    The optimization is done in the space of the hyperellipsoid cylinder.
+
+    1) Initially, an unconstrained optimization is performed.
+    2) In case current_constraints are set and the resulting currents exceed the constraints,
+       a constrained optimization is performed.
+       If the constrained optimization fails, the function returns None for e, eps, j, and phi_opt.
+
+    ``l`` is ``np.vstack([E_chan1.flatten(), E_chan2.flatten(),...]).T``
+    with ``E_chan1.shape = (n_elms, 3)``
+
+    ``ll`` is ``L.T @ L``
+
+    Parameters
+    ----------
+    l : np.ndarray of float
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+    ll : np.ndarray of float
+        [n_coils, n_coils] Result of matrix multiplication of lead-field matrix * transpose(lead-field matrix)
+    k : int
+        index of optimization target element in ``l``.
+    target_stim : int, default = 80
+        desired target stimulation. scaling value for unit current.
+    verbose : bool, default = False
+        If True, displays optimization progress and debug information. This parameter controls the verbosity of the
+        optimization process.
+        If set to False, it suppresses the output.
+    current_constraint : int, optional
+        If set, a current constraint is applied for the optimization, ensuring the resulting currents don't exceed
+        this value.
+
+    Returns
+    -------
+    e : np.ndarray
+        [n_elm, 3] The resulting electric field from the optimization process.
+    eps : float
+        The result of the goal function for the given phi.
+    j : np.ndarray
+        [n_coils] The optimal currents for the given phi.
+    phi_opt : np.ndarray of dimension
+        [n_coils] The resulting optimal angles of the hyperellipsoid.
+    constraint_opt : str
+        The type of constraint optimization used. 'foc_opt' for unconstrained, 'foc_opt_constrained' for constrained.
+    eps_raw : float
+        The result of the goal function for the given phi.
+    """
+    l_k = l[3 * k:3 * k + 3, :]
+    # singular value decomposition of squared local lead-field + perturbation to improve numeric stability
+    [u, s, _] = np.linalg.svd(l_k.T @ l_k + np.eye(l_k.shape[1]) * 1e-9)
+    # l_k.shape
+
+    def current_constraint_fun(x):
+        """
+        Constraint function to ensure that the currents do not exceed a certain threshold.
+        """
+        eps, j = goal(x, l, ll, target_stim, u, s)
+        res = (current_constraint - np.abs(j)).min() + 0.00001
+        # print(f"target stim {np.round(target_stim, 2)} "
+        #       f"j: {j}, phi: {x}, res={res}, res: {np.round((res + + 0.0001) / 10000, 2)}")
+        # print(f"CONS phi: {np.round(x, 4)}: ret: {np.round(res, 4)}, j:{j}")
+        # return (res+ + 0.0001) / 10000
+        return res
+
+    if not current_constraint:
+        current_constraint = None
+    elif current_constraint is True:
+        raise ValueError("current_constraint has to be an integer value.")
+    assert l.shape[1] == ll.shape[0] == ll.shape[1]
+    constraint_opt = 'foc_opt'
+    n_chans = l.shape[1]
+
+    # check if target strength can be achieved with constrained
+    if current_constraint is not None:
+        e_unflattened = l.T.reshape(n_chans, -1, 3)
+        j_max, e_max = get_maximality_currents(e_unflattened[:, k, :], current_constraint)
+
+        if e_max < target_stim:
+            print(f"Maximality currents {j_max} yield e-field {e_max} < target_stim {target_stim}.")
+            eps = get_focality_score(j_max, ll)
+            e = calc_e(l, j_max)
+            return e, eps, j_max, [-1] * n_chans, "Maximality", np.inf
+
+    # do an initial optimization without constraints
+    constraint = {'type': 'ineq', 'fun': current_constraint_fun}
+
+    epss = [20, 50, 100, 200]
+    ftols = [1e-8, 1e-10, 1e-12, 1e-14, 1e-16, 1e-18, 1e-20, 1e-22]
+    tols = [1e-4, 1e-5, 1e-6]
+    steps = [0.01, 0.03, 0.05, 0.07, 0.09]
+    eps_goal = np.inf
+    best_params = {}
+    for eps in epss:
+        for ftol in ftols:
+            for step in steps:
+                for tol in tols:
+                    for i in range(10):
+                        phi = np.random.rand(len(l.T)) * np.pi
+                        opt_slsqp_unconstrained = scipy.optimize.minimize(
+                            lambda phi: goal(phi, l, ll, target_stim, u, s)[0],
+                            phi,
+                            method='SLSQP',
+                            tol=tol,
+                            options={
+                                "disp": verbose > 1,
+                                "maxiter": 1000,
+                                'ftol': ftol,
+                                'eps': eps,
+                                'finite_diff_rel_step': step,
+                            },
+                            constraints=None,
+                            bounds=[(0, np.pi) for _ in range(n_chans)]  # (-np.pi/2, np.pi/2),(-np.pi/2, np.pi/2))
+                        )
+
+                        if opt_slsqp_unconstrained.success and opt_slsqp_unconstrained.fun < eps_goal:
+                            eps_goal = opt_slsqp_unconstrained.fun
+                            phi_unconstrained = opt_slsqp_unconstrained.x
+                            best_params['eps'] = eps
+                            best_params['ftol'] = ftol
+                            best_params['step'] = step
+                            best_params['tol'] = tol
+                            # print(f"{np.round(eps_goal)}: {np.round(np.abs(goal(phi_unconstrained, l, ll, target_stim, u, s)[1]).max())}")
+
+    eps_raw_unconstrained, j_unconstrained = goal(phi_unconstrained, l, ll, target_stim, u, s)
+    eps_unconstrained = get_focality_score(j_unconstrained, ll)
+    phi_unconstrained = phi_unconstrained
+
+    if current_constraint is not None and np.any(np.abs(j_unconstrained) * .99 > current_constraint):
+        if verbose:
+            print(f"Unconstrained optimization yields j_max: {np.max(abs(j_unconstrained))}. "
+                  f"Performing constrained optimization.")
+        # perform constrained optimization
+        n_iter = 0
+        n_success = 0
+        eps_final = np.inf
+        while n_success <= 10 and n_iter < 1000:
+            phi = np.random.random(n_chans) * np.pi
+            opt_slsqp_constrained = scipy.optimize.minimize(
+                lambda phi: goal(phi, l, ll, target_stim, u, s)[0],
+                phi,
+                method='SLSQP',
+                tol=best_params['tol'] + np.random.rand(1) * best_params['tol'] - best_params['tol']/2,
+                options={
+                    "disp": verbose > 1,
+                    "maxiter": 1000,
+                    'ftol': best_params['ftol'] + np.random.rand(1) * best_params['ftol'] - best_params['ftol']/2,
+                    'eps': best_params['eps'] + np.random.rand(1) * best_params['eps'] - best_params['eps']/2,
+                    'finite_diff_rel_step': best_params['step'] +
+                                            np.random.rand(1) * best_params['step'] - best_params['step']/2,
+                },
+                constraints=constraint,
+                bounds=[(0, np.pi) for _ in range(n_chans)]
+            )
+
+            if opt_slsqp_constrained.success and opt_slsqp_constrained.fun < eps_final:
+                eps_final = opt_slsqp_constrained.fun
+                phi_opt = opt_slsqp_constrained.x
+            # print(f"n_iter: {n_iter} -- "
+            #       f"{np.round(opt_slsqp_constrained.fun, 4)}, "
+            #       f"{opt_slsqp_constrained.message} {opt_slsqp_constrained.fun < eps_final} n_succes={n_success}")
+            if opt_slsqp_constrained.success:
+                n_success += 1
+            n_iter += 1
+
+        if n_success == 0:
+            print(f"Constrained optimization failed after {n_iter} iterations.")
+            e = calc_e(l, j_max)
+            eps = get_focality_score(j_max, ll)
+            return e, eps, j_max, [-1] * n_chans, "Failed", np.inf, eps, j_max, [-1] * n_chans, np.inf
+
+        # print(f"{k} n_iter: {n_iter}, n_success: {n_success}, emax: {np.round(e_max, 2)}")
+        constraint_opt = 'foc_opt_constrained'
+        eps_raw_opt, j_opt = goal(phi_opt, l, ll, target_stim, u, s)
+        eps_opt = get_focality_score(j_opt, ll)
+
+    else:
+        eps_opt = eps_unconstrained
+        j_opt = j_unconstrained
+        eps_raw_opt = eps_raw_unconstrained
+        phi_opt = phi_unconstrained
+
+    e = calc_e(l, j_opt)
+    return (e, eps_opt, j_opt, phi_opt, constraint_opt, eps_raw_opt,
+            eps_unconstrained, j_unconstrained, phi_unconstrained, eps_raw_unconstrained)
+
+
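A hedged usage sketch for `foc_opt`: it assumes the module is importable as `pynibs.optimization.multichannel` in 0.2026.1, uses a synthetic lead field, and only unpacks the first six return values because the routine returns 6, 10, or 11 items depending on which branch it exits through. Note that the internal hyper-parameter sweep makes this call slow.

```python
# Hedged sketch: one focality optimization for a single target element.
import numpy as np
from pynibs.optimization import multichannel as mc   # assumed import path

rng = np.random.default_rng(3)
n_elm, n_coils = 50, 5
l = rng.random((n_elm * 3, n_coils))                  # synthetic flattened lead field
ll = mc.calc_ll(l)

res = mc.foc_opt(l, ll, k=10, target_stim=80, current_constraint=100)
e, eps, j, phi_opt, constraint_type, eps_raw = res[:6]   # common leading outputs
print(constraint_type, np.round(np.abs(np.asarray(j)).max(), 2))
```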
+def sphere(points, connections, radius, center):
+    """
+    Define target-sphere around M1-center with given radius.
+    All triangle surface elements included that contain minimum one point inside sphere.
+
+    Parameters
+    ----------
+    points : np.ndarray
+        [n_points x 3] Point elements for describing triangle surface. x,y,z coordinates.
+    connections : np.ndarray
+        [n_elm x 3] Triangle connectivity, indices of ``points``.
+    radius : int
+        Sphere radius.
+    center : list of int
+        [3] Center [x, y, z] of sphere.
+
+    Returns
+    -------
+    valid_p : np.ndarray
+        All points included in the sphere.
+    idx_c : np.ndarray
+        Triangle surface indices included in the sphere.
+    """
+    assert points.shape[1] == 3
+    distances = np.linalg.norm(points - center, axis=1)
+    valid_p = np.argwhere(distances < radius)
+    valid_p = valid_p.flatten().tolist()
+    # for point in points:
+    #     distance = np.sqrt(np.sum((point - center) ** 2))
+    #     if distance <= radius:
+    #         valid_p.append(i)
+    idx_c = []
+    for count, c in tqdm.tqdm(enumerate(connections), total=connections.shape[0], desc='Finding sphere indices'):
+        # c = connections[0,:]
+        if np.in1d(valid_p, c).any():
+            idx_c.append(count)
+
+        # if count > 1000:
+        #     break
+    # if c[0] in valid_p or c[1] in valid_p or c[2] in valid_p:
+    #     idx_c.append(c)
+    # valid_p in connections[:,0]
+    # len(valid_p)
+    # np.max(valid_p)
+    # idx_c = [i for i, row in enumerate(
+    #     connections) if any(point in valid_p for point in row)]
+
+    return valid_p, idx_c
+
+
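`sphere` selects all points within `radius` of `center` and every triangle that references at least one selected point. A toy sketch of that selection on a four-point mesh (the data here is illustrative only):

```python
# Toy sketch of the sphere() selection logic.
import numpy as np

points = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [5., 5., 5.]])
connections = np.array([[0, 1, 2], [1, 2, 3]])
center, radius = np.array([0., 0., 0.]), 2.0

valid_p = np.argwhere(np.linalg.norm(points - center, axis=1) < radius).flatten().tolist()
idx_c = [i for i, tri in enumerate(connections) if np.in1d(valid_p, tri).any()]
print(valid_p, idx_c)   # -> [0, 1, 2] and both triangles, since each touches an inside point
```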
+def optimization_all_k(l, points, connections, radius, center, ll, current_constraint=None, target_stim=80):
+    """
+    Run optimization on all target points inside sphere with certain radius.
+    Save results as hdf5.
+
+    Parameters
+    ----------
+    l : np.ndarray of dimension
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+    points : np.ndarray of dimension
+        [n_points, 3] Point elements for describing triangle surface. x,y,z coordinates.
+    connections : np.ndarray of dimension
+        [n_elm x 3] Triangle surface elements. defined by three point element indices.
+    radius : int
+        Sphere radius.
+    center : list of float
+        [3] Center [x, y, z] of target sphere.
+    ll : np.ndarray
+        [n_coils, n_coils] Result of matrix multiplication of lead-field matrix * transpose(lead-field matrix).
+    current_constraint: int, optional
+        If set, applies a current constraint during the optimization, ensuring the resulting currents
+        don't exceed a certain threshold (100 in this case).
+    target_stim: int
+        desired target stimulation value.
+
+    Returns
+    -------
+    ee : np.ndarray of dimension [n_targets, n_elm, 3]
+        Electric fields computed for each target in the optimization process. Each row represents the electric
+        field values across the ROI for a specific target.
+    eps : list of int
+        Result values of the goal function for each target. This represents how well the optimization achieved
+        the target stimulation for each point.
+    jj : np.ndarray of dimension [n_points x n_coils]
+        Currents for each point in the sphere. Each row corresponds to the resulting currents for a specific point.
+    phi : list of np.ndarray
+        Optimal angles of the hyperellipsoid for each target point, resulting from the optimization.
+    curr_cons : list of float
+        The maximum stimulation for each target after applying current constraints (if any).
+    idx : list of int
+        Indices of the points inside the target sphere, selected based on the radius and center.
+    """
+    eps_all = []
+    jj = []
+    phi_all = []
+    ee = []
+    curr_cons = []
+    targets = []
+    if radius <= 0:
+        print(f"Computing for all points in ROI")
+        idx = np.arange(len(points))
+    else:
+        print("calculating target sphere")
+        idx = sphere(points, connections, radius, center)[1]
+    c = 0
+    t = tqdm.tqdm(idx)
+    e = None
+    for i in t:
+        k = i
+        c += 1
+        targets.append(i)
+        # print(f"target number {c} of total {len(idx)}, with index {i}")
+
+        e, eps, j, phi, max_stim = foc_opt(l, ll, k, target_stim, current_constraint, False, t)
+        e = np.linalg.norm(e, axis=1)
+        eps_all.append(eps)
+        jj.append(j)
+        phi_all.append(phi)
+        ee.append(e)
+        curr_cons.append(max_stim)
+
+    # save EE as k x 3*ROI dataframe
+    print(f"E.shape: {e.shape}")
+    print(f"np.array(ee)")
+    ee = np.array(ee)
+    # reshaped_EE = EE.reshape(len(EE), -1)
+    # df_EE = pd.DataFrame(reshaped_EE)
+
+    return ee, eps_all, jj, phi_all, curr_cons, idx
+
+
+def foc_opt_worker_fun(idx, l, ll, points_wb, connections_wb, l_wb, current_constraint, target_stim):
+    """
+    Worker fun to be used with multiprocessing.
+    """
+    # perform optimization
+    res = foc_opt(l, ll, idx, target_stim, current_constraint, False)
+    e, eps, j, phi, constraint_type, eps_raw, eps_unc, j_unc, phi_unc, eps_raw_unc = res
+    e = np.linalg.norm(e, axis=1)
+    max_e = e[idx]
+    percentiles = np.linspace(0.5, 1.0, 6)
+    # eps_all.append(eps)
+    # j_all.append(j)
+    # phi_all.append(phi)
+    # constraint_types.append(constraint_type)
+    # eps_raw_all.append(eps_raw)
+    eval_res = {}
+    eval_res['idx'] = idx
+    eval_res['eps'] = eps
+    eval_res['eps_cun'] = eps_unc
+    eval_res['eps_raw'] = eps_raw
+    eval_res['eps_raw_unc'] = eps_raw_unc
+    for chan in range(len(j)):
+        eval_res[f"j_{chan:0>2}"] = j[chan]
+        eval_res[f"j_unc_{chan:0>2}"] = j_unc[chan]
+    eval_res['max_e'] = max_e
+    eval_res['constraint_type'] = constraint_type
+
+    # get wb e-field
+    e_wb = calc_e(l_wb, j)
+    e_wb_mag = np.linalg.norm(e_wb, axis=1)
+
+    # evaluate
+    t2m = target_to_max_ratio(e_wb_mag, k=None, e_target=max_e)
+    eval_res['T2M'] = t2m
+    for percentile in percentiles:
+        res_oa = overstimulated_area(e_wb_mag, None, connections_wb,
+                                     points_wb, percentile, e_target=max_e)
+        eva_rel, eva_abs, total_area_cm2 = res_oa
+        eval_res[f'OA_eva_rel_{percentile}'] = eva_rel
+        eval_res[f'OA_eva_abs_{percentile}'] = eva_abs
+        eval_res[f'OA_total_area_cm2_{percentile}'] = total_area_cm2
+
+    # compute f = e^2 / sum(e_wb_mag^2)
+    e_wb_mag[idx] = 0
+    f = target_stim ** 2 / np.sum(e_wb_mag ** 2)
+    eval_res['f'] = f
+
+    return eval_res
+
+
+def optimization_roi(l, points_wb, connections_wb, l_wb, opt_indices=None,
+                     current_constraint=None, target_stim=100, hdf5_file_path=None, n_cpus=1):
+    """
+    Run optimization on all target points inside sphere with certain radius.
+    Save results as hdf5.
+
+    Parameters
+    ----------
+    l : np.ndarray of dimension
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+    points_wb : np.ndarray of float
+        [n_points, 3] Point elements for describing triangle surface. x,y,z coordinates.
+    connections_wb : np.ndarray of float
+        [n_elm x 3] Triangle surface elements. defined by three point element indices.
+    l_wb : np.ndarray of float
+        [n_elm*3, n_coils] Flattened lead-field matrix. x,y,z direction in each point as consecutive rows.
+    opt_indices : list of int, optional
+        [n_points] Indices of the elements to optimize.
+    current_constraint: int, optional
+        If set, applies a current constraint during the optimization, ensuring the resulting currents
+        don't exceed a certain threshold (100 in this case).
+    target_stim: int
+        desired target stimulation value
+    hdf5_file_path: str, optional
+        Path to save the results in HDF5 format.
+    n_cpus: int, default: 1
+        Number of CPUs to use for parallel processing.
+
+    Returns
+    -------
+    eps : list of int
+        Result values of the goal function for each target. This represents how well the optimization achieved
+        the target stimulation for each point.
+    j : np.ndarray of dimension [n_points x n_coils]
+        Currents for each point in the sphere. Each row corresponds to the resulting currents for a specific point.
+    phi : list of np.ndarray
+        Optimal angles of the hyperellipsoid for each target point, resulting from the optimization.
+    max_e : list of float
+        The maximum stimulation for each target after applying current constraints (if any).
+    constraint_types : list of string
+        The type of constraint optimization used. 'foc_opt' for unconstrained, 'foc_opt_constrained' for constrained.
+    eps_raw : list of float
+        The result of the goal function for the given phi.
+    eval_res : dict
+        Dictionary containing various evaluation metrics for the optimization process, including
+        overstimulated area and target-to-max ratio.
+    """
+    ll = l.T @ l
+    eps_all = []
+    eps_raw_all = []
+    j_all = []
+    phi_all = []
+    max_e = []
+    constraint_types = []
+    if opt_indices is None:
+        opt_indices = np.arange(int(l.shape[0] / 3))
+
+    _fun = partial(foc_opt_worker_fun, l=l, ll=ll, points_wb=points_wb, connections_wb=connections_wb, l_wb=l_wb,
+                   current_constraint=current_constraint, target_stim=target_stim)
+    dict_list = process_map(_fun, opt_indices, max_workers=n_cpus)
+    eval_res = {k: [] for k in dict_list[0].keys()}
+    for d in dict_list:
+        for k, v in d.items():
+            eval_res[k].append(v)
+
+    if hdf5_file_path is not None:
+        with h5py.File(hdf5_file_path, 'w') as file:
+            for k, d in eval_res.items():
+                data = np.zeros((int(l.shape[0] / 3)))
+                try:
+                    data[opt_indices] = d
+                except ValueError:
+                    d = np.unique(d, return_inverse=True)[1]
+                    data[opt_indices] = d
+                file.create_dataset(k, data=data)
+
+    return eps_all, j_all, phi_all, max_e, constraint_types, eps_raw_all, eval_res
+
+
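`optimization_roi` distributes one `foc_opt_worker_fun` call per target index via `functools.partial` and `tqdm.contrib.concurrent.process_map`, then merges the per-target dictionaries column-wise. The standalone sketch below reproduces only that fan-out/merge pattern with a toy worker; all names here are illustrative, not package code.

```python
# Standalone sketch of the partial + process_map fan-out and dict merge.
import numpy as np
from functools import partial
from tqdm.contrib.concurrent import process_map

def toy_worker(idx, scale):
    # stand-in for foc_opt_worker_fun: return one result dict per target index
    return {"idx": int(idx), "value": scale * idx ** 2}

if __name__ == "__main__":
    worker = partial(toy_worker, scale=0.5)
    results = process_map(worker, np.arange(8), max_workers=2)
    merged = {k: [d[k] for d in results] for k in results[0]}
    print(merged)
```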
+def scd(connections, points, idx, mesh_skin, center_skin=None):
+    """
+    Calculate skin-cortex distance between midlayer target points and skin surface
+
+    Parameters
+    ----------
+    connections : np.ndarray of dimension [n_ROI/3 x 3]
+        Triangle surface elements, defined by three point element indices for each triangle.
+    points : np.ndarray of dimension [n_points x 3]
+        Point elements describing the triangle surface. Each point has x, y, z coordinates.
+    idx : list or int
+        Indices of the target points or a single index. If scalar, it is converted to a list of one element.
+    mesh_skin : simnibs.mesh_io.Mesh
+        A SimNIBS skin surface mesh.
+    center_skin : np.ndarray of dimension, default: None
+        (n_elements, 3) The centers of the skin mesh elements. If not provided, it is calculated using the
+        `elements_baricenters()` method of the `mesh_skin`.
+
+    Returns
+    -------
+    target_idx : np.ndarray of dimension [n_targetsphere]
+        indices of targets in sphere
+    scd : np.ndarray of dimension [n_targetsphere]
+        skin cortex distance, or rather skin gm midlayer distance for each triangle mesh in target sphere
+    """
+    warnings.warn("This function is deprecated and will be removed in the future. "
+                  "Use `pynibs.utils.scd` instead.",
+                  DeprecationWarning)
+    scd = []
+    centers = []
+
+    if center_skin is None:
+        center_skin = mesh_skin.elements_baricenters()[:]
+
+    # Check if idx is scalar or iterable
+    if isinstance(idx, (list, np.ndarray)):
+        target_idx = idx
+    else:
+        target_idx = [idx]  # Convert scalar to list for iteration
+
+    for idx in target_idx:
+        connection = connections[idx]
+        pts = points[connection]
+        center = np.mean(pts, axis=0)
+        centers.append(center)
+
+    for idx, target in zip(target_idx, centers):
+        distances = np.linalg.norm(center_skin - target, axis=1)
+        scd = distances.min()
+
+    return target_idx, scd
+
+
+def target_to_max_ratio(e, k, e_target=None):
+    """
+    Evaluation-metric: E_target/E_max.
+    Quantifies the difference between target ``|E|`` and ``max(|E|)``.
+
+    Parameters
+    ----------
+    e : np.ndarray
+        [n_elm, 1] or [n_elm, 3] E-Field.
+    k : int
+        Index of optimized target.
+    e_target : float, optional
+        E-field at the target position. If not provided, it is calculated from ``e``.
+
+    Returns
+    -------
+    t2m : int
+        e_target/e_max Metric
+        if max_stim = 1: e_target == e_max
+        if max_stim < 1: e_target << e_max
+        -> values < 1 indicate worse focality
+    """
+    assert k is None or e_target is None, "Either k or e_at_target should be None, not both."
+    if e_target is None:
+        e_target = e[k]
+    if len(e.shape) == 2 and e.shape[1] == 3:
+        e = np.linalg.norm(e, axis=1)
+    t2m = e_target / e.max()
+    return t2m
+
+
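`target_to_max_ratio` is simply the field at the target divided by the global maximum; values below 1 mean the hotspot lies off target. A tiny numeric illustration (values are arbitrary):

```python
# Tiny check of the target-to-max ratio: target |E| of 80 vs. a hotspot of 100.
import numpy as np

e_mag = np.array([40., 80., 100., 30.])   # |E| per element (illustrative)
k = 1                                     # target element index
t2m = e_mag[k] / e_mag.max()
print(t2m)                                # 0.8 -> maximum lies off target
```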
+def calculate_triangle_area(p1, p2, p3):
+    """
+    Calculate the area of a triangle given its three vertices. v
+
+    Parameters
+    ----------
+    p1, p2, p3 : np.ndarray
+        (n, 3) Coordinates of the triangle's vertices.
+
+    Returns
+    -------
+    area : np.ndarray
+        (n, ) Area of the triangles.
+    """
+    # Using the cross product to find the area of the triangle
+    vec1 = p2 - p1
+    vec2 = p3 - p1
+    cross_product = np.cross(vec1, vec2)
+    area = np.linalg.norm(cross_product, axis=1) / 2.0
+    return area
+
+
+def overstimulated_area(e, k, connections, points, percentile=1, e_target=None, e_in_tris=True):
+    """
+    Calculate the prevalence of the target field and transform it into a measure in cm^2.
+
+    Parameters
+    ----------
+    e : np.ndarray
+        [n_nodes,] E-Field magnitudes (in triangles or nodes) .
+    k : int
+        Index of the optimized target.
+    connections : np.ndarray
+        [n_connections, 3] Triangle surface elements defined by three point element indices.
+    points : np.ndarray of dimension
+        [n_points, 3] Coordinates of the mesh points.
+    percentile: int, default = 1
+        percentile*e_target >= E, range 0-1.
+    e_target : float, optional
+        E-field at the target position. If not provided, it is calculated from ``e``.
+    e_in_tris : bool, default: True
+        Is E provided in tris (True) or nodes (False)
+
+    Returns
+    -------
+    eva_rel : float
+        Relative area for which \|E\|\ :sub:`target` >= \|E\|\ :sub:`offtarget`\.
+    eva_abs : int
+        Absolute count of the indices where \|E\|\ :sub:`target` >= \|E\|\ :sub:`offtarget`\.
+    total_area_cm2 : float
+        Total area in cm² for which \|E\|\ :sub:`target` >= \|E\|\ :sub:`offtarget`\.
+    """
+    import trimesh
+    assert k is None or e_target is None, "Either k or e_at_target should be None, not both."
+    if e_target is None:
+        e_target = e[k]
+    # print(percentile)
+    # print(e_target)
+    # print(e)
+    if not e_in_tris:
+        raise NotImplementedError
+    else:
+        assert e.shape[0] == connections.shape[0], f"Wrong shape: {e.shape[0]} != {connections.shape[0]}"
+    over_idx = e_target * percentile <= e
+    eva_abs = np.sum(over_idx)
+
+    # mask = (percentile * e_target <= e)
+    # indices = np.where(mask)[0]
+    # eva_abs = len(indices)
+    eva_rel = eva_abs / e.shape[0]
+    # log_eva = math.log(eva_rel) if eva_rel > 0 else float('-inf')
+
+    # Calculate the total area for the indices where |E|_target >= |E|_offtarget
+    # triangles_mask = np.isin(connections, indices).any(axis=1)
+    selected_triangles = connections[over_idx]  # this only works if E is in tris
+    total_area = trimesh.triangles.area(points[selected_triangles]).sum()
+    # p1 = points[selected_triangles[:, 0]]
+    # p2 = points[selected_triangles[:, 1]]
+    # p3 = points[selected_triangles[:, 2]]
+    #
+    # total_area = np.sum(calculate_triangle_area(p1, p2, p3))
+
+    # Convert area from mm² to cm²
+    total_area_cm2 = total_area / 100.0
+
+    return eva_rel, eva_abs, total_area_cm2
+
+
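`overstimulated_area` thresholds the per-triangle |E| at `percentile * e_target`, counts the triangles above it, and converts their summed area from mm² to cm² (via `trimesh` in the package). The standalone sketch below reproduces the measure on a two-triangle patch, using cross-product areas instead of `trimesh`; the data is illustrative.

```python
# Standalone sketch of the overstimulated-area measure on a two-triangle patch.
import numpy as np

points = np.array([[0., 0., 0.], [10., 0., 0.], [0., 10., 0.], [10., 10., 0.]])
connections = np.array([[0, 1, 2], [1, 3, 2]])
e_tris = np.array([90., 40.])        # |E| per triangle (illustrative)
e_target, percentile = 80., 1.0

over_idx = e_target * percentile <= e_tris
tris = points[connections[over_idx]]
areas = np.linalg.norm(np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[:, 0]), axis=1) / 2.0
eva_rel, eva_abs = over_idx.mean(), over_idx.sum()
total_area_cm2 = areas.sum() / 100.0
print(eva_rel, eva_abs, total_area_cm2)   # 0.5, 1, 0.5 (one 50 mm^2 triangle exceeds the threshold)
```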
+def foc_targets(r, oa, thr_oa, thr_r, result_path):
+    """
+    Calculate the prevalence of the target field and transform it into a measure in cm^2.
+
+    Parameters
+    ----------
+    r: np.ndarray of dimension
+        [n_nodes,] Target-to-max Ratio focality measure evaluated at all nodes.
+    oa : np.ndarray of dimension
+        [n_nodes,] Overstimulated Area focality measure evaluated all nodes.
+    thr_oa: int
+        threshold for focality mask: OA <= thr_OA.
+    thr_r : float
+        threshold for focality mask: R >= thr_R.
+    result_path : string
+        path to results folder.
+
+    Returns
+    -------
+    valid_array : np.ndarray of dimension
+        [n_nodes] Array where valid_indices = 1, invalid_indices = 0, non-evaluated = nan.
+    valid_indices : np.ndarray
+        Indices for which OA <= thr_OA and R >= thr_R is true.
+    """
+    mask = ~np.isnan(r) & ~np.isnan(oa)
+    r_clean, oa_clean = r[mask], oa[mask]
+
+    valid_mask = (r_clean >= thr_r) & (oa_clean <= thr_oa)
+    valid_indices = np.where(valid_mask)[0]
+    valid_array = np.full(len(r), np.nan)
+
+    # Set valid entries to 1 where the mask is true
+    valid_array[mask][valid_mask] = 1
+
+    # Set invalid entries to 0 where the mask is false
+    valid_array[mask][~valid_mask] = 0
+
+    # Save valid indices and the valid array to an HDF5 file
+    with h5py.File(result_path, 'w') as f:
+        f.create_dataset('valid_idx', data=valid_indices)
+        f.create_dataset('valid_array', data=valid_array)
+
+    return valid_array, valid_indices
+
+
+def get_maximality_currents(e, current_constraint=100):
+    """
+    Compute the current set that maximizes \|E\|
+
+    Parameters
+    ----------
+    e : np.ndarray of float
+        (n_chans, 3) Leadfield in target element.
+    current_constraint : float, default=100
+        Maximum current constraint.
+
+    Returns
+    -------
+    j_max : list of float
+        [n_chans] Current-set to maximize E.
+    e_max : Maximum \|E\| for given ``current_constraint``.
+    """
+    n_chans = e.shape[0]
+    combs = np.array(list(itertools.product([-1, 1], repeat=n_chans)))
+    combs = combs[:int(combs.shape[0] / 2)]
+
+    e_max_candidates = []
+    for i_step in range(combs.shape[0]):
+        a = combs[i_step, :][:, np.newaxis] * current_constraint * e
+        a = np.linalg.norm(np.sum(a, axis=0))
+        e_max_candidates.append(a)
+
+    e_max_candidates = np.array(e_max_candidates)
+    max_ids = np.where(e_max_candidates == np.max(e_max_candidates))[0][0]
+    j_max = combs[max_ids].copy()
+    e_target_max = e_max_candidates[max_ids]
+
+    return j_max, e_target_max
+
+
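`get_maximality_currents` brute-forces the sign pattern of the per-channel fields at the target that maximizes |E| under the current constraint; only half of the 2^n combinations need checking because j and −j give the same magnitude. A condensed standalone sketch with a synthetic target field (names are illustrative):

```python
# Sketch of the sign-combination search behind the maximality currents.
import itertools
import numpy as np

rng = np.random.default_rng(4)
e = rng.standard_normal((5, 3))          # hypothetical per-channel field at the target
current_constraint = 100

combs = np.array(list(itertools.product([-1, 1], repeat=e.shape[0])))
combs = combs[:combs.shape[0] // 2]      # j and -j are equivalent in magnitude
mags = np.linalg.norm((combs[:, :, None] * current_constraint * e[None]).sum(axis=1), axis=1)
j_max = combs[np.argmax(mags)]           # best sign pattern per channel
print(j_max, mags.max())
```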
+def maximality_focality_optimization(j_foc, flat_e_data, target_stim=80, current_constraint=100, steps=100):
+    """
+    Find balanced current solution between maximality and focality that yields a target stimulation
+    while keeping within a current constraint.
+
+    Parameters
+    ----------
+    j_foc : np.ndarray of float
+        Focality current set.
+    flat_e_data : np.ndarray of float
+        (n_chans, 3) Flattened leadfield matrix for the element.
+    target_stim : float, default=80
+        Desired target stimulation.
+    current_constraint : float, default=100
+        Maximum current constraint.
+    steps : int, default=100
+        How many steps to consider for the balancing. Basically the resolution of the optimization.
+
+    Returns
+    -------
+    j_inter : np.ndarray of float
+        Current set that balances maximality and focality.
+    dist_foc_int : float
+        Distance between focality current and the intersection current.
+    dist_max_int : float
+        Distance between maximality current and the intersection current.
+    """
+    j_max = get_maximality_currents(flat_e_data, current_constraint)[0]
+
+    # normalize j_foc
+    j_foc = j_foc.copy()
+    j_foc_max = max(j_foc, key=abs)
+    j_foc /= j_foc_max
+
+    # find distances to the maximality current
+    foc_max_dists = [np.linalg.norm(j_foc - j_max), np.linalg.norm(j_foc - j_max * -1)]
+    if foc_max_dists[0] > foc_max_dists[1]:
+        j_foc *= -1
+
+    stepsize = (j_foc - j_max) / steps
+    j_inter = None
+    for i_step in range(steps):
+        j_inter = (j_foc - i_step * stepsize) * current_constraint
+        # foc_str = np.array2string(j_foc, formatter={'float_kind': lambda x: f"{np.round(x,2): >4}"})
+        # inter_str = np.array2string(j_inter, formatter={'float_kind': lambda x: f"{np.round(x/current_constraint,2): >4}"})
+        # max_str = np.array2string(j_max, formatter={'float_kind': lambda x: f"{np.round(x,2): >4}"})
+        # print(f"{i_step: >2}: {foc_str} {inter_str} {max_str}")
+        e_inter = np.linalg.norm(
+            np.sum(
+                j_inter[:, np.newaxis] * flat_e_data, axis=0))
+        if e_inter >= target_stim and (j_inter <= current_constraint).all():
+            break
+    print(f"{i_step} used")
+    # compute distance of found solution to focality and maximality solutoin
+    dist_foc_int = np.round(np.linalg.norm((j_inter / current_constraint) - j_foc), 2)
+    dist_max_int = np.round(np.linalg.norm((j_inter / current_constraint) - j_max), 2)
+
+    return j_inter, dist_foc_int, dist_max_int
+
+
+def write_magventure_xls(opt_currents_fn, max_mso, xls_fn=None, rep=1):
+    """
+    Writes .xls file for MagVenture mTMS systems.
+    """
+    assert opt_currents_fn.endswith(".npy"), "opt_currents_fn must be a .npy file"
+    currents_opt = np.load(opt_currents_fn)
+    n_chans, n_zaps = currents_opt.shape
+    current_scale_factor_opt = max_mso / np.sqrt((currents_opt ** 2).max())
+    currents_opt *= current_scale_factor_opt
+
+    data = pd.DataFrame(
+        columns=["Trigger type", "Line delay [us]", "Trigout start [us]", "Trigout length [us]", "ch1 delay [us]",
+                 "ch1 amplitude [%]", "ch2 delay [us]", "ch2 amplitude [%]", "ch3 delay [us]", "ch3 amplitude [%]",
+                 "ch4 delay [us]", "ch4 amplitude [%]", "ch5 delay [us]", "ch5 amplitude [%]", "ch6 delay [us]",
+                 "ch6 amplitude [%]", "ch7 delay [us]", "ch7 amplitude [%]", "ch8 delay [us]", "ch8 amplitude [%]",
+                 "ch9 delay [us]", "ch9 amplitude [%]", "ch10 delay [us]", "ch10 amplitude [%]", "ch11 delay [us]",
+                 "ch11 amplitude [%]", "ch12 delay [us]", "ch12 amplitude [%]", "ch13 delay [us]",
+                 "ch13 amplitude [%]", "ch14 delay [us]", "ch14 amplitude [%]", "ch15 delay [us]",
+                 "ch15 amplitude [%]", "ch16 delay [us]", "ch16 amplitude [%]"])
+
+    data["Trigger type"] = [1] * n_zaps*rep
+    data["Line delay [us]"] = [4000000] * n_zaps*rep
+    data["Trigout start [us]"] = [5000] * n_zaps*rep
+    data["Trigout length [us]"] = [5000] * n_zaps*rep
+
+    for chan_i in range(1, 17):
+        data[f"ch{chan_i} delay [us]"] = ['']* n_zaps*rep
+        # data[f"ch{chan_i} amplitude [%]"] = currents_opt[chan_i - 1, :] * 100
+        data[f"ch{chan_i} amplitude [%]"] = ''
+
+    for chan_i in range(1, 7):
+        data[f"ch{chan_i} delay [us]"] = [0]* n_zaps*rep
+        data[f"ch{chan_i} amplitude [%]"] = np.repeat(currents_opt[chan_i - 1, :], rep)
+
+    if xls_fn is None:
+        xls_fn = opt_currents_fn.replace(".npy", f"_maxmso{max_mso}.xlsx")
+    data.to_excel(xls_fn, index=False, engine='openpyxl')
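A hedged usage sketch for the new exporter: it assumes the module is importable as `pynibs.optimization.multichannel`, that `openpyxl` is installed, and that the `.npy` file holds optimized currents of shape (n_chans, n_zaps). The currents are rescaled so that the largest magnitude maps to `max_mso`, and the sheet is written next to the input file.

```python
# Hedged sketch: export optimized currents to a MagVenture mTMS pulse sheet.
import numpy as np
from pynibs.optimization import multichannel as mc   # assumed import path

currents = np.random.rand(6, 20) * 2 - 1   # 6 channels x 20 pulses, in [-1, 1]
np.save("currents_opt.npy", currents)

# Scales currents so the largest magnitude maps to 70 and writes
# currents_opt_maxmso70.xlsx next to the .npy file.
mc.write_magventure_xls("currents_opt.npy", max_mso=70, rep=1)
```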