pyNIBS-0.2024.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. pyNIBS-0.2024.8.dist-info/LICENSE +623 -0
  2. pyNIBS-0.2024.8.dist-info/METADATA +723 -0
  3. pyNIBS-0.2024.8.dist-info/RECORD +107 -0
  4. pyNIBS-0.2024.8.dist-info/WHEEL +5 -0
  5. pyNIBS-0.2024.8.dist-info/top_level.txt +1 -0
  6. pynibs/__init__.py +34 -0
  7. pynibs/coil.py +1367 -0
  8. pynibs/congruence/__init__.py +15 -0
  9. pynibs/congruence/congruence.py +1108 -0
  10. pynibs/congruence/ext_metrics.py +257 -0
  11. pynibs/congruence/stimulation_threshold.py +318 -0
  12. pynibs/data/configuration_exp0.yaml +59 -0
  13. pynibs/data/configuration_linear_MEP.yaml +61 -0
  14. pynibs/data/configuration_linear_RT.yaml +61 -0
  15. pynibs/data/configuration_sigmoid4.yaml +68 -0
  16. pynibs/data/network mapping configuration/configuration guide.md +238 -0
  17. pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +42 -0
  18. pynibs/data/network mapping configuration/configuration_for_testing.yaml +43 -0
  19. pynibs/data/network mapping configuration/configuration_modelTMS.yaml +43 -0
  20. pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +43 -0
  21. pynibs/data/network mapping configuration/output_documentation.md +185 -0
  22. pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +77 -0
  23. pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +1281 -0
  24. pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +1281 -0
  25. pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +1281 -0
  26. pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +1281 -0
  27. pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +1281 -0
  28. pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +1281 -0
  29. pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +1281 -0
  30. pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +1281 -0
  31. pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +1281 -0
  32. pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +1281 -0
  33. pynibs/expio/Mep.py +1518 -0
  34. pynibs/expio/__init__.py +8 -0
  35. pynibs/expio/brainsight.py +979 -0
  36. pynibs/expio/brainvis.py +71 -0
  37. pynibs/expio/cobot.py +239 -0
  38. pynibs/expio/exp.py +1876 -0
  39. pynibs/expio/fit_funs.py +287 -0
  40. pynibs/expio/localite.py +1987 -0
  41. pynibs/expio/signal_ced.py +51 -0
  42. pynibs/expio/visor.py +624 -0
  43. pynibs/freesurfer.py +502 -0
  44. pynibs/hdf5_io/__init__.py +10 -0
  45. pynibs/hdf5_io/hdf5_io.py +1857 -0
  46. pynibs/hdf5_io/xdmf.py +1542 -0
  47. pynibs/mesh/__init__.py +3 -0
  48. pynibs/mesh/mesh_struct.py +1394 -0
  49. pynibs/mesh/transformations.py +866 -0
  50. pynibs/mesh/utils.py +1103 -0
  51. pynibs/models/_TMS.py +211 -0
  52. pynibs/models/__init__.py +0 -0
  53. pynibs/muap.py +392 -0
  54. pynibs/neuron/__init__.py +2 -0
  55. pynibs/neuron/neuron_regression.py +284 -0
  56. pynibs/neuron/util.py +58 -0
  57. pynibs/optimization/__init__.py +5 -0
  58. pynibs/optimization/multichannel.py +278 -0
  59. pynibs/optimization/opt_mep.py +152 -0
  60. pynibs/optimization/optimization.py +1445 -0
  61. pynibs/optimization/workhorses.py +698 -0
  62. pynibs/pckg/__init__.py +0 -0
  63. pynibs/pckg/biosig/biosig4c++-1.9.5.src_fixed.tar.gz +0 -0
  64. pynibs/pckg/libeep/__init__.py +0 -0
  65. pynibs/pckg/libeep/pyeep.so +0 -0
  66. pynibs/regression/__init__.py +11 -0
  67. pynibs/regression/dual_node_detection.py +2375 -0
  68. pynibs/regression/regression.py +2984 -0
  69. pynibs/regression/score_types.py +0 -0
  70. pynibs/roi/__init__.py +2 -0
  71. pynibs/roi/roi.py +895 -0
  72. pynibs/roi/roi_structs.py +1233 -0
  73. pynibs/subject.py +1009 -0
  74. pynibs/tensor_scaling.py +144 -0
  75. pynibs/tests/data/InstrumentMarker20200225163611937.xml +19 -0
  76. pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +14 -0
  77. pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +6373 -0
  78. pynibs/tests/data/Xdmf.dtd +89 -0
  79. pynibs/tests/data/brainsight_niiImage_nifticoord.txt +145 -0
  80. pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +1434 -0
  81. pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +47 -0
  82. pynibs/tests/data/create_subject_testsub.py +332 -0
  83. pynibs/tests/data/data.hdf5 +0 -0
  84. pynibs/tests/data/geo.hdf5 +0 -0
  85. pynibs/tests/test_coil.py +474 -0
  86. pynibs/tests/test_elements2nodes.py +100 -0
  87. pynibs/tests/test_hdf5_io/test_xdmf.py +61 -0
  88. pynibs/tests/test_mesh_transformations.py +123 -0
  89. pynibs/tests/test_mesh_utils.py +143 -0
  90. pynibs/tests/test_nnav_imports.py +101 -0
  91. pynibs/tests/test_quality_measures.py +117 -0
  92. pynibs/tests/test_regressdata.py +289 -0
  93. pynibs/tests/test_roi.py +17 -0
  94. pynibs/tests/test_rotations.py +86 -0
  95. pynibs/tests/test_subject.py +71 -0
  96. pynibs/tests/test_util.py +24 -0
  97. pynibs/tms_pulse.py +34 -0
  98. pynibs/util/__init__.py +4 -0
  99. pynibs/util/dosing.py +233 -0
  100. pynibs/util/quality_measures.py +562 -0
  101. pynibs/util/rotations.py +340 -0
  102. pynibs/util/simnibs.py +763 -0
  103. pynibs/util/util.py +727 -0
  104. pynibs/visualization/__init__.py +2 -0
  105. pynibs/visualization/para.py +4372 -0
  106. pynibs/visualization/plot_2D.py +137 -0
  107. pynibs/visualization/render_3D.py +347 -0
pynibs/neuron/neuron_regression.py ADDED
@@ -0,0 +1,284 @@
+ import os
+ import copy
+ import pynibs
+ import numpy as np
+ import _pickle as pickle
+ import multiprocessing.pool
+ from _functools import partial
+ from scipy.interpolate import LinearNDInterpolator
+
+
+ def workhorse_interp(idx_list, interp, params):
+     """
+     Single-core workhorse to interpolate data.
+
+     Parameters
+     ----------
+     idx_list : np.ndarray or list of float
+         (n_interpolations) Indices in the params array where the interpolation has to be performed
+         (subset of all indices in the params array).
+     interp : instance of scipy.interpolate
+         Interpolator instance.
+     params : np.ndarray of float
+         (N_interpolations, N_params) Array containing the parameters at which the function is evaluated
+         (total array with all parameters).
+
+     Returns
+     -------
+     res : np.ndarray of float
+         (n_interpolations) Interpolation results (subset params[idx_list, :]).
+     """
+     return interp(params[idx_list, 0], params[idx_list, 1], params[idx_list, 2]) / 2.2  # 2.2
+
+
+ def load_cell_model(fn_csv):
+     """
+     Load interpolation points of the mean field model from the specified CSV file.
+
+     Parameters
+     ----------
+     fn_csv : str
+         Fully qualified path to the CSV containing the interpolation points of the mean field model.
+
+     Returns
+     -------
+     interp : scipy.interpolate.LinearNDInterpolator
+         Threshold interpolator; the interpolation points 'theta' and 'gradient' (rel_grad)
+         it was built from are returned as second and third values.
+     """
+     cell_simulation_data = [np.genfromtxt(fn_csv, delimiter=',')]
+
+     thresholds = cell_simulation_data[-1][:, 2]
+     theta = cell_simulation_data[-1][:, 1]
+     rel_grad = cell_simulation_data[-1][:, 0]
+
+     return LinearNDInterpolator(list(zip(theta, rel_grad)), thresholds), theta, rel_grad
+
+
+ # TODO: implement the creation of a response interpolator
+ def _create_model_response_interpolator(fn_model_csv):
+     return LinearNDInterpolator([(0, 0), (0, 1), (1, 0), (1, 1)], [1, 1, 1, 1], fill_value=1)
+
+
+ def calc_e_threshold(layerid, theta, gradient=None, mep=None, neuronmodel="sensitivity_weighting",
+                      waveform="biphasic", e_thresh_subject=None):
+     """
+     Determine the electric field sensitivity map.
+
+     Parameters
+     ----------
+     layerid : str
+         Neocortical layer model to load data for. Available keys: "L23", "L4SBC", "L4NBC", "L4LBC", "L5".
+     theta : np.ndarray
+         (N_stim, N_ele) Theta angle (matrix) of the electric field with respect to the surface normal, in degrees [0 .. 180].
+     gradient : np.ndarray, optional
+         (N_stim, N_ele) Electric field gradient (matrix) between layer 1 and layer 6. Optional; the neuron mean field
+         model is more accurate when provided. Percent [-100 .. 100].
+     mep : np.ndarray of float, optional
+         (N_stim, ) MEP data (required for the "IOcurve" neuronmodel).
+     neuronmodel : str, default: 'sensitivity_weighting'
+         Neuron model used to modify the electric field values:
+
+         - "sensitivity_weighting": normalize threshold map and divide raw e-field by it.
+         - "threshold_subtract": subtract mean threshold from electric field.
+         - "threshold_binary": assign e-field a binary value to predict MEPs
+           (False -> below threshold, True -> above threshold).
+         - "IOcurve": subtract value read from precomputed neuron IO curve from electric field.
+         - "cosine": # TODO: document
+     waveform : str, default: 'biphasic'
+         Waveform of TMS pulse:
+
+         - "monophasic"
+         - "biphasic"
+     e_thresh_subject : float, optional
+         Subject-specific stimulation threshold in V/m (typically between 60 and 80 V/m).
+         Only used for "threshold_subtract" and "threshold_binary".
+
+     Returns
+     -------
+     e_sens : np.ndarray
+         (N_stim, N_ele) Electric field sensitivity maps.
+     """
+     # load neuron models
+     ####################################################################################################################
+     models_folder = os.path.join(pynibs.__datadir__, "neuron", "models")
+     interp_folder = os.path.join(pynibs.__datadir__, "neuron", "interpolators")
+     scaling_factor = None
+
+     if waveform == "monophasic":
+         models = {
+             "L23": os.path.join(models_folder, "L23_PC_cADpyr_monophasic_v1.csv"),
+             "L4SBC": os.path.join(models_folder, "L4_SBC_monophasic_v1.csv"),
+             "L4NBC": os.path.join(models_folder, "L4_NBC_monophasic_v1.csv"),
+             "L4LBC": os.path.join(models_folder, "L4_LBC_monophasic_v1.csv"),
+             "L5": os.path.join(models_folder, "L5_TTPC2_cADpyr_monophasic_v1.csv")
+         }
+
+         models_io = {
+             "L23": os.path.join(interp_folder, "L23_biphasic_recruitment_rate_interpolator_inverse.pkl"),
+             "L5": os.path.join(interp_folder, "L5_biphasic_recruitment_rate_interpolator_inverse.pkl")
+         }
+     elif waveform == "biphasic":
+         models = {
+             "L23": os.path.join(models_folder, "L23_PC_cADpyr_biphasic_v1.csv"),
+             "L4SBC": os.path.join(models_folder, "L4_SBC_biphasic_v1.csv"),
+             "L4NBC": os.path.join(models_folder, "L4_NBC_biphasic_v1.csv"),
+             "L4LBC": os.path.join(models_folder, "L4_LBC_biphasic_v1.csv"),
+             "L5": os.path.join(models_folder, "L5_TTPC2_cADpyr_biphasic_v1.csv")
+         }
+
+         models_io = {
+             "L23": os.path.join(interp_folder, "L23_biphasic_recruitment_rate_interpolator_inverse.pkl"),
+             "L5": os.path.join(interp_folder, "L5_biphasic_recruitment_rate_interpolator_inverse.pkl")
+         }
+     else:
+         raise NotImplementedError(f"Specified waveform {waveform} not implemented.")
+
+     if neuronmodel in ["threshold_subtract", "threshold_binary", "sensitivity_weighting", "cosine"]:
+         interp, thetas, rel_gradients = load_cell_model(models[layerid])
+
+         if neuronmodel in ["sensitivity_weighting", "cosine"]:
+             scaling_factor = interp(0, 0)
+             if e_thresh_subject is not None:
+                 print(f"e_thresh_subject={e_thresh_subject} is not used for neuronmodel={neuronmodel}")
+
+         elif e_thresh_subject is not None:
+             scaling_factor = np.mean(interp(np.linspace(0, 180, 181),
+                                             np.zeros(181))) / e_thresh_subject
+
+         else:
+             # Scaling factor between electric field thresholds of model and subject specific e-field thresholds.
+             # scaling_factor = e_threshold_model / e_threshold_subject
+             # Model thresholds are higher than subject specific thresholds (typically in a range between 2..3)
+             # average thresholds between 0 and 180° at E_grad = 0
+             scaling_factor = 1
+
+     elif neuronmodel == "IOcurve":
+         _, thetas, rel_gradients = load_cell_model(models[layerid])
+
+         # TODO: not implemented yet
+         if not os.path.exists(models_io[layerid]):
+             raise NotImplementedError("[neuron_regression] Pickle files containing the response interpolators "
+                                       f"do not exist (path checked: {models_io[layerid]}) and their creation "
+                                       "is not implemented yet.")
+             # interp = _create_model_response_interpolator(models[layerid])
+             # with open(models_io[layerid], 'wb') as f:
+             #     pickle.dump(interp, f)
+         else:
+             with open(models_io[layerid], 'rb') as f:
+                 interp = pickle.load(f)
+
+     else:
+         raise NotImplementedError(f"Specified neuronmodel {neuronmodel} not implemented.")
+
+     # bound observed values to min/max values available in the model
+     ####################################################################################################################
+     theta_bound = theta
+     theta_bound[np.where(theta > np.max(thetas))] = np.max(thetas)
+     theta_bound[np.where(theta < np.min(thetas))] = np.min(thetas)
+
+     if gradient is None:
+         gradient_bound = np.zeros(theta.shape)
+     else:
+         gradient_bound = gradient
+         gradient_bound[np.where(gradient > np.max(rel_gradients))] = np.max(rel_gradients)
+         gradient_bound[np.where(gradient < np.min(rel_gradients))] = np.min(rel_gradients)
+
+     # Determine approach specific effective electric field
+     ####################################################################################################################
+     if neuronmodel in ["threshold_subtract", "threshold_binary", "sensitivity_weighting"]:
+         e_thres = interp(theta_bound, gradient_bound) / scaling_factor
+
+     elif neuronmodel == "cosine":
+         e_thres = scaling_factor * 1 / np.abs(np.cos(theta_bound / np.pi))
+
+     elif neuronmodel == "IOcurve":
+         # normalize MEPs to the range [0, 1)
+         mep_threshold = 2
+         mep_cropped = copy.deepcopy(mep)
+         mep_cropped[mep > mep_threshold] = mep_threshold
+         mep_norm = mep_cropped / (mep_threshold * 1.05)
+
+         # calculate expected electric field at observed MEP
+         params = np.zeros((theta.shape[0] * theta.shape[1], 3))
+         params[:, 0] = gradient_bound.flatten()
+         params[:, 1] = theta_bound.flatten()
+         params[:, 2] = np.repeat(mep_norm, theta.shape[1])
+
+         idx = np.arange(params.shape[0])
+         idx_chunked = pynibs.compute_chunks(list(idx), multiprocessing.cpu_count())
+
+         pool = multiprocessing.Pool(multiprocessing.cpu_count())
+         workhorse_partial = partial(workhorse_interp, interp=interp, params=params)
+         res = np.hstack(pool.map(workhorse_partial, idx_chunked))
+         e_thres = np.reshape(res, theta.shape)
+         pool.close()
+         pool.join()
+
+     else:
+         raise NotImplementedError
+
+     return e_thres
+
+
+ def calc_e_effective(e, layerid, theta, gradient=None, neuronmodel="sensitivity_weighting", mep=None,
+                      waveform="biphasic", e_thresh_subject=None):
+     """
+     Determines the effective electric field using a neuron mean field model.
+     The threshold map (in V/m) is 'subtracted' from the electric field magnitude, yielding the
+     effective electric field (e_eff).
+
+     Parameters
+     ----------
+     e : np.ndarray
+         (N_stim, N_ele) Electric field (matrix).
+     layerid : str
+         Neocortical layer model to load data for. Available keys: "L23", "L4SBC", "L4NBC", "L4LBC", "L5".
+     theta : np.ndarray
+         (N_stim, N_ele) Theta angle (matrix) of the electric field with respect to the surface normal.
+     gradient : np.ndarray, optional
+         (N_stim, N_ele) Electric field gradient (matrix) between layer 1 and layer 6. Optional; the neuron mean field
+         model is more accurate when provided.
+     neuronmodel : str, default: 'sensitivity_weighting'
+         Neuron model used to modify the electric field values:
+
+         - "sensitivity_weighting": normalize threshold map and divide raw e-field by it.
+         - "threshold_subtract": subtract mean threshold from electric field.
+         - "threshold_binary": assign e-field a binary value to predict MEPs
+           (False -> below threshold, True -> above threshold).
+         - "IOcurve": subtract value read from precomputed neuron IO curve from electric field.
+     mep : np.ndarray of float [N_stim], optional
+         MEP data (required for the "IOcurve" neuronmodel).
+     waveform : str, default: 'biphasic'
+         Waveform of TMS pulse:
+
+         - "monophasic"
+         - "biphasic"
+     e_thresh_subject : float, optional
+         Subject-specific stimulation threshold in V/m (typically between 60 and 80 V/m).
+         Not used for "sensitivity_weighting" and "cosine".
+
+     Returns
+     -------
+     e_eff : np.ndarray
+         Effective electric field (matrix) [N_stim x N_ele] the regression analysis can be performed with.
+     """
+     # determine sensitivity map
+     e_thres = calc_e_threshold(layerid=layerid,
+                                theta=theta,
+                                gradient=gradient,
+                                neuronmodel=neuronmodel,
+                                mep=mep,
+                                waveform=waveform,
+                                e_thresh_subject=e_thresh_subject)
+
+     if neuronmodel == "threshold_subtract":
+         e_eff = e - e_thres
+     elif neuronmodel == "threshold_binary":
+         e_eff = e > e_thres
+     elif neuronmodel in ["sensitivity_weighting", "cosine"]:
+         e_eff = e / e_thres
+     else:
+         e_eff = np.zeros(e.shape)
+
+     return e_eff
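
For orientation, here is a minimal usage sketch for the calc_e_effective() entry point added above. The array shapes and values are synthetic; the packaged layer models (pynibs/data/neuron/models/*.csv listed in this diff) are resolved internally via pynibs.__datadir__, so the call assumes the wheel's data files are installed.

    import numpy as np
    from pynibs.neuron.neuron_regression import calc_e_effective

    n_stim, n_ele = 10, 500
    e = np.random.rand(n_stim, n_ele) * 100        # |E| per stimulation and element, V/m (synthetic)
    theta = np.random.rand(n_stim, n_ele) * 180    # angle to the surface normal, degrees (synthetic)

    # "sensitivity_weighting" divides |E| by the normalized threshold map of the L5 model
    e_eff = calc_e_effective(e=e, layerid="L5", theta=theta, gradient=None,
                             neuronmodel="sensitivity_weighting", waveform="biphasic")
    print(e_eff.shape)                             # (10, 500), same shape as e
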
pynibs/neuron/util.py ADDED
@@ -0,0 +1,58 @@
+ import pynibs
+ import numpy as np
+
+
+ def DI_wave(t, intensity, t0=5, dt=1.4, width=0.25):
+     """
+     Determines cortical D and I waves evoked by TMS.
+
+     Parameters
+     ----------
+     t : np.ndarray of float
+         (n_t) Time axis in ms.
+     intensity : float
+         Stimulator intensity w.r.t. resting motor threshold (typical range: [0 ... 2]).
+     t0 : float
+         Offset time.
+     dt : float
+         Spacing of waves in ms.
+     width : float
+         Width of waves.
+
+     Returns
+     -------
+     y : np.ndarray of float
+         (n_t) DI waves.
+     """
+     waves = ["D", "I1", "I2", "I3", "I4"]
+
+     x0 = dict()
+     x0["D"] = 1.6952640144480995
+     x0["I1"] = 1.314432218728424
+     x0["I2"] = 1.4421623825084195
+     x0["I3"] = 1.31643163560532
+     x0["I4"] = 1.747079479469914
+
+     amp = dict()
+     amp["D"] = 12.83042571812661 / 35.46534715796085
+     amp["I1"] = 35.46534715796085 / 35.46534715796085
+     amp["I2"] = 26.15109003222628 / 35.46534715796085
+     amp["I3"] = 15.491215097559184 / 35.46534715796085
+     amp["I4"] = 10.461195366965226 / 35.46534715796085
+
+     r = dict()
+     r["D"] = 13.945868670402973
+     r["I1"] = 8.707029476168504
+     r["I2"] = 7.02266347578131
+     r["I3"] = 16.74855628350182
+     r["I4"] = 17.85806255278076
+
+     y = np.zeros(len(t), dtype=np.float128)
+
+     for i, w in enumerate(waves):
+         y_ = np.exp(-(t - t0 - i * dt) ** 2 / (2 * width ** 2))
+         y_ = y_ / np.max(y_)
+         y_ = y_ * pynibs.expio.fit_funs.sigmoid(intensity, amp=amp[w], r=r[w], x0=x0[w])
+         y = y + y_
+
+     return y
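
A small sketch of how DI_wave() can be evaluated (synthetic time axis and intensity; note that the function allocates its output as np.float128, which is only available on platforms that expose an extended-precision float):

    import numpy as np
    from pynibs.neuron.util import DI_wave

    t = np.linspace(0, 20, 2001)       # time axis in ms
    y = DI_wave(t, intensity=1.2)      # 1.2 x resting motor threshold
    print(t[np.argmax(y)])             # the I1 wave (centred at t0 + dt = 6.4 ms) is expected to dominate here
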
pynibs/optimization/__init__.py ADDED
@@ -0,0 +1,5 @@
+ """Routines to compute optimal sets of electric fields."""
+ from .opt_mep import *
+ from .optimization import *
+ from .workhorses import *
+ from .multichannel import *
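
Because the subpackage re-exports everything from its submodules, the public helpers defined below are also reachable directly under pynibs.optimization. A quick sanity check (a sketch, assuming none of the submodules define a restrictive __all__ or shadow the name):

    from pynibs.optimization import optimize_currents
    print(optimize_currents.__module__)   # expected: 'pynibs.optimization.multichannel'
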
pynibs/optimization/multichannel.py ADDED
@@ -0,0 +1,278 @@
+ """
+ Functions to optimize single coil currents for multichannel TMS arrays.
+ """
+ import numpy as np
+ from scipy.optimize import minimize
+
+
+ def get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt=None, opt_target='elms'):
+     """
+     Compute score for e-field cross-correlations.
+     The non-normalized score is returned, so you need to do something like
+
+     score = 1 / ((n_ele ** 2 - n_ele) / 2) * get_score_raw()
+
+     Parameters
+     ----------
+     x : np.ndarray of float
+         (n_channel * n_stim_opt, ) Vector of channel scaling factors (currents) for each stimulation.
+     e : np.ndarray of float
+         (n_ele*3, n_channels) E-field.
+     n_stim : int
+         Number of stimulations to compute score for.
+     n_ele : int
+         Number of elements.
+     n_channel : int
+         Number of channels.
+     x_opt : np.ndarray of float, optional
+         (n_pre_opt,) Previously optimized channel currents.
+     opt_target : str, default: 'elms'
+         Optimization target. 'elms' for optimizing decorrelations of elements, 'stims' for stimulations.
+
+     Returns
+     -------
+     score : float
+         Non-normalized score np.nansum(np.abs(np.triu(np.corrcoef(e_mag), k=1))).
+     """
+     if x_opt is None:
+         currents_all_zaps_channels = np.reshape(x, (n_channel, n_stim))
+     else:
+         currents_all_zaps_channels = np.hstack((x_opt, np.reshape(x, (n_channel, n_stim - x_opt.shape[1]))))
+     # currents_all_zaps_channels.shape = (n_chans, n_zaps)
+
+     # determine total electric field (vector form)
+     e_vec = np.matmul(e, currents_all_zaps_channels)  # e_vec.shape = (n_elems*3, n_zaps)
+     # determine magnitude
+     # for e_ in e_vec.T:
+     #     a = np.linalg.norm(np.reshape(e_, (n_ele, 3)), axis=1)
+     #     # e_.shape = (n_ele*3,)
+     #     # np.reshape(e_, (n_ele, 3)).shape = (n_ele, 3)
+     #     # a.shape = (n_ele,)
+
+     if e.ndim == 2:
+         e_mag = np.vstack([np.linalg.norm(np.reshape(e_, (n_ele, 3)), axis=1) for e_ in e_vec.T]).T
+     elif e.ndim == 3:
+         e_mag = np.linalg.norm(e_vec, axis=1)
+     else:
+         raise ValueError
+     # e_mag.shape = (n_ele, n_zaps)
+
+     # determine average correlation coefficient
+     # r_avg = 1/((n_ele**2-n_ele)/2) * np.sum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+     if opt_target == 'elms':
+         r_sum = np.nansum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+     elif opt_target == 'stims':
+         r_sum = np.nansum(np.abs(np.triu(np.corrcoef(e_mag.T), k=1)))
+     else:
+         raise ValueError("opt_target has to be 'elms' or 'stims'")
+
+     return r_sum
+
+
+ def get_score_raw_single_channel(x, e, x_opt=None):
+     """
+     Compute score for e-field cross-correlations.
+     The non-normalized score is returned, so you need to do something like
+
+     score = 1 / ((n_ele ** 2 - n_ele) / 2) * get_score_raw_single_channel()
+
+     Parameters
+     ----------
+     x : np.ndarray of float
+         (n_placements, ) Selection of coil placements.
+     e : np.ndarray of float
+         (n_placements, n_elms) E-field magnitudes per coil placement.
+     x_opt : np.ndarray of float, optional
+         (n_pre_opt,) Previously selected coil placements.
+
+     Returns
+     -------
+     score : float
+         Non-normalized score np.nansum(np.abs(np.triu(np.corrcoef(e_mag.T), k=1))).
+     """
+     # x = ((x / x.max()) * e.shape[0]-1).astype(int)
+     if x_opt is not None:
+         x = np.hstack((x_opt, x))
+     x = x.astype(int)
+     e_mag = e[x, :]
+
+     # determine average correlation coefficient
+     # r_avg = 1/((n_ele**2-n_ele)/2) * np.sum(np.abs(np.triu(np.corrcoef(e_mag), k=1)))
+     # a = np.abs(np.triu(np.corrcoef(e_mag.T), k=1))
+
+     r_avg = np.nansum(np.abs(np.triu(np.corrcoef(e_mag.T), k=1)))
+     return r_avg
+
+
+ def optimize_currents(e, n_stim, currents_prev=None, seed=None,
+                       maxiter=200, method='SLSQP', opt_target='elms', verbose=False):
+     """
+     Optimize the currents for a multichannel TMS array by minimizing the e-field cross-correlation.
+
+     Parameters
+     ----------
+     e : np.ndarray of float
+         (n_elms * 3, n_channel) or (n_elms, 3, n_channel). E in ROI for currents = 1.
+     n_stim : int
+         Number of stimulations.
+     currents_prev : np.ndarray of float, optional
+         (n_channels, n_stims_prev) Previous currents to append to.
+     seed : int, optional
+         Seed for the random number generator.
+     maxiter : int, default: 200
+         Maximum number of iterations of the optimization.
+     method : str, default: 'SLSQP'
+         Optimization method.
+     opt_target : str, default: 'elms'
+         Optimization target. 'elms' for optimizing decorrelations of elements, 'stims' for stimulations.
+     verbose : bool, default: False
+         Print additional information.
+
+     Returns
+     -------
+     currents : np.ndarray
+         (n_channels, n_stims) The optimized currents to drive the multichannel array.
+     score : float
+         Final score of the solution (the scipy.optimize.OptimizeResult is returned as third value).
+     """
+     if e.ndim == 2:
+         n_channel = e.shape[1]
+         n_ele = int(e.shape[0] / 3)
+     elif e.ndim == 3:
+         n_channel = e.shape[2]
+         n_ele = int(e.shape[0])
+     else:
+         raise ValueError
+
+     if currents_prev is None:
+         n_stim_opt = n_stim
+     else:
+         n_stim_opt = n_stim - currents_prev.shape[1]
+
+     if n_stim_opt <= 0:
+         raise ValueError("n_stim has to be larger than the number of already optimized stimulations!")
+
+     # initial guess for currents for all channels * stimulations
+     if seed is not None:
+         np.random.seed(seed)
+     x0 = (np.random.rand(n_channel * n_stim_opt) * 2) - 1
+     # print(x0[:5])
+     if verbose:
+         print(f"n_ele: {n_ele}, n_channels: {n_channel}, n_stims: {n_stim}")
+
+     # optimization algorithm
+     res = minimize(get_score_raw,
+                    args=(e, n_stim, n_ele, n_channel, currents_prev, opt_target),
+                    x0=x0,
+                    method=method,
+                    options={'disp': False, 'maxiter': maxiter},
+                    bounds=[(-1, 1) for _ in range(len(x0))],
+                    tol=1e-6)
+     # print(res.fun, res.success, res.message)
+
+     if currents_prev is None:
+         currents = np.reshape(res.x, (n_channel, n_stim))
+     else:
+         currents = np.hstack((currents_prev, np.reshape(res.x, (n_channel, n_stim - currents_prev.shape[1]))))
+     if opt_target == 'elms':
+         score = 1 / ((n_ele ** 2 - n_ele) / 2) * res.fun
+     elif opt_target == 'stims':
+         score = 1 / ((n_stim ** 2 - n_stim) / 2) * res.fun
+     else:
+         raise ValueError("opt_target has to be 'elms' or 'stims'")
+
+     return currents, score, res
+
+
+ def optimize_currents_single_channel(e, n_stim, currents_prev=None, seed=None,
+                                      maxiter=200, method='SLSQP', verbose=False):
+     """
+     Optimize the coil placement selection for a single-channel e-field set by minimizing the e-field cross-correlation.
+
+     Parameters
+     ----------
+     e : np.ndarray of float
+         (n_placements, n_elms) E-field magnitudes per coil placement.
+     n_stim : int
+         Number of stimulations.
+     currents_prev : np.ndarray of float, optional
+         (n_stims_prev,) Previously selected coil placements to append to.
+     seed : int, optional
+         Seed for the random number generator.
+     maxiter : int, default: 200
+         Maximum number of iterations of the optimization.
+     method : str, default: 'SLSQP'
+         Optimization method.
+     verbose : bool, default: False
+         Print additional information.
+
+     Returns
+     -------
+     placements : np.ndarray of float
+         (n_stim_opt,) Optimized coil placement indices (continuous values; cast to int when scoring).
+     score : float
+         Final score of the solution.
+     """
+     n_placements = e.shape[0]
+     n_ele = e.shape[1]
+
+     if currents_prev is None:
+         n_stim_opt = n_stim
+     else:
+         n_stim_opt = n_stim - currents_prev.shape[0]
+
+     if n_stim_opt <= 0:
+         raise ValueError("n_stim has to be larger than the number of already optimized stimulations!")
+
+     # initial guess for the coil placement selection
+     if seed is not None:
+         np.random.seed(seed)
+     x0 = np.random.randint(0, n_placements, n_stim_opt)
+     if verbose:
+         print(f"n_ele: {n_ele}, n_placements: {n_placements}, n_stims: {n_stim}")
+
+     # optimization algorithm
+     res = minimize(get_score_raw_single_channel,
+                    args=(e, currents_prev),
+                    x0=x0,
+                    method=method,
+                    options={'disp': False, 'maxiter': maxiter},
+                    bounds=[(0, n_placements) for _ in range(len(x0))])
+
+     # if currents_prev is None:
+     #     currents = np.reshape(res.x, (n_placements, n_stim))
+     # else:
+     #     currents = np.hstack((currents_prev, np.reshape(res.x, (n_placements, n_stim - currents_prev.shape[1]))))
+
+     score = 1 / ((n_ele ** 2 - n_ele) / 2) * res.fun
+     # score = res.fun
+
+     return res.x, score
+
+
+ def get_score(x, e, n_stim, n_ele, n_channel, x_opt=None):
+     """
+     Normalize the score by the number of elements.
+
+     Parameters
+     ----------
+     x : np.ndarray of float
+         (n_channel * n_stim_opt, ) Vector of channel scaling factors (currents) for each stimulation.
+     e : np.ndarray of float
+         (n_ele*3, n_channels) E-field.
+     n_stim : int
+         Number of stimulations to compute score for.
+     n_ele : int
+         Number of elements.
+     n_channel : int
+         Number of channels.
+     x_opt : np.ndarray of float, optional
+         (n_pre_opt,) Previously optimized channel currents.
+
+     Returns
+     -------
+     score : float
+         The normalized score.
+     """
+     score_raw = get_score_raw(x, e, n_stim, n_ele, n_channel, x_opt)
+     return 1 / ((n_ele ** 2 - n_ele) / 2) * score_raw
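
Finally, a self-contained sketch of optimize_currents() on random data (3 coils, 5 stimulations, 100 ROI elements). Shapes follow the docstring above; everything else is made up for illustration.

    import numpy as np
    from pynibs.optimization.multichannel import optimize_currents

    n_ele, n_channel, n_stim = 100, 3, 5
    e = np.random.rand(n_ele * 3, n_channel)       # per-channel E-field for unit currents, (x, y, z) stacked per element

    currents, score, res = optimize_currents(e, n_stim=n_stim, seed=1, maxiter=50)
    print(currents.shape)                          # (n_channel, n_stim) = (3, 5)
    print(score)                                   # mean |corr| between element-wise |E| profiles; lower is better
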