pyNIBS-0.2024.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyNIBS-0.2024.8.dist-info/LICENSE +623 -0
- pyNIBS-0.2024.8.dist-info/METADATA +723 -0
- pyNIBS-0.2024.8.dist-info/RECORD +107 -0
- pyNIBS-0.2024.8.dist-info/WHEEL +5 -0
- pyNIBS-0.2024.8.dist-info/top_level.txt +1 -0
- pynibs/__init__.py +34 -0
- pynibs/coil.py +1367 -0
- pynibs/congruence/__init__.py +15 -0
- pynibs/congruence/congruence.py +1108 -0
- pynibs/congruence/ext_metrics.py +257 -0
- pynibs/congruence/stimulation_threshold.py +318 -0
- pynibs/data/configuration_exp0.yaml +59 -0
- pynibs/data/configuration_linear_MEP.yaml +61 -0
- pynibs/data/configuration_linear_RT.yaml +61 -0
- pynibs/data/configuration_sigmoid4.yaml +68 -0
- pynibs/data/network mapping configuration/configuration guide.md +238 -0
- pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +42 -0
- pynibs/data/network mapping configuration/configuration_for_testing.yaml +43 -0
- pynibs/data/network mapping configuration/configuration_modelTMS.yaml +43 -0
- pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +43 -0
- pynibs/data/network mapping configuration/output_documentation.md +185 -0
- pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +77 -0
- pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +1281 -0
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +1281 -0
- pynibs/expio/Mep.py +1518 -0
- pynibs/expio/__init__.py +8 -0
- pynibs/expio/brainsight.py +979 -0
- pynibs/expio/brainvis.py +71 -0
- pynibs/expio/cobot.py +239 -0
- pynibs/expio/exp.py +1876 -0
- pynibs/expio/fit_funs.py +287 -0
- pynibs/expio/localite.py +1987 -0
- pynibs/expio/signal_ced.py +51 -0
- pynibs/expio/visor.py +624 -0
- pynibs/freesurfer.py +502 -0
- pynibs/hdf5_io/__init__.py +10 -0
- pynibs/hdf5_io/hdf5_io.py +1857 -0
- pynibs/hdf5_io/xdmf.py +1542 -0
- pynibs/mesh/__init__.py +3 -0
- pynibs/mesh/mesh_struct.py +1394 -0
- pynibs/mesh/transformations.py +866 -0
- pynibs/mesh/utils.py +1103 -0
- pynibs/models/_TMS.py +211 -0
- pynibs/models/__init__.py +0 -0
- pynibs/muap.py +392 -0
- pynibs/neuron/__init__.py +2 -0
- pynibs/neuron/neuron_regression.py +284 -0
- pynibs/neuron/util.py +58 -0
- pynibs/optimization/__init__.py +5 -0
- pynibs/optimization/multichannel.py +278 -0
- pynibs/optimization/opt_mep.py +152 -0
- pynibs/optimization/optimization.py +1445 -0
- pynibs/optimization/workhorses.py +698 -0
- pynibs/pckg/__init__.py +0 -0
- pynibs/pckg/biosig/biosig4c++-1.9.5.src_fixed.tar.gz +0 -0
- pynibs/pckg/libeep/__init__.py +0 -0
- pynibs/pckg/libeep/pyeep.so +0 -0
- pynibs/regression/__init__.py +11 -0
- pynibs/regression/dual_node_detection.py +2375 -0
- pynibs/regression/regression.py +2984 -0
- pynibs/regression/score_types.py +0 -0
- pynibs/roi/__init__.py +2 -0
- pynibs/roi/roi.py +895 -0
- pynibs/roi/roi_structs.py +1233 -0
- pynibs/subject.py +1009 -0
- pynibs/tensor_scaling.py +144 -0
- pynibs/tests/data/InstrumentMarker20200225163611937.xml +19 -0
- pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +14 -0
- pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +6373 -0
- pynibs/tests/data/Xdmf.dtd +89 -0
- pynibs/tests/data/brainsight_niiImage_nifticoord.txt +145 -0
- pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +1434 -0
- pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +47 -0
- pynibs/tests/data/create_subject_testsub.py +332 -0
- pynibs/tests/data/data.hdf5 +0 -0
- pynibs/tests/data/geo.hdf5 +0 -0
- pynibs/tests/test_coil.py +474 -0
- pynibs/tests/test_elements2nodes.py +100 -0
- pynibs/tests/test_hdf5_io/test_xdmf.py +61 -0
- pynibs/tests/test_mesh_transformations.py +123 -0
- pynibs/tests/test_mesh_utils.py +143 -0
- pynibs/tests/test_nnav_imports.py +101 -0
- pynibs/tests/test_quality_measures.py +117 -0
- pynibs/tests/test_regressdata.py +289 -0
- pynibs/tests/test_roi.py +17 -0
- pynibs/tests/test_rotations.py +86 -0
- pynibs/tests/test_subject.py +71 -0
- pynibs/tests/test_util.py +24 -0
- pynibs/tms_pulse.py +34 -0
- pynibs/util/__init__.py +4 -0
- pynibs/util/dosing.py +233 -0
- pynibs/util/quality_measures.py +562 -0
- pynibs/util/rotations.py +340 -0
- pynibs/util/simnibs.py +763 -0
- pynibs/util/util.py +727 -0
- pynibs/visualization/__init__.py +2 -0
- pynibs/visualization/para.py +4372 -0
- pynibs/visualization/plot_2D.py +137 -0
- pynibs/visualization/render_3D.py +347 -0
@@ -0,0 +1,1445 @@
"""
The `optimization.py` module provides functions for optimizing coil placements in Transcranial Magnetic Stimulation
(TMS) based on given electric field matrices and fMRI statistics. It includes functions for identifying optimal coil
placement regions, calculating the gain map between a reference and an optimized sequence of electric fields,
and performing virtual online optimization to determine the congruence factor.

The module includes the following functions:

- `rowvec_diff()`: Returns the coil configuration out of all available configurations exhibiting the
  highest minimum difference to the already selected configurations.

- `get_optimal_coil_positions()`: Determines a set of optimal coil positions for TMS regression analysis.

- `online_optimization()`: Performs virtual online optimization to determine the congruence factor.
  After an initial set of coil positions, the algorithm iteratively optimizes the next coil position based on the
  virtually measured MEP data.

- `calc_opt_gain_map()`: Calculates the gain map between a reference e_matrix (e.g. from random
  sampling) and an optimized sequence of electric fields for mapping.

- `optimal_coilplacement_region()`: Identifies the optimal coil placement regions based on given
  electric field (E-field) matrices and fMRI statistics.

Each function in this module is documented with a docstring providing more detailed information about its purpose,
parameters, and return values.

This module is primarily used for handling and optimizing coil placements in TMS studies.
"""
import os
import h5py
import numpy as np
import multiprocessing
from functools import partial
from matplotlib import pyplot as plt

import pynibs
# needed for the bare "workhorses." references used throughout this module
from pynibs.optimization import workhorses


def rowvec_diff(candidate_coil_idcs, selected_coil_idcs, efields_diff_mat):
    """
    Given a difference matrix (e.g. of row vectors/coil configurations), this function
    returns the coil configuration out of all available configurations exhibiting the
    highest minimum difference to the already selected configurations.

    :param candidate_coil_idcs: np.ndarray[int]
        List of indices of coil configurations that are still available to pick for the optimized sequence.
    :param selected_coil_idcs: np.ndarray[int]
        List of indices of coil configurations that have already been selected for the optimized sequence.
    :param efields_diff_mat: np.ndarray[float], [n_coil, n_coil]
        Difference matrix, where each cell denotes the magnitude of the difference vector between
        two coil configurations (determined by row_idx, col_idx).

    Returns
    -------
    coil_idx : int
        Index (into ``candidate_coil_idcs``) of the coil configuration with the maximal minimal difference
        to the set of already selected coil configurations.
    max_min_diff : float
        The corresponding maximal minimal difference value.
    """

    # min_diff_selected_to_all_coil_pos is computed from the sub-matrix with:
    # rows    -> set of selected coil configurations (selected_coil_idcs)
    # columns -> available coil configurations for optimization (candidate_coil_idcs)
    min_diff_selected_to_all_coil_pos = np.min(efields_diff_mat[selected_coil_idcs][:, candidate_coil_idcs], axis=0)

    # the returned index refers to a position in "candidate_coil_idcs" (the caller's "idx_list")
    return np.argmax(min_diff_selected_to_all_coil_pos), np.max(min_diff_selected_to_all_coil_pos)
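
# A toy example of the selection rule above (values invented purely for illustration):
# with the symmetric difference matrix
#
#   diff = np.array([[0., 5., 1., 4.],
#                    [5., 0., 3., 2.],
#                    [1., 3., 0., 6.],
#                    [4., 2., 6., 0.]])
#
# and selected_coil_idcs = [0, 1], candidate_coil_idcs = [2, 3], the per-candidate minimum
# differences to the selected set are [1., 2.], so rowvec_diff(...) returns (1, 2.0):
# the second candidate (coil 3) is the farthest, in the max-min sense, from what was already chosen.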


def get_optimal_coil_positions(
        e_matrix,
        criterion,
        n_stim,
        ele_idx_1=None,
        ele_idx_2=None,
        fn_out_hdf5=None,
        n_cpu=4,
        zap_idx_opt=None,
        regression_cmap=None,
        regression_fit_parameters=None,
        metrics_weights=None,
        overwrite=True,
        verbose=True,
        fn_coilpos_hdf5=None,
        start_zap_idx=-1,
        fim_fit_fun=None,
        fim_p2p_amps=None,
        fim_didt_list=None,
        fim_rmt_mso=None,
        fim_mso_didt_conversion_factor=1.43,
        fim_visited_positions_e_mat=None,
        fim_regression_n_refit=10,
        fim_debug_screenshot_dir_fn=None,
        fim_roi_pts=None,
        fim_roi_tris=None,
        fim_use_gpu=False):
    """
    Determine a set of optimal coil positions for TMS regression analysis.

    Parameters
    ----------
    e_matrix : np.ndarray of float
        (n_stim, n_ele) Matrix containing the electric field values in the ROI.
    criterion : str
        Optimization criterion:

        * "mc_cols": Minimization of mutual coherence between columns
        * "mc_rows": Minimization of mutual coherence between rows
        * "svd": Minimization of condition number
        * "dist": Equidistant sampling
        * "dist_svd": Minimization of condition number and equidistant sampling
        * "dist_mc_cols": Minimization of mutual coherence between columns and equidistant sampling
        * "dist_mc_rows": Minimization of mutual coherence between rows and equidistant sampling
        * "coverage": Maximizes the electric field coverage
        * "variability": Maximizes variability between elements
        * "rowvec_diff": Maximizes the minimal e-field difference between the selected coil configurations
        * "fim", "fim_svd", "fim_mc_cols", "fim_mc_rows", "fim_correlation": FIM-based selection of the next
          coil configuration (requires the ``fim_*`` parameters below)

    n_stim : int
        Maximum number of stimulations.
    ele_idx_1 : np.ndarray of int, optional
        Element indices the first optimization goal is performed for. If None, all elements are considered.
    ele_idx_2 : np.ndarray of int, optional
        Element indices the second optimization goal is performed for. If None, all elements are considered.
    n_cpu : int
        Number of threads.
    fn_out_hdf5 : str, optional
        If None, the list of optimal zap indices is returned; otherwise, the results are saved in this .hdf5 file.
        The zap index lists are stored in the subfolder "zap_index_list":

        * "zap_index_list/0": [213]
        * "zap_index_list/1": [213, 5]
        * etc.

    zap_idx_opt : list of int, optional
        List of already selected optimal coil positions
        (these are ignored in the optimization and will not be picked again).
    fim_fit_fun : function object
        Function object defined in the interval [0, 1] (only needed for FIM optimization).
    regression_fit_parameters : dict [n_ele], optional
        The parameter estimates that should be used for the FIM optimization (whole ROI).
        The keys are the parameter names of fun
        (only needed for fim and dist optimization).
    regression_cmap : np.ndarray of float [n_ele], optional
        Congruence factor in each ROI element. Used to weight fim and dist optimization
        (only needed for fim and dist optimization).
    metrics_weights : list of float [2], default: [0.5, 0.5]
        Weights of the optimization criteria in case of multiple goal functions (e.g. fim_svd).
        A higher weight means higher importance for the respective criterion.
        By default both optimization criteria are weighted equally [0.5, 0.5].
    overwrite : bool, default: True
        Overwrite existing solutions or read the existing hdf5 file and continue the optimization.
    verbose : bool, default: True
        Print output messages.
    fn_coilpos_hdf5 : str
        File containing the corresponding coil positions and orientations (centers, m0, m1, m2).
    start_zap_idx : int, default: -1
        First zap index to start the greedy search from.
    fim_rmt_mso : int
        Resting motor threshold used as the lower boundary of the FIM optimal e-field scaling (in %MSO).
        Not required for any other metric than FIM.
    fim_mso_didt_conversion_factor : float, default: 1.43
        Factor to convert between realized current (dI/dt) and percentage of maximum stimulator output (%MSO).
        Defaults to 1.43, describing the factor of a MagVenture Pro with an MCF-B65 coil.
        Not required for any other metric than FIM.
    fim_visited_positions_e_mat : np.ndarray[float], (len(zap_idx_opt), n_ele), optional
        The e-field matrix computed using the actually approached coil configurations.
        This is intended to make the FIM method more precise by taking into account slight deviations in the
        approached coil configurations instead of relying on the static pre-computed coil configurations.
        Not required for any other metric than FIM.
    fim_p2p_amps : np.ndarray[float], (len(zap_idx_opt))
        EMG peak-to-peak amplitudes associated with the already collected (optimal) coil positions.
        Not required for any other metric than FIM.
    fim_didt_list : np.ndarray[float], (len(zap_idx_opt))
        Realized current (dI/dt) in the TMS coil (as returned by the stimulator) for each of the already
        stimulated (optimal) coil configurations in ``zap_idx_opt``.
        Not required for any other metric than FIM.
    fim_regression_n_refit : int, default: 10
        Number of refits used in the mag(E) <> p2p regression during FIM optimization.
        Not required for any other metric than FIM.
    fim_debug_screenshot_dir_fn : str
        Fully qualified path of a directory where a 3D rendering of the FIM optimal coil positions
        (i.e. coil positions that can reach the FIM optimal e-field strength at the current target hotspot
        given the MSO bounds [fim_rmt_mso, 100]) should be saved.
        Not required for any other metric than FIM.
    fim_roi_pts : np.ndarray, (n_points x 3)
        Points (vertices) of the ROI surface mesh (where the congruence scores should be computed).
        Not required for any other metric than FIM.
    fim_roi_tris : np.ndarray, (n_tris x 3)
        Connectivity list of 'fim_roi_pts'.
        Not required for any other metric than FIM.
    fim_use_gpu : bool
        True: Use cupy and CUDA acceleration for the computation of the correlation matrix.
        False: Use Python multiprocessing for the computation of the correlation matrix.
        Not required for any other metric than FIM.

    Returns
    -------
    zap_idx_e_opt : list of int
        (n_stim) Optimal zap indices.
    crit : np.ndarray of float
        (n_stim) Value of the optimization criterion at each selection step.
    next_optimal_stim_mso : float, optional
        Suggested stimulator intensity in %MSO for the next zap (only returned for "fim*" criteria).
    <File> .hdf5 file
        Output file containing the zap index lists.
    """
    if zap_idx_opt is not None and fn_out_hdf5 is not None and (os.path.exists(fn_out_hdf5) and not overwrite):
        raise ValueError("zap_idx_opt and fn_out_hdf5 given... please choose whether to load optimal zap indices "
                         "from file or to provide them explicitly as a list")

    e_all_coil_pos = e_matrix

    if ele_idx_1 is None:
        ele_idx_1 = np.arange(e_all_coil_pos.shape[1])

    if ele_idx_2 is None:
        ele_idx_2 = np.arange(e_all_coil_pos.shape[1])

    # Normalize congruence map as it is used to weight the scores of some metrics.
    if regression_cmap is not None:
        regression_cmap_normalized = regression_cmap / np.max(regression_cmap)

    # value of the associated optimization metric
    crit = np.zeros(n_stim)

    # Position of the last optimal coil idx in the result list.
    # Greedy search will continue from 'idx_of_last_result + 1' until 'n_stim' configurations have been found.
    idx_of_last_result = -1 if start_zap_idx == -1 else 0

    # Out of the set of all available indices, which indices should be considered for the sequence of optimal configs.
    idcs_to_check = list(range(0, e_all_coil_pos.shape[0]))

    # Initialize the list of indices valid for rows of 'e_matrix'
    # (= coil configurations identified as optimal configurations)
    zap_idx_e_opt = [-1 for _ in range(n_stim)]
    zap_idx_e_opt[0] = start_zap_idx

    # Adopt the predefined set of optimal coil positions as 'zap_idx_e_opt'
    if zap_idx_opt is not None:
        num_visited_configurations = len(zap_idx_opt)
        idx_of_last_result = num_visited_configurations - 1
        zap_idx_e_opt[:num_visited_configurations] = zap_idx_opt
        for idx in zap_idx_opt:
            idcs_to_check.remove(idx)

        # set the next to-be-determined position to 'start_zap_idx'
        zap_idx_e_opt[idx_of_last_result + 1] = start_zap_idx

    # load position and orientation vectors of all available coil configurations
    if fn_coilpos_hdf5 is not None:
        with h5py.File(fn_coilpos_hdf5, "r") as f:
            centers = f["centers"][:]
            m0 = f["m0"][:]
            m1 = f["m1"][:]
            m2 = f["m2"][:]

    pool = multiprocessing.Pool(n_cpu)

    # load already present optimization results
    if fn_out_hdf5 is not None:
        if not overwrite and os.path.exists(fn_out_hdf5):
            with h5py.File(fn_out_hdf5, "r") as f:

                idx_of_last_result = 0

                try:
                    keys = f[f"zap_index_list"].keys()
                    crit = f["criterion"][:]

                    for k in keys:
                        zap_idx_e_opt[int(k)] = list(f[f"zap_index_list/{k}"][:])

                        if int(k) > idx_of_last_result:
                            idx_of_last_result = int(k)

                    # check if the loaded sequence starts with the same zap index as intended
                    if zap_idx_e_opt[0] != start_zap_idx:
                        idx_of_last_result = 0
                        zap_idx_e_opt = [-1 for _ in range(n_stim)]
                        zap_idx_e_opt[0] = start_zap_idx
                        if verbose:
                            print(f"Loaded sequence does not start with specified start idx (restarting optimization)")
                            print(f"=================================================================================")
                    else:
                        if verbose:
                            print(f"Loading optimal index set for n={idx_of_last_result + 1}")
                            print(f"====================================")

                        for idx in zap_idx_e_opt:
                            idcs_to_check.remove(idx)
                except KeyError:
                    pass

    if criterion == "rowvec_diff":
        # Preparation for the rowvec difference metric is independent of the number of requested stimulations
        # (n_stim): compute the difference matrix.
        workhorse_prepare = partial(pynibs.optimization.workhorses.rowvec_diff_prepare,
                                    array=e_all_coil_pos, ele_idx_1=ele_idx_1)
        coil_idx_list_chunks = pynibs.compute_chunks(list(range(e_all_coil_pos.shape[0])), n_cpu)
        res = pool.map(workhorse_prepare, coil_idx_list_chunks)

        # sum all difference matrices up = create the upper-triangle difference matrix
        efields_diff = np.sum(res, axis=0)

        efields_diff_mat_full = efields_diff + np.transpose(efields_diff)

        if n_stim >= 2:
            if idx_of_last_result < 1:
                # Search the coil position sequence yielding the highest difference score.
                # Use the global maximum as the initial pair of positions.
                zap_idx_e_opt[:2] = list(
                    np.unravel_index(np.argmax(efields_diff_mat_full), efields_diff_mat_full.shape))
                crit[:2] = np.max(efields_diff_mat_full)

                # remove the identified optimal coil configurations
                # from the list of available/to-be-checked configurations
                idcs_to_check.remove(zap_idx_e_opt[0])
                idcs_to_check.remove(zap_idx_e_opt[1])

                # We completed two positions: #0, #1
                idx_of_last_result = 1
        else:
            print("[WARN] get_optimal_coil_positions: A minimum of n_stim=2 stimulations is required "
                  "for the metric 'rowvec_diff'.")

    workhorse_partial = None
    for idx_in_result_list in range(idx_of_last_result + 1, n_stim):
        if verbose:
            # if idx_of_last_result == 0:
            #     print(f"Initializing greedy algorithm for n={idx_in_result_list}")
            #     print(f"=====================================")
            #     print(f" >>> Chosen index: {start_zap_idx}")

            print(f"Calculating optimal idx for n={idx_in_result_list + 1}")
            print(f"==================================")

        # preparatory functions of some of the result metrics
        if criterion == "coverage":
            workhorse_prepare = partial(workhorses.coverage_prepare, array=e_all_coil_pos,
                                        zap_idx=zap_idx_e_opt[:idx_in_result_list - 1])
            ele_idx_list_chunks = pynibs.compute_chunks([j for j in range(e_all_coil_pos.shape[1])], n_cpu)
            res = pool.map(workhorse_prepare, ele_idx_list_chunks)

            x = np.zeros((1, 1))
            y = np.zeros((1, 1))

            for j in range(len(res)):
                if j == 0:
                    x = res[0][0]
                    y = res[0][1]
                else:
                    x = np.hstack((x, res[j][0]))
                    y = np.hstack((y, res[j][1]))

            workhorse_partial = partial(workhorses.coverage, array=e_all_coil_pos, x=x, y=y)
        # Preprocessing of metrics combined with FIM:
        # - determine the required %MSO for each candidate location via FIM
        # - determine valid candidate locations that can achieve a FIM optimal mag(E) within the valid range of
        #   the stimulator output intensity, %MSO in [RMT, 100]
        elif "fim" in criterion:
            # Sanity checks first
            if n_stim - len(zap_idx_opt) > 1:
                raise ValueError("[Error] get_optimal_coil_positions: Cannot compute more than one new coil "
                                 "configuration with the FIM optimization criterion due to lacking feedback "
                                 "from future coil configurations.")

            if len(zap_idx_opt) != len(fim_didt_list) or len(zap_idx_opt) != len(fim_p2p_amps) or \
                    fim_visited_positions_e_mat is not None and len(fim_visited_positions_e_mat) != len(zap_idx_e_opt):
                raise ValueError("[Error] get_optimal_coil_positions: The number of provided optimized positions, "
                                 "the number of e-fields of the visited positions, the number of dI/dt values and "
                                 "the number of p2p amplitudes must be the same, "
                                 "reflecting the number of already visited positions.")

            if fim_rmt_mso is None:
                raise ValueError("[Error] get_optimal_coil_positions: requested FIM optimal sampling without "
                                 "specifying a resting motor threshold (fim_rmt_mso).")

            if fim_fit_fun == pynibs.sigmoid4 or fim_fit_fun == pynibs.sigmoid4_log:
                default_sigmoid_params = {
                    "x0": 0.5,
                    "y0": 1e-10,
                    "r": 12.5,
                    "amp": 1
                }
            else:
                raise AttributeError("[Error] Unsupported function type requested for FIM optimization. "
                                     "Currently 'sigmoid4' and 'sigmoid4_log' are supported.")

            # convert between arbitrarily shaped sigmoids and the standard sigmoid defined by the parameters above
            def to_std_sigmoid(x, params_real_data, params_ideal_sigmoid):
                return (params_real_data["r"] / params_ideal_sigmoid["r"]
                        * (x - params_real_data["x0"])
                        + params_ideal_sigmoid["x0"])

            def from_std_sigmoid(x, params_real_data, params_ideal_sigmoid):
                return (params_ideal_sigmoid["r"] / params_real_data["r"]
                        * (x - params_ideal_sigmoid["x0"])
                        + params_real_data["x0"])
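
            # The two helpers implement the affine map x_std = (r_data / r_std) * (x - x0_data) + x0_std and its
            # inverse: an e-field value on the subject-specific sigmoid (params_real_data) is expressed on the
            # reference sigmoid defined by 'default_sigmoid_params' (x0=0.5, r=12.5) and mapped back again.
            # For example, with x0_data=40 and r_data=25: to_std_sigmoid(40) -> 0.5 and from_std_sigmoid(0.5) -> 40.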

            # Normalize the already selected e-fields to the argument space of the standard sigmoid [0, 1].
            if fim_didt_list is None:
                didt_list = np.ones(idx_in_result_list)
            else:
                didt_list = fim_didt_list

            e_optimal_positions = e_all_coil_pos[zap_idx_e_opt[:idx_in_result_list], :] \
                if fim_visited_positions_e_mat is None else fim_visited_positions_e_mat

            max_r2_ele_idx = np.argmax(regression_cmap_normalized)

            e_realized_in_hotspot = np.multiply(
                e_optimal_positions[:, max_r2_ele_idx],
                didt_list
            )

            # compute sigmoid4 fit parameters in the hotspot if they were not provided externally
            if regression_fit_parameters is None:
                e_realized_in_hotspot_np = np.zeros((e_realized_in_hotspot.shape[0], 1))
                e_realized_in_hotspot_np[:, 0] = e_realized_in_hotspot

                _, regression_sigmoid_fitparams = pynibs.regress_data(
                    e_matrix=e_realized_in_hotspot_np,
                    mep=fim_p2p_amps,
                    fun=fim_fit_fun,
                    n_refit=fim_regression_n_refit,
                    n_cpu=n_cpu,
                    return_fits=True,
                    refit_discontinuities=False
                )[0]
            else:
                regression_sigmoid_fitparams = regression_fit_parameters

            e_in_hotspot_normalized_to_std_sigmoid = to_std_sigmoid(
                e_realized_in_hotspot,
                params_real_data=regression_sigmoid_fitparams,
                params_ideal_sigmoid=default_sigmoid_params
            )
            e_in_hotspot_normalized_to_std_sigmoid = np.clip(e_in_hotspot_normalized_to_std_sigmoid, 0, 1)

            import time

            # Determine the optimal e-field intensity for the next zap for each ROI element.
            t0 = time.time()
            e_opt_in_hotspot_on_std_sigmoid = pynibs.get_optimal_sample_fim(
                fun=fim_fit_fun,
                x=e_in_hotspot_normalized_to_std_sigmoid,
                p=default_sigmoid_params
            )
            print(f"Elapsed time FIM {round((time.time() - t0) * 10) / 10}")

            t0 = time.time()
            # Undo the e-field normalization: e_opt contains the optimal mag(E) for each ROI element in 'ele_idx_1'
            e_opt_real_in_hotspot = from_std_sigmoid(
                e_opt_in_hotspot_on_std_sigmoid,
                params_real_data=regression_sigmoid_fitparams,
                params_ideal_sigmoid=default_sigmoid_params
            )

            # Determine the required %MSO for the candidate coil configurations.
            e_candidates = e_all_coil_pos[idcs_to_check, :]

            # compute the current the stimulator would need to realize (dI/dt) to achieve
            # 'e_opt' at the hotspot location for each of the candidate locations
            didt_opt = np.divide(np.ones(e_candidates.shape[0]) * e_opt_real_in_hotspot,
                                 e_candidates[:, max_r2_ele_idx])

            mso_opt = didt_opt / fim_mso_didt_conversion_factor
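
            # Example of the conversion above: with the default factor 1.43 (MagVenture Pro with an MCF-B65 coil),
            # a required dI/dt of 100 (in the stimulator's dI/dt units) maps to 100 / 1.43 ~ 70 %MSO; candidates
            # whose required %MSO falls outside [fim_rmt_mso, 100] are treated as infeasible below.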

            # select feasible e-fields:
            # the required e-field value at the hotspot location must be achievable with
            # <= 100 %MSO && >= resting motor threshold
            num_configurations_below_valid_range = np.sum(mso_opt < fim_rmt_mso)
            num_configurations_within_valid_range = np.sum(np.logical_and(fim_rmt_mso <= mso_opt, mso_opt <= 100))
            num_configurations_above_valid_range = np.sum(mso_opt > 100)

            # If the target e_opt could not be achieved with a stimulator intensity within the valid range for any of
            # the candidate coil configurations, use the bounds of the valid range depending on where most of the
            # outliers were (above or below).
            if num_configurations_within_valid_range == 0:
                if num_configurations_below_valid_range >= num_configurations_above_valid_range:
                    # setting 'mso_opt' to either of the bounds will make all of the candidate locations available
                    # for the secondary optimization metric and still scale them in the minimum or maximum allowed
                    # direction as requested by the FIM optimization.
                    mso_opt = np.ones(mso_opt.shape) * fim_rmt_mso
                else:
                    mso_opt = np.ones(mso_opt.shape) * 100

            # convert to dI/dt after changing the %MSO values
            didt_opt = mso_opt * fim_mso_didt_conversion_factor

            # extract the indices of valid coil configurations and the associated scaling factors
            achievable_candidates_idcs = np.where(np.logical_and(fim_rmt_mso <= mso_opt, mso_opt <= 100))[0].tolist()
            idcs_to_check = [idcs_to_check[i] for i in achievable_candidates_idcs]

            # Create a dI/dt scaling vector for all coil configurations:
            # - valid coil configurations exhibit a scaling factor according to their computed optimal dI/dt
            # - invalid coil configurations remain unscaled
            # 'fim_didt_scaling' is used to initialize the metric workers:
            # only through 'idcs_to_check' do the workers individually subset the rows (= coil configurations) of the
            # full e-field matrix upon execution. To provide them with a (full) scaled e-field matrix, the scaling
            # vector must be of the same length as the number of rows of the full e-field matrix.
            fim_didt_scaling = np.ones(e_all_coil_pos.shape[0])
            fim_didt_scaling[idcs_to_check] = didt_opt[achievable_candidates_idcs]
            print(f"Elapsed time array stuff {round((time.time() - t0) * 10) / 10}")

            t0 = time.time()
            if fim_debug_screenshot_dir_fn is not None and fn_coilpos_hdf5 is not None:
                pynibs.render_coil_positions(
                    coil_conf_set_2_positions=centers[:],
                    coil_conf_set_2_orientations=m1[:],
                    coil_conf_set_1_positions=centers[achievable_candidates_idcs],
                    coil_conf_set_1_orientations=m1[achievable_candidates_idcs],
                    viewport_dim=(1280, 720),
                    camera_polar_coords=(-175, 66, 110),
                    screenshot_fn=os.path.join(
                        fim_debug_screenshot_dir_fn,
                        f"fim_coilpos_selection_debug_after_{e_realized_in_hotspot.shape[0]}_stims.png"
                    ),
                    interactive=False
                )
            print(f"Elapsed time Rendering {round((time.time() - t0) * 10) / 10}")

        # prepare the result metrics for the parallel execution
        if criterion == "svd":
            workhorse_partial = partial(workhorses.svd, array=e_all_coil_pos, ele_idx_1=ele_idx_1)

        elif criterion == "dist":
            workhorse_partial = partial(workhorses.dist, array=e_all_coil_pos, ele_idx_1=ele_idx_1)

        elif criterion == "mc_cols":
            e_all_coil_pos = e_all_coil_pos - np.mean(e_all_coil_pos[:, ele_idx_1], axis=1)[:, np.newaxis]
            workhorse_partial = partial(workhorses.mc, array=e_all_coil_pos, ele_idx_1=ele_idx_1, mode="cols")

        elif criterion == "mc_rows":
            workhorse_partial = partial(workhorses.mc, array=e_all_coil_pos, ele_idx_1=ele_idx_1, mode="rows")

        elif criterion == "variability":
            workhorse_partial = partial(workhorses.variability, array=e_all_coil_pos, ele_idx_1=ele_idx_1)

        elif criterion == "dist_mc_cols":
            if metrics_weights[0] == 0:
                workhorse_partial = partial(workhorses.mc, array=e_all_coil_pos, ele_idx_1=ele_idx_2, mode="cols")
            elif metrics_weights[1] == 0:
                workhorse_partial = partial(workhorses.dist, array=e_all_coil_pos, ele_idx_1=ele_idx_1)
            else:
                workhorse_partial = partial(workhorses.dist_mc, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            ele_idx_2=ele_idx_2,
                                            mode="cols")

        elif criterion == "dist_mc_rows":
            if metrics_weights[0] == 0:
                workhorse_partial = partial(workhorses.mc, array=e_all_coil_pos, ele_idx_1=ele_idx_2, mode="rows")
            elif metrics_weights[1] == 0:
                workhorse_partial = partial(workhorses.dist, array=e_all_coil_pos, ele_idx_1=ele_idx_1)
            else:
                workhorse_partial = partial(workhorses.dist_mc, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            ele_idx_2=ele_idx_2,
                                            mode="rows")

        elif criterion == "dist_svd":
            if metrics_weights[0] == 0:
                workhorse_partial = partial(workhorses.svd, array=e_all_coil_pos, ele_idx_1=ele_idx_2)
            elif metrics_weights[1] == 0:
                workhorse_partial = partial(workhorses.dist, array=e_all_coil_pos, ele_idx_1=ele_idx_1)
            else:
                workhorse_partial = partial(workhorses.dist_svd, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            ele_idx_2=ele_idx_2)

        elif criterion == "fim":
            workhorse_partial = partial(workhorses.fim, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                        e_opt=e_opt_real_in_hotspot, c=regression_cmap_normalized)

        elif criterion == "fim_svd":
            if metrics_weights[0] == 0:
                workhorse_partial = partial(workhorses.svd, array=e_all_coil_pos, ele_idx_1=ele_idx_2)
            elif metrics_weights[1] == 0:
                workhorse_partial = partial(workhorses.fim, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            e_opt=e_opt_real_in_hotspot, c=regression_cmap_normalized)
            else:
                workhorse_partial = partial(workhorses.fim_svd, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            ele_idx_2=ele_idx_2, e_opt=e_opt_real_in_hotspot,
                                            c=regression_cmap_normalized)

        elif criterion == "fim_mc_rows":
            if metrics_weights[0] == 0:
                workhorse_partial = partial(workhorses.mc, array=e_all_coil_pos, ele_idx_1=ele_idx_2, mode="rows")
            elif metrics_weights[1] == 0:
                workhorse_partial = partial(workhorses.fim, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            e_opt=e_opt_real_in_hotspot, c=regression_cmap_normalized)
            else:
                workhorse_partial = partial(workhorses.fim_mc, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            ele_idx_2=ele_idx_2, e_opt=e_opt_real_in_hotspot,
                                            c=regression_cmap_normalized,
                                            mode="rows")

        elif criterion == "fim_mc_cols":
            if metrics_weights[0] == 0:
                workhorse_partial = partial(workhorses.mc, array=e_all_coil_pos, ele_idx_1=ele_idx_2, mode="cols")
            elif metrics_weights[1] == 0:
                workhorse_partial = partial(workhorses.fim, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            e_opt=e_opt_real_in_hotspot, c=regression_cmap_normalized)
            else:
                workhorse_partial = partial(workhorses.fim_mc, array=e_all_coil_pos, ele_idx_1=ele_idx_1,
                                            ele_idx_2=ele_idx_2, e_opt=e_opt_real_in_hotspot,
                                            c=regression_cmap_normalized,
                                            mode="cols")

        elif criterion == "fim_correlation":
            try:
                e_all_coil_pos_scaled = np.multiply(e_all_coil_pos, fim_didt_scaling[:, np.newaxis])

                # mask regions of low r^2 out for the correlation optimization
                max_r2_idx = np.argmax(regression_cmap_normalized)
                high_r2_roi_ele_idcs = np.array(regression_cmap_normalized > 0.6, dtype=np.int16)
                subsampled_ele_idc = np.zeros(regression_cmap_normalized.shape[0])
                subsampled_ele_idc[ele_idx_2] = 1

                ele_idcs_mask = np.logical_and(high_r2_roi_ele_idcs, subsampled_ele_idc).astype(np.int16)

                # for plotting, set hotspot_idx to 2
                if fim_debug_screenshot_dir_fn is not None:
                    ele_idcs_mask[max_r2_idx] = 2
                    pynibs.render_data_on_surface(
                        points=fim_roi_pts,
                        tris=fim_roi_tris,
                        data=ele_idcs_mask,
                        screenshot_fn=os.path.join(
                            fim_debug_screenshot_dir_fn,
                            f"fim_decorrelation_idcs_{e_realized_in_hotspot.shape[0]}_stims.png"
                        ),
                        interactive=False
                    )
                # make sure to set max_r2_idx to 0 to avoid a double insertion of the hotspot idx
                # (it will be inserted at the end of the index list in any case)
                ele_idcs_mask[max_r2_idx] = 0

                subsampled_ele_idcs = np.append(np.where(ele_idcs_mask)[0], max_r2_idx)

                if not fim_use_gpu:
                    workhorse_partial = partial(
                        workhorses.roi_elmt_wise_corr,
                        array=e_all_coil_pos_scaled,
                        # the hotspot idx must be the last element of "ele_idx_1" to decorrelate
                        # only the hotspot idx with all other ROI elements
                        ele_idx_1=subsampled_ele_idcs,  # indices of elements marked with '1'
                        decorrelate_hotspot_only=False
                    )
                else:
                    import cupy as cp
                    import time

                    # assemble the to-be-checked index lists
                    idx_list = []
                    for j in range(len(idcs_to_check)):
                        idx_list.append(zap_idx_e_opt[:idx_in_result_list - 1] + [idcs_to_check[j]])
                    idx_list = np.array(idx_list)

                    # load data on the GPU
                    e = cp.empty(shape=e_all_coil_pos_scaled.shape, dtype=cp.float32)
                    subsampling_idcs = cp.empty(shape=subsampled_ele_idcs.shape, dtype=cp.int32)
                    index_lists = cp.empty(shape=idx_list.shape, dtype=cp.int32)
                    e[:] = cp.asarray(e_all_coil_pos_scaled[:], dtype=cp.float32)
                    subsampling_idcs[:] = cp.asarray(subsampled_ele_idcs[:], dtype=cp.int32)
                    index_lists[:] = cp.asarray(idx_list[:], dtype=cp.int32)

                    # execute the optimization
                    res_all = workhorses.roi_elmt_wise_corr(
                        idx_list=index_lists,
                        array=e,
                        ele_idx_1=subsampling_idcs,
                        decorrelate_hotspot_only=False,
                        backend=cp
                    )
                    idx_best = cp.argmin(res_all).get()
                    crit[idx_in_result_list] = res_all[idx_best].get()
            except NameError:
                print("[Error] opt/get_optimal_coil_positions: Could not access precomputed "
                      "target optimal e-field values from FIM. Will skip computing the 'fim_correlation' metric.")

        elif criterion == "rowvec_diff":
            # The "rowvec_diff" metric is not parallelized: no "workhorse_partial" is prepared;
            # instead the result is computed immediately, as it is a simple direct matrix lookup.
            # The results of at least two stimulations must already be available
            # (either provided via 'zap_idx_opt' or computed in the preparatory step above).
            if idx_of_last_result >= 1:
                try:
                    idx_best, crit[idx_in_result_list] = rowvec_diff(
                        candidate_coil_idcs=idcs_to_check,
                        selected_coil_idcs=zap_idx_e_opt[:idx_in_result_list],
                        efields_diff_mat=efields_diff_mat_full,
                    )
                except NameError:
                    print("[Error] opt/get_optimal_coil_positions: Could not access precomputed "
                          "e-field difference matrix. Will skip computing the 'rowvec_diff' metric.")
        else:
            raise NameError(f"criterion: {criterion} not implemented")

        # parallel execution of the prepared workers
        if workhorse_partial is not None:
            # Create the list of lists the optimization criterion is supposed to be computed for:
            # - the first part of the sub-lists is always the sequence of already determined optimal positions
            # - all of these lists differ only in their last element: the indices from "idcs_to_check"
            idx_list = []
            for j in range(len(idcs_to_check)):
                idx_list.append(zap_idx_e_opt[:idx_in_result_list - 1] + [idcs_to_check[j]])

            idx_list_chunks = pynibs.compute_chunks(idx_list, n_cpu)
            res = pool.map(workhorse_partial, idx_list_chunks)

            # extract the best solution (multiple objectives)
            if type(res[0]) is tuple:

                for j in range(len(res)):
                    if j == 0:
                        res_all = np.vstack(res[j]).transpose()
                    else:
                        res_all = np.vstack((res_all, np.vstack(res[j]).transpose()))

                # filter nans  # TODO: not to 1e6 -> max of each opt criterion (column)
                res_all[np.isnan(res_all)] = 1e6
                res_all[res_all == 0] = 1e-6

                # normalize both optimization criteria to [0, 1]
                res_all = (res_all - np.min(res_all, axis=0)) / (np.max(res_all, axis=0) - np.min(res_all, axis=0))

                # weight the optimization criteria
                res_all = res_all * metrics_weights

                # find the best solution with the lowest sum
                res_all_sum = np.sum(res_all, axis=1)
                idx_best = np.argmin(res_all_sum)
                crit[idx_in_result_list] = res_all_sum[idx_best]

            # extract the best solution (single objective)
            else:
                # filter nans
                res = np.concatenate(res)
                res[np.isnan(res)] = 1e6

                # find the best solution
                idx_best = np.argmin(res)
                crit[idx_in_result_list] = res[idx_best]

        if verbose:
            print(f" >>> Best index: {idcs_to_check[idx_best]}, criterion: {crit[idx_in_result_list]}")

        next_optimal_coil_idx = idcs_to_check[idx_best]
        if "fim" in criterion:
            next_optimal_stim_mso = np.round(fim_didt_scaling[next_optimal_coil_idx] / fim_mso_didt_conversion_factor)
        else:
            next_optimal_stim_mso = None
        zap_idx_e_opt[idx_in_result_list] = next_optimal_coil_idx
        idcs_to_check.remove(next_optimal_coil_idx)

        if fn_out_hdf5 is not None:
            # save results
            with h5py.File(fn_out_hdf5, "a") as f:
                try:
                    del f["criterion"]
                except (RuntimeError, KeyError):
                    f.create_dataset("criterion", data=crit)

                try:
                    f.create_dataset(f"zap_index_list/{idx_in_result_list}", data=zap_idx_e_opt[:idx_in_result_list])
                except (RuntimeError, KeyError):
                    if overwrite:
                        del f[f"zap_index_list/{idx_in_result_list}"]
                        f.create_dataset(f"zap_index_list/{idx_in_result_list}",
                                         data=zap_idx_e_opt[:idx_in_result_list])
                    else:
                        print(f"Could not write zap_index_list/{idx_in_result_list}. Dataset already exists.")

                if fn_coilpos_hdf5 is not None:
                    try:
                        del f["centers"], f["m0"], f["m1"], f["m2"]
                    except (RuntimeError, KeyError):
                        if overwrite:
                            f.create_dataset("centers", data=centers[zap_idx_e_opt[idx_in_result_list], :])
                            f.create_dataset("m0", data=m0[zap_idx_e_opt[idx_in_result_list], :])
                            f.create_dataset("m1", data=m1[zap_idx_e_opt[idx_in_result_list], :])
                            f.create_dataset("m2", data=m2[zap_idx_e_opt[idx_in_result_list], :])

    pool.close()
    pool.join()

    if fn_out_hdf5 is None:
        if "fim" not in criterion:
            return zap_idx_e_opt, crit
        else:
            return zap_idx_e_opt, crit, next_optimal_stim_mso
    else:
        with h5py.File(fn_out_hdf5, "a") as f:
            try:
                # f.create_dataset(f"zap_index_list", data=np.array(zap_idx_e_opt[-1])[:, np.newaxis])
                f.create_dataset(f"zap_index_list", data=np.array(zap_idx_e_opt)[:, np.newaxis])
            except (RuntimeError, KeyError):
                if overwrite:
                    del f[f"zap_index_list"]
                    # f.create_dataset(f"zap_index_list", data=np.array(zap_idx_e_opt[-1])[:, np.newaxis])
                    f.create_dataset(f"zap_index_list", data=np.array(zap_idx_e_opt)[:, np.newaxis])
                else:
                    print(f"Could not write zap_index_list. Dataset already exists.")

            if fn_coilpos_hdf5 is not None:
                m0_opt_reshaped = np.hstack((m0[zap_idx_e_opt[-1], :],
                                             np.zeros((len(zap_idx_e_opt[-1]), 1)))).T[:, np.newaxis, :]
                m1_opt_reshaped = np.hstack((m1[zap_idx_e_opt[-1], :],
                                             np.zeros((len(zap_idx_e_opt[-1]), 1)))).T[:, np.newaxis, :]
                m2_opt_reshaped = np.hstack((m2[zap_idx_e_opt[-1], :],
                                             np.zeros((len(zap_idx_e_opt[-1]), 1)))).T[:, np.newaxis, :]
                centers_opt_reshaped = np.hstack((centers[zap_idx_e_opt[-1], :],
                                                  np.ones((len(zap_idx_e_opt[-1]), 1)))).T[:, np.newaxis, :]
                matsimnibs = np.concatenate((m0_opt_reshaped,
                                             m1_opt_reshaped,
                                             m2_opt_reshaped,
                                             centers_opt_reshaped), axis=1)

                try:
                    f.create_dataset("centers", data=centers[zap_idx_e_opt[-1], :])
                    f.create_dataset("m0", data=m0[zap_idx_e_opt[-1], :])
                    f.create_dataset("m1", data=m1[zap_idx_e_opt[-1], :])
                    f.create_dataset("m2", data=m2[zap_idx_e_opt[-1], :])
                    f.create_dataset("matsimnibs", data=matsimnibs)
                except (RuntimeError, KeyError):
                    if overwrite:
                        del f["centers"], f["m0"], f["m1"], f["m2"]
                        f.create_dataset("centers", data=centers[zap_idx_e_opt[-1], :])
                        f.create_dataset("m0", data=m0[zap_idx_e_opt[-1], :])
                        f.create_dataset("m1", data=m1[zap_idx_e_opt[-1], :])
                        f.create_dataset("m2", data=m2[zap_idx_e_opt[-1], :])
                        f.create_dataset("matsimnibs", data=matsimnibs)
                    else:
                        print(f"Could not write centers, m0, m1, m2 ... Dataset already exists.")


def online_optimization(fn_subject_hdf5, fn_roi_ss_indices_hdf5, fn_out_hdf5, fn_stimsites_hdf5, e_matrix, mep,
                        mesh_idx, roi_idx, n_zaps_init=3, criterion_init="mc_rows", criterion="coverage", n_cpu=4,
                        threshold=0.8, weights=None, eps0=0.01, eps0_dist=1, exponent=5, perc=99,
                        n_refit=0, fun=pynibs.sigmoid, verbose=True):
    """
    Performs virtual online optimization to determine the congruence factor. After an initial set of coil positions,
    the algorithm iteratively optimizes the next coil position based on the virtually measured MEP data.

    Parameters
    ----------
    fn_subject_hdf5 : str
        Filename of the subject .hdf5 file.
    fn_roi_ss_indices_hdf5 : str
        Filename of the .hdf5 file containing the element indices of the subsampled ROI in ``f["roi_indices"]``.
    e_matrix : np.ndarray of float
        (n_zaps, n_ele) Electric field matrix.
    mep : np.ndarray of float
        (n_zaps) Motor evoked potentials for every stimulation.
    fn_out_hdf5 : str
        Filename of the .hdf5 output file containing the coil positions and the congruence factor maps for every
        iteration.
    fn_stimsites_hdf5 : str
        Filename of the .hdf5 file containing the stimulation sites in "centers", "m0", "m1", "m2".
    mesh_idx : int
        Mesh index.
    roi_idx : int
        ROI index.
    n_zaps_init : int, default: 3
        Number of initial samples optimized using the optimization criterion specified in ``criterion_init``.
    criterion_init : str, default: "mc_rows"
        Optimization criterion for which the initial samples are optimized (e.g. "mc_rows", "svd", ...).
    criterion : str, default: "coverage"
        Optimization criterion for which the online optimization is performed (e.g. "coverage", "mc_rows", "svd", ...).
    n_cpu : int, optional, default: 4
        Number of CPU cores to use.
    threshold : float, default: 0.8
        Threshold between [0 ... 1] of the maximal congruence factor. Elements where c > threshold * max(c)
        are included in the online optimization to select the next optimal coil position.
    weights : list of float [2], default: [0.5, 0.5]
        Weights of the optimization criteria in case of multiple goal functions (e.g. fim_svd). A higher weight means
        higher importance for the respective criterion. By default, both optimization criteria are weighted equally
        [0.5, 0.5].
    eps0 : float, default: 0.01
        First error threshold to terminate the online optimization. The normalized root mean square deviation is
        calculated between the current and the previous solution. If the error is lower than eps0 three times in a
        row, the online optimization terminates and returns the results.
    eps0_dist : float, default: 1
        Second error threshold to terminate the online optimization. The geodesic distance in mm of the hotspot is
        calculated between the current and the previous solution. If the error is lower than eps0_dist three times
        in a row, the online optimization terminates and returns the results.
    exponent : float, default: 5
        Exponent the congruence factor map is scaled with (c**exponent).
    perc : float, default: 99
        Percentile the congruence factor map is normalized with (between 0 and 100).
    n_refit : int, default: 0
        Number of refit iterations. No refit is applied if ``n_refit=0``.
    fun : function object, default: pynibs.sigmoid
        Function used to determine the congruence factor (e.g. pynibs.linear, pynibs.sigmoid, ...).
    verbose : bool, default: True
        Print output messages.

    Returns
    -------
    <file> .hdf5 file
        Results output file containing the coil positions and the congruence factor maps for every iteration.
    """
|
|
894
|
+
if weights is None:
|
|
895
|
+
weights = [0.5, 0.5]
|
|
896
|
+
print("Starting online congruence factor optimization:")
|
|
897
|
+
print("===============================================")
|
|
898
|
+
print(f" > fn_subject_hdf5: {fn_subject_hdf5}")
|
|
899
|
+
print(f" > fn_roi_ss_indices_hdf5: {fn_roi_ss_indices_hdf5}")
|
|
900
|
+
print(f" > fn_stimsites_hdf5: {fn_stimsites_hdf5}")
|
|
901
|
+
print(f" > fn_out_hdf5: {fn_out_hdf5}")
|
|
902
|
+
print(f" > e_matrix: shape: {e_matrix.shape}")
|
|
903
|
+
print(f" > mep: shape: {mep.shape}")
|
|
904
|
+
print(f" > mesh_idx: {mesh_idx}")
|
|
905
|
+
print(f" > roi_idx: {roi_idx}")
|
|
906
|
+
print(f" > n_zaps_init: {n_zaps_init}")
|
|
907
|
+
print(f" > criterion_init: {criterion_init}")
|
|
908
|
+
print(f" > criterion: {criterion}")
|
|
909
|
+
print(f" > n_cpu: {n_cpu}")
|
|
910
|
+
print(f" > threshold: {threshold}")
|
|
911
|
+
print(f" > weights: {weights}")
|
|
912
|
+
print(f" > eps0: {eps0}")
|
|
913
|
+
print(f" > eps0_dist: {eps0_dist}")
|
|
914
|
+
print(f" > exponent: {exponent}")
|
|
915
|
+
print(f" > perc: {perc}")
|
|
916
|
+
print(f" > n_refit: {n_refit}")
|
|
917
|
+
print(f" > fun: {fun.__name__}")
|
|
918
|
+
print(f" > verbose: {verbose}")
|
|
919
|
+
print("")
|
|
920
|
+
|
|
921
|
+
zap_idx = dict()
|
|
922
|
+
c = dict()
|
|
923
|
+
|
|
924
|
+
# load subject
|
|
925
|
+
if verbose:
|
|
926
|
+
print(f"Loading subject")
|
|
927
|
+
subject = pynibs.load_subject(fn_subject_hdf5)
|
|
928
|
+
|
|
929
|
+
# load ROI and perform subsampling
|
|
930
|
+
if verbose:
|
|
931
|
+
print(f"Loading ROI and perform subsampling")
|
|
932
|
+
roi = pynibs.load_roi_surface_obj_from_hdf5(subject.mesh[mesh_idx]['fn_mesh_hdf5'])[roi_idx]
|
|
933
|
+
con = roi.node_number_list
|
|
934
|
+
points = roi.node_coord_mid
|
|
935
|
+
|
|
936
|
+
with h5py.File(fn_roi_ss_indices_hdf5, "r") as f:
|
|
937
|
+
ele_idx_ss = f["roi_indices"][:]
|
|
938
|
+
|
|
939
|
+
# e-fields
|
|
940
|
+
if verbose:
|
|
941
|
+
print(f"Loading electric field from regression.hdf5")
|
|
942
|
+
n_ele = e_matrix.shape[1]
|
|
943
|
+
|
|
944
|
+
# loading coil positions and create matsimnibs [4x4] matrices
|
|
945
|
+
with h5py.File(fn_stimsites_hdf5, "r") as f:
|
|
946
|
+
centers_all = f["centers"][:]
|
|
947
|
+
m0_all = f["m0"][:]
|
|
948
|
+
m1_all = f["m1"][:]
|
|
949
|
+
m2_all = f["m2"][:]
|
|
950
|
+
|
|
951
|
+
coil_mean = dict()
|
|
952
|
+
current_dict = dict()
|
|
953
|
+
|
|
954
|
+
for i in range(centers_all.shape[0]):
|
|
955
|
+
coil_mean[str(i)] = np.hstack((m0_all[i, :][:, np.newaxis],
|
|
956
|
+
m1_all[i, :][:, np.newaxis],
|
|
957
|
+
m2_all[i, :][:, np.newaxis],
|
|
958
|
+
centers_all[i, :][:, np.newaxis]))
|
|
959
|
+
current_dict[str(i)] = 1
|
|
960
|
+
|
|
961
|
+
# determine initial number of optimal samples
|
|
962
|
+
if verbose:
|
|
963
|
+
print(f"Determine optimal coil positions for initial number of {n_zaps_init} samples using {criterion_init}")
|
|
964
|
+
|
|
965
|
+
zap_idx_opt = pynibs.get_optimal_coil_positions(e_matrix=e_matrix,
|
|
966
|
+
ele_idx_1=ele_idx_ss,
|
|
967
|
+
ele_idx_2=None,
|
|
968
|
+
criterion=criterion_init,
|
|
969
|
+
n_stim=n_zaps_init,
|
|
970
|
+
fn_out_hdf5=None,
|
|
971
|
+
n_cpu=n_cpu,
|
|
972
|
+
zap_idx_opt=None,
|
|
973
|
+
metrics_weights=weights,
|
|
974
|
+
overwrite=False,
|
|
975
|
+
verbose=True)
|
|
976
|
+
|
|
977
|
+
# determine initial c-factor map for all N (not existing in real life)
|
|
978
|
+
if verbose:
|
|
979
|
+
print(f"Determine reference c-factor map (N)")
|
|
980
|
+
|
|
981
|
+
c_ref_n = pynibs.regress_data(elm_idx_list=np.arange(n_ele),
|
|
982
|
+
e_matrix=e_matrix,
|
|
983
|
+
mep=mep,
|
|
984
|
+
zap_idx=None,
|
|
985
|
+
fun=fun,
|
|
986
|
+
n_refit=n_refit,
|
|
987
|
+
n_cpu=n_cpu,
|
|
988
|
+
con=con,
|
|
989
|
+
return_fits=False,
|
|
990
|
+
refit_discontinuities=True)
|
|
991
|
+
|
|
992
|
+
    ref_n = c_ref_n.flatten() ** exponent
    ref_n = ref_n / np.percentile(ref_n, perc)

    c_max_idx_N = np.argmax(c_ref_n)

    ##########################################################################
    #
    # Robot measures initial offline optimal coil positions and collects MEPs
    #
    ##########################################################################

    # determine initial c-factor map (after robot measured first offline optimal coil positions)
    if verbose:
        print(f"Determine initial c-factor map")

    c_init, p = pynibs.regress_data(elm_idx_list=np.arange(n_ele),
                                    e_matrix=e_matrix,
                                    mep=mep,
                                    zap_idx=zap_idx_opt,
                                    fun=fun,
                                    n_refit=n_refit,
                                    n_cpu=n_cpu,
                                    con=con,
                                    return_fits=True,
                                    refit_discontinuities=True)
    ref = c_init.flatten() ** exponent
    ref = ref / np.percentile(ref, perc)

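    # Initialize the convergence metrics above their thresholds (eps0 + 1, eps0_dist + 1) so the online loop
    # below cannot terminate before additional positions have been evaluated; c-factor maps for fewer than
    # n_zaps_init zaps are kept as zero-filled placeholders.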
    eps = [eps0 + 1 for _ in range(len(zap_idx_opt))]
    eps_n = [eps0 + 1 for _ in range(len(zap_idx_opt))]

    gdist = [eps0_dist + 1 for _ in range(len(zap_idx_opt))]
    gdist_n = [eps0_dist + 1 for _ in range(len(zap_idx_opt))]

    for i in range(len(zap_idx_opt)):
        zap_idx[str(i)] = zap_idx_opt[:(i + 1)]
        c[str(i)] = np.zeros(ref.shape)

    c[str(n_zaps_init - 1)] = c_init

    n_zaps = [i for i in range(1, n_zaps_init + 1)]

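    # One optimal coil position is added per iteration. The loop stops once the last three NRMSD values
    # (w.r.t. the respective previous map) are below eps0 and the last three geodesic hotspot distances are
    # below eps0_dist, i.e. the c-factor map and its hotspot location have both stabilized.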
    # Start online optimization loop
    while not ((np.array(eps[-3:]) < eps0).all() and (np.array(gdist[-3:]) < eps0_dist).all()):

        if "fim" in criterion or "dist_" in criterion:
            # find elements with values greater than threshold
            mask_perc = ref >= threshold * np.max(ref)
            ele_idx_1 = np.where(mask_perc)[0]
            ele_idx_2 = ele_idx_ss

        else:
            ele_idx_1 = ele_idx_ss
            ele_idx_2 = ele_idx_ss

        # optimize coil positions for subset of ROI elements
        if verbose:
            print(f"Optimizing next coil position for ROI_1: {len(ele_idx_1)} / ROI_2: {len(ele_idx_2)} elements "
                  f"using {criterion}")

        n_zaps.append(n_zaps[-1] + 1)
        zap_idx_opt = pynibs.get_optimal_coil_positions(e_matrix=e_matrix,
                                                        ele_idx_1=ele_idx_1,
                                                        ele_idx_2=ele_idx_2,
                                                        criterion=criterion,
                                                        n_stim=n_zaps[-1],
                                                        fn_out_hdf5=None,
                                                        n_cpu=n_cpu,
                                                        zap_idx_opt=zap_idx_opt,
                                                        fim_fit_fun=fun,
                                                        regression_fit_parameters=p,
                                                        regression_cmap=ref,
                                                        metrics_weights=weights,
                                                        overwrite=False,
                                                        verbose=True)
        key = str(len(zap_idx_opt) - 1)
        zap_idx[key] = zap_idx_opt

        ##########################################################################
        #
        # Robot measures next optimal coil position
        #
        ##########################################################################

        # determine updated c-factor map
        if verbose:
            print(f"Determine c-factor map for {len(zap_idx_opt)} zaps")

        c[key], p = pynibs.regress_data(elm_idx_list=np.arange(n_ele),
                                        e_matrix=e_matrix,
                                        mep=mep,
                                        zap_idx=zap_idx_opt,
                                        fun=fun,
                                        n_refit=n_refit,
                                        n_cpu=n_cpu,
                                        con=con,
                                        return_fits=True,
                                        refit_discontinuities=True)
        arr = c[key].flatten() ** exponent
        arr = arr / np.percentile(arr, perc)

        ##########################################################################
        #
        # Plot updated c-factor map
        #
        ##########################################################################

        # determine NRMSD w.r.t. previous solution
        eps.append(pynibs.nrmsd(arr, ref))

        if verbose:
            print(f"NRMSD to previous solution: {eps[-1]}")

        # determine NRMSD w.r.t. global solution (not existing in real life)
        eps_n.append(pynibs.nrmsd(arr, ref_n))

        if verbose:
            print(f"NRMSD to global solution: {eps_n[-1]}")

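        # pynibs.geodesic_dist returns geodesic distances from the source triangle (the current hotspot, i.e. the
        # element with the largest c-factor) to all nodes/triangles of the ROI surface; indexing tris_dist with
        # the previous (or global) hotspot index therefore gives the hotspot shift in mm.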
        # determine geodesic distance w.r.t. previous solution
        nodes_dist, tris_dist = pynibs.geodesic_dist(nodes=points, tris=con, source=np.argmax(c[key]),
                                                     source_is_node=False)
        gdist.append(tris_dist[np.argmax(c[str(len(zap_idx_opt) - 2)])])

        if verbose:
            print(f"GDIST to previous solution: {gdist[-1]:.3f} mm")

        # determine geodesic distance w.r.t. global solution (not existing in real life)
        gdist_n.append(tris_dist[c_max_idx_N])

        if verbose:
            print(f"GDIST to global solution: {gdist_n[-1]:.3f} mm")

        # set current solution as ref
        ref = arr

    eps_n = np.array(eps_n)
    eps = np.array(eps)
    gdist_n = np.array(gdist_n)
    gdist = np.array(gdist)
    n_zaps = np.array(n_zaps)

    if verbose:
        print(f"Saving results to {fn_out_hdf5}")

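    # Results file layout: "nrmsd"/"gdist" hold the convergence w.r.t. the full reference solution (all N zaps),
    # "nrmsd_n_1"/"gdist_n_1" the convergence w.r.t. the respective previous iteration. For every iteration key,
    # the used zap indices, the c-factor map and the corresponding coil positions, orientations and currents
    # are stored in subgroups.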
    with h5py.File(fn_out_hdf5, "w") as f:
        f.create_dataset("nrmsd", data=eps_n)
        f.create_dataset("nrmsd_n_1", data=eps)
        f.create_dataset("gdist", data=gdist_n)
        f.create_dataset("gdist_n_1", data=gdist)
        f.create_dataset("n_zaps", data=n_zaps)
        f.create_dataset(f"c_ref", data=c_ref_n)

        for key in zap_idx:
            f.create_dataset(f"zap_index_lists/{key}", data=zap_idx[key])
            f.create_dataset(f"c/{key}", data=c[key])

            n_zaps_tmp = len(zap_idx[key])
            centers = np.zeros((n_zaps_tmp, 3))
            m0 = np.zeros((n_zaps_tmp, 3))
            m1 = np.zeros((n_zaps_tmp, 3))
            m2 = np.zeros((n_zaps_tmp, 3))
            current = np.zeros((n_zaps_tmp, 1))

            for i, j in enumerate(zap_idx[key]):
                centers[i, :] = coil_mean[str(j)][0:3, 3]
                m0[i, :] = coil_mean[str(j)][0:3, 0]
                m1[i, :] = coil_mean[str(j)][0:3, 1]
                m2[i, :] = coil_mean[str(j)][0:3, 2]
                current[i, 0] = current_dict[str(j)]

            f.create_dataset(f"centers/{key}", data=centers)
            f.create_dataset(f"m0/{key}", data=m0)
            f.create_dataset(f"m1/{key}", data=m1)
            f.create_dataset(f"m2/{key}", data=m2)
            f.create_dataset(f"current/{key}", data=current)

    # create geo .hdf5
    fn_geo_hdf5 = os.path.splitext(fn_out_hdf5)[0] + "_geo.hdf5"
    pynibs.write_geo_hdf5_surf(out_fn=fn_geo_hdf5,
                               points=points,
                               con=con,
                               replace=True,
                               hdf5_path='/mesh')

    # write xdmf file with optimal results
    if verbose:
        print(f"Creating .xdmf ...")

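    # The temporal .xdmf links the surface geometry with the per-iteration datasets (c, centers, m0-m2, current),
    # so the evolution of the c-factor map and the visited coil positions can be stepped through in ParaView.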
    pynibs.write_temporal_xdmf(hdf5_fn=fn_out_hdf5,
                               data_folder='c',
                               coil_center_folder="centers",
                               coil_ori_0_folder="m0",
                               coil_ori_1_folder="m1",
                               coil_ori_2_folder="m2",
                               coil_current_folder="current",
                               hdf5_geo_fn=fn_geo_hdf5,
                               overwrite_xdmf=True,
                               verbose=False)

    # plot results
    fn_plot_nrmsd = os.path.splitext(fn_out_hdf5)[0] + "_nrmsd.png"
    fn_plot_gdist = os.path.splitext(fn_out_hdf5)[0] + "_gdist.png"

    if verbose:
        print(f"Plotting results to {fn_plot_nrmsd}")

    sort_idx = np.argsort(n_zaps)
    n_zaps = n_zaps[sort_idx]
    eps_n = eps_n[sort_idx]
    eps = eps[sort_idx]

    # nrmsd (n vs N) error
    plt.plot(n_zaps[n_zaps_init:], eps_n[n_zaps_init:], color="r")

    # nrmsd (n vs n-1) error
    plt.plot(n_zaps[n_zaps_init:], eps[n_zaps_init:], color="b")

    # 5% NRMSD reference line
    plt.plot(np.array([n_zaps[4], n_zaps[-1]]), np.array([0.05, 0.05]), "r--")

    # 1% NRMSD reference line
    plt.plot(np.array([n_zaps[4], n_zaps[-1]]), np.array([0.01, 0.01]), "b--")

    plt.grid()
    plt.xlabel("n", fontsize=11)
    plt.ylabel("NRMSD", fontsize=11)
    plt.ylim([0, 0.2])
    # plt.xscale("log")
    plt.title("Convergence analysis of online optimization", fontsize=11)
    plt.legend(["n vs N", "n vs (n-1)"])
    plt.savefig(fn_plot_nrmsd)
    plt.close()

    # gdist (n vs N) error
    plt.plot(n_zaps[n_zaps_init:], gdist_n[n_zaps_init:], color="r")

    # gdist (n vs n-1) error
    plt.plot(n_zaps[n_zaps_init:], gdist[n_zaps_init:], color="b")

    # 1 mm GDIST reference line
    plt.plot(np.array([n_zaps[4], n_zaps[-1]]), np.array([1, 1]), "b--")

    plt.grid()
    plt.xlabel("n", fontsize=11)
    plt.ylabel("GDIST", fontsize=11)
    plt.title("Convergence analysis of online optimization", fontsize=11)
    plt.legend(["n vs N", "n vs (n-1)"])
    plt.savefig(fn_plot_gdist)
    plt.close()

    if verbose:
        print("DONE")


def calc_opt_gain_map(e_matrix_ref, e_matrix_opt, points, con, fn_out=None, threshold=0.75):
    """
    Calculates the gain map between a reference e_matrix (e.g. from random sampling) and an optimized sequence of
    electric fields for mapping.

    Parameters
    ----------
    e_matrix_ref : np.ndarray of float [n_ref, n_ele_roi]
        Electric field matrix of reference simulations. E-fields in ROI are in the rows.
        (n_ref does not have to match n_opt)
    e_matrix_opt : np.ndarray of float [n_opt, n_ele_roi]
        Electric field matrix of optimal simulations. E-fields in ROI are in the rows.
        (n_ref does not have to match n_opt)
    points : np.ndarray of float [n_points_roi, 3]
        Node coordinates of the ROI.
    con : np.ndarray of int [n_ele_roi, 3]
        Connectivity matrix of the ROI surface.
    fn_out : str, optional, default: None
        Filename of the .hdf5 and .xdmf files for plots with ParaView (without file extension).
    threshold : float, optional, default: 0.75
        Correlation threshold above which the focality is quantified by area.

    Returns
    -------
    focality_ref : np.ndarray of float [n_ele_roi]
        Focality measure (area) of the PSF in each element > threshold for the reference case.
    focality_opt : np.ndarray of float [n_ele_roi]
        Focality measure (area) of the PSF in each element > threshold for the optimal case.
    focality_dif : np.ndarray of float [n_ele_roi]
        Difference between focality_opt and focality_ref quantifying the absolute gain in mm^2.
        Values < 0 : Optimal solution has a smaller PSF than the reference if the hotspot were in this element.
        Values > 0 : Optimal solution has a larger PSF than the reference if the hotspot were in this element.
    <file> : .hdf5 and .xdmf
        Geo and data files for visualization in ParaView.
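
    Examples
    --------
    Minimal usage sketch (array contents and shapes are illustrative only):

    >>> import numpy as np
    >>> n_ele, n_nodes = 200, 120
    >>> points = np.random.rand(n_nodes, 3)              # ROI node coordinates
    >>> con = np.random.randint(0, n_nodes, (n_ele, 3))  # ROI triangle connectivity
    >>> e_ref = np.random.rand(100, n_ele)               # e-fields of 100 random coil positions
    >>> e_opt = np.random.rand(20, n_ele)                # e-fields of 20 optimized coil positions
    >>> foc_ref, foc_opt, foc_dif = calc_opt_gain_map(e_ref, e_opt, points, con, fn_out=None)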
"""
|
|
1282
|
+
    assert e_matrix_ref.shape[1] == e_matrix_opt.shape[1], "e_matrix_ref and e_matrix_opt do not have the same " \
                                                           "number of columns, i.e. elements."
    n_ele = e_matrix_ref.shape[1]

    # determine correlation matrices
    corr_matrix_ref = np.abs(np.corrcoef(e_matrix_ref.T)) ** 2
    corr_matrix_opt = np.abs(np.corrcoef(e_matrix_opt.T)) ** 2

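    # corr_matrix_*[i, j] is the squared correlation (R^2) of the e-fields in elements i and j across all
    # stimulations. Elements whose R^2 with element i exceeds `threshold` cannot be distinguished from i by the
    # mapping, so the summed area of these elements (computed below) serves as a point-spread/focality measure.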
    # determine element areas
    p1_tri = points[con[:, 0], :]
    p2_tri = points[con[:, 1], :]
    p3_tri = points[con[:, 2], :]
    area = 0.5 * np.linalg.norm(np.cross(p2_tri - p1_tri, p3_tri - p1_tri), axis=1)

    # calculate focality
    focality_ref = np.zeros(n_ele)
    focality_opt = np.zeros(n_ele)

    for i_ele in range(n_ele):
        focality_ref[i_ele] = np.sum(area[corr_matrix_ref[i_ele, :] > threshold])
        focality_opt[i_ele] = np.sum(area[corr_matrix_opt[i_ele, :] > threshold])

    focality_dif = focality_opt - focality_ref

    # export results for visualization in paraview
    if fn_out is not None:
        if not os.path.exists(os.path.split(fn_out)[0]):
            os.makedirs(os.path.split(fn_out)[0])

        pynibs.write_geo_hdf5_surf(out_fn=fn_out + "_geo.hdf5",
                                   points=points,
                                   con=con,
                                   replace=True,
                                   hdf5_path='/mesh')

        pynibs.write_data_hdf5_surf(data=[focality_ref, focality_opt, focality_dif],
                                    data_names=["focality_ref", "focality_opt", "focality_dif"],
                                    data_hdf_fn_out=fn_out + "_data.hdf5",
                                    geo_hdf_fn=fn_out + "_geo.hdf5",
                                    replace=True)

    return focality_ref, focality_opt, focality_dif


def optimal_coilplacement_region(e, fmri_stats, best_n=1, metric='dot', non_negative=False, pos_matrices=None,
                                 res_folder=None):
    """
    Identify the optimal coil placement regions based on given electric field (E-field) matrices and fMRI statistics.

    Parameters
    ----------
    e : np.ndarray
        The E-field matrix containing field strengths for various positions.
    fmri_stats : np.ndarray
        fMRI statistics used to identify optimal stimulation regions. The values are normalized to the maximum value.
    best_n : int, default: 1
        Number of top positions to identify.
    metric : str, default: 'dot'
        The evaluation metric to use.

        - 'dot' calculates the dot product,
        - 'cor' computes the correlation coefficient.
    non_negative : bool, optional
        If True, only considers non-negative fMRI statistics.
    pos_matrices : np.ndarray, optional
        Position matrices for each coil position.
    res_folder : str, optional
        Directory to save the results. If not specified, no files are saved.

    Returns
    -------
    nth_best_coil_pos : np.ndarray of int [best_n]
        Indices of the ``best_n`` best coil positions, ordered from best to worst.

    Raises
    ------
    ValueError
        If the specified metric is not recognized.

    Notes
    -----
    - The function normalizes the input E-field matrix and fMRI statistics for evaluation.
    - If `res_folder` is specified, visualizations of the results are saved to the directory.
    - The function supports saving and writing TMS navigator instrument marker files using `simnibs.localite`.
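
    Examples
    --------
    Minimal usage sketch (random data, no files written; the localite import requires SimNIBS to be installed):

    >>> import numpy as np
    >>> e = np.random.rand(50, 300)        # 50 coil placement candidates, 300 ROI elements
    >>> fmri_stats = np.random.randn(300)  # fMRI statistic per ROI element
    >>> best = optimal_coilplacement_region(e, fmri_stats, best_n=3, metric='dot')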
"""
|
|
1367
|
+
    # Import localite utility to write TMS navigator instrument marker files
    from simnibs.utils.nnav import localite
    loc = localite()
    write_tms_navigator_im = loc.write

    # Normalize fMRI statistics
    fmri_stats /= fmri_stats.max()

    # Normalize E-field matrix
    e_max = e.max()
    e /= e_max

    # Handle negative fMRI statistics based on the non_negative parameter
    if non_negative:
        # only consider non-negative fMRI statistics: clip negative values to zero
        fmri_stats[fmri_stats < 0] = 0
        suffix = 'nonneg'
    else:
        suffix = 'full'

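    # 'dot' scores each placement by the fMRI-weighted sum of its e-field over the ROI; for 'cor',
    # np.corrcoef(e, fmri_stats) appends fmri_stats as an extra row, so [-1, :-1] picks the correlation of every
    # placement's e-field profile with the fMRI map.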
    # Calculate evaluation scores using the specified metric
    if metric == 'dot':
        ef_stats = np.dot(e, fmri_stats)  # Dot product
        suffix = f"dot_{suffix}"
        title = "np.dot(e_matrix, fmri_stats)"
    elif metric == 'cor':
        ef_stats = np.corrcoef(e, fmri_stats)[-1, :-1]  # Correlation coefficient
        suffix = f"cor_{suffix}"
        title = "np.corrcoef(e_matrix, fmri_stats)"
    else:
        raise ValueError("Unsupported metric. Use either 'dot' or 'cor'.")

    # Find the index of the best coil position and the top N positions
    nth_best_coil_pos = np.argsort(ef_stats)[-best_n:][::-1]

    # If a results folder is specified, plot and save the optimization results
    if res_folder is not None:
        def plot_placement_optimizations(stats, fn, title, n_elms, n_elms_nonneg):
            """
            Plots two images to visualize the optimization metric across coil placements.
            """
            # plot optim results
            plt.plot(stats)
            plt.suptitle(f'Optimization results')
            plt.title(f'n_elms ROI: {n_elms}, nonnegative elms: {n_elms_nonneg}')
            plt.xlabel("Coil placement candidate")
            plt.ylabel(title)
            plt.savefig(fn)
            plt.close()

            # plot another version with results normalized based on elements used
            plt.plot(stats / (stats != 0).sum())
            plt.suptitle(f'Optimization results - normalized to nonnegative elms')
            plt.title(f'n_elms ROI: {n_elms}, nonnegative elms: {n_elms_nonneg}')
            plt.xlabel("Coil placement candidate")
            plt.ylabel(title)
            plt.savefig(fn.replace('.png', '_normalized.png'))
            plt.close()

        optim_plot_fn = os.path.join(res_folder, f"coil_placements_{suffix}.png")
        plot_placement_optimizations(ef_stats, optim_plot_fn, title, e.shape[1], (fmri_stats > 0).sum())

        # If position matrices are provided, identify the best and N best positions
        if pos_matrices is not None:
            nth_opt_matsim = pos_matrices[:, :, nth_best_coil_pos]

            # Transpose the matrices for proper alignment
            nth_opt_matsim = np.transpose(nth_opt_matsim, axes=(1, 0, 2))

            # Save the best coil position to an HDF5 file
            with h5py.File(os.path.join(res_folder, f'opt_coil_pos_{metric}.hdf5'), "w") as f:
                f.create_dataset(name="matsimnibs", data=nth_opt_matsim[:, :, 0])

            # Write the instrument marker file for the top N positions
            write_tms_navigator_im(nth_opt_matsim, os.path.join(res_folder, f'opt_coil_pos_nth_{metric}.xml'),
                                   names=[f'opt_{i:0>2}' for i in range(best_n)], overwrite=True)
            pynibs.create_stimsite_from_matsimnibs(os.path.join(res_folder, f'opt_coil_pos_nth_{metric}.hdf5'),
                                                   nth_opt_matsim, overwrite=True, data=np.array(range(1, best_n + 1)),
                                                   datanames='Best coil id')

    return nth_best_coil_pos