pyNIBS 0.2024.8__py3-none-any.whl → 0.2026.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pynibs/__init__.py +26 -14
- pynibs/coil/__init__.py +6 -0
- pynibs/{coil.py → coil/coil.py} +213 -543
- pynibs/coil/export.py +508 -0
- pynibs/congruence/__init__.py +4 -1
- pynibs/congruence/congruence.py +37 -45
- pynibs/congruence/ext_metrics.py +40 -11
- pynibs/congruence/stimulation_threshold.py +1 -2
- pynibs/expio/Mep.py +120 -370
- pynibs/expio/__init__.py +10 -0
- pynibs/expio/brainsight.py +34 -37
- pynibs/expio/cobot.py +25 -25
- pynibs/expio/exp.py +10 -7
- pynibs/expio/fit_funs.py +3 -0
- pynibs/expio/invesalius.py +70 -0
- pynibs/expio/localite.py +190 -91
- pynibs/expio/neurone.py +139 -0
- pynibs/expio/signal_ced.py +345 -2
- pynibs/expio/visor.py +16 -15
- pynibs/freesurfer.py +34 -33
- pynibs/hdf5_io/hdf5_io.py +149 -132
- pynibs/hdf5_io/xdmf.py +35 -31
- pynibs/mesh/__init__.py +1 -1
- pynibs/mesh/mesh_struct.py +77 -92
- pynibs/mesh/transformations.py +121 -21
- pynibs/mesh/utils.py +191 -99
- pynibs/models/_TMS.py +2 -1
- pynibs/muap.py +1 -2
- pynibs/neuron/__init__.py +10 -0
- pynibs/neuron/models/mep.py +566 -0
- pynibs/neuron/neuron_regression.py +98 -8
- pynibs/optimization/__init__.py +12 -2
- pynibs/optimization/{optimization.py → coil_opt.py} +157 -133
- pynibs/optimization/multichannel.py +1174 -24
- pynibs/optimization/workhorses.py +7 -8
- pynibs/regression/__init__.py +4 -2
- pynibs/regression/dual_node_detection.py +229 -219
- pynibs/regression/regression.py +92 -61
- pynibs/roi/__init__.py +4 -1
- pynibs/roi/roi_structs.py +19 -21
- pynibs/roi/{roi.py → roi_utils.py} +56 -33
- pynibs/subject.py +24 -14
- pynibs/util/__init__.py +20 -4
- pynibs/util/dosing.py +4 -5
- pynibs/util/quality_measures.py +39 -38
- pynibs/util/rotations.py +116 -9
- pynibs/util/{simnibs.py → simnibs_io.py} +29 -19
- pynibs/util/{util.py → utils.py} +20 -22
- pynibs/visualization/para.py +4 -4
- pynibs/visualization/render_3D.py +4 -4
- pynibs-0.2026.1.dist-info/METADATA +105 -0
- pynibs-0.2026.1.dist-info/RECORD +69 -0
- {pyNIBS-0.2024.8.dist-info → pynibs-0.2026.1.dist-info}/WHEEL +1 -1
- pyNIBS-0.2024.8.dist-info/METADATA +0 -723
- pyNIBS-0.2024.8.dist-info/RECORD +0 -107
- pynibs/data/configuration_exp0.yaml +0 -59
- pynibs/data/configuration_linear_MEP.yaml +0 -61
- pynibs/data/configuration_linear_RT.yaml +0 -61
- pynibs/data/configuration_sigmoid4.yaml +0 -68
- pynibs/data/network mapping configuration/configuration guide.md +0 -238
- pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +0 -42
- pynibs/data/network mapping configuration/configuration_for_testing.yaml +0 -43
- pynibs/data/network mapping configuration/configuration_modelTMS.yaml +0 -43
- pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +0 -43
- pynibs/data/network mapping configuration/output_documentation.md +0 -185
- pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +0 -77
- pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +0 -1281
- pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +0 -1281
- pynibs/tests/data/InstrumentMarker20200225163611937.xml +0 -19
- pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +0 -14
- pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +0 -6373
- pynibs/tests/data/Xdmf.dtd +0 -89
- pynibs/tests/data/brainsight_niiImage_nifticoord.txt +0 -145
- pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +0 -1434
- pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +0 -47
- pynibs/tests/data/create_subject_testsub.py +0 -332
- pynibs/tests/data/data.hdf5 +0 -0
- pynibs/tests/data/geo.hdf5 +0 -0
- pynibs/tests/test_coil.py +0 -474
- pynibs/tests/test_elements2nodes.py +0 -100
- pynibs/tests/test_hdf5_io/test_xdmf.py +0 -61
- pynibs/tests/test_mesh_transformations.py +0 -123
- pynibs/tests/test_mesh_utils.py +0 -143
- pynibs/tests/test_nnav_imports.py +0 -101
- pynibs/tests/test_quality_measures.py +0 -117
- pynibs/tests/test_regressdata.py +0 -289
- pynibs/tests/test_roi.py +0 -17
- pynibs/tests/test_rotations.py +0 -86
- pynibs/tests/test_subject.py +0 -71
- pynibs/tests/test_util.py +0 -24
- /pynibs/{regression/score_types.py → neuron/models/m1_montbrio.py} +0 -0
- {pyNIBS-0.2024.8.dist-info → pynibs-0.2026.1.dist-info/licenses}/LICENSE +0 -0
- {pyNIBS-0.2024.8.dist-info → pynibs-0.2026.1.dist-info}/top_level.txt +0 -0
The hunks below all belong to a single module; the file header was lost in extraction, but the function names match `pynibs/regression/dual_node_detection.py` from the list above.

```diff
@@ -43,12 +43,12 @@ def determine_coil_position_idcs(num_idcs, all_efields, rn_seed):
     selection : ndarray of int
         (n_zaps_used) Indices of chosen coil positions.
     """
-    selection = np.array([],dtype=int)
+    selection = np.array([], dtype=int)
 
     all_idcs = list(range(all_efields.shape[0]))
 
     rn_generator = default_rng(seed=rn_seed)
-    selection = rn_generator.choice(all_idcs,size=num_idcs,replace=False)
+    selection = rn_generator.choice(all_idcs, size=num_idcs, replace=False)
 
     return selection
 
```
```diff
@@ -80,7 +80,7 @@ def determine_detectable_hotspots(e_subset, rn_seed):
         idx0 = np.random.choice(e_subset.shape[1])
         idx1 = np.random.choice(e_subset.shape[1])
 
-        if
+        if compute_correlation_with_all_elements(e_subset, idx0)[idx1] < corr_thr:
             if np.max(e_subset[:, idx0]) > e_max_thr and np.max(e_subset[:, idx1]) > e_max_thr:
                 finished = True
 
```

(Bare `-` lines here and in later hunks are removed lines the registry's diff view truncated; their original content is not recoverable.)
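The hunk above fills in a guard that the diff view truncated. For context, a minimal sketch of the rejection-sampling loop the surviving fragments imply; `corr_thr`, `e_max_thr`, and the no-repeat draw are illustrative assumptions, not the package's exact values:

```python
import numpy as np

def pick_detectable_pair(e_subset, corr_thr=0.7, e_max_thr=0.5, rn_seed=0):
    """Sketch: draw element pairs until they are weakly correlated and both
    reach a sufficiently strong e-field maximum (thresholds are assumptions)."""
    rng = np.random.default_rng(rn_seed)
    while True:
        idx0, idx1 = rng.choice(e_subset.shape[1], size=2, replace=False)
        # absolute Pearson correlation between the two elements' e-field columns
        corr = abs(np.corrcoef(e_subset[:, idx0], e_subset[:, idx1])[0, 1])
        if corr < corr_thr:
            if e_subset[:, idx0].max() > e_max_thr and e_subset[:, idx1].max() > e_max_thr:
                return idx0, idx1
```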
```diff
@@ -164,43 +164,43 @@ def create_response_data(efields, config):
             effect = 0
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
     elif network_type == 'AND':
         for efield_idx in range(0, efields.shape[0]):
-            effect = (effect_indicator[efield_idx, 0] * effect_indicator[efield_idx, 1])**2  # \in [0, 1.44]
+            effect = (effect_indicator[efield_idx, 0] * effect_indicator[efield_idx, 1]) ** 2  # \in [0, 1.44]
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=50)
             data[efield_idx] = idx_prob_dist.rvs()
 
     elif network_type == '1_INH_0':
         for efield_idx in range(0, efields.shape[0]):
-            effect = max((effect_indicator[efield_idx, 0]**2 - effect_indicator[efield_idx, 1]**2), 0)
+            effect = max((effect_indicator[efield_idx, 0] ** 2 - effect_indicator[efield_idx, 1] ** 2), 0)
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
     elif network_type == 'SH_0':
         for efield_idx in range(0, efields.shape[0]):
-            effect = (effect_indicator[efield_idx, 0])**4
+            effect = (effect_indicator[efield_idx, 0]) ** 4
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
     elif network_type == '0_INH_1':
         for efield_idx in range(0, efields.shape[0]):
-            effect = max((effect_indicator[efield_idx, 1]**2 - effect_indicator[efield_idx, 0]**2), 0)
+            effect = max((effect_indicator[efield_idx, 1] ** 2 - effect_indicator[efield_idx, 0] ** 2), 0)
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -209,7 +209,7 @@ def create_response_data(efields, config):
             effect = (effect_indicator[efield_idx, 1]) ** 4
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -218,21 +218,21 @@ def create_response_data(efields, config):
             x = effect_indicator[efield_idx, 0]
             y = effect_indicator[efield_idx, 1]
             if x > y:
-                effect = x**2 - y**2
+                effect = x ** 2 - y ** 2
             else:
-                effect = y**2 - x**2
+                effect = y ** 2 - x ** 2
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
     elif network_type == 'OR':
         for efield_idx in range(0, efields.shape[0]):
-            effect = (effect_indicator[efield_idx, 0]**4 + effect_indicator[efield_idx, 1]**4)/2
+            effect = (effect_indicator[efield_idx, 0] ** 4 + effect_indicator[efield_idx, 1] ** 4) / 2
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/4) * jitter_scale
+            scale = (1 + effect / 4) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale, K=100)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -247,7 +247,7 @@ def create_response_data(efields, config):
             effect = 0
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -256,7 +256,7 @@ def create_response_data(efields, config):
             effect = (effect_indicator[efield_idx, 0] * effect_indicator[efield_idx, 1]) ** 2  # \in [0, 1.44]
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -266,7 +266,7 @@ def create_response_data(efields, config):
             effect = max((effect_indicator[efield_idx, 0] ** 2 - effect_indicator[efield_idx, 1] ** 2), 0)
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -275,7 +275,7 @@ def create_response_data(efields, config):
             effect = (effect_indicator[efield_idx, 0]) ** 4
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -284,7 +284,7 @@ def create_response_data(efields, config):
             effect = max((effect_indicator[efield_idx, 1] ** 2 - effect_indicator[efield_idx, 0] ** 2), 0)
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -293,7 +293,7 @@ def create_response_data(efields, config):
             effect = (effect_indicator[efield_idx, 1]) ** 4
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -307,7 +307,7 @@ def create_response_data(efields, config):
                 effect = y ** 2 - x ** 2
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
@@ -316,7 +316,7 @@ def create_response_data(efields, config):
             effect = (effect_indicator[efield_idx, 0] ** 4 + effect_indicator[efield_idx, 1] ** 4) / 2
 
             loc = response_shift + response_multiplier * effect
-            scale = (1+effect/2) * jitter_scale
+            scale = (1 + effect / 2) * jitter_scale
             idx_prob_dist = gen_dist(loc=loc, scale=scale)
             data[efield_idx] = idx_prob_dist.rvs()
 
```
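The dozen hunks above apply the same operator-spacing fix to every network type's effect formula in `create_response_data`. Consolidated for reference, a sketch of the per-type formulas and the shared `loc`/`scale` pattern exactly as they appear in the hunks (the dict-based layout is illustrative, not how the module organizes them):

```python
# x, y are the two hotspots' effect indicators for one coil position.
EFFECT_FORMULAS = {
    'NO':      lambda x, y: 0.0,                      # no response network
    'AND':     lambda x, y: (x * y) ** 2,             # in [0, 1.44]
    '1_INH_0': lambda x, y: max(x ** 2 - y ** 2, 0),  # one node inhibits the other
    'SH_0':    lambda x, y: x ** 4,                   # single hotspot 0
    '0_INH_1': lambda x, y: max(y ** 2 - x ** 2, 0),
    'SH_1':    lambda x, y: y ** 4,                   # single hotspot 1
    'XOR':     lambda x, y: abs(x ** 2 - y ** 2),     # the x>y / y>x branches merged
    'OR':      lambda x, y: (x ** 4 + y ** 4) / 2,
}

def response_params(network_type, x, y, response_shift, response_multiplier, jitter_scale):
    """The loc/scale pattern repeated in every branch of create_response_data."""
    effect = EFFECT_FORMULAS[network_type](x, y)
    loc = response_shift + response_multiplier * effect
    scale = (1 + effect / 4) * jitter_scale  # a second variant further below uses (1 + effect / 2)
    return loc, scale
```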
```diff
@@ -333,7 +333,7 @@ def binarize_response_data(response_values, method, bin_factor):
 
     Parameters
     ----------
-    response_values :
+    response_values : np.ndarray
         (n_zaps) Response data.
     method : str
         Method of calculating the binarization threshold. ("mean", "slope", "median")
@@ -343,7 +343,7 @@ def binarize_response_data(response_values, method, bin_factor):
 
     Returns
     -------
-    response_bin :
+    response_bin : np.ndarray
         (n_zaps) Binarized response data, where values greater than the threshold are set to 1, and values
         less than or equal to the threshold are set to 0.
     """
```
```diff
@@ -393,17 +393,18 @@ def binarize_real_meps(meps, method='kde'):
 
     Parameters
     ----------
-    meps :
+    meps : np.ndarray
         (n_zaps) The MEP measurements.
     method : str
         The method for determining the binarization threshold. Valid options are:
-
-
-
+
+        - ``kde``: Determine the threshold based on the density of the measurement points.
+          The point density is low when the value of the MEPs is rapidly changing.
+        - ``slope``: Determine the threshold based on the derivative/slope.
 
     Returns
     -------
-
+    np.ndarray
         An array of the same shape as `meps` with 1 for data points below the determined threshold,
         and 0 otherwise.
     """
@@ -417,8 +418,8 @@ def binarize_real_meps(meps, method='kde'):
     bandwidth = .9 * min(np.std(meps), q75 - q25) * pow(meps.shape[0], -0.2)
 
     kde = KernelDensity(
-
-
+        kernel='gaussian',
+        bandwidth=bandwidth
     ).fit(np.reshape(meps, (-1, 1)))
 
     x_grid = np.linspace(0, max(meps))
@@ -455,8 +456,8 @@ def binarize_real_meps(meps, method='kde'):
         plt.figure()
         plt.scatter(x=range(0, meps_sorted.shape[0]), y=meps_sorted)
         plt.scatter(
-
-
+            x=range(meps_sorted_smoothed.shape[0] + int(num_discarded_values / 2)),
+            y=np.pad(meps_sorted_smoothed, (int(num_discarded_values / 2), 0))
         )
         plt.figure()
         plt.scatter(x=range(0, meps_sorted_smoothed_gradient.shape[0]), y=meps_sorted_smoothed_gradient)
```
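The docstring hunk spells out the `kde` option: threshold where the point density of the MEP values is low. A minimal sketch of one way to realize that, reusing the bandwidth rule from the hunk above; the valley search via `argmin` is an assumption for illustration, not necessarily pyNIBS's exact rule:

```python
import numpy as np
from sklearn.neighbors import KernelDensity

def kde_threshold(meps):
    """Sketch: fit a Gaussian KDE to the MEP values, locate the low-density
    valley between the modes, and binarize around it (1 below threshold)."""
    q25, q75 = np.percentile(meps, [25, 75])
    bandwidth = .9 * min(np.std(meps), q75 - q25) * pow(meps.shape[0], -0.2)
    kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(meps.reshape(-1, 1))
    x_grid = np.linspace(0, meps.max(), 50)
    density = np.exp(kde.score_samples(x_grid.reshape(-1, 1)))
    # valley = lowest density strictly between the grid ends (illustrative choice)
    threshold = x_grid[1 + np.argmin(density[1:-1])]
    return (meps < threshold).astype(int)
```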
```diff
@@ -598,11 +599,11 @@ def plot_data_clf(response_data_bin, e_field0, e_field1):
     grid_y_min, grid_y_max = np.min(e_field1), np.max(e_field1)
     # define a regular grid between the boundaries of the data
     grid_x_coords, grid_y_coords = \
-        np.meshgrid(np.arange(grid_x_min, grid_x_max+0.005, .005),
-                    np.arange(grid_y_min, grid_y_max+0.005, .005))
+        np.meshgrid(np.arange(grid_x_min, grid_x_max + 0.005, .005),
+                    np.arange(grid_y_min, grid_y_max + 0.005, .005))
     # have the classifier determine the class of the grid points
     grid_points_predicted = clf.predict(
-
+        np.c_[grid_x_coords.ravel(), grid_y_coords.ravel()]
     )
     # Put the result into a color plot
     grid_points_predicted = grid_points_predicted.reshape(grid_x_coords.shape)
@@ -634,18 +635,18 @@ def plot_data_clf(response_data_bin, e_field0, e_field1):
 def write_effect_map_hdf5(datatype, e_matrix, roi_surf, detection_result, base_path, config):
     """
     Create effect maps for result validation. The idea is that given a detection result, especially the hotspot_idcs and
-
+    network_type, a prediction can be made about where the response is supposed to be affected most.
     Comparing these predictions and real response measurements is a possibility for result validation.
     See output_documentation.md for more.
 
     Parameters
     ----------
     datatype : str
-        Whether the response data is
+        Whether the response data is "real" or "artificial".
     e_matrix : np.ndarray of float
         (n_zaps, n_elms) The efield magnitudes of all available coil positions across all ROI elements.
     roi_surf : ROI obj
-
+        ROI surface object.
     detection_result : np.ndarray
         (7) contains the result of the detection, consisting of
         (found_network_type, found_idcs, found_acc, found_distance, found_scores, network_type_certainty, shape_vector).
@@ -654,7 +655,6 @@ def write_effect_map_hdf5(datatype, e_matrix, roi_surf, detection_result, base_p
     config : dict
         YAML configuration file content as a dictionary.
     """
-
     print("Effect values are being calculated.")
 
     effect_full = config['effect_full']
@@ -672,7 +672,7 @@ def write_effect_map_hdf5(datatype, e_matrix, roi_surf, detection_result, base_p
 
     # can also go with different stimulation position from here, instead of the proxy
     effect_vals = [np.zeros(e_matrix.shape[1]), np.zeros(e_matrix.shape[1])]
-    for idx in [0,1]:
+    for idx in [0, 1]:
         if not np.isnan(found_idcs[idx]):
             effect_vals[idx] = min(e_subset[found_idcs[idx]] / effect_full, effect_saturation)
 
@@ -694,7 +694,7 @@ def write_effect_map_hdf5(datatype, e_matrix, roi_surf, detection_result, base_p
             else:
                 effect[elm] = effect_vals[1] ** 2 - effect_vals[0] ** 2
         elif network_type == 8:
-            effect[elm] = (effect_vals[0] ** 4 + effect_vals[1] ** 4)/2
+            effect[elm] = (effect_vals[0] ** 4 + effect_vals[1] ** 4) / 2
         else:
             raise NotImplementedError()
 
@@ -714,15 +714,15 @@ def write_effect_map_hdf5(datatype, e_matrix, roi_surf, detection_result, base_p
     while True:
         try:
             pynibs.write_geo_hdf5_surf(out_fn=fn_out_roi_geo,
-
-
-
-
+                                       points=roi_surf.node_coord_mid,
+                                       con=roi_surf.node_number_list,
+                                       replace=True,
+                                       hdf5_path='/mesh')
             pynibs.write_data_hdf5_surf(data=effect,
-
-
-
-
+                                        data_names=['effect_map'],
+                                        data_hdf_fn_out=fn_out_roi_effectmap,
+                                        geo_hdf_fn=fn_out_roi_geo,
+                                        replace=True)
             break
         except:
            print('problem accessing effectmap hdf5')
```
```diff
@@ -772,11 +772,11 @@ def write_network_detection_data_hdf5(datatype, e_matrix, response_values, base_
         fn_out_data = f"{base_path}/data_jitter_{jitter}_seed_{rn_seed}.hdf5"  # nnn
 
         # (1.1) Pick e-fields as random samples from coil positions and orientations
-        idcs =
+        idcs = determine_coil_position_idcs(num_idcs=num_coil_samples, all_efields=e_matrix, rn_seed=rn_seed)
         e_subset = e_matrix[idcs]
 
         # (1.2) generate response data
-        response_values =
+        response_values = create_response_data(efields=e_subset, config=config)
 
     if datatype == 'real':
         print('Real data will now be binarized.')
@@ -784,8 +784,8 @@ def write_network_detection_data_hdf5(datatype, e_matrix, response_values, base_
         fn_out_data = f"{base_path}/data_{flag}.hdf5"  # nnn
         e_subset = e_matrix
 
-    response_bin =
-
+    response_bin = binarize_response_data(response_values, config['bin_method'],
+                                          bin_factor=config['bin_factor'])
     response = np.vstack((response_values, response_bin))
     stop = time.time()
     runtime_gen = np.round(stop - start, 2)
@@ -797,12 +797,12 @@ def write_network_detection_data_hdf5(datatype, e_matrix, response_values, base_
         try:
             with h5py.File(fn_out_data, 'w') as f:
                 f.create_dataset(
-
-
+                    'response_data',
+                    data=response
                 )
                 f.create_dataset(
-
-
+                    'e_subset',
+                    data=e_subset
                 )
                 break
         except:
@@ -814,13 +814,13 @@ def write_network_detection_data_hdf5(datatype, e_matrix, response_values, base_
     if datatype == 'artificial':
         flag = f'jitter_{jitter}_seed_{rn_seed}_hotspots'
         if config['plot_std']:
-            std_plt =
+            std_plt = plot_data_std(response[0], e_subset[:, hotspot_idcs[0]], e_subset[:, hotspot_idcs[1]])
             fn_std_plt = os.path.join(base_path,
                                       f'plot_std_{flag}.png')  # nnn
             std_plt.savefig(fn_std_plt, dpi=600)
             std_plt.close()
         if config['plot_bin']:
-            bin_plt =
+            bin_plt = plot_data_bin(response[1], e_subset[:, hotspot_idcs[0]], e_subset[:, hotspot_idcs[1]])
             fn_bin_plt = os.path.join(base_path,
                                       f'plot_bin_{flag}.png')  # nnn
             bin_plt.savefig(fn_bin_plt, dpi=600)
@@ -828,14 +828,14 @@ def write_network_detection_data_hdf5(datatype, e_matrix, response_values, base_
         if config['plot_curves']:
             plot_idx0 = hotspot_idcs[0]
 
-            plt_curve =
+            plt_curve = plot_data_bin(np.zeros(response[1].shape), e_subset[:, plot_idx0], response[0])
             plt_curve.ylabel('response')
             plt_curve.savefig(f'{base_path}/plot_{plot_idx0}_curve_{flag}.png', dpi=600)
             plt_curve.close()
 
             plot_idx1 = hotspot_idcs[1]
 
-            plt_curve1 =
+            plt_curve1 = plot_data_bin(np.zeros(response[1].shape), e_subset[:, plot_idx1], response[0])
             plt_curve1.ylabel('response')
             plt_curve1.xlabel('E-field $h_1')
             plt_curve1.savefig(f'{base_path}/plot_{plot_idx1}_curve_{flag}.png', dpi=600)
```
```diff
@@ -862,9 +862,9 @@ def determine_scoring_idcs(e_subset, scoring_emag_thr=0, scoring_interval=22, me
     method : str, default: 'optimized'
         Specify method for selecting scoring indices.
 
-
-
-
+        * `random`: Random elements across the ROI are picked ('random'),
+        * `optimized`: Optimal elements are chosen by maximizing the distances between them. ('optimized') (Only applicable without
+          scoring emag threshold. Requires fn_geo.)
 
     fn_geo : str, optional
         Required to compute element distances if method='optimized'.
@@ -895,13 +895,13 @@ def determine_scoring_idcs(e_subset, scoring_emag_thr=0, scoring_interval=22, me
     if method == 'optimized':
         with h5py.File(fn_geo, 'r') as f:
             mesh = trimesh.Trimesh(
-
-
+                vertices=f['mesh/nodes/node_coord'][:],
+                faces=f['mesh/elm/triangle_number_list'][:]
             )
 
         pts, scoring_idcs = trimesh.sample.sample_surface_even(
-
-
+            mesh=mesh,
+            count=num_subsample)
         # add additional desired index
         if required_idcs is not None:
             scoring_idcs = np.append(scoring_idcs, required_idcs)
```
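The hunk restores the keyword arguments to `trimesh.Trimesh` and `trimesh.sample.sample_surface_even`. A self-contained usage sketch of the 'optimized' branch, with the HDF5 dataset paths taken from the hunk (the `np.unique` de-duplication at the end is an illustrative simplification):

```python
import h5py
import numpy as np
import trimesh

def sample_scoring_idcs(fn_geo, num_subsample=500):
    """Sketch: sample the ROI surface evenly and keep the hit face indices as
    scoring elements. Note sample_surface_even returns (points, face_index)
    and may return fewer than `count` samples."""
    with h5py.File(fn_geo, 'r') as f:
        mesh = trimesh.Trimesh(
            vertices=f['mesh/nodes/node_coord'][:],
            faces=f['mesh/elm/triangle_number_list'][:],
        )
    pts, face_idcs = trimesh.sample.sample_surface_even(mesh, count=num_subsample)
    return np.unique(face_idcs)
```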
```diff
@@ -934,13 +934,13 @@ def compute_scores_with_single_element_clf(element_idx, efields, data, weights=N
         A tuple with the index of the investigated element and an array containing the scores of this element with each
         other array element.
     """
-    dim
+    dim = efields.shape[1]
    scores = np.zeros(dim)
 
     num_coil_samples = data.shape[0]
-    min_samples_leaf = max(int(0.05*num_coil_samples), 1)
+    min_samples_leaf = max(int(0.05 * num_coil_samples), 1)
 
-    clf = tree.DecisionTreeClassifier(max_depth=2, min_samples_leaf=min_samples_leaf)
+    clf = tree.DecisionTreeClassifier(max_depth=2, min_samples_leaf=min_samples_leaf)  # ccc
     # tried different parameter settings and adding max_leaf_nodes=3, didn't improve performance
 
     elmts_to_iterate_over = scoring_idcs if scoring_idcs is not None else range(dim)
```
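This hunk completes `dim = efields.shape[1]` and tags the depth-2 decision tree used for pairwise scoring. A hedged sketch of what one such pairwise score could look like; scoring by training accuracy is an illustrative choice, since the diff shows only the classifier setup, not the scoring criterion:

```python
import numpy as np
from sklearn import tree

def pair_score(efields, response_bin, i, j):
    """Sketch: how well do the e-fields of elements i and j jointly explain
    the binarized response? The min_samples_leaf rule mirrors the hunk."""
    n = response_bin.shape[0]
    min_samples_leaf = max(int(0.05 * n), 1)
    clf = tree.DecisionTreeClassifier(max_depth=2, min_samples_leaf=min_samples_leaf)
    X = efields[:, [i, j]]          # two-feature design matrix
    clf.fit(X, response_bin)
    return clf.score(X, response_bin)  # training accuracy as a stand-in score
```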
```diff
@@ -979,10 +979,10 @@ def compute_scores_with_single_element_regression(element_idx, efields, data, we
         A tuple with the index of the investigated element and an array containing the scores of this element with each
         other array element.
     """
-    dim
+    dim = efields.shape[1]
     scores = np.zeros(dim)
     num_coil_samples = data.shape[0]
-    min_samples_leaf = max(int(0.05*num_coil_samples), 1)
+    min_samples_leaf = max(int(0.05 * num_coil_samples), 1)
     if VERBOSE:
         print(f'min_samples_leaf for scoring: {min_samples_leaf}')
 
@@ -990,7 +990,6 @@ def compute_scores_with_single_element_regression(element_idx, efields, data, we
 
     for i in elmts_to_iterate_over:
         if i > element_idx:
-
             data_var = np.var(data)
             df = pd.DataFrame({
                 'a': pd.Series(efields[:, element_idx]),
@@ -1004,11 +1003,11 @@ def compute_scores_with_single_element_regression(element_idx, efields, data, we
             #fit = model.fit(df['target'], x=df['a'], y=df['b'])
             #scores[i] = 1 - np.var(fit.residual) / data_var
 
-            def gaussian_multi(x,y,amp=1,x0=0,y0=0,sigma_x=1,sigma_y=1):
-                return amp * np.exp(-0.5* (((x-x0) / sigma_x)**2 + ((y-y0)/sigma_y)**2))
+            def gaussian_multi(x, y, amp=1, x0=0, y0=0, sigma_x=1, sigma_y=1):
+                return amp * np.exp(-0.5 * (((x - x0) / sigma_x) ** 2 + ((y - y0) / sigma_y) ** 2))
 
-            model = lmfit.Model(gaussian_multi, independent_vars=['x','y'])
-            fit = model.fit(df['target'],x=df['a'], y=df['b'])
+            model = lmfit.Model(gaussian_multi, independent_vars=['x', 'y'])
+            fit = model.fit(df['target'], x=df['a'], y=df['b'])
 
             scores[i] = 1 - np.var(fit.residual) / data_var
 
```
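The hunks above reformat the two-variable Gaussian surrogate and its `lmfit` fit. The same fit-and-score step, made self-contained with synthetic data standing in for two e-field columns and a response vector; the R²-style score is exactly the expression from the hunk:

```python
import numpy as np
import lmfit

def gaussian_multi(x, y, amp=1, x0=0, y0=0, sigma_x=1, sigma_y=1):
    return amp * np.exp(-0.5 * (((x - x0) / sigma_x) ** 2 + ((y - y0) / sigma_y) ** 2))

# Toy data (assumption: just for demonstration, not from the package).
rng = np.random.default_rng(0)
x = rng.uniform(0, 1, 200)
y = rng.uniform(0, 1, 200)
target = gaussian_multi(x, y, amp=2, x0=0.6, y0=0.4) + rng.normal(0, 0.05, 200)

model = lmfit.Model(gaussian_multi, independent_vars=['x', 'y'])
fit = model.fit(target, x=x, y=y)
score = 1 - np.var(fit.residual) / np.var(target)  # R^2-style score from the hunk
```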
```diff
@@ -1040,16 +1039,18 @@ def compute_scores_with_all_elements(efields, data, weights=None, scoring_idcs=N
     np.ndarray of float
         (n_elms, n_elms) An upper triangle matrix containing the scores of each ROI element with all others.
     """
-    dim
-    scores = np.zeros((dim,dim))
+    dim = efields.shape[1]
+    scores = np.zeros((dim, dim))
 
     if scoring_method == 'clf':
         for j in range(dim):
-            element_idx, scores_row =
+            element_idx, scores_row = compute_scores_with_single_element_clf(j, efields, data[1], weights,
+                                                                             scoring_idcs)
             scores[element_idx] = scores_row
     elif scoring_method == 'regression':
         for j in range(dim):
-            element_idx, scores_row =
+            element_idx, scores_row = compute_scores_with_single_element_regression(j, efields, data[0], weights,
+                                                                                    scoring_idcs)
             scores[element_idx] = scores_row
 
     return scores
@@ -1075,28 +1076,27 @@ def compute_scores_with_all_elements_MP(efields, data, weights=None, scoring_idc
         Select 'clf' for Decision Tree Classifier, 'regression' for multivariable regression method,
         'regress_data' for one-dimensional regression method.
 
-
     Returns
     -------
     np.ndarray of float
         An upper triangle matrix containing the scores of each ROI element with all others.
     """
-    dim
-    scores = np.zeros((dim,dim))
+    dim = efields.shape[1]
+    scores = np.zeros((dim, dim))
 
     elmts_to_iterate_over = scoring_idcs if scoring_idcs is not None else range(dim)
 
     if scoring_method == 'clf':  # clf version: data[1] is used (binarized)
-        num_processes=multiprocessing.cpu_count()
+        num_processes = multiprocessing.cpu_count()
         with multiprocessing.Pool(processes=num_processes) as pool:
             # @TODO: monitor if chunksize=1 impairs performance; it was set to 1 to have a smoother tdqm-progress bar.
             mp_res = pool.starmap(
-
-
-
-
-
-
+                compute_scores_with_single_element_clf,
+                tqdm(
+                    [(j, efields, data[1], weights, scoring_idcs) for j in elmts_to_iterate_over],
+                    total=len(elmts_to_iterate_over)
+                ),
+                chunksize=1
             )
             pool.close()
             pool.join()
```
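These hunks restore the argument list of `pool.starmap`. A standalone sketch of the pattern with a toy worker; as the in-code TODO notes, `chunksize=1` was chosen for a smoother progress bar at some scheduling cost:

```python
import multiprocessing
from tqdm import tqdm

def score_row(j, n):
    """Toy stand-in for compute_scores_with_single_element_clf:
    returns the element index and one row of scores."""
    return j, [j * k for k in range(n)]

if __name__ == '__main__':
    n = 64
    with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
        # As in the hunks: the argument tuples are wrapped in tqdm and handed
        # to starmap one at a time via chunksize=1.
        mp_res = pool.starmap(
            score_row,
            tqdm([(j, n) for j in range(n)], total=n),
            chunksize=1,
        )
        pool.close()
        pool.join()
```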
```diff
@@ -1106,12 +1106,12 @@ def compute_scores_with_all_elements_MP(efields, data, weights=None, scoring_idc
         with multiprocessing.Pool(processes=num_processes) as pool:
             # @TODO: monitor if chunksize=1 impairs performance; it was set to 1 to have a smoother tdqm-progress bar.
             mp_res = pool.starmap(
-
-
-
-
-
-
+                compute_scores_with_single_element_regression,
+                tqdm(
+                    [(j, efields, data[0], weights, scoring_idcs) for j in elmts_to_iterate_over],
+                    total=len(elmts_to_iterate_over)
+                ),
+                chunksize=1
             )
             pool.close()
             pool.join()
@@ -1170,12 +1170,12 @@ def hotspots_by_score_percentiles(scores, accumulated=False):
         Array with dimensions equal to the number of ROI elements, where elements are marked as hotspots.
     """
     # choose number of score accuracies that should contribute to the computed percentile
-    num_desired_elements
+    num_desired_elements = 100
     # each score is associated with two elements - so if we want n-elements as hotspots,
     # we must threshold n/2 scores. # n x n matrix of scores
-    fraction_from_all_elements = (num_desired_elements/(scores.shape[0]**2))*100
+    fraction_from_all_elements = (num_desired_elements / (scores.shape[0] ** 2)) * 100
     # somehow num_desired_element doesn't have the desired effect, but threshold still works well
-    percentile = np.percentile(scores,100-fraction_from_all_elements)
+    percentile = np.percentile(scores, 100 - fraction_from_all_elements)
 
     hotspot_idcs = np.array(np.where(scores >= percentile))
     hotspots = np.zeros(scores.shape[0])
@@ -1183,7 +1183,7 @@ def hotspots_by_score_percentiles(scores, accumulated=False):
     if accumulated:
         np.add.at(hotspots, hotspot_idcs.flatten(), 1)
     else:
-        hotspots[
+        hotspots[hotspot_idcs.flatten()] = 1
 
     return hotspots, hotspot_idcs
 
```
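The hunk fills in `num_desired_elements = 100`. A short worked example of the percentile arithmetic, with an illustrative ROI size:

```python
import numpy as np

# Keep roughly num_desired_elements scores out of the n*n score matrix.
n = 3000                       # illustrative ROI size (assumption)
num_desired_elements = 100
fraction = (num_desired_elements / n ** 2) * 100   # ~0.0011 % of all scores
scores = np.random.default_rng(0).random((n, n))
percentile = np.percentile(scores, 100 - fraction) # ~99.9989th percentile
hotspot_idcs = np.argwhere(scores >= percentile)   # ~100 (row, col) pairs survive
```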
```diff
@@ -1247,29 +1247,29 @@ def write_hotspot_scoremap_hdf5(datatype, e_subset, data, roi_surf, fn_geo, base
     # select the relevant idcs
     scoring_emag_thr = config['scoring_emag_thr']
     scoring_interval = config['scoring_interval']
-    scoring_idcs =
-
+    scoring_idcs = determine_scoring_idcs(e_subset, scoring_emag_thr, scoring_interval,
+                                          method='optimized', fn_geo=fn_geo, required_idcs=required_idcs)
 
     # calculate scores and potential hotspots
     # SINGLE NODE METHODS: regress_data and mutual information score
     if config['scoring_method'] == 'regress_data':  # only for single node case
         con = roi_surf.node_number_list
         score_map = np.zeros(e_subset.shape[1])
-        score_map[scoring_idcs] = pynibs.regress_data(e_matrix=e_subset,
-
-
-
-
-
-
-
-
-
-
-
-
+        score_map[scoring_idcs] = pynibs.regression.regress_data(e_matrix=e_subset,
+                                                                 mep=data[0],
+                                                                 elm_idx_list=scoring_idcs,
+                                                                 # fun='linear',
+                                                                 n_cpu=160,
+                                                                 con=con,
+                                                                 n_refit=0,
+                                                                 return_fits=False,
+                                                                 score_type='R2',
+                                                                 verbose=True,
+                                                                 pool=None,
+                                                                 refit_discontinuities=False,
+                                                                 select_signed_data=False)
         scores = score_map
-        hotspots = score_map
+        hotspots = score_map  # only because in dual node approaches, this variable is needed
@@ -1277,17 +1277,17 @@ def write_hotspot_scoremap_hdf5(datatype, e_subset, data, roi_surf, fn_geo, base
         # subsampling
         scores = np.zeros(e_subset.shape[1])
         scores[scoring_idcs] = mi_scores
-        hotspots = scores
+        hotspots = scores  # only because in dual node approaches, this variable is needed
         score_map = scores
 
     # DUAL NODE METHODS: classifier and regression based scoring
-    else:
-        scores =
-
+    else:  # dual nodes expected
+        scores = compute_scores_with_all_elements_MP(e_subset, data, scoring_idcs=scoring_idcs,
+                                                     scoring_method=config['scoring_method'])
 
         score_map = np.max(scores, axis=0)
 
-        hotspots =
+        hotspots = hotspots_by_score_percentiles(scores, accumulated=True)[0]
 
     if config['save_files']:
         # save data as hdf5 _geo file (mapped)
@@ -1295,20 +1295,20 @@ def write_hotspot_scoremap_hdf5(datatype, e_subset, data, roi_surf, fn_geo, base
         while True:
             try:
                 pynibs.write_geo_hdf5_surf(out_fn=fn_out_roi_geo,
-
-
-
-
+                                           points=roi_surf.node_coord_mid,
+                                           con=roi_surf.node_number_list,
+                                           replace=True,
+                                           hdf5_path='/mesh')
                 pynibs.write_data_hdf5_surf(data=hotspots,
-
-
-
-
+                                            data_names='res_hotspots',
+                                            data_hdf_fn_out=fn_out_roi_hotpot_data,
+                                            geo_hdf_fn=fn_out_roi_geo,
+                                            replace=True)
                 pynibs.write_data_hdf5_surf(data=score_map,
-
-
-
-
+                                            data_names='res_scoremap',
+                                            data_hdf_fn_out=fn_out_roi_scoremap,
+                                            geo_hdf_fn=fn_out_roi_geo,
+                                            replace=True)
                 break
             except:
                 print('problem writing score hdf5')
```
```diff
@@ -1336,7 +1336,6 @@ def compute_correlation_with_all_elements(e_subset, elm_idx):
     corr_coeff : np.ndarray
         (n_elms) Array containing the absolute Pearson correlation coefficients of elm_idx with each other element.
     """
-
     corr_coeff = np.zeros(e_subset.shape[1])
     x = e_subset[:, elm_idx]
     for idx in range(0, e_subset.shape[1]):
@@ -1345,7 +1344,7 @@ def compute_correlation_with_all_elements(e_subset, elm_idx):
             pearson_res = scipy.stats.pearsonr(x, y)
             corr_coeff[idx] = abs(pearson_res[0])
         else:
-            corr_coeff[idx] = (x==y)
+            corr_coeff[idx] = (x == y)
     return corr_coeff
 
 
@@ -1385,11 +1384,11 @@ def find_distinct_hotspots(scorematrix, hotspot_mask, e_subset, acc_thr, corr_th
     found_bool = [False, False]
 
     # first hotspot: take the max score, if multiple maximums the one with the max accuracy
-
+
     # Find the indices of the maximum values in hotspot_mask
     max_indices = np.argwhere(hotspot_mask == np.max(hotspot_mask))
     # Create a list of (max_indices, accuracy) pairs
-    hotspot_info = [(idx, np.nanmax(scorematrix[:,idx])) for idx in max_indices]
+    hotspot_info = [(idx, np.nanmax(scorematrix[:, idx])) for idx in max_indices]
     # Find the entry with the highest accuracy
     max_entry = max(hotspot_info, key=lambda x: x[1])
     # Extract the max_index
@@ -1399,12 +1398,12 @@ def find_distinct_hotspots(scorematrix, hotspot_mask, e_subset, acc_thr, corr_th
     # check whether hotspot 0 meets hotspot criteria: accuracy threshold and score threshold.
     # The score threshold is hardcoded because it is mostly to keep very strong single hotspot cases from running too
     # long: they often have one hotspot with a very high score and every other element has score 1.
-    found_bool[0] = bool((hotspot_scores[0]>=2 and hotspot_acc[0]>=acc_thr))
+    found_bool[0] = bool((hotspot_scores[0] >= 2 and hotspot_acc[0] >= acc_thr))
 
     # second hotspot: next maximum score checking hotspot and correlation criteria
     if found_bool[0]:
         hotspot_mask[hotspot_idcs[0]] = 0
-        correlations =
+        correlations = compute_correlation_with_all_elements(e_subset, hotspot_idcs[0])
 
         while np.max(hotspot_mask) > 1 and not found_bool[1]:
             # Find the indices of the maximum values in hotspot_mask
@@ -1419,13 +1418,13 @@ def find_distinct_hotspots(scorematrix, hotspot_mask, e_subset, acc_thr, corr_th
             hot_candi_scores = hotspot_mask[hot_candi]
 
             # check whether hotspot candidate meets hotspot criteria, if not: ignore this one and keep looking
-            if correlations[hot_candi] < corr_thr and bool((hot_candi_scores>=2 and hot_candi_acc>=acc_thr)):
+            if correlations[hot_candi] < corr_thr and bool((hot_candi_scores >= 2 and hot_candi_acc >= acc_thr)):
                 hotspot_idcs[1] = hot_candi
                 hotspot_scores[1] = hot_candi_scores
                 hotspot_acc[1] = hot_candi_acc
                 found_bool[1] = True
             else:
-                hotspot_mask[hot_candi] = 0
+                hotspot_mask[hot_candi] = 0  # ignore this one
 
     return hotspot_idcs, hotspot_scores, hotspot_acc, found_bool
 
```
```diff
@@ -1464,7 +1463,7 @@ def find_distinct_single_hotspot(hotspot_mask, acc_thr):
 
     hotspot_acc[0] = hotspot_mask[hotspot_idcs[0]]
     hotspot_scores[0] = hotspot_mask[hotspot_idcs[0]]
-    found_bool[0] = bool(hotspot_acc[0]>=acc_thr)
+    found_bool[0] = bool(hotspot_acc[0] >= acc_thr)
 
     return hotspot_idcs, hotspot_scores, hotspot_acc, found_bool
 
@@ -1493,11 +1492,11 @@ def calc_dist_pairwise(fn_geo, idx0, idx1):
     roi_tris = roi_geo_h5["mesh/elm/triangle_number_list"][:]
     roi_nodes = roi_geo_h5["mesh/nodes/node_coord"][:]
 
-    distances = pynibs.geodesic_dist(
-
-
-
-
+    distances = pynibs.util.quality_measures.geodesic_dist(
+        tris=roi_tris,
+        nodes=roi_nodes,
+        source=idx0,
+        source_is_node=False
     )[1]
     distance = distances[idx1]
 
@@ -1505,11 +1504,11 @@ def calc_dist_pairwise(fn_geo, idx0, idx1):
         print(f'****** Attention! ****** \n'
               f'Distance between {idx0} and {idx1} could not be measured geodesically, euclidean distance is used. \n'
               f'******************')
-        distances = pynibs.euclidean_dist(
-
-
-
-
+        distances = pynibs.util.quality_measures.euclidean_dist(
+            tris=roi_tris,
+            nodes=roi_nodes,
+            source=idx0,
+            source_is_node=False
        )[1]
         distance = distances[idx1]
 
```
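The two hunks above re-expand the keyword arguments of pynibs's distance helpers. A sketch of the geodesic-then-euclidean fallback in `calc_dist_pairwise`, using the signatures shown in the hunks; the finiteness check standing in for the failure condition is an assumption:

```python
import numpy as np
import pynibs

def roi_distance(roi_tris, roi_nodes, idx0, idx1):
    """Sketch: try the geodesic distance first, fall back to euclidean when
    the geodesic result is unusable (the exact failure test is not shown
    in the diff; np.isfinite is an illustrative stand-in)."""
    distances = pynibs.util.quality_measures.geodesic_dist(
        tris=roi_tris, nodes=roi_nodes, source=idx0, source_is_node=False)[1]
    if not np.isfinite(distances[idx1]):
        distances = pynibs.util.quality_measures.euclidean_dist(
            tris=roi_tris, nodes=roi_nodes, source=idx0, source_is_node=False)[1]
    return distances[idx1]
```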
```diff
@@ -1538,8 +1537,8 @@ def assign_found_hotspot_single(fn_geo, hotspot_idcs, found_idx):
     print('Hotspot is assigned.')
 
     # a and b real hotspots, c found hotspot
-    dist_ac =
-    dist_bc =
+    dist_ac = calc_dist_pairwise(fn_geo, hotspot_idcs[0], found_idx)
+    dist_bc = calc_dist_pairwise(fn_geo, hotspot_idcs[1], found_idx)
 
     if dist_ac <= dist_bc:
         # then a gets assigned c
@@ -1571,10 +1570,10 @@ def assign_found_hotspots(fn_geo, hotspot_idcs, found_idcs):
     print('Hotspots are assigned.')
 
     # a and b real hotspots, c and d found hotspots
-    dist_ac =
-    dist_bc =
-    dist_ad =
-    dist_bd =
+    dist_ac = calc_dist_pairwise(fn_geo, hotspot_idcs[0], found_idcs[0])
+    dist_bc = calc_dist_pairwise(fn_geo, hotspot_idcs[1], found_idcs[0])
+    dist_ad = calc_dist_pairwise(fn_geo, hotspot_idcs[0], found_idcs[1])
+    dist_bd = calc_dist_pairwise(fn_geo, hotspot_idcs[1], found_idcs[1])
 
     if dist_ac + dist_bd <= dist_bc + dist_ad:
         # then a gets assigned c and b gets assigned d
@@ -1610,21 +1609,21 @@ def get_quadrant_samples(e_field0, e_field1):
 
     # First get portion of distances horizontally and vertically to shift the four values slightly into the middle
     # (offset). No offset would mean the reference points are at the corners of the rectangle spanned by the data.
-    h = (max(e_field0) - min(e_field0))/24
-    v = (max(e_field1) - min(e_field1))/24
+    h = (max(e_field0) - min(e_field0)) / 24
+    v = (max(e_field1) - min(e_field1)) / 24
 
     # 1 (bottom left)
-    x = np.array(min(e_field0)+h)
-    y = np.array(min(e_field1)+v)
+    x = np.array(min(e_field0) + h)
+    y = np.array(min(e_field1) + v)
     # 2 (top left)
-    x = np.append(x, min(e_field0)+h)
-    y = np.append(y, max(e_field1)-v)
+    x = np.append(x, min(e_field0) + h)
+    y = np.append(y, max(e_field1) - v)
     # 3 (bottom right)
-    x = np.append(x, max(e_field0)-h)
-    y = np.append(y, min(e_field1)+v)
+    x = np.append(x, max(e_field0) - h)
+    y = np.append(y, min(e_field1) + v)
     # 4 (top right)
-    x = np.append(x, max(e_field0)-h)
-    y = np.append(y, max(e_field1)-v)
+    x = np.append(x, max(e_field0) - h)
+    y = np.append(y, max(e_field1) - v)
 
     return np.vstack((x, y)).transpose()
 
```
```diff
@@ -1652,7 +1651,7 @@ def identify_shape(idx, scorematrix, e_subset, response):
         Vector indicating shape of decision trees used in scoring the element in question (network type vector).
     """
     num_coil_samples = int(response[0].shape[0])
-    min_samples_leaf = max(int(0.05*num_coil_samples), 1)
+    min_samples_leaf = max(int(0.05 * num_coil_samples), 1)
     if VERBOSE:
         print(f'min_samples_leaf for identify_shape: {min_samples_leaf}')
 
@@ -1664,7 +1663,7 @@ def identify_shape(idx, scorematrix, e_subset, response):
     # get indices of those elements that 'gave' element 'idx' its hotspot score (meaning: with these elements a score
     # higher than a calculated threshold was achieved)
     # because those are the elements the network identification is based on
-    hotspot_idcs =
+    hotspot_idcs = hotspots_by_score_percentiles(scorematrix, accumulated=False)[1]
     hotspot_pairs = hotspot_idcs.transpose()
     for i in np.arange(0, hotspot_pairs.shape[0], 1):
         if hotspot_pairs[i, 0] == idx:
@@ -1677,9 +1676,9 @@ def identify_shape(idx, scorematrix, e_subset, response):
         iterate_idcs = np.argpartition(scorematrix[idx], -500)[-500:]
         print(f'Because of a exceptionally many elements element {idx} has a high accuracy with, only the 500 ones with'
               ' highest accuracy were used for network type computation.')
-        shape[0]
+        shape[0] = -500
 
-    elmts_to_iterate_over = iterate_idcs[iterate_idcs!=idx]
+    elmts_to_iterate_over = iterate_idcs[iterate_idcs != idx]
 
     clf = tree.DecisionTreeClassifier(max_depth=2, min_samples_leaf=min_samples_leaf)  # ccc
     # tried different parameter settings and adding max_leaf_nodes=3, didn't improve performance
@@ -1699,10 +1698,10 @@ def identify_shape(idx, scorematrix, e_subset, response):
         # shape parameter has 9 entries, ignore shape[0], shape[1:8] represent the 8 network types
         q_samples = get_quadrant_samples(e_field0, e_field1)
         pred = clf.predict(q_samples)
-        network_nr = 1 + pred[3] + 2*pred[2] + 4*pred[1]
+        network_nr = 1 + pred[3] + 2 * pred[2] + 4 * pred[1]
        # shape[1] += pred[0] - worth a try
         # could use pred[0] for multiple things, like help distinguish shape 5 and 6
-        shape[int(network_nr)] +=1
+        shape[int(network_nr)] += 1
 
     if VERBOSE:
         print(shape)
```
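The hunks above touch the quadrant-prediction step of `identify_shape`. A worked example of the 3-bit code `network_nr = 1 + pred[3] + 2 * pred[2] + 4 * pred[1]`, with the point order taken from `get_quadrant_samples` and the type list from the hunks further below (`pred[0]`, the bottom-left point, is unused, as the in-code comments note):

```python
# get_quadrant_samples orders the reference points as
# [bottom-left, top-left, bottom-right, top-right]; pred[k] is the
# classifier's 0/1 response prediction at point k.
network_types = ['NO', 'AND', '1_INH_0', 'SH_0', '0_INH_1', 'SH_1', 'XOR', 'OR']

def decode(pred):
    network_nr = 1 + pred[3] + 2 * pred[2] + 4 * pred[1]
    return network_types[network_nr - 1]

assert decode([0, 0, 0, 0]) == 'NO'    # response nowhere
assert decode([0, 0, 0, 1]) == 'AND'   # only when both e-fields are high
assert decode([0, 0, 1, 1]) == 'SH_0'  # whenever e-field 0 is high
assert decode([0, 1, 0, 1]) == 'SH_1'  # whenever e-field 1 is high
assert decode([0, 1, 1, 0]) == 'XOR'   # exactly one e-field high
assert decode([0, 1, 1, 1]) == 'OR'    # either e-field high
```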
```diff
@@ -1735,8 +1734,8 @@ def identify_network_type(found_idcs, scorematrix, e_subset, response):
         value of the second most probable network)
     """
     # calculate network type vectors
-    shape_0 =
-    shape_1 =
+    shape_0 = identify_shape(found_idcs[0], scorematrix, e_subset, response)
+    shape_1 = identify_shape(found_idcs[1], scorematrix, e_subset, response)
 
     # axes are wrong for hotspot 1 (since it is on the y axis in the quadrant logic that the shape parameter is based
     # on), switch the asymmetric types: type 4<->6 and type 3<->5
@@ -1830,12 +1829,12 @@ def whole_network_detection(e_subset, response_data, scorematrix, hotspot_mask,
     network_type_certainty = np.nan
     shape_vector = np.full(9, np.nan)
     found_distance = [np.nan, np.nan]
-    if config['scoring_method'] == 'regress_data' or config['scoring_method'] == 'mi':
+    if config['scoring_method'] == 'regress_data' or config['scoring_method'] == 'mi':  # single node approaches
         found_idcs, found_scores, found_acc, found_bool = \
-
+            find_distinct_single_hotspot(hotspot_mask, acc_thr)
     else:  # dual node approach
         found_idcs, found_scores, found_acc, found_bool = \
-
+            find_distinct_hotspots(scorematrix, hotspot_mask, e_subset, acc_thr, corr_thr)
 
     # 1 hotspot found
     if found_bool[0] and not found_bool[1]:
@@ -1847,7 +1846,7 @@ def whole_network_detection(e_subset, response_data, scorematrix, hotspot_mask,
     elif found_bool[0] and found_bool[1]:
         print('Two potential hotspots were detected.')
         (found_network_type, shape_vector, network_type_certainty) = \
-
+            identify_network_type(found_idcs, scorematrix, e_subset, response_data)
         print(f'Identified network type: ({found_network_type}) for hotspots {found_idcs}')
 
     else:
@@ -1860,13 +1859,13 @@ def whole_network_detection(e_subset, response_data, scorematrix, hotspot_mask,
     # plot response data on plain spanned by both hotspot efields
     if found_bool[0] and found_bool[1]:
         if config['plot_std']:
-            std_plt =
+            std_plt = plot_data_std(response_data[0], e_subset[:, found_idcs[0]], e_subset[:, found_idcs[1]])
             fn_std_plt = os.path.join(base_path,
                                       f'plot_std_found_hotspots_{found_idcs[0]}_{found_idcs[1]}.png')  # nnn
             std_plt.savefig(fn_std_plt, dpi=600)
             std_plt.close()
         if config['plot_bin']:
-            bin_plt =
+            bin_plt = plot_data_bin(response_data[1], e_subset[:, found_idcs[0]], e_subset[:, found_idcs[1]])
             fn_bin_plt = os.path.join(base_path,
                                       f'plot_bin_found_hotspots_{found_idcs[0]}_{found_idcs[1]}.png')  # nnn
             bin_plt.savefig(fn_bin_plt, dpi=600)
@@ -1874,13 +1873,14 @@ def whole_network_detection(e_subset, response_data, scorematrix, hotspot_mask,
     if config['plot_curves']:
         if found_bool[0]:
             plot_idx0 = found_idcs[0]
-            plt_curve =
+            plt_curve = plot_data_bin(np.zeros(response_data[1].shape), e_subset[:, plot_idx0], response_data[0])
             plt_curve.ylabel('response')
             plt_curve.savefig(f'{base_path}/plot_found_hotspot_{plot_idx0}_curve.png', dpi=600)
             plt_curve.close()
         if found_bool[1]:
             plot_idx1 = found_idcs[1]
-            plt_curve1 =
+            plt_curve1 = plot_data_bin(np.zeros(response_data[1].shape), e_subset[:, plot_idx1],
+                                       response_data[0])
             plt_curve1.ylabel('response')
             plt_curve1.xlabel('E-field $h_1')
             plt_curve1.savefig(f'{base_path}/plot_found_hotspot_{plot_idx1}_curve.png', dpi=600)
```
```diff
@@ -1924,7 +1924,7 @@ def write_nda_test_results_csv(runtimes, e_subset, response_data, hotspot_mask,
     # (1) read config params and results
     hotspot_idcs = (config['hotspot_elm0'], config['hotspot_elm1'])
     found_network_type, found_idcs, found_acc, found_distance, found_scores, \
-        network_type_certainty, shape_vector
+        network_type_certainty, shape_vector = detection_result
 
     # (2) hotspot assignment to enable network identification evaluation
 
@@ -1946,7 +1946,7 @@ def write_nda_test_results_csv(runtimes, e_subset, response_data, hotspot_mask,
 
     # 2 hotspots found: assign hotspots so that total distances are minimized for more precise evaluation
     elif found_network_type != 1:
-        assignment =
+        assignment = assign_found_hotspots(fn_geo, hotspot_idcs, found_idcs)
         found_distance = [np.round(assignment[0], 2), np.round(assignment[1], 2)]
         # switch hotspot order if reassignment necessary
         if found_idcs[1] == assignment[2][0]:
@@ -1967,33 +1967,35 @@ def write_nda_test_results_csv(runtimes, e_subset, response_data, hotspot_mask,
 
     # (3) evaluation
     # collect additional info for result evaluation
-    real_hotspot_dist = np.round(
-    real_hotspot_corr = np.round(
+    real_hotspot_dist = np.round(calc_dist_pairwise(fn_geo, hotspot_idcs[0], hotspot_idcs[1]), 2)
+    real_hotspot_corr = np.round(
+        compute_correlation_with_all_elements(e_subset, hotspot_idcs[0])[hotspot_idcs[1]], 2)
     hotspot_0_emax = np.round(np.max(e_subset[:, hotspot_idcs[0]]), 3)
     hotspot_1_emax = np.round(np.max(e_subset[:, hotspot_idcs[1]]), 3)
     num_hotspot_candidates = np.count_nonzero(hotspot_mask)
     found_accuracy = (np.round(found_acc[0], 3), np.round(found_acc[1], 3))
     if found_network_type in [4, 6, 1]:
-        found_hotspots_dist, found_hotspots_corr = np.nan,np.nan
+        found_hotspots_dist, found_hotspots_corr = np.nan, np.nan
     else:  # compute found hotspot distance and correlation for dual hotspot types
-        found_hotspots_dist = np.round(
+        found_hotspots_dist = np.round(calc_dist_pairwise(fn_geo, found_idcs[0], found_idcs[1]), 2)
         found_hotspots_corr = np.round(
-
+            compute_correlation_with_all_elements(e_subset, found_idcs[0])[found_idcs[1]], 2)
 
     # evaluate network type identification: 1 means the network was correctly identified, 0 otherwise
     network_types = ['NO', 'AND', '1_INH_0', 'SH_0', '0_INH_1', 'SH_1', 'XOR', 'OR']
-    real_network_type = network_types.index(config['network_type'])+1
-    identification_evaluation =
+    real_network_type = network_types.index(config['network_type']) + 1
+    identification_evaluation = evaluate_network_identification(real_hotspot_dist, real_network_type,
+                                                                found_network_type)
 
     # evaluate hotspot localization: 1 means active hotspots were localized within 10mm, 0 otherwise
     if real_network_type == 1:
-        localization_evaluation=identification_evaluation
+        localization_evaluation = identification_evaluation
     elif real_network_type == 4:
         localization_evaluation = int(found_distance[0] < 10)
     elif real_network_type == 6:
         localization_evaluation = int(found_distance[1] < 10)
     else:
-        localization_evaluation = int((found_distance[0] < 10) and (found_distance[1]<10))
+        localization_evaluation = int((found_distance[0] < 10) and (found_distance[1] < 10))
 
     # save information about the response
     response_max = np.round(np.max(response_data[0]), 3)
```
```diff
@@ -2018,8 +2020,8 @@ def write_nda_test_results_csv(runtimes, e_subset, response_data, hotspot_mask,
                            np.round(found_scores[0], 2), np.round(found_scores[1], 2),
                            found_accuracy[0], found_accuracy[1],
                            found_hotspots_corr, found_hotspots_dist,
-
-    output_csv=np.append(output_csv, shape_vector)
+                           found_network_type, np.round(network_type_certainty, 2)])
+    output_csv = np.append(output_csv, shape_vector)
     # evaluation results (only in case of artificial data / testing reasons)
     output_csv = np.append(output_csv,
                            [identification_evaluation, localization_evaluation,
@@ -2051,7 +2053,8 @@ def write_nda_test_results_csv(runtimes, e_subset, response_data, hotspot_mask,
     print(f'Saved results and evaluation in {fn_results}.csv \n **** \n ')
 
 
-def write_nda_application_results_csv(runtimes, e_subset, response_data, hotspot_mask, detection_result, fn_geo,
+def write_nda_application_results_csv(runtimes, e_subset, response_data, hotspot_mask, detection_result, fn_geo,
+                                      config):
     """
     Writes network detection results to a CSV file based on the provided parameters and configuration for real data.
     See output_documentation.md for more.
@@ -2083,13 +2086,13 @@ def write_nda_application_results_csv(runtimes, e_subset, response_data, hotspot
     found_accuracy = (np.round(found_acc[0], 3), np.round(found_acc[1], 3))
     # translate real network type id name into network name
     network_types = ['NO', 'AND', '1_INH_0', 'SH_0', '0_INH_1', 'SH_1', 'XOR', 'OR']
-    found_network_name = network_types[found_network_type-1]
+    found_network_name = network_types[found_network_type - 1]
     if found_network_type in [4, 6, 1]:
         found_hotspots_dist, found_hotspots_corr = np.nan, np.nan
     else:  # compute found hotspot distance and correlation for dual hotspot types
-        found_hotspots_dist = np.round(
+        found_hotspots_dist = np.round(calc_dist_pairwise(fn_geo, found_idcs[0], found_idcs[1]), 2)
         found_hotspots_corr = np.round(
-
+            compute_correlation_with_all_elements(e_subset, found_idcs[0])[found_idcs[1]], 2)
 
     # read relevant parameters from config file
     values = config.values()
@@ -2124,7 +2127,8 @@ def write_nda_application_results_csv(runtimes, e_subset, response_data, hotspot
     fn_results = config['fn_results']
     try:
         # open the evaluation csv file in write mode
-        with open(f'/data/pt_01756/studies/network_mapping/evaluation_files_realdata/{fn_results}.csv', 'a',
+        with open(f'/data/pt_01756/studies/network_mapping/evaluation_files_realdata/{fn_results}.csv', 'a',
+                  newline='',
                   encoding='UTF8') as f:
             # create the csv writer
             writer = csv.writer(f)
```
```diff
@@ -2165,13 +2169,15 @@ def network_detection_algorithm_testing(e_matrix, roi_surf, fn_geo, base_path, c
     runtimes = np.zeros(shape=3)
     # apply network detection algorithm (steps 1-3) to artificially generated data and evaluate results:
     # (0) generate and (1) binarize response data
-    e_subset, response =
+    e_subset, response = write_network_detection_data_hdf5('artificial', e_matrix, None, base_path, config)
     # (2) scoring
-    runtimes[1], scorematrix, hotspot_mask =
+    runtimes[1], scorematrix, hotspot_mask = write_hotspot_scoremap_hdf5('artificial', e_subset, response,
+                                                                         roi_surf, fn_geo, base_path, config)
     # (3) detection (localization and identification)
-    runtimes[2], detection_result =
+    runtimes[2], detection_result = whole_network_detection(e_subset, response, scorematrix, hotspot_mask,
+                                                            base_path, config)
     # automatically evaluate results
-
+    write_nda_test_results_csv(runtimes, e_subset, response, hotspot_mask, detection_result, fn_geo, config)
 
 
 def network_detection_algorithm_application(e_matrix, response_values, roi_surf, fn_geo, base_path, config):
@@ -2201,16 +2207,20 @@ def network_detection_algorithm_application(e_matrix, response_values, roi_surf,
 
     # apply network detection algorithm
     # (1) binarization:
-    e_subset, response
+    e_subset, response = write_network_detection_data_hdf5('real', e_matrix, response_values, base_path, config)
     # (2) score calculation
-    runtimes[1], scorematrix, hotspot_mask =
+    runtimes[1], scorematrix, hotspot_mask = write_hotspot_scoremap_hdf5('real', e_subset, response, roi_surf,
+                                                                         fn_geo, base_path, config)
     # (3) detection
-    runtimes[2], detection_result =
+    runtimes[2], detection_result = whole_network_detection(e_subset, response, scorematrix, hotspot_mask,
+                                                            base_path, config)
 
     # save results in .csv (and write effect map if wanted)
-
+    write_nda_application_results_csv(runtimes, e_subset, response, hotspot_mask, detection_result, fn_geo,
+                                      config)
     if config['write_effect_map']:
-
+        write_effect_map_hdf5('real', e_matrix, roi_surf, detection_result, base_path, config)
+
 
 #
 # ARCHIVE
@@ -2372,4 +2382,4 @@ def compute_scores_and_shape_with_single_element(element_idx, efields, data, wei
 
 
 
-'''
+'''
```