pyNIBS 0.2024.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. pyNIBS-0.2024.8.dist-info/LICENSE +623 -0
  2. pyNIBS-0.2024.8.dist-info/METADATA +723 -0
  3. pyNIBS-0.2024.8.dist-info/RECORD +107 -0
  4. pyNIBS-0.2024.8.dist-info/WHEEL +5 -0
  5. pyNIBS-0.2024.8.dist-info/top_level.txt +1 -0
  6. pynibs/__init__.py +34 -0
  7. pynibs/coil.py +1367 -0
  8. pynibs/congruence/__init__.py +15 -0
  9. pynibs/congruence/congruence.py +1108 -0
  10. pynibs/congruence/ext_metrics.py +257 -0
  11. pynibs/congruence/stimulation_threshold.py +318 -0
  12. pynibs/data/configuration_exp0.yaml +59 -0
  13. pynibs/data/configuration_linear_MEP.yaml +61 -0
  14. pynibs/data/configuration_linear_RT.yaml +61 -0
  15. pynibs/data/configuration_sigmoid4.yaml +68 -0
  16. pynibs/data/network mapping configuration/configuration guide.md +238 -0
  17. pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +42 -0
  18. pynibs/data/network mapping configuration/configuration_for_testing.yaml +43 -0
  19. pynibs/data/network mapping configuration/configuration_modelTMS.yaml +43 -0
  20. pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +43 -0
  21. pynibs/data/network mapping configuration/output_documentation.md +185 -0
  22. pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +77 -0
  23. pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +1281 -0
  24. pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +1281 -0
  25. pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +1281 -0
  26. pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +1281 -0
  27. pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +1281 -0
  28. pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +1281 -0
  29. pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +1281 -0
  30. pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +1281 -0
  31. pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +1281 -0
  32. pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +1281 -0
  33. pynibs/expio/Mep.py +1518 -0
  34. pynibs/expio/__init__.py +8 -0
  35. pynibs/expio/brainsight.py +979 -0
  36. pynibs/expio/brainvis.py +71 -0
  37. pynibs/expio/cobot.py +239 -0
  38. pynibs/expio/exp.py +1876 -0
  39. pynibs/expio/fit_funs.py +287 -0
  40. pynibs/expio/localite.py +1987 -0
  41. pynibs/expio/signal_ced.py +51 -0
  42. pynibs/expio/visor.py +624 -0
  43. pynibs/freesurfer.py +502 -0
  44. pynibs/hdf5_io/__init__.py +10 -0
  45. pynibs/hdf5_io/hdf5_io.py +1857 -0
  46. pynibs/hdf5_io/xdmf.py +1542 -0
  47. pynibs/mesh/__init__.py +3 -0
  48. pynibs/mesh/mesh_struct.py +1394 -0
  49. pynibs/mesh/transformations.py +866 -0
  50. pynibs/mesh/utils.py +1103 -0
  51. pynibs/models/_TMS.py +211 -0
  52. pynibs/models/__init__.py +0 -0
  53. pynibs/muap.py +392 -0
  54. pynibs/neuron/__init__.py +2 -0
  55. pynibs/neuron/neuron_regression.py +284 -0
  56. pynibs/neuron/util.py +58 -0
  57. pynibs/optimization/__init__.py +5 -0
  58. pynibs/optimization/multichannel.py +278 -0
  59. pynibs/optimization/opt_mep.py +152 -0
  60. pynibs/optimization/optimization.py +1445 -0
  61. pynibs/optimization/workhorses.py +698 -0
  62. pynibs/pckg/__init__.py +0 -0
  63. pynibs/pckg/biosig/biosig4c++-1.9.5.src_fixed.tar.gz +0 -0
  64. pynibs/pckg/libeep/__init__.py +0 -0
  65. pynibs/pckg/libeep/pyeep.so +0 -0
  66. pynibs/regression/__init__.py +11 -0
  67. pynibs/regression/dual_node_detection.py +2375 -0
  68. pynibs/regression/regression.py +2984 -0
  69. pynibs/regression/score_types.py +0 -0
  70. pynibs/roi/__init__.py +2 -0
  71. pynibs/roi/roi.py +895 -0
  72. pynibs/roi/roi_structs.py +1233 -0
  73. pynibs/subject.py +1009 -0
  74. pynibs/tensor_scaling.py +144 -0
  75. pynibs/tests/data/InstrumentMarker20200225163611937.xml +19 -0
  76. pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +14 -0
  77. pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +6373 -0
  78. pynibs/tests/data/Xdmf.dtd +89 -0
  79. pynibs/tests/data/brainsight_niiImage_nifticoord.txt +145 -0
  80. pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +1434 -0
  81. pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +47 -0
  82. pynibs/tests/data/create_subject_testsub.py +332 -0
  83. pynibs/tests/data/data.hdf5 +0 -0
  84. pynibs/tests/data/geo.hdf5 +0 -0
  85. pynibs/tests/test_coil.py +474 -0
  86. pynibs/tests/test_elements2nodes.py +100 -0
  87. pynibs/tests/test_hdf5_io/test_xdmf.py +61 -0
  88. pynibs/tests/test_mesh_transformations.py +123 -0
  89. pynibs/tests/test_mesh_utils.py +143 -0
  90. pynibs/tests/test_nnav_imports.py +101 -0
  91. pynibs/tests/test_quality_measures.py +117 -0
  92. pynibs/tests/test_regressdata.py +289 -0
  93. pynibs/tests/test_roi.py +17 -0
  94. pynibs/tests/test_rotations.py +86 -0
  95. pynibs/tests/test_subject.py +71 -0
  96. pynibs/tests/test_util.py +24 -0
  97. pynibs/tms_pulse.py +34 -0
  98. pynibs/util/__init__.py +4 -0
  99. pynibs/util/dosing.py +233 -0
  100. pynibs/util/quality_measures.py +562 -0
  101. pynibs/util/rotations.py +340 -0
  102. pynibs/util/simnibs.py +763 -0
  103. pynibs/util/util.py +727 -0
  104. pynibs/visualization/__init__.py +2 -0
  105. pynibs/visualization/para.py +4372 -0
  106. pynibs/visualization/plot_2D.py +137 -0
  107. pynibs/visualization/render_3D.py +347 -0
@@ -0,0 +1,1108 @@
1
+ import os
2
+ import h5py
3
+ import numpy as np
4
+ import yaml
5
+ import pynibs
6
+
7
+
8
+ def cf_curveshift_workhorse_stretch_correction(elm_idx_list, mep, mep_params, e, n_samples=100):
9
+ """
10
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
11
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
12
+ The computations are parallelized in terms of element indices (``elm_idx_list``).
13
+ ``n_samples`` are taken from fitted_mep, within the range of the :py:class:`~pynibs.expio.Mep`.
14
+
15
+ Parameters
16
+ ----------
17
+ elm_idx_list : np.ndarray
18
+ (chunksize) List of element indices for which the congruence factor is computed.
19
+ mep : list of :py:class:`~pynibs.expio.Mep`
20
+ (n_cond) List of fitted Mep object instances for all conditions.
21
+ mep_params : np.ndarray of float
22
+ (n_mep_params_total) List of all mep parameters of curve fits used to calculate the MEP,
23
+ accumulated into one array.
24
+
25
+ * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``,
26
+ ``mep_#2_para_#1``, ``mep_#2_para_#2``, ...]
27
+
28
+ e : list of list of np.ndarray of float
29
+ [n_cond][n_datasets][n_elm] Tuple of n_datasets of the electric field to compute the congruence factor for,
30
+ e.g. ``(e_mag, e_norm, e_tan)``.
31
+ Each dataset is a list over all conditions containing the electric field component of interest
32
+
33
+ * e.g.: ``len(e) = n_cond``
34
+ * ``len(e[0]) = n_comp`` (e.g. ``e_mag = e[0]``)
35
+
36
+ n_samples : int, default=100
37
+ Number of data points to generate discrete mep and e curves.
38
+
39
+ Returns
40
+ -------
41
+ congruence_factor : np.ndarray of float
42
+ (n_roi, n_datasets) Congruence factor in each element specified in elm_idx_list and for each input dataset.
43
+ """
44
+
45
+ stepsize = 1e-1
46
+ n_datasets = len(e[0])
47
+ n_elm = len(elm_idx_list)
48
+ n_conditions = len(mep)
49
+
50
+ mep_params = np.array(mep_params).flatten()
51
+
52
+ congruence_factor = np.empty((n_elm, n_datasets))
53
+
54
+ # rearrange mep parameters to individual conditions
55
+ mep_params_cond = []
56
+ start_idx = 0
57
+ for i_cond in range(n_conditions):
58
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
59
+ start_idx = start_idx + mep[i_cond].popt.size
60
+
61
+ del start_idx
62
+
63
+ intensities = []
64
+ intensities_min = []
65
+ intensities_max = []
66
+ mep_curve = []
67
+
68
+ # calculate mep curves per condition
69
+ for i_cond in range(n_conditions):
70
+ intensities.append(np.linspace(mep[i_cond].x_limits[0], mep[i_cond].x_limits[1], n_samples))
71
+ mep_curve.append(mep[i_cond].eval(intensities[-1], mep_params_cond[i_cond]))
72
+ intensities_min.append(mep[i_cond].x_limits[0])
73
+ intensities_max.append(mep[i_cond].x_limits[1])
74
+
75
+ for i_datasets in range(n_datasets):
76
+
77
+ # calculate corresponding electric field values per condition
78
+ for elm_idx, elmIdx in enumerate(elm_idx_list):
79
+ e_curve = []
80
+ stepsize_local_shift = []
81
+
82
+ # get e-curves for reference solutions with n_samples
83
+ for i_cond in range(n_conditions):
84
+ e_curve.append(e[i_cond][i_datasets][elmIdx] * intensities[i_cond])
85
+ stepsize_local_shift.append(e_curve[-1][1] - e_curve[-1][0])
86
+
87
+ # KERNEL CODE STARTED HERE
88
+ e_min = np.min(e_curve, axis=1) # minima of electric field for every condition
89
+ # ceil to .stepsize
90
+ e_min = np.ceil(e_min / stepsize) * stepsize
91
+ e_max = np.max(e_curve, axis=1) # maxima of electric field for every condition
92
+ e_max = np.floor(e_max / stepsize) * stepsize
93
+
94
+ # mean e-field over all conditions (midpoint of each condition's e-range)
95
+ e_mean = np.mean((e_max + e_min) / 2)
96
+
97
+ # return NaN if xmax-xmin is smaller than stepsize
98
+ if np.any(e_max - e_min <= stepsize):
99
+ congruence_factor[elm_idx, i_datasets] = np.nan
100
+
101
+ else:
102
+
103
+ # find start and stop indices of e_x in global e array
104
+ start_ind = np.empty(n_conditions, dtype=int)
105
+ stop_ind = np.empty(n_conditions, dtype=int)
106
+ e_x_global = np.arange(0, np.max(e_max) + stepsize, stepsize)
107
+
108
+ for idx in range(n_conditions):
109
+ # lower boundary idx of e_x_cond in e_x_global
110
+ start_ind[idx] = pynibs.mesh.utils.find_nearest(e_x_global, e_min[idx])
111
+
112
+ # upper boundary idx of e_x_cond in e_x_global
113
+ stop_ind[idx] = pynibs.mesh.utils.find_nearest(e_x_global, e_max[idx])
114
+
115
+ # get tau distances for all conditions vs reference condition
116
+ # distances for ref,i == i,ref. i,i == 0. So only compute upper triangle of matrix
117
+ ref_range = np.arange(n_conditions)
118
+ t_cond = np.zeros((n_conditions, n_conditions))
119
+ idx_range = list(reversed(np.arange(n_conditions)))
120
+
121
+ for reference_idx in ref_range:
122
+ # remove this reference index from idx_range
123
+ idx_range.pop()
124
+ # # as we always measure the distance of the shorter mep_cond, save idx to store in matrix
125
+ # reference_idx_backup = copy.deepcopy(reference_idx)
126
+
127
+ for idx in idx_range:
128
+ idx_save = idx
129
+ # # switch ref and idx, as we want to measure from short mep_y to avoid overshifting
130
+
131
+ # get initially shifted mep curve
132
+ # e axis of initially shifted mep curve (just needed for length)
133
+
134
+ # resampled intensity axis of initially shifted mep curve
135
+ intens_mep = np.linspace(intensities_min[idx],
136
+ intensities_max[idx],
137
+ int(((e_min[reference_idx] - stepsize_local_shift[reference_idx]) -
138
+ ((e_min[reference_idx] - stepsize_local_shift[reference_idx]) /
139
+ intensities_max[idx] * intensities_min[idx])) /
140
+ stepsize_local_shift[reference_idx]))  # int(): np.linspace expects an integer sample count
141
+
142
+ # fictitious e_mep value for initial shift (e'_mep)
143
+ e_mep_initial_shift = (e_min[reference_idx] - stepsize_local_shift[reference_idx]) / \
144
+ intensities_max[idx]
145
+
146
+ # start index of initially shifted and stretched mep curve
147
+ start_idx_mep_initial_shift = pynibs.mesh.utils.find_nearest(e_x_global,
148
+ e_mep_initial_shift *
149
+ intensities_min[idx])
150
+
151
+ mep_shift = mep[idx].eval(intens_mep, mep_params_cond[idx])
152
+
153
+ # determine length of mep curve in dependence on its location
154
+ max_e_mep_end = (e_max[reference_idx] + stepsize_local_shift[reference_idx]) * \
155
+ intensities_max[idx] / intensities_min[idx]
156
+ len_e_ref = n_samples
157
+ len_e_mep_start = mep_shift.size
158
+ len_e_mep_end = np.ceil((max_e_mep_end - e_max[reference_idx] +
159
+ stepsize_local_shift[reference_idx]) /
160
+ stepsize_local_shift[reference_idx])
161
+
162
+ # length of shifted curve as a function of position (gets longer while shifting)
163
+ len_mep_idx_shift = np.round(np.linspace(
164
+ len_e_mep_start,
165
+ len_e_mep_end,
166
+ int(len_e_mep_start + len_e_ref + 2 * stepsize_local_shift[reference_idx])))  # int(): np.linspace expects an integer sample count
167
+
168
+ # construct shift array (there are fewer zeros at the beginning and more at the end because the mep
169
+ # curve is stretched during shifting)
170
+ stepsize_local_shift_intens = (intensities_max[reference_idx] -
171
+ intensities_min[reference_idx]) / \
172
+ n_samples
173
+ min_intens_ref_prime = intensities_min[reference_idx] - stepsize_local_shift_intens * \
174
+ (1 + len_e_mep_start)
175
+ max_intens_ref_prime = intensities_max[reference_idx] + stepsize_local_shift_intens * \
176
+ (1 + len_e_mep_end)
177
+
178
+ shift_array = mep[reference_idx].eval(np.arange(min_intens_ref_prime,
179
+ max_intens_ref_prime,
180
+ stepsize_local_shift_intens),
181
+ mep_params_cond[reference_idx])
182
+
183
+ # generate index shift list to compare curves
184
+ slice_indices = np.outer(len_mep_idx_shift[:, np.newaxis],
185
+ np.linspace(0, 1, len_e_mep_start)[np.newaxis, :])
186
+ slice_indices = np.round(
187
+ np.add(slice_indices, np.arange(slice_indices.shape[0])[:, np.newaxis])).astype(int)
188
+
189
+ # the error is y-difference between mep[idx] and mep[reference].zero_padded
190
+ err = np.sqrt(np.sum((shift_array[slice_indices] - mep_shift) ** 2, axis=1))
191
+
192
+ # which shift leads to minimum error. remember that we don't start at 0-shift, so add start idx
193
+ t_cond[reference_idx, idx_save] = (start_idx_mep_initial_shift - start_ind[idx]) * stepsize + \
194
+ np.argmin(err) * stepsize_local_shift[reference_idx]
195
+
196
+ # sum all errors and divide by e_mean over all conditions
197
+ congruence_factor[elm_idx, i_datasets] = 1 / (
198
+ np.sqrt(np.sum(np.square(t_cond) * 2)) / e_mean / n_conditions / (n_conditions - 1))
199
+
200
+ return congruence_factor
201
+
202
+
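# --- Editor's usage sketch (not part of the package) ---------------------------------------------
# The docstring above says this workhorse is meant to be called from multiprocessing.Pool. A
# minimal driver, assuming the fitted `mep` objects, `mep_params` and the nested e-field
# structure `e` already exist in the documented shapes:
import functools
import multiprocessing

import numpy as np


def run_cf_parallel(mep, mep_params, e, n_elm_total, n_chunks=8, n_samples=100):
    """Chunk the element indices and evaluate the workhorse on each chunk in parallel."""
    chunks = np.array_split(np.arange(n_elm_total), n_chunks)
    worker = functools.partial(cf_curveshift_workhorse_stretch_correction,
                               mep=mep, mep_params=mep_params, e=e, n_samples=n_samples)
    with multiprocessing.Pool() as pool:
        results = pool.map(worker, chunks)  # each chunk yields an (n_elm_chunk, n_datasets) array
    return np.vstack(results)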
203
+ def cf_curveshift_workhorse_stretch_correction_new(mep, mep_params, e, n_samples=100, ref_idx=0):
204
+ """
205
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
206
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
207
+ The computations are parallelized in terms of element indices (``elm_idx_list``).
208
+ ``n_samples`` are taken from fitted_mep, within the range of the :py:class:`~pynibs.expio.Mep`.
209
+
210
+ Parameters
211
+ ----------
212
+ mep : list of :py:class:`~pynibs.expio.Mep`
213
+ (n_cond) List of fitted Mep object instances for all conditions.
214
+ mep_params : np.ndarray of float [n_mep_params_total]
215
+ List of all mep parameters of curve fits used to calculate the MEP (accumulated into one array)
216
+
217
+ * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``, ``mep_#2_para_#1``, ``mep_#2_para_#2``, ...]
218
+
219
+ e : np.ndarray of float
220
+ (n_elm, n_cond) Electric field in elements.
221
+ n_samples : int, default=100
222
+ Number of data points to generate discrete mep and e curves.
+ ref_idx : int, default=0
+ Index of the reference condition the other conditions are shifted against.
223
+
224
+ Returns
225
+ -------
226
+ congruence_factor : np.ndarray of float
227
+ (n_elm) Congruence factor in each element specified in elm_idx_list and for each input dataset.
228
+ """
229
+ n_elm = e.shape[0]
230
+ n_conditions = e.shape[1]
231
+ c_idx = [idx for idx in np.arange(n_conditions) if idx != ref_idx]
232
+
233
+ # rearrange mep parameters to individual conditions
234
+ mep_params_cond = []
235
+ start_idx = 0
236
+ for i_cond in range(n_conditions):
237
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
238
+ start_idx = start_idx + mep[i_cond].popt.size
239
+
240
+ mep_params_ref = mep_params_cond[ref_idx]
241
+ mep_params_c = [mep_params_cond[idx] for idx in c_idx]
242
+
243
+ # intensities max and min [n_curves]
244
+ i_ref_min = mep[ref_idx].intensities[0]
245
+ i_ref_max = mep[ref_idx].intensities[-1]
246
+
247
+ i_c_min = np.array([mep[idx].intensities[0] for idx in c_idx])
248
+ i_c_max = np.array([mep[idx].intensities[-1] for idx in c_idx])
249
+
250
+ i_stepsize = (i_ref_max - i_ref_min) / (n_samples - 1)
251
+
252
+ # number of samples before and after shift with stretch correction
253
+ n_c_before = np.round((1 - i_c_min / i_c_max) / (i_ref_max / i_ref_min - 1) * n_samples).astype(int)  # integer sample counts for np.linspace
254
+ n_c_after = np.round((i_c_max / i_c_min - 1) / (1 - i_ref_min / i_ref_max) * n_samples)
255
+
256
+ # evaluate curves
257
+ i_ref_shift = np.arange(i_ref_min - max(n_c_before) * i_stepsize,
258
+ i_ref_max + max(n_c_after) * i_stepsize + i_stepsize,
259
+ i_stepsize)
260
+
261
+ mep_ref_shift = mep[ref_idx].eval(i_ref_shift, mep_params_ref)
262
+ err_min_idx = []
263
+ for i, idx in enumerate(c_idx):
264
+ # evaluate curves at resampled intensity axis
265
+ i_c_shift = np.linspace(i_c_min[i], i_c_max[i], n_c_before[i])
266
+ mep_c_shift = mep[idx].eval(i_c_shift, mep_params_c[i])
267
+ # generate index shift list to compare curves
268
+ slice_indices = np.outer(
269
+ np.round(np.linspace(n_c_before[i], n_c_after[i], n_c_before[i] + n_samples))[:, np.newaxis],
270
+ np.linspace(0, 1, n_c_before[i])[np.newaxis, :])
271
+ slice_indices = np.round(slice_indices + np.arange(slice_indices.shape[0])[:, np.newaxis])
272
+ slice_indices = (slice_indices + (np.max(n_c_before) - n_c_before[i])).astype(int)
273
+ # the error is y-difference between mep[idx] and mep[reference].zero_padded
274
+ err = np.sum((mep_ref_shift[slice_indices] - mep_c_shift) ** 2, axis=1)
275
+ err_min_idx.append(np.argmin(err))
276
+ # electric fields [n_elm x n_curves]
277
+ e_ref = e[:, ref_idx][:, np.newaxis]
278
+ e_c = e[:, c_idx]
279
+
280
+ # determine stepsizes in intensity and electric field space
281
+ e_max = np.hstack((i_ref_max, i_c_max)) * np.hstack((e_ref, e_c))
282
+ e_min = np.hstack((i_ref_min, i_c_min)) * np.hstack((e_ref, e_c))
283
+ e_mean = np.mean((e_max + e_min) / 2, axis=1)[:, np.newaxis]
284
+ e_stepsize = e_ref * i_stepsize
285
+
286
+ # determine initial shift in electric field space
287
+ initial_shift = e_c * i_c_min - e_ref * i_ref_min * i_c_min / i_c_max
288
+
289
+ # determine total shift
290
+ total_shift = np.zeros((n_elm, n_conditions))
291
+ total_shift[:, 1:] = initial_shift - e_stepsize * np.array(err_min_idx)[np.newaxis, :]
292
+
293
+ # sum all errors and divide by e_mean over all conditions
294
+ congruence_factor = (e_mean ** 2) / np.var(total_shift, axis=1)[:, np.newaxis]
295
+
296
+ return congruence_factor
297
+
298
+
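# --- Editor's note (not part of the package) ------------------------------------------------------
# Unlike the function above, the "_new" variant expects the electric field as a dense
# (n_elm, n_cond) array instead of the nested [n_cond][n_datasets][n_elm] lists. A small helper
# to pull one field component (e.g. dataset index 0) out of the nested structure:
import numpy as np


def nested_e_to_array(e_nested, dataset_idx=0):
    """Stack e_nested[i_cond][dataset_idx] (length n_elm each) into an (n_elm, n_cond) array."""
    return np.column_stack([np.asarray(e_cond[dataset_idx]) for e_cond in e_nested])

# c = cf_curveshift_workhorse_stretch_correction_new(mep, mep_params,
#                                                    e=nested_e_to_array(e), ref_idx=0)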
299
+ def cf_curveshift_workhorse_stretch_correction_sign_new(mep, mep_params, e, n_samples=100, ref_idx=0):
300
+ """
301
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
302
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
303
+ The computations are parallelized in terms of element indices (``elm_idx_list``).
304
+ ``n_samples`` are taken from fitted_mep, within the range of the :py:class:`~pynibs.expio.Mep`.
305
+
306
+ Parameters
307
+ ----------
308
+ mep : list of :py:class:`~pynibs.expio.Mep`
309
+ (n_cond) List of fitted Mep object instances for all conditions.
310
+ mep_params : np.ndarray of float
311
+ (n_mep_params_total) List of all mep parameters of curve fits used to calculate the MEP, accumulated into
312
+ one array.
313
+
314
+ * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``, ``mep_#2_para_#1``,
315
+ ``mep_#2_para_#2``, ...]
316
+
317
+ e : np.ndarray of float
318
+ (n_elm, n_cond) Electric field in elements.
319
+ n_samples : int, default=100
320
+ Number of data points to generate discrete mep and e curves.
321
+
322
+ Returns
323
+ -------
324
+ congruence_factor : np.ndarray of float
325
+ (n_elm, 1) Congruence factor in each element specified in elm_idx_list and for each input dataset.
326
+ """
327
+ n_elm = e.shape[0]
328
+ n_conditions = e.shape[1]
329
+ err_min_idx = np.zeros((n_conditions, n_conditions))
330
+ initial_shift = np.zeros((n_elm, n_conditions, n_conditions))
331
+ x_mean = np.empty((1, n_conditions))
332
+ e_stepsize = np.zeros((n_elm, n_conditions))
333
+
334
+ mep_params_cond = []
335
+ start_idx = 0
336
+
337
+ mask_pos = e > 0
338
+ mask_neg = e < 0
339
+
340
+ mask_only_one_curve = np.logical_or(np.sum(mask_pos, axis=1) == 1, np.sum(mask_neg, axis=1) == 1)
341
+ n_curves = np.ones(n_elm) * n_conditions
342
+ n_curves[mask_only_one_curve] = n_conditions - 1
343
+
344
+ # rearrange mep parameters to individual conditions
345
+ for i_cond in range(n_conditions):
346
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
347
+ start_idx = start_idx + mep[i_cond].popt.size
348
+ x_mean[0, i_cond] = (mep[i_cond].x_limits[0] + mep[i_cond].x_limits[1]) / 2
349
+
350
+ for ref_idx in range(n_conditions):
351
+ c_idx = [idx for idx in np.arange(n_conditions) if idx != ref_idx]
352
+
353
+ mep_params_ref = mep_params_cond[ref_idx]
354
+ mep_params_c = [mep_params_cond[idx] for idx in c_idx]
355
+
356
+ # intensities max and min [n_curves]
357
+ i_ref_min = np.min(mep[ref_idx].intensities) # [0]
358
+ i_ref_max = np.max(mep[ref_idx].intensities) # [-1]
359
+
360
+ i_c_min = np.array([np.min(mep[idx].intensities) for idx in c_idx])
361
+ i_c_max = np.array([np.max(mep[idx].intensities) for idx in c_idx])
362
+
363
+ i_stepsize = (i_ref_max - i_ref_min) / (n_samples - 1)
364
+
365
+ # number of samples before and after shift with stretch correction
366
+ n_c_before = np.round((1 - i_c_min / i_c_max) / (i_ref_max / i_ref_min - 1) * n_samples).astype(int)
367
+ n_c_after = np.round((i_c_max / i_c_min - 1) / (1 - i_ref_min / i_ref_max) * n_samples)
368
+
369
+ # evaluate curves
370
+ i_ref_shift = np.arange(i_ref_min - max(n_c_before) * i_stepsize,
371
+ i_ref_max + max(n_c_after) * i_stepsize + i_stepsize,
372
+ i_stepsize)
373
+
374
+ mep_ref_shift = mep[ref_idx].eval(i_ref_shift, mep_params_ref)
375
+
376
+ for i, idx in enumerate(c_idx):
377
+ # evaluate curves at resampled intensity axis
378
+ i_c_shift = np.linspace(i_c_min[i], i_c_max[i], n_c_before[i])
379
+ mep_c_shift = mep[idx].eval(i_c_shift, mep_params_c[i])
380
+ # generate index shift list to compare curves
381
+ slice_indices = np.outer(
382
+ np.round(np.linspace(n_c_before[i], n_c_after[i], n_c_before[i] + n_samples))[:, np.newaxis],
383
+ np.linspace(0, 1, n_c_before[i])[np.newaxis, :])
384
+ slice_indices = np.round(slice_indices + np.arange(slice_indices.shape[0])[:, np.newaxis])
385
+ slice_indices = (slice_indices + (np.max(n_c_before) - n_c_before[i])).astype(int)
386
+ # the error is y-difference between mep[idx] and mep[reference].zero_padded
387
+ err = np.sum((mep_ref_shift[slice_indices] - mep_c_shift) ** 2, axis=1)
388
+ err_min_idx[ref_idx, idx] = np.argmin(err)
389
+
390
+ # electric fields [n_elm x n_curves]
391
+ e_ref = e[:, ref_idx][:, np.newaxis]
392
+ e_c = e[:, c_idx]
393
+
394
+ # determine stepsizes in intensity and electric field space
395
+ e_stepsize[:, ref_idx] = (e_ref * i_stepsize).flatten()
396
+
397
+ # determine initial shift in electric field space
398
+ initial_shift[:, c_idx, ref_idx] = e_c * i_c_min - e_ref * i_ref_min * i_c_min / i_c_max
399
+
400
+ mean_pos = np.array([np.mean(row[mask_pos[i, :]] * x_mean[0, mask_pos[i, :]]) for i, row in enumerate(e)])
401
+ mean_neg = np.array([np.mean(row[mask_neg[i, :]] * x_mean[0, mask_neg[i, :]]) for i, row in enumerate(e)])
402
+
403
+ # determine total shift
404
+ total_shift_pos = []
405
+ total_shift_neg = []
406
+
407
+ for i_elm in range(n_elm):
408
+ curve_idx_neg = np.where(mask_neg[i_elm, :])[0]
409
+ curve_idx_pos = np.where(mask_pos[i_elm, :])[0]
410
+
411
+ if curve_idx_neg.size != 0:
412
+ ref_idx_neg = curve_idx_neg[0]
413
+ total_shift_neg.append(initial_shift[i_elm, curve_idx_neg[1:], ref_idx_neg] -
414
+ e_stepsize[i_elm, ref_idx_neg] * err_min_idx[ref_idx_neg, curve_idx_neg[1:]])
415
+ else:
416
+ total_shift_neg.append(np.array([]))
417
+
418
+ if curve_idx_pos.size != 0:
419
+ ref_idx_pos = curve_idx_pos[0]
420
+ total_shift_pos.append(initial_shift[i_elm, curve_idx_pos[1:], ref_idx_pos] -
421
+ e_stepsize[i_elm, ref_idx_pos] * err_min_idx[ref_idx_pos, curve_idx_pos[1:]])
422
+ else:
423
+ total_shift_pos.append(np.array([]))
424
+
425
+ var_pos = np.array([np.sum(mask_pos[i, :]) * np.var(np.hstack((0, row))) for i, row in enumerate(total_shift_pos)])
426
+ var_neg = np.array([np.sum(mask_neg[i, :]) * np.var(np.hstack((0, row))) for i, row in enumerate(total_shift_neg)])
427
+
428
+ mean_pos[np.isnan(mean_pos)] = np.inf
429
+ mean_neg[np.isnan(mean_neg)] = np.inf
430
+
431
+ mean_pos[np.isnan(var_pos)] = np.inf
432
+ mean_neg[np.isnan(var_neg)] = np.inf
433
+
434
+ var = (var_pos / mean_pos ** 2 + var_neg / mean_neg ** 2) / n_curves
435
+
436
+ congruence_factor = (1 / var)[:, np.newaxis]
437
+
438
+ return congruence_factor
439
+
440
+
441
+ def cf_curveshift_workhorse_stretch_correction_variance(elm_idx_list, mep, mep_params, e, n_samples=100):
442
+ """
443
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
444
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
445
+ The computations are parallelized in terms of element indices (``elm_idx_list``).
446
+ ``n_samples`` are taken from fitted_mep, within the range of the :py:class:`~pynibs.expio.Mep`.
447
+
448
+ Parameters
449
+ ----------
450
+ elm_idx_list : np.ndarray
451
+ (chunksize) List of element indices for which the congruence factor is computed.
452
+ mep : list of :py:class:`~pynibs.expio.Mep`
453
+ (n_cond) List of fitted Mep object instances for all conditions
454
+ mep_params : np.ndarray of float [n_mep_params_total]
455
+ List of all mep parameters of curve fits used to calculate the MEP (accumulated into one array)
456
+
457
+ * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``, ``mep_#2_para_#1``, ``mep_#2_para_#2``, ...]
458
+
459
+ e : list of list of np.ndarray of float
460
+ [n_cond][n_datasets][n_elm] Tuple of n_datasets of the electric field to compute the congruence factor for,
461
+ e.g. ``(e_mag, e_norm, e_tan)``.
462
+ Each dataset is a list over all conditions containing the electric field component of interest
463
+
464
+ * e.g.: ``len(e) = n_cond``
465
+ * ``len(e[0]) = n_comp`` (e.g. ``e_mag = e[0]``)
466
+
467
+ n_samples : int, default=100
468
+ Number of data points to generate discrete mep and e curves
469
+
470
+ Returns
471
+ -------
472
+ congruence_factor : np.ndarray of float [n_roi, n_datasets]
473
+ Congruence factor in each element specified in elm_idx_list and for each input dataset
474
+ """
475
+
476
+ stepsize = 1e-1
477
+ n_datasets = len(e[0])
478
+ n_elm = len(elm_idx_list)
479
+ n_conditions = len(mep)
480
+
481
+ mep_params = np.array(mep_params).flatten()
482
+
483
+ congruence_factor = np.empty((n_elm, n_datasets))
484
+
485
+ # rearrange mep parameters to individual conditions
486
+ mep_params_cond = []
487
+ start_idx = 0
488
+ for i_cond in range(n_conditions):
489
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
490
+ start_idx = start_idx + mep[i_cond].popt.size
491
+
492
+ del start_idx
493
+
494
+ intensities = []
495
+ intensities_min = []
496
+ intensities_max = []
497
+ stepsize_local_shift = []
498
+ mep_curve = []
499
+
500
+ # calculate mep curves per condition
501
+ for i_cond in range(n_conditions):
502
+ intensities.append(np.linspace(mep[i_cond].x_limits[0], mep[i_cond].x_limits[1], n_samples))
503
+ mep_curve.append(mep[i_cond].eval(intensities[-1], mep_params_cond[i_cond]))
504
+ intensities_min.append(mep[i_cond].x_limits[0])
505
+ intensities_max.append(mep[i_cond].x_limits[1])
506
+
507
+ for i_datasets in range(n_datasets):
508
+
509
+ # calculate corresponding electric field values per condition
510
+ for elm_idx, elmIdx in enumerate(elm_idx_list):
511
+
512
+ e_curve = []
513
+ stepsize_local_shift = []
514
+
515
+ # get e-curves for reference solutions with n_samples
516
+ for i_cond in range(n_conditions):
517
+ e_curve.append(e[i_cond][i_datasets][elmIdx] * intensities[i_cond])
518
+ stepsize_local_shift.append(e_curve[-1][1] - e_curve[-1][0])
519
+
520
+ # KERNEL CODE STARTED HERE
521
+ e_min = np.min(e_curve, axis=1) # minima of electric field for every condition
522
+ # ceil to .stepsize
523
+ e_min = np.ceil(e_min / stepsize) * stepsize
524
+ e_max = np.max(e_curve, axis=1) # maxima of electric field for every condition
525
+ e_max = np.floor(e_max / stepsize) * stepsize
526
+
527
+ # mean e-field over all conditions (midpoint of each condition's e-range)
528
+ e_mean = np.mean((e_max + e_min) / 2)
529
+
530
+ # return NaN if xmax-xmin is smaller than stepsize
531
+ if np.any(e_max - e_min <= stepsize):
532
+ congruence_factor[elm_idx, i_datasets] = np.nan
533
+
534
+ else:
535
+
536
+ # find start and stop indices of e_x in global e array
537
+ start_ind = np.empty(n_conditions, dtype=int)
538
+ stop_ind = np.empty(n_conditions, dtype=int)
539
+ e_x_global = np.arange(0, np.max(e_max) + stepsize, stepsize)
540
+
541
+ for idx in range(n_conditions):
542
+ # lower boundary idx of e_x_cond in e_x_global
543
+ start_ind[idx] = pynibs.mesh.utils.find_nearest(e_x_global, e_min[idx])
544
+
545
+ # upper boundary idx of e_x_cond in e_x_global
546
+ stop_ind[idx] = pynibs.mesh.utils.find_nearest(e_x_global, e_max[idx])
547
+
548
+ # get tau distances for all conditions vs reference condition
549
+ # distances for ref,i == i,ref. i,i == 0. So only compute upper triangle of matrix
550
+ ref_range = [0] # np.arange(n_conditions)
551
+ t_cond = np.zeros((n_conditions, n_conditions))
552
+ idx_range = list(reversed(np.arange(n_conditions)))
553
+
554
+ for reference_idx in ref_range:
555
+ # remove this reference index from idx_range
556
+ idx_range.pop()
557
+ # # as we always measure the distance of the shorter mep_cond, save idx to store in matrix
558
+ # reference_idx_backup = copy.deepcopy(reference_idx)
559
+
560
+ for idx in idx_range:
561
+ idx_save = idx
562
+
563
+ # resampled intensity axis of initially shifted mep curve
564
+ intens_mep = np.linspace(intensities_min[idx],
565
+ intensities_max[idx],
566
+ int(((e_min[reference_idx] - stepsize_local_shift[reference_idx]) -
567
+ ((e_min[reference_idx] - stepsize_local_shift[reference_idx]) /
568
+ intensities_max[idx] * intensities_min[idx])) /
569
+ stepsize_local_shift[reference_idx]))  # int(): np.linspace expects an integer sample count
570
+
571
+ # fictitious e_mep value for initial shift (e'_mep)
572
+ e_mep_initial_shift = (e_min[reference_idx] - stepsize_local_shift[reference_idx]) / \
573
+ intensities_max[idx]
574
+
575
+ # start index of initially shifted and stretched mep curve
576
+ start_idx_mep_initial_shift = pynibs.mesh.utils.find_nearest(e_x_global,
577
+ e_mep_initial_shift *
578
+ intensities_min[idx])
579
+
580
+ mep_shift = mep[idx].eval(intens_mep, mep_params_cond[idx])
581
+
582
+ # determine length of mep curve in dependence on its location
583
+ max_e_mep_end = (e_max[reference_idx] + stepsize_local_shift[reference_idx]) * \
584
+ intensities_max[idx] / intensities_min[idx]
585
+ len_e_ref = n_samples
586
+ len_e_mep_start = mep_shift.size
587
+ len_e_mep_end = np.ceil((max_e_mep_end - e_max[reference_idx] +
588
+ stepsize_local_shift[reference_idx]) /
589
+ stepsize_local_shift[reference_idx])
590
+ # len_total = (len_e_mep_start + len_e_ref + len_e_mep_end + 2).astype(int)
591
+
592
+ # length of shifted curve as a function of position (gets longer while shifting)
593
+ len_mep_idx_shift = np.round(np.linspace(
594
+ len_e_mep_start,
595
+ len_e_mep_end,
596
+ int(len_e_mep_start + len_e_ref + 2 * stepsize_local_shift[reference_idx])))  # int(): np.linspace expects an integer sample count
597
+
598
+ # construct shift array (there are fewer zeros at the beginning and more at the end because the mep
599
+ # curve is stretched during shifting)
600
+ stepsize_local_shift_intens = (intensities_max[reference_idx] -
601
+ intensities_min[reference_idx]) / \
602
+ float(n_samples - 1)
603
+ min_intens_ref_prime = intensities_min[reference_idx] - stepsize_local_shift_intens * \
604
+ (1 + len_e_mep_start)
605
+ max_intens_ref_prime = intensities_max[reference_idx] + stepsize_local_shift_intens * \
606
+ (1 + len_e_mep_end)
607
+
608
+ shift_array = mep[reference_idx].eval(np.arange(min_intens_ref_prime,
609
+ max_intens_ref_prime,
610
+ stepsize_local_shift_intens),
611
+ mep_params_cond[reference_idx])
612
+
613
+ # generate index shift list to compare curves
614
+ slice_indices = np.outer(len_mep_idx_shift[:, np.newaxis],
615
+ np.linspace(0, 1, len_e_mep_start)[np.newaxis, :])
616
+ slice_indices = np.round(
617
+ np.add(slice_indices, np.arange(slice_indices.shape[0])[:, np.newaxis])).astype(int)
618
+
619
+ # the error is y-difference between mep[idx] and mep[reference].zero_padded
620
+ err = np.sqrt(np.sum((shift_array[slice_indices] - mep_shift) ** 2, axis=1))
621
+
622
+ # which shift leads to minimum error. remember that we don't start at 0-shift, so add start idx
623
+ t_cond[reference_idx, idx_save] = (start_idx_mep_initial_shift - start_ind[idx]) * stepsize + \
624
+ np.argmin(err) * stepsize_local_shift[reference_idx]
625
+
626
+ # sum all errors and divide by e_mean over all conditions
627
+ congruence_factor[elm_idx, i_datasets] = 1 / (
628
+ np.var(t_cond[0, :]) / (e_mean ** 2)) # changed to squared e
629
+
630
+ return congruence_factor
631
+
632
+
633
+ def cf_variance_workhorse(elm_idx_list, mep, mep_params, e, old_style=True):
634
+ """
635
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
636
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
637
+
638
+ Parameters
639
+ ----------
640
+ elm_idx_list : np.ndarray
641
+ (chunksize) List of element indices for which the congruence factor is computed.
642
+ mep: list of :py:class:`~pynibs.expio.Mep`
643
+ (n_cond) List of fitted Mep object instances for all conditions.
644
+ mep_params: np.ndarray of float
645
+ (n_mep_params_total) List of all mep parameters used to calculate the MEP, accumulated into one array.
646
+
647
+ * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``, ``mep_#2_para_#1``,
648
+ ``mep_#2_para_#2``, ...]
649
+
650
+ e: list of list of np.ndarray of float
651
+ [n_cond][n_datasets][n_elm] Tuple of ``n_datasets`` of the electric field to compute the congruence factor for,
652
+ e.g. ``(e_mag, e_norm, e_tan)``.
653
+ Each dataset is a list over all conditions containing the electric field component of interest
654
+
655
+ * ``len(e) = n_cond``
656
+ * ``len(e[0]) = n_comp`` (e.g. ``e_mag = e[0]``)
657
+
658
+ old_style: bool, default: True
659
+ True: Weight ``var(x_0_prime(r))`` with ``mean(e(r) * mean(Stimulator Intensity))``, taken from ``mep``
660
+ False: Weight ``var(x_0_prime(r))`` with ``mean(E(r))``, taken from `e`
661
+
662
+ Returns
663
+ -------
664
+ congruence_factor: np.ndarray of float
665
+ (n_roi, n_datasets) Congruence factor in each element specified in elm_idx_list and for each input dataset
666
+ """
667
+ n_datasets = len(e[0])
668
+ n_elm = len(elm_idx_list)
669
+ n_conditions = len(mep)
670
+
671
+ mep_params = np.array(mep_params).flatten()
672
+
673
+ congruence_factor = np.empty((n_elm, n_datasets))
674
+
675
+ # rearrange mep parameters to individual conditions
676
+ mep_params_cond = []
677
+ start_idx = 0
678
+ x0_vec = np.empty((1, n_conditions))
679
+ x_mean = np.empty((1, n_conditions))
680
+
681
+ for i_cond in range(n_conditions):
682
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
683
+ start_idx = start_idx + mep[i_cond].popt.size
684
+ x0_vec[0, i_cond] = mep_params_cond[i_cond][0]
685
+ x_mean[0, i_cond] = (mep[i_cond].x_limits[0] + mep[i_cond].x_limits[1]) / 2
686
+
687
+ e_arr = np.array(e)
688
+
689
+ for i_dataset in range(n_datasets):
690
+
691
+ e_mat = np.array(e_arr[:, i_dataset, np.array(elm_idx_list).astype(int)]).transpose()
692
+
693
+ x0_prime = e_mat * x0_vec
694
+
695
+ var_x0_prime = np.var(x0_prime, axis=1)
696
+
697
+ e_mean_vec = np.mean(e_mat * x_mean, axis=1)
698
+
699
+ if old_style:
700
+ congruence_factor[:, i_dataset] = e_mean_vec ** 2 / var_x0_prime
701
+ else:
702
+ congruence_factor[:, i_dataset] = np.mean(e_mat, axis=1) ** 2 / var_x0_prime
703
+
704
+ return congruence_factor
705
+
706
+
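# --- Editor's toy example (not part of the package) -----------------------------------------------
# The core quantity of cf_variance_workhorse for a single element, written out with toy numbers:
# the fitted turning points x0 are mapped into the element's e-field space (x0' = E * x0) and the
# congruence factor is the squared weighting field divided by the variance of x0' over conditions.
import numpy as np

e_elm = np.array([0.8, 1.0, 1.2])        # field of one element under three coil conditions (a.u.)
x0 = np.array([55.0, 44.0, 37.0])        # fitted turning points per condition (% MSO)
x_mean = np.array([50.0, 40.0, 35.0])    # mean stimulator intensity per condition (% MSO)

x0_prime = e_elm * x0                    # turning points in e-field space
c_old = np.mean(e_elm * x_mean) ** 2 / np.var(x0_prime)   # old_style=True weighting
c_new = np.mean(e_elm) ** 2 / np.var(x0_prime)            # old_style=False weighting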
707
+ def cf_variance_sign_workhorse(elm_idx_list, mep, mep_params, e):
708
+ """
709
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
710
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
711
+
712
+ Parameters
713
+ ----------
714
+ elm_idx_list: np.ndarray
715
+ (chunksize) List of element indices for which the congruence factor is computed.
716
+ mep: list of :py:class:`~pynibs.expio.Mep`
717
+ (n_cond) List of fitted Mep object instances for all conditions.
718
+ mep_params: np.ndarray of float
719
+ (n_mep_params_total) List of all mep parameters of curve fits used to calculate the MEP,
720
+ accumulated into one array, e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``,
721
+ ``mep_#2_para_#1``, ``mep_#2_para_#2``, ...]
722
+ e: list of list of np.ndarray of float
723
+ [n_cond][n_datasets][n_elm] Tuple of ``n_datasets`` of the electric field to compute the congruence factor for,
724
+ e.g. ``(e_mag, e_norm, e_tan)``.
725
+ Each dataset is a list over all conditions containing the electric field component of interest
726
+
727
+ * ``len(e) = n_cond``
728
+ * ``len(e[0]) = n_comp`` (e.g. ``e_mag = e[0]``)
729
+
730
+ Returns
731
+ -------
732
+ congruence_factor: np.ndarray of float
733
+ (n_roi, n_datasets) Congruence factor in each element specified in elm_idx_list and for each input dataset.
734
+ """
735
+ n_datasets = len(e[0])
736
+ n_elm = len(elm_idx_list)
737
+ n_conditions = len(mep)
738
+
739
+ mep_params = np.array(mep_params).flatten()
740
+
741
+ congruence_factor = np.empty((n_elm, n_datasets))
742
+
743
+ # rearrange mep parameters to individual conditions
744
+ mep_params_cond = []
745
+ start_idx = 0
746
+ x0_vec = np.empty((1, n_conditions))
747
+ x_mean = np.empty((1, n_conditions))
748
+
749
+ for i_cond in range(n_conditions):
750
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
751
+ start_idx = start_idx + mep[i_cond].popt.size
752
+ x0_vec[0, i_cond] = mep_params_cond[i_cond][0]
753
+ x_mean[0, i_cond] = (mep[i_cond].x_limits[0] + mep[i_cond].x_limits[1]) / 2
754
+
755
+ e_arr = np.array(e)
756
+
757
+ for i_dataset in range(n_datasets):
758
+ e_mat = np.array(e_arr[:, i_dataset, np.array(elm_idx_list).astype(int)]).transpose()
759
+
760
+ mask_pos = e_mat > 0
761
+ mask_neg = e_mat < 0
762
+
763
+ mask_only_one_curve = np.logical_or(np.sum(mask_pos, axis=1) == 1, np.sum(mask_neg, axis=1) == 1)
764
+ n_curves = np.ones(n_elm) * n_conditions
765
+ n_curves[mask_only_one_curve] = n_conditions - 1
766
+
767
+ x0_prime = e_mat * x0_vec
768
+
769
+ var_pos = np.array([np.sum(mask_pos[i, :]) * np.var(row[mask_pos[i, :]]) for i, row in enumerate(x0_prime)])
770
+ var_neg = np.array([np.sum(mask_neg[i, :]) * np.var(row[mask_neg[i, :]]) for i, row in enumerate(x0_prime)])
771
+
772
+ var_pos[np.isnan(var_pos)] = 0
773
+ var_neg[np.isnan(var_neg)] = 0
774
+
775
+ mean_pos = np.array([np.mean(row[mask_pos[i, :]] * x_mean[0, mask_pos[i, :]]) for i, row in enumerate(e_mat)])
776
+ mean_neg = np.array([np.mean(row[mask_neg[i, :]] * x_mean[0, mask_neg[i, :]]) for i, row in enumerate(e_mat)])
777
+
778
+ mean_pos[np.isnan(mean_pos)] = np.inf
779
+ mean_neg[np.isnan(mean_neg)] = np.inf
780
+
781
+ mean_pos[np.isnan(var_pos)] = np.inf
782
+ mean_neg[np.isnan(var_neg)] = np.inf
783
+
784
+ var = (var_pos / mean_pos ** 2 + var_neg / mean_neg ** 2) / n_curves
785
+
786
+ congruence_factor[:, i_dataset] = 1 / var
787
+
788
+ return congruence_factor
789
+
790
+
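# --- Editor's toy example (not part of the package) -----------------------------------------------
# cf_variance_sign_workhorse treats conditions with positive and negative field components
# (e.g. E_norm flipping sign across a gyral wall) as two separate groups and pools their
# variances, each normalized by its squared mean weighting field:
import numpy as np

e_elm = np.array([0.9, 1.1, -0.7, -1.0])     # one element, four conditions, mixed-sign E_norm
x0 = np.array([50.0, 42.0, 60.0, 48.0])      # fitted turning points per condition (% MSO)
x_mean = np.array([48.0, 40.0, 55.0, 45.0])  # mean stimulator intensities per condition (% MSO)

x0_prime = e_elm * x0
pos, neg = e_elm > 0, e_elm < 0
var_pos = pos.sum() * np.var(x0_prime[pos])
var_neg = neg.sum() * np.var(x0_prime[neg])
mean_pos = np.mean(e_elm[pos] * x_mean[pos])
mean_neg = np.mean(e_elm[neg] * x_mean[neg])
c = 1.0 / ((var_pos / mean_pos ** 2 + var_neg / mean_neg ** 2) / e_elm.size)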
791
+ def cf_curveshift_workhorse(elm_idx_list, mep, mep_params, e, n_samples=100):
792
+ """
793
+ Worker function for congruence factor computation - call from :py:class:`multiprocessing.Pool`.
794
+ Calculates congruence factor for ``e = (E_mag, E_norm and/or E_tan)`` for given zaps and elements.
795
+ The computations are parallelized in terms of element indices (``elm_idx_list``).
796
+ n_samples are taken from fitted_mep, within the range of the :py:class:`~pynibs.expio.Mep`.
797
+
798
+ Parameters
799
+ ----------
800
+ elm_idx_list : np.ndarray
801
+ (chunksize) List of element indices for which the congruence factor is computed.
802
+ mep: list of :py:class:`~pynibs.expio.Mep`
803
+ (n_cond) List of fitted Mep object instances for all conditions.
804
+ mep_params : np.ndarray of float
805
+ (n_mep_params_total) List of all mep parameters of curve fits used to calculate the MEP,
806
+ accumulated into one array.
807
+
808
+ * e.g. [``mep_#1_para_#1``, ``mep_#1_para_#2``, ``mep_#1_para_#3``,
809
+ ``mep_#2_para_#1``, ``mep_#2_para_#2``, ...]
810
+
811
+ e : list of list of np.ndarray of float
812
+ [n_cond][n_datasets][n_elm] Tuple of ``n_datasets`` of the electric field to compute the congruence factor for,
813
+ e.g. ``(e_mag, e_norm, e_tan)``.
814
+ Each dataset is a list over all conditions containing the electric field component of interest.
815
+
816
+ * ``len(e) = n_cond``
817
+ * ``len(e[0]) = n_comp`` (e.g. ``e_mag = e[0]``)
818
+
819
+ n_samples : int, default=100
820
+ Number of data points to generate discrete mep and e curves.
821
+
822
+ Returns
823
+ -------
824
+ congruence_factor: np.ndarray of float
825
+ (n_roi, n_datasets) Congruence factor in each element specified in elm_idx_list and for each input dataset.
826
+
827
+ """
828
+ n_datasets = len(e[0])
829
+ n_elm = len(elm_idx_list)
830
+ n_conditions = len(mep)
831
+
832
+ mep_params = np.array(mep_params).flatten()
833
+
834
+ congruence_factor = np.empty((n_elm, n_datasets))
835
+
836
+ # rearrange mep parameters to individual conditions
837
+ mep_params_cond = []
838
+ start_idx = 0
839
+ for i_cond in range(n_conditions):
840
+ mep_params_cond.append(mep_params[start_idx:(start_idx + mep[i_cond].popt.size)])
841
+ start_idx = start_idx + mep[i_cond].popt.size
842
+
843
+ del start_idx
844
+
845
+ intensities = []
846
+ mep_curve = []
847
+
848
+ # calculate mep curves per condition
849
+ for i_cond in range(n_conditions):
850
+ intensities.append(np.arange(mep[i_cond].x_limits[0],
851
+ mep[i_cond].x_limits[1],
852
+ step=(mep[i_cond].x_limits[1] - mep[i_cond].x_limits[0]) / float(n_samples)))
853
+ mep_curve.append(mep[i_cond].eval(intensities[-1], mep_params_cond[i_cond]))
854
+
855
+ for i_datasets in range(n_datasets):
856
+
857
+ # calculate corresponding electric field values per condition
858
+ for elm_idx, elmIdx in enumerate(elm_idx_list):
859
+
860
+ e_curve = []
861
+
862
+ for i_cond in range(n_conditions):
863
+ e_curve.append(e[i_cond][i_datasets][elmIdx] * intensities[i_cond])
864
+
865
+ congruence_factor[elm_idx, i_datasets] = cf_curveshift_kernel(e_curve, mep_curve)
866
+ # print("{}:{}".format(idx, len(elm_idx_list)))
867
+ return congruence_factor
868
+
869
+
870
+ def cf_curveshift_kernel(e_curve, mep_curve):
871
+ """
872
+ Curve congruence (overlap) measure for multiple MEP curves per element. Determines the average displacement
873
+ between the MEP curves. The congruence factor is weighted by ``median(E)`` and summed up. This favors elements which
874
+ have greater E, as these are more likely to produce MEPs.
875
+
876
+ .. math::
877
+ dE = \\begin{bmatrix}
878
+ dE_{11} & dE_{12} & ... & dE_{1n} \\\\
879
+ dE_{21} & dE_{22} & ... & dE_{2n} \\\\
880
+ ... & ... & ... & ... \\\\
881
+ dE_{n1} & dE_{n2} & ... & dE_{nn} \\\\
882
+ \\end{bmatrix}
883
+
884
+ -> ``congruence_factor ~ np.linalg.norm(dE)/median(E)/n_cond/2``
885
+
886
+ Parameters
887
+ ----------
888
+ e_curve: list of np.ndarray of float
889
+ (n_cond) List over all conditions of electric field values corresponding to the mep amplitudes.
890
+ mep_curve: list of np.ndarray of float
891
+ (n_cond) List over all conditions of mep values corresponding to the electric field.
892
+
893
+ Returns
894
+ -------
895
+ congruence_factor: float
896
+ Congruence factor for the n_cond electric field and MEP curves.
897
+ """
898
+
899
+ stepsize = 1e-1
900
+ n_condition = len(mep_curve)
901
+ e_min = np.min(e_curve, axis=1) # minima of electric field for every condition
902
+ # ceil to .stepsize
903
+ e_min = np.ceil(e_min / stepsize) * stepsize
904
+ e_max = np.max(e_curve, axis=1) # maxima of electric field for every condition
905
+ e_max = np.floor(e_max / stepsize) * stepsize
906
+
907
+ # return NaN if xmax-xmin is smaller than stepsize
908
+ if np.any(e_max - e_min <= stepsize):
909
+ return np.nan
910
+
911
+ else:
912
+ # stepsize-wise e over all conditions. we only need the length of this and first elm
913
+
914
+ mep_y_all_cond = []
915
+ start_ind = np.empty(n_condition, dtype=int)
916
+ stop_ind = np.empty(n_condition, dtype=int)
917
+ for idx in range(n_condition):
918
+ # x range for e for conditions, stepsize wise
919
+ e_x_cond = np.arange(e_min[idx], e_max[idx], stepsize)
920
+ # e_x_cond_all.append(e_x_cond)
921
+
922
+ # interpolate mep values to stepsize width
923
+ mep_y_all_cond.append(np.interp(e_x_cond, e_curve[idx], mep_curve[idx]))
924
+ # mep_y_all_cond.append(mep_y_cond)
925
+
926
+ # lower boundary idx of e_x_cond in e_arr
927
+ start_idx = int((e_x_cond[0] - np.min(e_min)) / stepsize)
928
+ stop_idx = start_idx + len(e_x_cond)
929
+ stop_ind[idx] = stop_idx
930
+ start_ind[idx] = start_idx
931
+
932
+ # mean e-field over all conditions (midpoint of each condition's e-range)
933
+ e_mean = np.mean((e_max + e_min) / 2)
934
+
935
+ # get tau distances for all conditions vs median condition
936
+ # distances for ref,i == i,ref. i,i == 0. So only compute upper triangle of matrix
937
+ ref_range = np.arange(n_condition)
938
+ t_cond = np.zeros((n_condition, n_condition))
939
+ idx_range = list(reversed(np.arange(n_condition)))
940
+ for reference_idx in ref_range:
941
+ # remove this reference index from idx_range
942
+ idx_range.pop()
943
+ # as we always measure the distance of the shorter mep_cond, save idx to store in matrix
944
+ reference_idx_backup = reference_idx
945
+ for idx in idx_range:
946
+ # print((reference_idx, idx))
947
+ idx_save = idx
948
+ # restore correct reference idx
949
+ reference_idx = reference_idx_backup
950
+
951
+ # get lengths of mep_y
952
+ len_mep_idx = mep_y_all_cond[idx].shape[0]
953
+ len_mep_ref = mep_y_all_cond[reference_idx].shape[0]
954
+
955
+ # switch ref and idx, as we want to measure from short mep_y
956
+ if len_mep_idx < len_mep_ref:
957
+ reference_idx, idx = idx, reference_idx
958
+ len_mep_idx, len_mep_ref = len_mep_ref, len_mep_idx
959
+
960
+ # and paste reference mep values. errors will be measured against this array
961
+ # create array: global e + 2* len(mep[idx])
962
+ shift_array = np.zeros(2 * len_mep_idx + len_mep_ref)
963
+ shift_array[len_mep_idx:(len_mep_idx + len_mep_ref)] = mep_y_all_cond[reference_idx]
964
+
965
+ # instead of a for loop, I'll use multiple slices:
966
+ # slice_indices[0] is 0-shifting
967
+ # slice_indices[1] is 1-shifting,...
968
+ # we start shifting at start_ind[reference_idx], because range left of that is only 0
969
+ # we stop shifting after len_mep_idx + e_len - stop_ind[reference_idx] times
970
+ # slice_indices.shape == (len_mep_idx + e_len - stop_ind[reference_idx], len_mep_idx)
971
+ slice_indices = np.add(np.arange(len_mep_idx),
972
+ np.arange(len_mep_idx + len_mep_ref)[:, np.newaxis])
973
+
974
+ # compute error vectorized
975
+ # the error is y-difference between mep[idx] and mep[reference].zero_padded
976
+ err = np.sqrt(np.sum((shift_array[slice_indices] - mep_y_all_cond[idx]) ** 2, axis=1))
977
+
978
+ # which shift leads to minimum error. remember that we don't start at 0-shift, so add start index
979
+ if stop_ind[idx] >= start_ind[reference_idx]:
980
+ min_err_idx = np.abs(start_ind[reference_idx] - stop_ind[idx]) - np.argmin(err)
981
+ else:
982
+ min_err_idx = np.abs(start_ind[reference_idx] - stop_ind[idx]) + np.argmin(err)
983
+
984
+ # rescale min_error_idx to real E values
985
+ t_cond[reference_idx_backup, idx_save] = min_err_idx * stepsize
986
+
987
+ # sum all errors and divide by e_mean over all conditions
988
+ congruence_factor = 1 / (np.sqrt(np.sum(np.square(t_cond) * 2)) / e_mean / n_condition / (n_condition - 1))
989
+
990
+ return congruence_factor
991
+
992
+
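# --- Editor's toy example (not part of the package) -----------------------------------------------
# cf_curveshift_kernel can be exercised directly with synthetic recruitment curves: two coil
# conditions sampled at the same stimulator intensities, mapped onto one element's e-field axis
# via a per-condition field-per-unit-intensity factor (all values below are made up).
import numpy as np


def _toy_sigmoid(x, x0, r=0.3):
    return 1.0 / (1.0 + np.exp(-r * (x - x0)))


x = np.linspace(40.0, 80.0, 100)                            # stimulator intensities (% MSO)
e_per_unit = [1.0, 1.3]                                     # element field per unit intensity (a.u.)
e_curve = [x * s for s in e_per_unit]                       # e-field axes of both conditions
mep_curve = [_toy_sigmoid(x, 55.0), _toy_sigmoid(x, 62.0)]  # recruitment curves of both conditions
c = cf_curveshift_kernel(e_curve, mep_curve)                # larger c = curves line up better over e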
993
+ def extract_condition_combination(fn_config_cfg, fn_results_hdf5, conds, fn_out_prefix):
994
+ """
995
+ Extract and plot congruence factor results for specific condition combinations from permutation analysis.
996
+
997
+ Parameters
998
+ ----------
999
+ fn_config_cfg : str
1000
+ Filename of the .cfg file the permutation study was conducted with.
1001
+ fn_results_hdf5 : str
1002
+ Filename of ``.hdf5`` results file generated by ``00_run_c_standard_compute_all_permutations.py``
1003
+ containing congruence factors and condition combinations.
1004
+ conds : list of str
1005
+ (n_cond) List containing condition combinations to extract and plot,
1006
+ e.g. ``['P_0', 'I_225', 'M1_0', 'I_675', 'P_225']``.
1007
+ fn_out_prefix : str
1008
+ Prefix of output filenames of *_data.xdmf, *_data.hdf5 and *_geo.hdf5.
1009
+
1010
+ Returns
1011
+ -------
1012
+ <fn_out_prefix_data.xdmf> : .xdmf file
1013
+ Output file linking *_data.hdf5 and *_geo.hdf5 file to plot in paraview.
1014
+ <fn_out_prefix_data.hdf5> : .hdf5 file
1015
+ Output .hdf5 file containing the data.
1016
+ <fn_out_prefix_geo.hdf5> : .hdf5 file
1017
+ Output .hdf5 file containing the geometry information.
1018
+ """
1019
+
1020
+ # Read config file
1021
+ with open(fn_config_cfg, 'r') as f:
1022
+ config = yaml.load(f, Loader=yaml.FullLoader)  # explicit Loader (required by PyYAML >= 6)
1023
+
1024
+ # Initialize parameters
1025
+ ###############################################
1026
+ fn_subject = config['fn_subject']
1027
+ roi_idx = config['roi_idx']
1028
+ mesh_idx = config['mesh_idx']
1029
+ e_qoi = ['mag', 'norm', 'tan']
1030
+ n_qoi = len(e_qoi)
1031
+
1032
+ # load subject object
1033
+ subject = pynibs.load_subject(fn_subject)
1034
+ mesh_folder = subject.mesh[mesh_idx]["mesh_folder"]
1035
+
1036
+ # loading roi
1037
+ roi = pynibs.load_roi_surface_obj_from_hdf5(subject.mesh[mesh_idx]['fn_mesh_hdf5'])
1038
+
1039
+ # load results file
1040
+ c_extracted = []
1041
+
1042
+ print((" > Loading results file: {} ...".format(fn_results_hdf5)))
1043
+ with h5py.File(fn_results_hdf5, "r") as f:  # open read-only
1044
+ # extract condition combination
1045
+ print(' > Loading condition labels ...')
1046
+ cond_comb = f['conds'][:]
1047
+
1048
+ print(' > Determining condition combination index ...')
1049
+ conds_idx = [idx for idx, c in enumerate(cond_comb) if set(c) == set(conds)][0]
1050
+
1051
+ print(' > Loading corresponding congruence factor results ...')
1052
+ for qoi_idx, qoi in enumerate(e_qoi):
1053
+ e_tri_idx = list(range(0, f['c'].shape[0], n_qoi))
1054
+ e_tri_idx = [mag + qoi_idx for mag in e_tri_idx]
1055
+ c_extracted.append(f['c'][e_tri_idx, conds_idx][:])
1056
+
1057
+ # map data to whole brain surface
1058
+ print(" > Mapping c-factor map to brain surface...")
1059
+ c_mapped = pynibs.mesh.transformations.map_data_to_surface(
1060
+ datasets=[c_extracted[qoi_idx][:, np.newaxis] for qoi_idx in range(n_qoi)],
1061
+ points_datasets=[roi[roi_idx].node_coord_mid] * n_qoi,
1062
+ con_datasets=[roi[roi_idx].node_number_list] * n_qoi,
1063
+ fname_fsl_gm=os.path.join(mesh_folder, subject.mesh[mesh_idx]['fn_lh_gm']),
1064
+ fname_fsl_wm=os.path.join(mesh_folder, subject.mesh[mesh_idx]['fn_lh_wm']),
1065
+ delta=subject.roi[mesh_idx][roi_idx]['delta'],
1066
+ input_data_in_center=True,
1067
+ return_data_in_center=True,
1068
+ data_substitute=-1)
1069
+
1070
+ # recreate complete midlayer surface to write in .hdf5 geo file
1071
+ points_midlayer, con_midlayer = pynibs.make_GM_WM_surface(
1072
+ gm_surf_fname=os.path.join(mesh_folder, subject.mesh[mesh_idx]['fn_lh_gm']),
1073
+ wm_surf_fname=os.path.join(mesh_folder, subject.mesh[mesh_idx]['fn_lh_wm']),
1074
+ delta=subject.roi[mesh_idx][roi_idx]['delta'],
1075
+ x_roi=None,
1076
+ y_roi=None,
1077
+ z_roi=None,
1078
+ layer=1,
1079
+ fn_mask=None)
1080
+
1081
+ # write output files
1082
+ # save .hdf5 _geo file
1083
+ print(" > Creating .hdf5 geo file of mapped and roi only data ...")
1084
+ pynibs.write_geo_hdf5_surf(out_fn=fn_out_prefix + '_geo.hdf5',
1085
+ points=points_midlayer,
1086
+ con=con_midlayer,
1087
+ replace=True,
1088
+ hdf5_path='/mesh')
1089
+
1090
+ pynibs.write_geo_hdf5_surf(out_fn=fn_out_prefix + '_geo_roi.hdf5',
1091
+ points=roi[roi_idx].node_coord_mid,
1092
+ con=roi[roi_idx].node_number_list,
1093
+ replace=True,
1094
+ hdf5_path='/mesh')
1095
+
1096
+ # save .hdf5 _data file
1097
+ print(" > Creating .hdf5 data file of mapped and roi only data ...")
1098
+ pynibs.write_data_hdf5_surf(data=c_mapped,
1099
+ data_names=['c_' + e_qoi[qoi_idx] for qoi_idx in range(n_qoi)],
1100
+ data_hdf_fn_out=fn_out_prefix + '_data.hdf5',
1101
+ geo_hdf_fn=fn_out_prefix + '_geo.hdf5',
1102
+ replace=True)
1103
+
1104
+ pynibs.write_data_hdf5_surf(data=[c_extracted[qoi_idx][:, np.newaxis] for qoi_idx in range(n_qoi)],
1105
+ data_names=['c_' + e_qoi[qoi_idx] for qoi_idx in range(n_qoi)],
1106
+ data_hdf_fn_out=fn_out_prefix + '_data_roi.hdf5',
1107
+ geo_hdf_fn=fn_out_prefix + '_geo_roi.hdf5',
1108
+ replace=True)
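# --- Editor's note (not part of the package): hypothetical call, all paths are placeholders -------
# extract_condition_combination(
#     fn_config_cfg="analysis/permutation_study.yaml",
#     fn_results_hdf5="results/c_factor_permutations.hdf5",
#     conds=['P_0', 'I_225', 'M1_0', 'I_675', 'P_225'],
#     fn_out_prefix="results/c_P0_I225_M10_I675_P225")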