pyNIBS-0.2024.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. pyNIBS-0.2024.8.dist-info/LICENSE +623 -0
  2. pyNIBS-0.2024.8.dist-info/METADATA +723 -0
  3. pyNIBS-0.2024.8.dist-info/RECORD +107 -0
  4. pyNIBS-0.2024.8.dist-info/WHEEL +5 -0
  5. pyNIBS-0.2024.8.dist-info/top_level.txt +1 -0
  6. pynibs/__init__.py +34 -0
  7. pynibs/coil.py +1367 -0
  8. pynibs/congruence/__init__.py +15 -0
  9. pynibs/congruence/congruence.py +1108 -0
  10. pynibs/congruence/ext_metrics.py +257 -0
  11. pynibs/congruence/stimulation_threshold.py +318 -0
  12. pynibs/data/configuration_exp0.yaml +59 -0
  13. pynibs/data/configuration_linear_MEP.yaml +61 -0
  14. pynibs/data/configuration_linear_RT.yaml +61 -0
  15. pynibs/data/configuration_sigmoid4.yaml +68 -0
  16. pynibs/data/network mapping configuration/configuration guide.md +238 -0
  17. pynibs/data/network mapping configuration/configuration_TEMPLATE.yaml +42 -0
  18. pynibs/data/network mapping configuration/configuration_for_testing.yaml +43 -0
  19. pynibs/data/network mapping configuration/configuration_modelTMS.yaml +43 -0
  20. pynibs/data/network mapping configuration/configuration_reg_isi_05.yaml +43 -0
  21. pynibs/data/network mapping configuration/output_documentation.md +185 -0
  22. pynibs/data/network mapping configuration/recommendations_for_accuracy_threshold.md +77 -0
  23. pynibs/data/neuron/models/L23_PC_cADpyr_biphasic_v1.csv +1281 -0
  24. pynibs/data/neuron/models/L23_PC_cADpyr_monophasic_v1.csv +1281 -0
  25. pynibs/data/neuron/models/L4_LBC_biphasic_v1.csv +1281 -0
  26. pynibs/data/neuron/models/L4_LBC_monophasic_v1.csv +1281 -0
  27. pynibs/data/neuron/models/L4_NBC_biphasic_v1.csv +1281 -0
  28. pynibs/data/neuron/models/L4_NBC_monophasic_v1.csv +1281 -0
  29. pynibs/data/neuron/models/L4_SBC_biphasic_v1.csv +1281 -0
  30. pynibs/data/neuron/models/L4_SBC_monophasic_v1.csv +1281 -0
  31. pynibs/data/neuron/models/L5_TTPC2_cADpyr_biphasic_v1.csv +1281 -0
  32. pynibs/data/neuron/models/L5_TTPC2_cADpyr_monophasic_v1.csv +1281 -0
  33. pynibs/expio/Mep.py +1518 -0
  34. pynibs/expio/__init__.py +8 -0
  35. pynibs/expio/brainsight.py +979 -0
  36. pynibs/expio/brainvis.py +71 -0
  37. pynibs/expio/cobot.py +239 -0
  38. pynibs/expio/exp.py +1876 -0
  39. pynibs/expio/fit_funs.py +287 -0
  40. pynibs/expio/localite.py +1987 -0
  41. pynibs/expio/signal_ced.py +51 -0
  42. pynibs/expio/visor.py +624 -0
  43. pynibs/freesurfer.py +502 -0
  44. pynibs/hdf5_io/__init__.py +10 -0
  45. pynibs/hdf5_io/hdf5_io.py +1857 -0
  46. pynibs/hdf5_io/xdmf.py +1542 -0
  47. pynibs/mesh/__init__.py +3 -0
  48. pynibs/mesh/mesh_struct.py +1394 -0
  49. pynibs/mesh/transformations.py +866 -0
  50. pynibs/mesh/utils.py +1103 -0
  51. pynibs/models/_TMS.py +211 -0
  52. pynibs/models/__init__.py +0 -0
  53. pynibs/muap.py +392 -0
  54. pynibs/neuron/__init__.py +2 -0
  55. pynibs/neuron/neuron_regression.py +284 -0
  56. pynibs/neuron/util.py +58 -0
  57. pynibs/optimization/__init__.py +5 -0
  58. pynibs/optimization/multichannel.py +278 -0
  59. pynibs/optimization/opt_mep.py +152 -0
  60. pynibs/optimization/optimization.py +1445 -0
  61. pynibs/optimization/workhorses.py +698 -0
  62. pynibs/pckg/__init__.py +0 -0
  63. pynibs/pckg/biosig/biosig4c++-1.9.5.src_fixed.tar.gz +0 -0
  64. pynibs/pckg/libeep/__init__.py +0 -0
  65. pynibs/pckg/libeep/pyeep.so +0 -0
  66. pynibs/regression/__init__.py +11 -0
  67. pynibs/regression/dual_node_detection.py +2375 -0
  68. pynibs/regression/regression.py +2984 -0
  69. pynibs/regression/score_types.py +0 -0
  70. pynibs/roi/__init__.py +2 -0
  71. pynibs/roi/roi.py +895 -0
  72. pynibs/roi/roi_structs.py +1233 -0
  73. pynibs/subject.py +1009 -0
  74. pynibs/tensor_scaling.py +144 -0
  75. pynibs/tests/data/InstrumentMarker20200225163611937.xml +19 -0
  76. pynibs/tests/data/TriggerMarkers_Coil0_20200225163443682.xml +14 -0
  77. pynibs/tests/data/TriggerMarkers_Coil1_20200225170337572.xml +6373 -0
  78. pynibs/tests/data/Xdmf.dtd +89 -0
  79. pynibs/tests/data/brainsight_niiImage_nifticoord.txt +145 -0
  80. pynibs/tests/data/brainsight_niiImage_nifticoord_largefile.txt +1434 -0
  81. pynibs/tests/data/brainsight_niiImage_niifticoord_mixedtargets.txt +47 -0
  82. pynibs/tests/data/create_subject_testsub.py +332 -0
  83. pynibs/tests/data/data.hdf5 +0 -0
  84. pynibs/tests/data/geo.hdf5 +0 -0
  85. pynibs/tests/test_coil.py +474 -0
  86. pynibs/tests/test_elements2nodes.py +100 -0
  87. pynibs/tests/test_hdf5_io/test_xdmf.py +61 -0
  88. pynibs/tests/test_mesh_transformations.py +123 -0
  89. pynibs/tests/test_mesh_utils.py +143 -0
  90. pynibs/tests/test_nnav_imports.py +101 -0
  91. pynibs/tests/test_quality_measures.py +117 -0
  92. pynibs/tests/test_regressdata.py +289 -0
  93. pynibs/tests/test_roi.py +17 -0
  94. pynibs/tests/test_rotations.py +86 -0
  95. pynibs/tests/test_subject.py +71 -0
  96. pynibs/tests/test_util.py +24 -0
  97. pynibs/tms_pulse.py +34 -0
  98. pynibs/util/__init__.py +4 -0
  99. pynibs/util/dosing.py +233 -0
  100. pynibs/util/quality_measures.py +562 -0
  101. pynibs/util/rotations.py +340 -0
  102. pynibs/util/simnibs.py +763 -0
  103. pynibs/util/util.py +727 -0
  104. pynibs/visualization/__init__.py +2 -0
  105. pynibs/visualization/para.py +4372 -0
  106. pynibs/visualization/plot_2D.py +137 -0
  107. pynibs/visualization/render_3D.py +347 -0
pynibs/util/util.py ADDED
@@ -0,0 +1,727 @@
import os
import copy
import time
import h5py
import inspect
import warnings
import itertools
import subprocess
import numpy as np
from scipy.special import binom
from sklearn.neighbors import KernelDensity

import pynibs


def tal2mni(coords, direction='tal2mni', style='nonlinear'):
    """
    Transform Talairach coordinates into (SPM) MNI space and vice versa.

    This is taken from https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach and
    http://gibms.mc.ntu.edu.tw/bmlab/tools/data-analysis-codes/mni2tal-m/

    Parameters
    ----------
    coords : np.ndarray or list
        x,y,z coordinates.
    direction : str, default: 'tal2mni'
        Transformation direction. One of ('tal2mni', 'mni2tal').
    style : str, default: 'nonlinear'
        Transformation style. One of ('linear', 'nonlinear').

    Returns
    -------
    coords_trans : np.ndarray
        Transformed x,y,z coordinates.
    """
    assert direction in ['tal2mni', 'mni2tal'], \
        f"direction parameter '{direction}' invalid. Choose 'tal2mni' or 'mni2tal'."
    assert style in ['linear', 'nonlinear'], f"style parameter '{style}' invalid. Choose 'linear' or 'nonlinear'."
    if len(coords) == 3:
        coords = np.hstack((coords, 1))
    if style == 'linear':
        mat = np.array([[0.88, 0, 0, -0.8], [0, 0.97, 0, -3.32], [0, 0.05, 0.88, -0.44], [0, 0, 0, 1]])
        if direction == 'mni2tal':
            return np.dot(mat, coords)[:3]
        elif direction == 'tal2mni':
            return np.dot(np.linalg.inv(mat), coords)[:3]
    elif style == 'nonlinear':
        upper_mat = np.array([[0.99, 0, 0, 0],
                              [0, 0.9688, 0.0460, 0],
                              [0, -0.0485, 0.9189, 0],
                              [0, 0, 0, 1]])

        lower_mat = np.array([[0.99, 0, 0, 0],
                              [0, 0.9688, 0.042, 0],
                              [0, -0.0485, 0.839, 0],
                              [0, 0, 0, 1]])
        if direction == 'mni2tal':
            pass
        elif direction == 'tal2mni':
            upper_mat = np.linalg.inv(upper_mat)
            lower_mat = np.linalg.inv(lower_mat)
        if coords[2] > 0:
            # above AC
            return np.dot(upper_mat, coords)[:3]
        else:
            # below AC
            return np.dot(lower_mat, coords)[:3]


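A quick usage sketch (editorial illustration, not part of the released file), with made-up coordinates:

    mni = tal2mni([10.0, -20.0, 30.0], direction='tal2mni')   # Talairach -> MNI, nonlinear (default)
    tal = tal2mni(mni, direction='mni2tal')                   # round-trips back to ~[10, -20, 30]
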
def rd(array, array_ref):
    """
    Determine the relative difference between input data and reference data.

    Parameters
    ----------
    array : np.ndarray
        Input data [ (x), y0, y1, y2 ... ].
    array_ref : np.ndarray
        Reference data [ (x_ref), y0_ref, y1_ref, y2_ref ... ].
        If array_ref is 1D, all sizes have to match.

    Returns
    -------
    rd : float
        Relative difference between array and array_ref (norm of the difference
        divided by the norm of the reference).
    """
    return np.linalg.norm(array - array_ref) / np.linalg.norm(array_ref)


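For example (illustrative values), a uniform 1 % perturbation of a reference vector yields a relative difference of exactly 0.01:

    ref = np.array([1.0, 2.0, 3.0])
    rd(ref * 1.01, ref)   # -> 0.01
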
def generalized_extreme_value_distribution(x, mu, sigma, k):
    """
    Generalized extreme value distribution.

    Parameters
    ----------
    x : ndarray of float
        (n_x) Events.
    mu : float
        Location parameter.
    sigma : float
        Scale parameter.
    k : float
        Shape parameter.

    Returns
    -------
    y : ndarray of float
        (n_x) Probability density of events.
    """
    y = 1 / sigma * np.exp(-(1 + (k * (x - mu)) / sigma) ** (-1 / k)) * (1 + (k * (x - mu)) / sigma) ** (-(1 + 1 / k))

    return y


def differential_evolution(fobj, bounds, mut=0.8, crossp=0.7, popsize=20, its=1000, **kwargs):
    """
    Differential evolution optimization algorithm.

    Parameters
    ----------
    fobj : function object
        Function to optimize (minimize).
    bounds : dict
        Dictionary containing the bounds of the free variables to optimize.
    mut : float, default: 0.8
        Mutation factor.
    crossp : float, default: 0.7
        Crossover probability.
    popsize : int, default: 20
        Population size.
    its : int, default: 1000
        Number of iterations.
    kwargs : dict
        Arguments passed to fobj (constants etc.).

    Returns
    -------
    best : dict
        Dictionary containing the best values.
    fitness : float
        Fitness value of best solution.
    """
    if kwargs is None:
        kwargs = dict()

    fobj_args = inspect.getfullargspec(fobj).args

    if "bounds" in fobj_args:
        kwargs["bounds"] = bounds
    params_str = list(bounds.keys())

    # set up initial simulations
    dimensions = len(bounds)
    pop = np.random.rand(popsize, dimensions)

    min_b = np.zeros(dimensions)
    max_b = np.zeros(dimensions)

    for i, key in enumerate(bounds):
        min_b[i] = bounds[key][0]
        max_b[i] = bounds[key][1]

    diff = np.fabs(min_b - max_b)
    pop_denorm = min_b + pop * diff

    print("Initial simulations:")
    print("====================")

    fitness = np.zeros(len(pop_denorm))

    for i, po in enumerate(pop_denorm):

        for i_key, key in enumerate(params_str):
            kwargs[key] = po[i_key]

        fitness[i] = fobj(**kwargs)

    best_idx = np.argmin(fitness)
    best = pop_denorm[best_idx]

    parameter_str = [f"{params_str[i_p]}={p_:.5f}" for i_p, p_ in enumerate(pop_denorm[best_idx])]
    print(f"-> Fittest: {fitness[best_idx]:.3f} / " + ", ".join(parameter_str))

    for i in range(its):
        print(f"Iteration: {i}")
        print("==============")

        trial_denorm = []
        trial = []

        # create new parameter sets
        for j in range(popsize):
            idxs = [idx for idx in range(popsize) if idx != j]
            a, b, c = pop[np.random.choice(idxs, 3, replace=False)]
            mutant = np.clip(a + mut * (b - c), 0, 1)
            cross_points = np.random.rand(dimensions) < crossp

            if not np.any(cross_points):
                cross_points[np.random.randint(0, dimensions)] = True

            trial.append(np.where(cross_points, mutant, pop[j]))
            trial_denorm.append(min_b + trial[j] * diff)

        # run likelihood function
        f = np.zeros(len(trial))

        for j, tr in enumerate(trial_denorm):
            for i_key, key in enumerate(params_str):
                kwargs[key] = tr[i_key]
            f[j] = fobj(**kwargs)

        for j in range(popsize):
            if f[j] < fitness[j]:
                fitness[j] = copy.deepcopy(f[j])
                pop[j] = trial[j]
                if f[j] < fitness[best_idx]:
                    best_idx = j
                    best = trial_denorm[j]

        parameter_str = [f"{params_str[i_p]}={p_:.5f}" for i_p, p_ in enumerate(best)]
        print(f"-> Fittest: {fitness[best_idx]:.3f} / " + ", ".join(parameter_str))

    best_dict = dict()
    for i_key, key in enumerate(params_str):
        best_dict[key] = best[i_key]

    return best_dict, fitness[best_idx]


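A minimal sketch of the expected call pattern (editorial illustration, not part of the released file); the objective's argument names must match the keys of `bounds`, since parameters are passed by keyword:

    def sphere(a, b):
        # toy objective with its minimum at (a, b) = (1, -2)
        return (a - 1.0) ** 2 + (b + 2.0) ** 2

    best, fit = differential_evolution(sphere, bounds={'a': (-5, 5), 'b': (-5, 5)},
                                       popsize=15, its=50)
    # best -> {'a': ~1.0, 'b': ~-2.0}, fit -> ~0.0
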
def sigmoid_log_p(x, p):
    """
    Log10 of a three-parameter sigmoid: log10(p[0] / (1 + exp(-p[1] * (x - p[2])))).
    """
    y = np.log10(p[0] / (1 + np.exp(-p[1] * (x - p[2]))))
    return y


def likelihood_posterior(x, y, fun, bounds=None, verbose=True, normalized_params=False, **params):
    """
    Determines the likelihood of the data following the function "fun", assuming two
    sources of variability in the data pairs (x, y), using the posterior distribution.

    Parameters
    ----------
    x : ndarray of float
        (n_points) x data.
    y : ndarray of float
        (n_points) y data.
    fun : function
        Function to fit the data to (e.g. sigmoid).
    bounds : dict, optional
        Dictionary containing the bounds of "sigma_x" and "sigma_y" and the free parameters of fun.
    verbose : bool, default: True
        Print function output after every calculation.
    normalized_params : bool, default: False
        Are the parameters passed in normalized space between [0, 1]? If so, bounds are used to
        denormalize them before calculation.
    **params : dict
        Free parameters to optimize. Contains "sigma_x", "sigma_y", and the free parameters of fun.

    Returns
    -------
    l : float
        Negative log-likelihood.
    """
    start = time.time()

    # read arguments from function
    # args = inspect.getfullargspec(fun).args

    # extract parameters
    sigma_x = params["sigma_x"]
    sigma_y = params["sigma_y"]

    del params["sigma_x"], params["sigma_y"]

    # denormalize parameters from [0, 1] to bounds
    if normalized_params:
        if bounds is None:
            raise ValueError("Please provide bounds if parameters were passed normalized!")
        sigma_x = sigma_x * (bounds["sigma_x"][1] - bounds["sigma_x"][0]) + bounds["sigma_x"][0]
        sigma_y = sigma_y * (bounds["sigma_y"][1] - bounds["sigma_y"][0]) + bounds["sigma_y"][0]

        for key in params:
            params[key] = params[key] * (bounds[key][1] - bounds[key][0]) + bounds[key][0]

    if sigma_x < 0:
        sigma_x = 0

    if sigma_y < 0:
        sigma_y = 0

    # determine posterior of DVS model with test data
    x_pre = np.linspace(np.min(x), np.max(x), 500000)
    x_post = x_pre + np.random.normal(loc=0., scale=sigma_x, size=len(x_pre))
    y_post = fun(x_post, **params) + np.random.normal(loc=0., scale=sigma_y, size=len(x_pre))

    # bin data
    n_bins = 50
    dx_bins = (np.max(x_pre) - np.min(x_pre)) / n_bins
    x_bins_loc = np.linspace(np.min(x_pre) + dx_bins / 2, np.max(x_pre) - dx_bins / 2, n_bins)

    # determine probabilities of observations
    kde = KernelDensity(bandwidth=0.01, kernel='gaussian')

    likelihood = []

    for i in range(n_bins):
        mask = np.logical_and(x_pre >= (x_bins_loc[i] - dx_bins / 2), x_pre < (x_bins_loc[i] + dx_bins / 2))
        mask_data = np.logical_and(x >= (x_bins_loc[i] - dx_bins / 2), x < (x_bins_loc[i] + dx_bins / 2))

        if np.sum(mask_data) == 0:
            continue

        # determine kernel density estimate
        try:
            kde_bins = kde.fit(y_post[mask][:, np.newaxis])
        except ValueError:
            warnings.warn("kde.fit(y_post[mask][:, np.newaxis]) yielded NaN ... skipping bin")
            continue

        # get probability densities at data
        kde_y_post_bins = np.exp(kde_bins.score_samples(y[mask_data][:, np.newaxis]))

        likelihood.append(kde_y_post_bins)

    likelihood = np.concatenate(likelihood)

    # mask out zero probabilities
    likelihood[likelihood == 0] = 1e-100

    # determine log likelihood
    likelihood = np.sum(np.log10(likelihood))

    stop = time.time()

    if verbose:
        parameter_str = [f"{p_}={params[p_]:.5f}" for p_ in params]
        print(f"Likelihood: {likelihood:.1f} / sigma_x={sigma_x:.5f}, sigma_y={sigma_y:.5f}, " +
              ", ".join(parameter_str) + f" ({stop - start:.2f} sec)")

    return -likelihood


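A sketch of how this could be called for a hypothetical sigmoid (editorial illustration; `sigma_x`, `sigma_y`, and the function's free parameters are all passed as keyword arguments, and the result is stochastic because the posterior is sampled):

    def sigmoid(x, amp, r, x0):
        return amp / (1 + np.exp(-r * (x - x0)))

    x = np.linspace(0, 1, 200)
    y = sigmoid(x, amp=1.0, r=10.0, x0=0.5) + np.random.normal(0, 0.05, x.size)
    nll = likelihood_posterior(x, y, sigmoid, sigma_x=0.02, sigma_y=0.05,
                               amp=1.0, r=10.0, x0=0.5)
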
def mutual_coherence(array):
    """
    Calculate the mutual coherence of a matrix A. It can also be interpreted as the cosine
    of the smallest angle between two columns.

    mutual_coherence = mutual_coherence(array)

    Parameters
    ----------
    array : ndarray of float
        Input matrix.

    Returns
    -------
    mutual_coherence : float
        Mutual coherence.
    """
    array = array / np.linalg.norm(array, axis=0)[np.newaxis, :]
    t = np.matmul(array.conj().T, array)
    np.fill_diagonal(t, 0.0)
    mu = np.max(t)

    # s = np.sqrt(np.diag(t))
    # s_sqrt = np.diag(s)
    # mu = np.max(1.0*(t-s_sqrt)/np.outer(s, s))

    return mu


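Two quick checks (illustrative): a matrix with orthogonal columns has coherence 0, while a random tall matrix falls strictly between 0 and 1:

    mutual_coherence(np.eye(4))                 # -> 0.0
    mutual_coherence(np.random.randn(50, 10))   # -> some value in (0, 1)
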
def get_cartesian_product(array_list):
    """
    Generate a cartesian product of input arrays (all combinations).

    cartesian_product = get_cartesian_product(array_list)

    Parameters
    ----------
    array_list : list of 1D ndarray of float
        Arrays to compute the cartesian product with.

    Returns
    -------
    cartesian_product : ndarray of float
        (M, len(arrays)) Array containing the cartesian products (all combinations of input vectors).

    Examples
    --------
    .. code-block:: python

        import pynibs
        out = pynibs.get_cartesian_product(([1, 2, 3], [4, 5], [6, 7]))
    """
    cartesian_product = [element for element in itertools.product(*array_list)]
    return np.array(cartesian_product)


def norm_percentile(data, percentile):
    """
    Normalizes data to a given percentile.

    Parameters
    ----------
    data : np.ndarray
        (n_data,) Dataset to normalize.
    percentile : float
        Percentile of normalization value [0 ... 100].

    Returns
    -------
    data_norm : np.ndarray
        (n_data,) Normalized dataset.
    """
    return data / np.percentile(data, percentile)


def compute_chunks(seq, num):
    """
    Splits up a sequence _seq_ into _num_ chunks of similar size.
    If len(seq) < num, (num - len(seq)) empty chunks are returned so that len(out) == num.

    Parameters
    ----------
    seq : list of something
        (n_ele) List containing data or indices, which is divided into chunks.
    num : int
        Number of chunks to generate.

    Returns
    -------
    out : list of num sublists
        num sub-lists of seq, each with a similar number of elements (or empty).
    """
    assert len(seq) > 0
    assert num > 0
    assert isinstance(seq, list), f"{type(seq)} can't be chunked. Provide list."
    avg = len(seq) / float(num)
    n_empty = 0  # if len(seq) < num, how many empty lists to append to the output?

    if avg < 1:
        # raise ValueError("seq/num ratio too small: " + str(avg))
        avg = 1
        n_empty = num - len(seq)

    out = []
    last = 0.0

    while last < len(seq):
        # if only one element would be left in the last run, add it to the current chunk
        if (int(last + avg) + 1) == len(seq):
            last_append_idx = int(last + avg) + 1
        else:
            last_append_idx = int(last + avg)

        out.append(seq[int(last):last_append_idx])

        if (int(last + avg) + 1) == len(seq):
            last += avg + 1
        else:
            last += avg

    # append empty lists if len(seq) < num
    out += [[]] * n_empty

    return out


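For instance (illustrative), ten elements split into three chunks of similar size, with the single leftover element merged into the last chunk:

    compute_chunks(list(range(10)), 3)
    # -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
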
def bash(command):
    """
    Executes a bash command and returns its output from stdout (uses os.popen).

    Parameters
    ----------
    command : str
        Bash command.

    Returns
    -------
    output : str
        Output from stdout.
    """
    print("Running " + command)
    return os.popen(command).read()
    # alternative via subprocess, returning (stdout, stderr):
    # process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, shell=True)
    # output, error = process.communicate()
    # return output, error


def bash_call(command):
    """
    Executes a bash command in a non-blocking subprocess (uses subprocess.Popen).
    No output is captured.

    Parameters
    ----------
    command : str
        Bash command.
    """
    subprocess.Popen(command, shell=True)


def invert(trans):
    """
    Invert a rigid-body (rotation + translation) transformation matrix.

    Parameters
    ----------
    trans : np.ndarray of float
        (4, 4) Transformation matrix, with the rotation part in trans[:3, :3] and
        the translation in trans[:3, 3].

    Returns
    -------
    trans_inv : np.ndarray of float
        (4, 4) Inverse transformation matrix.
    """
    rot = pynibs.normalize_rot(trans[:3, :3].flatten())
    result = np.zeros((4, 4))
    result[3, 3] = 1.0
    t = -rot.T.dot(trans[:3, 3])
    result[:3, :3] = rot.T
    result[:3, 3] = t
    return result


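A sanity-check sketch (editorial illustration; assumes pynibs is importable and that pynibs.normalize_rot returns an already orthonormal rotation unchanged): for a pure translation the result should match the general matrix inverse:

    T = np.eye(4)
    T[:3, 3] = [10.0, -5.0, 2.0]
    np.allclose(invert(T), np.linalg.inv(T))   # expected True
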
def list2dict(l):
    """
    Transform a list of dicts with the same keys into a dict of lists.

    Parameters
    ----------
    l : list of dict
        List containing dictionaries with the same keys.

    Returns
    -------
    d : dict of lists
        Dictionary containing the entries in lists.
    """
    keys = l[0].keys()
    d = dict()

    for key in keys:
        d[key] = [item[key] for item in l]

    return d


def recursive_len(item):
    """
    Determine the length of a list of lists (recursively).

    Parameters
    ----------
    item : list of list
        List of lists.

    Returns
    -------
    len : int
        Total number of non-list elements.
    """
    if isinstance(item, list):
        return sum(recursive_len(subitem) for subitem in item)
    else:
        return 1


def add_center(var):
    """
    Adds the center to an argument list.

    Parameters
    ----------
    var : list of float
        (2) List containing two values [f1, f2].

    Returns
    -------
    out : list of float
        (3) List containing the average value in the middle [f1, mean(f1, f2), f2].
    """
    return [var[0], sum(var) / 2, var[1]]


591
+ def unique_rows(a):
592
+ """
593
+ Returns the unique rows of np.array(a).
594
+
595
+ Parameters
596
+ ----------
597
+ a : np.ndarray of float
598
+ (m, n) Array to search for double row entries.
599
+
600
+ Returns
601
+ -------
602
+ a_unique : np.array
603
+ (k, n) array a with only unique rows.
604
+ """
605
+ b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
606
+ _, idx = np.unique(b, return_index=True)
607
+ return a[idx]
608
+ # alternative but ~10x slower:
609
+ # surface_points=np.vstack({tuple(row) for row in surface_points})
610
+
611
+
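For example (illustrative):

    a = np.array([[1.0, 2.0], [1.0, 2.0], [3.0, 4.0]])
    unique_rows(a)   # -> the two distinct rows [1., 2.] and [3., 4.]
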
def calc_n_network_combs(n_e, n_c, n_i):
    """
    Determine the number of combinations if all conditions may be replaced between n_i elements
    (mixed interaction).

    Parameters
    ----------
    n_e : int
        Number of elements in the ROI.
    n_c : int
        Number of conditions (I/O curves).
    n_i : int
        Number of maximum interactions.

    Returns
    -------
    n_comb : int
        Number of combinations.
    """
    return binom(n_e, n_i) * np.sum([((n_i - 1) ** k) * binom(n_c, k) for k in range(1, n_c)])


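Worked example (illustrative numbers): for n_e=10 elements, n_c=3 conditions, and n_i=2 interactions, binom(10, 2) = 45 and the inner sum is binom(3, 1) + binom(3, 2) = 6, giving 270 combinations:

    calc_n_network_combs(n_e=10, n_c=3, n_i=2)   # -> 270.0
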
def load_muaps(fn_muaps, fs=1e6, fs_downsample=1e5):
    """
    Load MUAP shapes from the "MUAPShapes" dataset of an .hdf5 file and resample
    them from fs to fs_downsample (in Hz).

    Returns
    -------
    muaps : np.ndarray of float
        (n_t_downsampled, n_mu) Resampled MUAPs.
    t_muap : np.ndarray of float
        (n_t_downsampled,) Time axis in s.
    """
    # load MUAPs and downsample
    with h5py.File(fn_muaps, "r") as f:
        muaps_orig = f["MUAPShapes"][:]
    N_MU = muaps_orig.shape[1]

    t_muap_orig = np.linspace(0, 1 / fs * (muaps_orig.shape[0] - 1), muaps_orig.shape[0])
    t_muap = np.linspace(0, t_muap_orig[-1], int(t_muap_orig[-1] * fs_downsample + 1))
    muaps = np.zeros((len(t_muap), N_MU))

    for i in range(N_MU):
        muaps[:, i] = np.interp(t_muap, t_muap_orig, muaps_orig[:, i])

    return muaps, t_muap


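A self-contained sketch with a throwaway file (hypothetical name, synthetic data): 1000 samples at fs=1 MHz span ~1 ms, which resample to 100 points at 100 kHz:

    import h5py
    with h5py.File("muaps_demo.hdf5", "w") as f:
        f.create_dataset("MUAPShapes", data=np.random.rand(1000, 5))
    muaps, t_muap = load_muaps("muaps_demo.hdf5", fs=1e6, fs_downsample=1e5)
    muaps.shape   # -> (100, 5)
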
def cross_product(A, B):
    """
    Evaluates the cross product between the vector pairs in A and B using explicit
    component arithmetic.

    Parameters
    ----------
    A : np.ndarray of float
        (N, 3) First input vectors.
    B : np.ndarray of float
        (N, 3) Second input vectors; the cross product is evaluated row-wise between A and B.

    Returns
    -------
    c : np.ndarray of float
        (N, 3) Cross product between vector pairs in A and B.
    """
    c1 = np.multiply(A[:, 1], B[:, 2]) - np.multiply(A[:, 2], B[:, 1])
    c2 = np.multiply(A[:, 2], B[:, 0]) - np.multiply(A[:, 0], B[:, 2])
    c3 = np.multiply(A[:, 0], B[:, 1]) - np.multiply(A[:, 1], B[:, 0])
    return np.vstack([c1, c2, c3]).transpose()


def cross_product_einsum2(a, b):
    """
    Evaluates the cross product between the vector pairs in a and b using the double Einstein sum.

    Parameters
    ----------
    a : np.ndarray of float
        (N, 3) First input vectors.
    b : np.ndarray of float
        (N, 3) Second input vectors; the cross product is evaluated row-wise between a and b.

    Returns
    -------
    c : np.ndarray of float
        (N, 3) Cross product between vector pairs in a and b.
    """
    # Levi-Civita tensor
    eijk = np.zeros((3, 3, 3))
    eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
    eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1

    return np.einsum('iak,ak->ai', np.einsum('ijk,aj->iak', eijk, a), b)


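Both variants agree with np.cross (quick check, illustrative):

    a = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    b = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    np.allclose(cross_product(a, b), np.cross(a, b))          # -> True
    np.allclose(cross_product_einsum2(a, b), np.cross(a, b))  # -> True
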
def intersection_vec_plan(ray_dir, ray_origin, plane_n, plane_p, eps=1e-6):
    """
    Computes the intersection between a vector ('ray') and a plane.

    Parameters
    ----------
    ray_dir : np.ndarray
        (3,) (Rotated) vector direction.
    ray_origin : np.ndarray
        (3,) Any point on the ray.
    plane_n : np.ndarray
        (3,) Plane normal.
    plane_p : np.ndarray
        (3,) Any point on the plane.
    eps : float, default: 1e-6
        Resolution. Rays closer to parallel to the plane than this are treated as non-intersecting.

    Returns
    -------
    intersec : np.ndarray
        (3,) Intersection location. (np.inf, np.inf, np.inf) if no intersection.
    """
    ndotu = plane_n.dot(ray_dir)

    if abs(ndotu) < eps:
        return np.array([np.inf, np.inf, np.inf])
    w = ray_origin - plane_p
    si = -plane_n.dot(w) / ndotu
    intersec = w + si * ray_dir + plane_p

    return intersec
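
For example (illustrative), a ray pointing straight down from (1, 2, 5) hits the z = 0 plane at (1, 2, 0), while a ray parallel to the plane returns infinities:

    n, p = np.array([0.0, 0.0, 1.0]), np.zeros(3)   # z = 0 plane
    intersection_vec_plan(np.array([0.0, 0.0, -1.0]), np.array([1.0, 2.0, 5.0]), n, p)
    # -> array([1., 2., 0.])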