braindecode-0.8-py3-none-any.whl → braindecode-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of braindecode might be problematic.
- braindecode/__init__.py +1 -2
- braindecode/augmentation/__init__.py +50 -0
- braindecode/augmentation/base.py +222 -0
- braindecode/augmentation/functional.py +1096 -0
- braindecode/augmentation/transforms.py +1274 -0
- braindecode/classifier.py +26 -24
- braindecode/datasets/__init__.py +34 -0
- braindecode/datasets/base.py +840 -0
- braindecode/datasets/bbci.py +694 -0
- braindecode/datasets/bcicomp.py +194 -0
- braindecode/datasets/bids.py +245 -0
- braindecode/datasets/mne.py +172 -0
- braindecode/datasets/moabb.py +209 -0
- braindecode/datasets/nmt.py +311 -0
- braindecode/datasets/sleep_physio_challe_18.py +412 -0
- braindecode/datasets/sleep_physionet.py +125 -0
- braindecode/datasets/tuh.py +588 -0
- braindecode/datasets/xy.py +95 -0
- braindecode/datautil/__init__.py +49 -0
- braindecode/datautil/serialization.py +342 -0
- braindecode/datautil/util.py +41 -0
- braindecode/eegneuralnet.py +63 -47
- braindecode/functional/__init__.py +10 -0
- braindecode/functional/functions.py +251 -0
- braindecode/functional/initialization.py +47 -0
- braindecode/models/__init__.py +52 -0
- braindecode/models/atcnet.py +652 -0
- braindecode/models/attentionbasenet.py +550 -0
- braindecode/models/base.py +296 -0
- braindecode/models/biot.py +483 -0
- braindecode/models/contrawr.py +296 -0
- braindecode/models/ctnet.py +450 -0
- braindecode/models/deep4.py +322 -0
- braindecode/models/deepsleepnet.py +295 -0
- braindecode/models/eegconformer.py +372 -0
- braindecode/models/eeginception_erp.py +304 -0
- braindecode/models/eeginception_mi.py +371 -0
- braindecode/models/eegitnet.py +301 -0
- braindecode/models/eegminer.py +255 -0
- braindecode/models/eegnet.py +473 -0
- braindecode/models/eegnex.py +247 -0
- braindecode/models/eegresnet.py +362 -0
- braindecode/models/eegsimpleconv.py +199 -0
- braindecode/models/eegtcnet.py +335 -0
- braindecode/models/fbcnet.py +221 -0
- braindecode/models/fblightconvnet.py +313 -0
- braindecode/models/fbmsnet.py +325 -0
- braindecode/models/hybrid.py +126 -0
- braindecode/models/ifnet.py +441 -0
- braindecode/models/labram.py +1166 -0
- braindecode/models/msvtnet.py +375 -0
- braindecode/models/sccnet.py +182 -0
- braindecode/models/shallow_fbcsp.py +208 -0
- braindecode/models/signal_jepa.py +1012 -0
- braindecode/models/sinc_shallow.py +337 -0
- braindecode/models/sleep_stager_blanco_2020.py +167 -0
- braindecode/models/sleep_stager_chambon_2018.py +157 -0
- braindecode/models/sleep_stager_eldele_2021.py +536 -0
- braindecode/models/sparcnet.py +378 -0
- braindecode/models/summary.csv +41 -0
- braindecode/models/syncnet.py +232 -0
- braindecode/models/tcn.py +273 -0
- braindecode/models/tidnet.py +395 -0
- braindecode/models/tsinception.py +258 -0
- braindecode/models/usleep.py +340 -0
- braindecode/models/util.py +133 -0
- braindecode/modules/__init__.py +38 -0
- braindecode/modules/activation.py +60 -0
- braindecode/modules/attention.py +757 -0
- braindecode/modules/blocks.py +108 -0
- braindecode/modules/convolution.py +274 -0
- braindecode/modules/filter.py +632 -0
- braindecode/modules/layers.py +133 -0
- braindecode/modules/linear.py +50 -0
- braindecode/modules/parametrization.py +38 -0
- braindecode/modules/stats.py +77 -0
- braindecode/modules/util.py +77 -0
- braindecode/modules/wrapper.py +75 -0
- braindecode/preprocessing/__init__.py +37 -0
- braindecode/preprocessing/mne_preprocess.py +77 -0
- braindecode/preprocessing/preprocess.py +478 -0
- braindecode/preprocessing/windowers.py +1031 -0
- braindecode/regressor.py +23 -12
- braindecode/samplers/__init__.py +18 -0
- braindecode/samplers/base.py +401 -0
- braindecode/samplers/ssl.py +263 -0
- braindecode/training/__init__.py +23 -0
- braindecode/training/callbacks.py +23 -0
- braindecode/training/losses.py +105 -0
- braindecode/training/scoring.py +483 -0
- braindecode/util.py +55 -59
- braindecode/version.py +1 -1
- braindecode/visualization/__init__.py +8 -0
- braindecode/visualization/confusion_matrices.py +289 -0
- braindecode/visualization/gradients.py +57 -0
- {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/METADATA +39 -55
- braindecode-1.0.0.dist-info/RECORD +101 -0
- {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/WHEEL +1 -1
- {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info/licenses}/LICENSE.txt +1 -1
- braindecode-1.0.0.dist-info/licenses/NOTICE.txt +20 -0
- braindecode-0.8.dist-info/RECORD +0 -11
- {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/top_level.txt +0 -0
braindecode/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.8"
+__version__ = "1.0.0"

braindecode/visualization/confusion_matrices.py
ADDED
@@ -0,0 +1,289 @@
+# Authors: Robin Schirrmeister <robintibor@gmail.com>
+#
+# License: BSD (3-clause)
+
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib import cm
+
+
+def plot_confusion_matrix(
+    confusion_mat,
+    class_names=None,
+    figsize=None,
+    colormap=cm.bwr,
+    textcolor="black",
+    vmin=None,
+    vmax=None,
+    fontweight="normal",
+    rotate_row_labels=90,
+    rotate_col_labels=0,
+    with_f1_score=False,
+    norm_axes=(0, 1),
+    rotate_precision=False,
+    class_names_fontsize=12,
+):
+    """
+
+    Generates a confusion matrix with additional precision and sensitivity metrics as in [1]_.
+
+    Parameters
+    ----------
+    confusion_mat: 2d numpy array
+        A confusion matrix, e.g. sklearn confusion matrix:
+        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
+    class_names: array, optional
+        List of classes/targets.
+    figsize: tuple, optional
+        Size of the generated confusion matrix figure.
+    colormap: matplotlib cm colormap, optional
+    textcolor: str, optional
+        Color of the text in the figure.
+    vmin, vmax: float, optional
+        The data range that the colormap covers.
+    fontweight: str, optional
+        Weight of the font in the figure:
+        [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
+    rotate_row_labels: int, optional
+        The rotation angle of the row labels
+    rotate_col_labels: int, optional
+        The rotation angle of the column labels
+    with_f1_score: bool, optional
+    norm_axes: tuple, optional
+    rotate_precision: bool, optional
+    class_names_fontsize: int, optional
+
+    Returns
+    -------
+    fig: matplotlib figure
+
+    References
+    ----------
+
+    .. [1] Schirrmeister, R. T., Springenberg, J. T., Fiederer, L. D. J.,
+       Glasstetter, M., Eggensperger, K., Tangermann, M., Hutter, F. & Ball, T. (2017).
+       Deep learning with convolutional neural networks for EEG decoding and
+       visualization.
+       Human Brain Mapping , Aug. 2017. Online: http://dx.doi.org/10.1002/hbm.23730
+    """
+    # transpose to get confusion matrix same way as matlab
+    confusion_mat = confusion_mat.T
+    n_classes = confusion_mat.shape[0]
+    if class_names is None:
+        class_names = [str(i_class + 1) for i_class in range(n_classes)]
+
+    # norm by all targets
+    normed_conf_mat = confusion_mat / np.float32(
+        np.sum(confusion_mat, axis=norm_axes, keepdims=True)
+    )
+
+    fig = plt.figure(figsize=figsize)
+    plt.clf()
+    ax = fig.add_subplot(111)
+    ax.set_aspect(1)
+    if vmin is None:
+        vmin = np.min(normed_conf_mat)
+    if vmax is None:
+        vmax = np.max(normed_conf_mat)
+
+    # see http://stackoverflow.com/a/31397438/1469195
+    # brighten so that black text remains readable
+    # used alpha=0.6 before
+    def _brighten(
+        x,
+    ):
+        brightened_x = 1 - ((1 - np.array(x)) * 0.4)
+        return brightened_x
+
+    brightened_cmap = _cmap_map(_brighten, colormap)  # colormap #
+    ax.imshow(
+        np.array(normed_conf_mat),
+        cmap=brightened_cmap,
+        interpolation="nearest",
+        vmin=vmin,
+        vmax=vmax,
+    )
+
+    # make space for precision and sensitivity
+    plt.xlim(-0.5, normed_conf_mat.shape[0] + 0.5)
+    plt.ylim(normed_conf_mat.shape[1] + 0.5, -0.5)
+    width = len(confusion_mat)
+    height = len(confusion_mat[0])
+    for x in range(width):
+        for y in range(height):
+            if x == y:
+                this_font_weight = "bold"
+            else:
+                this_font_weight = fontweight
+            annotate_str = "{:d}".format(confusion_mat[x][y])
+            annotate_str += "\n"
+            ax.annotate(
+                annotate_str.format(confusion_mat[x][y]),
+                xy=(y, x),
+                horizontalalignment="center",
+                verticalalignment="center",
+                fontsize=12,
+                color=textcolor,
+                fontweight=this_font_weight,
+            )
+            if x != y or (not with_f1_score):
+                ax.annotate(
+                    "\n\n{:4.1f}%".format(normed_conf_mat[x][y] * 100),
+                    xy=(y, x),
+                    horizontalalignment="center",
+                    verticalalignment="center",
+                    fontsize=10,
+                    color=textcolor,
+                    fontweight=this_font_weight,
+                )
+            else:
+                assert x == y
+                precision = confusion_mat[x][x] / float(np.sum(confusion_mat[x, :]))
+                sensitivity = confusion_mat[x][x] / float(np.sum(confusion_mat[:, y]))
+                f1_score = 2 * precision * sensitivity / (precision + sensitivity)
+
+                ax.annotate(
+                    "\n{:4.1f}%\n{:4.1f}% (F)".format(
+                        (confusion_mat[x][y] / float(np.sum(confusion_mat))) * 100,
+                        f1_score * 100,
+                    ),
+                    xy=(y, x + 0.1),
+                    horizontalalignment="center",
+                    verticalalignment="center",
+                    fontsize=10,
+                    color=textcolor,
+                    fontweight=this_font_weight,
+                )
+
+    # Add values for target correctness etc.
+    for x in range(width):
+        y = len(confusion_mat)
+        if float(np.sum(confusion_mat[x, :])) == 0:
+            annotate_str = "-"
+        else:
+            correctness = confusion_mat[x][x] / float(np.sum(confusion_mat[x, :]))
+            annotate_str = ""
+            annotate_str += "\n{:5.2f}%".format(correctness * 100)
+        ax.annotate(
+            annotate_str,
+            xy=(y, x),
+            horizontalalignment="center",
+            verticalalignment="center",
+            fontsize=12,
+        )
+
+    for y in range(height):
+        x = len(confusion_mat)
+        if float(np.sum(confusion_mat[:, y])) == 0:
+            annotate_str = "-"
+        else:
+            correctness = confusion_mat[y][y] / float(np.sum(confusion_mat[:, y]))
+            annotate_str = ""
+            annotate_str += "\n{:5.2f}%".format(correctness * 100)
+        ax.annotate(
+            annotate_str,
+            xy=(y, x),
+            horizontalalignment="center",
+            verticalalignment="center",
+            fontsize=12,
+        )
+
+    overall_correctness = np.sum(np.diag(confusion_mat)) / np.sum(confusion_mat).astype(
+        float
+    )
+    ax.annotate(
+        "{:5.2f}%".format(overall_correctness * 100),
+        xy=(len(confusion_mat), len(confusion_mat)),
+        horizontalalignment="center",
+        verticalalignment="center",
+        fontsize=12,
+        fontweight="bold",
+    )
+
+    plt.xticks(
+        range(width),
+        class_names,
+        fontsize=class_names_fontsize,
+        rotation=rotate_col_labels,
+    )
+    plt.yticks(
+        np.arange(0, height),
+        class_names,
+        va="center",
+        fontsize=class_names_fontsize,
+        rotation=rotate_row_labels,
+    )
+    plt.grid(False)
+    plt.ylabel("Predictions", fontsize=15)
+    plt.xlabel("Targets", fontsize=15)
+
+    # n classes is also shape of matrix/size
+    ax.text(-1.2, n_classes + 0.2, "Recall", ha="center", va="center", fontsize=13)
+    if rotate_precision:
+        rotation = 90
+        x_pos = -1.1
+        va = "center"
+    else:
+        rotation = 0
+        x_pos = -0.8
+        va = "top"
+    ax.text(
+        n_classes,
+        x_pos,
+        "Precision",
+        ha="center",
+        va=va,
+        rotation=rotation,  # 270,
+        fontsize=13,
+    )
+
+    return fig
+
+
+# see http://stackoverflow.com/a/31397438/1469195
+def _cmap_map(function, cmap, name="colormap_mod", N=None, gamma=None):
+    """
+    Modify a colormap using `function` which must operate on 3-element
+    arrays of [r, g, b] values.
+
+    You may specify the number of colors, `N`, and the opacity, `gamma`,
+    value of the returned colormap. These values default to the ones in
+    the input `cmap`.
+
+    You may also specify a `name` for the colormap, so that it can be
+    loaded using plt.get_cmap(name).
+    """
+    from matplotlib.colors import LinearSegmentedColormap as lsc
+
+    if N is None:
+        N = cmap.N
+    if gamma is None:
+        gamma = cmap._gamma
+    cdict = cmap._segmentdata
+    # Cast the steps into lists:
+    step_dict = {key: list(map(lambda x: x[0], cdict[key])) for key in cdict}
+    # Now get the unique steps (first column of the arrays):
+    step_dicts = np.array(list(step_dict.values()))
+    step_list = np.unique(step_dicts)
+    # 'y0', 'y1' are as defined in LinearSegmentedColormap docstring:
+    y0 = cmap(step_list)[:, :3]
+    y1 = y0.copy()[:, :3]
+    # Go back to catch the discontinuities, and place them into y0, y1
+    for iclr, key in enumerate(["red", "green", "blue"]):
+        for istp, step in enumerate(step_list):
+            try:
+                ind = step_dict[key].index(step)
+            except ValueError:
+                # This step is not in this color
+                continue
+            y0[istp, iclr] = cdict[key][ind][1]
+            y1[istp, iclr] = cdict[key][ind][2]
+    # Map the colors to their new values:
+    y0 = np.array(list(map(function, y0)))
+    y1 = np.array(list(map(function, y1)))
+    # Build the new colormap (overwriting step_dict):
+    for iclr, clr in enumerate(["red", "green", "blue"]):
+        step_dict[clr] = np.vstack((step_list, y0[:, iclr], y1[:, iclr])).T
+    # Remove alpha, otherwise crashes...
+    step_dict.pop("alpha", None)
+    return lsc(name, step_dict, N=N, gamma=gamma)

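As orientation for the function added above, here is a minimal usage sketch. The labels and class names are invented for illustration and are not part of this release; the import path is the module shown in this diff.

# Minimal usage sketch for the new plot_confusion_matrix helper (illustrative only;
# the labels and class names below are made up, not taken from braindecode).
import numpy as np
from sklearn.metrics import confusion_matrix
from braindecode.visualization.confusion_matrices import plot_confusion_matrix

y_true = np.array([0, 0, 1, 1, 2, 2, 2, 1])
y_pred = np.array([0, 1, 1, 1, 2, 0, 2, 2])
conf_mat = confusion_matrix(y_true, y_pred)  # 2d numpy array, as the docstring requires

fig = plot_confusion_matrix(
    conf_mat,
    class_names=["rest", "left hand", "right hand"],
    with_f1_score=True,
)
fig.savefig("confusion_matrix.png")
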
braindecode/visualization/gradients.py
ADDED
@@ -0,0 +1,57 @@
+# Authors: Robin Schirrmeister <robintibor@gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import torch
+from skorch.utils import to_numpy, to_tensor
+
+
+def compute_amplitude_gradients(model, dataset, batch_size):
+    loader = torch.utils.data.DataLoader(
+        dataset, batch_size=batch_size, drop_last=False, shuffle=False
+    )
+    all_amp_grads = []
+    for batch_X, _, _ in loader:
+        this_amp_grads = compute_amplitude_gradients_for_X(
+            model,
+            batch_X,
+        )
+        all_amp_grads.append(this_amp_grads)
+    all_amp_grads = np.concatenate(all_amp_grads, axis=1)
+    return all_amp_grads
+
+
+def compute_amplitude_gradients_for_X(model, X):
+    device = next(model.parameters()).device
+    ffted = np.fft.rfft(X, axis=2)
+    amps = np.abs(ffted)
+    phases = np.angle(ffted)
+    amps_th = to_tensor(amps.astype(np.float32), device=device).requires_grad_(True)
+    phases_th = to_tensor(phases.astype(np.float32), device=device).requires_grad_(True)
+
+    fft_coefs = amps_th.unsqueeze(-1) * torch.stack(
+        (torch.cos(phases_th), torch.sin(phases_th)), dim=-1
+    )
+    fft_coefs = fft_coefs.squeeze(3)
+
+    try:
+        complex_fft_coefs = torch.view_as_complex(fft_coefs)
+        iffted = torch.fft.irfft(complex_fft_coefs, n=X.shape[2], dim=2)
+    except AttributeError:
+        iffted = torch.irfft(  # Deprecated since 1.7
+            fft_coefs, signal_ndim=1, signal_sizes=(X.shape[2],)
+        )
+
+    outs = model(iffted)
+
+    n_filters = outs.shape[1]
+    amp_grads_per_filter = np.full((n_filters,) + ffted.shape, np.nan, dtype=np.float32)
+    for i_filter in range(n_filters):
+        mean_out = torch.mean(outs[:, i_filter])
+        mean_out.backward(retain_graph=True)
+        amp_grads = to_numpy(amps_th.grad.clone())
+        amp_grads_per_filter[i_filter] = amp_grads
+        amps_th.grad.zero_()
+    assert not np.any(np.isnan(amp_grads_per_filter))
+    return amp_grads_per_filter

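For context, a small sketch of how compute_amplitude_gradients could be driven. The toy dataset and the choice of ShallowFBCSPNet are assumptions made purely for illustration; the only requirement imposed by the code above is that the dataset yields (X, y, ind) triples.

# Illustrative sketch only: the random data and the ShallowFBCSPNet choice are assumptions,
# not part of this diff. The dataset just has to yield (X, y, ind) triples, matching the
# "for batch_X, _, _ in loader" unpacking above.
import torch
from braindecode.models import ShallowFBCSPNet
from braindecode.visualization.gradients import compute_amplitude_gradients

X = torch.randn(8, 22, 1000)              # (n_windows, n_channels, n_times)
y = torch.zeros(8, dtype=torch.long)
ind = torch.arange(8)
dataset = torch.utils.data.TensorDataset(X, y, ind)

model = ShallowFBCSPNet(n_chans=22, n_outputs=4, n_times=1000, final_conv_length="auto")
amp_grads = compute_amplitude_gradients(model, dataset, batch_size=4)
# (n_outputs, n_windows, n_channels, n_freq_bins): gradient of each output unit's mean
# activation with respect to the amplitude spectrum of the input windows.
print(amp_grads.shape)
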
{braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: braindecode
-Version: 0.8
+Version: 1.0.0
 Summary: Deep learning software to decode EEG, ECG or MEG signals
 Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
 Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
@@ -15,14 +15,15 @@ Classifier: Intended Audience :: Science/Research
 Classifier: Topic :: Software Development :: Build Tools
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: License :: OSI Approved :: BSD License
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
-
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >3.9
 Description-Content-Type: text/x-rst
 License-File: LICENSE.txt
-
+License-File: NOTICE.txt
+Requires-Dist: mne>=1.10.0
+Requires-Dist: mne_bids>=0.16
 Requires-Dist: numpy
 Requires-Dist: pandas
 Requires-Dist: scipy
@@ -30,29 +31,36 @@ Requires-Dist: matplotlib
 Requires-Dist: h5py
 Requires-Dist: skorch
 Requires-Dist: torch
+Requires-Dist: torchaudio
 Requires-Dist: einops
 Requires-Dist: joblib
 Requires-Dist: torchinfo
-Requires-Dist:
-
-Requires-Dist:
-Requires-Dist:
-Requires-Dist: pydata-sphinx-theme ; extra == 'docs'
-Requires-Dist: numpydoc ; extra == 'docs'
-Requires-Dist: memory-profiler ; extra == 'docs'
-Requires-Dist: pillow ; extra == 'docs'
-Requires-Dist: ipython ; extra == 'docs'
-Requires-Dist: sphinx-design ; extra == 'docs'
-Requires-Dist: lightning ; extra == 'docs'
-Requires-Dist: seaborn ; extra == 'docs'
-Requires-Dist: pre-commit ; extra == 'docs'
+Requires-Dist: wfdb
+Requires-Dist: h5py
+Requires-Dist: linear_attention_transformer
+Requires-Dist: docstring_inheritance
 Provides-Extra: moabb
-Requires-Dist: moabb
+Requires-Dist: moabb>=1.2.0; extra == "moabb"
 Provides-Extra: tests
-Requires-Dist: pytest
-Requires-Dist: pytest-cov
-Requires-Dist: codecov
-Requires-Dist:
+Requires-Dist: pytest; extra == "tests"
+Requires-Dist: pytest-cov; extra == "tests"
+Requires-Dist: codecov; extra == "tests"
+Requires-Dist: pytest_cases; extra == "tests"
+Requires-Dist: mypy; extra == "tests"
+Provides-Extra: docs
+Requires-Dist: sphinx_gallery; extra == "docs"
+Requires-Dist: sphinx_rtd_theme; extra == "docs"
+Requires-Dist: pydata_sphinx_theme; extra == "docs"
+Requires-Dist: numpydoc; extra == "docs"
+Requires-Dist: memory_profiler; extra == "docs"
+Requires-Dist: pillow; extra == "docs"
+Requires-Dist: ipython; extra == "docs"
+Requires-Dist: sphinx_design; extra == "docs"
+Requires-Dist: lightning; extra == "docs"
+Requires-Dist: seaborn; extra == "docs"
+Requires-Dist: pre-commit; extra == "docs"
+Requires-Dist: openneuro-py; extra == "docs"
+Dynamic: license-file
 
 .. image:: https://badges.gitter.im/braindecodechat/community.svg
     :alt: Join the chat at https://gitter.im/braindecodechat/community
@@ -166,37 +174,13 @@ as well as the `MNE-Python <https://mne.tools>`_ software that is used by braind
 Licensing
 ^^^^^^^^^
 
-
-
-This software is OSI Certified Open Source Software.
-OSI Certified is a certification mark of the Open Source Initiative.
-
-Copyright (c) 2011-2022, authors of Braindecode.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
+This project is primarily licensed under the BSD-3-Clause License.
 
-
-
-and/or other materials provided with the distribution.
+Additional Components
+~~~~~~~~~~~~~~~~~~~~~
 
-
-
-this software without specific prior written permission.
+Some components within this repository are licensed under the Creative Commons Attribution-NonCommercial 4.0 International
+License.
 
-
-
-limited to, the implied warranties of merchantability and fitness for
-a particular purpose are disclaimed. In no event shall the copyright
-owner or contributors be liable for any direct, indirect, incidental,
-special, exemplary, or consequential damages (including, but not
-limited to, procurement of substitute goods or services; loss of use,
-data, or profits; or business interruption) however caused and on any
-theory of liability, whether in contract, strict liability, or tort
-(including negligence or otherwise) arising in any way out of the use
-of this software, even if advised of the possibility of such
-damage.**
+Please refer to the ``LICENSE`` and ``NOTICE`` files for more detailed
+information.

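A quick way to sanity-check the new pins after upgrading, using only the standard library; the printed values simply restate the Requires-Dist entries above.

# Inspect the installed metadata after upgrading to 1.0.0 (standard library only).
from importlib.metadata import version, requires

print(version("braindecode"))                                    # 1.0.0
print([r for r in requires("braindecode") if r.startswith("mne")])
# ['mne>=1.10.0', 'mne_bids>=0.16'], per the Requires-Dist lines above
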
braindecode-1.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,101 @@
+braindecode/__init__.py,sha256=Ac3LEEyIHWFY_fFh3eAY1GZUqXcUxVSJwOSUCwGEDvQ,182
+braindecode/classifier.py,sha256=k9vSCtfQbld0YVleDi5rrrmk6k_k5JYEPPBYcNxYjZ8,9807
+braindecode/eegneuralnet.py,sha256=dz8k_-2jV7WqkaX4bQG-dmr-vRT7ZtOwJqomXyC9PTw,15287
+braindecode/regressor.py,sha256=VLfrpiXklwI4onkwue3QmzlBWcvspu0tlrLo9RT1Oiw,9375
+braindecode/util.py,sha256=J-tBcDJNlMTIFW2mfOy6Ko0nsgdP4obRoEVDeg2rFH0,12686
+braindecode/version.py,sha256=J-j-u0itpEFT6irdmWmixQqYMadNl1X91TxUmoiLHMI,22
+braindecode/augmentation/__init__.py,sha256=LG7ONqCufYAF9NZt8POIp10lYXb8iSueYkF-CWGK2Ls,1001
+braindecode/augmentation/base.py,sha256=gg7wYsVfa9jfqBddtE03B5ZrPHFFmPl2sa3LOrRnGfo,7325
+braindecode/augmentation/functional.py,sha256=ygkMNEFHaUdRQfk7meMML19FnM406Uf34h-ztKXdJwM,37978
+braindecode/augmentation/transforms.py,sha256=JFkUll0bUaaMBdWFcFex9aYqgnvfpc3IZbs8hHB_Zdo,44273
+braindecode/datasets/__init__.py,sha256=CTl8ucbG948ZJqntEBELb-Pn8GsZLfFZLgVcB-fhw4k,891
+braindecode/datasets/base.py,sha256=ED8RQWusMyWf0T7b_HXwouR2Ax47qppEc506AlSzBt0,32155
+braindecode/datasets/bbci.py,sha256=BC9o1thEyYBREAo930O7zZz3xZB-l4Odt5j8E_1huXI,19277
+braindecode/datasets/bcicomp.py,sha256=ER_XmqxhpoO2FWELMesQXQ40OTe7BXoy7nYDSiZG9kE,7556
+braindecode/datasets/bids.py,sha256=4asq1HyQHgJjwW7w-GMlvTVQhi-hR2HWLJ8Z__UrUS4,8846
+braindecode/datasets/mne.py,sha256=Dg6RZAAwd8TVGrvLOPF5B_JrbyGUWg52vWmn6fLMOQM,6135
+braindecode/datasets/moabb.py,sha256=JmBcFV7QJT8GCgLNNKWgxJVnEVnO5wd9U_uiIqTIxDM,7091
+braindecode/datasets/nmt.py,sha256=E4T8OYBEwWRSjh7VFzmyxaZbf5ufFVEBYYmQEd1ghUU,10430
+braindecode/datasets/sleep_physio_challe_18.py,sha256=KTvUtuarOOYu6PHN6H1vcy4W9xilwtZE08n7JSrk8Cs,15414
+braindecode/datasets/sleep_physionet.py,sha256=jieRx6u-MQ4jn_5Zox_pVV8WjBwXKLv9uq4GXRAZ_58,4087
+braindecode/datasets/tuh.py,sha256=iG1hOtdevzKGEVpeuRFDBOnsW_rWa5zEmMFJfYR1hqg,22867
+braindecode/datasets/xy.py,sha256=xT-nS_5jpuVKJ0SGqc7Ia0FVpqj86UfuzcYQdEGZdp0,2986
+braindecode/datautil/__init__.py,sha256=53iVndXeqRYQ2k0S4qtjoMHYLqrrjIivYkUBgxT_wHw,1713
+braindecode/datautil/serialization.py,sha256=cqLn7H4NW_8O-Et-w7GoDMyCWuILZoV8LdqKGAGd13k,12574
+braindecode/datautil/util.py,sha256=ZfDoxLieKsgI8xcWQqebV-vJ5pJYRvRRHkEwhwpgoKU,674
+braindecode/functional/__init__.py,sha256=Jpp3A1OCdYoJ94y5VVDb9jjiimIcKk2RdrGnaOlh2cw,213
+braindecode/functional/functions.py,sha256=7HQHmGG-kk3VjPY5cE-XFK6_9Mlj0ZODbzYAlpcXW3U,8634
+braindecode/functional/initialization.py,sha256=BUSC7y2TMsfShpMYBVwm3xg3ODFqWp-STH7yD4sn8zk,1388
+braindecode/models/__init__.py,sha256=FLOI-_mvkwapRY2DhQp-bSf5nWNoG4aMIvV0Unqgyy8,1681
+braindecode/models/atcnet.py,sha256=0DCaJ0FEZhFrA1oYigJkcsS5sSLtEginvjxVVEOxjN0,22767
+braindecode/models/attentionbasenet.py,sha256=KneWnjUA_LMOUF__2xPQmmrdlzpvrxh9A736eoY5A4o,21235
+braindecode/models/base.py,sha256=9icrWNZBGbh_VLyB9m8g_K1QyK7s3mh8X-hJ29gEbWs,10802
+braindecode/models/biot.py,sha256=5BpyBQ8WbFwCDFnTQDqtm1FqORIIvghy22YkLncxXnA,16653
+braindecode/models/contrawr.py,sha256=rcdK8ODlUV9cmdlhholx565lGrYuOZSC7VRC-zF2HoM,9246
+braindecode/models/ctnet.py,sha256=KdYin0IG9AfUx1hlBi7scwSDVPJCLX7XC8vYII8x3Yk,13705
+braindecode/models/deep4.py,sha256=t5aa9O4j0OmDLFyTO1SKZN6VznCz47Uok2QH0LRpkv0,12019
+braindecode/models/deepsleepnet.py,sha256=RrciuVJtZ-fhiUl-yLPfK2FP-G29V5Wor6pPlrMHQWQ,9218
+braindecode/models/eegconformer.py,sha256=Q24aWnTEkfgaxGg0qMcPqMtmyIIYl5z7Ys3lJLrWPLQ,11636
+braindecode/models/eeginception_erp.py,sha256=mwh3rGSHAJVvnbOlYTuWWkKxlmFAdAXBNCrq4IPgOS4,11408
+braindecode/models/eeginception_mi.py,sha256=ta3IPs22mFc8Fg8jRjWCucIQu6tUvSn2xDD4NIRVFZM,12161
+braindecode/models/eegitnet.py,sha256=-twgYp5GCAlBFVl4tAK6qTVvE6wDRWvB9RHeP7h4SHk,9840
+braindecode/models/eegminer.py,sha256=IKVOheOSLUwgy7Tdl1jWt7jaYrw4AecRfVMtMBflOFY,9877
+braindecode/models/eegnet.py,sha256=-5Ixnw7ypd6qxYHXnvVMkErLxH4otlPkEIgIxItZ8eA,16300
+braindecode/models/eegnex.py,sha256=KNJIh8pFNhY087Bey2OPzDD4Uqw9pS6UkwMjnOngBzg,8497
+braindecode/models/eegresnet.py,sha256=cqWOSGqfJN_dNYUU9l8nYd_S3T1N-UX5-encKQzfBlg,12057
+braindecode/models/eegsimpleconv.py,sha256=sHpK-7ZGOCMuXsdkSVuarFTd1T0jMJUP_xwXP3gxQwc,7268
+braindecode/models/eegtcnet.py,sha256=np-93Ttctp2uaEYpMrfXfH5bJmCOUZZHLjv8GJEEym4,10830
+braindecode/models/fbcnet.py,sha256=nMO-xc2XHdlj0h6Veby2sB-nFBNwYA4vm8TMAUfSENA,7494
+braindecode/models/fblightconvnet.py,sha256=d5MwhawhkjilAMo0ckaYMxJhdGMEuorWgHX-TBgwv6s,11041
+braindecode/models/fbmsnet.py,sha256=LYw7Smk0Hpft6PBsSq801xCqU5B88nhX-E-oDfilPnw,11689
+braindecode/models/hybrid.py,sha256=hA8jwD3_3LL71BxUjRM1dkhqlHU9E9hjuDokh-jBq-4,4024
+braindecode/models/ifnet.py,sha256=Y2bwfko3SDjD74AzgUEzgMhKJFGCCw_Q_Noh5VONEjQ,15137
+braindecode/models/labram.py,sha256=PHTf7KuOTnDYeFmiCRxTvAhHzHLXEdIYQWSnQp1wOJI,41034
+braindecode/models/msvtnet.py,sha256=Y2n1X6cklleU4YA6ZIgw70DYTCl88gTMWKqgvWTniwg,12658
+braindecode/models/sccnet.py,sha256=Y9mh9jb0-BKiQU9oD3J-zQqNd83GJIjB_E_rUm2hH90,6704
+braindecode/models/shallow_fbcsp.py,sha256=-sL6XCmCUZVhKKrC84-KWgwhWKQQvev1oNSmH_d6FA4,7499
+braindecode/models/signal_jepa.py,sha256=0YI9thmiP3MRTf-cekewIHoXKjafuZqepLSsvHXN5Cc,37129
+braindecode/models/sinc_shallow.py,sha256=Ilv8K1XhMGiRTBtQdq7L595i6cEFYOBe0_UDv-LqL7s,11907
+braindecode/models/sleep_stager_blanco_2020.py,sha256=qPKMDLuv4J7et4dZHyTe-j0oB6ESYn9mA_aW7RMC-rU,6002
+braindecode/models/sleep_stager_chambon_2018.py,sha256=62x2Rdjd5UZDX8YlnfAtdRCrjLsPvPpnUweGElZLdkw,5213
+braindecode/models/sleep_stager_eldele_2021.py,sha256=-gY-cGmxQ2waA-6fwIvxYWQSodZQKPzXr7v3g3FwBt8,17593
+braindecode/models/sparcnet.py,sha256=3lDsQzwq8r_Dg1HlrZiBmwx_ijCXDm2ngOfTneorBIk,12144
+braindecode/models/summary.csv,sha256=l7HYYwv3Z69JRPVIhVq-wr_nC1J1KIz6IGw_zeRSk58,6110
+braindecode/models/syncnet.py,sha256=nrWJC5ijCSWKVZyRn-dmOuc1t5vk2C6tx8U3U4j5d5Y,8362
+braindecode/models/tcn.py,sha256=siazLYl3zOtD499UZFjQwqsYNOomMX6lGmXGxltYxWk,8170
+braindecode/models/tidnet.py,sha256=k7Q0yAnEBmq1sqhsvoV4-g8wfYSUQ-C3iYxfLp5m8xQ,11805
+braindecode/models/tsinception.py,sha256=E1jDXpGYiOddOFmeV-0esy-DHoCpS6LFygbDfwVcgSY,8864
+braindecode/models/usleep.py,sha256=dFh3KiZITu13gMxcbPGoK4hq2ySDWzVSCQXkj1006w0,11605
+braindecode/models/util.py,sha256=7he-Y5KpDYXjMhLbQXauJWslynM4FvbeYG9VT82zbHg,5387
+braindecode/modules/__init__.py,sha256=cwH_l1R6Y6t3OV9qunh6UkPIWGH2JtMh9gGKxgMhvc8,911
+braindecode/modules/activation.py,sha256=lTO2IjZWBDeXZ4ZVDgLmTDmxHdqyAny3Fsy07HY9tmQ,1466
+braindecode/modules/attention.py,sha256=fsjruzqMdtPbcS6jbU5ux8xtHl0BVrKt4agyf2yNe_E,23966
+braindecode/modules/blocks.py,sha256=QE34HBg7kmEj0z-8dQZ1jJErLRPcniGIorMTeIArpv4,3621
+braindecode/modules/convolution.py,sha256=VAqJXj1Xfb7qlnjRAhH_fJT8qPcaAqy5FHu_sNEbkWw,8291
+braindecode/modules/filter.py,sha256=Utjdnt2dxGXE7ibr778IhwwRBN3FfSu7lxB07aCbDdY,25158
+braindecode/modules/layers.py,sha256=RL4O-kXgCBjVTe8TgA2t_o9fPvv4pv5Jr3wyRtH-XlU,3794
+braindecode/modules/linear.py,sha256=r8bxoz43x5F382j5wqHF6DhZy6Hij9gUQzBz4ZYzqvI,1723
+braindecode/modules/parametrization.py,sha256=sTvV21-sdpqpiY2PzwDebi7SeEvkFw8yDgA6OqJDo34,1310
+braindecode/modules/stats.py,sha256=ETqZH6PPyYCss2PKBDNrO4uUeijR4bxvjCQCXjNJkH4,2398
+braindecode/modules/util.py,sha256=5oKG2sfrn21MGAJ8BHxEBLbjC9agZDq-s56KZX4cijo,2622
+braindecode/modules/wrapper.py,sha256=O2krRJKwhpRRbevT_a2cM80Ww355MjDtXK_T9xTA_nk,2453
+braindecode/preprocessing/__init__.py,sha256=V0iwdzb6DzpUaCabA7I6HmOqXK_XvTbpP5HaEduSJ4s,776
+braindecode/preprocessing/mne_preprocess.py,sha256=_Jczaitqbx16utsUOhnonEcoExf6jPsWNwVOVvoKFfU,2210
+braindecode/preprocessing/preprocess.py,sha256=-9IKjb0THq36m54TK-YRzV18wIkxmVgTcGO2sEH6q98,17665
+braindecode/preprocessing/windowers.py,sha256=6w6mOnroGWnV7tS23UagZZepswaxaL00S45Jr5AViRE,36551
+braindecode/samplers/__init__.py,sha256=TLuO6gXv2WioJdX671MI_CHVSsOfbjnly1Xv9K3_WdA,452
+braindecode/samplers/base.py,sha256=bDJArEyGZQuGUv6fkIms-u8--pY8z7YJIvr6iJEaNzE,15140
+braindecode/samplers/ssl.py,sha256=C-FKopnbncN_-spQPCrgljY5Qds4fgTLr2TG3s_-QqU,9146
+braindecode/training/__init__.py,sha256=sxtfI6MgxX3aP03EFc0wJYA37uULoL9SQyUao1Oxyn0,523
+braindecode/training/callbacks.py,sha256=LqXqzJd6s3w0pvAKy9TEVTxWwVRyWNEu2uyWVsvb9RQ,839
+braindecode/training/losses.py,sha256=EyVVZE_028G6WwrAtzLbrRfDLgsoKwLLhqIkOYBXNL4,3551
+braindecode/training/scoring.py,sha256=tG7uCojIG3KIQZq3AymrdwlIJLlzbgsS0nBLUXQ-A8s,19062
+braindecode/visualization/__init__.py,sha256=4EER_xHqZIDzEvmgUEm7K1bgNKpyZAIClR9ZCkMuY4M,240
+braindecode/visualization/confusion_matrices.py,sha256=qIWMLEHow5CJ7PhGggD8mnD55Le6xhma9HSzt4R33fc,9509
+braindecode/visualization/gradients.py,sha256=qAtXmuXkCDsWs8RMxvE6T9dz3uv_BhwTqhzkFFsEUDI,1948
+braindecode-1.0.0.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
+braindecode-1.0.0.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
+braindecode-1.0.0.dist-info/METADATA,sha256=h7UXT9Mpi-aJUWWsn6tuOrnn3pTaY9HX4D6e-idWSr8,6807
+braindecode-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+braindecode-1.0.0.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
+braindecode-1.0.0.dist-info/RECORD,,

braindecode-1.0.0.dist-info/licenses/NOTICE.txt
ADDED
@@ -0,0 +1,20 @@
+# BRAINDECODE Notice
+
+## Licensed Components
+
+### BSD-3-Clause Licensed Files
+
+All files within the `src/` directory are licensed under the BSD-3-Clause License.
+
+### CC BY-NC 4.0 Licensed Files
+
+The following components are licensed under the Creative Commons Attribution-NonCommercial 4.0 International License:
+
+- `braindecode/models/eegminer.py`
+
+As well as class later imported into the `braindecode.models.module` named as GeneralizedGaussianFilter.
+
+## License Links
+
+- [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause)
+- [CC BY-NC 4.0 License](https://creativecommons.org/licenses/by-nc/4.0/)

braindecode-0.8.dist-info/RECORD
DELETED
@@ -1,11 +0,0 @@
-braindecode/__init__.py,sha256=DVr7xakvqGSkno_VZoOmXHot-PeRHfkSGGBwGq8b1Us,183
-braindecode/classifier.py,sha256=IONNZmZHyoKmWSdmuL9pZqEDxlS0aJvcLD_M9O41CP0,9816
-braindecode/eegneuralnet.py,sha256=P8Trs2m6X6lEKnrQH-FOdQPmQ8cqaLwPl1rEzk_pJaw,14818
-braindecode/regressor.py,sha256=zXrYi3fEvQdROe-BQ79ZFHfbrCvFb4KJa0_zesbvAXw,9359
-braindecode/util.py,sha256=OW4UATTHvnpem9O6vD3UfJSA0y76kons_VBchVPMGM8,13037
-braindecode/version.py,sha256=WY9HbkgoxJlUCfEaMASIAo3wFTh3K3UXHty6LsjolMg,20
-braindecode-0.8.dist-info/LICENSE.txt,sha256=1IX7wR-nRXQap6IOIEXe-ZtJHIaXuiX5UOMsQCrou2I,1520
-braindecode-0.8.dist-info/METADATA,sha256=y_1A_rNknrXPLqa1N75b30MxSknJy5xoghPGx03_7mM,8049
-braindecode-0.8.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
-braindecode-0.8.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
-braindecode-0.8.dist-info/RECORD,,

File without changes