braindecode 1.3.0.dev180851780__py3-none-any.whl → 1.3.0.dev183934738__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- braindecode/eegneuralnet.py +2 -0
- braindecode/models/attentionbasenet.py +2 -0
- braindecode/models/labram.py +168 -69
- braindecode/version.py +1 -1
- {braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/METADATA +1 -1
- {braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/RECORD +10 -10
- {braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/WHEEL +0 -0
- {braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/licenses/LICENSE.txt +0 -0
- {braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/licenses/NOTICE.txt +0 -0
- {braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/top_level.txt +0 -0
braindecode/eegneuralnet.py
CHANGED
@@ -189,6 +189,8 @@ class _EEGNeuralNet(NeuralNet, abc.ABC):
                 "Skipping setting signal-related parameters from data."
             )
             return
+        if classes is None:
+            classes = getattr(self, "classes", None)
         # get kwargs from signal:
         signal_kwargs = dict()
         # Using shape to work both with torch.tensor and numpy.array:
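Note on the change above: the two added lines let `classes` come either from the call site or from a `classes` attribute set on the estimator. A minimal sketch of the pattern; the `_Estimator` class is hypothetical and only illustrates the getattr idiom:

class _Estimator:
    def __init__(self, classes=None):
        self.classes = classes

    def _resolve_classes(self, classes=None):
        # Prefer the explicit argument; otherwise fall back to the
        # attribute, which may itself still be None.
        if classes is None:
            classes = getattr(self, "classes", None)
        return classes

est = _Estimator(classes=["left", "right"])
print(est._resolve_classes())        # ['left', 'right'] (from the attribute)
print(est._resolve_classes([0, 1]))  # [0, 1] (explicit argument wins)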
braindecode/models/attentionbasenet.py
CHANGED

@@ -381,6 +381,8 @@ class AttentionBaseNet(EEGModuleMixin, nn.Module):
         for k, pl, ps in zip(kernel_lengths, pool_lengths, pool_strides):
             out = math.floor(out + 2 * (k // 2) - k + 1)
             out = math.floor((out - pl) / ps + 1)
+            # Ensure output is at least 1 to avoid zero-sized tensors
+            out = max(1, out)
             seq_lengths.append(int(out))
         return seq_lengths
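To see what the new clamp guards against: with k // 2 padding the convolution keeps the length roughly constant, but the pooling step can round a short input down to zero or below. A standalone sketch of the same arithmetic (parameter values made up for illustration):

import math

def seq_length(n_times, kernel_lengths, pool_lengths, pool_strides):
    # Same formula as AttentionBaseNet above, including the new clamp.
    out = n_times
    for k, pl, ps in zip(kernel_lengths, pool_lengths, pool_strides):
        out = math.floor(out + 2 * (k // 2) - k + 1)  # conv with k // 2 padding
        out = math.floor((out - pl) / ps + 1)         # pooling
        out = max(1, out)                             # avoid zero-sized tensors
    return int(out)

# A short input that would previously collapse below 1 after pooling:
print(seq_length(40, kernel_lengths=[64], pool_lengths=[75], pool_strides=[15]))  # 1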
braindecode/models/labram.py
CHANGED
@@ -61,13 +61,24 @@ class Labram(EEGModuleMixin, nn.Module):

     .. versionadded:: 0.9

+
+    Examples on how to load pre-trained weights:
+    --------------------------------------------
+    >>> import torch
+    >>> from braindecode.models import Labram
+    >>> model = Labram(n_times=1600, n_chans=64, n_outputs=4)
+    >>> url = 'https://huggingface.co/braindecode/Labram-Braindecode/blob/main/braindecode_labram_base.pt'
+    >>> state = torch.hub.load_state_dict_from_url(url, progress=True)
+    >>> model.load_state_dict(state)
+
+
     Parameters
     ----------
     patch_size : int
         The size of the patch to be used in the patch embedding.
     emb_size : int
         The dimension of the embedding.
-
+    in_conv_channels : int
         The number of convolutional input channels.
     out_channels : int
         The number of convolutional output channels.
@@ -79,8 +90,10 @@ class Labram(EEGModuleMixin, nn.Module):
         The expansion ratio of the mlp layer
     qkv_bias : bool (default=False)
         If True, add a learnable bias to the query, key, and value tensors.
-    qk_norm : Pytorch Normalize layer (default=
-        If not None, apply LayerNorm to the query and key tensors
+    qk_norm : Pytorch Normalize layer (default=nn.LayerNorm)
+        If not None, apply LayerNorm to the query and key tensors.
+        Default is nn.LayerNorm for better weight transfer from original LaBraM.
+        Set to None to disable Q,K normalization.
     qk_scale : float (default=None)
         If not None, use this value as the scale factor. If None,
         use head_dim**-0.5, where head_dim = dim // num_heads.
@@ -92,9 +105,10 @@ class Labram(EEGModuleMixin, nn.Module):
         Dropout rate for the attention weights used on DropPath.
     norm_layer : Pytorch Normalize layer (default=nn.LayerNorm)
         The normalization layer to be used.
-    init_values : float (default=
+    init_values : float (default=0.1)
         If not None, use this value to initialize the gamma_1 and gamma_2
-        parameters.
+        parameters for residual scaling. Default is 0.1 for better weight
+        transfer from original LaBraM. Set to None to disable.
     use_abs_pos_emb : bool (default=True)
         If True, use absolute position embedding.
     use_mean_pooling : bool (default=True)
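For context on the two new defaults: `qk_norm` normalizes queries and keys per head before the dot product, and `init_values` initializes LayerScale-style parameters that scale the residual branch. The sketch below is an illustrative stand-in for a ViT-style block, not braindecode's internal attention code:

import torch
import torch.nn as nn

class TinyAttention(nn.Module):
    # Illustrative only: shows where qk_norm and init_values (gamma) act.
    def __init__(self, dim=8, n_heads=2, qk_norm=nn.LayerNorm, init_values=0.1):
        super().__init__()
        self.n_heads, self.head_dim = n_heads, dim // n_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=False)
        self.proj = nn.Linear(dim, dim)
        # qk_norm: LayerNorm over each head's query/key features
        self.q_norm = qk_norm(self.head_dim) if qk_norm is not None else nn.Identity()
        self.k_norm = qk_norm(self.head_dim) if qk_norm is not None else nn.Identity()
        # init_values: LayerScale parameter on the residual branch
        self.gamma = nn.Parameter(init_values * torch.ones(dim)) if init_values else None

    def forward(self, x):
        b, n, d = x.shape
        q, k, v = self.qkv(x).reshape(b, n, 3, self.n_heads, self.head_dim).unbind(2)
        q, k = self.q_norm(q), self.k_norm(k)
        q, k, v = (t.transpose(1, 2) for t in (q, k, v))
        attn = (q @ k.transpose(-2, -1)) * self.head_dim**-0.5
        out = (attn.softmax(dim=-1) @ v).transpose(1, 2).reshape(b, n, d)
        out = self.proj(out)
        return x + (self.gamma * out if self.gamma is not None else out)

print(TinyAttention()(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 5, 8])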
@@ -135,19 +149,19 @@ class Labram(EEGModuleMixin, nn.Module):
         input_window_seconds=None,
         patch_size=200,
         emb_size=200,
-
+        in_conv_channels=1,
         out_channels=8,
         n_layers=12,
         att_num_heads=10,
         mlp_ratio=4.0,
         qkv_bias=False,
-        qk_norm=
+        qk_norm=nn.LayerNorm,
         qk_scale=None,
         drop_prob=0.0,
         attn_drop_prob=0.0,
         drop_path_prob=0.0,
         norm_layer=nn.LayerNorm,
-        init_values=
+        init_values=0.1,
         use_abs_pos_emb=True,
         use_mean_pooling=True,
         init_scale=0.001,
@@ -183,15 +197,15 @@ class Labram(EEGModuleMixin, nn.Module):
         self.patch_size = patch_size
         self.n_path = self.n_times // self.patch_size

-        if neural_tokenizer and
+        if neural_tokenizer and in_conv_channels != 1:
             warn(
                 "The model is in Neural Tokenizer mode, but the variable "
-                + "`
-                + "`
-                + "
+                + "`in_conv_channels` is different from the default values."
+                + "`in_conv_channels` is only needed for the Neural Decoder mode."
+                + "in_conv_channels is not used in the Neural Tokenizer mode.",
                 UserWarning,
             )
-
+            in_conv_channels = 1
         # If you can use the model in Neural Tokenizer mode,
         # temporal conv layer will be use over the patched dataset
         if neural_tokenizer:
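A hedged usage sketch of the guard above, assuming `neural_tokenizer` defaults to True (consistent with the tokenizer-mode docstring in this diff): passing a non-default `in_conv_channels` should emit the UserWarning and fall back to 1.

import warnings
from braindecode.models import Labram

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    model = Labram(n_times=1600, n_chans=64, n_outputs=4, in_conv_channels=4)

print(any(issubclass(w.category, UserWarning) for w in caught))  # expected: True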
@@ -228,7 +242,7 @@ class Labram(EEGModuleMixin, nn.Module):
             _PatchEmbed(
                 n_times=self.n_times,
                 patch_size=patch_size,
-                in_channels=
+                in_channels=in_conv_channels,
                 emb_dim=self.emb_size,
             ),
         )
@@ -373,8 +387,7 @@ class Labram(EEGModuleMixin, nn.Module):
         Parameters
         ----------
         x : torch.Tensor
-            The input data with shape (batch, n_chans,
-            if neural decoder or (batch, n_chans, n_times), if neural tokenizer.
+            The input data with shape (batch, n_chans, n_times).
         input_chans : int
             The number of input channels.
         return_patch_tokens : bool
@@ -387,37 +400,72 @@ class Labram(EEGModuleMixin, nn.Module):
         x : torch.Tensor
             The output of the model.
         """
+        batch_size = x.shape[0]
+
         if self.neural_tokenizer:
-
+            # For neural tokenizer: input is (batch, n_chans, n_times)
+            # patch_embed returns (batch, n_chans, emb_dim)
+            x = self.patch_embed(x)
+            # x shape: (batch, n_chans, emb_dim)
+            n_patch = self.n_chans
+            temporal = self.emb_size
         else:
-
-
+            # For neural decoder: input is (batch, n_chans, n_times)
+            # patch_embed returns (batch, n_patchs, emb_dim)
+            x = self.patch_embed(x)
+            # x shape: (batch, n_patchs, emb_dim)
+            batch_size, n_patch, temporal = x.shape
+
         # add the [CLS] token to the embedded patch tokens
         cls_tokens = self.cls_token.expand(batch_size, -1, -1)

+        # Concatenate cls token with patch/channel embeddings
         x = torch.cat((cls_tokens, x), dim=1)

         # Positional Embedding
-        if input_chans is not None:
-            pos_embed_used = self.position_embedding[:, input_chans]
-        else:
-            pos_embed_used = self.position_embedding
-
         if self.position_embedding is not None:
-
-
-
+            if self.neural_tokenizer:
+                # In tokenizer mode, use channel-based position embedding
+                if input_chans is not None:
+                    pos_embed_used = self.position_embedding[:, input_chans]
+                else:
+                    pos_embed_used = self.position_embedding
+
+                pos_embed = self._adj_position_embedding(
+                    pos_embed_used=pos_embed_used, batch_size=batch_size
+                )
+            else:
+                # In decoder mode, we have different number of patches
+                # Adapt position embedding for n_patch patches
+                # Use the first n_patch+1 positions from position_embedding
+                n_pos = min(self.position_embedding.shape[1], n_patch + 1)
+                pos_embed_used = self.position_embedding[:, :n_pos, :]
+                pos_embed = pos_embed_used.expand(batch_size, -1, -1)
+
             x += pos_embed

         # The time embedding is added across the channels after the [CLS] token
         if self.neural_tokenizer:
             num_ch = self.n_chans
+            time_embed = self._adj_temporal_embedding(
+                num_ch=num_ch, batch_size=batch_size, dim_embed=temporal
+            )
+            x[:, 1:, :] += time_embed
         else:
-
-
-
-
-
+            # In decoder mode, we have n_patch patches and don't need to expand
+            # Just broadcast the temporal embedding
+            if temporal is None:
+                temporal = self.emb_size
+
+            # Get temporal embeddings for n_patch patches
+            n_time_tokens = min(n_patch, self.temporal_embedding.shape[1] - 1)
+            time_embed = self.temporal_embedding[
+                :, 1 : n_time_tokens + 1, :
+            ]  # (1, n_patch, emb_dim)
+            time_embed = time_embed.expand(
+                batch_size, -1, -1
+            )  # (batch, n_patch, emb_dim)
+            x[:, 1:, :] += time_embed

         x = self.pos_drop(x)

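The token handling above follows the usual ViT recipe: prepend one [CLS] token, then broadcast-add a (1, n_tokens, emb_dim) embedding table. A self-contained shape check with illustrative sizes:

import torch

batch_size, n_chans, emb_dim = 2, 64, 200
x = torch.randn(batch_size, n_chans, emb_dim)     # channel/patch embeddings
cls_token = torch.zeros(1, 1, emb_dim)
pos_embed = torch.randn(1, n_chans + 1, emb_dim)  # one slot per token + [CLS]

# Same pattern as the forward pass above: prepend [CLS], broadcast-add positions
x = torch.cat((cls_token.expand(batch_size, -1, -1), x), dim=1)
x = x + pos_embed.expand(batch_size, -1, -1)
print(x.shape)  # torch.Size([2, 65, 200])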
@@ -428,10 +476,10 @@ class Labram(EEGModuleMixin, nn.Module):
         if self.fc_norm is not None:
             if return_all_tokens:
                 return self.fc_norm(x)
-
+            tokens = x[:, 1:, :]
             if return_patch_tokens:
-                return self.fc_norm(
-            return self.fc_norm(
+                return self.fc_norm(tokens)
+            return self.fc_norm(tokens.mean(1))
         else:
             if return_all_tokens:
                 return x
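The repaired pooling head drops the [CLS] token and averages the remaining tokens before normalizing. A tiny standalone equivalent (sizes illustrative):

import torch
import torch.nn as nn

x = torch.randn(2, 65, 200)       # [CLS] + 64 tokens
fc_norm = nn.LayerNorm(200)

tokens = x[:, 1:, :]              # drop the [CLS] token, as above
pooled = fc_norm(tokens.mean(1))  # mean over tokens, then normalize
print(pooled.shape)               # torch.Size([2, 200])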
@@ -505,14 +553,16 @@ class Labram(EEGModuleMixin, nn.Module):
     def _adj_temporal_embedding(self, num_ch, batch_size, dim_embed=None):
         """
         Adjust the dimensions of the time embedding to match the
-        number of channels.
+        number of channels or patches.

         Parameters
         ----------
         num_ch : int
-            The number of channels or number of
+            The number of channels or number of patches.
         batch_size : int
             Batch size of the input data.
+        dim_embed : int
+            The embedding dimension (temporal feature dimension).

         Returns
         -------
@@ -523,17 +573,24 @@ class Labram(EEGModuleMixin, nn.Module):
         if dim_embed is None:
             cut_dimension = self.patch_size
         else:
-            cut_dimension = dim_embed
-
-
+            cut_dimension = min(dim_embed, self.temporal_embedding.shape[1] - 1)
+
+        # Get the temporal embedding: (1, temporal_embedding_dim, emb_size)
+        # Slice to cut_dimension: (1, cut_dimension, emb_size)
+        temporal_embedding = self.temporal_embedding[:, 1 : cut_dimension + 1, :]
+
         # Add a new dimension to the time embedding
-        # e.g. (
+        # e.g. (1, 5, 200) -> (1, 1, 5, 200)
         temporal_embedding = temporal_embedding.unsqueeze(1)
-
-        #
+
+        # Expand the time embedding to match the number of channels or patches
+        # (1, 1, cut_dimension, 200) -> (batch_size, num_ch, cut_dimension, 200)
         temporal_embedding = temporal_embedding.expand(batch_size, num_ch, -1, -1)
+
         # Flatten the intermediate dimensions
+        # (batch_size, num_ch, cut_dimension, 200) -> (batch_size, num_ch * cut_dimension, 200)
         temporal_embedding = temporal_embedding.flatten(1, 2)
+
         return temporal_embedding

     def _adj_position_embedding(self, pos_embed_used, batch_size):
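The slice -> unsqueeze -> expand -> flatten pipeline in `_adj_temporal_embedding` can be verified with a stand-in tensor; the 16-position table below is made up and only the shapes matter:

import torch

batch_size, num_ch, cut_dimension, emb_size = 2, 64, 5, 200
temporal_embedding = torch.randn(1, 16, emb_size)    # stand-in lookup table

t = temporal_embedding[:, 1 : cut_dimension + 1, :]  # (1, 5, 200)
t = t.unsqueeze(1)                                   # (1, 1, 5, 200)
t = t.expand(batch_size, num_ch, -1, -1)             # (2, 64, 5, 200)
t = t.flatten(1, 2)                                  # (2, 320, 200)
print(t.shape)  # torch.Size([2, 320, 200])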
@@ -679,25 +736,27 @@ class _SegmentPatch(nn.Module):


 class _PatchEmbed(nn.Module):
-    """EEG to Patch Embedding.
+    """EEG to Patch Embedding for Neural Decoder mode.

     This code is used when we want to apply the patch embedding
-    after the codebook layer.
+    after the codebook layer (Neural Decoder mode).
+
+    The input is expected to be in the format (Batch, n_channels, n_times),
+    but the original LaBraM expects pre-patched data (Batch, n_channels, n_patches, patch_size).
+    This class reshapes the input to the pre-patched format, then applies a 2D
+    convolution to project this pre-patched data to the embedding dimension,
+    and finally flattens across channels to produce a unified embedding.

     Parameters:
     -----------
     n_times: int (default=2000)
-        Number of temporal components of the input tensor.
+        Number of temporal components of the input tensor (used for dimension calculation).
     patch_size: int (default=200)
         Size of the patch, default is 1-seconds with 200Hz.
     in_channels: int (default=1)
-        Number of input channels
+        Number of input channels (from VQVAE codebook).
     emb_dim: int (default=200)
-        Number of
-        we used the same as patch_size.
-    n_codebooks: int (default=62)
-        Number of patches to be used in the convolution, here,
-        we used the same as n_times // patch_size.
+        Number of output embedding dimension.
     """

     def __init__(
@@ -707,10 +766,13 @@ class _PatchEmbed(nn.Module):
         self.n_times = n_times
         self.patch_size = patch_size
         self.patch_shape = (1, self.n_times // self.patch_size)
-        n_patchs =
-
-        self.
+        self.n_patchs = self.n_times // self.patch_size
+        self.emb_dim = emb_dim
+        self.in_channels = in_channels

+        # 2D Conv to project the pre-patched data
+        # Input: (Batch, in_channels, n_patches, patch_size)
+        # After proj: (Batch, emb_dim, n_patches, 1)
         self.proj = nn.Conv2d(
             in_channels=in_channels,
             out_channels=emb_dim,
@@ -718,27 +780,64 @@ class _PatchEmbed(nn.Module):
             stride=(1, self.patch_size),
         )

-        self.merge_transpose = Rearrange(
-            "Batch ch patch spatch -> Batch patch spatch ch",
-        )
-
     def forward(self, x):
         """
-        Apply the
-        then merge the output tensor to the desired shape.
+        Apply the temporal projection to the input tensor after grouping channels.

-        Parameters
-
-        x: torch.Tensor
-            Input tensor of shape (Batch,
+        Parameters
+        ----------
+        x : torch.Tensor
+            Input tensor of shape (Batch, n_channels, n_times) or
+            (Batch, n_channels, n_patches, patch_size).

-
+        Returns
         -------
-
-            Output tensor of shape (Batch, n_patchs,
+        torch.Tensor
+            Output tensor of shape (Batch, n_patchs, emb_dim).
         """
+        if x.ndim == 4:
+            batch_size, n_channels, n_patchs, patch_len = x.shape
+            if patch_len != self.patch_size:
+                raise ValueError(
+                    "When providing a 4D tensor, the last dimension "
+                    f"({patch_len}) must match patch_size ({self.patch_size})."
+                )
+            n_times = n_patchs * patch_len
+            x = x.reshape(batch_size, n_channels, n_times)
+        elif x.ndim == 3:
+            batch_size, n_channels, n_times = x.shape
+        else:
+            raise ValueError(
+                "Input must be either 3D (batch, channels, times) or "
+                "4D (batch, channels, n_patches, patch_size)."
+            )
+
+        if n_times % self.patch_size != 0:
+            raise ValueError(
+                f"n_times ({n_times}) must be divisible by patch_size ({self.patch_size})."
+            )
+        if n_channels % self.in_channels != 0:
+            raise ValueError(
+                "The input channel dimension "
+                f"({n_channels}) must be divisible by in_channels ({self.in_channels})."
+            )
+
+        group_size = n_channels // self.in_channels
+
+        # Reshape so Conv2d sees `in_channels` feature maps and uses the grouped
+        # EEG channels as the spatial height dimension.
+        # Shape after view: (Batch, in_channels, group_size, n_times)
+        x = x.view(batch_size, self.in_channels, group_size, n_times)
+
+        # Apply the temporal projection per group.
+        # Output shape: (Batch, emb_dim, group_size, n_patchs)
         x = self.proj(x)
-
+
+        # THIS IS braindecode's MODIFICATION:
+        # Average over the grouped channel dimension and permute to (Batch, n_patchs, emb_dim)
+        x = x.mean(dim=2)
+        x = x.transpose(1, 2).contiguous()
+
         return x
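The grouped Conv2d projection above can be checked in isolation. The sketch mirrors the forward pass, assuming a (1, patch_size) kernel to match the stride shown in the diff; it is a standalone replica, not an import of the private _PatchEmbed class:

import torch
import torch.nn as nn

batch_size, n_channels, n_times = 2, 64, 1600
in_channels, patch_size, emb_dim = 1, 200, 200

proj = nn.Conv2d(in_channels, emb_dim,
                 kernel_size=(1, patch_size), stride=(1, patch_size))

x = torch.randn(batch_size, n_channels, n_times)
group_size = n_channels // in_channels
x = x.view(batch_size, in_channels, group_size, n_times)  # (2, 1, 64, 1600)
x = proj(x)                                               # (2, 200, 64, 8)
x = x.mean(dim=2).transpose(1, 2).contiguous()            # (2, 8, 200)
print(x.shape)  # (batch, n_patchs, emb_dim)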
braindecode/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.3.0.dev180851780"
+__version__ = "1.3.0.dev183934738"
{braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: braindecode
-Version: 1.3.0.dev180851780
+Version: 1.3.0.dev183934738
 Summary: Deep learning software to decode EEG, ECG or MEG signals
 Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
 Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
{braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/RECORD
RENAMED
@@ -1,9 +1,9 @@
 braindecode/__init__.py,sha256=Ac3LEEyIHWFY_fFh3eAY1GZUqXcUxVSJwOSUCwGEDvQ,182
 braindecode/classifier.py,sha256=k9vSCtfQbld0YVleDi5rrrmk6k_k5JYEPPBYcNxYjZ8,9807
-braindecode/eegneuralnet.py,sha256=
+braindecode/eegneuralnet.py,sha256=U6kRdT2u8A2Ca0axMTR8IAESBsvgjLMusAbYappKAOk,15368
 braindecode/regressor.py,sha256=VLfrpiXklwI4onkwue3QmzlBWcvspu0tlrLo9RT1Oiw,9375
 braindecode/util.py,sha256=J-tBcDJNlMTIFW2mfOy6Ko0nsgdP4obRoEVDeg2rFH0,12686
-braindecode/version.py,sha256=
+braindecode/version.py,sha256=rd9ITfQsrw03V6jxIPF0WKYZHuhed2RdByopoU6kp7s,35
 braindecode/augmentation/__init__.py,sha256=LG7ONqCufYAF9NZt8POIp10lYXb8iSueYkF-CWGK2Ls,1001
 braindecode/augmentation/base.py,sha256=gg7wYsVfa9jfqBddtE03B5ZrPHFFmPl2sa3LOrRnGfo,7325
 braindecode/augmentation/functional.py,sha256=lPhGpZcVtgfQ3oV6p6IQLBCWM_Psa60TwxH3Wj1WyOQ,41133
@@ -29,7 +29,7 @@ braindecode/functional/functions.py,sha256=CoEweM6YLhigx0tNmmz6yAc8iQ078sTFY2GeC
 braindecode/functional/initialization.py,sha256=BUSC7y2TMsfShpMYBVwm3xg3ODFqWp-STH7yD4sn8zk,1388
 braindecode/models/__init__.py,sha256=vB0ZFhucH1cRQPoAAAcc3S-hVTnAy674Eu0FjjjKJp0,2543
 braindecode/models/atcnet.py,sha256=H2IWMscm3IM4PH8DA_iLkUaeMXgA120DmVld4jBFOCM,32242
-braindecode/models/attentionbasenet.py,sha256=
+braindecode/models/attentionbasenet.py,sha256=_bml0Ofy7yB12X19a026EYkcLuzZIab0v3sQTqZ5HGQ,30485
 braindecode/models/attn_sleep.py,sha256=m6sdFfD4en2hHf_TpotLPC1hVweJcYZvjgf12bV5FZg,17822
 braindecode/models/base.py,sha256=9icrWNZBGbh_VLyB9m8g_K1QyK7s3mh8X-hJ29gEbWs,10802
 braindecode/models/biot.py,sha256=d2P1i_8k98SU3FkN_dKPXcCoFVmyQIIrBbI1-F3g-8E,17509
@@ -51,7 +51,7 @@ braindecode/models/fblightconvnet.py,sha256=d5MwhawhkjilAMo0ckaYMxJhdGMEuorWgHX-
 braindecode/models/fbmsnet.py,sha256=9bZn2_n1dTrI1Qh3Sz9zMZnH_a-Yq-13UHYSmF6r_UE,11659
 braindecode/models/hybrid.py,sha256=hA8jwD3_3LL71BxUjRM1dkhqlHU9E9hjuDokh-jBq-4,4024
 braindecode/models/ifnet.py,sha256=Y2bwfko3SDjD74AzgUEzgMhKJFGCCw_Q_Noh5VONEjQ,15137
-braindecode/models/labram.py,sha256=
+braindecode/models/labram.py,sha256=dnZpHbuB60pKZWZHNQaM01eNajGG0tkZB2iutT882PM,46563
 braindecode/models/msvtnet.py,sha256=hxeCLkHS6w2w89YlLfEPCyQ4XQQpt45bEYPiQJ9SFzY,12642
 braindecode/models/patchedtransformer.py,sha256=9TY9l2X4EoCuE9IoOObjubKFRdmsN5lbrVQLnmr66VY,23444
 braindecode/models/sccnet.py,sha256=C7vdwIR5cI6wJCl5f8TnGQG6qinq21y4HG6l-D5AwbY,11971
@@ -95,9 +95,9 @@ braindecode/training/scoring.py,sha256=WRkwqbitA3m_dzRnGp2ZIZPge5Nhx9gAEQhIHzeH4
 braindecode/visualization/__init__.py,sha256=4EER_xHqZIDzEvmgUEm7K1bgNKpyZAIClR9ZCkMuY4M,240
 braindecode/visualization/confusion_matrices.py,sha256=qIWMLEHow5CJ7PhGggD8mnD55Le6xhma9HSzt4R33fc,9509
 braindecode/visualization/gradients.py,sha256=KZo-GA0uwiwty2_94j2IjmCR2SKcfPb1Bi3sQq7vpTk,2170
-braindecode-1.3.0.
-braindecode-1.3.0.
-braindecode-1.3.0.
-braindecode-1.3.0.
-braindecode-1.3.0.
-braindecode-1.3.0.
+braindecode-1.3.0.dev183934738.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
+braindecode-1.3.0.dev183934738.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
+braindecode-1.3.0.dev183934738.dist-info/METADATA,sha256=7kNfM6cPR3AaaIwbaxoVLlt8OshwSRj8pXcRZtvwkLA,7129
+braindecode-1.3.0.dev183934738.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+braindecode-1.3.0.dev183934738.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
+braindecode-1.3.0.dev183934738.dist-info/RECORD,,
{braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/WHEEL
RENAMED
File without changes
{braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/licenses/LICENSE.txt
RENAMED
File without changes
{braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/licenses/NOTICE.txt
RENAMED
File without changes
{braindecode-1.3.0.dev180851780.dist-info → braindecode-1.3.0.dev183934738.dist-info}/top_level.txt
RENAMED
File without changes