braindecode 1.2.0.dev184328194__py3-none-any.whl → 1.3.0.dev174777731__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Files changed (34)
  1. braindecode/datasets/experimental.py +218 -0
  2. braindecode/models/__init__.py +6 -8
  3. braindecode/models/atcnet.py +152 -12
  4. braindecode/models/attentionbasenet.py +151 -26
  5. braindecode/models/{sleep_stager_eldele_2021.py → attn_sleep.py} +12 -2
  6. braindecode/models/ctnet.py +1 -1
  7. braindecode/models/deep4.py +6 -2
  8. braindecode/models/deepsleepnet.py +118 -5
  9. braindecode/models/eegconformer.py +114 -15
  10. braindecode/models/eeginception_erp.py +76 -7
  11. braindecode/models/eeginception_mi.py +2 -0
  12. braindecode/models/eegnet.py +64 -177
  13. braindecode/models/eegnex.py +113 -6
  14. braindecode/models/eegsimpleconv.py +2 -0
  15. braindecode/models/eegtcnet.py +1 -1
  16. braindecode/models/sccnet.py +81 -8
  17. braindecode/models/shallow_fbcsp.py +2 -0
  18. braindecode/models/sleep_stager_blanco_2020.py +2 -0
  19. braindecode/models/sleep_stager_chambon_2018.py +2 -0
  20. braindecode/models/sparcnet.py +2 -0
  21. braindecode/models/summary.csv +39 -41
  22. braindecode/models/tidnet.py +2 -0
  23. braindecode/models/tsinception.py +15 -3
  24. braindecode/models/usleep.py +103 -9
  25. braindecode/models/util.py +5 -5
  26. braindecode/preprocessing/preprocess.py +20 -26
  27. braindecode/version.py +1 -1
  28. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/METADATA +7 -2
  29. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/RECORD +33 -33
  30. braindecode/models/eegresnet.py +0 -362
  31. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/WHEEL +0 -0
  32. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/licenses/LICENSE.txt +0 -0
  33. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/licenses/NOTICE.txt +0 -0
  34. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/top_level.txt +0 -0
braindecode/models/summary.csv CHANGED
@@ -1,41 +1,39 @@
- Model,Paradigm,Type,Freq(Hz),Hyperparameters,#Parameters,get_#Parameters
- ATCNet,General,Classification,250,"n_chans, n_outputs, n_times",113732,"ATCNet(n_chans=22, n_outputs=4, n_times=1000)"
- AttentionBaseNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",3692,"AttentionBaseNet(n_chans=22, n_outputs=4, n_times=1000)"
- BDTCN,Normal/Abnormal,Classification,100,"n_chans, n_outputs, n_times",456502,"BDTCN(n_chans=21, n_outputs=2, n_times=6000, n_blocks=5, n_filters=55, kernel_size=16)"
- BIOT,"Sleep Staging, Epilepsy",Classification,200,"n_chans, n_outputs",3183879,"BIOT(n_chans=2, n_outputs=5, n_times=6000)"
- ContraWR,Sleep Staging,"Classification, Embedding",125,"n_chans, n_outputs, sfreq",1160165,"ContraWR(n_chans=2, n_outputs=5, n_times=3750, emb_size=256, sfreq=125)"
- CTNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",26900,"CTNet(n_chans=22, n_outputs=4, n_times=1000, n_filters_time=8, kernel_size=16, heads=2, emb_size=16)"
- Deep4Net,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",282879,"Deep4Net(n_chans=22, n_outputs=4, n_times=1000)"
- DeepSleepNet,Sleep Staging,Classification,256,"n_chans, n_outputs",24744837,"DeepSleepNet(n_chans=1, n_outputs=5, n_times=7680, sfreq=256)"
- EEGConformer,General,Classification,250,"n_chans, n_outputs, n_times",789572,"EEGConformer(n_chans=22, n_outputs=4, n_times=1000)."
- EEGInceptionERP,"ERP, SSVEP",Classification,128,"n_chans, n_outputs",14926,"EEGInceptionERP(n_chans=8, n_outputs=2, n_times=128, sfreq=128)"
- EEGInceptionMI,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",558028,"EEGInceptionMI(n_chans=22, n_outputs=4, n_times=1000, n_convs=5, n_filters=12)"
- EEGITNet,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times",5212,"EEGITNet(n_chans=22, n_outputs=4, n_times=500)"
- EEGNetv1,General,Classification,128,"n_chans, n_outputs, n_times",3052,"EEGNetv1(n_chans=22, n_outputs=4, n_times=512)"
- EEGNetv4,General,Classification,128,"n_chans, n_outputs, n_times",2484,"EEGNetv4(n_chans=22, n_outputs=4, n_times=512)"
- EEGNeX,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times",55940,"EEGNeX(n_chans=22, n_outputs=4, n_times=500)"
- EEGMiner,Emotion Recognition,Classification,128,"n_chans, n_outputs, n_times, sfreq",7572,"EEGMiner(n_chans=62, n_outputs=2, n_times=2560, sfreq=128)"
- EEGResNet,General,Classification,250,"n_chans, n_outputs, n_times",247484,"EEGResNet(n_chans=22, n_outputs=4, n_times=1000)"
- EEGSimpleConv,Motor Imagery,Classification,80,"n_chans, n_outputs, sfreq",730404,"EEGSimpleConv(n_chans=22, n_outputs=4, n_times=320, sfreq=80)"
- EEGTCNet,Motor Imagery,Classification,250,"n_chans, n_outputs",4516,"EEGTCNet(n_chans=22, n_outputs=4, n_times=1000, kern_length=32)"
- Labram,General,"Classification, Embedding",200,"n_chans, n_outputs, n_times",5866180,"Labram(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
- MSVTNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",75494," MSVTNet(n_chans=22, n_outputs=4, n_times=1000)"
- SCCNet,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times, sfreq",12070,"SCCNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=125)"
- SignalJEPA,"Motor Imagery, ERP, SSVEP",Embedding,128,"n_times, chs_info",3456882,"SignalJEPA(n_times=512, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])"
- SignalJEPA_Contextual,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",3459184,"SignalJEPA_Contextual(n_outputs=2, input_window_seconds=4.19, sfreq=128, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])"
- SignalJEPA_PostLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_chans, n_outputs, n_times",16142,"SignalJEPA_PostLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)"
- SignalJEPA_PreLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",16142,"SignalJEPA_PreLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)"
- SincShallowNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",21892,"SincShallowNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
- ShallowFBCSPNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",46084,"ShallowFBCSPNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
- SleepStagerBlanco2020,Sleep Staging,Classification,100,"n_chans, n_outputs, n_times",2845,"SleepStagerBlanco2020(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)"
- SleepStagerChambon2018,Sleep Staging,Classification,128,"n_chans, n_outputs, n_times, sfreq",5835,"SleepStagerChambon2018(n_chans=2, n_outputs=5, n_times=3840, sfreq=128)"
- SleepStagerEldele2021,Sleep Staging,Classification,100,"n_chans, n_outputs, n_times, sfreq",719925,"SleepStagerEldele2021(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)"
- SPARCNet,Epilepsy,Classification,200,"n_chans, n_outputs, n_times",1141921,"SPARCNet(n_chans=16, n_outputs=6, n_times=2000, sfreq=200)"
- SyncNet,"Emotion Recognition, Alcoholism",Classification,256,"n_chans, n_outputs, n_times",554,"SyncNet(n_chans=62, n_outputs=3, n_times=5120, sfreq=256)"
- TSceptionV1,Emotion Recognition,Classification,256,"n_chans, n_outputs, n_times, sfreq",2187206,"TSceptionV1(n_chans=62, n_outputs=3, n_times=5120, sfreq=256)"
- TIDNet,General,Classification,250,"n_chans, n_outputs, n_times",240404,"TIDNet(n_chans=22, n_outputs=4, n_times=1000)"
- USleep,Sleep Staging,Classification,128,"n_chans, n_outputs, n_times, sfreq",2482011,"USleep(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)"
- FBCNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",11812,"FCNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
- FBMSNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",16231,"FBMSNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
- FBLightConvNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",6596,"FBLightConvNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
- IFNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",9860,"IFNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)"
+ Model,Application,Type,Sampling Frequency (Hz),Hyperparameters,#Parameters,get_#Parameters,Categorization
+ ATCNet,General,Classification,250,"n_chans, n_outputs, n_times",113732,"ATCNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Recurrent,Small Attention"
+ AttentionBaseNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",3692,"AttentionBaseNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Small Attention"
+ BDTCN,Normal Abnormal,Classification,100,"n_chans, n_outputs, n_times",456502,"BDTCN(n_chans=21, n_outputs=2, n_times=6000, n_blocks=5, n_filters=55, kernel_size=16)","Convolution,Recurrent"
+ BIOT,"Sleep Staging, Epilepsy",Classification,200,"n_chans, n_outputs",3183879,"BIOT(n_chans=2, n_outputs=5, n_times=6000)","Large Language Model"
+ ContraWR,Sleep Staging,"Classification, Embedding",125,"n_chans, n_outputs, sfreq",1160165,"ContraWR(n_chans=2, n_outputs=5, n_times=3750, emb_size=256, sfreq=125)",Convolution
+ CTNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",26900,"CTNet(n_chans=22, n_outputs=4, n_times=1000, n_filters_time=8, kernel_size=16, heads=2, emb_size=16)","Convolution,Small Attention"
+ Deep4Net,General,Classification,250,"n_chans, n_outputs, n_times",282879,"Deep4Net(n_chans=22, n_outputs=4, n_times=1000)","Convolution"
+ DeepSleepNet,Sleep Staging,Classification,256,"n_chans, n_outputs",24744837,"DeepSleepNet(n_chans=1, n_outputs=5, n_times=7680, sfreq=256)","Convolution,Recurrent"
+ EEGConformer,General,Classification,250,"n_chans, n_outputs, n_times",789572,"EEGConformer(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Small Attention"
+ EEGInceptionERP,"ERP, SSVEP",Classification,128,"n_chans, n_outputs",14926,"EEGInceptionERP(n_chans=8, n_outputs=2, n_times=128, sfreq=128)","Convolution"
+ EEGInceptionMI,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",558028,"EEGInceptionMI(n_chans=22, n_outputs=4, n_times=1000, n_convs=5, n_filters=12)","Convolution"
+ EEGITNet,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times",5212,"EEGITNet(n_chans=22, n_outputs=4, n_times=500)","Convolution,Recurrent"
+ EEGNet,General,Classification,128,"n_chans, n_outputs, n_times",2484,"EEGNet(n_chans=22, n_outputs=4, n_times=512)","Convolution"
+ EEGNeX,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times",55940,"EEGNeX(n_chans=22, n_outputs=4, n_times=500)","Convolution"
+ EEGMiner,Emotion Recognition,Classification,128,"n_chans, n_outputs, n_times, sfreq",7572,"EEGMiner(n_chans=62, n_outputs=2, n_times=2560, sfreq=128)","Convolution,Interpretability"
+ EEGSimpleConv,Motor Imagery,Classification,80,"n_chans, n_outputs, sfreq",730404,"EEGSimpleConv(n_chans=22, n_outputs=4, n_times=320, sfreq=80)","Convolution"
+ EEGTCNet,Motor Imagery,Classification,250,"n_chans, n_outputs",4516,"EEGTCNet(n_chans=22, n_outputs=4, n_times=1000, kern_length=32)","Convolution,Recurrent"
+ Labram,General,"Classification, Embedding",200,"n_chans, n_outputs, n_times",5866180,"Labram(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,Large Language Model"
+ MSVTNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",75494," MSVTNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Recurrent,Small Attention"
+ SCCNet,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times, sfreq",12070,"SCCNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=125)","Convolution"
+ SignalJEPA,"Motor Imagery, ERP, SSVEP",Embedding,128,"n_times, chs_info",3456882,"SignalJEPA(n_times=512, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])","Convolution,Channel,Large Language Model"
+ SignalJEPA_Contextual,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",3459184,"SignalJEPA_Contextual(n_outputs=2, input_window_seconds=4.19, sfreq=128, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])","Convolution,Channel,Large Language Model"
+ SignalJEPA_PostLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_chans, n_outputs, n_times",16142,"SignalJEPA_PostLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)","Convolution,Channel,Large Language Model"
+ SignalJEPA_PreLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",16142,"SignalJEPA_PreLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)","Convolution,Channel,Large Language Model"
+ SincShallowNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",21892,"SincShallowNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,Interpretability"
+ ShallowFBCSPNet,General,Classification,250,"n_chans, n_outputs, n_times",46084,"ShallowFBCSPNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution"
+ SleepStagerBlanco2020,Sleep Staging,Classification,100,"n_chans, n_outputs, n_times",2845,"SleepStagerBlanco2020(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)","Convolution"
+ SleepStagerChambon2018,Sleep Staging,Classification,128,"n_chans, n_outputs, n_times, sfreq",5835,"SleepStagerChambon2018(n_chans=2, n_outputs=5, n_times=3840, sfreq=128)","Convolution"
+ AttnSleep,Sleep Staging,Classification,100,"n_chans, n_outputs, n_times, sfreq",719925,"AttnSleep(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)","Convolution, Small Attention"
+ SPARCNet,Epilepsy,Classification,200,"n_chans, n_outputs, n_times",1141921,"SPARCNet(n_chans=16, n_outputs=6, n_times=2000, sfreq=200)","Convolution"
+ SyncNet,"Emotion Recognition, Alcoholism",Classification,256,"n_chans, n_outputs, n_times",554,"SyncNet(n_chans=62, n_outputs=3, n_times=5120, sfreq=256)","Interpretability"
+ TSception,Emotion Recognition,Classification,256,"n_chans, n_outputs, n_times, sfreq",2187206,"TSception(n_chans=62, n_outputs=3, n_times=5120, sfreq=256)","Convolution"
+ TIDNet,General,Classification,250,"n_chans, n_outputs, n_times",240404,"TIDNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution"
+ USleep,Sleep Staging,Classification,128,"n_chans, n_outputs, n_times, sfreq",2482011,"USleep(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)","Convolution"
+ FBCNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",11812,"FCNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
+ FBMSNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",16231,"FBMSNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
+ FBLightConvNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",6596,"FBLightConvNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
+ IFNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",9860,"IFNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
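
Each row's get_#Parameters entry is a runnable constructor call, and the #Parameters column is the parameter count that call produces. A minimal sketch of that check (assuming braindecode and torch are installed; the expected count is taken from the ATCNet row above):

    from braindecode.models import ATCNet

    # Constructor arguments copied from the get_#Parameters column.
    model = ATCNet(n_chans=22, n_outputs=4, n_times=1000)
    n_params = sum(p.numel() for p in model.parameters())
    print(n_params)  # expected to match the summary row: 113732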
braindecode/models/tidnet.py CHANGED
@@ -13,6 +13,8 @@ from braindecode.modules import Ensure4d
  class TIDNet(EEGModuleMixin, nn.Module):
      """Thinker Invariance DenseNet model from Kostas et al. (2020) [TIDNet]_.
 
+     :bdg-success:`Convolution`
+
      .. figure:: https://content.cld.iop.org/journals/1741-2552/17/5/056008/revision3/jneabb7a7f1_hr.jpg
          :align: center
          :alt: TIDNet Architecture
braindecode/models/tsinception.py CHANGED
@@ -7,19 +7,21 @@ from __future__ import annotations
  import torch
  import torch.nn as nn
  from einops.layers.torch import Rearrange
- from mne.utils import warn
+ from mne.utils import deprecated, warn
 
  from braindecode.models.base import EEGModuleMixin
 
 
- class TSceptionV1(EEGModuleMixin, nn.Module):
+ class TSception(EEGModuleMixin, nn.Module):
      """TSception model from Ding et al. (2020) from [ding2020]_.
 
+     :bdg-success:`Convolution`
+
      TSception: A deep learning framework for emotion detection using EEG.
 
      .. figure:: https://user-images.githubusercontent.com/58539144/74716976-80415e00-526a-11ea-9433-02ab2b753f6b.PNG
          :align: center
-         :alt: TSceptionV1 Architecture
+         :alt: TSception Architecture
 
      The model consists of temporal and spatial convolutional layers
      (Tception and Sception) designed to learn temporal and spatial features
@@ -281,3 +283,13 @@ class TSceptionV1(EEGModuleMixin, nn.Module):
          activation(),
          nn.AvgPool2d(kernel_size=(1, pool_size), stride=(1, pool_size)),
      )
+
+
+ @deprecated(
+     "`TSceptionV1` was renamed to `TSception` in v1.12; "
+     "this alias will be removed in v1.14."
+ )
+ class TSceptionV1(TSception):
+     """Deprecated alias for TSception."""
+
+     pass
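
The hunk above keeps the old name importable as a deprecated alias. A minimal sketch of the intended behavior (assuming both names stay exported from braindecode.models, with constructor values borrowed from the summary table):

    import warnings

    from braindecode.models import TSception, TSceptionV1

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        model = TSceptionV1(n_chans=62, n_outputs=3, n_times=5120, sfreq=256)

    print(isinstance(model, TSception))  # True: the alias subclasses TSception
    print(caught[0].category.__name__)   # DeprecationWarning from mne's @deprecated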
braindecode/models/usleep.py CHANGED
@@ -15,22 +15,116 @@ class USleep(EEGModuleMixin, nn.Module):
      """
      Sleep staging architecture from Perslev et al. (2021) [1]_.
 
+     :bdg-success:`Convolution`
+
      .. figure:: https://media.springernature.com/full/springer-static/image/art%3A10.1038%2Fs41746-021-00440-5/MediaObjects/41746_2021_440_Fig2_HTML.png
          :align: center
          :alt: USleep Architecture
 
-     U-Net (autoencoder with skip connections) feature-extractor for sleep
-     staging described in [1]_.
+     Figure: U-Sleep consists of an encoder (left) which encodes the input signals into dense feature representations, a decoder (middle) which projects
+     the learned features into the input space to generate a dense sleep stage representation, and finally a specially designed segment
+     classifier (right) which generates sleep stages at a chosen temporal resolution.
+
+     .. rubric:: Architectural Overview
+
+     U-Sleep is a **fully convolutional**, feed-forward encoder-decoder with a *segment classifier* head for
+     time-series **segmentation** (sleep staging). It maps multi-channel PSG (EEG+EOG) to a *dense, high-frequency*
+     per-sample representation, then aggregates it into fixed-length stage labels (e.g., 30 s). The network
+     processes arbitrarily long inputs in **one forward pass** (resampling to 128 Hz), allowing whole-night
+     hypnograms in seconds.
+
+     - (i). :class:`_EncoderBlock` extracts progressively deeper temporal features at lower resolution;
+     - (ii). :class:`_Decoder` upsamples and fuses encoder features via U-Net-style skips to recover a per-sample stage map;
+     - (iii). Segment Classifier mean-pools over the target epoch length and applies two pointwise convs to yield
+       per-epoch probabilities. Integrates into the USleep class.
+
+     .. rubric:: Macro Components
+
+     - Encoder :class:`_EncoderBlock` **(multi-scale temporal feature extractor; downsampling x2 per block)**
+
+       - *Operations.*
+         - **Conv1d** (:class:`torch.nn.Conv1d`) with kernel ``9`` (stride ``1``, no dilation)
+         - **ELU** (:class:`torch.nn.ELU`)
+         - **Batch Norm** (:class:`torch.nn.BatchNorm1d`)
+         - **Max Pool 1d**, :class:`torch.nn.MaxPool1d` (``kernel=2, stride=2``).
+
+       Filters grow with depth by a factor of ``sqrt(2)`` (start ``c_1=5``); each block exposes a **skip**
+       (pre-pooling activation) to the matching decoder block.
+       *Role.* Slow, uniform downsampling preserves early information while expanding the effective temporal
+       context over minutes—foundational for robust cross-cohort staging.
+
+       The number of filters grows with depth (capacity scaling); each block also exposes a **skip** (pre-pool)
+       to the matching decoder block.
+
+       **Rationale.**
+       - Slow, uniform downsampling (x2 each level) preserves information in early layers while expanding the temporal receptive field over the minutes.
+
+     - Decoder :class:`_DecoderBlock` **(progressive upsampling + skip fusion to high-frequency map, 12 blocks; upsampling x2 per block)**
+
+       - *Operations.*
+         - **Nearest-neighbor upsample**, :class:`nn.Upsample` (x2)
+         - **Convolution2d** (k=2), :class:`torch.nn.Conv2d`
+         - ELU, :class:`torch.nn.ELU`
+         - Batch Norm, :class:`torch.nn.BatchNorm2d`
+         - **Concatenate** with the encoder skip at the same temporal scale, :function:`torch.cat`
+         - **Convolution**, :class:`torch.nn.Conv2d`
+         - ELU, :class:`torch.nn.ELU`
+         - Batch Norm, :class:`torch.nn.BatchNorm2d`.
+
+       **Output**: A multi-class, **high-frequency** per-sample representation aligned to the input rate (128 Hz).
+
+     - **Segment Classifier incorporate into :class:`braindecode.models.USleep` (aggregation to fixed epochs)**
+
+       - *Operations.*
+         - **Mean-pool**, :class:`torch.nn.AvgPool2d` per class with kernel = epoch length *i* and stride *i*
+         - **1x1 conv**, :class:`torch.nn.Conv2d`
+         - ELU, :class:`torch.nn.ELU`
+         - **1x1 conv**, :class:`torch.nn.Conv2d` with ``(T, K)`` (epochs x stages).
+
+       **Role**: Learns a **non-linear** weighted combination over each 30-s window (unlike U-Time's linear combiner).
+
+     .. rubric:: Convolutional Details
+
+     - **Temporal (where time-domain patterns are learned).**
+       All convolutions are **1-D along time**; depth (12 levels) plus pooling yields an extensive receptive field
+       (reported sensitivity to ±6.75 min around each epoch; theoretical field ≈ 9.6 min at the deepest layer).
+       The decoder restores sample-level resolution before epoch aggregation.
+
+     - **Spatial (how channels are processed).**
+       Convolutions mix across the *channel* dimension jointly with time (no separate spatial operator). The system
+       is **montage-agnostic** (any reasonable EEG/EOG pair) and was trained across diverse cohorts/protocols,
+       supporting robustness to channel placement and hardware differences.
+
+     - **Spectral (how frequency content is captured).**
+       No explicit Fourier/wavelet transform is used; the **stack of temporal convolutions** acts as a learned
+       filter bank whose effective bandwidth grows with depth. The high-frequency decoder output (128 Hz)
+       retains fine temporal detail for the segment classifier.
+
+
+     .. rubric:: Attention / Sequential Modules
+
+     U-Sleep contains **no attention or recurrent units**; it is a *pure* feed-forward, fully convolutional
+     segmentation network inspired by U-Net/U-Time, favoring training stability and cross-dataset portability.
+
+
+     .. rubric:: Additional Mechanisms
+
+     - **U-Net lineage with task-specific head.** U-Sleep extends U-Time by being **deeper** (12 vs. 4 levels),
+       switching ReLU→**ELU**, using uniform pooling (2) at all depths, and replacing the linear combiner with a
+       **two-layer** pointwise head—improving capacity and resilience across datasets.
+     - **Arbitrary-length inference.** Thanks to full convolutionality and tiling-free design, entire nights can be
+       staged in a single pass on commodity hardware. Inputs shorter than ≈ 17.5 min may reduce performance by
+       limiting long-range context.
+     - **Complexity scaling (alpha).** Filter counts can be adjusted by a global **complexity factor** to trade accuracy
+       and memory (as described in the paper's topology table).
+
 
-     For the encoder ('down'):
-     - the temporal dimension shrinks (via maxpooling in the time-domain)
-     - the spatial dimension expands (via more conv1d filters in the time-domain)
+     .. rubric:: Usage and Configuration
 
-     For the decoder ('up'):
-     - the temporal dimension expands (via upsampling in the time-domain)
-     - the spatial dimension shrinks (via fewer conv1d filters in the time-domain)
+     - **Practice.** Resample PSG to **128 Hz** and provide at least two channels (one EEG, one EOG). Choose epoch
+       length *i* (often 30 s); ensure windows long enough to exploit the model's receptive field (e.g., training on
+       17.5 min chunks).
 
-     Both do so at exponential rates.
 
      Parameters
      ----------
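
Following the usage notes in the new docstring, a minimal sketch of running the model on synthetic data (constructor values mirror the USleep row of summary.csv; the exact output shape depends on the epoch configuration, so the comment below is an assumption, not a guarantee):

    import torch

    from braindecode.models import USleep

    model = USleep(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)
    x = torch.randn(8, 2, 3000)  # (batch, channels, samples), synthetic signals
    with torch.no_grad():
        scores = model(x)
    print(scores.shape)  # per-stage class scores for each input window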
braindecode/models/util.py CHANGED
@@ -22,6 +22,8 @@ def _init_models_dict():
          issubclass(m[1], models.base.EEGModuleMixin)
          and m[1] != models.base.EEGModuleMixin
      ):
+         if m[1].__name__ == "EEGNetv4":
+             continue
          models_dict[m[0]] = m[1]
 
 
@@ -55,9 +57,7 @@ models_mandatory_parameters = [
      ("EEGInceptionERP", ["n_chans", "n_outputs", "n_times", "sfreq"], None),
      ("EEGInceptionMI", ["n_chans", "n_outputs", "n_times", "sfreq"], None),
      ("EEGITNet", ["n_chans", "n_outputs", "n_times"], None),
-     ("EEGNetv1", ["n_chans", "n_outputs", "n_times"], None),
-     ("EEGNetv4", ["n_chans", "n_outputs", "n_times"], None),
-     ("EEGResNet", ["n_chans", "n_outputs", "n_times"], None),
+     ("EEGNet", ["n_chans", "n_outputs", "n_times"], None),
      ("ShallowFBCSPNet", ["n_chans", "n_outputs", "n_times"], None),
      (
          "SleepStagerBlanco2020",
@@ -66,7 +66,7 @@ models_mandatory_parameters = [
      ),
      ("SleepStagerChambon2018", ["n_chans", "n_outputs", "n_times", "sfreq"], None),
      (
-         "SleepStagerEldele2021",
+         "AttnSleep",
          ["n_outputs", "n_times", "sfreq"],
          dict(sfreq=100.0, n_times=3000, chs_info=[dict(ch_name="C1", kind="eeg")]),
      ), # 1 channel
@@ -79,7 +79,7 @@ models_mandatory_parameters = [
      ("SPARCNet", ["n_chans", "n_outputs", "n_times"], None),
      ("ContraWR", ["n_chans", "n_outputs", "sfreq", "n_times"], dict(sfreq=200.0)),
      ("EEGNeX", ["n_chans", "n_outputs", "n_times"], None),
-     ("TSceptionV1", ["n_chans", "n_outputs", "n_times", "sfreq"], dict(sfreq=200.0)),
+     ("TSception", ["n_chans", "n_outputs", "n_times", "sfreq"], dict(sfreq=200.0)),
      ("EEGTCNet", ["n_chans", "n_outputs", "n_times"], None),
      ("SyncNet", ["n_chans", "n_outputs", "n_times"], None),
      ("MSVTNet", ["n_chans", "n_outputs", "n_times"], None),
braindecode/preprocessing/preprocess.py CHANGED
@@ -55,15 +55,15 @@ class Preprocessor(object):
 
      Parameters
      ----------
-     fn: str or callable
+     fn : str or callable
          If str, the Raw/Epochs object must have a method with that name.
          If callable, directly apply the callable to the object.
      apply_on_array : bool
-         Ignored if `fn` is not a callable. If True, the `apply_function` of Raw
-         and Epochs object will be used to run `fn` on the underlying arrays
-         directly. If False, `fn` must directly modify the Raw or Epochs object.
-     kwargs:
-         Keyword arguments to be forwarded to the MNE function.
+         Ignored if ``fn`` is not a callable. If True, the ``apply_function`` of Raw
+         and Epochs will be used to run ``fn`` on the underlying arrays directly.
+         If False, ``fn`` must directly modify the Raw or Epochs object.
+     **kwargs : dict
+         Keyword arguments forwarded to the MNE function or callable.
      """
 
      def __init__(self, fn: Callable | str, *, apply_on_array: bool = True, **kwargs):
@@ -117,34 +117,28 @@ def preprocess(
 
      Parameters
      ----------
-     concat_ds: BaseConcatDataset
-         A concat of BaseDataset or WindowsDataset datasets to be preprocessed.
-     preprocessors: list(Preprocessor)
-         List of Preprocessor objects to apply to the dataset.
+     concat_ds : BaseConcatDataset
+         A concat of ``BaseDataset`` or ``WindowsDataset`` to be preprocessed.
+     preprocessors : list of Preprocessor
+         Preprocessor objects to apply to each dataset.
      save_dir : str | None
-         If a string, the preprocessed data will be saved under the specified
-         directory and the datasets in ``concat_ds`` will be reloaded with
-         `preload=False`.
+         If provided, save preprocessed data under this directory and reload
+         datasets in ``concat_ds`` with ``preload=False``.
      overwrite : bool
-         When `save_dir` is provided, controls whether to delete the old
-         subdirectories that will be written to under `save_dir`. If False and
-         the corresponding subdirectories already exist, a ``FileExistsError``
-         will be raised.
+         When ``save_dir`` is provided, controls whether to delete the old
+         subdirectories that will be written to under ``save_dir``. If False and
+         the corresponding subdirectories already exist, a ``FileExistsError`` is raised.
      n_jobs : int | None
-         Number of jobs for parallel execution. See `joblib.Parallel` for
-         a more detailed explanation.
+         Number of jobs for parallel execution. See ``joblib.Parallel`` for details.
      offset : int
-         If provided, the integer is added to the id of the dataset in the
-         concat. This is useful in the setting of very large datasets, where
-         one dataset has to be processed and saved at a time to account for
-         its original position.
+         Integer added to the dataset id in the concat. Useful when processing
+         and saving very large datasets in chunks to preserve original positions.
      copy_data : bool | None
-         Whether the data passed to the different jobs should be copied or
-         passed by reference.
+         Whether the data passed to parallel jobs should be copied or passed by reference.
 
      Returns
      -------
-     BaseConcatDataset:
+     BaseConcatDataset
          Preprocessed dataset.
      """
      # In case of serialization, make sure directory is available before
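
Read together, the two docstrings describe the usual pipeline: wrap an MNE method name or a callable in a Preprocessor, then apply the list with preprocess(). A minimal sketch (the dataset name and filter band are illustrative, not taken from this diff):

    from braindecode.datasets import MOABBDataset
    from braindecode.preprocessing import Preprocessor, preprocess

    ds = MOABBDataset(dataset_name="BNCI2014_001", subject_ids=[1])

    preprocessors = [
        Preprocessor("pick_types", eeg=True, meg=False, stim=False),  # str -> Raw method
        Preprocessor("filter", l_freq=4.0, h_freq=38.0),  # kwargs forwarded to Raw.filter
    ]
    preprocess(ds, preprocessors, n_jobs=-1)  # returns the preprocessed BaseConcatDataset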
braindecode/version.py CHANGED
@@ -1 +1 @@
- __version__ = "1.2.0.dev184328194"
+ __version__ = "1.3.0.dev174777731"
{braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: braindecode
- Version: 1.2.0.dev184328194
+ Version: 1.3.0.dev174777731
  Summary: Deep learning software to decode EEG, ECG or MEG signals
  Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
  Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
@@ -17,7 +17,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >3.10
+ Requires-Python: >=3.10
  Description-Content-Type: text/x-rst
  License-File: LICENSE.txt
  License-File: NOTICE.txt
@@ -49,6 +49,10 @@ Requires-Dist: mypy; extra == "tests"
  Provides-Extra: docs
  Requires-Dist: sphinx_gallery; extra == "docs"
  Requires-Dist: sphinx_rtd_theme; extra == "docs"
+ Requires-Dist: sphinx-autodoc-typehints; extra == "docs"
+ Requires-Dist: sphinx-autobuild; extra == "docs"
+ Requires-Dist: sphinxcontrib-bibtex; extra == "docs"
+ Requires-Dist: sphinx_sitemap; extra == "docs"
  Requires-Dist: pydata_sphinx_theme; extra == "docs"
  Requires-Dist: numpydoc; extra == "docs"
  Requires-Dist: memory_profiler; extra == "docs"
@@ -59,6 +63,7 @@ Requires-Dist: lightning; extra == "docs"
  Requires-Dist: seaborn; extra == "docs"
  Requires-Dist: pre-commit; extra == "docs"
  Requires-Dist: openneuro-py; extra == "docs"
+ Requires-Dist: plotly; extra == "docs"
  Provides-Extra: all
  Requires-Dist: braindecode[docs,moabb,tests]; extra == "all"
  Dynamic: license-file
{braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev174777731.dist-info}/RECORD CHANGED
@@ -3,7 +3,7 @@ braindecode/classifier.py,sha256=k9vSCtfQbld0YVleDi5rrrmk6k_k5JYEPPBYcNxYjZ8,980
  braindecode/eegneuralnet.py,sha256=dz8k_-2jV7WqkaX4bQG-dmr-vRT7ZtOwJqomXyC9PTw,15287
  braindecode/regressor.py,sha256=VLfrpiXklwI4onkwue3QmzlBWcvspu0tlrLo9RT1Oiw,9375
  braindecode/util.py,sha256=J-tBcDJNlMTIFW2mfOy6Ko0nsgdP4obRoEVDeg2rFH0,12686
- braindecode/version.py,sha256=Adl2q0noMgIED1dlngWz_nvDbzU6GpgOYSGTS9Fs6io,35
+ braindecode/version.py,sha256=ITOwYdDdkdORjVSxO76b7RvrkPrF0sKdf_mXdBTDBMw,35
  braindecode/augmentation/__init__.py,sha256=LG7ONqCufYAF9NZt8POIp10lYXb8iSueYkF-CWGK2Ls,1001
  braindecode/augmentation/base.py,sha256=gg7wYsVfa9jfqBddtE03B5ZrPHFFmPl2sa3LOrRnGfo,7325
  braindecode/augmentation/functional.py,sha256=ygkMNEFHaUdRQfk7meMML19FnM406Uf34h-ztKXdJwM,37978
@@ -13,6 +13,7 @@ braindecode/datasets/base.py,sha256=ED8RQWusMyWf0T7b_HXwouR2Ax47qppEc506AlSzBt0,
  braindecode/datasets/bbci.py,sha256=BC9o1thEyYBREAo930O7zZz3xZB-l4Odt5j8E_1huXI,19277
  braindecode/datasets/bcicomp.py,sha256=ER_XmqxhpoO2FWELMesQXQ40OTe7BXoy7nYDSiZG9kE,7556
  braindecode/datasets/bids.py,sha256=4asq1HyQHgJjwW7w-GMlvTVQhi-hR2HWLJ8Z__UrUS4,8846
+ braindecode/datasets/experimental.py,sha256=Z_uzMNA875-l878LAv7bWiWYJX3QAefmb5quBkcPp7M,8514
  braindecode/datasets/mne.py,sha256=Dg6RZAAwd8TVGrvLOPF5B_JrbyGUWg52vWmn6fLMOQM,6135
  braindecode/datasets/moabb.py,sha256=JmBcFV7QJT8GCgLNNKWgxJVnEVnO5wd9U_uiIqTIxDM,7091
  braindecode/datasets/nmt.py,sha256=E4T8OYBEwWRSjh7VFzmyxaZbf5ufFVEBYYmQEd1ghUU,10430
@@ -26,25 +27,25 @@ braindecode/datautil/util.py,sha256=ZfDoxLieKsgI8xcWQqebV-vJ5pJYRvRRHkEwhwpgoKU,
  braindecode/functional/__init__.py,sha256=JPUDFeKtfogEzfrwPaZRBmxexPjBw7AglYMlImaAnWc,413
  braindecode/functional/functions.py,sha256=CoEweM6YLhigx0tNmmz6yAc8iQ078sTFY2GeCjK5fFs,8622
  braindecode/functional/initialization.py,sha256=BUSC7y2TMsfShpMYBVwm3xg3ODFqWp-STH7yD4sn8zk,1388
- braindecode/models/__init__.py,sha256=xv1QPELZxocPgbc_mz-eYM5w08ZDNOsDV4pOnIFhUww,2551
- braindecode/models/atcnet.py,sha256=PhDJl6nBChButabjsmLz_heRcGFCCMKoeUt7k7neNzs,24483
- braindecode/models/attentionbasenet.py,sha256=1uwrtsdEGiBwokkO8A_2SR5zapOTQUBZd4q7hIpR0cw,23359
+ braindecode/models/__init__.py,sha256=v2Pn0H-rM_9xr1EEoKIFygmhbS9r52qh8XwFzXuhK70,2455
+ braindecode/models/atcnet.py,sha256=jA_18BOaasmiqGbLJOvfBY5q2xHtKdoRFKzN_aqpDoQ,32107
+ braindecode/models/attentionbasenet.py,sha256=AK78VvwrZXyJY20zadzDUHl17C-5zcWCd5xPRN7Lr4o,30385
+ braindecode/models/attn_sleep.py,sha256=m6sdFfD4en2hHf_TpotLPC1hVweJcYZvjgf12bV5FZg,17822
  braindecode/models/base.py,sha256=9icrWNZBGbh_VLyB9m8g_K1QyK7s3mh8X-hJ29gEbWs,10802
  braindecode/models/biot.py,sha256=T4PymX3penMJcrdfb5Nq6B3P-jyP2laAIu_R9o3uCXo,17512
  braindecode/models/contrawr.py,sha256=eeR_ik4gNZ3rJLM6Mw9gJ2gTMkZ8CU8C4rN_GQMQTAE,10044
- braindecode/models/ctnet.py,sha256=-J9QtUM8kcntz_xinfuBBvwDMECHiMPMcr2MS4GDPEY,17308
- braindecode/models/deep4.py,sha256=YJQUw-0EuFUi4qjm8caJGB8wRM_aeJa5X_d8jrGaQAI,14588
- braindecode/models/deepsleepnet.py,sha256=RrciuVJtZ-fhiUl-yLPfK2FP-G29V5Wor6pPlrMHQWQ,9218
- braindecode/models/eegconformer.py,sha256=_Y0SXprBD74zD8nKPcS9HQ6PoWzfpu-VCY7Tj6R7Xrs,11612
- braindecode/models/eeginception_erp.py,sha256=mwh3rGSHAJVvnbOlYTuWWkKxlmFAdAXBNCrq4IPgOS4,11408
- braindecode/models/eeginception_mi.py,sha256=aKJRFuYrpbcRbmmT2xVghKbK8pnl7fzu5hrV0ybRKso,12424
+ braindecode/models/ctnet.py,sha256=ce5F31q2weBKvg7PL80iDm7za9fhGaCFvNfHoJW_dtg,17315
+ braindecode/models/deep4.py,sha256=-s-R3H7so2xlSiPsU226eSwscv1X9xJMYLm3LhZ3mSU,14645
+ braindecode/models/deepsleepnet.py,sha256=wGSAXW73Ga1-HFbn7kXiLeGsJceiqZyMLZnX2UZZXWw,15207
+ braindecode/models/eegconformer.py,sha256=rxMAmqErDVLq7nS77CnTtpcC3C2OR_EoZ8-jG-dKP9I,17433
+ braindecode/models/eeginception_erp.py,sha256=FYXoM-u4kOodMzGgvKDn7IwJwHl9Z0iiWx9bVHiO9EY,16324
+ braindecode/models/eeginception_mi.py,sha256=VoWtsaWj1xQ4FlrvCbnPvo8eosufYUmTrL4uvFtqKcg,12456
  braindecode/models/eegitnet.py,sha256=feXFmPCd-Ejxt7jgWPen1Ag0-oSclDVQai0Atwu9d_A,9827
  braindecode/models/eegminer.py,sha256=ouKZah9Q7_sxT7DJJMcPObwVxNQE87sEljJg6QwiQNw,9847
- braindecode/models/eegnet.py,sha256=1ZAG0KLDedkodDfqgnGGsoZj6iuU55kGmBlyQo1b47w,16284
- braindecode/models/eegnex.py,sha256=KNJIh8pFNhY087Bey2OPzDD4Uqw9pS6UkwMjnOngBzg,8497
- braindecode/models/eegresnet.py,sha256=cqWOSGqfJN_dNYUU9l8nYd_S3T1N-UX5-encKQzfBlg,12057
- braindecode/models/eegsimpleconv.py,sha256=sHpK-7ZGOCMuXsdkSVuarFTd1T0jMJUP_xwXP3gxQwc,7268
- braindecode/models/eegtcnet.py,sha256=np-93Ttctp2uaEYpMrfXfH5bJmCOUZZHLjv8GJEEym4,10830
+ braindecode/models/eegnet.py,sha256=i5HzBKTd82fTlKDfB42uc14HpDYxN29SGPfCa4ON5gk,13686
+ braindecode/models/eegnex.py,sha256=eahHolFl15LwNWeC5qjQqUGqURibQZIV425rI1p-dG8,13604
+ braindecode/models/eegsimpleconv.py,sha256=6V5ZQNWijmd3-2wv7lJB_HGBS3wHWWVrKoNIeWTXu-w,7300
+ braindecode/models/eegtcnet.py,sha256=Y53uJEX_hoB6eHCew9SIfzNxCYea8UhljDARJTk-Tq8,10837
  braindecode/models/fbcnet.py,sha256=RBCLOaiUvivfsT2mq6FN0Kp1-rR3iB0ElzVpHxRl4oI,7486
  braindecode/models/fblightconvnet.py,sha256=d5MwhawhkjilAMo0ckaYMxJhdGMEuorWgHX-TBgwv6s,11041
  braindecode/models/fbmsnet.py,sha256=9bZn2_n1dTrI1Qh3Sz9zMZnH_a-Yq-13UHYSmF6r_UE,11659
@@ -52,21 +53,20 @@ braindecode/models/hybrid.py,sha256=hA8jwD3_3LL71BxUjRM1dkhqlHU9E9hjuDokh-jBq-4,
  braindecode/models/ifnet.py,sha256=Y2bwfko3SDjD74AzgUEzgMhKJFGCCw_Q_Noh5VONEjQ,15137
  braindecode/models/labram.py,sha256=vcrpwiu4F-djtIPscFbtP2Y0jTosyR_cXnOMQQRGPLw,41798
  braindecode/models/msvtnet.py,sha256=hxeCLkHS6w2w89YlLfEPCyQ4XQQpt45bEYPiQJ9SFzY,12642
- braindecode/models/sccnet.py,sha256=baGsNpVRdyWzbkTizOthJoJGejLb8BxMpN9ODwZinio,7919
- braindecode/models/shallow_fbcsp.py,sha256=-sL6XCmCUZVhKKrC84-KWgwhWKQQvev1oNSmH_d6FA4,7499
+ braindecode/models/sccnet.py,sha256=C7vdwIR5cI6wJCl5f8TnGQG6qinq21y4HG6l-D5AwbY,11971
+ braindecode/models/shallow_fbcsp.py,sha256=7U07DJBrm2JHV8v5ja-xuE5-IH5tfmryhJtrfO1n4jk,7531
  braindecode/models/signal_jepa.py,sha256=UeSkeAM3Qmx8bbAqHCj5nP-PtZM00_5SGA8ibo9mptc,37079
  braindecode/models/sinc_shallow.py,sha256=Ilv8K1XhMGiRTBtQdq7L595i6cEFYOBe0_UDv-LqL7s,11907
- braindecode/models/sleep_stager_blanco_2020.py,sha256=qPKMDLuv4J7et4dZHyTe-j0oB6ESYn9mA_aW7RMC-rU,6002
- braindecode/models/sleep_stager_chambon_2018.py,sha256=62x2Rdjd5UZDX8YlnfAtdRCrjLsPvPpnUweGElZLdkw,5213
- braindecode/models/sleep_stager_eldele_2021.py,sha256=-4ISuznykDy9ZFzUM-OeiGCwmgM3U-LuyoDSrhPbRDw,17555
- braindecode/models/sparcnet.py,sha256=eZMoJOxlcIyHPdQiX7KXUKuUBlAWkTwsXNWmNma_KAI,13941
- braindecode/models/summary.csv,sha256=l7HYYwv3Z69JRPVIhVq-wr_nC1J1KIz6IGw_zeRSk58,6110
+ braindecode/models/sleep_stager_blanco_2020.py,sha256=vXulnDYutEFLM0UPXyAI0YIj5QImUMVEmYZb78j34H8,6034
+ braindecode/models/sleep_stager_chambon_2018.py,sha256=8w8IR2PsfG0jSc3o0YVopgHpOvCHNIuMi7-QRJOYEW4,5245
+ braindecode/models/sparcnet.py,sha256=MG1OB91guI7ssKRk8GvWlzUvaxo_otaYnbEGzNUZVyg,13973
+ braindecode/models/summary.csv,sha256=NfrmnjyfDmWVe2zyNqgczEQcLI910BOS4sICtcKS3gc,6765
  braindecode/models/syncnet.py,sha256=nrWJC5ijCSWKVZyRn-dmOuc1t5vk2C6tx8U3U4j5d5Y,8362
  braindecode/models/tcn.py,sha256=SQu56H9zdbcbbDIXZVgZtJg7es8CRAJ7z-IBnmf4UWM,8158
- braindecode/models/tidnet.py,sha256=k7Q0yAnEBmq1sqhsvoV4-g8wfYSUQ-C3iYxfLp5m8xQ,11805
- braindecode/models/tsinception.py,sha256=EcfLDDJXZloh_vrKRuxAHYRZ1EVWlEKHNXqybTRrTbQ,10116
- braindecode/models/usleep.py,sha256=dFh3KiZITu13gMxcbPGoK4hq2ySDWzVSCQXkj1006w0,11605
- braindecode/models/util.py,sha256=VrhwG1YBGwKohCej6TmhrNAIoleQHRu3YdiBPuHFY_E,5302
+ braindecode/models/tidnet.py,sha256=HSUL1al6gaRbJ-BRYAAs4KDvLuKEvh0NnBfAsPeWMpM,11837
+ braindecode/models/tsinception.py,sha256=nnQxzpqRy9FPuN5xgh9fNQ386VbreQ_nZBSFNkSfal0,10356
+ braindecode/models/usleep.py,sha256=5uztUHX70T_LurqRob_XmVnKkZDwt74x2Iz181M7s54,17233
+ braindecode/models/util.py,sha256=VZGVPhUSsoP47pta0_UhC2-g5n5-EFZAW93ZVccrEHU,5232
  braindecode/modules/__init__.py,sha256=PD2LpeSHWW_MgEef7-G8ief5gheGObzsIoacchxWuyA,1756
  braindecode/modules/activation.py,sha256=lTO2IjZWBDeXZ4ZVDgLmTDmxHdqyAny3Fsy07HY9tmQ,1466
  braindecode/modules/attention.py,sha256=ISE11jXAvMqKpawZilg8i7lDX5mkuvpEplrh_CtGEkk,24102
@@ -81,7 +81,7 @@ braindecode/modules/util.py,sha256=tVXEhzeTsYrr_wZ5CiXaq3VYGtC5TmGEEW2hMYjTQAE,2
  braindecode/modules/wrapper.py,sha256=Z-aZ4wxA0psYefMOfj03r7D1XjD4az6GpZpaQoDPJv0,2421
  braindecode/preprocessing/__init__.py,sha256=V0iwdzb6DzpUaCabA7I6HmOqXK_XvTbpP5HaEduSJ4s,776
  braindecode/preprocessing/mne_preprocess.py,sha256=_Jczaitqbx16utsUOhnonEcoExf6jPsWNwVOVvoKFfU,2210
- braindecode/preprocessing/preprocess.py,sha256=-9IKjb0THq36m54TK-YRzV18wIkxmVgTcGO2sEH6q98,17665
+ braindecode/preprocessing/preprocess.py,sha256=gg52Uqo23yqXBckXrph_AFWCSEmrA7JdA54vcCwbrKE,17489
  braindecode/preprocessing/windowers.py,sha256=6w6mOnroGWnV7tS23UagZZepswaxaL00S45Jr5AViRE,36551
  braindecode/samplers/__init__.py,sha256=TLuO6gXv2WioJdX671MI_CHVSsOfbjnly1Xv9K3_WdA,452
  braindecode/samplers/base.py,sha256=z_Txp9cEwUmIBL0J6FPJbx1cMSsU9l9mxymRCGqNss0,15111
@@ -93,9 +93,9 @@ braindecode/training/scoring.py,sha256=WRkwqbitA3m_dzRnGp2ZIZPge5Nhx9gAEQhIHzeH4
  braindecode/visualization/__init__.py,sha256=4EER_xHqZIDzEvmgUEm7K1bgNKpyZAIClR9ZCkMuY4M,240
  braindecode/visualization/confusion_matrices.py,sha256=qIWMLEHow5CJ7PhGggD8mnD55Le6xhma9HSzt4R33fc,9509
  braindecode/visualization/gradients.py,sha256=KZo-GA0uwiwty2_94j2IjmCR2SKcfPb1Bi3sQq7vpTk,2170
- braindecode-1.2.0.dev184328194.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
- braindecode-1.2.0.dev184328194.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
- braindecode-1.2.0.dev184328194.dist-info/METADATA,sha256=PgPq5CmBC6TDByTBtGn3Gtf6yaAJW96CZ_3J5BgGhDc,6883
- braindecode-1.2.0.dev184328194.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- braindecode-1.2.0.dev184328194.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
- braindecode-1.2.0.dev184328194.dist-info/RECORD,,
+ braindecode-1.3.0.dev174777731.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
+ braindecode-1.3.0.dev174777731.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
+ braindecode-1.3.0.dev174777731.dist-info/METADATA,sha256=5ZzBglJFkqmqKFAwR28trrqQkBwCyUJDN4oWY5tetcM,7129
+ braindecode-1.3.0.dev174777731.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ braindecode-1.3.0.dev174777731.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
+ braindecode-1.3.0.dev174777731.dist-info/RECORD,,