braindecode 1.3.0.dev176728557__py3-none-any.whl → 1.3.0.dev177509039__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (42)
  1. braindecode/augmentation/functional.py +154 -54
  2. braindecode/augmentation/transforms.py +2 -2
  3. braindecode/datasets/base.py +18 -17
  4. braindecode/datasets/bcicomp.py +1 -1
  5. braindecode/datasets/sleep_physio_challe_18.py +2 -1
  6. braindecode/datautil/serialization.py +11 -6
  7. braindecode/eegneuralnet.py +2 -0
  8. braindecode/functional/functions.py +6 -2
  9. braindecode/functional/initialization.py +2 -3
  10. braindecode/models/__init__.py +6 -0
  11. braindecode/models/atcnet.py +32 -33
  12. braindecode/models/attentionbasenet.py +39 -32
  13. braindecode/models/base.py +280 -2
  14. braindecode/models/bendr.py +469 -0
  15. braindecode/models/biot.py +3 -1
  16. braindecode/models/ctnet.py +6 -3
  17. braindecode/models/deepsleepnet.py +27 -18
  18. braindecode/models/eegconformer.py +2 -2
  19. braindecode/models/eeginception_erp.py +31 -25
  20. braindecode/models/eegnet.py +5 -4
  21. braindecode/models/labram.py +188 -84
  22. braindecode/models/patchedtransformer.py +640 -0
  23. braindecode/models/signal_jepa.py +109 -27
  24. braindecode/models/sinc_shallow.py +10 -9
  25. braindecode/models/sstdpn.py +869 -0
  26. braindecode/models/summary.csv +9 -6
  27. braindecode/models/usleep.py +26 -21
  28. braindecode/models/util.py +3 -0
  29. braindecode/modules/attention.py +10 -10
  30. braindecode/modules/blocks.py +3 -3
  31. braindecode/modules/filter.py +2 -3
  32. braindecode/modules/layers.py +18 -17
  33. braindecode/preprocessing/preprocess.py +11 -2
  34. braindecode/preprocessing/windowers.py +2 -2
  35. braindecode/samplers/base.py +8 -8
  36. braindecode/version.py +1 -1
  37. {braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/METADATA +4 -2
  38. {braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/RECORD +42 -39
  39. {braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/WHEEL +0 -0
  40. {braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/licenses/LICENSE.txt +0 -0
  41. {braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/licenses/NOTICE.txt +0 -0
  42. {braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/top_level.txt +0 -0
braindecode/models/summary.csv CHANGED
@@ -2,7 +2,7 @@
  ATCNet,General,Classification,250,"n_chans, n_outputs, n_times",113732,"ATCNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Recurrent,Small Attention"
  AttentionBaseNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",3692,"AttentionBaseNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Small Attention"
  BDTCN,Normal Abnormal,Classification,100,"n_chans, n_outputs, n_times",456502,"BDTCN(n_chans=21, n_outputs=2, n_times=6000, n_blocks=5, n_filters=55, kernel_size=16)","Convolution,Recurrent"
- BIOT,"Sleep Staging, Epilepsy",Classification,200,"n_chans, n_outputs",3183879,"BIOT(n_chans=2, n_outputs=5, n_times=6000)","Large Language Model"
+ BIOT,"Sleep Staging, Epilepsy",Classification,200,"n_chans, n_outputs",3183879,"BIOT(n_chans=2, n_outputs=5, n_times=6000)","Large Brain Model"
  ContraWR,Sleep Staging,"Classification, Embedding",125,"n_chans, n_outputs, sfreq",1160165,"ContraWR(n_chans=2, n_outputs=5, n_times=3750, emb_size=256, sfreq=125)",Convolution
  CTNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",26900,"CTNet(n_chans=22, n_outputs=4, n_times=1000, n_filters_time=8, kernel_size=16, heads=2, emb_size=16)","Convolution,Small Attention"
  Deep4Net,General,Classification,250,"n_chans, n_outputs, n_times",282879,"Deep4Net(n_chans=22, n_outputs=4, n_times=1000)","Convolution"
@@ -16,13 +16,13 @@ EEGNeX,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times",55940,"EEG
  EEGMiner,Emotion Recognition,Classification,128,"n_chans, n_outputs, n_times, sfreq",7572,"EEGMiner(n_chans=62, n_outputs=2, n_times=2560, sfreq=128)","Convolution,Interpretability"
  EEGSimpleConv,Motor Imagery,Classification,80,"n_chans, n_outputs, sfreq",730404,"EEGSimpleConv(n_chans=22, n_outputs=4, n_times=320, sfreq=80)","Convolution"
  EEGTCNet,Motor Imagery,Classification,250,"n_chans, n_outputs",4516,"EEGTCNet(n_chans=22, n_outputs=4, n_times=1000, kern_length=32)","Convolution,Recurrent"
- Labram,General,"Classification, Embedding",200,"n_chans, n_outputs, n_times",5866180,"Labram(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,Large Language Model"
+ Labram,General,"Classification, Embedding",200,"n_chans, n_outputs, n_times",5866180,"Labram(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,Large Brain Model"
  MSVTNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",75494," MSVTNet(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Recurrent,Small Attention"
  SCCNet,Motor Imagery,Classification,125,"n_chans, n_outputs, n_times, sfreq",12070,"SCCNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=125)","Convolution"
- SignalJEPA,"Motor Imagery, ERP, SSVEP",Embedding,128,"n_times, chs_info",3456882,"SignalJEPA(n_times=512, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])","Convolution,Channel,Large Language Model"
- SignalJEPA_Contextual,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",3459184,"SignalJEPA_Contextual(n_outputs=2, input_window_seconds=4.19, sfreq=128, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])","Convolution,Channel,Large Language Model"
- SignalJEPA_PostLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_chans, n_outputs, n_times",16142,"SignalJEPA_PostLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)","Convolution,Channel,Large Language Model"
- SignalJEPA_PreLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",16142,"SignalJEPA_PreLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)","Convolution,Channel,Large Language Model"
+ SignalJEPA,"Motor Imagery, ERP, SSVEP",Embedding,128,"n_times, chs_info",3456882,"SignalJEPA(n_times=512, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])","Convolution,Channel,Large Brain Model"
+ SignalJEPA_Contextual,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",3459184,"SignalJEPA_Contextual(n_outputs=2, input_window_seconds=4.19, sfreq=128, chs_info=Lee2019_MI().get_data(subjects=[1])[1]['0']['1train'].info[""chs""][:62])","Convolution,Channel,Large Brain Model"
+ SignalJEPA_PostLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_chans, n_outputs, n_times",16142,"SignalJEPA_PostLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)","Convolution,Channel,Large Brain Model"
+ SignalJEPA_PreLocal,"Motor Imagery, ERP, SSVEP",Classification,128,"n_outputs, n_times, chs_info",16142,"SignalJEPA_PreLocal(n_chans=62, n_outputs=2, input_window_seconds=4.19, sfreq=128)","Convolution,Channel,Large Brain Model"
  SincShallowNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",21892,"SincShallowNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,Interpretability"
  ShallowFBCSPNet,General,Classification,250,"n_chans, n_outputs, n_times",46084,"ShallowFBCSPNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution"
  SleepStagerBlanco2020,Sleep Staging,Classification,100,"n_chans, n_outputs, n_times",2845,"SleepStagerBlanco2020(n_chans=2, n_outputs=5, n_times=3000, sfreq=100)","Convolution"
@@ -37,3 +37,6 @@ FBCNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",118
  FBMSNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",16231,"FBMSNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
  FBLightConvNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",6596,"FBLightConvNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
  IFNet,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times, sfreq",9860,"IFNet(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Convolution,FilterBank"
+ PBT,General,Classification,250,"n_chans, n_outputs, n_times",818948,"PBT(n_chans=22, n_outputs=4, n_times=1000, sfreq=250)","Large Brain Model"
+ SSTDPN,Motor Imagery,Classification,250,"n_chans, n_outputs, n_times",19502,"SSTDPN(n_chans=22, n_outputs=4, n_times=1000)","Convolution,Small Attention"
+ BENDR,General,"Classification,Embedding",250,"n_chans, n_times, n_outputs",157141049,"BENDR(n_chans=22, n_outputs=4, n_times=1000)","Large Brain Model,Convolution"
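Note: the three new rows can be exercised with the constructor calls given verbatim in the CSV. A minimal smoke-test sketch, assuming the new classes are exported from ``braindecode.models`` (as the ``models/__init__.py`` change suggests) and follow braindecode's usual ``(batch, n_chans, n_times)`` input convention::

    import torch
    from braindecode.models import BENDR, PBT, SSTDPN  # assumed export names

    # Constructor calls taken from the summary.csv rows above.
    models = [
        PBT(n_chans=22, n_outputs=4, n_times=1000, sfreq=250),
        SSTDPN(n_chans=22, n_outputs=4, n_times=1000),
        BENDR(n_chans=22, n_outputs=4, n_times=1000),
    ]
    x = torch.randn(8, 22, 1000)  # (batch, n_chans, n_times)
    for model in models:
        out = model(x)
        print(type(model).__name__, tuple(out.shape))  # expected: (8, 4)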
braindecode/models/usleep.py CHANGED
@@ -62,43 +62,48 @@ class USleep(EEGModuleMixin, nn.Module):
  - Decoder :class:`_DecoderBlock` **(progressive upsampling + skip fusion to high-frequency map, 12 blocks; upsampling x2 per block)**

  - *Operations.*
- - **Nearest-neighbor upsample**, :class:`nn.Upsample` (x2)
- - **Convolution2d** (k=2), :class:`torch.nn.Conv2d`
- - ELU, :class:`torch.nn.ELU`
- - Batch Norm, :class:`torch.nn.BatchNorm2d`
- - **Concatenate** with the encoder skip at the same temporal scale, :function:`torch.cat`
- - **Convolution**, :class:`torch.nn.Conv2d`
- - ELU, :class:`torch.nn.ELU`
- - Batch Norm, :class:`torch.nn.BatchNorm2d`.
+
+ - **Nearest-neighbor upsample**, :class:`nn.Upsample` (x2)
+ - **Convolution2d** (k=2), :class:`torch.nn.Conv2d`
+ - ELU, :class:`torch.nn.ELU`
+ - Batch Norm, :class:`torch.nn.BatchNorm2d`
+ - **Concatenate** with the encoder skip at the same temporal scale, ``torch.cat``
+ - **Convolution**, :class:`torch.nn.Conv2d`
+ - ELU, :class:`torch.nn.ELU`
+ - Batch Norm, :class:`torch.nn.BatchNorm2d`.

  **Output**: A multi-class, **high-frequency** per-sample representation aligned to the input rate (128 Hz).

  - **Segment Classifier incorporate into :class:`braindecode.models.USleep` (aggregation to fixed epochs)**

  - *Operations.*
- - **Mean-pool**, :class:`torch.nn.AvgPool2d` per class with kernel = epoch length *i* and stride *i*
- - **1x1 conv**, :class:`torch.nn.Conv2d`
- - ELU, :class:`torch.nn.ELU`
- - **1x1 conv**, :class:`torch.nn.Conv2d` with ``(T, K)`` (epochs x stages).
+
+ - **Mean-pool**, :class:`torch.nn.AvgPool2d` per class with kernel = epoch length *i* and stride *i*
+ - **1x1 conv**, :class:`torch.nn.Conv2d`
+ - ELU, :class:`torch.nn.ELU`
+ - **1x1 conv**, :class:`torch.nn.Conv2d` with ``(T, K)`` (epochs x stages).

  **Role**: Learns a **non-linear** weighted combination over each 30-s window (unlike U-Time's linear combiner).

  .. rubric:: Convolutional Details

  - **Temporal (where time-domain patterns are learned).**
- All convolutions are **1-D along time**; depth (12 levels) plus pooling yields an extensive receptive field
- (reported sensitivity to ±6.75 min around each epoch; theoretical field 9.6 min at the deepest layer).
- The decoder restores sample-level resolution before epoch aggregation.
+
+ All convolutions are **1-D along time**; depth (12 levels) plus pooling yields an extensive receptive field
+ (reported sensitivity to ±6.75 min around each epoch; theoretical field ≈ 9.6 min at the deepest layer).
+ The decoder restores sample-level resolution before epoch aggregation.

  - **Spatial (how channels are processed).**
- Convolutions mix across the *channel* dimension jointly with time (no separate spatial operator). The system
- is **montage-agnostic** (any reasonable EEG/EOG pair) and was trained across diverse cohorts/protocols,
- supporting robustness to channel placement and hardware differences.
+
+ Convolutions mix across the *channel* dimension jointly with time (no separate spatial operator). The system
+ is **montage-agnostic** (any reasonable EEG/EOG pair) and was trained across diverse cohorts/protocols,
+ supporting robustness to channel placement and hardware differences.

  - **Spectral (how frequency content is captured).**
- No explicit Fourier/wavelet transform is used; the **stack of temporal convolutions** acts as a learned
- filter bank whose effective bandwidth grows with depth. The high-frequency decoder output (128 Hz)
- retains fine temporal detail for the segment classifier.
+
+ No explicit Fourier/wavelet transform is used; the **stack of temporal convolutions** acts as a learned
+ filter bank whose effective bandwidth grows with depth. The high-frequency decoder output (128 Hz)
+ retains fine temporal detail for the segment classifier.


  .. rubric:: Attention / Sequential Modules
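Note: to make the decoder recipe above concrete, here is a minimal sketch of one such block following the listed operations (illustrative channel sizes and layout, not braindecode's ``_DecoderBlock`` itself)::

    import torch
    from torch import nn


    class DecoderBlockSketch(nn.Module):
        """One decoder step: upsample x2 in time, conv-ELU-BN, fuse skip, conv-ELU-BN."""

        def __init__(self, in_ch: int, skip_ch: int, out_ch: int):
            super().__init__()
            self.up = nn.Upsample(scale_factor=(2, 1), mode="nearest")  # x2 along time
            self.conv1 = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=(2, 1), padding="same"),
                nn.ELU(),
                nn.BatchNorm2d(out_ch),
            )
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_ch + skip_ch, out_ch, kernel_size=(2, 1), padding="same"),
                nn.ELU(),
                nn.BatchNorm2d(out_ch),
            )

        def forward(self, x, skip):
            x = self.conv1(self.up(x))
            x = torch.cat([x, skip], dim=1)  # fuse encoder skip at the same scale
            return self.conv2(x)


    x = torch.randn(1, 32, 64, 1)       # (batch, features, time, 1)
    skip = torch.randn(1, 16, 128, 1)   # encoder skip at the doubled time scale
    block = DecoderBlockSketch(in_ch=32, skip_ch=16, out_ch=16)
    print(block(x, skip).shape)         # torch.Size([1, 16, 128, 1])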
braindecode/models/util.py CHANGED
@@ -95,6 +95,9 @@ models_mandatory_parameters = [
  ("FBMSNet", ["n_chans", "n_outputs", "n_times", "sfreq"], dict(sfreq=200.0)),
  ("FBLightConvNet", ["n_chans", "n_outputs", "n_times", "sfreq"], dict(sfreq=200.0)),
  ("IFNet", ["n_chans", "n_outputs", "n_times", "sfreq"], dict(sfreq=200.0)),
+ ("PBT", ["n_chans", "n_outputs", "n_times"], None),
+ ("SSTDPN", ["n_chans", "n_outputs", "n_times", "sfreq"], None),
+ ("BENDR", ["n_chans", "n_outputs", "n_times"], None),
  ]

  ################################################################
braindecode/modules/attention.py CHANGED
@@ -38,7 +38,7 @@ class SqueezeAndExcitation(nn.Module):
  References
  ----------
  .. [Hu2018] Hu, J., Albanie, S., Sun, G., Wu, E., 2018.
- Squeeze-and-Excitation Networks. CVPR 2018.
+ Squeeze-and-Excitation Networks. CVPR 2018.
  """

  def __init__(self, in_channels: int, reduction_rate: int, bias: bool = False):
@@ -93,7 +93,7 @@ class GSoP(nn.Module):
  References
  ----------
  .. [Gao2018] Gao, Z., Jiangtao, X., Wang, Q., Li, P., 2018.
- Global Second-order Pooling Convolutional Networks. CVPR 2018.
+ Global Second-order Pooling Convolutional Networks. CVPR 2018.
  """

  def __init__(self, in_channels: int, reduction_rate: int, bias: bool = True):
@@ -149,7 +149,7 @@ class FCA(nn.Module):
  References
  ----------
  .. [Qin2021] Qin, Z., Zhang, P., Wu, F., Li, X., 2021.
- FcaNet: Frequency Channel Attention Networks. ICCV 2021.
+ FcaNet: Frequency Channel Attention Networks. ICCV 2021.
  """

  def __init__(
@@ -233,7 +233,7 @@ class EncNet(nn.Module):
  References
  ----------
  .. [Zhang2018] Zhang, H. et al. 2018.
- Context Encoding for Semantic Segmentation. CVPR 2018.
+ Context Encoding for Semantic Segmentation. CVPR 2018.
  """

  def __init__(self, in_channels: int, n_codewords: int):
@@ -290,7 +290,7 @@ class ECA(nn.Module):
  References
  ----------
  .. [Wang2021] Wang, Q. et al., 2021. ECA-Net: Efficient Channel Attention
- for Deep Convolutional Neural Networks. CVPR 2021.
+ for Deep Convolutional Neural Networks. CVPR 2021.
  """

  def __init__(self, in_channels: int, kernel_size: int):
@@ -341,8 +341,8 @@ class GatherExcite(nn.Module):
  References
  ----------
  .. [Hu2018b] Hu, J., Albanie, S., Sun, G., Vedaldi, A., 2018.
- Gather-Excite: Exploiting Feature Context in Convolutional Neural Networks.
- NeurIPS 2018.
+ Gather-Excite: Exploiting Feature Context in Convolutional Neural Networks.
+ NeurIPS 2018.
  """

  def __init__(
@@ -410,7 +410,7 @@ class GCT(nn.Module):
  References
  ----------
  .. [Yang2020] Yang, Z. Linchao, Z., Wu, Y., Yang, Y., 2020.
- Gated Channel Transformation for Visual Recognition. CVPR 2020.
+ Gated Channel Transformation for Visual Recognition. CVPR 2020.
  """

  def __init__(self, in_channels: int):
@@ -455,7 +455,7 @@ class SRM(nn.Module):
  References
  ----------
  .. [Lee2019] Lee, H., Kim, H., Nam, H., 2019. SRM: A Style-based
- Recalibration Module for Convolutional Neural Networks. ICCV 2019.
+ Recalibration Module for Convolutional Neural Networks. ICCV 2019.
  """

  def __init__(
@@ -520,7 +520,7 @@ class CBAM(nn.Module):
  References
  ----------
  .. [Woo2018] Woo, S., Park, J., Lee, J., Kweon, I., 2018.
- CBAM: Convolutional Block Attention Module. ECCV 2018.
+ CBAM: Convolutional Block Attention Module. ECCV 2018.
  """

  def __init__(self, in_channels: int, reduction_rate: int, kernel_size: int):
braindecode/modules/blocks.py CHANGED
@@ -37,8 +37,8 @@ class MLP(nn.Sequential):
  :math:`a_i` are called activation functions. The trainable parameters of an
  MLP are its weights and biases :math:`\\phi = \{W_i, b_i | i = 1, \dots, L\}`.

- Parameters:
- -----------
+ Parameters
+ ----------
  in_features: int
  Number of input features.
  hidden_features: Sequential[int] (default=None)
@@ -49,7 +49,7 @@ class MLP(nn.Sequential):
  out_features: int (default=None)
  Number of output features, if None, set to in_features.
  act_layer: nn.GELU (default)
- The activation function constructor. If :py:`None`, use
+ The activation function constructor. If ``None``, use
  :class:`torch.nn.GELU` instead.
  drop: float (default=0.0)
  Dropout rate.
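Note: a short usage sketch of the block documented above, assuming the constructor matches the documented parameters (``in_features``, ``hidden_features``, ``out_features``, ``act_layer``, ``drop``) and builds the linear stack accordingly::

    import torch
    from braindecode.modules.blocks import MLP  # module path from the file list

    # in -> 64 -> 64 -> out, GELU activations by default per the docstring
    mlp = MLP(in_features=32, hidden_features=(64, 64), out_features=8)
    x = torch.randn(16, 32)  # (batch, in_features)
    y = mlp(x)               # expected shape: (16, 8)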
braindecode/modules/filter.py CHANGED
@@ -17,9 +17,8 @@ class FilterBankLayer(nn.Module):
  It uses MNE's `create_filter` function to create the band-specific filters and
  applies them to multi-channel time-series data. Each filter in the bank corresponds to a
  specific frequency band and is applied to all channels of the input data. The filtering is
- performed using FFT-based convolution via the `fftconvolve` function from
- :func:`torchaudio.functional if the method is FIR, and `filtfilt` function from
- :func:`torchaudio.functional if the method is IIR.
+ performed using FFT-based convolution via the ``torchaudio.functional`` if the method is FIR,
+ and ``torchaudio.functional`` if the method is IIR.

  The default configuration creates 9 non-overlapping frequency bands with a 4 Hz bandwidth,
  spanning from 4 Hz to 40 Hz (i.e., 4-8 Hz, 8-12 Hz, ..., 36-40 Hz). This setup is based on the
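Note: a rough illustration of the FIR path described above (nine 4 Hz bands from 4-40 Hz, FFT-based convolution). ``mne.filter.create_filter`` and ``torchaudio.functional.fftconvolve`` are the APIs named in the docstring, but this sketch is not the layer's actual code::

    import mne
    import torch
    import torchaudio.functional as taf

    sfreq = 250.0
    bands = [(low, low + 4) for low in range(4, 40, 4)]  # (4, 8), ..., (36, 40)

    x = torch.randn(22, 1000)  # (n_chans, n_times)
    outputs = []
    for l_freq, h_freq in bands:
        # FIR taps for one band-pass filter
        taps = mne.filter.create_filter(None, sfreq, l_freq, h_freq, method="fir")
        kernel = torch.as_tensor(taps, dtype=x.dtype).unsqueeze(0)  # (1, n_taps)
        outputs.append(taf.fftconvolve(x, kernel, mode="same"))
    out = torch.stack(outputs)  # (n_bands, n_chans, n_times)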
braindecode/modules/layers.py CHANGED
@@ -70,26 +70,27 @@ class TimeDistributed(nn.Module):
  class DropPath(nn.Module):
  """Drop paths, also known as Stochastic Depth, per sample.

- When applied in main path of residual blocks.
+ When applied in main path of residual blocks.

- Parameters:
- -----------
- drop_prob: float (default=None)
- Drop path probability (should be in range 0-1).
+ Parameters
+ ----------
+ drop_prob: float (default=None)
+ Drop path probability (should be in range 0-1).

- Notes
- -----
- Code copied and modified from VISSL facebookresearch:
+ Notes
+ -----
+ Code copied and modified from VISSL facebookresearch:
  https://github.com/facebookresearch/vissl/blob/0b5d6a94437bc00baed112ca90c9d78c6ccfbafb/vissl/models/model_helpers.py#L676
- All rights reserved.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
+
+ All rights reserved.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
  """

  def __init__(self, drop_prob=None):
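Note: for reference, a minimal sketch of the per-sample stochastic-depth idea this docstring describes, following the common VISSL/timm formulation (not necessarily line-for-line identical to braindecode's implementation)::

    import torch
    from torch import nn


    class DropPathSketch(nn.Module):
        def __init__(self, drop_prob: float = 0.0):
            super().__init__()
            self.drop_prob = drop_prob

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.drop_prob == 0.0 or not self.training:
                return x
            keep_prob = 1.0 - self.drop_prob
            # One Bernoulli draw per sample; broadcast over all other dims.
            shape = (x.shape[0],) + (1,) * (x.ndim - 1)
            mask = x.new_empty(shape).bernoulli_(keep_prob)
            return x * mask / keep_prob  # rescale to preserve the expectation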
braindecode/preprocessing/preprocess.py CHANGED
@@ -112,6 +112,7 @@ def preprocess(
  n_jobs: int | None = None,
  offset: int = 0,
  copy_data: bool | None = None,
+ parallel_kwargs: dict | None = None,
  ):
  """Apply preprocessors to a concat dataset.

@@ -135,6 +136,10 @@ def preprocess(
  and saving very large datasets in chunks to preserve original positions.
  copy_data : bool | None
  Whether the data passed to parallel jobs should be copied or passed by reference.
+ parallel_kwargs : dict | None
+ Additional keyword arguments forwarded to ``joblib.Parallel``.
+ Defaults to None (equivalent to ``{}``).
+ See https://joblib.readthedocs.io/en/stable/generated/joblib.Parallel.html for details.

  Returns
  -------
@@ -153,8 +158,12 @@ def preprocess(

  parallel_processing = (n_jobs is not None) and (n_jobs != 1)

- job_prefer = "threads" if platform.system() == "Windows" else None
- list_of_ds = Parallel(n_jobs=n_jobs, prefer=job_prefer)(
+ parallel_params = {} if parallel_kwargs is None else dict(parallel_kwargs)
+ parallel_params.setdefault(
+ "prefer", "threads" if platform.system() == "Windows" else None
+ )
+
+ list_of_ds = Parallel(n_jobs=n_jobs, **parallel_params)(
  delayed(_preprocess)(
  ds,
  i + offset,
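Note: a hedged usage sketch of the new ``parallel_kwargs`` argument. ``backend`` and ``verbose`` are genuine ``joblib.Parallel`` options; the dataset and the preprocessing step below are placeholders::

    from braindecode.preprocessing import Preprocessor, preprocess

    # concat_ds: a BaseConcatDataset loaded elsewhere
    preprocessors = [Preprocessor("pick_types", eeg=True)]  # illustrative step
    preprocess(
        concat_ds,
        preprocessors,
        n_jobs=4,
        parallel_kwargs={"backend": "loky", "verbose": 10},
    )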
braindecode/preprocessing/windowers.py CHANGED
@@ -268,7 +268,7 @@ def create_windows_from_events(
  rejection based on flatness is done. See mne.Epochs.
  on_missing: str
  What to do if one or several event ids are not found in the recording.
- Valid keys are ‘error | ‘warning | ‘ignore’. See mne.Epochs.
+ Valid keys are ‘error' | ‘warning' | ‘ignore'. See mne.Epochs.
  accepted_bads_ratio: float, optional
  Acceptable proportion of trials with inconsistent length in a raw. If
  the number of trials whose length is exceeded by the window size is
@@ -398,7 +398,7 @@ def create_fixed_length_windows(
  by using the _LazyDataFrame (experimental).
  on_missing: str
  What to do if one or several event ids are not found in the recording.
- Valid keys are ‘error | ‘warning | ‘ignore’. See mne.Epochs.
+ Valid keys are ‘error' | ‘warning' | ‘ignore'. See mne.Epochs.
  n_jobs: int
  Number of jobs to use to parallelize the windowing.
  verbose: bool | str | int | None
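Note: the corrected ``on_missing`` values in context; ``create_windows_from_events`` and its offset parameters are braindecode's documented API, while the dataset is a placeholder::

    from braindecode.preprocessing import create_windows_from_events

    # concat_ds: a BaseConcatDataset loaded elsewhere
    windows_ds = create_windows_from_events(
        concat_ds,
        trial_start_offset_samples=0,
        trial_stop_offset_samples=0,
        on_missing="warning",  # one of 'error' | 'warning' | 'ignore'
    )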
braindecode/samplers/base.py CHANGED
@@ -122,14 +122,14 @@ class DistributedRecordingSampler(DistributedSampler):
  DataFrame with at least one of {subject, session, run} columns for each
  window in the BaseConcatDataset to sample examples from. Normally
  obtained with `BaseConcatDataset.get_metadata()`. For instance,
- `metadata.head()` might look like this:
-
- i_window_in_trial i_start_in_trial i_stop_in_trial target subject session run
- 0 0 0 500 -1 4 session_T run_0
- 1 1 500 1000 -1 4 session_T run_0
- 2 2 1000 1500 -1 4 session_T run_0
- 3 3 1500 2000 -1 4 session_T run_0
- 4 4 2000 2500 -1 4 session_T run_0
+ `metadata.head()` might look like this::
+
+ i_window_in_trial i_start_in_trial i_stop_in_trial target subject session run
+ 0 0 0 500 -1 4 session_T run_0
+ 1 1 500 1000 -1 4 session_T run_0
+ 2 2 1000 1500 -1 4 session_T run_0
+ 3 3 1500 2000 -1 4 session_T run_0
+ 4 4 2000 2500 -1 4 session_T run_0

  random_state : np.RandomState | int | None
  Random state.
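Note: a small sketch of how the metadata described above is obtained and handed to the sampler. It assumes a windowed dataset, that the class is exported from ``braindecode.samplers``, and (being a ``DistributedSampler`` subclass) an initialized ``torch.distributed`` process group::

    from braindecode.samplers import DistributedRecordingSampler  # assumed export

    # windows_ds: a windowed BaseConcatDataset created elsewhere
    metadata = windows_ds.get_metadata()
    print(metadata.head())  # i_window_in_trial, ..., target, subject, session, run

    sampler = DistributedRecordingSampler(metadata, random_state=87)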
braindecode/version.py CHANGED
@@ -1 +1 @@
- __version__ = "1.3.0.dev176728557"
+ __version__ = "1.3.0.dev177509039"
{braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: braindecode
- Version: 1.3.0.dev176728557
+ Version: 1.3.0.dev177509039
  Summary: Deep learning software to decode EEG, ECG or MEG signals
  Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
  Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
@@ -40,6 +40,8 @@ Requires-Dist: linear_attention_transformer
  Requires-Dist: docstring_inheritance
  Provides-Extra: moabb
  Requires-Dist: moabb>=1.2.0; extra == "moabb"
+ Provides-Extra: hug
+ Requires-Dist: huggingface_hub[torch]>=0.20.0; extra == "hug"
  Provides-Extra: tests
  Requires-Dist: pytest; extra == "tests"
  Requires-Dist: pytest-cov; extra == "tests"
@@ -65,7 +67,7 @@ Requires-Dist: pre-commit; extra == "docs"
  Requires-Dist: openneuro-py; extra == "docs"
  Requires-Dist: plotly; extra == "docs"
  Provides-Extra: all
- Requires-Dist: braindecode[docs,moabb,tests]; extra == "all"
+ Requires-Dist: braindecode[docs,hug,moabb,tests]; extra == "all"
  Dynamic: license-file

  .. image:: https://badges.gitter.im/braindecodechat/community.svg
{braindecode-1.3.0.dev176728557.dist-info → braindecode-1.3.0.dev177509039.dist-info}/RECORD CHANGED
@@ -1,48 +1,49 @@
  braindecode/__init__.py,sha256=Ac3LEEyIHWFY_fFh3eAY1GZUqXcUxVSJwOSUCwGEDvQ,182
  braindecode/classifier.py,sha256=k9vSCtfQbld0YVleDi5rrrmk6k_k5JYEPPBYcNxYjZ8,9807
- braindecode/eegneuralnet.py,sha256=dz8k_-2jV7WqkaX4bQG-dmr-vRT7ZtOwJqomXyC9PTw,15287
+ braindecode/eegneuralnet.py,sha256=U6kRdT2u8A2Ca0axMTR8IAESBsvgjLMusAbYappKAOk,15368
  braindecode/regressor.py,sha256=VLfrpiXklwI4onkwue3QmzlBWcvspu0tlrLo9RT1Oiw,9375
  braindecode/util.py,sha256=J-tBcDJNlMTIFW2mfOy6Ko0nsgdP4obRoEVDeg2rFH0,12686
- braindecode/version.py,sha256=VE8TY-87uYMqzkLZWXK_YyADJSoX-fDwhSwMmmON7q8,35
+ braindecode/version.py,sha256=d7DGzW6lZw0fxjOJqlDfyORYAFc4vafKD_PcKvu5ERo,35
  braindecode/augmentation/__init__.py,sha256=LG7ONqCufYAF9NZt8POIp10lYXb8iSueYkF-CWGK2Ls,1001
  braindecode/augmentation/base.py,sha256=gg7wYsVfa9jfqBddtE03B5ZrPHFFmPl2sa3LOrRnGfo,7325
- braindecode/augmentation/functional.py,sha256=ygkMNEFHaUdRQfk7meMML19FnM406Uf34h-ztKXdJwM,37978
- braindecode/augmentation/transforms.py,sha256=QgLoX6MFaiBH8WoVBgB8eY4x9jZNPMvj20zlwUM8AOs,44245
+ braindecode/augmentation/functional.py,sha256=lPhGpZcVtgfQ3oV6p6IQLBCWM_Psa60TwxH3Wj1WyOQ,41133
+ braindecode/augmentation/transforms.py,sha256=Ur05yLdROm5pfKTsS2opCWI--X6JwWjP7YMa2KTTZTw,44243
  braindecode/datasets/__init__.py,sha256=CTl8ucbG948ZJqntEBELb-Pn8GsZLfFZLgVcB-fhw4k,891
- braindecode/datasets/base.py,sha256=ED8RQWusMyWf0T7b_HXwouR2Ax47qppEc506AlSzBt0,32155
+ braindecode/datasets/base.py,sha256=c-mtoYOXzoap7KftmxNds-84V6-MN8RjuHp-L9d8ROw,32238
  braindecode/datasets/bbci.py,sha256=BC9o1thEyYBREAo930O7zZz3xZB-l4Odt5j8E_1huXI,19277
- braindecode/datasets/bcicomp.py,sha256=ER_XmqxhpoO2FWELMesQXQ40OTe7BXoy7nYDSiZG9kE,7556
+ braindecode/datasets/bcicomp.py,sha256=QK3mffLQjVZyf-whkNxeFij5SWhmNDsONnM72M0PnUE,7554
  braindecode/datasets/bids.py,sha256=4asq1HyQHgJjwW7w-GMlvTVQhi-hR2HWLJ8Z__UrUS4,8846
  braindecode/datasets/experimental.py,sha256=Z_uzMNA875-l878LAv7bWiWYJX3QAefmb5quBkcPp7M,8514
  braindecode/datasets/mne.py,sha256=Dg6RZAAwd8TVGrvLOPF5B_JrbyGUWg52vWmn6fLMOQM,6135
  braindecode/datasets/moabb.py,sha256=JmBcFV7QJT8GCgLNNKWgxJVnEVnO5wd9U_uiIqTIxDM,7091
  braindecode/datasets/nmt.py,sha256=E4T8OYBEwWRSjh7VFzmyxaZbf5ufFVEBYYmQEd1ghUU,10430
- braindecode/datasets/sleep_physio_challe_18.py,sha256=KTvUtuarOOYu6PHN6H1vcy4W9xilwtZE08n7JSrk8Cs,15414
+ braindecode/datasets/sleep_physio_challe_18.py,sha256=66A86_9VssszKrVXowb0oFyL3xbF1VRqQK5FtW33QlM,15427
  braindecode/datasets/sleep_physionet.py,sha256=jieRx6u-MQ4jn_5Zox_pVV8WjBwXKLv9uq4GXRAZ_58,4087
  braindecode/datasets/tuh.py,sha256=iG1hOtdevzKGEVpeuRFDBOnsW_rWa5zEmMFJfYR1hqg,22867
  braindecode/datasets/xy.py,sha256=xT-nS_5jpuVKJ0SGqc7Ia0FVpqj86UfuzcYQdEGZdp0,2986
  braindecode/datautil/__init__.py,sha256=GB9xOudUhJGDyG08PBrnotw6HnWoWIXAHfRNFO-pxSk,1797
- braindecode/datautil/serialization.py,sha256=gLIm9bcuR-XfVdII-RTplUWFRms9qVvVZ0-M6gTucNc,13028
+ braindecode/datautil/serialization.py,sha256=g_EVg3oTieqFRattw9OdwMaYjfjANVG-uCS3xVkuHjg,13293
  braindecode/datautil/util.py,sha256=ZfDoxLieKsgI8xcWQqebV-vJ5pJYRvRRHkEwhwpgoKU,674
  braindecode/functional/__init__.py,sha256=JPUDFeKtfogEzfrwPaZRBmxexPjBw7AglYMlImaAnWc,413
- braindecode/functional/functions.py,sha256=CoEweM6YLhigx0tNmmz6yAc8iQ078sTFY2GeCjK5fFs,8622
- braindecode/functional/initialization.py,sha256=BUSC7y2TMsfShpMYBVwm3xg3ODFqWp-STH7yD4sn8zk,1388
- braindecode/models/__init__.py,sha256=v2Pn0H-rM_9xr1EEoKIFygmhbS9r52qh8XwFzXuhK70,2455
- braindecode/models/atcnet.py,sha256=jA_18BOaasmiqGbLJOvfBY5q2xHtKdoRFKzN_aqpDoQ,32107
- braindecode/models/attentionbasenet.py,sha256=AK78VvwrZXyJY20zadzDUHl17C-5zcWCd5xPRN7Lr4o,30385
+ braindecode/functional/functions.py,sha256=x3_UGovZ9HPnSAL2DtMwHsGm6MdNm0CdHd3-pzHzEto,8649
+ braindecode/functional/initialization.py,sha256=f-4jIS9QY-YD-3R7N77UbBJh8GcuDvVUzn6Ad6Gx8LE,1382
+ braindecode/models/__init__.py,sha256=ovF_WX8ZkXEkleRwYsMMS7ldLPh8_2NzTeYGVqH9ilg,2581
+ braindecode/models/atcnet.py,sha256=XhcxnGYcrAsHPvYwCHUNZaSMzhfvg6Pm_PPy--7-W4A,32216
+ braindecode/models/attentionbasenet.py,sha256=bgc6_7jDT_fnfyCtPhI4i6H7Zornxe46-bMoINLl6YE,30416
  braindecode/models/attn_sleep.py,sha256=m6sdFfD4en2hHf_TpotLPC1hVweJcYZvjgf12bV5FZg,17822
- braindecode/models/base.py,sha256=9icrWNZBGbh_VLyB9m8g_K1QyK7s3mh8X-hJ29gEbWs,10802
- braindecode/models/biot.py,sha256=T4PymX3penMJcrdfb5Nq6B3P-jyP2laAIu_R9o3uCXo,17512
+ braindecode/models/base.py,sha256=iufKlZf_Oe7wPkkOvfNPOn387E_np6B9YLeVLHTlRHk,20191
+ braindecode/models/bendr.py,sha256=MZQdYFERVeBJnynEXDlCLdn_I0mJtgzzFuMhCXkbMkg,21591
+ braindecode/models/biot.py,sha256=LpJ8tXqQL2Zh_vcQnpUHEpAGQrPHtn2cBSTUPFCW8jQ,17546
  braindecode/models/contrawr.py,sha256=eeR_ik4gNZ3rJLM6Mw9gJ2gTMkZ8CU8C4rN_GQMQTAE,10044
- braindecode/models/ctnet.py,sha256=ce5F31q2weBKvg7PL80iDm7za9fhGaCFvNfHoJW_dtg,17315
+ braindecode/models/ctnet.py,sha256=ThKoZsosmkNOIKekL8uQVk1kdBlntuDmqM83sH9LgWc,17324
  braindecode/models/deep4.py,sha256=-s-R3H7so2xlSiPsU226eSwscv1X9xJMYLm3LhZ3mSU,14645
- braindecode/models/deepsleepnet.py,sha256=wGSAXW73Ga1-HFbn7kXiLeGsJceiqZyMLZnX2UZZXWw,15207
- braindecode/models/eegconformer.py,sha256=rxMAmqErDVLq7nS77CnTtpcC3C2OR_EoZ8-jG-dKP9I,17433
- braindecode/models/eeginception_erp.py,sha256=FYXoM-u4kOodMzGgvKDn7IwJwHl9Z0iiWx9bVHiO9EY,16324
+ braindecode/models/deepsleepnet.py,sha256=ht2MDuUp-pQBJTTWNEgN4_Orxz01yVyXhxYND6vgJFA,15263
+ braindecode/models/eegconformer.py,sha256=z8oSuo1Dv-MKGyxCFQVxQa3sbeku8v8u66c3Qjig38c,17429
+ braindecode/models/eeginception_erp.py,sha256=aAjpweNixFgOSL47r-IjHFZujJje8a7TWudtbYdY98M,16410
  braindecode/models/eeginception_mi.py,sha256=VoWtsaWj1xQ4FlrvCbnPvo8eosufYUmTrL4uvFtqKcg,12456
  braindecode/models/eegitnet.py,sha256=feXFmPCd-Ejxt7jgWPen1Ag0-oSclDVQai0Atwu9d_A,9827
  braindecode/models/eegminer.py,sha256=ouKZah9Q7_sxT7DJJMcPObwVxNQE87sEljJg6QwiQNw,9847
- braindecode/models/eegnet.py,sha256=dIaHZoz7xMII1qKrS0___IWdy1xg2QrMMiqUgTJM9E8,13682
+ braindecode/models/eegnet.py,sha256=qmxQZa-owqEuha7iwOAdPQU29DoLpEyNjH-oouddWLc,13684
  braindecode/models/eegnex.py,sha256=eahHolFl15LwNWeC5qjQqUGqURibQZIV425rI1p-dG8,13604
  braindecode/models/eegsimpleconv.py,sha256=6V5ZQNWijmd3-2wv7lJB_HGBS3wHWWVrKoNIeWTXu-w,7300
  braindecode/models/eegtcnet.py,sha256=Y53uJEX_hoB6eHCew9SIfzNxCYea8UhljDARJTk-Tq8,10837
@@ -51,29 +52,31 @@ braindecode/models/fblightconvnet.py,sha256=d5MwhawhkjilAMo0ckaYMxJhdGMEuorWgHX-
  braindecode/models/fbmsnet.py,sha256=9bZn2_n1dTrI1Qh3Sz9zMZnH_a-Yq-13UHYSmF6r_UE,11659
  braindecode/models/hybrid.py,sha256=hA8jwD3_3LL71BxUjRM1dkhqlHU9E9hjuDokh-jBq-4,4024
  braindecode/models/ifnet.py,sha256=Y2bwfko3SDjD74AzgUEzgMhKJFGCCw_Q_Noh5VONEjQ,15137
- braindecode/models/labram.py,sha256=vcrpwiu4F-djtIPscFbtP2Y0jTosyR_cXnOMQQRGPLw,41798
+ braindecode/models/labram.py,sha256=3wQ0XwPT8ISPAyEe6_OaC5gqjH3ZYSYCMNv8OXkssvY,46638
  braindecode/models/msvtnet.py,sha256=hxeCLkHS6w2w89YlLfEPCyQ4XQQpt45bEYPiQJ9SFzY,12642
+ braindecode/models/patchedtransformer.py,sha256=9TY9l2X4EoCuE9IoOObjubKFRdmsN5lbrVQLnmr66VY,23444
  braindecode/models/sccnet.py,sha256=C7vdwIR5cI6wJCl5f8TnGQG6qinq21y4HG6l-D5AwbY,11971
  braindecode/models/shallow_fbcsp.py,sha256=7U07DJBrm2JHV8v5ja-xuE5-IH5tfmryhJtrfO1n4jk,7531
- braindecode/models/signal_jepa.py,sha256=UeSkeAM3Qmx8bbAqHCj5nP-PtZM00_5SGA8ibo9mptc,37079
- braindecode/models/sinc_shallow.py,sha256=Ilv8K1XhMGiRTBtQdq7L595i6cEFYOBe0_UDv-LqL7s,11907
+ braindecode/models/signal_jepa.py,sha256=bBujhM9ItIJisKvbxEi5e1yuV-0mBb41GlyMeEs_TkA,41124
+ braindecode/models/sinc_shallow.py,sha256=SrYm45xaffDTP-SjWso89IvOgBFhQRO3BsFZGNt_OrI,11880
  braindecode/models/sleep_stager_blanco_2020.py,sha256=vXulnDYutEFLM0UPXyAI0YIj5QImUMVEmYZb78j34H8,6034
  braindecode/models/sleep_stager_chambon_2018.py,sha256=8w8IR2PsfG0jSc3o0YVopgHpOvCHNIuMi7-QRJOYEW4,5245
  braindecode/models/sparcnet.py,sha256=MG1OB91guI7ssKRk8GvWlzUvaxo_otaYnbEGzNUZVyg,13973
- braindecode/models/summary.csv,sha256=NfrmnjyfDmWVe2zyNqgczEQcLI910BOS4sICtcKS3gc,6765
+ braindecode/models/sstdpn.py,sha256=wJv-UYP1q8cMGp2wU1efzIZiigRmkJ8uY22rNB2D7Wc,35077
+ braindecode/models/summary.csv,sha256=vFmhpCGFZlxC9Zm8KLBaGRHvZZfdRY85NAGj1Wyv1yU,7209
  braindecode/models/syncnet.py,sha256=nrWJC5ijCSWKVZyRn-dmOuc1t5vk2C6tx8U3U4j5d5Y,8362
  braindecode/models/tcn.py,sha256=SQu56H9zdbcbbDIXZVgZtJg7es8CRAJ7z-IBnmf4UWM,8158
  braindecode/models/tidnet.py,sha256=HSUL1al6gaRbJ-BRYAAs4KDvLuKEvh0NnBfAsPeWMpM,11837
  braindecode/models/tsinception.py,sha256=nnQxzpqRy9FPuN5xgh9fNQ386VbreQ_nZBSFNkSfal0,10356
- braindecode/models/usleep.py,sha256=5uztUHX70T_LurqRob_XmVnKkZDwt74x2Iz181M7s54,17233
- braindecode/models/util.py,sha256=VZGVPhUSsoP47pta0_UhC2-g5n5-EFZAW93ZVccrEHU,5232
+ braindecode/models/usleep.py,sha256=oZv2Z78d2jfyyh-LbRBSgGfWjP8YugcXEHvQAENM_Q8,17296
+ braindecode/models/util.py,sha256=nrYBdd0FTCoYxgg21oz1UlW-PACx-0-_EyvMQua0QI8,5414
  braindecode/modules/__init__.py,sha256=PD2LpeSHWW_MgEef7-G8ief5gheGObzsIoacchxWuyA,1756
  braindecode/modules/activation.py,sha256=lTO2IjZWBDeXZ4ZVDgLmTDmxHdqyAny3Fsy07HY9tmQ,1466
- braindecode/modules/attention.py,sha256=ISE11jXAvMqKpawZilg8i7lDX5mkuvpEplrh_CtGEkk,24102
- braindecode/modules/blocks.py,sha256=QE34HBg7kmEj0z-8dQZ1jJErLRPcniGIorMTeIArpv4,3621
+ braindecode/modules/attention.py,sha256=N-GYLyDV5crKFg08x-lkosMjaOTJv8lk_2p1Jkh_PdU,24142
+ braindecode/modules/blocks.py,sha256=M_jWtr9kNOP-hZVVzb9hj-jsSV1mvv-eX1qtV5MacEU,3617
  braindecode/modules/convolution.py,sha256=gZMMOa-2gy1nfduA_j2ezgdIdq5Bi2PtonNomWA4D8k,8481
- braindecode/modules/filter.py,sha256=iCz0HiGKrBS09m3aGiNnZEt8jpYOOrmn6SpPCUcuHfU,25291
- braindecode/modules/layers.py,sha256=w_tAGcm8BDFiyMdAYM4DNLx46zIUted8B6my8_jtpps,3724
+ braindecode/modules/filter.py,sha256=8Li7AYQeN5D2A0Q14m2LDlQBZJbVZoiH50A2EkGgqZc,25228
+ braindecode/modules/layers.py,sha256=LqkXuSaSPKD9qWBy7jYLJ9lBSHObYsmwfgGEFFZ6xq0,3659
  braindecode/modules/linear.py,sha256=pNhSUU0u-IGEUCjAfEDq_TJWnIJMWuOk7Y5L-7I8Meg,1702
  braindecode/modules/parametrization.py,sha256=sTvV21-sdpqpiY2PzwDebi7SeEvkFw8yDgA6OqJDo34,1310
  braindecode/modules/stats.py,sha256=ETqZH6PPyYCss2PKBDNrO4uUeijR4bxvjCQCXjNJkH4,2398
@@ -81,10 +84,10 @@ braindecode/modules/util.py,sha256=tVXEhzeTsYrr_wZ5CiXaq3VYGtC5TmGEEW2hMYjTQAE,2
  braindecode/modules/wrapper.py,sha256=Z-aZ4wxA0psYefMOfj03r7D1XjD4az6GpZpaQoDPJv0,2421
  braindecode/preprocessing/__init__.py,sha256=V0iwdzb6DzpUaCabA7I6HmOqXK_XvTbpP5HaEduSJ4s,776
  braindecode/preprocessing/mne_preprocess.py,sha256=_Jczaitqbx16utsUOhnonEcoExf6jPsWNwVOVvoKFfU,2210
- braindecode/preprocessing/preprocess.py,sha256=gg52Uqo23yqXBckXrph_AFWCSEmrA7JdA54vcCwbrKE,17489
- braindecode/preprocessing/windowers.py,sha256=6w6mOnroGWnV7tS23UagZZepswaxaL00S45Jr5AViRE,36551
+ braindecode/preprocessing/preprocess.py,sha256=da_-Tn1NLPunsZC2-uzzgCYgdm_Xj-CIJjwf_CTMuFs,17899
+ braindecode/preprocessing/windowers.py,sha256=GU2_Mi06W_2cpZZ2yvm-YjXFM9-0mGcZ0lU5mfj_ZiI,36539
  braindecode/samplers/__init__.py,sha256=TLuO6gXv2WioJdX671MI_CHVSsOfbjnly1Xv9K3_WdA,452
- braindecode/samplers/base.py,sha256=z_Txp9cEwUmIBL0J6FPJbx1cMSsU9l9mxymRCGqNss0,15111
+ braindecode/samplers/base.py,sha256=PTa4gGAKXH1Tnx4vBXBAb43x7wQKVvqK1mlM_zE3yY4,15133
  braindecode/samplers/ssl.py,sha256=C-FKopnbncN_-spQPCrgljY5Qds4fgTLr2TG3s_-QqU,9146
  braindecode/training/__init__.py,sha256=sxtfI6MgxX3aP03EFc0wJYA37uULoL9SQyUao1Oxyn0,523
  braindecode/training/callbacks.py,sha256=LqXqzJd6s3w0pvAKy9TEVTxWwVRyWNEu2uyWVsvb9RQ,839
@@ -93,9 +96,9 @@ braindecode/training/scoring.py,sha256=WRkwqbitA3m_dzRnGp2ZIZPge5Nhx9gAEQhIHzeH4
  braindecode/visualization/__init__.py,sha256=4EER_xHqZIDzEvmgUEm7K1bgNKpyZAIClR9ZCkMuY4M,240
  braindecode/visualization/confusion_matrices.py,sha256=qIWMLEHow5CJ7PhGggD8mnD55Le6xhma9HSzt4R33fc,9509
  braindecode/visualization/gradients.py,sha256=KZo-GA0uwiwty2_94j2IjmCR2SKcfPb1Bi3sQq7vpTk,2170
- braindecode-1.3.0.dev176728557.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
- braindecode-1.3.0.dev176728557.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
- braindecode-1.3.0.dev176728557.dist-info/METADATA,sha256=SFpdHpYvRTRFC4ZmrHDvFRLEtnpIygDz0in33OmV8FU,7129
- braindecode-1.3.0.dev176728557.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- braindecode-1.3.0.dev176728557.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
- braindecode-1.3.0.dev176728557.dist-info/RECORD,,
+ braindecode-1.3.0.dev177509039.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
+ braindecode-1.3.0.dev177509039.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
+ braindecode-1.3.0.dev177509039.dist-info/METADATA,sha256=e6R1OzO0qvzjik6uRiZrhHPWBNG98sDVxYSwPM8BPmc,7215
+ braindecode-1.3.0.dev177509039.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ braindecode-1.3.0.dev177509039.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
+ braindecode-1.3.0.dev177509039.dist-info/RECORD,,