braindecode 1.0.0.tar.gz → 1.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of braindecode might be problematic. Click here for more details.

Files changed (390)
  1. {braindecode-1.0.0/braindecode.egg-info → braindecode-1.1.0}/PKG-INFO +5 -5
  2. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/augmentation/transforms.py +0 -1
  3. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datautil/__init__.py +3 -0
  4. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datautil/serialization.py +13 -2
  5. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/functional/__init__.py +12 -0
  6. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/functional/functions.py +0 -1
  7. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/__init__.py +48 -0
  8. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/atcnet.py +46 -11
  9. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/attentionbasenet.py +49 -0
  10. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/biot.py +29 -8
  11. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/contrawr.py +29 -8
  12. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/ctnet.py +99 -13
  13. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/deep4.py +52 -2
  14. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegconformer.py +2 -3
  15. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eeginception_mi.py +9 -3
  16. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegitnet.py +0 -1
  17. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegminer.py +0 -1
  18. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegnet.py +0 -1
  19. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/fbcnet.py +1 -1
  20. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/fbmsnet.py +0 -1
  21. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/labram.py +23 -3
  22. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/msvtnet.py +1 -1
  23. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/sccnet.py +29 -4
  24. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/signal_jepa.py +0 -1
  25. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/sleep_stager_eldele_2021.py +0 -1
  26. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/sparcnet.py +62 -16
  27. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/tcn.py +1 -1
  28. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/tsinception.py +38 -13
  29. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/util.py +2 -6
  30. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/__init__.py +46 -0
  31. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/filter.py +0 -4
  32. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/layers.py +3 -5
  33. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/linear.py +1 -2
  34. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/util.py +0 -1
  35. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/wrapper.py +0 -2
  36. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/samplers/base.py +0 -2
  37. braindecode-1.1.0/braindecode/version.py +1 -0
  38. {braindecode-1.0.0 → braindecode-1.1.0/braindecode.egg-info}/PKG-INFO +5 -5
  39. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode.egg-info/requires.txt +4 -4
  40. {braindecode-1.0.0 → braindecode-1.1.0}/docs/api.rst +31 -31
  41. {braindecode-1.0.0 → braindecode-1.1.0}/docs/whats_new.rst +22 -4
  42. {braindecode-1.0.0 → braindecode-1.1.0}/pyproject.toml +8 -8
  43. braindecode-1.0.0/braindecode/version.py +0 -1
  44. {braindecode-1.0.0 → braindecode-1.1.0}/LICENSE.txt +0 -0
  45. {braindecode-1.0.0 → braindecode-1.1.0}/MANIFEST.in +0 -0
  46. {braindecode-1.0.0 → braindecode-1.1.0}/NOTICE.txt +0 -0
  47. {braindecode-1.0.0 → braindecode-1.1.0}/README.rst +0 -0
  48. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/__init__.py +0 -0
  49. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/augmentation/__init__.py +0 -0
  50. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/augmentation/base.py +0 -0
  51. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/augmentation/functional.py +0 -0
  52. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/classifier.py +0 -0
  53. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/__init__.py +0 -0
  54. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/base.py +0 -0
  55. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/bbci.py +0 -0
  56. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/bcicomp.py +0 -0
  57. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/bids.py +0 -0
  58. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/mne.py +0 -0
  59. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/moabb.py +0 -0
  60. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/nmt.py +0 -0
  61. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/sleep_physio_challe_18.py +0 -0
  62. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/sleep_physionet.py +0 -0
  63. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/tuh.py +0 -0
  64. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datasets/xy.py +0 -0
  65. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/datautil/util.py +0 -0
  66. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/eegneuralnet.py +0 -0
  67. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/functional/initialization.py +0 -0
  68. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/base.py +0 -0
  69. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/deepsleepnet.py +0 -0
  70. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eeginception_erp.py +0 -0
  71. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegnex.py +0 -0
  72. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegresnet.py +0 -0
  73. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegsimpleconv.py +0 -0
  74. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/eegtcnet.py +0 -0
  75. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/fblightconvnet.py +0 -0
  76. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/hybrid.py +0 -0
  77. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/ifnet.py +0 -0
  78. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/shallow_fbcsp.py +0 -0
  79. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/sinc_shallow.py +0 -0
  80. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/sleep_stager_blanco_2020.py +0 -0
  81. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/sleep_stager_chambon_2018.py +0 -0
  82. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/summary.csv +0 -0
  83. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/syncnet.py +0 -0
  84. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/tidnet.py +0 -0
  85. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/models/usleep.py +0 -0
  86. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/activation.py +0 -0
  87. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/attention.py +0 -0
  88. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/blocks.py +0 -0
  89. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/convolution.py +0 -0
  90. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/parametrization.py +0 -0
  91. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/modules/stats.py +0 -0
  92. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/preprocessing/__init__.py +0 -0
  93. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/preprocessing/mne_preprocess.py +0 -0
  94. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/preprocessing/preprocess.py +0 -0
  95. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/preprocessing/windowers.py +0 -0
  96. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/regressor.py +0 -0
  97. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/samplers/__init__.py +0 -0
  98. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/samplers/ssl.py +0 -0
  99. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/training/__init__.py +0 -0
  100. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/training/callbacks.py +0 -0
  101. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/training/losses.py +0 -0
  102. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/training/scoring.py +0 -0
  103. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/util.py +0 -0
  104. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/visualization/__init__.py +0 -0
  105. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/visualization/confusion_matrices.py +0 -0
  106. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode/visualization/gradients.py +0 -0
  107. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode.egg-info/SOURCES.txt +0 -0
  108. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode.egg-info/dependency_links.txt +0 -0
  109. {braindecode-1.0.0 → braindecode-1.1.0}/braindecode.egg-info/top_level.txt +0 -0
  110. {braindecode-1.0.0 → braindecode-1.1.0}/docs/Makefile +0 -0
  111. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/090305d06248840b75133975e5121f41/plot_sleep_staging_chambon2018.ipynb +0 -0
  112. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/0f2bf063e08b7d05b80e0004fcbbb6f9/benchmark_lazy_eager_loading.ipynb +0 -0
  113. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/10fc813a8f78253f4bf301264950f5c1/plot_bcic_iv_4_ecog_cropped.ipynb +0 -0
  114. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/1c6590d134fa0befda654e89aa55b2ac/plot_benchmark_preprocessing.ipynb +0 -0
  115. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/1d879df548fa18be8c23d9ca0dc008d4/plot_data_augmentation.ipynb +0 -0
  116. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/2466f8ec5c733d0bd65e187b45d875cc/plot_data_augmentation_search.ipynb +0 -0
  117. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/263464a28477cf8decb861ae6e2e9be7/plot_how_train_test_and_tune.ipynb +0 -0
  118. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/3862cafd7f0d815e434319ffe525afc8/plot_bcic_iv_2a_moabb_cropped.ipynb +0 -0
  119. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/408ca4ffd0ea1f76faa9ef602734ac94/plot_tuh_eeg_corpus.ipynb +0 -0
  120. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/5030717acb00990b394b83314b0797d9/plot_bids_dataset_example.ipynb +0 -0
  121. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/75a73c7d94f3a671fd3dec28f4031ead/plot_regression.ipynb +0 -0
  122. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/84fbcd59a346e5e56758285122dc69e2/plot_sleep_staging_eldele2021.ipynb +0 -0
  123. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/8616a7f968141825e56ab3e3d59be449/plot_tuh_discrete_multitarget.ipynb +0 -0
  124. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/8b5ba06718764b959e8dea1dd0bb97df/plot_sleep_staging_usleep.ipynb +0 -0
  125. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/91651c9d4fde110b4a53f5775a91acc5/plot_mne_dataset_example.ipynb +0 -0
  126. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/93fdea8cbadaf0e3afa2eb2be3ebc483/bcic_iv_4_ecog_trial.ipynb +0 -0
  127. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/9a4447462c3b255ba7e5ca212bbadd52/plot_bcic_iv_2a_moabb_trial.ipynb +0 -0
  128. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/a2f12129facb042643a99d4b9de1886d/bcic_iv_4_ecog_cropped.ipynb +0 -0
  129. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/a39e4245738e55e0eb7084c545ed05bc/plot_hyperparameter_tuning_with_scikit-learn.ipynb +0 -0
  130. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/a6249715d3c30cb41c4af85938cca008/plot_moabb_dataset_example.ipynb +0 -0
  131. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/a7ccc5c1d1d2775e08c053bb25c81a91/plot_bcic_iv_4_ecog_trial.ipynb +0 -0
  132. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/a86d0c5f3a882a069df1683a708d3e25/plot_train_in_pure_pytorch_and_pytorch_lightning.ipynb +0 -0
  133. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/aa8426d97090e7b3062c4e4732c3214a/plot_relative_positioning.ipynb +0 -0
  134. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/b9d19304ecd233ea7a79d4365316ea49/plot_load_save_datasets.ipynb +0 -0
  135. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/f3c89d39e947a121c7920b4d415413a2/plot_split_dataset.ipynb +0 -0
  136. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/f7b38cac92c078838442753121efc297/plot_custom_dataset_example.ipynb +0 -0
  137. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_build/html/_downloads/fff46913db5173d3ae22c1113acffb45/plot_basic_training_epochs.ipynb +0 -0
  138. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_templates/autosummary/class.rst +0 -0
  139. {braindecode-1.0.0 → braindecode-1.1.0}/docs/_templates/autosummary/function.rst +0 -0
  140. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/bcic_iv_4_ecog_cropped.ipynb +0 -0
  141. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/bcic_iv_4_ecog_cropped.rst +0 -0
  142. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/index.rst +0 -0
  143. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_bcic_iv_4_ecog_cropped.ipynb +0 -0
  144. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_bcic_iv_4_ecog_cropped.rst +0 -0
  145. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_data_augmentation.ipynb +0 -0
  146. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_data_augmentation.rst +0 -0
  147. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_data_augmentation_search.ipynb +0 -0
  148. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_data_augmentation_search.rst +0 -0
  149. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_relative_positioning.ipynb +0 -0
  150. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/plot_relative_positioning.rst +0 -0
  151. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/advanced_training/sg_execution_times.rst +0 -0
  152. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/bcic_iv_4_ecog_trial.ipynb +0 -0
  153. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/bcic_iv_4_ecog_trial.rst +0 -0
  154. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/index.rst +0 -0
  155. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_bcic_iv_4_ecog_trial.ipynb +0 -0
  156. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_bcic_iv_4_ecog_trial.rst +0 -0
  157. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_sleep_staging_chambon2018.ipynb +0 -0
  158. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_sleep_staging_chambon2018.rst +0 -0
  159. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_sleep_staging_eldele2021.ipynb +0 -0
  160. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_sleep_staging_eldele2021.rst +0 -0
  161. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_sleep_staging_usleep.ipynb +0 -0
  162. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_sleep_staging_usleep.rst +0 -0
  163. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_tuh_eeg_corpus.ipynb +0 -0
  164. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/plot_tuh_eeg_corpus.rst +0 -0
  165. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/applied_examples/sg_execution_times.rst +0 -0
  166. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/benchmark_lazy_eager_loading.ipynb +0 -0
  167. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/benchmark_lazy_eager_loading.rst +0 -0
  168. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/index.rst +0 -0
  169. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_benchmark_preprocessing.ipynb +0 -0
  170. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_benchmark_preprocessing.rst +0 -0
  171. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_bids_dataset_example.ipynb +0 -0
  172. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_bids_dataset_example.rst +0 -0
  173. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_custom_dataset_example.ipynb +0 -0
  174. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_custom_dataset_example.rst +0 -0
  175. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_load_save_datasets.ipynb +0 -0
  176. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_load_save_datasets.rst +0 -0
  177. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_mne_dataset_example.ipynb +0 -0
  178. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_mne_dataset_example.rst +0 -0
  179. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_moabb_dataset_example.ipynb +0 -0
  180. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_moabb_dataset_example.rst +0 -0
  181. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_split_dataset.ipynb +0 -0
  182. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_split_dataset.rst +0 -0
  183. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_tuh_discrete_multitarget.ipynb +0 -0
  184. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/plot_tuh_discrete_multitarget.rst +0 -0
  185. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/datasets_io/sg_execution_times.rst +0 -0
  186. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/index.rst +0 -0
  187. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/index.rst +0 -0
  188. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_basic_training_epochs.ipynb +0 -0
  189. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_basic_training_epochs.rst +0 -0
  190. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_bcic_iv_2a_moabb_cropped.ipynb +0 -0
  191. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_bcic_iv_2a_moabb_cropped.rst +0 -0
  192. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_bcic_iv_2a_moabb_trial.ipynb +0 -0
  193. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_bcic_iv_2a_moabb_trial.rst +0 -0
  194. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_how_train_test_and_tune.ipynb +0 -0
  195. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_how_train_test_and_tune.rst +0 -0
  196. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_hyperparameter_tuning_with_scikit-learn.ipynb +0 -0
  197. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_hyperparameter_tuning_with_scikit-learn.rst +0 -0
  198. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_regression.ipynb +0 -0
  199. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_regression.rst +0 -0
  200. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_train_in_pure_pytorch_and_pytorch_lightning.ipynb +0 -0
  201. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/plot_train_in_pure_pytorch_and_pytorch_lightning.rst +0 -0
  202. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/model_building/sg_execution_times.rst +0 -0
  203. {braindecode-1.0.0 → braindecode-1.1.0}/docs/auto_examples/sg_execution_times.rst +0 -0
  204. {braindecode-1.0.0 → braindecode-1.1.0}/docs/cite.rst +0 -0
  205. {braindecode-1.0.0 → braindecode-1.1.0}/docs/conf.py +0 -0
  206. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/activation/braindecode.modules.LogActivation.rst +0 -0
  207. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/activation/braindecode.modules.SafeLog.rst +0 -0
  208. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.CAT.rst +0 -0
  209. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.CATLite.rst +0 -0
  210. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.CBAM.rst +0 -0
  211. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.ECA.rst +0 -0
  212. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.EncNet.rst +0 -0
  213. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.FCA.rst +0 -0
  214. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.GCT.rst +0 -0
  215. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.GSoP.rst +0 -0
  216. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.GatherExcite.rst +0 -0
  217. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.MultiHeadAttention.rst +0 -0
  218. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.SRM.rst +0 -0
  219. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/attention/braindecode.modules.SqueezeAndExcitation.rst +0 -0
  220. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/blocks/braindecode.modules.FeedForwardBlock.rst +0 -0
  221. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/blocks/braindecode.modules.InceptionBlock.rst +0 -0
  222. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/blocks/braindecode.modules.MLP.rst +0 -0
  223. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.AugmentedDataLoader.rst +0 -0
  224. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.BandstopFilter.rst +0 -0
  225. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.ChannelsDropout.rst +0 -0
  226. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.ChannelsShuffle.rst +0 -0
  227. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.ChannelsSymmetry.rst +0 -0
  228. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.Compose.rst +0 -0
  229. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.FTSurrogate.rst +0 -0
  230. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.FrequencyShift.rst +0 -0
  231. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.GaussianNoise.rst +0 -0
  232. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.IdentityTransform.rst +0 -0
  233. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.MaskEncoding.rst +0 -0
  234. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.Mixup.rst +0 -0
  235. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SegmentationReconstruction.rst +0 -0
  236. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SensorsRotation.rst +0 -0
  237. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SensorsXRotation.rst +0 -0
  238. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SensorsYRotation.rst +0 -0
  239. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SensorsZRotation.rst +0 -0
  240. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SignFlip.rst +0 -0
  241. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.SmoothTimeMask.rst +0 -0
  242. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.TimeReverse.rst +0 -0
  243. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.Transform.rst +0 -0
  244. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.bandstop_filter.rst +0 -0
  245. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.channels_dropout.rst +0 -0
  246. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.channels_permute.rst +0 -0
  247. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.channels_shuffle.rst +0 -0
  248. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.frequency_shift.rst +0 -0
  249. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.ft_surrogate.rst +0 -0
  250. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.gaussian_noise.rst +0 -0
  251. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.identity.rst +0 -0
  252. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.mask_encoding.rst +0 -0
  253. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.mixup.rst +0 -0
  254. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.segmentation_reconstruction.rst +0 -0
  255. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.sensors_rotation.rst +0 -0
  256. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.sign_flip.rst +0 -0
  257. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.smooth_time_mask.rst +0 -0
  258. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.augmentation.functional.time_reverse.rst +0 -0
  259. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.classifier.EEGClassifier.rst +0 -0
  260. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.BCICompetitionIVDataset4.rst +0 -0
  261. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.BIDSDataset.rst +0 -0
  262. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.BIDSEpochsDataset.rst +0 -0
  263. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.BNCI2014001.rst +0 -0
  264. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.BaseConcatDataset.rst +0 -0
  265. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.BaseDataset.rst +0 -0
  266. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.HGD.rst +0 -0
  267. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.MOABBDataset.rst +0 -0
  268. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.NMT.rst +0 -0
  269. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.SleepPhysionet.rst +0 -0
  270. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.SleepPhysionetChallenge2018.rst +0 -0
  271. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.TUH.rst +0 -0
  272. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.TUHAbnormal.rst +0 -0
  273. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.WindowsDataset.rst +0 -0
  274. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.create_from_X_y.rst +0 -0
  275. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.create_from_mne_epochs.rst +0 -0
  276. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datasets.create_from_mne_raw.rst +0 -0
  277. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datautil.load_concat_dataset.rst +0 -0
  278. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.datautil.save_concat_dataset.rst +0 -0
  279. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.drop_path.rst +0 -0
  280. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.glorot_weight_zero_bias.rst +0 -0
  281. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.hilbert_freq.rst +0 -0
  282. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.identity.rst +0 -0
  283. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.plv_time.rst +0 -0
  284. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.rescale_parameter.rst +0 -0
  285. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.safe_log.rst +0 -0
  286. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.functional.square.rst +0 -0
  287. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.ATCNet.rst +0 -0
  288. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.AttentionBaseNet.rst +0 -0
  289. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.BDTCN.rst +0 -0
  290. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.BIOT.rst +0 -0
  291. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.CTNet.rst +0 -0
  292. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.ContraWR.rst +0 -0
  293. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.Deep4Net.rst +0 -0
  294. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.DeepSleepNet.rst +0 -0
  295. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGConformer.rst +0 -0
  296. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGITNet.rst +0 -0
  297. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGInceptionERP.rst +0 -0
  298. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGInceptionMI.rst +0 -0
  299. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGMiner.rst +0 -0
  300. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGModuleMixin.rst +0 -0
  301. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGNeX.rst +0 -0
  302. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGNetv1.rst +0 -0
  303. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGNetv4.rst +0 -0
  304. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGResNet.rst +0 -0
  305. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGSimpleConv.rst +0 -0
  306. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.EEGTCNet.rst +0 -0
  307. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.FBCNet.rst +0 -0
  308. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.FBLightConvNet.rst +0 -0
  309. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.FBMSNet.rst +0 -0
  310. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.IFNet.rst +0 -0
  311. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.Labram.rst +0 -0
  312. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.MSVTNet.rst +0 -0
  313. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SCCNet.rst +0 -0
  314. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SPARCNet.rst +0 -0
  315. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.ShallowFBCSPNet.rst +0 -0
  316. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SignalJEPA.rst +0 -0
  317. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SignalJEPA_Contextual.rst +0 -0
  318. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SignalJEPA_PostLocal.rst +0 -0
  319. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SignalJEPA_PreLocal.rst +0 -0
  320. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SincShallowNet.rst +0 -0
  321. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SleepStagerBlanco2020.rst +0 -0
  322. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SleepStagerChambon2018.rst +0 -0
  323. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SleepStagerEldele2021.rst +0 -0
  324. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.SyncNet.rst +0 -0
  325. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.TIDNet.rst +0 -0
  326. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.TSceptionV1.rst +0 -0
  327. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.models.USleep.rst +0 -0
  328. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.Crop.rst +0 -0
  329. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.DropChannels.rst +0 -0
  330. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.Filter.rst +0 -0
  331. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.Pick.rst +0 -0
  332. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.Preprocessor.rst +0 -0
  333. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.Resample.rst +0 -0
  334. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.SetEEGReference.rst +0 -0
  335. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.create_fixed_length_windows.rst +0 -0
  336. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.create_windows_from_events.rst +0 -0
  337. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.create_windows_from_target_channels.rst +0 -0
  338. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.exponential_moving_demean.rst +0 -0
  339. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.exponential_moving_standardize.rst +0 -0
  340. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.filterbank.rst +0 -0
  341. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.preprocessing.preprocess.rst +0 -0
  342. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.regressor.EEGRegressor.rst +0 -0
  343. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.samplers.BalancedSequenceSampler.rst +0 -0
  344. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.samplers.DistributedRecordingSampler.rst +0 -0
  345. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.samplers.DistributedRelativePositioningSampler.rst +0 -0
  346. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.samplers.RecordingSampler.rst +0 -0
  347. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.samplers.RelativePositioningSampler.rst +0 -0
  348. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.samplers.SequenceSampler.rst +0 -0
  349. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.CroppedLoss.rst +0 -0
  350. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.CroppedTimeSeriesEpochScoring.rst +0 -0
  351. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.CroppedTrialEpochScoring.rst +0 -0
  352. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.PostEpochTrainScoring.rst +0 -0
  353. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.TimeSeriesLoss.rst +0 -0
  354. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.mixup_criterion.rst +0 -0
  355. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.predict_trials.rst +0 -0
  356. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.training.trial_preds_from_window_preds.rst +0 -0
  357. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.util.set_random_seeds.rst +0 -0
  358. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.visualization.compute_amplitude_gradients.rst +0 -0
  359. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/braindecode.visualization.plot_confusion_matrix.rst +0 -0
  360. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/convolution/braindecode.modules.AvgPool2dWithConv.rst +0 -0
  361. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/convolution/braindecode.modules.CausalConv1d.rst +0 -0
  362. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/convolution/braindecode.modules.CombinedConv.rst +0 -0
  363. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/convolution/braindecode.modules.Conv2dWithConstraint.rst +0 -0
  364. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/convolution/braindecode.modules.DepthwiseConv2d.rst +0 -0
  365. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/filter/braindecode.modules.FilterBankLayer.rst +0 -0
  366. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/filter/braindecode.modules.GeneralizedGaussianFilter.rst +0 -0
  367. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/layers/braindecode.modules.Chomp1d.rst +0 -0
  368. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/layers/braindecode.modules.DropPath.rst +0 -0
  369. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/layers/braindecode.modules.Ensure4d.rst +0 -0
  370. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/layers/braindecode.modules.TimeDistributed.rst +0 -0
  371. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/linear/braindecode.modules.LinearWithConstraint.rst +0 -0
  372. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/linear/braindecode.modules.MaxNormLinear.rst +0 -0
  373. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.LogPowerLayer.rst +0 -0
  374. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.LogVarLayer.rst +0 -0
  375. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.MaxLayer.rst +0 -0
  376. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.MeanLayer.rst +0 -0
  377. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.StatLayer.rst +0 -0
  378. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.StdLayer.rst +0 -0
  379. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/stats/braindecode.modules.VarLayer.rst +0 -0
  380. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/util/braindecode.modules.aggregate_probas.rst +0 -0
  381. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/wrapper/braindecode.modules.Expression.rst +0 -0
  382. {braindecode-1.0.0 → braindecode-1.1.0}/docs/generated/wrapper/braindecode.modules.IntermediateOutputWrapper.rst +0 -0
  383. {braindecode-1.0.0 → braindecode-1.1.0}/docs/help.rst +0 -0
  384. {braindecode-1.0.0 → braindecode-1.1.0}/docs/index.rst +0 -0
  385. {braindecode-1.0.0 → braindecode-1.1.0}/docs/install/install.rst +0 -0
  386. {braindecode-1.0.0 → braindecode-1.1.0}/docs/install/install_pip.rst +0 -0
  387. {braindecode-1.0.0 → braindecode-1.1.0}/docs/install/install_source.rst +0 -0
  388. {braindecode-1.0.0 → braindecode-1.1.0}/docs/models_summary.rst +0 -0
  389. {braindecode-1.0.0 → braindecode-1.1.0}/docs/sg_execution_times.rst +0 -0
  390. {braindecode-1.0.0 → braindecode-1.1.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: braindecode
3
- Version: 1.0.0
3
+ Version: 1.1.0
4
4
  Summary: Deep learning software to decode EEG, ECG or MEG signals
5
5
  Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
6
6
  Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
@@ -29,12 +29,12 @@ Requires-Dist: pandas
29
29
  Requires-Dist: scipy
30
30
  Requires-Dist: matplotlib
31
31
  Requires-Dist: h5py
32
- Requires-Dist: skorch
33
- Requires-Dist: torch
34
- Requires-Dist: torchaudio
32
+ Requires-Dist: skorch~=0.14.0
33
+ Requires-Dist: torch~=2.0
34
+ Requires-Dist: torchaudio~=2.0
35
35
  Requires-Dist: einops
36
36
  Requires-Dist: joblib
37
- Requires-Dist: torchinfo
37
+ Requires-Dist: torchinfo~=1.8
38
38
  Requires-Dist: wfdb
39
39
  Requires-Dist: h5py
40
40
  Requires-Dist: linear_attention_transformer
@@ -6,7 +6,6 @@
6
6
 
7
7
  import warnings
8
8
  from numbers import Real
9
- from typing import Callable
10
9
 
11
10
  import numpy as np
12
11
  import torch
@@ -47,3 +47,6 @@ def __getattr__(name):
47
47
  return windowers.__dict__[name]
48
48
 
49
49
  raise AttributeError("No possible import named " + name)
50
+
51
+
52
+ __all__ = ["load_concat_dataset", "save_concat_dataset", "_check_save_dir_empty"]
@@ -107,7 +107,14 @@ def _outdated_load_concat_dataset(path, preload, ids_to_load=None, target_name=N
107
107
  def _load_signals_and_description(path, preload, is_raw, ids_to_load=None):
108
108
  all_signals = []
109
109
  file_name = "{}-raw.fif" if is_raw else "{}-epo.fif"
110
- description_df = pd.read_json(path / "description.json")
110
+ description_df = pd.read_json(
111
+ path / "description.json", typ="series", convert_dates=False
112
+ )
113
+
114
+ if "timestamp" in description_df.index:
115
+ timestamp_numeric = pd.to_numeric(description_df["timestamp"])
116
+ description_df["timestamp"] = pd.to_datetime(timestamp_numeric)
117
+
111
118
  if ids_to_load is None:
112
119
  file_names = path.glob(f"*{file_name.lstrip('{}')}")
113
120
  # Extract ids, e.g.,
@@ -242,7 +249,11 @@ def _load_parallel(path, i, preload, is_raw, has_stored_windows):
242
249
  signals = _load_signals(fif_file_path, preload, is_raw)
243
250
 
244
251
  description_file_path = sub_dir / "description.json"
245
- description = pd.read_json(description_file_path, typ="series")
252
+ description = pd.read_json(description_file_path, typ="series", convert_dates=False)
253
+
254
+ # if 'timestamp' in description.index:
255
+ # timestamp_numeric = pd.to_numeric(description['timestamp'])
256
+ # description['timestamp'] = pd.to_datetime(timestamp_numeric, unit='s')
246
257
 
247
258
  target_file_path = sub_dir / "target_name.json"
248
259
  target_name = None
@@ -8,3 +8,15 @@ from .functions import (
8
8
  square,
9
9
  )
10
10
  from .initialization import glorot_weight_zero_bias, rescale_parameter
11
+
12
+ __all__ = [
13
+ "_get_gaussian_kernel1d",
14
+ "drop_path",
15
+ "hilbert_freq",
16
+ "identity",
17
+ "plv_time",
18
+ "safe_log",
19
+ "square",
20
+ "glorot_weight_zero_bias",
21
+ "rescale_parameter",
22
+ ]
@@ -1,7 +1,6 @@
1
1
  # Authors: Robin Schirrmeister <robintibor@gmail.com>
2
2
  #
3
3
  # License: BSD (3-clause)
4
- import math
5
4
 
6
5
  import torch
7
6
  import torch.nn.functional as F
@@ -50,3 +50,51 @@ from .util import _init_models_dict, models_mandatory_parameters
50
50
  # Call this last in order to make sure the dataset list is populated with
51
51
  # the models imported in this file.
52
52
  _init_models_dict()
53
+
54
+ __all__ = [
55
+ "ATCNet",
56
+ "AttentionBaseNet",
57
+ "EEGModuleMixin",
58
+ "BIOT",
59
+ "ContraWR",
60
+ "CTNet",
61
+ "Deep4Net",
62
+ "DeepSleepNet",
63
+ "EEGConformer",
64
+ "EEGInceptionERP",
65
+ "EEGInceptionMI",
66
+ "EEGITNet",
67
+ "EEGMiner",
68
+ "EEGNetv1",
69
+ "EEGNetv4",
70
+ "EEGNeX",
71
+ "EEGResNet",
72
+ "EEGSimpleConv",
73
+ "EEGTCNet",
74
+ "FBCNet",
75
+ "FBLightConvNet",
76
+ "FBMSNet",
77
+ "HybridNet",
78
+ "IFNet",
79
+ "Labram",
80
+ "MSVTNet",
81
+ "SCCNet",
82
+ "ShallowFBCSPNet",
83
+ "SignalJEPA",
84
+ "SignalJEPA_Contextual",
85
+ "SignalJEPA_PostLocal",
86
+ "SignalJEPA_PreLocal",
87
+ "SincShallowNet",
88
+ "SleepStagerBlanco2020",
89
+ "SleepStagerChambon2018",
90
+ "SleepStagerEldele2021",
91
+ "SPARCNet",
92
+ "SyncNet",
93
+ "BDTCN",
94
+ "TCN",
95
+ "TIDNet",
96
+ "TSceptionV1",
97
+ "USleep",
98
+ "_init_models_dict",
99
+ "models_mandatory_parameters",
100
+ ]
@@ -5,6 +5,7 @@ import math
5
5
 
6
6
  import torch
7
7
  from einops.layers.torch import Rearrange
8
+ from mne.utils import warn
8
9
  from torch import nn
9
10
 
10
11
  from braindecode.models.base import EEGModuleMixin
@@ -69,9 +70,6 @@ class ATCNet(EEGModuleMixin, nn.Module):
69
70
  tcn_kernel_size : int
70
71
  Temporal kernel size used in TCN block, denoted Kt in table 1 of the
71
72
  paper [1]_. Defaults to 4 as in [1]_.
72
- tcn_n_filters : int
73
- Number of filters used in TCN convolutional layers (Ft). Defaults to
74
- 32 as in [1]_.
75
73
  tcn_dropout : float
76
74
  Dropout probability used in the TCN block, denoted pt in table 1
77
75
  of the paper [1]_. Defaults to 0.3 as in [1]_.
@@ -117,7 +115,6 @@ class ATCNet(EEGModuleMixin, nn.Module):
117
115
  att_drop_prob=0.5,
118
116
  tcn_depth=2,
119
117
  tcn_kernel_size=4,
120
- tcn_n_filters=32,
121
118
  tcn_drop_prob=0.3,
122
119
  tcn_activation: nn.Module = nn.ELU,
123
120
  concat=False,
@@ -134,6 +131,45 @@ class ATCNet(EEGModuleMixin, nn.Module):
134
131
  sfreq=sfreq,
135
132
  )
136
133
  del n_outputs, n_chans, chs_info, n_times, input_window_seconds, sfreq
134
+
135
+ # Validate and adjust parameters based on input size
136
+
137
+ min_len_tcn = (tcn_kernel_size - 1) * (2 ** (tcn_depth - 1)) + 1
138
+ # Minimum length required to get at least one sliding window
139
+ min_len_sliding = n_windows + min_len_tcn - 1
140
+ # Minimum input size that produces the required feature map length
141
+ min_n_times = min_len_sliding * conv_block_pool_size_1 * conv_block_pool_size_2
142
+
143
+ # 2. If the input is shorter, calculate a scaling factor
144
+ if self.n_times < min_n_times:
145
+ scaling_factor = self.n_times / min_n_times
146
+ warn(
147
+ f"n_times ({self.n_times}) is smaller than the minimum required "
148
+ f"({min_n_times}) for the current model parameters configuration. "
149
+ "Adjusting parameters to ensure compatibility."
150
+ "Reducing the kernel, pooling, and stride sizes accordingly."
151
+ "Scaling factor: {:.2f}".format(scaling_factor),
152
+ UserWarning,
153
+ )
154
+ conv_block_kernel_length_1 = max(
155
+ 1, int(conv_block_kernel_length_1 * scaling_factor)
156
+ )
157
+ conv_block_kernel_length_2 = max(
158
+ 1, int(conv_block_kernel_length_2 * scaling_factor)
159
+ )
160
+ conv_block_pool_size_1 = max(
161
+ 1, int(conv_block_pool_size_1 * scaling_factor)
162
+ )
163
+ conv_block_pool_size_2 = max(
164
+ 1, int(conv_block_pool_size_2 * scaling_factor)
165
+ )
166
+
167
+ # n_windows should be at least 1
168
+ n_windows = max(1, int(n_windows * scaling_factor))
169
+
170
+ # tcn_kernel_size must be at least 2 for dilation to work
171
+ tcn_kernel_size = max(2, int(tcn_kernel_size * scaling_factor))
172
+
137
173
  self.conv_block_n_filters = conv_block_n_filters
138
174
  self.conv_block_kernel_length_1 = conv_block_kernel_length_1
139
175
  self.conv_block_kernel_length_2 = conv_block_kernel_length_2
@@ -147,12 +183,11 @@ class ATCNet(EEGModuleMixin, nn.Module):
147
183
  self.att_dropout = att_drop_prob
148
184
  self.tcn_depth = tcn_depth
149
185
  self.tcn_kernel_size = tcn_kernel_size
150
- self.tcn_n_filters = tcn_n_filters
151
186
  self.tcn_dropout = tcn_drop_prob
152
187
  self.tcn_activation = tcn_activation
153
188
  self.concat = concat
154
189
  self.max_norm_const = max_norm_const
155
-
190
+ self.tcn_n_filters = int(self.conv_block_depth_mult * self.conv_block_n_filters)
156
191
  map = dict()
157
192
  for w in range(self.n_windows):
158
193
  map[f"max_norm_linears.[{w}].weight"] = f"final_layer.[{w}].weight"
@@ -197,13 +232,13 @@ class ATCNet(EEGModuleMixin, nn.Module):
197
232
  *[
198
233
  _TCNResidualBlock(
199
234
  in_channels=self.F2,
200
- kernel_size=tcn_kernel_size,
201
- n_filters=tcn_n_filters,
202
- dropout=tcn_drop_prob,
203
- activation=tcn_activation,
235
+ kernel_size=self.tcn_kernel_size,
236
+ n_filters=self.tcn_n_filters,
237
+ dropout=self.tcn_dropout,
238
+ activation=self.tcn_activation,
204
239
  dilation=2**i,
205
240
  )
206
- for i in range(tcn_depth)
241
+ for i in range(self.tcn_depth)
207
242
  ]
208
243
  )
209
244
  for _ in range(self.n_windows)
@@ -3,6 +3,7 @@ from __future__ import annotations
3
3
  import math
4
4
 
5
5
  from einops.layers.torch import Rearrange
6
+ from mne.utils import warn
6
7
  from torch import nn
7
8
 
8
9
  from braindecode.models.base import EEGModuleMixin
@@ -162,6 +163,33 @@ class AttentionBaseNet(EEGModuleMixin, nn.Module):
162
163
  )
163
164
  del n_outputs, n_chans, chs_info, n_times, sfreq, input_window_seconds
164
165
 
166
+ min_n_times_required = self._get_min_n_times(
167
+ pool_length_inp,
168
+ pool_stride_inp,
169
+ pool_length,
170
+ )
171
+
172
+ if self.n_times < min_n_times_required:
173
+ scaling_factor = self.n_times / min_n_times_required
174
+ warn(
175
+ f"n_times ({self.n_times}) is smaller than the minimum required "
176
+ f"({min_n_times_required}) for the current model parameters configuration. "
177
+ "Adjusting parameters to ensure compatibility."
178
+ "Reducing the kernel, pooling, and stride sizes accordingly.\n"
179
+ "Scaling factor: {:.2f}".format(scaling_factor),
180
+ UserWarning,
181
+ )
182
+ # 3. Scale down all temporal parameters proportionally
183
+ # Use max(1, ...) to ensure parameters remain valid
184
+ temp_filter_length_inp = max(
185
+ 1, int(temp_filter_length_inp * scaling_factor)
186
+ )
187
+ pool_length_inp = max(1, int(pool_length_inp * scaling_factor))
188
+ pool_stride_inp = max(1, int(pool_stride_inp * scaling_factor))
189
+ temp_filter_length = max(1, int(temp_filter_length * scaling_factor))
190
+ pool_length = max(1, int(pool_length * scaling_factor))
191
+ pool_stride = max(1, int(pool_stride * scaling_factor))
192
+
165
193
  self.input_block = _FeatureExtractor(
166
194
  n_chans=self.n_chans,
167
195
  n_temporal_filters=n_temporal_filters,
@@ -231,6 +259,27 @@ class AttentionBaseNet(EEGModuleMixin, nn.Module):
231
259
  seq_lengths.append(int(out))
232
260
  return seq_lengths
233
261
 
262
+ @staticmethod
263
+ def _get_min_n_times(
264
+ pool_length_inp: int,
265
+ pool_stride_inp: int,
266
+ pool_length: int,
267
+ ) -> int:
268
+ """
269
+ Calculates the minimum n_times required for the model to work
270
+ with the given parameters.
271
+
272
+ The calculation is based on reversing the pooling operations to
273
+ ensure the input to each is valid.
274
+ """
275
+ # The input to the second pooling layer must be at least its kernel size.
276
+ min_len_for_second_pool = pool_length
277
+
278
+ # Reverse the first pooling operation to find the required input size.
279
+ # Formula: min_L_in = Stride * (min_L_out - 1) + Kernel
280
+ min_len = pool_stride_inp * (min_len_for_second_pool - 1) + pool_length_inp
281
+ return min_len
282
+
234
283
 
235
284
  class _FeatureExtractor(nn.Module):
236
285
  """
@@ -87,6 +87,10 @@ class BIOT(EEGModuleMixin, nn.Module):
87
87
  input_window_seconds=None,
88
88
  activation: nn.Module = nn.ELU,
89
89
  drop_prob: float = 0.5,
90
+ # Parameters for the encoder
91
+ max_seq_len: int = 1024,
92
+ attn_dropout=0.2,
93
+ attn_layer_dropout=0.2,
90
94
  ):
91
95
  super().__init__(
92
96
  n_outputs=n_outputs,
@@ -123,14 +127,29 @@ class BIOT(EEGModuleMixin, nn.Module):
123
127
  UserWarning,
124
128
  )
125
129
  hop_length = self.sfreq // 2
130
+
131
+ if self.input_window_seconds < 1.0:
132
+ warning_msg = (
133
+ "The input window is less than 1 second, which may not be "
134
+ "sufficient for the model to learn meaningful representations."
135
+ "Changing the `n_fft` to `n_times`."
136
+ )
137
+ warn(warning_msg, UserWarning)
138
+ self.n_fft = self.n_times
139
+ else:
140
+ self.n_fft = int(self.sfreq)
141
+
126
142
  self.encoder = _BIOTEncoder(
127
143
  emb_size=emb_size,
128
144
  att_num_heads=att_num_heads,
129
145
  n_layers=n_layers,
130
146
  n_chans=self.n_chans,
131
- n_fft=self.sfreq,
147
+ n_fft=self.n_fft,
132
148
  hop_length=hop_length,
133
149
  drop_prob=drop_prob,
150
+ max_seq_len=max_seq_len,
151
+ attn_dropout=attn_dropout,
152
+ attn_layer_dropout=attn_layer_dropout,
134
153
  )
135
154
 
136
155
  self.final_layer = _ClassificationHead(
@@ -231,12 +250,11 @@ class _ClassificationHead(nn.Sequential):
231
250
 
232
251
  def __init__(self, emb_size: int, n_outputs: int, activation: nn.Module = nn.ELU):
233
252
  super().__init__()
234
- self.classification_head = nn.Sequential(
235
- activation(),
236
- nn.Linear(emb_size, n_outputs),
237
- )
253
+ self.activation_layer = activation()
254
+ self.classification_head = nn.Linear(emb_size, n_outputs)
238
255
 
239
256
  def forward(self, x):
257
+ x = self.activation_layer(x)
240
258
  out = self.classification_head(x)
241
259
  return out
242
260
 
@@ -344,6 +362,9 @@ class _BIOTEncoder(nn.Module):
344
362
  n_fft=200, # Related with the frequency resolution
345
363
  hop_length=100,
346
364
  drop_prob: float = 0.1,
365
+ max_seq_len: int = 1024, # The maximum sequence length
366
+ attn_dropout=0.2, # dropout post-attention
367
+ attn_layer_dropout=0.2, # dropout right after self-attention layer
347
368
  ):
348
369
  super().__init__()
349
370
 
@@ -357,9 +378,9 @@ class _BIOTEncoder(nn.Module):
357
378
  dim=emb_size,
358
379
  heads=att_num_heads,
359
380
  depth=n_layers,
360
- max_seq_len=1024,
361
- attn_layer_dropout=0.2, # dropout right after self-attention layer
362
- attn_dropout=0.2, # dropout post-attention
381
+ max_seq_len=max_seq_len,
382
+ attn_layer_dropout=attn_layer_dropout,
383
+ attn_dropout=attn_dropout,
363
384
  )
364
385
  self.positional_encoding = _PositionalEncoding(emb_size, drop_prob=drop_prob)
365
386
 
@@ -2,6 +2,7 @@ from __future__ import annotations
2
2
 
3
3
  import torch
4
4
  import torch.nn as nn
5
+ from mne.utils import warn
5
6
 
6
7
  from braindecode.models.base import EEGModuleMixin
7
8
 
@@ -57,6 +58,9 @@ class ContraWR(EEGModuleMixin, nn.Module):
57
58
  steps=20,
58
59
  activation: nn.Module = nn.ELU,
59
60
  drop_prob: float = 0.5,
61
+ stride_res: int = 2,
62
+ kernel_size_res: int = 3,
63
+ padding_res: int = 1,
60
64
  # Another way to pass the EEG parameters
61
65
  chs_info=None,
62
66
  n_times=None,
@@ -74,7 +78,17 @@ class ContraWR(EEGModuleMixin, nn.Module):
74
78
  if not isinstance(res_channels, list):
75
79
  raise ValueError("res_channels must be a list of integers.")
76
80
 
77
- self.n_fft = int(self.sfreq)
81
+ if self.input_window_seconds < 1.0:
82
+ warning_msg = (
83
+ "The input window is less than 1 second, which may not be "
84
+ "sufficient for the model to learn meaningful representations."
85
+ "changing the `n_fft` to `n_times`."
86
+ )
87
+ warn(warning_msg, UserWarning)
88
+ self.n_fft = self.n_times
89
+ else:
90
+ self.n_fft = int(self.sfreq)
91
+
78
92
  self.steps = steps
79
93
 
80
94
  res_channels = [self.n_chans] + res_channels + [emb_size]
@@ -89,19 +103,22 @@ class ContraWR(EEGModuleMixin, nn.Module):
89
103
  _ResBlock(
90
104
  in_channels=res_channels[i],
91
105
  out_channels=res_channels[i + 1],
92
- stride=2,
106
+ stride=stride_res,
93
107
  use_downsampling=True,
94
108
  pooling=True,
95
109
  drop_prob=drop_prob,
110
+ kernel_size=kernel_size_res,
111
+ padding=padding_res,
112
+ activation=activation,
96
113
  )
97
114
  for i in range(len(res_channels) - 1)
98
115
  ]
99
116
  )
117
+ self.adaptative_pool = nn.AdaptiveAvgPool2d((1, 1))
118
+ self.flatten_layer = nn.Flatten()
100
119
 
101
- self.final_layer = nn.Sequential(
102
- activation(),
103
- nn.Linear(emb_size, self.n_outputs),
104
- )
120
+ self.activation_layer = activation()
121
+ self.final_layer = nn.Linear(emb_size, self.n_outputs)
105
122
 
106
123
  def forward(self, X: torch.Tensor) -> torch.Tensor:
107
124
  """
@@ -118,9 +135,13 @@ class ContraWR(EEGModuleMixin, nn.Module):
118
135
  """
119
136
  X = self.torch_stft(X)
120
137
 
121
- for conv in self.convs[:-1]:
138
+ for conv in self.convs:
122
139
  X = conv.forward(X)
123
- emb = self.convs[-1](X).squeeze(-1).squeeze(-1)
140
+
141
+ emb = self.adaptative_pool(X)
142
+ emb = self.flatten_layer(emb)
143
+ emb = self.activation_layer(emb)
144
+
124
145
  return self.final_layer(emb)
125
146
 
126
147
 
@@ -10,6 +10,7 @@ classification from Wei Zhao et al. (2024).
10
10
  from __future__ import annotations
11
11
 
12
12
  import math
13
+ from typing import Optional
13
14
 
14
15
  import torch
15
16
  from einops.layers.torch import Rearrange
@@ -57,7 +58,7 @@ class CTNet(EEGModuleMixin, nn.Module):
57
58
  Activation function to use in the network.
58
59
  heads : int, default=4
59
60
  Number of attention heads in the Transformer encoder.
60
- emb_size : int, default=40
61
+ emb_size : int or None, default=None
61
62
  Embedding size (dimensionality) for the Transformer encoder.
62
63
  depth : int, default=6
63
64
  Number of encoder layers in the Transformer.
@@ -110,11 +111,11 @@ class CTNet(EEGModuleMixin, nn.Module):
110
111
  drop_prob_final: float = 0.5,
111
112
  # other parameters
112
113
  heads: int = 4,
113
- emb_size: int = 40,
114
+ emb_size: Optional[int] = 40,
114
115
  depth: int = 6,
115
- n_filters_time: int = 20,
116
+ n_filters_time: Optional[int] = None,
116
117
  kernel_size: int = 64,
117
- depth_multiplier: int = 2,
118
+ depth_multiplier: Optional[int] = 2,
118
119
  pool_size_1: int = 8,
119
120
  pool_size_2: int = 8,
120
121
  ):
@@ -128,21 +129,18 @@ class CTNet(EEGModuleMixin, nn.Module):
128
129
  )
129
130
  del n_outputs, n_chans, chs_info, n_times, input_window_seconds, sfreq
130
131
 
131
- self.emb_size = emb_size
132
132
  self.activation_patch = activation_patch
133
133
  self.activation_transformer = activation_transformer
134
-
135
- self.n_filters_time = n_filters_time
136
134
  self.drop_prob_cnn = drop_prob_cnn
137
135
  self.pool_size_1 = pool_size_1
138
136
  self.pool_size_2 = pool_size_2
139
- self.depth_multiplier = depth_multiplier
140
137
  self.kernel_size = kernel_size
141
138
  self.drop_prob_posi = drop_prob_posi
142
139
  self.drop_prob_final = drop_prob_final
143
-
140
+ self.heads = heads
141
+ self.depth = depth
144
142
  # n_times - pool_size_1 / p
145
- sequence_length = math.floor(
143
+ self.sequence_length = math.floor(
146
144
  (
147
145
  math.floor((self.n_times - self.pool_size_1) / self.pool_size_1 + 1)
148
146
  - self.pool_size_2
@@ -151,6 +149,10 @@ class CTNet(EEGModuleMixin, nn.Module):
151
149
  + 1
152
150
  )
153
151
 
152
+ self.depth_multiplier, self.n_filters_time, self.emb_size = self._resolve_dims(
153
+ depth_multiplier, n_filters_time, emb_size
154
+ )
155
+
154
156
  # Layers
155
157
  self.ensuredim = Rearrange("batch nchans time -> batch 1 nchans time")
156
158
  self.flatten = nn.Flatten()
@@ -167,14 +169,17 @@ class CTNet(EEGModuleMixin, nn.Module):
167
169
  )
168
170
 
169
171
  self.position = _PositionalEncoding(
170
- emb_size=emb_size,
172
+ emb_size=self.emb_size,
171
173
  drop_prob=self.drop_prob_posi,
172
174
  n_times=self.n_times,
173
175
  pool_size=self.pool_size_1,
174
176
  )
175
177
 
176
178
  self.trans = _TransformerEncoder(
177
- heads, depth, emb_size, activation=self.activation_transformer
179
+ self.heads,
180
+ self.depth,
181
+ self.emb_size,
182
+ activation=self.activation_transformer,
178
183
  )
179
184
 
180
185
  self.flatten_drop_layer = nn.Sequential(
@@ -183,7 +188,8 @@ class CTNet(EEGModuleMixin, nn.Module):
183
188
  )
184
189
 
185
190
  self.final_layer = nn.Linear(
186
- in_features=emb_size * sequence_length, out_features=self.n_outputs
191
+ in_features=int(self.emb_size * self.sequence_length),
192
+ out_features=self.n_outputs,
187
193
  )
188
194
 
189
195
  def forward(self, x: Tensor) -> Tensor:
@@ -210,6 +216,86 @@ class CTNet(EEGModuleMixin, nn.Module):
210
216
  out = self.final_layer(flatten_feature)
211
217
  return out
212
218
 
219
+ @staticmethod
220
+ def _resolve_dims(
221
+ depth_multiplier: Optional[int],
222
+ n_filters_time: Optional[int],
223
+ emb_size: Optional[int],
224
+ ) -> tuple[int, int, int]:
225
+ # Basic type/positivity checks for provided values
226
+ for name, val in (
227
+ ("depth_multiplier", depth_multiplier),
228
+ ("n_filters_time", n_filters_time),
229
+ ("emb_size", emb_size),
230
+ ):
231
+ if val is not None:
232
+ if not isinstance(val, int):
233
+ raise TypeError(f"{name} must be int, got {type(val).__name__}")
234
+ if val <= 0:
235
+ raise ValueError(f"{name} must be > 0, got {val}")
236
+
237
+ missing = [
238
+ k
239
+ for k, v in {
240
+ "depth_multiplier": depth_multiplier,
241
+ "n_filters_time": n_filters_time,
242
+ "emb_size": emb_size,
243
+ }.items()
244
+ if v is None
245
+ ]
246
+
247
+ if len(missing) >= 2:
248
+ # Too many unknowns → ambiguous
249
+ raise ValueError(
250
+ "Specify exactly two of {depth_multiplier, n_filters_time, emb_size}; the third will be inferred."
251
+ )
252
+
253
+ if len(missing) == 1:
254
+ # Infer the missing one
255
+ if missing[0] == "emb_size":
256
+ assert depth_multiplier is not None and n_filters_time is not None
257
+ emb_size = depth_multiplier * n_filters_time
258
+ elif missing[0] == "n_filters_time":
259
+ assert emb_size is not None and depth_multiplier is not None
260
+ if emb_size % depth_multiplier != 0:
261
+ raise ValueError(
262
+ f"emb_size={emb_size} must be divisible by depth_multiplier={depth_multiplier}"
263
+ )
264
+ n_filters_time = emb_size // depth_multiplier
265
+ else: # missing depth_multiplier
266
+ assert emb_size is not None and n_filters_time is not None
267
+ if emb_size % n_filters_time != 0:
268
+ raise ValueError(
269
+ f"emb_size={emb_size} must be divisible by n_filters_time={n_filters_time}"
270
+ )
271
+ depth_multiplier = emb_size // n_filters_time
272
+
273
+ else:
274
+ # All provided: enforce consistency
275
+ assert (
276
+ depth_multiplier is not None
277
+ and n_filters_time is not None
278
+ and emb_size is not None
279
+ )
280
+ prod = depth_multiplier * n_filters_time
281
+ if prod != emb_size:
282
+ raise ValueError(
283
+ "`depth_multiplier * n_filters_time` must equal `emb_size`, "
284
+ f"but got {depth_multiplier} * {n_filters_time} = {prod} != {emb_size}. "
285
+ "Fix by setting one of: "
286
+ f"emb_size={prod}, "
287
+ f"n_filters_time={emb_size // depth_multiplier if emb_size % depth_multiplier == 0 else 'not integer'}, "
288
+ f"depth_multiplier={emb_size // n_filters_time if emb_size % n_filters_time == 0 else 'not integer'}."
289
+ )
290
+
291
+ # Ensure plain ints for the return type
292
+ assert (
293
+ depth_multiplier is not None
294
+ and n_filters_time is not None
295
+ and emb_size is not None
296
+ )
297
+ return depth_multiplier, n_filters_time, emb_size
298
+
213
299
 
214
300
  class _PatchEmbeddingEEGNet(nn.Module):
215
301
  def __init__(