eegdash 0.4.0.dev173498563.tar.gz → 0.4.1.tar.gz

This diff shows the contents of publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.

Files changed (417)
  1. {eegdash-0.4.0.dev173498563/eegdash.egg-info → eegdash-0.4.1}/PKG-INFO +6 -8
  2. eegdash-0.4.1/docs/build/html/_downloads/06c8d94b7e0b8be2de39fdc122dd12bb/tutorial_challenge_2.ipynb +261 -0
  3. eegdash-0.4.1/docs/build/html/_downloads/2c592649a2079630923cb072bc1beaf3/tutorial_eoec.ipynb +187 -0
  4. eegdash-0.4.1/docs/build/html/_downloads/5702e607758ace8a64a5cb0cf540ace7/tutorial_eegdash_offline.ipynb +151 -0
  5. eegdash-0.4.1/docs/build/html/_downloads/9f4f54b7e99e554f34ea4efcf2a8337e/tutorial_challenge_1.ipynb +297 -0
  6. eegdash-0.4.1/docs/build/html/_downloads/f3cf56a30a7c06a2eccae3b5b3d28e35/tutorial_feature_extractor_open_close_eye.ipynb +253 -0
  7. eegdash-0.4.1/docs/build/html/notebooks/generated/auto_examples/core/tutorial_eoec.ipynb +187 -0
  8. eegdash-0.4.1/docs/build/html/notebooks/generated/auto_examples/core/tutorial_feature_extractor_open_close_eye.ipynb +253 -0
  9. eegdash-0.4.1/docs/build/html/notebooks/generated/auto_examples/eeg2025/tutorial_challenge_1.ipynb +297 -0
  10. eegdash-0.4.1/docs/build/html/notebooks/generated/auto_examples/eeg2025/tutorial_challenge_2.ipynb +261 -0
  11. eegdash-0.4.1/docs/build/html/notebooks/generated/auto_examples/eeg2025/tutorial_eegdash_offline.ipynb +151 -0
  12. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/_templates/autosummary/module.rst +16 -0
  13. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/api/api_core.rst +1 -1
  14. eegdash-0.4.1/docs/source/api/dataset/api_dataset.rst +326 -0
  15. eegdash-0.4.1/docs/source/api/dataset/eegdash.api.rst +8 -0
  16. eegdash-0.4.1/docs/source/api/dataset/eegdash.bids_eeg_metadata.rst +8 -0
  17. eegdash-0.4.1/docs/source/api/dataset/eegdash.const.rst +8 -0
  18. eegdash-0.4.1/docs/source/api/dataset/eegdash.data_utils.rst +8 -0
  19. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS001785.rst +64 -0
  20. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS001787.rst +64 -0
  21. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS001810.rst +64 -0
  22. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS001849.rst +64 -0
  23. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS001971.rst +64 -0
  24. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002034.rst +64 -0
  25. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002094.rst +64 -0
  26. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002158.rst +62 -0
  27. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002181.rst +63 -0
  28. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002218.rst +64 -0
  29. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002336.rst +63 -0
  30. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002338.rst +63 -0
  31. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002578.rst +64 -0
  32. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002680.rst +64 -0
  33. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002691.rst +64 -0
  34. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002718.rst +64 -0
  35. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002720.rst +64 -0
  36. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002721.rst +64 -0
  37. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002722.rst +64 -0
  38. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002723.rst +64 -0
  39. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002724.rst +64 -0
  40. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002725.rst +64 -0
  41. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002778.rst +64 -0
  42. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002814.rst +64 -0
  43. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002833.rst +64 -0
  44. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS002893.rst +64 -0
  45. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003004.rst +64 -0
  46. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003039.rst +64 -0
  47. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003061.rst +64 -0
  48. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003190.rst +64 -0
  49. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003194.rst +64 -0
  50. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003195.rst +64 -0
  51. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003343.rst +64 -0
  52. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003421.rst +64 -0
  53. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003458.rst +64 -0
  54. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003474.rst +64 -0
  55. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003478.rst +64 -0
  56. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003490.rst +64 -0
  57. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003505.rst +64 -0
  58. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003506.rst +64 -0
  59. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003509.rst +64 -0
  60. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003516.rst +64 -0
  61. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003517.rst +64 -0
  62. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003518.rst +64 -0
  63. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003519.rst +64 -0
  64. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003522.rst +64 -0
  65. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003523.rst +64 -0
  66. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003555.rst +63 -0
  67. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003570.rst +64 -0
  68. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003574.rst +64 -0
  69. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003602.rst +64 -0
  70. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003626.rst +62 -0
  71. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003638.rst +64 -0
  72. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003645.rst +62 -0
  73. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003655.rst +64 -0
  74. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003670.rst +64 -0
  75. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003690.rst +64 -0
  76. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003702.rst +64 -0
  77. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003710.rst +64 -0
  78. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003739.rst +64 -0
  79. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003751.rst +64 -0
  80. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003753.rst +64 -0
  81. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003766.rst +64 -0
  82. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003768.rst +62 -0
  83. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003801.rst +64 -0
  84. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003805.rst +64 -0
  85. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003810.rst +64 -0
  86. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003816.rst +64 -0
  87. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003822.rst +64 -0
  88. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003825.rst +64 -0
  89. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003838.rst +64 -0
  90. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003846.rst +64 -0
  91. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003885.rst +64 -0
  92. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003887.rst +64 -0
  93. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003944.rst +64 -0
  94. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003947.rst +64 -0
  95. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003969.rst +64 -0
  96. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS003987.rst +64 -0
  97. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004000.rst +64 -0
  98. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004010.rst +64 -0
  99. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004015.rst +64 -0
  100. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004018.rst +64 -0
  101. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004022.rst +64 -0
  102. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004024.rst +64 -0
  103. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004033.rst +64 -0
  104. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004040.rst +64 -0
  105. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004043.rst +64 -0
  106. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004067.rst +64 -0
  107. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004075.rst +62 -0
  108. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004117.rst +64 -0
  109. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004152.rst +64 -0
  110. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004196.rst +64 -0
  111. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004200.rst +64 -0
  112. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004252.rst +62 -0
  113. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004256.rst +63 -0
  114. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004262.rst +64 -0
  115. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004264.rst +64 -0
  116. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004279.rst +64 -0
  117. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004284.rst +64 -0
  118. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004295.rst +64 -0
  119. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004306.rst +64 -0
  120. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004315.rst +64 -0
  121. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004317.rst +64 -0
  122. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004324.rst +64 -0
  123. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004347.rst +64 -0
  124. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004348.rst +64 -0
  125. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004350.rst +64 -0
  126. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004356.rst +64 -0
  127. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004357.rst +64 -0
  128. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004362.rst +64 -0
  129. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004367.rst +64 -0
  130. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004368.rst +64 -0
  131. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004369.rst +64 -0
  132. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004381.rst +64 -0
  133. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004388.rst +63 -0
  134. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004389.rst +63 -0
  135. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004408.rst +64 -0
  136. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004444.rst +64 -0
  137. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004446.rst +64 -0
  138. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004447.rst +64 -0
  139. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004448.rst +64 -0
  140. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004460.rst +64 -0
  141. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004475.rst +64 -0
  142. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004477.rst +64 -0
  143. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004504.rst +64 -0
  144. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004505.rst +64 -0
  145. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004511.rst +63 -0
  146. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004515.rst +64 -0
  147. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004519.rst +64 -0
  148. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004520.rst +64 -0
  149. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004521.rst +64 -0
  150. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004532.rst +64 -0
  151. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004554.rst +64 -0
  152. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004561.rst +64 -0
  153. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004572.rst +64 -0
  154. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004574.rst +64 -0
  155. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004577.rst +64 -0
  156. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004579.rst +64 -0
  157. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004580.rst +64 -0
  158. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004582.rst +64 -0
  159. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004584.rst +64 -0
  160. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004587.rst +64 -0
  161. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004588.rst +64 -0
  162. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004595.rst +64 -0
  163. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004598.rst +62 -0
  164. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004602.rst +64 -0
  165. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004603.rst +64 -0
  166. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004621.rst +63 -0
  167. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004625.rst +64 -0
  168. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004626.rst +64 -0
  169. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004635.rst +64 -0
  170. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004657.rst +64 -0
  171. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004660.rst +64 -0
  172. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004661.rst +64 -0
  173. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004718.rst +64 -0
  174. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004745.rst +62 -0
  175. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004752.rst +64 -0
  176. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004771.rst +64 -0
  177. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004784.rst +64 -0
  178. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004785.rst +64 -0
  179. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004796.rst +63 -0
  180. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004802.rst +64 -0
  181. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004816.rst +64 -0
  182. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004817.rst +64 -0
  183. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004840.rst +64 -0
  184. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004841.rst +64 -0
  185. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004842.rst +64 -0
  186. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004843.rst +64 -0
  187. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004844.rst +64 -0
  188. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004849.rst +63 -0
  189. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004850.rst +63 -0
  190. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004851.rst +63 -0
  191. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004852.rst +63 -0
  192. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004853.rst +63 -0
  193. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004854.rst +63 -0
  194. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004855.rst +63 -0
  195. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004860.rst +64 -0
  196. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004883.rst +64 -0
  197. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004902.rst +64 -0
  198. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004917.rst +62 -0
  199. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004942.rst +64 -0
  200. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004951.rst +64 -0
  201. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004952.rst +64 -0
  202. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004980.rst +64 -0
  203. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS004995.rst +62 -0
  204. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005021.rst +64 -0
  205. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005028.rst +62 -0
  206. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005034.rst +64 -0
  207. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005048.rst +63 -0
  208. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005079.rst +64 -0
  209. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005089.rst +64 -0
  210. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005095.rst +64 -0
  211. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005106.rst +64 -0
  212. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005114.rst +64 -0
  213. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005121.rst +64 -0
  214. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005131.rst +64 -0
  215. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005170.rst +62 -0
  216. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005185.rst +63 -0
  217. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005189.rst +64 -0
  218. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005207.rst +64 -0
  219. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005262.rst +62 -0
  220. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005273.rst +64 -0
  221. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005274.rst +64 -0
  222. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005296.rst +63 -0
  223. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005305.rst +64 -0
  224. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005307.rst +63 -0
  225. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005340.rst +63 -0
  226. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005342.rst +64 -0
  227. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005345.rst +63 -0
  228. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005363.rst +63 -0
  229. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005383.rst +63 -0
  230. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005385.rst +63 -0
  231. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005397.rst +63 -0
  232. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005403.rst +63 -0
  233. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005406.rst +63 -0
  234. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005410.rst +63 -0
  235. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005416.rst +63 -0
  236. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005420.rst +63 -0
  237. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005429.rst +63 -0
  238. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005486.rst +62 -0
  239. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005505.rst +63 -0
  240. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005506.rst +63 -0
  241. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005507.rst +63 -0
  242. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005508.rst +63 -0
  243. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005509.rst +63 -0
  244. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005510.rst +63 -0
  245. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005511.rst +63 -0
  246. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005512.rst +63 -0
  247. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005514.rst +63 -0
  248. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005515.rst +63 -0
  249. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005516.rst +63 -0
  250. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005520.rst +63 -0
  251. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005530.rst +63 -0
  252. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005540.rst +63 -0
  253. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005555.rst +63 -0
  254. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005565.rst +62 -0
  255. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005571.rst +63 -0
  256. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005586.rst +63 -0
  257. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005594.rst +63 -0
  258. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005620.rst +63 -0
  259. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005672.rst +63 -0
  260. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005688.rst +63 -0
  261. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005692.rst +63 -0
  262. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005697.rst +63 -0
  263. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005779.rst +63 -0
  264. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005787.rst +63 -0
  265. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005795.rst +63 -0
  266. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005811.rst +63 -0
  267. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005815.rst +63 -0
  268. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005863.rst +63 -0
  269. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005866.rst +62 -0
  270. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005868.rst +62 -0
  271. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005873.rst +63 -0
  272. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.DS005876.rst +63 -0
  273. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.EEGChallengeDataset.rst +17 -0
  274. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.dataset.rst +8 -0
  275. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.registry.rst +8 -0
  276. eegdash-0.4.1/docs/source/api/dataset/eegdash.dataset.rst +17 -0
  277. eegdash-0.4.1/docs/source/api/dataset/eegdash.downloader.rst +8 -0
  278. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.datasets.rst +8 -0
  279. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.decorators.rst +8 -0
  280. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.extractors.rst +8 -0
  281. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.complexity.rst +8 -0
  282. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.connectivity.rst +8 -0
  283. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.csp.rst +8 -0
  284. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.dimensionality.rst +8 -0
  285. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.rst +22 -0
  286. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.signal.rst +8 -0
  287. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.spectral.rst +8 -0
  288. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.feature_bank.utils.rst +8 -0
  289. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.inspect.rst +8 -0
  290. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.rst +29 -0
  291. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.serialization.rst +8 -0
  292. eegdash-0.4.1/docs/source/api/dataset/eegdash.features.utils.rst +8 -0
  293. eegdash-0.4.1/docs/source/api/dataset/eegdash.hbn.preprocessing.rst +8 -0
  294. eegdash-0.4.1/docs/source/api/dataset/eegdash.hbn.rst +17 -0
  295. eegdash-0.4.1/docs/source/api/dataset/eegdash.hbn.windows.rst +8 -0
  296. eegdash-0.4.1/docs/source/api/dataset/eegdash.logging.rst +8 -0
  297. eegdash-0.4.1/docs/source/api/dataset/eegdash.mongodb.rst +8 -0
  298. eegdash-0.4.1/docs/source/api/dataset/eegdash.paths.rst +8 -0
  299. eegdash-0.4.1/docs/source/api/dataset/eegdash.rst +34 -0
  300. eegdash-0.4.1/docs/source/api/dataset/eegdash.utils.rst +8 -0
  301. eegdash-0.4.1/docs/source/api/dataset/modules.rst +7 -0
  302. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.api.rst +18 -0
  303. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.bids_eeg_metadata.rst +20 -0
  304. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.const.rst +20 -0
  305. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.data_utils.rst +19 -0
  306. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.hbn.rst +27 -0
  307. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.logging.rst +17 -0
  308. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.mongodb.rst +17 -0
  309. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.paths.rst +17 -0
  310. eegdash-0.4.1/docs/source/api/generated/api-core/eegdash.utils.rst +11 -0
  311. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.datasets.rst +18 -0
  312. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.decorators.rst +31 -0
  313. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.extractors.rst +22 -0
  314. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.feature_bank.rst +63 -0
  315. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.inspect.rst +21 -0
  316. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.rst +82 -0
  317. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.serialization.rst +17 -0
  318. eegdash-0.4.1/docs/source/api/generated/api-features/eegdash.features.utils.rst +18 -0
  319. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/conf.py +121 -4
  320. eegdash-0.4.1/docs/source/dataset_summary/treemap.rst +19 -0
  321. eegdash-0.4.1/docs/source/dataset_summary.rst +99 -0
  322. eegdash-0.4.1/docs/source/developer_notes.rst +118 -0
  323. eegdash-0.4.1/docs/source/generated/auto_examples/core/sg_execution_times.rst +40 -0
  324. eegdash-0.4.1/docs/source/generated/auto_examples/core/tutorial_eoec.ipynb +187 -0
  325. eegdash-0.4.1/docs/source/generated/auto_examples/core/tutorial_eoec.rst +423 -0
  326. eegdash-0.4.1/docs/source/generated/auto_examples/core/tutorial_feature_extractor_open_close_eye.ipynb +253 -0
  327. eegdash-0.4.1/docs/source/generated/auto_examples/core/tutorial_feature_extractor_open_close_eye.rst +547 -0
  328. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/sg_execution_times.rst +43 -0
  329. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/tutorial_challenge_1.ipynb +297 -0
  330. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/tutorial_challenge_1.rst +730 -0
  331. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/tutorial_challenge_2.ipynb +261 -0
  332. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/tutorial_challenge_2.rst +486 -0
  333. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/tutorial_eegdash_offline.ipynb +151 -0
  334. eegdash-0.4.1/docs/source/generated/auto_examples/eeg2025/tutorial_eegdash_offline.rst +238 -0
  335. eegdash-0.4.1/docs/source/generated/auto_examples/index.rst +178 -0
  336. eegdash-0.4.1/docs/source/generated/auto_examples/sg_execution_times.rst +37 -0
  337. eegdash-0.4.1/docs/source/index.rst +86 -0
  338. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/install/install_source.rst +2 -0
  339. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/user_guide.rst +1 -1
  340. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/__init__.py +1 -1
  341. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/api.py +183 -88
  342. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/bids_eeg_metadata.py +139 -39
  343. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/const.py +25 -0
  344. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/data_utils.py +333 -276
  345. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/dataset/dataset.py +35 -13
  346. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/dataset/dataset_summary.csv +255 -255
  347. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/dataset/registry.py +69 -4
  348. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/downloader.py +95 -9
  349. eegdash-0.4.1/eegdash/features/datasets.py +682 -0
  350. eegdash-0.4.1/eegdash/features/decorators.py +144 -0
  351. eegdash-0.4.1/eegdash/features/extractors.py +366 -0
  352. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/complexity.py +7 -3
  353. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/dimensionality.py +1 -1
  354. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/signal.py +11 -10
  355. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/utils.py +8 -0
  356. eegdash-0.4.1/eegdash/features/inspect.py +131 -0
  357. eegdash-0.4.1/eegdash/features/serialization.py +119 -0
  358. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/utils.py +80 -8
  359. eegdash-0.4.1/eegdash/hbn/preprocessing.py +105 -0
  360. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/hbn/windows.py +145 -32
  361. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/logging.py +19 -0
  362. eegdash-0.4.1/eegdash/mongodb.py +97 -0
  363. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/paths.py +14 -5
  364. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/utils.py +16 -1
  365. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1/eegdash.egg-info}/PKG-INFO +6 -8
  366. eegdash-0.4.1/eegdash.egg-info/SOURCES.txt +405 -0
  367. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash.egg-info/requires.txt +6 -7
  368. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/pyproject.toml +18 -17
  369. eegdash-0.4.1/tests/test_features.py +67 -0
  370. eegdash-0.4.1/tests/test_speed_regression.py +337 -0
  371. eegdash-0.4.0.dev173498563/docs/source/dataset_summary.rst +0 -40
  372. eegdash-0.4.0.dev173498563/docs/source/index.rst +0 -63
  373. eegdash-0.4.0.dev173498563/eegdash/features/datasets.py +0 -493
  374. eegdash-0.4.0.dev173498563/eegdash/features/decorators.py +0 -51
  375. eegdash-0.4.0.dev173498563/eegdash/features/extractors.py +0 -209
  376. eegdash-0.4.0.dev173498563/eegdash/features/inspect.py +0 -48
  377. eegdash-0.4.0.dev173498563/eegdash/features/serialization.py +0 -87
  378. eegdash-0.4.0.dev173498563/eegdash/hbn/preprocessing.py +0 -72
  379. eegdash-0.4.0.dev173498563/eegdash/mongodb.py +0 -80
  380. eegdash-0.4.0.dev173498563/eegdash.egg-info/SOURCES.txt +0 -72
  381. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/LICENSE +0 -0
  382. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/MANIFEST.in +0 -0
  383. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/README.md +0 -0
  384. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/Makefile +0 -0
  385. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/api/api.rst +0 -0
  386. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/api/api_features.rst +0 -0
  387. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/dataset_summary/bubble.rst +0 -0
  388. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/dataset_summary/kde.rst +0 -0
  389. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/dataset_summary/sankey.rst +0 -0
  390. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/dataset_summary/table.rst +0 -0
  391. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/install/install.rst +0 -0
  392. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/install/install_pip.rst +0 -0
  393. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/docs/source/sg_execution_times.rst +0 -0
  394. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/dataset/__init__.py +0 -0
  395. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/__init__.py +0 -0
  396. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/__init__.py +0 -0
  397. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/connectivity.py +0 -0
  398. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/csp.py +0 -0
  399. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/features/feature_bank/spectral.py +0 -0
  400. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash/hbn/__init__.py +0 -0
  401. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash.egg-info/dependency_links.txt +0 -0
  402. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/eegdash.egg-info/top_level.txt +0 -0
  403. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/setup.cfg +0 -0
  404. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_api.py +0 -0
  405. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_bids_dependencies.py +0 -0
  406. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_cache_folder_suffix.py +0 -0
  407. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_challenge_kwargs.py +0 -0
  408. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_correctness.py +0 -0
  409. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_dataset.py +0 -0
  410. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_dataset_registration.py +0 -0
  411. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_downloader.py +0 -0
  412. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_eegdash.py +0 -0
  413. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_init.py +0 -0
  414. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_minirelease.py +0 -0
  415. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_mongo_connection.py +0 -0
  416. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_offline.py +0 -0
  417. {eegdash-0.4.0.dev173498563 → eegdash-0.4.1}/tests/test_query.py +0 -0
{eegdash-0.4.0.dev173498563/eegdash.egg-info → eegdash-0.4.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: eegdash
-Version: 0.4.0.dev173498563
+Version: 0.4.1
 Summary: EEG data for machine learning
 Author-email: Young Truong <dt.young112@gmail.com>, Arnaud Delorme <adelorme@gmail.com>, Aviv Dotan <avivd220@gmail.com>, Oren Shriki <oren70@gmail.com>, Bruno Aristimunha <b.aristimunha@gmail.com>
 License-Expression: GPL-3.0-only
@@ -27,23 +27,17 @@ License-File: LICENSE
 Requires-Dist: braindecode>=1.0
 Requires-Dist: mne_bids>=0.17.0
 Requires-Dist: numba
-Requires-Dist: numpy
-Requires-Dist: pandas
-Requires-Dist: pybids
 Requires-Dist: pymongo
-Requires-Dist: python-dotenv
 Requires-Dist: s3fs
-Requires-Dist: scipy
 Requires-Dist: tqdm
-Requires-Dist: h5io>=0.2.4
 Requires-Dist: pymatreader
 Requires-Dist: eeglabio
 Requires-Dist: tabulate
-Requires-Dist: docstring_inheritance
 Requires-Dist: rich
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: pytest-cov; extra == "tests"
+Requires-Dist: pytest-sugar; extra == "tests"
 Requires-Dist: codecov; extra == "tests"
 Requires-Dist: pytest_cases; extra == "tests"
 Requires-Dist: pytest-benchmark; extra == "tests"
@@ -66,10 +60,14 @@ Requires-Dist: lightgbm; extra == "docs"
 Requires-Dist: plotly; extra == "docs"
 Requires-Dist: nbformat; extra == "docs"
 Requires-Dist: graphviz; extra == "docs"
+Provides-Extra: digestion
+Requires-Dist: pybids; extra == "digestion"
+Requires-Dist: python-dotenv; extra == "digestion"
 Provides-Extra: all
 Requires-Dist: eegdash[docs]; extra == "all"
 Requires-Dist: eegdash[dev]; extra == "all"
 Requires-Dist: eegdash[tests]; extra == "all"
+Requires-Dist: eegdash[digestion]; extra == "all"
 Dynamic: license-file
 
 # EEG-Dash
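
Note on the dependency changes above: pybids and python-dotenv move out of the core requirements and into a new "digestion" extra (also pulled in by eegdash[all]), so environments that relied on them transitively now need `pip install "eegdash[digestion]"`. A minimal sketch of how a reviewer could verify this against an installed 0.4.1, using only the standard library (hypothetical check, not part of the package):

```python
# Sketch: list the requirements eegdash declares for the new "digestion"
# extra. importlib.metadata reads the same Requires-Dist entries shown in
# the PKG-INFO diff above. Assumes eegdash 0.4.1 is installed.
from importlib.metadata import requires

reqs = requires("eegdash") or []
digestion = [r for r in reqs if 'extra == "digestion"' in r]
print(digestion)  # expected: entries for pybids and python-dotenv
```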
eegdash-0.4.1/docs/build/html/_downloads/06c8d94b7e0b8be2de39fdc122dd12bb/tutorial_challenge_2.ipynb
@@ -0,0 +1,261 @@
+{
+"cells": [
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"# For tips on running notebooks in Google Colab:\n# `pip install eegdash`\n%matplotlib inline"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"\n.. meta::\n :html_theme.sidebar_secondary.remove: true\n\n# Challenge 2: Predicting the p-factor from EEG\n :depth: 2\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"<img src=\"https://colab.research.google.com/assets/colab-badge.svg\" target=\"https://colab.research.google.com/github/eeg2025/startkit/blob/main/challenge_2.ipynb\" alt=\"Open In Colab\">\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Preliminary notes\nBefore we begin, I just want to make a deal with you, ok?\nThis is a community competition with a strong open-source foundation.\nWhen I say open-source, I mean volunteer work.\n\nSo, if you see something that does not work or could be improved, first, **please be kind**, and\nwe will fix it together on GitHub, okay?\n\nThe entire decoding community will only go further when we stop\nsolving the same problems over and over again, and it starts working together.\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Overview\nThe psychopathology factor (P-factor) is a widely recognized construct in mental health research, representing a common underlying dimension of psychopathology across various disorders.\nCurrently, the P-factor is often assessed using self-report questionnaires or clinician ratings, which can be subjective, prone to bias, and time-consuming.\n**The Challenge 2** consists of developing a model to predict the P-factor from EEG recordings.\n\nThe challenge encourages learning physiologically meaningful signal representations and discovery of reproducible biomarkers.\nModels of any size should emphasize robust, interpretable features that generalize across subjects,\nsessions, and acquisition sites.\n\nUnlike a standard in-distribution classification task, this regression problem stresses out-of-distribution robustness\nand extrapolation. The goal is not only to minimize error on seen subjects, but also to transfer effectively to unseen data.\nEnsure the dataset is available locally. If not, see the\n[dataset download guide](https://eeg2025.github.io/data/#downloading-the-data)_.\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Contents of this start kit\n<div class=\"alert alert-info\"><h4>Note</h4><p>If you need additional explanations on the\n :doc:`EEGChallengeDataset\n </api/dataset/eegdash.dataset.EEGChallengeDataset>` class, dataloading,\n [braindecode](https://braindecode.org/stable/models/models_table.html)_'s\n deep learning models, or brain decoding in general, please refer to the\n start-kit of challenge 1 which delves deeper into these topics.</p></div>\n\nMore contents will be released during the competition inside the\n:mod:`eegdash` [examples webpage](https://eeglab.org/EEGDash/generated/auto_examples/index.html)_.\n\n.. admonition:: Prerequisites\n :class: important\n\n The tutorial assumes prior knowledge of:\n\n - Standard neural network architectures (e.g., CNNs)\n - Optimization by batch gradient descent and backpropagation\n - Overfitting, early stopping, and regularization\n - Some knowledge of PyTorch\n - Basic familiarity with EEG and preprocessing\n - An appreciation for open-source work :)\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Install dependencies on Colab\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>These installs are optional; skip on local environments\n where you already have the dependencies installed.</p></div>\n\n```bash\npip install eegdash\n```\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Imports\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"from pathlib import Path\nimport math\nimport os\nimport random\nfrom joblib import Parallel, delayed\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.nn.functional import l1_loss\nfrom braindecode.preprocessing import create_fixed_length_windows\nfrom braindecode.datasets.base import EEGWindowsDataset, BaseConcatDataset, BaseDataset\nfrom braindecode.models import EEGNeX\nfrom eegdash import EEGChallengeDataset"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"<div class=\"alert alert-danger\"><h4>Warning</h4><p>In case of Colab, before starting, make sure you're on a GPU instance\n for faster training! If running on Google Colab, please request a GPU runtime\n by clicking `Runtime/Change runtime type` in the top bar menu, then selecting\n 'T4 GPU' under 'Hardware accelerator'.</p></div>\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Identify whether a CUDA-enabled GPU is available\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nif device == \"cuda\":\n msg = \"CUDA-enabled GPU found. Training should be faster.\"\nelse:\n msg = (\n \"No GPU found. Training will be carried out on CPU, which might be \"\n \"slower.\\n\\nIf running on Google Colab, you can request a GPU runtime by\"\n \" clicking\\n`Runtime/Change runtime type` in the top bar menu, then \"\n \"selecting 'T4 GPU'\\nunder 'Hardware accelerator'.\"\n )\nprint(msg)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Understanding the P-factor regression task.\n\nThe psychopathology factor (P-factor) is a widely recognized construct in mental health research, representing a common underlying dimension of psychopathology across various disorders.\nThe P-factor is thought to reflect the shared variance among different psychiatric conditions, suggesting that individuals with higher P-factor scores may be more vulnerable to a range of mental health issues.\nCurrently, the P-factor is often assessed using self-report questionnaires or clinician ratings, which can be subjective, prone to bias, and time-consuming.\nIn the dataset of this challenge, the P-factor was assessed using the Child\nBehavior Checklist (CBCL) [McElroy et al., (2017)](https://doi.org/10.1111/jcpp.12849)_.\n\nThe goal of Challenge 2 is to develop a model to predict the P-factor from EEG recordings.\n**The feasibility of using EEG data for this purpose is still an open question**.\nThe solution may involve finding meaningful representations of the EEG data that correlate with the P-factor scores.\nThe challenge encourages learning physiologically meaningful signal representations and discovery of reproducible biomarkers.\nIf contestants are successful in this task, it could pave the way for more objective and efficient assessments of the P-factor in clinical settings.\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Define local path and (down)load the data\nIn this challenge 2 example, we load the EEG 2025 release using\n:doc:`EEGChallengeDataset </api/dataset/eegdash.dataset.EEGChallengeDataset>`.\n**Note:** in this example notebook, we load the contrast change detection task from one mini release only as an example. Naturally, you are encouraged to train your models on all complete releases, using data from all the tasks you deem relevant.\n\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"The first step is to define the cache folder!\nMatch tests' cache layout under ~/eegdash_cache/eeg_challenge_cache\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"DATA_DIR = (Path.home() / \"eegdash_cache\" / \"eeg_challenge_cache\").resolve()\n\n# Creating the path if it does not exist\nDATA_DIR.mkdir(parents=True, exist_ok=True)\n\n# We define the list of releases to load.\n# Here, only release 5 is loaded.\nrelease_list = [\"R5\"]\n\nall_datasets_list = [\n EEGChallengeDataset(\n release=release,\n task=\"contrastChangeDetection\",\n mini=True,\n description_fields=[\n \"subject\",\n \"session\",\n \"run\",\n \"task\",\n \"age\",\n \"gender\",\n \"sex\",\n \"p_factor\",\n ],\n cache_dir=DATA_DIR,\n )\n for release in release_list\n]\nprint(\"Datasets loaded\")\nsub_rm = [\"NDARWV769JM7\"]"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Combine the datasets into a single one\nHere, we combine the datasets from the different releases into a single\n``BaseConcatDataset`` object.\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"all_datasets = BaseConcatDataset(all_datasets_list)\nprint(all_datasets.description)\n\nraws = Parallel(n_jobs=os.cpu_count())(\n delayed(lambda d: d.raw)(d) for d in all_datasets.datasets\n)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Inspect your data\nWe can check what is inside the dataset consuming the\nMNE-object inside the Braindecode dataset.\n\nThe following snippet, if uncommented, will show the first 10 seconds of the raw EEG signal.\nWe can also inspect the data further by looking at the events and annotations.\nWe strongly recommend you to take a look into the details and check how the events are structured.\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"raw = all_datasets.datasets[0].raw # mne.io.Raw object\n\nprint(raw.info)\n\nraw.plot(duration=10, scalings=\"auto\", show=True)\n\nprint(raw.annotations)\n\nSFREQ = 100"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Wrap the data into a PyTorch-compatible dataset\nThe class below defines a dataset wrapper that will extract 2-second windows,\nuniformly sampled over the whole signal. In addition, it will add useful information\nabout the extracted windows, such as the p-factor, the subject or the task.\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"class DatasetWrapper(BaseDataset):\n def __init__(self, dataset: EEGWindowsDataset, crop_size_samples: int, seed=None):\n self.dataset = dataset\n self.crop_size_samples = crop_size_samples\n self.rng = random.Random(seed)\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n X, _, crop_inds = self.dataset[index]\n\n # P-factor label:\n p_factor = self.dataset.description[\"p_factor\"]\n p_factor = float(p_factor)\n\n # Additional information:\n infos = {\n \"subject\": self.dataset.description[\"subject\"],\n \"sex\": self.dataset.description[\"sex\"],\n \"age\": float(self.dataset.description[\"age\"]),\n \"task\": self.dataset.description[\"task\"],\n \"session\": self.dataset.description.get(\"session\", None) or \"\",\n \"run\": self.dataset.description.get(\"run\", None) or \"\",\n }\n\n # Randomly crop the signal to the desired length:\n i_window_in_trial, i_start, i_stop = crop_inds\n assert i_stop - i_start >= self.crop_size_samples, f\"{i_stop=} {i_start=}\"\n start_offset = self.rng.randint(0, i_stop - i_start - self.crop_size_samples)\n i_start = i_start + start_offset\n i_stop = i_start + self.crop_size_samples\n X = X[:, start_offset : start_offset + self.crop_size_samples]\n\n return X, p_factor, (i_window_in_trial, i_start, i_stop), infos\n\n\n# We filter out certain recordings, create fixed length windows and finally make use of our `DatasetWrapper`."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Filter out recordings that are too short\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"all_datasets = BaseConcatDataset(\n [\n ds\n for ds in all_datasets.datasets\n if ds.description.subject not in sub_rm\n and ds.raw.n_times >= 4 * SFREQ\n and len(ds.raw.ch_names) == 129\n and not math.isnan(ds.description[\"p_factor\"])\n ]\n)\n\n# Create 4-seconds windows with 2-seconds stride\nwindows_ds = create_fixed_length_windows(\n all_datasets,\n window_size_samples=4 * SFREQ,\n window_stride_samples=2 * SFREQ,\n drop_last_window=True,\n)\n\n# Wrap each sub-dataset in the windows_ds\nwindows_ds = BaseConcatDataset(\n [DatasetWrapper(ds, crop_size_samples=2 * SFREQ) for ds in windows_ds.datasets]\n)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Inspect the label distribution\n\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"import numpy as np\nfrom skorch.helper import SliceDataset\n\ny_label = np.array(list(SliceDataset(windows_ds, 1)))\n\n# Plot histogram of the response times with matplotlib\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(figsize=(10, 5))\nax.hist(y_label)\nax.set_title(\"Response Time Distribution\")\nax.set_xlabel(\"Response Time (s)\")\nax.set_ylabel(\"Count\")\nplt.tight_layout()\nplt.show()"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Define, train and save a model\n ---------------------------------\n Now we have our pytorch dataset necessary for the training!\n\n Below, we define a simple EEGNeX model from Braindecode.\n All the braindecode models expect the input to be of shape (batch_size, n_channels, n_times)\n and have a test coverage about the behavior of the model.\n However, you can use any pytorch model you want.\n\n#####################################################################\n Initialize model\n -----------------\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"model = EEGNeX(n_chans=129, n_outputs=1, n_times=2 * SFREQ).to(device)\n\n# Specify optimizer\noptimizer = optim.Adamax(params=model.parameters(), lr=0.002)\n\nprint(model)\n\n\n# Finally, we can train our model. Here we define a simple training loop using pure PyTorch.\n# In this example, we only train for a single epoch. Feel free to increase the number of epochs.\n# Create PyTorch Dataloader\n\nnum_workers = (\n 0 # Set num_workers to 0 to avoid multiprocessing issues in notebooks/tutorials.\n)\ndataloader = DataLoader(\n windows_ds, batch_size=128, shuffle=True, num_workers=num_workers\n)\n\nn_epochs = 1\n\n# Train model for 1 epoch\nfor epoch in range(n_epochs):\n for idx, batch in enumerate(dataloader):\n # Reset gradients\n optimizer.zero_grad()\n\n # Unpack the batch\n X, y, crop_inds, infos = batch\n X = X.to(dtype=torch.float32, device=device)\n y = y.to(dtype=torch.float32, device=device).unsqueeze(1)\n\n # Forward pass\n y_pred = model(X)\n\n # Compute loss\n loss = l1_loss(y_pred, y)\n print(f\"Epoch {0} - step {idx}, loss: {loss.item()}\")\n\n # Gradient backpropagation\n loss.backward()\n optimizer.step()\n\n# Finally, we can save the model for later use\ntorch.save(model.state_dict(), \"weights_challenge_2.pt\")\nprint(\"Model saved as 'weights_challenge_2.pt'\")"
+]
+}
+],
+"metadata": {
+"kernelspec": {
+"display_name": "Python 3",
+"language": "python",
+"name": "python3"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.12.11"
+}
+},
+"nbformat": 4,
+"nbformat_minor": 0
+}
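
For quick reference, the new tutorial above centers on a single entry point, the EEGChallengeDataset class. A minimal sketch of that loading step, with all argument values copied from the notebook (the cache path is the tutorial's choice, not a requirement):

```python
# Sketch of the dataset loading step from tutorial_challenge_2.ipynb.
# Argument values are taken verbatim from the notebook above.
from pathlib import Path

from braindecode.datasets.base import BaseConcatDataset
from eegdash import EEGChallengeDataset

DATA_DIR = (Path.home() / "eegdash_cache" / "eeg_challenge_cache").resolve()
DATA_DIR.mkdir(parents=True, exist_ok=True)

datasets = [
    EEGChallengeDataset(
        release=release,
        task="contrastChangeDetection",
        mini=True,  # one mini release only, as in the example
        cache_dir=DATA_DIR,
    )
    for release in ["R5"]
]
all_datasets = BaseConcatDataset(datasets)
```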
@@ -0,0 +1,187 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "collapsed": false
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "# For tips on running notebooks in Google Colab:\n# `pip install eegdash`\n%matplotlib inline"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "markdown",
16
+ "metadata": {},
17
+ "source": [
18
+ "\n# Eyes Open vs. Closed Classification\n\nEEGDash example for eyes open vs. closed classification.\n\nThis example uses the :mod:`eegdash` library in combination with PyTorch to develop a deep learning model for analyzing EEG data, specifically for eyes open vs. closed classification in a single subject.\n\n1. **Data Retrieval Using EEGDash**: An instance of :class:`eegdash.api.EEGDashDataset` is created to search and retrieve an EEG dataset. At this step, only the metadata is transferred.\n\n2. **Data Preprocessing Using BrainDecode**: This process preprocesses EEG data using Braindecode by reannotating events, selecting specific channels, resampling, filtering, and extracting 2-second epochs, ensuring balanced eyes-open and eyes-closed data for analysis.\n\n3. **Creating train and testing sets**: The dataset is split into training (80%) and testing (20%) sets with balanced labels, converted into PyTorch tensors, and wrapped in DataLoader objects for efficient mini-batch training.\n\n4. **Model Definition**: The model is a shallow convolutional neural network (ShallowFBCSPNet) with 24 input channels (EEG channels), 2 output classes (eyes-open and eyes-closed).\n\n5. **Model Training and Evaluation Process**: This section trains the neural network, normalizes input data, computes cross-entropy loss, updates model parameters, and evaluates classification accuracy over six epochs.\n"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "markdown",
23
+ "metadata": {},
24
+ "source": [
25
+ "## Data Retrieval Using EEGDash\n\nThis section instantiates :class:`eegdash.api.EEGDashDataset` to fetch\nthe metadata for the experiment before requesting any recordings.\n\nFirst we find one resting state dataset. This dataset contains both eyes open\nand eyes closed data.\n\n"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": null,
31
+ "metadata": {
32
+ "collapsed": false
33
+ },
34
+ "outputs": [],
35
+ "source": [
36
+ "from pathlib import Path\n\ncache_folder = Path.home() / \"eegdash\""
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "code",
41
+ "execution_count": null,
42
+ "metadata": {
43
+ "collapsed": false
44
+ },
45
+ "outputs": [],
46
+ "source": [
47
+ "from eegdash import EEGDashDataset\n\nds_eoec = EEGDashDataset(\n query={\"dataset\": \"ds005514\", \"task\": \"RestingState\", \"subject\": \"NDARDB033FW5\"},\n cache_dir=cache_folder,\n)"
48
+ ]
49
+ },
50
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Data Preprocessing Using Braindecode\n\n[braindecode](https://braindecode.org/stable/install/install.html) is a\nspecialized library for preprocessing EEG and MEG data. In this dataset, there\nare two key events in the continuous data: **instructed_toCloseEyes**, marking\nthe start of a 40-second eyes-closed period, and **instructed_toOpenEyes**,\nindicating the start of a 20-second eyes-open period.\n\nFor the eyes-closed event, we extract 14 seconds of data from 15 to 29 seconds\nafter the event onset. Similarly, for the eyes-open event, we extract data\nfrom 5 to 19 seconds after the event onset. This ensures an equal amount of\ndata for both conditions. The event extraction is handled by the custom\nfunction :func:`eegdash.hbn.preprocessing.hbn_ec_ec_reannotation`.\n\nNext, we apply four preprocessing steps in Braindecode:\n\n1. **Reannotation** of event markers using :func:`eegdash.hbn.preprocessing.hbn_ec_ec_reannotation`.\n2. **Selection** of 24 specific EEG channels from the original 128.\n3. **Resampling** the EEG data to a frequency of 128 Hz.\n4. **Filtering** the EEG signals to retain frequencies between 1 Hz and 55 Hz.\n\nWhen calling the `preprocess` function, the data is retrieved from the remote\nrepository.\n\nFinally, we use `create_windows_from_events` to extract 2-second epochs from\nthe data. These epochs serve as the dataset samples. At this stage, each\nsample is automatically labeled with the corresponding event type (eyes-open\nor eyes-closed). `windows_ds` is a PyTorch dataset, and when queried, it\nreturns labels for eyes-open and eyes-closed (assigned as labels 0 and 1,\ncorresponding to their respective event markers).\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "from braindecode.preprocessing import (\n    preprocess,\n    Preprocessor,\n    create_windows_from_events,\n)\nimport numpy as np\nfrom eegdash.hbn.preprocessing import hbn_ec_ec_reannotation\nimport warnings\n\nwarnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\n\n# Braindecode preprocessors\npreprocessors = [\n    hbn_ec_ec_reannotation(),\n    # Keep 24 channels out of the original 128\n    Preprocessor(\n        \"pick_channels\",\n        ch_names=[\n            \"E22\", \"E9\", \"E33\", \"E24\", \"E11\", \"E124\", \"E122\", \"E29\",\n            \"E6\", \"E111\", \"E45\", \"E36\", \"E104\", \"E108\", \"E42\", \"E55\",\n            \"E93\", \"E58\", \"E52\", \"E62\", \"E92\", \"E96\", \"E70\", \"Cz\",\n        ],\n    ),\n    Preprocessor(\"resample\", sfreq=128),\n    Preprocessor(\"filter\", l_freq=1, h_freq=55),\n]\npreprocess(ds_eoec, preprocessors)\n\n# Extract 2-second epochs (256 samples at 128 Hz)\nwindows_ds = create_windows_from_events(\n    ds_eoec,\n    trial_start_offset_samples=0,\n    trial_stop_offset_samples=256,\n    preload=True,\n)"
+ ]
+ },
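+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "After preprocessing, it is worth confirming how many 2-second windows were\nproduced and that the two classes are roughly balanced. A minimal sketch,\nassuming each item of `windows_ds` is a `(signal, label, window_index)` tuple\nas in braindecode:\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n\n# Count windows per class label (0/1); bincount gives one count per class\nlabels = np.array([y for _, y, _ in windows_ds])\nprint(f\"{len(windows_ds)} windows, class counts: {np.bincount(labels)}\")"
+ ]
+ },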
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Plotting a Single Channel for One Sample\n\nIt\u2019s always good practice to verify that the data has been properly loaded\nand processed. Here, we plot a single channel from one sample to ensure the\nsignal is present and looks as expected.\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n\nplt.figure()\nplt.plot(windows_ds[2][0][0, :])  # first channel of the third 2-second window\nplt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Creating training and test sets\n\nThe code below creates the training and test sets. We first split the data\ninto training and test sets using the **train_test_split** function from the\n**sklearn** library. We then create a **TensorDataset** for each of the two\nsets.\n\n1. **Set Random Seed** \u2013 The random seed is fixed using\n   `torch.manual_seed(random_state)` to ensure reproducibility in dataset\n   splitting and model training.\n2. **Extract Labels from the Dataset** \u2013 Labels (eyes-open or eyes-closed\n   events) are extracted from `windows_ds`, stored as a NumPy array, and\n   printed for verification.\n3. **Split Dataset into Train and Test Sets** \u2013 The dataset is split into\n   training (80%) and testing (20%) subsets using `train_test_split()`,\n   ensuring balanced stratification based on the extracted labels.\n4. **Convert Data to PyTorch Tensors** \u2013 The selected training and testing\n   samples are converted into `FloatTensor` for input features and\n   `LongTensor` for labels, making them compatible with PyTorch models.\n5. **Create DataLoaders** \u2013 The datasets are wrapped in PyTorch DataLoader\n   objects with a batch size of 10, enabling efficient mini-batch training and\n   shuffling.\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import torch\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader, TensorDataset\n\n# Set random seed for reproducibility\nrandom_state = 42\ntorch.manual_seed(random_state)\nnp.random.seed(random_state)\n\n# Extract one label per 2-second window\neo_ec = np.array([ds[1] for ds in windows_ds])\nprint(\"labels: \", eo_ec)\n\n# Stratified split keeps the eyes-open/eyes-closed ratio in both subsets\ntrain_indices, test_indices = train_test_split(\n    range(len(windows_ds)), test_size=0.2, stratify=eo_ec, random_state=random_state\n)\n\n# Convert the data to tensors\nX_train = torch.FloatTensor(\n    np.array([windows_ds[i][0] for i in train_indices])\n)  # stack training windows into a single tensor\nX_test = torch.FloatTensor(\n    np.array([windows_ds[i][0] for i in test_indices])\n)  # stack test windows into a single tensor\ny_train = torch.LongTensor(eo_ec[train_indices])  # convert targets to tensors\ny_test = torch.LongTensor(eo_ec[test_indices])\ndataset_train = TensorDataset(X_train, y_train)\ndataset_test = TensorDataset(X_test, y_test)\n\n# Create data loaders for training and testing (batch size 10)\ntrain_loader = DataLoader(dataset_train, batch_size=10, shuffle=True)\ntest_loader = DataLoader(dataset_test, batch_size=10, shuffle=True)\n\n# Print shapes and sizes to verify the split\nprint(\n    f\"Training data shape: {X_train.shape}; samples - train: {len(dataset_train)}, test: {len(dataset_test)}\"\n)\nprint(\n    f\"Eyes-Open/Eyes-Closed balance, train: {np.mean(eo_ec[train_indices]):.2f}, test: {np.mean(eo_ec[test_indices]):.2f}\"\n)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Check labels\n\nIt is good practice to verify the labels and ensure the stratified split is\nworking as intended. If all labels in a batch belong to a single class (all\n0s or all 1s), it could indicate an issue with data loading or\nstratification, requiring further investigation.\n\n"
+ ]
+ },
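+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick tabulation (a minimal sketch reusing the arrays defined above),\nwe can count the class labels in each split before inspecting individual\nbatches:\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Class counts per split; assumes eo_ec, train_indices and test_indices\n# from the previous cell\nprint(\"train counts:\", np.bincount(eo_ec[train_indices]))\nprint(\"test counts: \", np.bincount(eo_ec[test_indices]))"
+ ]
+ },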
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Visualize a batch of target labels.\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "dataiter = iter(train_loader)\nfirst_batch, labels = next(dataiter)  # one mini-batch of inputs and labels\nlabels"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create model\n\nThe model is a shallow convolutional neural network (ShallowFBCSPNet) with 24\ninput channels (EEG channels), 2 output classes (eyes-open and eyes-closed),\nand an input window size of 256 samples (2 seconds of EEG data).\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import torch\nfrom torch.nn import functional as F\nfrom braindecode.models import ShallowFBCSPNet\nfrom torchinfo import summary\n\ntorch.manual_seed(random_state)\nmodel = ShallowFBCSPNet(n_chans=24, n_outputs=2, n_times=256, final_conv_length=\"auto\")\nsummary(model, input_size=(1, 24, 256))"
+ ]
+ },
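+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before training, a dry run on random data (a minimal sketch) verifies that\nthe model maps a `(batch, channels, times)` input to one logit per class:\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Forward a dummy batch to check the output shape: (batch, n_outputs)\nwith torch.no_grad():\n    out = model(torch.randn(4, 24, 256))\nprint(out.shape)  # expected: torch.Size([4, 2])"
+ ]
+ },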
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Model Training and Evaluation Process\n\nThis section trains the neural network using the Adamax optimizer, normalizes\ninput data, computes cross-entropy loss, updates model parameters, and tracks\naccuracy across six epochs.\n\n1. **Set Up Optimizer and Learning Rate Scheduler** \u2013 The `Adamax` optimizer\n   is initialized with a learning rate of 0.002 and a weight decay of 0.001\n   for regularization. An `ExponentialLR` scheduler with a decay factor of 1\n   keeps the learning rate constant.\n2. **Allocate Model to Device** \u2013 The model moves to the specified device\n   (CPU, GPU, or MPS for Apple silicon) to optimize computation efficiency.\n3. **Normalize Input Data** \u2013 The `normalize_data` function standardizes input\n   data by subtracting the mean and dividing by the standard deviation along\n   the time dimension before transferring it to the appropriate device.\n4. **Run the Training Loop** \u2013 For each of the six epochs, the loop iterates\n   through data batches with the model in training mode. It normalizes\n   inputs, computes predictions, calculates cross-entropy loss, performs\n   backpropagation, updates model parameters, and steps the learning rate\n   scheduler once per epoch. It tracks correct predictions to compute\n   training accuracy.\n5. **Evaluate on Test Data** \u2013 After each epoch, the model runs in evaluation\n   mode on the test set with gradients disabled. It computes predictions on\n   normalized data and calculates test accuracy by comparing outputs with the\n   actual labels.\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "optimizer = torch.optim.Adamax(model.parameters(), lr=0.002, weight_decay=0.001)\n# gamma=1 keeps the learning rate constant; a lower gamma would decay it\nscheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=1)\n\ndevice = torch.device(\n    \"cuda\"\n    if torch.cuda.is_available()\n    else \"mps\"\n    if torch.backends.mps.is_available()\n    else \"cpu\"\n)\nmodel = model.to(device=device)  # move the model parameters to CPU/GPU\nepochs = 6\n\n\ndef normalize_data(x):\n    # Standardize each channel of each window along the time dimension\n    mean = x.mean(dim=2, keepdim=True)\n    std = x.std(dim=2, keepdim=True) + 1e-7  # small epsilon for numerical stability\n    x = (x - mean) / std\n    return x.to(device=device, dtype=torch.float32)  # move to device, e.g. GPU\n\n\nfor e in range(epochs):\n    # Training\n    model.train()  # put model in training mode\n    correct_train = 0\n    for x, y in train_loader:\n        scores = model(normalize_data(x))\n        y = y.to(device=device, dtype=torch.long)\n        _, preds = scores.max(1)\n        correct_train += (preds == y).sum().item() / len(dataset_train)\n\n        loss = F.cross_entropy(scores, y)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n    scheduler.step()  # one scheduler step per epoch\n\n    # Validation\n    model.eval()  # put model in evaluation mode\n    correct_test = 0\n    with torch.no_grad():\n        for x, y in test_loader:\n            scores = model(normalize_data(x))\n            y = y.to(device=device, dtype=torch.long)\n            _, preds = scores.max(1)\n            correct_test += (preds == y).sum().item() / len(dataset_test)\n\n    # Reporting\n    print(\n        f\"Epoch {e}, Train accuracy: {correct_train:.2f}, Test accuracy: {correct_test:.2f}\"\n    )"
+ ]
+ },
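+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Accuracy alone does not show which condition the model confuses. As a final\ncheck, the sketch below computes a confusion matrix on the held-out windows,\nreusing scikit-learn (already required for the split above).\n\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "from sklearn.metrics import confusion_matrix\n\n# Predict all test windows at once and compare against the true labels;\n# rows are true classes, columns are predicted classes\nmodel.eval()\nwith torch.no_grad():\n    test_scores = model(normalize_data(X_test))\npreds = test_scores.argmax(dim=1).cpu().numpy()\nprint(confusion_matrix(y_test.numpy(), preds))"
+ ]
+ }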
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }