py-neuromodulation 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (233) hide show
  1. py_neuromodulation/ConnectivityDecoding/Automated Anatomical Labeling 3 (Rolls 2020).nii +0 -0
  2. py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -0
  3. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -0
  4. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -0
  5. py_neuromodulation/ConnectivityDecoding/mni_coords_cortical_surface.mat +0 -0
  6. py_neuromodulation/ConnectivityDecoding/mni_coords_whole_brain.mat +0 -0
  7. py_neuromodulation/ConnectivityDecoding/rmap_func_all.nii +0 -0
  8. py_neuromodulation/ConnectivityDecoding/rmap_struc.nii +0 -0
  9. py_neuromodulation/FieldTrip.py +589 -589
  10. py_neuromodulation/__init__.py +74 -13
  11. py_neuromodulation/_write_example_dataset_helper.py +83 -65
  12. py_neuromodulation/data/README +6 -0
  13. py_neuromodulation/data/dataset_description.json +8 -0
  14. py_neuromodulation/data/participants.json +32 -0
  15. py_neuromodulation/data/participants.tsv +2 -0
  16. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -0
  17. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -0
  18. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -0
  19. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.eeg +0 -0
  20. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -0
  21. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -0
  22. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -0
  23. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -0
  24. py_neuromodulation/grid_cortex.tsv +40 -0
  25. py_neuromodulation/grid_subcortex.tsv +1429 -0
  26. py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
  27. py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
  28. py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
  29. py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
  30. py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
  31. py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
  32. py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
  33. py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
  34. py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
  35. py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
  36. py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
  37. py_neuromodulation/nm_IO.py +413 -417
  38. py_neuromodulation/nm_RMAP.py +496 -531
  39. py_neuromodulation/nm_analysis.py +993 -1074
  40. py_neuromodulation/nm_artifacts.py +30 -25
  41. py_neuromodulation/nm_bispectra.py +154 -168
  42. py_neuromodulation/nm_bursts.py +292 -198
  43. py_neuromodulation/nm_coherence.py +251 -205
  44. py_neuromodulation/nm_database.py +149 -0
  45. py_neuromodulation/nm_decode.py +918 -992
  46. py_neuromodulation/nm_define_nmchannels.py +300 -302
  47. py_neuromodulation/nm_features.py +144 -116
  48. py_neuromodulation/nm_filter.py +219 -219
  49. py_neuromodulation/nm_filter_preprocessing.py +79 -91
  50. py_neuromodulation/nm_fooof.py +139 -159
  51. py_neuromodulation/nm_generator.py +45 -37
  52. py_neuromodulation/nm_hjorth_raw.py +52 -73
  53. py_neuromodulation/nm_kalmanfilter.py +71 -58
  54. py_neuromodulation/nm_linelength.py +21 -33
  55. py_neuromodulation/nm_logger.py +66 -0
  56. py_neuromodulation/nm_mne_connectivity.py +149 -112
  57. py_neuromodulation/nm_mnelsl_generator.py +90 -0
  58. py_neuromodulation/nm_mnelsl_stream.py +116 -0
  59. py_neuromodulation/nm_nolds.py +96 -93
  60. py_neuromodulation/nm_normalization.py +173 -214
  61. py_neuromodulation/nm_oscillatory.py +423 -448
  62. py_neuromodulation/nm_plots.py +585 -612
  63. py_neuromodulation/nm_preprocessing.py +83 -0
  64. py_neuromodulation/nm_projection.py +370 -394
  65. py_neuromodulation/nm_rereference.py +97 -95
  66. py_neuromodulation/nm_resample.py +59 -50
  67. py_neuromodulation/nm_run_analysis.py +325 -435
  68. py_neuromodulation/nm_settings.py +289 -68
  69. py_neuromodulation/nm_settings.yaml +244 -0
  70. py_neuromodulation/nm_sharpwaves.py +423 -401
  71. py_neuromodulation/nm_stats.py +464 -480
  72. py_neuromodulation/nm_stream.py +398 -0
  73. py_neuromodulation/nm_stream_abc.py +166 -218
  74. py_neuromodulation/nm_types.py +193 -0
  75. py_neuromodulation/plots/STN_surf.mat +0 -0
  76. py_neuromodulation/plots/Vertices.mat +0 -0
  77. py_neuromodulation/plots/faces.mat +0 -0
  78. py_neuromodulation/plots/grid.mat +0 -0
  79. {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info}/METADATA +185 -182
  80. py_neuromodulation-0.0.5.dist-info/RECORD +83 -0
  81. {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info}/WHEEL +1 -2
  82. {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info/licenses}/LICENSE +21 -21
  83. docs/build/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -68
  84. docs/build/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -233
  85. docs/build/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
  86. docs/build/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -97
  87. docs/build/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
  88. docs/build/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -192
  89. docs/build/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
  90. docs/build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -68
  91. docs/build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -239
  92. docs/build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
  93. docs/build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -97
  94. docs/build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
  95. docs/build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -192
  96. docs/build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
  97. docs/source/_build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -76
  98. docs/source/_build/html/_downloads/0d0d0a76e8f648d5d3cbc47da6351932/plot_real_time_demo.py +0 -97
  99. docs/source/_build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -240
  100. docs/source/_build/html/_downloads/5d73cadc59a8805c47e3b84063afc157/plot_example_BIDS.py +0 -233
  101. docs/source/_build/html/_downloads/7660317fa5a6bfbd12fcca9961457fc4/plot_example_rmap_computing.py +0 -63
  102. docs/source/_build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
  103. docs/source/_build/html/_downloads/839e5b319379f7fd9e867deb00fd797f/plot_example_gridPointProjection.py +0 -210
  104. docs/source/_build/html/_downloads/ae8be19afe5e559f011fc9b138968ba0/plot_first_demo.py +0 -192
  105. docs/source/_build/html/_downloads/b8b06cacc17969d3725a0b6f1d7741c5/plot_example_sharpwave_analysis.py +0 -219
  106. docs/source/_build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -121
  107. docs/source/_build/html/_downloads/c31a86c0b68cb4167d968091ace8080d/plot_example_add_feature.py +0 -68
  108. docs/source/_build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
  109. docs/source/_build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -189
  110. docs/source/_build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
  111. docs/source/auto_examples/plot_0_first_demo.py +0 -189
  112. docs/source/auto_examples/plot_1_example_BIDS.py +0 -240
  113. docs/source/auto_examples/plot_2_example_add_feature.py +0 -76
  114. docs/source/auto_examples/plot_3_example_sharpwave_analysis.py +0 -219
  115. docs/source/auto_examples/plot_4_example_gridPointProjection.py +0 -210
  116. docs/source/auto_examples/plot_5_example_rmap_computing.py +0 -64
  117. docs/source/auto_examples/plot_6_real_time_demo.py +0 -121
  118. docs/source/conf.py +0 -105
  119. examples/plot_0_first_demo.py +0 -189
  120. examples/plot_1_example_BIDS.py +0 -240
  121. examples/plot_2_example_add_feature.py +0 -76
  122. examples/plot_3_example_sharpwave_analysis.py +0 -219
  123. examples/plot_4_example_gridPointProjection.py +0 -210
  124. examples/plot_5_example_rmap_computing.py +0 -64
  125. examples/plot_6_real_time_demo.py +0 -121
  126. packages/realtime_decoding/build/lib/realtime_decoding/__init__.py +0 -4
  127. packages/realtime_decoding/build/lib/realtime_decoding/decoder.py +0 -104
  128. packages/realtime_decoding/build/lib/realtime_decoding/features.py +0 -163
  129. packages/realtime_decoding/build/lib/realtime_decoding/helpers.py +0 -15
  130. packages/realtime_decoding/build/lib/realtime_decoding/run_decoding.py +0 -345
  131. packages/realtime_decoding/build/lib/realtime_decoding/trainer.py +0 -54
  132. packages/tmsi/build/lib/TMSiFileFormats/__init__.py +0 -37
  133. packages/tmsi/build/lib/TMSiFileFormats/file_formats/__init__.py +0 -36
  134. packages/tmsi/build/lib/TMSiFileFormats/file_formats/lsl_stream_writer.py +0 -200
  135. packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_file_writer.py +0 -496
  136. packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_to_edf_converter.py +0 -236
  137. packages/tmsi/build/lib/TMSiFileFormats/file_formats/xdf_file_writer.py +0 -977
  138. packages/tmsi/build/lib/TMSiFileFormats/file_readers/__init__.py +0 -35
  139. packages/tmsi/build/lib/TMSiFileFormats/file_readers/edf_reader.py +0 -116
  140. packages/tmsi/build/lib/TMSiFileFormats/file_readers/poly5reader.py +0 -294
  141. packages/tmsi/build/lib/TMSiFileFormats/file_readers/xdf_reader.py +0 -229
  142. packages/tmsi/build/lib/TMSiFileFormats/file_writer.py +0 -102
  143. packages/tmsi/build/lib/TMSiPlotters/__init__.py +0 -2
  144. packages/tmsi/build/lib/TMSiPlotters/gui/__init__.py +0 -39
  145. packages/tmsi/build/lib/TMSiPlotters/gui/_plotter_gui.py +0 -234
  146. packages/tmsi/build/lib/TMSiPlotters/gui/plotting_gui.py +0 -440
  147. packages/tmsi/build/lib/TMSiPlotters/plotters/__init__.py +0 -44
  148. packages/tmsi/build/lib/TMSiPlotters/plotters/hd_emg_plotter.py +0 -446
  149. packages/tmsi/build/lib/TMSiPlotters/plotters/impedance_plotter.py +0 -589
  150. packages/tmsi/build/lib/TMSiPlotters/plotters/signal_plotter.py +0 -1326
  151. packages/tmsi/build/lib/TMSiSDK/__init__.py +0 -54
  152. packages/tmsi/build/lib/TMSiSDK/device.py +0 -588
  153. packages/tmsi/build/lib/TMSiSDK/devices/__init__.py +0 -34
  154. packages/tmsi/build/lib/TMSiSDK/devices/saga/TMSi_Device_API.py +0 -1764
  155. packages/tmsi/build/lib/TMSiSDK/devices/saga/__init__.py +0 -34
  156. packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_device.py +0 -1366
  157. packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_types.py +0 -520
  158. packages/tmsi/build/lib/TMSiSDK/devices/saga/xml_saga_config.py +0 -165
  159. packages/tmsi/build/lib/TMSiSDK/error.py +0 -95
  160. packages/tmsi/build/lib/TMSiSDK/sample_data.py +0 -63
  161. packages/tmsi/build/lib/TMSiSDK/sample_data_server.py +0 -99
  162. packages/tmsi/build/lib/TMSiSDK/settings.py +0 -45
  163. packages/tmsi/build/lib/TMSiSDK/tmsi_device.py +0 -111
  164. packages/tmsi/build/lib/__init__.py +0 -4
  165. packages/tmsi/build/lib/apex_sdk/__init__.py +0 -34
  166. packages/tmsi/build/lib/apex_sdk/device/__init__.py +0 -41
  167. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API.py +0 -1009
  168. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_enums.py +0 -239
  169. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_structures.py +0 -668
  170. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_device.py +0 -1611
  171. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_dongle.py +0 -38
  172. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_event_reader.py +0 -57
  173. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_channel.py +0 -44
  174. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_config.py +0 -150
  175. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_const.py +0 -36
  176. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_impedance_channel.py +0 -48
  177. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_info.py +0 -108
  178. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/dongle_info.py +0 -39
  179. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/download_measurement.py +0 -77
  180. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/eeg_measurement.py +0 -150
  181. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/impedance_measurement.py +0 -129
  182. packages/tmsi/build/lib/apex_sdk/device/threads/conversion_thread.py +0 -59
  183. packages/tmsi/build/lib/apex_sdk/device/threads/sampling_thread.py +0 -57
  184. packages/tmsi/build/lib/apex_sdk/device/tmsi_channel.py +0 -83
  185. packages/tmsi/build/lib/apex_sdk/device/tmsi_device.py +0 -201
  186. packages/tmsi/build/lib/apex_sdk/device/tmsi_device_enums.py +0 -103
  187. packages/tmsi/build/lib/apex_sdk/device/tmsi_dongle.py +0 -43
  188. packages/tmsi/build/lib/apex_sdk/device/tmsi_event_reader.py +0 -50
  189. packages/tmsi/build/lib/apex_sdk/device/tmsi_measurement.py +0 -118
  190. packages/tmsi/build/lib/apex_sdk/sample_data_server/__init__.py +0 -33
  191. packages/tmsi/build/lib/apex_sdk/sample_data_server/event_data.py +0 -44
  192. packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data.py +0 -50
  193. packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data_server.py +0 -136
  194. packages/tmsi/build/lib/apex_sdk/tmsi_errors/error.py +0 -126
  195. packages/tmsi/build/lib/apex_sdk/tmsi_sdk.py +0 -113
  196. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/apex/apex_structure_generator.py +0 -134
  197. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/decorators.py +0 -60
  198. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/logger_filter.py +0 -42
  199. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/singleton.py +0 -42
  200. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/support_functions.py +0 -72
  201. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/tmsi_logger.py +0 -98
  202. py_neuromodulation/nm_EpochStream.py +0 -92
  203. py_neuromodulation/nm_across_patient_decoding.py +0 -927
  204. py_neuromodulation/nm_cohortwrapper.py +0 -435
  205. py_neuromodulation/nm_eval_timing.py +0 -239
  206. py_neuromodulation/nm_features_abc.py +0 -39
  207. py_neuromodulation/nm_stream_offline.py +0 -358
  208. py_neuromodulation/utils/_logging.py +0 -24
  209. py_neuromodulation-0.0.3.dist-info/RECORD +0 -188
  210. py_neuromodulation-0.0.3.dist-info/top_level.txt +0 -5
  211. tests/__init__.py +0 -0
  212. tests/conftest.py +0 -117
  213. tests/test_all_examples.py +0 -10
  214. tests/test_all_features.py +0 -63
  215. tests/test_bispectra.py +0 -70
  216. tests/test_bursts.py +0 -105
  217. tests/test_feature_sampling_rates.py +0 -143
  218. tests/test_fooof.py +0 -16
  219. tests/test_initalization_offline_stream.py +0 -41
  220. tests/test_multiprocessing.py +0 -58
  221. tests/test_nan_values.py +0 -29
  222. tests/test_nm_filter.py +0 -95
  223. tests/test_nm_resample.py +0 -63
  224. tests/test_normalization_settings.py +0 -146
  225. tests/test_notch_filter.py +0 -31
  226. tests/test_osc_features.py +0 -424
  227. tests/test_preprocessing_filter.py +0 -151
  228. tests/test_rereference.py +0 -171
  229. tests/test_sampling.py +0 -57
  230. tests/test_settings_change_after_init.py +0 -76
  231. tests/test_sharpwave.py +0 -165
  232. tests/test_target_channel_add.py +0 -100
  233. tests/test_timing.py +0 -80
@@ -1,435 +0,0 @@
1
- from re import VERBOSE
2
- import sys
3
- import os
4
- import numpy as np
5
- from pathlib import Path
6
- from scipy import stats
7
- import pandas as pd
8
- from multiprocessing import Pool
9
- from sklearn import linear_model, discriminant_analysis, ensemble, svm
10
- from sklearn import metrics
11
- from sklearn.base import clone
12
- from sklearn import model_selection
13
- from sklearn.utils import class_weight
14
- from scipy.ndimage import binary_dilation, binary_erosion, label
15
- import _pickle as cPickle
16
- from scipy import io
17
- from matplotlib import pyplot as plt
18
- import matplotlib
19
- import bids
20
- from bids import BIDSLayout
21
- from itertools import product
22
- import nibabel as nib
23
-
24
- import py_neuromodulation
25
- from py_neuromodulation import nm_decode, nm_analysis, nm_IO
26
- from py_neuromodulation import nm_stream_offline
27
-
28
-
29
- class CohortRunner:
30
- def __init__(
31
- self,
32
- cohorts: dict = None,
33
- ML_model_name="LM",
34
- model=linear_model.LogisticRegression(class_weight="balanced"),
35
- eval_method=metrics.balanced_accuracy_score,
36
- estimate_gridpoints=False,
37
- estimate_channels=True,
38
- estimate_all_channels_combined=False,
39
- save_coef=False,
40
- TRAIN_VAL_SPLIT=False,
41
- plot_features=False,
42
- plot_grid_performances=False,
43
- run_ML_model=True,
44
- run_bids=True,
45
- binarize_label=True,
46
- used_types=("ecog", "dbs", "seeg"),
47
- target_keywords=("mov", "squared", "label"),
48
- get_movement_detection_rate=False,
49
- run_pool=True,
50
- VERBOSE=False,
51
- LIMIT_DATA=False,
52
- RUN_BAY_OPT=False,
53
- STACK_FEATURES_N_SAMPLES=True,
54
- cv_method=model_selection.KFold(n_splits=3, shuffle=False),
55
- use_nested_cv=True,
56
- outpath=r"C:\Users\ICN_admin\Documents\Decoding_Toolbox\write_out\0209_SharpWaveLimFeaturesSTFT_with_Grid",
57
- PATH_SETTINGS=r"C:\Users\ICN_admin\Documents\py_neuromodulation\pyneuromodulation\nm_settings.json",
58
- ) -> None:
59
-
60
- self.ML_model_name = ML_model_name
61
- self.model = model
62
- self.outpath = outpath
63
- self.PATH_SETTINGS = PATH_SETTINGS
64
- self.estimate_gridpoints = estimate_gridpoints
65
- self.estimate_channels = estimate_channels
66
- self.estimate_all_channels_combined = estimate_all_channels_combined
67
- self.save_coef = save_coef
68
- self.plot_features = plot_features
69
- self.plot_grid_performances = plot_grid_performances
70
- self.run_ML_model = run_ML_model
71
- self.run_bids = run_bids
72
- self.run_pool = run_pool
73
- self.TRAIN_VAL_SPLIT = TRAIN_VAL_SPLIT
74
- self.cohorts = cohorts
75
- self.VERBOSE = VERBOSE
76
- self.LIMIT_DATA = LIMIT_DATA
77
- self.eval_method = eval_method
78
- self.cv_method = cv_method
79
- self.use_nested_cv = use_nested_cv
80
- self.RUN_BAY_OPT = RUN_BAY_OPT
81
- self.TRAIN_VAL_SPLIT = TRAIN_VAL_SPLIT
82
- self.STACK_FEATURES_N_SAMPLES = STACK_FEATURES_N_SAMPLES
83
- self.model = model
84
- self.binarize_label = binarize_label
85
- self.used_types = used_types
86
- self.target_keywords = target_keywords
87
- self.get_movement_detection_rate = get_movement_detection_rate
88
- self.grid_cortex = pd.read_csv(
89
- os.path.join(py_neuromodulation.__path__[0], "grid_cortex.tsv"),
90
- sep="\t",
91
- ).to_numpy()
92
-
93
- def init_decoder(self) -> nm_decode.Decoder:
94
- return nm_decode.Decoder(
95
- model=self.model,
96
- TRAIN_VAL_SPLIT=self.TRAIN_VAL_SPLIT,
97
- STACK_FEATURES_N_SAMPLES=self.STACK_FEATURES_N_SAMPLES,
98
- get_movement_detection_rate=self.get_movement_detection_rate,
99
- eval_method=self.eval_method,
100
- VERBOSE=self.VERBOSE,
101
- cv_method=self.cv_method,
102
- use_nested_cv=self.use_nested_cv,
103
- RUN_BAY_OPT=self.RUN_BAY_OPT,
104
- )
105
-
106
- def multiprocess_pipeline_run_wrapper(self, PATH_RUN):
107
-
108
- if type(PATH_RUN) is bids.layout.models.BIDSFile:
109
- PATH_RUN = PATH_RUN.path
110
-
111
- # set BIDS PATH and out path
112
- # better option: feed the output and bids path as well as a param through the pool
113
-
114
- for cohort, PATH_COHORT in self.cohorts.items():
115
- if cohort in PATH_RUN:
116
- PATH_BIDS = PATH_COHORT
117
- PATH_OUT = os.path.join(self.outpath, cohort)
118
- break
119
-
120
- if self.run_bids:
121
- stream = nm_stream_offline.Stream(
122
- PATH_RUN=PATH_RUN,
123
- PATH_BIDS=PATH_BIDS,
124
- PATH_OUT=PATH_OUT,
125
- LIMIT_DATA=self.LIMIT_DATA,
126
- LIMIT_HIGH=200000,
127
- LIMIT_LOW=0,
128
- target_keywords=self.target_keywords,
129
- used_types=self.used_types,
130
- PATH_SETTINGS=self.PATH_SETTINGS,
131
- VERBOSE=self.VERBOSE,
132
- )
133
- stream.run()
134
-
135
- feature_file = os.path.basename(PATH_RUN)[:-5] # cut off ".vhdr"
136
-
137
- feature_reader = nm_analysis.Feature_Reader(
138
- feature_dir=PATH_OUT,
139
- feature_file=feature_file,
140
- binarize_label=self.binarize_label,
141
- )
142
-
143
- if self.plot_grid_performances:
144
- feature_reader.plot_cort_projection()
145
-
146
- if self.plot_features:
147
-
148
- ch_to_plot = feature_reader.nm_channels.query(
149
- '(type=="ecog") and (used == 1)'
150
- )["name"]
151
-
152
- feature_used = "stft"
153
-
154
- for ch_used in ch_to_plot:
155
- feature_reader.plot_target_averaged_channel(
156
- ch=ch_used,
157
- list_feature_keywords=[feature_used],
158
- epoch_len=4,
159
- threshold=0.5,
160
- )
161
-
162
- # model = discriminant_analysis.LinearDiscriminantAnalysis()
163
- # model = xgboost.XGBClassifier(scale_pos_weight=10) # balanced class weights
164
- # model = ensemble.RandomForestClassifier(n_estimators=6, max_depth=6, class_weight='balanced')
165
- # model = svm.SVC(class_weight="balanced")
166
-
167
- if self.run_ML_model:
168
- # set decoder for this specific run (using the feature_reader features)
169
- feature_reader.decoder = self.init_decoder()
170
-
171
- feature_reader.decoder.set_data(
172
- features=feature_reader.feature_arr,
173
- label=feature_reader.label,
174
- label_name=feature_reader.label_name,
175
- used_chs=feature_reader.used_chs,
176
- )
177
-
178
- performances = feature_reader.run_ML_model(
179
- estimate_channels=self.estimate_channels,
180
- estimate_gridpoints=self.estimate_gridpoints,
181
- estimate_all_channels_combined=self.estimate_all_channels_combined,
182
- save_results=True,
183
- output_name="XGBNOMP",
184
- )
185
-
186
- if self.plot_grid_performances:
187
- feature_reader.plot_subject_grid_ch_performance(
188
- performance_dict=performances, plt_grid=True
189
- )
190
-
191
- def run_cohorts(self):
192
-
193
- run_files_all = []
194
- for _, PATH_COHORT in self.cohorts.items():
195
- layout = BIDSLayout(PATH_COHORT)
196
- run_files_all.append(layout.get(extension=".vhdr"))
197
-
198
- run_files_all = list(np.concatenate(run_files_all))
199
-
200
- if self.run_pool:
201
- pool = Pool(processes=50)
202
- pool.map(self.multiprocess_pipeline_run_wrapper, run_files_all)
203
- else:
204
- # self.multiprocess_pipeline_run_wrapper(run_files_all[11])
205
- for run_file in run_files_all[:12]:
206
- self.multiprocess_pipeline_run_wrapper(run_file)
207
-
208
- def read_cohort_results(self, feature_path, cohort):
209
- """Read for a given path (of potentially multiple estimated runs) performance results
210
- Parameters
211
- ----------
212
- feature_path : string
213
- path where estimated runs are saved
214
- cohort : string
215
- used for saving output npy dictionary
216
- ML_model_name : string
217
- model name, by default "LM"
218
- """
219
-
220
- # Here the runs are overwritten!
221
- folders_path = [x[0] for x in os.walk(feature_path)]
222
- feature_paths = [os.path.basename(x) for x in folders_path[1:]]
223
- performance_out = {}
224
-
225
- for feature_file in feature_paths:
226
- feature_reader = nm_analysis.Feature_Reader(
227
- feature_dir=feature_path, feature_file=feature_file
228
- )
229
-
230
- performance_run = feature_reader.read_results(
231
- read_grid_points=self.estimate_gridpoints,
232
- read_channels=self.estimate_channels,
233
- read_all_combined=self.estimate_all_channels_combined,
234
- read_mov_detection_rates=True,
235
- )
236
-
237
- sub = feature_file[
238
- feature_file.find("sub-") : feature_file.find("_ses")
239
- ][4:]
240
- if sub not in performance_out:
241
- performance_out[sub] = {}
242
- performance_out[sub][feature_file] = performance_run[
243
- sub
244
- ] # get saved in performance_run
245
-
246
- np.save(
247
- os.path.join(
248
- self.outpath, self.ML_model_name + "_cohort_" + cohort + ".npy"
249
- ),
250
- performance_out,
251
- )
252
-
253
- def cohort_wrapper_read_cohort(self):
254
- """Read results for multiple cohorts"""
255
-
256
- for cohort in self.cohorts.keys():
257
- self.read_cohort_results(
258
- os.path.join(self.outpath, cohort), cohort
259
- )
260
-
261
- def read_all_channels(
262
- self,
263
- channel_all,
264
- feature_path,
265
- feature_file,
266
- cohort,
267
- read_channels: bool = True,
268
- ):
269
- """Save for a given feature path all used grid point data. Necessary to run across patient and cohort analysis.
270
- Parameters
271
- ----------
272
- channel_all : dictionary
273
- dictionary with data, label, label_name and feature_names for each channel
274
- feature_path : string
275
- path to feature files
276
- feature_file : string
277
- feature file
278
- cohort : string
279
- used for indecing of grid_point_all
280
- read_ch : bool
281
- if True read channels, else read grid_points
282
- Returns
283
- -------
284
- dictionary
285
- ch_all
286
- """
287
- feature_reader = nm_analysis.Feature_Reader(
288
- feature_dir=feature_path, feature_file=feature_file
289
- )
290
- if "Washington" in feature_path:
291
- mov_starts = np.where(np.diff(feature_reader.feature_arr["mov"])>0)[0]
292
- seg_cut = []
293
- for mov_start in mov_starts:
294
- for i in range(5):
295
- seg_cut.append(mov_start+i)
296
-
297
- ind_cut = np.concatenate((np.where(feature_reader.feature_arr["mov"] == 11)[0], seg_cut))
298
- idx_select = set(np.arange(feature_reader.feature_arr["mov"].shape[0])) - set(ind_cut)
299
- feature_reader.feature_arr = feature_reader.feature_arr.iloc[list(idx_select), :].reset_index(drop=True)
300
- #analyzer.feature_arr["mov"] = analyzer.feature_arr["mov"] > 0
301
- feature_reader.label = np.array(feature_reader.feature_arr["mov"] > 0, dtype=int)
302
- subject_name = feature_file[:2]
303
- task_name = "hand_movement"
304
- run_number = 1
305
- else:
306
- subject_name = feature_file[
307
- feature_file.find("sub-") + 4 : feature_file.find("_ses")
308
- ]
309
- sess_name = feature_file[
310
- feature_file.find("ses-") + 4 : feature_file.find("_task")
311
- ]
312
- task_name = feature_file[
313
- feature_file.find("task-") + 5 : feature_file.find("_run")
314
- ]
315
- run_number = feature_file[
316
- feature_file.find("run-") + 4 : feature_file.find("_ieeg")
317
- ]
318
- print(feature_reader.label_name)
319
- decoder = nm_decode.Decoder(
320
- features=feature_reader.feature_arr,
321
- label=feature_reader.label,
322
- label_name=feature_reader.label_name,
323
- used_chs=feature_reader.used_chs,
324
- )
325
-
326
- if read_channels is True:
327
- decoder.set_data_ind_channels()
328
- data_to_read = decoder.ch_ind_data
329
- else:
330
- decoder.set_data_grid_points()
331
- data_to_read = decoder.grid_point_ind_data
332
-
333
- for ch in list(data_to_read.keys()):
334
- if cohort not in channel_all:
335
- channel_all[cohort] = {}
336
- if subject_name not in channel_all[cohort]:
337
- channel_all[cohort][subject_name] = {}
338
- if ch not in channel_all[cohort][subject_name]:
339
- channel_all[cohort][subject_name][ch] = {}
340
- channel_all[cohort][subject_name][ch][feature_file] = {}
341
-
342
- channel_all[cohort][subject_name][ch][feature_file][
343
- "data"
344
- ] = data_to_read[ch]
345
- channel_all[cohort][subject_name][ch][feature_file][
346
- "feature_names"
347
- ] = [
348
- ch_[len(ch) + 1 :]
349
- for ch_ in decoder.features.columns
350
- if ch in ch_
351
- ]
352
- channel_all[cohort][subject_name][ch][feature_file][
353
- "label"
354
- ] = decoder.label
355
- channel_all[cohort][subject_name][ch][feature_file][
356
- "label_name"
357
- ] = decoder.label_name
358
-
359
- # check laterality
360
- lat = "CON" # Beijing is always contralateral
361
- # Pittsburgh Subjects
362
- if (
363
- "LEFT" in decoder.label_name
364
- and "LEFT" in decoder.features.columns[1]
365
- ) or (
366
- "RIGHT" in decoder.label_name
367
- and "RIGHT" in decoder.features.columns[1]
368
- ):
369
- lat = "IPS"
370
-
371
- # Berlin subjects
372
- if (
373
- "_L_" in decoder.features.columns[1]
374
- and task_name == "SelfpacedRotationL"
375
- ) or (
376
- "_R_" in decoder.features.columns[1]
377
- and task_name == "SelfpacedRotationR"
378
- ):
379
- lat = "IPS"
380
- channel_all[cohort][subject_name][ch][feature_file]["lat"] = lat
381
- return channel_all
382
-
383
- def cohort_wrapper_read_all_grid_points(self, read_channels=True):
384
- cohorts = self.cohorts.keys()
385
- grid_point_all = {}
386
- for cohort in cohorts:
387
- print("COHORT: " + cohort)
388
- feature_path = os.path.join(self.outpath, cohort)
389
- feature_list = nm_IO.get_run_list_indir(feature_path)
390
- for feature_file in feature_list:
391
- print(feature_file)
392
- grid_point_all = self.read_all_channels(
393
- grid_point_all,
394
- feature_path,
395
- feature_file,
396
- cohort,
397
- read_channels=read_channels,
398
- )
399
-
400
- if read_channels is True:
401
- np.save(
402
- os.path.join(self.outpath, "channel_all.npy"), grid_point_all
403
- )
404
- else:
405
- np.save(
406
- os.path.join(self.outpath, "grid_point_all.npy"),
407
- grid_point_all,
408
- )
409
-
410
- @staticmethod
411
- def rewrite_grid_point_all(d, outpath):
412
-
413
- # from channel_all[cohort][subject_name][ch][feature_file]
414
-
415
- # to grid_point_all[grid_point][cohort][subject_test][run]
416
-
417
- p = {}
418
- for cohort in d.keys():
419
- for sub in d[cohort].keys():
420
- for gp in d[cohort][sub].keys():
421
- for f in d[cohort][sub][gp].keys():
422
- if gp not in p.keys():
423
- p[gp] = {}
424
- if cohort not in p[gp].keys():
425
- p[gp][cohort] = {}
426
- if sub not in p[gp][cohort].keys():
427
- p[gp][cohort][sub] = {}
428
- if f not in p[gp][cohort][sub].keys():
429
- p[gp][cohort][sub][f] = {}
430
- for key_ in d[cohort][sub][gp][f].keys():
431
- p[gp][cohort][sub][f][key_] = d[cohort][sub][gp][
432
- f
433
- ][key_]
434
-
435
- np.save(os.path.join(outpath, "grid_point_all_re.npy"), p)
@@ -1,239 +0,0 @@
1
- import timeit
2
- from . import nm_oscillatory
3
- import numpy as np
4
- from py_neuromodulation import (
5
- nm_normalization,
6
- nm_stft,
7
- nm_bandpower,
8
- nm_filter,
9
- )
10
-
11
-
12
class NM_Timer:
    """Benchmark helper for a py_neuromodulation analyzer pipeline.

    Times each *enabled* processing step (re-referencing, resampling,
    notch filtering, normalization, projection, and the individual feature
    estimators) on random data and prints the average per-call duration.

    NOTE: constructing an instance runs the benchmark immediately.
    """

    def __init__(self, analyzer) -> None:
        # analyzer: pipeline object exposing .fs, .settings, .reference,
        # .resample, .notch_filter, .features, etc. (see get_timings).
        self.analyzer = analyzer

        self.get_timings()

    def get_timings(self, number_repeat: int = 1000):
        """Time every enabled analyzer method on synthetic random data.

        Parameters
        ----------
        number_repeat : int
            Repetition count handed to ``timeit``; every reported value is
            the per-call average (total time / number_repeat).
        """

        features_ = {}
        ch_idx = 0
        fs = self.analyzer.fs
        # Hard-coded example channel label used by the per-channel feature
        # functions below.
        ch_name = "ECOG_L_1_SMC_AT"
        # Channel counts before/after re-referencing ("2" was an earlier
        # debug value).
        N_CH_BEFORE_REREF = 15  # 2
        N_CH_AFTER_REREF = 11  # 2
        # One second of random data at the raw sampling rate.
        data = np.random.random([N_CH_BEFORE_REREF, fs])

        dict_timings = {}

        # NOTE(review): this guard checks the "notch_filter" setting but
        # times re-referencing — presumably it should test a
        # re-referencing flag instead; confirm against the settings schema.
        if self.analyzer.settings["methods"]["notch_filter"]:
            dict_timings["time_rereference"] = (
                timeit.timeit(
                    lambda: self.analyzer.reference.rereference(data),
                    number=number_repeat,
                )
                / number_repeat
            )

        # After re-referencing the channel count shrinks.
        data = np.random.random([N_CH_AFTER_REREF, fs])

        if self.analyzer.settings["methods"]["raw_resampling"]:
            dict_timings["time_resample"] = (
                timeit.timeit(
                    lambda: self.analyzer.resample.raw_resampling(data),
                    number=number_repeat,
                )
                / number_repeat
            )

        # Subsequent steps operate on data at the resampled frequency.
        data = np.random.random(
            [
                N_CH_AFTER_REREF,
                self.analyzer.settings["raw_resampling_settings"][
                    "resample_freq"
                ],
            ]
        )

        if self.analyzer.settings["methods"]["notch_filter"]:
            dict_timings["time_notchfilter"] = (
                timeit.timeit(
                    lambda: self.analyzer.notch_filter.filter_data(data),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["raw_normalization"]:
            dict_timings["time_norm_raw"] = (
                timeit.timeit(
                    lambda: nm_normalization.normalize_raw(
                        current=data,
                        # data.T merely stands in for a "previous samples"
                        # buffer of compatible shape.
                        previous=data.T,
                        normalize_samples=int(
                            self.analyzer.settings[
                                "raw_normalization_settings"
                            ]["normalization_time"]
                            * self.analyzer.fs
                        ),
                        sample_add=int(self.analyzer.fs / self.analyzer.fs_new),
                        method=self.analyzer.settings[
                            "raw_normalization_settings"
                        ]["normalization_method"],
                        clip=self.analyzer.settings[
                            "raw_normalization_settings"
                        ]["clip"],
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        features_previous = self.analyzer.features_previous
        # NOTE(review): rows of features_current are sliced by the *column*
        # count of features_previous — looks suspicious; confirm whether a
        # column slice (or shape[0]) was intended.
        features_current = self.analyzer.features_current.iloc[
            : features_previous.shape[1]
        ]

        if self.analyzer.settings["methods"]["feature_normalization"]:
            dict_timings["time_feature_norm"] = (
                timeit.timeit(
                    lambda: nm_normalization.normalize_features(
                        current=features_current.to_numpy(),
                        previous=features_previous,
                        normalize_samples=self.analyzer.feat_normalize_samples,
                        method=self.analyzer.settings[
                            "feature_normalization_settings"
                        ]["normalization_method"],
                        clip=self.analyzer.settings[
                            "feature_normalization_settings"
                        ]["clip"],
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["project_cortex"]:
            dict_timings["time_projection"] = (
                timeit.timeit(
                    lambda: self.analyzer.projection.project_features(
                        features_current
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        # Raw application of the band-filter bank (timed again further below
        # together with the band-power feature computation).
        if self.analyzer.settings["methods"]["bandpass_filter"]:
            dict_timings["time_applyfilterband"] = (
                timeit.timeit(
                    lambda: self.analyzer.features.bandpass_filter.filter_data(
                        data,
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["sharpwave_analysis"]:
            # Sharpwave features are computed on the last 100 samples of a
            # single channel.
            dict_timings["time_sw"] = (
                timeit.timeit(
                    lambda: self.analyzer.features.sw_features.get_sharpwave_features(
                        features_, data[ch_idx, -100:], ch_name
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["stft"]:
            dict_timings["time_stft"] = (
                timeit.timeit(
                    lambda: nm_stft.get_stft_features(
                        features_,
                        self.analyzer.features.s,
                        self.analyzer.features.fs,
                        data[ch_idx, :],
                        self.analyzer.features.KF_dict,
                        ch_name + "-avgref",
                        self.analyzer.features.f_ranges,
                        self.analyzer.features.fband_names,
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["fft"]:
            dict_timings["time_fft"] = (
                timeit.timeit(
                    lambda: nm_oscillatory.get_fft_features(
                        features_,
                        self.analyzer.features.s,
                        self.analyzer.features.fs,
                        data[ch_idx, :],
                        self.analyzer.features.KF_dict,
                        ch_name,
                        self.analyzer.features.f_ranges,
                        self.analyzer.features.fband_names,
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["bandpass_filter"]:
            # Per-band segment lengths converted from milliseconds to
            # samples at the analyzer's sampling rate.
            seglengths = np.floor(
                self.analyzer.fs
                / 1000
                * np.array(
                    [
                        value
                        for value in self.analyzer.features.s[
                            "bandpass_filter_settings"
                        ]["segment_lengths"].values()
                    ]
                )
            ).astype(int)

            # Pre-filter once so only the band-power estimation is timed.
            dat_filtered = nm_filter.apply_filter(
                data, self.analyzer.features.filter_fun
            )  # shape (bands, time)
            dict_timings["time_bandpass_filter"] = (
                timeit.timeit(
                    lambda: nm_bandpower.get_bandpower_features(
                        features_,
                        self.analyzer.features.s,
                        seglengths,
                        dat_filtered,
                        self.analyzer.features.KF_dict,
                        ch_name,
                        ch_idx,
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        if self.analyzer.settings["methods"]["coherence"]:
            # Only the first configured channel pair is benchmarked.
            coh_obj = self.analyzer.features.coherence_objects[0]
            dict_timings["time_coherence"] = (
                timeit.timeit(
                    lambda: coh_obj.get_coh(
                        features_,
                        data[coh_obj.ch_1_idx, :],
                        data[coh_obj.ch_2_idx, :],
                    ),
                    number=number_repeat,
                )
                / number_repeat
            )

        # Report averages in milliseconds.
        print("Average duration per function:")
        for key, val in dict_timings.items():
            print(f" {key} : {np.round(val*1000, 2)}ms")

        print(
            "fft, sw, bandpass, coherence and stft are timings for an individual channel"
        )