py-neuromodulation 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. docs/build/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +68 -0
  2. docs/build/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +233 -0
  3. docs/build/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +219 -0
  4. docs/build/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +97 -0
  5. docs/build/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +64 -0
  6. docs/build/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +192 -0
  7. docs/build/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +210 -0
  8. docs/build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +68 -0
  9. docs/build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +239 -0
  10. docs/build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +219 -0
  11. docs/build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +97 -0
  12. docs/build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +64 -0
  13. docs/build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +192 -0
  14. docs/build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +210 -0
  15. docs/source/_build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +76 -0
  16. docs/source/_build/html/_downloads/0d0d0a76e8f648d5d3cbc47da6351932/plot_real_time_demo.py +97 -0
  17. docs/source/_build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +240 -0
  18. docs/source/_build/html/_downloads/5d73cadc59a8805c47e3b84063afc157/plot_example_BIDS.py +233 -0
  19. docs/source/_build/html/_downloads/7660317fa5a6bfbd12fcca9961457fc4/plot_example_rmap_computing.py +63 -0
  20. docs/source/_build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +219 -0
  21. docs/source/_build/html/_downloads/839e5b319379f7fd9e867deb00fd797f/plot_example_gridPointProjection.py +210 -0
  22. docs/source/_build/html/_downloads/ae8be19afe5e559f011fc9b138968ba0/plot_first_demo.py +192 -0
  23. docs/source/_build/html/_downloads/b8b06cacc17969d3725a0b6f1d7741c5/plot_example_sharpwave_analysis.py +219 -0
  24. docs/source/_build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +121 -0
  25. docs/source/_build/html/_downloads/c31a86c0b68cb4167d968091ace8080d/plot_example_add_feature.py +68 -0
  26. docs/source/_build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +64 -0
  27. docs/source/_build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +189 -0
  28. docs/source/_build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +210 -0
  29. docs/source/auto_examples/plot_0_first_demo.py +189 -0
  30. docs/source/auto_examples/plot_1_example_BIDS.py +240 -0
  31. docs/source/auto_examples/plot_2_example_add_feature.py +76 -0
  32. docs/source/auto_examples/plot_3_example_sharpwave_analysis.py +219 -0
  33. docs/source/auto_examples/plot_4_example_gridPointProjection.py +210 -0
  34. docs/source/auto_examples/plot_5_example_rmap_computing.py +64 -0
  35. docs/source/auto_examples/plot_6_real_time_demo.py +121 -0
  36. docs/source/conf.py +105 -0
  37. examples/plot_0_first_demo.py +189 -0
  38. examples/plot_1_example_BIDS.py +240 -0
  39. examples/plot_2_example_add_feature.py +76 -0
  40. examples/plot_3_example_sharpwave_analysis.py +219 -0
  41. examples/plot_4_example_gridPointProjection.py +210 -0
  42. examples/plot_5_example_rmap_computing.py +64 -0
  43. examples/plot_6_real_time_demo.py +121 -0
  44. packages/realtime_decoding/build/lib/realtime_decoding/__init__.py +4 -0
  45. packages/realtime_decoding/build/lib/realtime_decoding/decoder.py +104 -0
  46. packages/realtime_decoding/build/lib/realtime_decoding/features.py +163 -0
  47. packages/realtime_decoding/build/lib/realtime_decoding/helpers.py +15 -0
  48. packages/realtime_decoding/build/lib/realtime_decoding/run_decoding.py +345 -0
  49. packages/realtime_decoding/build/lib/realtime_decoding/trainer.py +54 -0
  50. packages/tmsi/build/lib/TMSiFileFormats/__init__.py +37 -0
  51. packages/tmsi/build/lib/TMSiFileFormats/file_formats/__init__.py +36 -0
  52. packages/tmsi/build/lib/TMSiFileFormats/file_formats/lsl_stream_writer.py +200 -0
  53. packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_file_writer.py +496 -0
  54. packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_to_edf_converter.py +236 -0
  55. packages/tmsi/build/lib/TMSiFileFormats/file_formats/xdf_file_writer.py +977 -0
  56. packages/tmsi/build/lib/TMSiFileFormats/file_readers/__init__.py +35 -0
  57. packages/tmsi/build/lib/TMSiFileFormats/file_readers/edf_reader.py +116 -0
  58. packages/tmsi/build/lib/TMSiFileFormats/file_readers/poly5reader.py +294 -0
  59. packages/tmsi/build/lib/TMSiFileFormats/file_readers/xdf_reader.py +229 -0
  60. packages/tmsi/build/lib/TMSiFileFormats/file_writer.py +102 -0
  61. packages/tmsi/build/lib/TMSiPlotters/__init__.py +2 -0
  62. packages/tmsi/build/lib/TMSiPlotters/gui/__init__.py +39 -0
  63. packages/tmsi/build/lib/TMSiPlotters/gui/_plotter_gui.py +234 -0
  64. packages/tmsi/build/lib/TMSiPlotters/gui/plotting_gui.py +440 -0
  65. packages/tmsi/build/lib/TMSiPlotters/plotters/__init__.py +44 -0
  66. packages/tmsi/build/lib/TMSiPlotters/plotters/hd_emg_plotter.py +446 -0
  67. packages/tmsi/build/lib/TMSiPlotters/plotters/impedance_plotter.py +589 -0
  68. packages/tmsi/build/lib/TMSiPlotters/plotters/signal_plotter.py +1326 -0
  69. packages/tmsi/build/lib/TMSiSDK/__init__.py +54 -0
  70. packages/tmsi/build/lib/TMSiSDK/device.py +588 -0
  71. packages/tmsi/build/lib/TMSiSDK/devices/__init__.py +34 -0
  72. packages/tmsi/build/lib/TMSiSDK/devices/saga/TMSi_Device_API.py +1764 -0
  73. packages/tmsi/build/lib/TMSiSDK/devices/saga/__init__.py +34 -0
  74. packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_device.py +1366 -0
  75. packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_types.py +520 -0
  76. packages/tmsi/build/lib/TMSiSDK/devices/saga/xml_saga_config.py +165 -0
  77. packages/tmsi/build/lib/TMSiSDK/error.py +95 -0
  78. packages/tmsi/build/lib/TMSiSDK/sample_data.py +63 -0
  79. packages/tmsi/build/lib/TMSiSDK/sample_data_server.py +99 -0
  80. packages/tmsi/build/lib/TMSiSDK/settings.py +45 -0
  81. packages/tmsi/build/lib/TMSiSDK/tmsi_device.py +111 -0
  82. packages/tmsi/build/lib/__init__.py +4 -0
  83. packages/tmsi/build/lib/apex_sdk/__init__.py +34 -0
  84. packages/tmsi/build/lib/apex_sdk/device/__init__.py +41 -0
  85. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API.py +1009 -0
  86. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_enums.py +239 -0
  87. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_structures.py +668 -0
  88. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_device.py +1611 -0
  89. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_dongle.py +38 -0
  90. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_event_reader.py +57 -0
  91. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_channel.py +44 -0
  92. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_config.py +150 -0
  93. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_const.py +36 -0
  94. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_impedance_channel.py +48 -0
  95. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_info.py +108 -0
  96. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/dongle_info.py +39 -0
  97. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/download_measurement.py +77 -0
  98. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/eeg_measurement.py +150 -0
  99. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/impedance_measurement.py +129 -0
  100. packages/tmsi/build/lib/apex_sdk/device/threads/conversion_thread.py +59 -0
  101. packages/tmsi/build/lib/apex_sdk/device/threads/sampling_thread.py +57 -0
  102. packages/tmsi/build/lib/apex_sdk/device/tmsi_channel.py +83 -0
  103. packages/tmsi/build/lib/apex_sdk/device/tmsi_device.py +201 -0
  104. packages/tmsi/build/lib/apex_sdk/device/tmsi_device_enums.py +103 -0
  105. packages/tmsi/build/lib/apex_sdk/device/tmsi_dongle.py +43 -0
  106. packages/tmsi/build/lib/apex_sdk/device/tmsi_event_reader.py +50 -0
  107. packages/tmsi/build/lib/apex_sdk/device/tmsi_measurement.py +118 -0
  108. packages/tmsi/build/lib/apex_sdk/sample_data_server/__init__.py +33 -0
  109. packages/tmsi/build/lib/apex_sdk/sample_data_server/event_data.py +44 -0
  110. packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data.py +50 -0
  111. packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data_server.py +136 -0
  112. packages/tmsi/build/lib/apex_sdk/tmsi_errors/error.py +126 -0
  113. packages/tmsi/build/lib/apex_sdk/tmsi_sdk.py +113 -0
  114. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/apex/apex_structure_generator.py +134 -0
  115. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/decorators.py +60 -0
  116. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/logger_filter.py +42 -0
  117. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/singleton.py +42 -0
  118. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/support_functions.py +72 -0
  119. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/tmsi_logger.py +98 -0
  120. py_neuromodulation/{helper.py → _write_example_dataset_helper.py} +1 -1
  121. py_neuromodulation/nm_EpochStream.py +2 -3
  122. py_neuromodulation/nm_IO.py +43 -70
  123. py_neuromodulation/nm_RMAP.py +308 -11
  124. py_neuromodulation/nm_analysis.py +1 -1
  125. py_neuromodulation/nm_artifacts.py +25 -0
  126. py_neuromodulation/nm_bispectra.py +64 -29
  127. py_neuromodulation/nm_bursts.py +44 -30
  128. py_neuromodulation/nm_coherence.py +2 -1
  129. py_neuromodulation/nm_features.py +4 -2
  130. py_neuromodulation/nm_filter.py +63 -32
  131. py_neuromodulation/nm_filter_preprocessing.py +91 -0
  132. py_neuromodulation/nm_fooof.py +47 -29
  133. py_neuromodulation/nm_mne_connectivity.py +1 -1
  134. py_neuromodulation/nm_normalization.py +50 -74
  135. py_neuromodulation/nm_oscillatory.py +151 -31
  136. py_neuromodulation/nm_plots.py +13 -10
  137. py_neuromodulation/nm_rereference.py +10 -8
  138. py_neuromodulation/nm_run_analysis.py +28 -13
  139. py_neuromodulation/nm_sharpwaves.py +103 -136
  140. py_neuromodulation/nm_stats.py +44 -30
  141. py_neuromodulation/nm_stream_abc.py +18 -10
  142. py_neuromodulation/nm_stream_offline.py +181 -40
  143. py_neuromodulation/utils/_logging.py +24 -0
  144. {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.3.dist-info}/METADATA +182 -142
  145. py_neuromodulation-0.0.3.dist-info/RECORD +188 -0
  146. {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.3.dist-info}/WHEEL +2 -1
  147. py_neuromodulation-0.0.3.dist-info/top_level.txt +5 -0
  148. tests/__init__.py +0 -0
  149. tests/conftest.py +117 -0
  150. tests/test_all_examples.py +10 -0
  151. tests/test_all_features.py +63 -0
  152. tests/test_bispectra.py +70 -0
  153. tests/test_bursts.py +105 -0
  154. tests/test_feature_sampling_rates.py +143 -0
  155. tests/test_fooof.py +16 -0
  156. tests/test_initalization_offline_stream.py +41 -0
  157. tests/test_multiprocessing.py +58 -0
  158. tests/test_nan_values.py +29 -0
  159. tests/test_nm_filter.py +95 -0
  160. tests/test_nm_resample.py +63 -0
  161. tests/test_normalization_settings.py +146 -0
  162. tests/test_notch_filter.py +31 -0
  163. tests/test_osc_features.py +424 -0
  164. tests/test_preprocessing_filter.py +151 -0
  165. tests/test_rereference.py +171 -0
  166. tests/test_sampling.py +57 -0
  167. tests/test_settings_change_after_init.py +76 -0
  168. tests/test_sharpwave.py +165 -0
  169. tests/test_target_channel_add.py +100 -0
  170. tests/test_timing.py +80 -0
  171. py_neuromodulation/data/README +0 -6
  172. py_neuromodulation/data/dataset_description.json +0 -8
  173. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/MOV_aligned_features_ch_ECOG_RIGHT_0_all.png +0 -0
  174. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/all_feature_plt.pdf +0 -0
  175. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_FEATURES.csv +0 -182
  176. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_LM_ML_RES.p +0 -0
  177. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_SETTINGS.json +0 -273
  178. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_SIDECAR.json +0 -6
  179. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_decoding_performance.png +0 -0
  180. py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_nm_channels.csv +0 -11
  181. py_neuromodulation/data/participants.json +0 -32
  182. py_neuromodulation/data/participants.tsv +0 -2
  183. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +0 -5
  184. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +0 -11
  185. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +0 -11
  186. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.eeg +0 -0
  187. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +0 -18
  188. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +0 -35
  189. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +0 -13
  190. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +0 -2
  191. py_neuromodulation/grid_cortex.tsv +0 -40
  192. py_neuromodulation/grid_subcortex.tsv +0 -1429
  193. py_neuromodulation/nm_settings.json +0 -290
  194. py_neuromodulation/plots/STN_surf.mat +0 -0
  195. py_neuromodulation/plots/Vertices.mat +0 -0
  196. py_neuromodulation/plots/faces.mat +0 -0
  197. py_neuromodulation/plots/grid.mat +0 -0
  198. py_neuromodulation/py_neuromodulation.egg-info/PKG-INFO +0 -104
  199. py_neuromodulation/py_neuromodulation.egg-info/dependency_links.txt +0 -1
  200. py_neuromodulation/py_neuromodulation.egg-info/requires.txt +0 -26
  201. py_neuromodulation/py_neuromodulation.egg-info/top_level.txt +0 -1
  202. py_neuromodulation-0.0.2.dist-info/RECORD +0 -73
  203. /py_neuromodulation/{py_neuromodulation.egg-info/SOURCES.txt → utils/__init__.py} +0 -0
  204. {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.3.dist-info}/LICENSE +0 -0
@@ -0,0 +1,68 @@
+ """
+ ===================
+ Adding New Features
+ ===================
+
+ """
+
+ import py_neuromodulation as pn
+ from py_neuromodulation import nm_features_abc
+ import numpy as np
+ from typing import Iterable
+
+ # %%
+ # In this example we will demonstrate how a new feature can be added to the existing feature pipeline.
+ # This can be done simply by appending an object of a class that inherits from :class:`~nm_features_abc.Feature`
+ # to the stream's `stream.run_analysis.features.features` list.
+
+ data = np.random.random([1, 1000])
+
+ stream = pn.Stream(sfreq=1000, data=data, sampling_rate_features_hz=10, verbose=False)
+
+ class NewFeature(nm_features_abc.Feature):
+
+     def __init__(
+         self, settings: dict, ch_names: Iterable[str], sfreq: float
+     ) -> None:
+         self.s = settings
+         self.ch_names = ch_names
+
+     def calc_feature(self, data: np.ndarray, features_compute: dict) -> dict:
+         for ch_idx, ch in enumerate(self.ch_names):
+             features_compute[f"new_feature_{ch}"] = np.mean(data[ch_idx, :])
+
+         return features_compute
+
+     def test_settings(self):
+         pass
+
+ newFeature = NewFeature(stream.settings, list(stream.nm_channels["name"]), stream.sfreq)
+ stream.run_analysis.features.features.append(newFeature)
+
+ features = stream.run_analysis.process(data)
+ feature_name = f"new_feature_{stream.nm_channels['name'][0]}"
+
+ print(f"{feature_name}: {features[feature_name]}")
+
+ # %%
+ # This example shows a simple newly defined feature class called `NewFeature`.
+ # The instantiated `newFeature` object can then be added to the existing feature list by calling
+ # `stream.run_analysis.features.features.append(newFeature)`.
+ #
+ # To permanently add a novel feature, the new feature class needs to be added to
+ # the :class:`~nm_features` class. This can be done by inserting the feature name
+ # in the :class:`~nm_features.Feature` init function:
+ #
+ # .. code-block:: python
+ #
+ #     for feature in s["features"]:
+ #         if s["features"][feature] is False:
+ #             continue
+ #         match feature:
+ #             case "new_feature":
+ #                 FeatureClass = nm_new_feature.NewFeature
+ #             ...
+ #
+ # The new feature class can then be used by setting the `settings["features"]["new_feature"]` value in the
+ # settings to True.
+ #
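As a sketch of the registration step described at the end of this example, the dispatch could be wrapped in a small factory function. This is a hedged illustration only: the module `nm_new_feature` is the hypothetical one named in the snippet above, and the function name `init_features` is invented for this sketch, not part of the package API.

.. code-block:: python

    from typing import Iterable

    from py_neuromodulation import nm_new_feature  # hypothetical module holding NewFeature


    def init_features(s: dict, ch_names: Iterable[str], sfreq: float) -> list:
        """Instantiate one Feature object per enabled entry in s["features"]."""
        features = []
        for feature_name in s["features"]:
            if s["features"][feature_name] is False:
                continue
            match feature_name:
                case "new_feature":
                    feature_class = nm_new_feature.NewFeature
                case _:
                    # built-in features (fft, bursts, ...) would be dispatched here
                    continue
            features.append(feature_class(s, ch_names, sfreq))
        return features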
@@ -0,0 +1,233 @@
+ """
+ ECoG Movement decoding example
+ ==============================
+
+ """
+
+ # %%
+ # This example notebook reads openly accessible data from the publication
+ # *Electrocorticography is superior to subthalamic local field potentials
+ # for movement decoding in Parkinson’s disease*
+ # (`Merk et al. 2022 <https://elifesciences.org/articles/75126>`_).
+ # The dataset is available `here <https://doi.org/10.7910/DVN/IO2FLM>`_.
+ #
+ # For simplicity, one example subject is automatically shipped within
+ # this repo in the *py_neuromodulation/data* folder, stored in
+ # `iEEG BIDS <https://www.nature.com/articles/s41597-019-0105-7>`_ format.
+
+ # %%
+ from sklearn import metrics, model_selection, linear_model
+ import matplotlib.pyplot as plt
+
+ import py_neuromodulation as nm
+ from py_neuromodulation import (
+     nm_analysis,
+     nm_decode,
+     nm_define_nmchannels,
+     nm_IO,
+     nm_plots,
+     nm_settings,
+ )
+
+ # %%
+ # Let's read the example data using `mne_bids <https://mne.tools/mne-bids/stable/index.html>`_.
+ # The resulting raw object is of type `mne.RawArray <https://mne.tools/stable/generated/mne.io.RawArray.html>`_.
+ # We can take properties such as the sampling frequency, channel names, and channel types directly from the mne object to create the *nm_channels* DataFrame:
+
+ RUN_NAME, PATH_RUN, PATH_BIDS, PATH_OUT, datatype = nm_IO.get_paths_example_data()
+
+ (
+     raw,
+     data,
+     sfreq,
+     line_noise,
+     coord_list,
+     coord_names,
+ ) = nm_IO.read_BIDS_data(
+     PATH_RUN=PATH_RUN, BIDS_PATH=PATH_BIDS, datatype=datatype
+ )
+
+ nm_channels = nm_define_nmchannels.set_channels(
+     ch_names=raw.ch_names,
+     ch_types=raw.get_channel_types(),
+     reference="default",
+     bads=raw.info["bads"],
+     new_names="default",
+     used_types=("ecog", "dbs", "seeg"),
+     target_keywords=["MOV_RIGHT"],
+ )
+
+ nm_channels
+
+ # %%
+ # This example contains grip force movement traces; we'll use the *MOV_RIGHT* channel as the decoding target channel.
+ # Let's check some of the raw time series traces:
+
+ plt.figure(figsize=(12, 4), dpi=300)
+ plt.subplot(121)
+ plt.plot(raw.times, data[-1, :])
+ plt.xlabel("Time [s]")
+ plt.ylabel("a.u.")
+ plt.title("Movement label")
+ plt.xlim(0, 20)
+
+ plt.subplot(122)
+ for idx, ch_name in enumerate(nm_channels.query("used == 1").name):
+     plt.plot(raw.times, data[idx, :] + idx * 300, label=ch_name)
+ plt.legend(bbox_to_anchor=(1, 0.5), loc="center left")
+ plt.title("ECoG + STN-LFP time series")
+ plt.xlabel("Time [s]")
+ plt.ylabel("Voltage a.u.")
+ plt.xlim(0, 20)
+
+ # %%
+ settings = nm_settings.get_default_settings()
+ settings = nm_settings.set_settings_fast_compute(settings)
+
+ settings["features"]["fft"] = True
+ settings["features"]["bursts"] = False
+ settings["features"]["sharpwave_analysis"] = False
+ settings["features"]["coherence"] = False  # True
+ settings["coherence"]["channels"] = [["LFP_RIGHT_0", "ECOG_RIGHT_0"]]
+ settings["coherence"]["frequency_bands"] = ["high beta", "low gamma"]
+ settings["sharpwave_analysis_settings"]["estimator"]["mean"] = []
+ for sw_feature in list(
+     settings["sharpwave_analysis_settings"]["sharpwave_features"].keys()
+ ):
+     settings["sharpwave_analysis_settings"]["sharpwave_features"][
+         sw_feature
+     ] = True
+     settings["sharpwave_analysis_settings"]["estimator"]["mean"].append(
+         sw_feature
+     )
+
+ # %%
+ stream = nm.Stream(
+     sfreq=sfreq,
+     nm_channels=nm_channels,
+     settings=settings,
+     line_noise=line_noise,
+     coord_list=coord_list,
+     coord_names=coord_names,
+     verbose=True,
+ )
+
+ # %%
+ features = stream.run(
+     data=data,
+     out_path_root=PATH_OUT,
+     folder_name=RUN_NAME,
+ )
+
+ # %%
+ # Feature Analysis Movement
+ # -------------------------
+ # The computed features can now be read and visualized using the :class:`~nm_analysis.Feature_Reader`.
+
+ # initialize analyzer
+ feature_reader = nm_analysis.Feature_Reader(
+     feature_dir=PATH_OUT,
+     feature_file=RUN_NAME,
+ )
+ feature_reader.label_name = "MOV_RIGHT"
+ feature_reader.label = feature_reader.feature_arr["MOV_RIGHT"]
+
+ # %%
+ feature_reader.feature_arr.iloc[100:108, -6:]
+
+ # %%
+ print(feature_reader.feature_arr.shape)
+
+ # %%
+ feature_reader._get_target_ch()
+
+ # %%
+ feature_reader.plot_target_averaged_channel(
+     ch="ECOG_RIGHT_0",
+     list_feature_keywords=None,
+     epoch_len=4,
+     threshold=0.5,
+     ytick_labelsize=7,
+     figsize_x=12,
+     figsize_y=12,
+ )
+
+ # %%
+ feature_reader.plot_all_features(
+     ytick_labelsize=3,
+     clim_low=-2,
+     clim_high=2,
+     ch_used="ECOG_RIGHT_0",
+     time_limit_low_s=0,
+     time_limit_high_s=20,
+     normalize=True,
+     save=True,
+ )
+
+ # %%
+ nm_plots.plot_corr_matrix(
+     feature=feature_reader.feature_arr.filter(regex="ECOG_RIGHT_0"),
+     ch_name="ECOG_RIGHT_0-avgref",
+     feature_names=feature_reader.feature_arr.filter(
+         regex="ECOG_RIGHT_0-avgref"
+     ).columns,
+     feature_file=feature_reader.feature_file,
+     show_plot=True,
+     figsize=(15, 15),
+ )
+
+ # %%
+ # Decoding
+ # --------
+ #
+ # The main focus of the *py_neuromodulation* pipeline is feature estimation.
+ # Nevertheless, the user can also use the pipeline for machine learning decoding.
+ # It can be used for regression and classification problems, as well as dimensionality reduction methods such as PCA and CCA.
+ #
+ # Here, we show an example using a linear regression model. The labels come from a continuous grip force movement target, named "MOV_RIGHT".
+ #
+ # First we initialize the :class:`~nm_decode.Decoder` class with the specified *validation method*, here a simple 3-fold cross-validation,
+ # the evaluation metric, the machine learning model, and the channels we want to evaluate performances for.
+ #
+ # There are many more implemented methods, but we will here limit the analysis to the ones presented.
+
+ model = linear_model.LinearRegression()
+
+ feature_reader.decoder = nm_decode.Decoder(
+     features=feature_reader.feature_arr,
+     label=feature_reader.label,
+     label_name=feature_reader.label_name,
+     used_chs=feature_reader.used_chs,
+     model=model,
+     eval_method=metrics.r2_score,
+     cv_method=model_selection.KFold(n_splits=3, shuffle=True),
+ )
+
+ # %%
+ performances = feature_reader.run_ML_model(
+     estimate_channels=True,
+     estimate_gridpoints=False,
+     estimate_all_channels_combined=True,
+     save_results=True,
+ )
+
+ # %%
+ # The performances are a dictionary that can be transformed into a DataFrame:
+
+ df_per = feature_reader.get_dataframe_performances(performances)
+
+ df_per
+
+ # %%
+ ax = nm_plots.plot_df_subjects(
+     df_per,
+     x_col="sub",
+     y_col="performance_test",
+     hue="ch_type",
+     PATH_SAVE=PATH_OUT / RUN_NAME / (RUN_NAME + "_decoding_performance.png"),
+     figsize_tuple=(8, 5),
+ )
+ ax.set_ylabel(r"$R^2$")
+ ax.set_xlabel("Subject 000")
+ ax.set_title("Performance comparison: movement decoding")
+ plt.tight_layout()
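For orientation, the following is a minimal sketch of the cross-validation that the :class:`~nm_decode.Decoder` call above performs, written directly against scikit-learn. It assumes the `feature_reader` object from the example; the channel-column selection via regex is an illustration, not the Decoder's exact internals.

.. code-block:: python

    import numpy as np
    from sklearn import linear_model, metrics, model_selection

    # features of one channel and the continuous movement label
    X = feature_reader.feature_arr.filter(regex="ECOG_RIGHT_0").to_numpy()
    y = feature_reader.label.to_numpy()

    cv = model_selection.KFold(n_splits=3, shuffle=True)
    scores = []
    for train_idx, test_idx in cv.split(X):
        model = linear_model.LinearRegression().fit(X[train_idx], y[train_idx])
        scores.append(metrics.r2_score(y[test_idx], model.predict(X[test_idx])))

    print(f"mean test R^2: {np.mean(scores):.3f}")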
@@ -0,0 +1,219 @@
+ """
+ Analyzing temporal features
+ ===========================
+
+ """
+
+ # %%
+ # Time series data can be characterized using oscillatory components, but assumptions of sinusoidality are rarely fulfilled for real data.
+ # See *"Brain Oscillations and the Importance of Waveform Shape"* `Cole et al. 2017 <https://doi.org/10.1016/j.tics.2016.12.008>`_ for a great motivation.
+ # Here we implemented temporal characteristics based on individual trough and peak relations,
+ # detected with the :meth:`~scipy.signal.find_peaks` method. The function parameter *distance* can be specified in the *nm_settings.json*.
+ # Temporal features can be calculated separately for troughs and peaks. In the settings, this can be specified by setting *estimate* to true
+ # in *detect_troughs* and/or *detect_peaks*. A statistical measure (e.g. mean, max, median, var) can be defined as a resulting feature from the peak and
+ # trough estimates using the *apply_estimator_between_peaks_and_troughs* setting.
+ #
+ # In py_neuromodulation the following characteristics are implemented:
+ #
+ # .. note::
+ #     The nomenclature is written here for sharpwave troughs, but peak characteristics can be computed in the same way.
+ #
+ # - prominence:
+ #   :math:`V_{prominence} = |\frac{V_{peak-left} + V_{peak-right}}{2}| - V_{trough}`
+ # - sharpness:
+ #   :math:`V_{sharpness} = \frac{(V_{trough} - V_{trough-5 ms}) + (V_{trough} - V_{trough+5 ms})}{2}`
+ # - rise and decay time
+ # - rise and decay steepness
+ # - width (between left and right peaks)
+ # - interval (between troughs)
+ #
+ # Additionally, different filter ranges can be parametrized using the *filter_ranges_hz* setting.
+ # Filtering is necessary to remove high-frequency signal fluctuations, but also limits the accurate estimation of sharpness and prominence due to signal smoothing.
+
+ import seaborn as sb
+ from matplotlib import pyplot as plt
+ from scipy import signal
+ import numpy as np
+
+ import py_neuromodulation as nm
+ from py_neuromodulation import (
+     nm_define_nmchannels,
+     nm_IO,
+     nm_settings,
+ )
+
+
+ # %%
+ # We will first read the example ECoG data and plot the identified features on the filtered time series.
+
+ RUN_NAME, PATH_RUN, PATH_BIDS, PATH_OUT, datatype = nm_IO.get_paths_example_data()
+
+ (
+     raw,
+     data,
+     sfreq,
+     line_noise,
+     coord_list,
+     coord_names,
+ ) = nm_IO.read_BIDS_data(
+     PATH_RUN=PATH_RUN,
+     BIDS_PATH=PATH_BIDS, datatype=datatype
+ )
+
+ # %%
+ settings = nm_settings.get_default_settings()
+ settings = nm_settings.set_settings_fast_compute(settings)
+
+ settings["features"]["fft"] = True
+ settings["features"]["bursts"] = False
+ settings["features"]["sharpwave_analysis"] = True
+ settings["features"]["coherence"] = False
+
+ settings["sharpwave_analysis_settings"]["estimator"]["mean"] = []
+ for sw_feature in list(
+     settings["sharpwave_analysis_settings"]["sharpwave_features"].keys()
+ ):
+     settings["sharpwave_analysis_settings"]["sharpwave_features"][sw_feature] = True
+     settings["sharpwave_analysis_settings"]["estimator"]["mean"].append(sw_feature)
+
+ nm_channels = nm_define_nmchannels.set_channels(
+     ch_names=raw.ch_names,
+     ch_types=raw.get_channel_types(),
+     reference="default",
+     bads=raw.info["bads"],
+     new_names="default",
+     used_types=("ecog", "dbs", "seeg"),
+     target_keywords=["MOV_RIGHT"],
+ )
+
+ stream = nm.Stream(
+     sfreq=sfreq,
+     nm_channels=nm_channels,
+     settings=settings,
+     line_noise=line_noise,
+     coord_list=coord_list,
+     coord_names=coord_names,
+     verbose=False,
+ )
+ sw_analyzer = stream.run_analysis.features.features[1]
+
+ # %%
+ # The plotted example time series, visualized on a short time scale, shows the relation of identified peaks, troughs, and estimated features:
+ data_plt = data[5, 1000:4000]
+
+
+ sw_analyzer._initialize_sw_features()
+ filtered_dat = np.convolve(
+     data_plt,
+     sw_analyzer.list_filter[0][1],
+     mode="same",
+ )
+ # filtered_dat = filtered_dat[500:-500]
+
+ troughs = signal.find_peaks(-filtered_dat, distance=10)[0]
+ peaks = signal.find_peaks(filtered_dat, distance=5)[0]
+
+ sw_analyzer.data_process_sw = filtered_dat
+ sw_analyzer.analyze_waveform()
+
+ WIDTH = BAR_WIDTH = 4
+ BAR_OFFSET = 50
+ OFFSET_TIME_SERIES = -100
+ SCALE_TIMESERIES = 1
+
+ hue_colors = sb.color_palette("viridis_r", 6)
+
+ plt.figure(figsize=(5, 3), dpi=300)
+ plt.plot(OFFSET_TIME_SERIES + data_plt, color="gray", linewidth=0.5, alpha=0.5, label="original ECoG data")
+ plt.plot(OFFSET_TIME_SERIES + filtered_dat * SCALE_TIMESERIES, linewidth=0.5, color="black", label="[5-30]Hz filtered data")
+
+ plt.plot(peaks, OFFSET_TIME_SERIES + filtered_dat[peaks] * SCALE_TIMESERIES, "x", label="peaks", markersize=3, color="darkgray")
+ plt.plot(troughs, OFFSET_TIME_SERIES + filtered_dat[troughs] * SCALE_TIMESERIES, "x", label="troughs", markersize=3, color="lightgray")
+
+ plt.bar(troughs + BAR_WIDTH, np.array(sw_analyzer.prominence) * 4, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[0], label="Prominence", alpha=0.5)
+ plt.bar(troughs + BAR_WIDTH * 2, -np.array(sw_analyzer.sharpness) * 6, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[1], label="Sharpness", alpha=0.5)
+ plt.bar(troughs + BAR_WIDTH * 3, np.array(sw_analyzer.interval) * 5, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[2], label="Interval", alpha=0.5)
+ plt.bar(troughs + BAR_WIDTH * 4, np.array(sw_analyzer.rise_time) * 5, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[3], label="Rise time", alpha=0.5)
+
+ plt.xticks(np.arange(0, data_plt.shape[0], 200), np.round(np.arange(0, int(data_plt.shape[0] / 1000), 0.2), 2))
+ plt.xlabel("Time [s]")
+ plt.title("Temporal waveform shape features")
+ plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
+ plt.ylim(-550, 700)
+ plt.xlim(0, 200)
+ plt.ylabel("a.u.")
+ plt.tight_layout()
+
+ # %%
+ # The following example shows a time series aligned to movement. With movement onset, the prominence, sharpness, and interval features are reduced:
+
+ plt.figure(figsize=(8, 5), dpi=300)
+ plt.plot(OFFSET_TIME_SERIES + data_plt, color="gray", linewidth=0.5, alpha=0.5, label="original ECoG data")
+ plt.plot(OFFSET_TIME_SERIES + filtered_dat * SCALE_TIMESERIES, linewidth=0.5, color="black", label="[5-30]Hz filtered data")
+
+ plt.plot(peaks, OFFSET_TIME_SERIES + filtered_dat[peaks] * SCALE_TIMESERIES, "x", label="peaks", markersize=3, color="darkgray")
+ plt.plot(troughs, OFFSET_TIME_SERIES + filtered_dat[troughs] * SCALE_TIMESERIES, "x", label="troughs", markersize=3, color="lightgray")
+
+ plt.bar(troughs + BAR_WIDTH, np.array(sw_analyzer.prominence) * 4, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[0], label="Prominence", alpha=0.5)
+ plt.bar(troughs + BAR_WIDTH * 2, -np.array(sw_analyzer.sharpness) * 6, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[1], label="Sharpness", alpha=0.5)
+ plt.bar(troughs + BAR_WIDTH * 3, np.array(sw_analyzer.interval) * 5, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[2], label="Interval", alpha=0.5)
+ plt.bar(troughs + BAR_WIDTH * 4, np.array(sw_analyzer.rise_time) * 5, bottom=BAR_OFFSET, width=WIDTH, color=hue_colors[3], label="Rise time", alpha=0.5)
+
+ plt.axvline(x=1500, label="Movement start", color="red")
+
+ # plt.xticks(np.arange(0, 2000, 200), np.round(np.arange(0, 2, 0.2), 2))
+ plt.xticks(np.arange(0, data_plt.shape[0], 200), np.round(np.arange(0, int(data_plt.shape[0] / 1000), 0.2), 2))
+ plt.xlabel("Time [s]")
+ plt.title("Temporal waveform shape features")
+ plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
+ plt.ylim(-450, 400)
+ plt.ylabel("a.u.")
+ plt.tight_layout()
+
+ # %%
+ # In the *sharpwave_analysis_settings* the *estimator* keyword further specifies which statistic is computed based on the individual
+ # features in one batch. The "global" setting *segment_length_features_ms* specifies the time duration for feature computation.
+ # Since there can be a different number of identified waveform shape features for different batches (i.e. a different number of peaks/troughs),
+ # taking a statistical measure (e.g. the maximum or mean) is necessary for feature comparison.
+
+ # %%
+ # Example time series computation for movement decoding
+ # -----------------------------------------------------
+ # We will now read the example ECoG data and investigate if samples differ across movement states. Therefore, we compute features with the default *sharpwave* features enabled.
+
+ settings = nm_settings.get_default_settings()
+ settings = nm_settings.reset_settings(settings)
+ settings["features"]["sharpwave_analysis"] = True
+ settings["sharpwave_analysis_settings"]["interval"] = False
+ settings["sharpwave_analysis_settings"]["filter_ranges"] = [[5, 80]]
+
+ nm_channels["used"] = 0  # use only two ECoG channels for faster computation
+ nm_channels.loc[[3, 8], "used"] = 1
+
+ stream = nm.Stream(
+     sfreq=sfreq,
+     nm_channels=nm_channels,
+     settings=settings,
+     line_noise=line_noise,
+     coord_list=coord_list,
+     coord_names=coord_names,
+     verbose=True,
+ )
+
+ df_features = stream.run(data=data[:, :30000])
+
+ # %%
+ # We can then plot two exemplary features, prominence and interval, and see that the movement amplitude can be clustered with those two features alone:
+
+ plt.figure(figsize=(5, 3), dpi=300)
+ plt.scatter(
+     df_features["ECOG_RIGHT_0-avgref_Sharpwave_Max_prominence_range_5_80"],
+     df_features["ECOG_RIGHT_5-avgref_Sharpwave_Mean_interval_range_5_80"],
+     c=df_features["MOV_RIGHT"], alpha=0.8, s=30
+ )
+ cbar = plt.colorbar()
+ cbar.set_label("Movement amplitude")
+ plt.xlabel("Prominence a.u.")
+ plt.ylabel("Interval a.u.")
+ plt.title("Temporal features predict movement amplitude")
+ plt.tight_layout()
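To make the trough feature definitions above concrete, here is a standalone numpy/scipy sketch that computes prominence and sharpness exactly as given by the two formulas at the top of this example. It runs on synthetic data and is an illustration only, not the nm_sharpwaves implementation.

.. code-block:: python

    import numpy as np
    from scipy import signal

    sfreq = 1000  # Hz
    t = np.arange(0, 2, 1 / sfreq)
    x = np.sin(2 * np.pi * 10 * t) + 0.2 * np.random.randn(t.size)

    troughs = signal.find_peaks(-x, distance=10)[0]
    peaks = signal.find_peaks(x, distance=10)[0]
    samples_5ms = int(5 * sfreq / 1000)

    prominence, sharpness = [], []
    for trough in troughs:
        left = peaks[peaks < trough]   # peaks left of the trough
        right = peaks[peaks > trough]  # peaks right of the trough
        if left.size == 0 or right.size == 0:
            continue
        # V_prominence = |(V_peak-left + V_peak-right) / 2| - V_trough
        prominence.append(np.abs((x[left[-1]] + x[right[0]]) / 2) - x[trough])
        # V_sharpness = ((V_trough - V_trough-5ms) + (V_trough - V_trough+5ms)) / 2
        if trough - samples_5ms >= 0 and trough + samples_5ms < x.size:
            sharpness.append(
                ((x[trough] - x[trough - samples_5ms])
                 + (x[trough] - x[trough + samples_5ms])) / 2
            )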
@@ -0,0 +1,97 @@
+ """
+ Real-time feature estimation
+ ============================
+
+ """
+
+ # %%
+ # Implementation of individual nm_streams
+ # ---------------------------------------
+ #
+ # *py_neuromodulation* was optimized for computation of real-time data streams.
+ # There are, however, center- and lab-specific hardware acquisition systems. Therefore, each experiment requires modules to interact with hardware platforms
+ # that periodically acquire data.
+ #
+ # Given the raw data, the data can be analyzed using *py_neuromodulation*. Preprocessing methods, such as re-referencing and normalization,
+ # feature computation, and decoding can then be performed in real time.
+ #
+ # For online as well as offline analysis, the :class:`~nm_stream_abc` class needs to be instantiated.
+ # Here the `nm_settings` and `nm_channels` are required to be defined.
+ # Previously, for the offline analysis, an offline :class:`~nm_generator` object was defined that periodically yielded data.
+ # For online data, the :meth:`~nm_stream_abc.run` function therefore needs to be overwritten, which first acquires data and then calls
+ # the :meth:`~nm_run_analysis.process` function.
+ #
+ # The following illustrates in pseudo-code how such a stream could be initialized:
+ #
+ # .. code-block:: python
+ #
+ #     from py_neuromodulation import nm_stream_abc
+ #
+ #     class MyStream(nm_stream_abc):
+ #         def __init__(self, settings, channels):
+ #             super().__init__(settings, channels)
+ #
+ #         def run(self):
+ #             features_ = []
+ #             while True:
+ #                 data = self.acquire_data()
+ #                 features_.append(self.run_analysis.process(data))
+ #                 # potentially use machine learning model for decoding
+ #
+ #
+ # Computation time examples
+ # -------------------------
+ #
+ # The following example measures the computation time for CAR re-referencing, z-score normalization, and FFT feature estimation, for one and six channels:
+
+ # %%
+ import py_neuromodulation as pn
+ import numpy as np
+ import timeit
+
+ def get_fast_compute_settings():
+     settings = pn.nm_settings.get_default_settings()
+     settings = pn.nm_settings.reset_settings(settings)
+     settings = pn.nm_settings.set_settings_fast_compute(settings)
+     settings["preprocessing"] = ["re_referencing", "notch_filter"]
+     settings["features"]["fft"] = True
+     settings["postprocessing"]["feature_normalization"] = True
+     return settings
+
+ data = np.random.random([1, 1000])
+
+ print("FFT Features, CAR re-referencing, z-score normalization")
+ print()
+ print("Computation time for single ECoG channel: ")
+ stream = pn.Stream(sfreq=1000, data=data, sampling_rate_features_hz=10, verbose=False, settings=get_fast_compute_settings())
+ print(f"{np.round(timeit.timeit(lambda: stream.run_analysis.process(data), number=100)/100, 3)} s")
+
+ print("Computation time for 6 ECoG channels: ")
+ data = np.random.random([6, 1000])
+ stream = pn.Stream(sfreq=500, data=data, sampling_rate_features_hz=10, verbose=False, settings=get_fast_compute_settings())
+ print(f"{np.round(timeit.timeit(lambda: stream.run_analysis.process(data), number=100)/100, 3)} s")
+
+ print("\nFFT Features & Temporal Waveform Shape & Hjorth & Bursts, CAR re-referencing, z-score normalization")
+ print("Computation time for single ECoG channel: ")
+ data = np.random.random([1, 1000])
+ stream = pn.Stream(sfreq=1000, data=data, sampling_rate_features_hz=10, verbose=False)
+ print(f"{np.round(timeit.timeit(lambda: stream.run_analysis.process(data), number=10)/10, 3)} s")
+
+
+ # %%
+ # These results show that the computation time for a typical pipeline (FFT, re-referencing, notch filtering, feature normalization)
+ # is well below 10 ms, which is fast enough for real-time analysis with feature sampling rates below 100 Hz.
+ # Computation of more complex features could still support feature sampling rates of more than 30 Hz.
+ #
+ # Real-time movement decoding using the TMSi-SAGA amplifier
+ # ---------------------------------------------------------
+ #
+ # In the following example, we show how we set up a real-time movement decoding experiment using the TMSi-SAGA amplifier.
+ # First, we relied on different software modules for data streaming and visualization.
+ # `LabStreamingLayer <https://labstreaminglayer.org>`_ allows for real-time data streaming and synchronization across multiple devices.
+ # We used `timeflux <https://timeflux.io>`_ for real-time visualization of features and decoded output.
+ # For raw data visualization we used `Brain Streaming Layer <https://fcbg-hnp-meeg.github.io/bsl/dev/index.html>`_.
+ #
+ # The code for real-time movement decoding is available in the GitHub branch `realtime_decoding <https://github.com/neuromodulation/py_neuromodulation/tree/realtime_decoding>`_.
+ # Here we relied on the `TMSI SAGA Python interface <https://gitlab.com/tmsi/tmsi-python-interface>`_.
+ #
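As a concrete instance of the `acquire_data` step in the pseudo-code stream above, a minimal LSL-based acquisition loop could look as follows. This is a sketch assuming an LSL stream of type "EEG" is visible on the network and that `stream.run_analysis.process` from a previously initialized py_neuromodulation stream is used for processing.

.. code-block:: python

    import numpy as np
    from pylsl import StreamInlet, resolve_stream

    # connect to the first LSL stream of type "EEG" found on the network
    streams = resolve_stream("type", "EEG")
    inlet = StreamInlet(streams[0])

    features_ = []
    while True:
        # pull up to 100 ms worth of buffered samples
        chunk, timestamps = inlet.pull_chunk(timeout=0.1)
        if not timestamps:
            continue
        data = np.array(chunk).T  # shape (n_channels, n_samples)
        features_.append(stream.run_analysis.process(data))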
@@ -0,0 +1,64 @@
+ """
+ R-Map computation
+ =================
+
+ """
+ # %%
+ # sphinx_gallery_thumbnail_path = '_static/RMAP_figure.png'
+
+ # %%
+ # Across-patient decoding using R-Map optimal connectivity
+ # --------------------------------------------------------
+ #
+ # ECoG electrode placement is commonly very heterogeneous across patients and cohorts.
+ # To still facilitate approaches that can perform decoding without patient-individual training,
+ # two across-patient decoding approaches were previously investigated for movement decoding:
+ #
+ #
+ # * grid-point decoding
+ # * optimal connectivity channel decoding
+ #
+ #
+ # First, the grid-point decoding approach relies on the definition of a cortical or subcortical grid.
+ # Data from individual channels is then interpolated onto those common grid points.
+ # The approach is also explained in the :doc:`plot_4_example_gridPointProjection` notebook.
+ #
+ # .. image:: ../_static/RMAP_figure.png
+ #   :alt: R-Map and grid point approach for decoding without patient-individual training
+ #
+ # The R-Map decoding approach, on the other hand, relies on computation of whole-brain connectivity. The electrode MNI space locations need to be known;
+ # then the following steps can be performed for decoding without patient-individual training:
+ #
+ # #. Using the `wjn_toolbox <https://github.com/neuromodulation/wjn_toolbox>`_ *wjn_spherical_roi* function, the MNI coordinates can be transformed into NIFTI (.nii) files, containing the electrode contact region of interest (ROI):
+ #
+ #    .. code-block:: python
+ #
+ #       wjn_spherical_roi(roiname, mni, 4)
+ #
+ # #. For the given *ROI.nii* files, the LeadDBS `LeadMapper <https://netstim.gitbook.io/leaddbs/connectomics/lead-mapper>`_ tool can be used for functional or structural connectivity estimation.
+ #
+ # #. The py_neuromodulation :mod:`~nm_RMAP` module can then compute the R-Map given the contact-individual connectivity fingerprints:
+ #
+ #    .. code-block:: python
+ #
+ #       nm_RMAP.calculate_RMap_numba(fingerprints, performances)
+ #
+ # #. The fingerprints from test-set patients can then be correlated with the calculated R-Map:
+ #
+ #    .. code-block:: python
+ #
+ #       nm_RMAP.get_corr_numba(fp, fp_test)
+ #
+ # #. The channel with the highest correlation can then be selected for decoding without individual training. :mod:`~nm_RMAP` already contains leave-one-channel-out and leave-one-patient-out cross-validation functions:
+ #
+ #    .. code-block:: python
+ #
+ #       nm_RMAP.leave_one_sub_out_cv(l_fps_names, l_fps_dat, l_per, sub_list)
+ #
+ # #. The obtained R-Map correlations can then be evaluated statistically and plotted against the true performances:
+ #
+ #    .. code-block:: python
+ #
+ #       nm_RMAP.plot_performance_prediction_correlation(per_left_out, per_predict, out_path_save)
+ #
+ #
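To illustrate the two correlation steps named above, here is a minimal numpy sketch: an R-Map as the voxel-wise Pearson correlation of connectivity fingerprints with decoding performances, followed by the spatial correlation used to rank test-set channels. Array shapes and the random data are assumptions for illustration; nm_RMAP's numba implementations are the reference.

.. code-block:: python

    import numpy as np

    def calculate_rmap(fingerprints: np.ndarray, performances: np.ndarray) -> np.ndarray:
        """Voxel-wise Pearson correlation across channels.

        fingerprints: (n_channels, n_voxels) connectivity maps
        performances: (n_channels,) decoding performance per channel
        """
        fp = fingerprints - fingerprints.mean(axis=0)
        per = performances - performances.mean()
        num = fp.T @ per
        denom = np.sqrt((fp**2).sum(axis=0)) * np.sqrt((per**2).sum())
        return num / denom  # (n_voxels,) R-Map

    def spatial_corr(rmap: np.ndarray, fp_test: np.ndarray) -> float:
        """Correlate a test fingerprint with the R-Map (channel selection criterion)."""
        return float(np.corrcoef(rmap, fp_test)[0, 1])

    rng = np.random.default_rng(0)
    fingerprints = rng.normal(size=(10, 500))   # 10 channels, 500 voxels
    performances = rng.uniform(0.5, 0.9, size=10)
    rmap = calculate_rmap(fingerprints, performances)
    print(spatial_corr(rmap, fingerprints[0]))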