py-neuromodulation 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/build/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +68 -0
- docs/build/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +233 -0
- docs/build/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +219 -0
- docs/build/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +97 -0
- docs/build/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +64 -0
- docs/build/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +192 -0
- docs/build/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +210 -0
- docs/build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +68 -0
- docs/build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +239 -0
- docs/build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +219 -0
- docs/build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +97 -0
- docs/build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +64 -0
- docs/build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +192 -0
- docs/build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +210 -0
- docs/source/_build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +76 -0
- docs/source/_build/html/_downloads/0d0d0a76e8f648d5d3cbc47da6351932/plot_real_time_demo.py +97 -0
- docs/source/_build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +240 -0
- docs/source/_build/html/_downloads/5d73cadc59a8805c47e3b84063afc157/plot_example_BIDS.py +233 -0
- docs/source/_build/html/_downloads/7660317fa5a6bfbd12fcca9961457fc4/plot_example_rmap_computing.py +63 -0
- docs/source/_build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +219 -0
- docs/source/_build/html/_downloads/839e5b319379f7fd9e867deb00fd797f/plot_example_gridPointProjection.py +210 -0
- docs/source/_build/html/_downloads/ae8be19afe5e559f011fc9b138968ba0/plot_first_demo.py +192 -0
- docs/source/_build/html/_downloads/b8b06cacc17969d3725a0b6f1d7741c5/plot_example_sharpwave_analysis.py +219 -0
- docs/source/_build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +121 -0
- docs/source/_build/html/_downloads/c31a86c0b68cb4167d968091ace8080d/plot_example_add_feature.py +68 -0
- docs/source/_build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +64 -0
- docs/source/_build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +189 -0
- docs/source/_build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +210 -0
- docs/source/auto_examples/plot_0_first_demo.py +189 -0
- docs/source/auto_examples/plot_1_example_BIDS.py +240 -0
- docs/source/auto_examples/plot_2_example_add_feature.py +76 -0
- docs/source/auto_examples/plot_3_example_sharpwave_analysis.py +219 -0
- docs/source/auto_examples/plot_4_example_gridPointProjection.py +210 -0
- docs/source/auto_examples/plot_5_example_rmap_computing.py +64 -0
- docs/source/auto_examples/plot_6_real_time_demo.py +121 -0
- docs/source/conf.py +105 -0
- examples/plot_0_first_demo.py +189 -0
- examples/plot_1_example_BIDS.py +240 -0
- examples/plot_2_example_add_feature.py +76 -0
- examples/plot_3_example_sharpwave_analysis.py +219 -0
- examples/plot_4_example_gridPointProjection.py +210 -0
- examples/plot_5_example_rmap_computing.py +64 -0
- examples/plot_6_real_time_demo.py +121 -0
- packages/realtime_decoding/build/lib/realtime_decoding/__init__.py +4 -0
- packages/realtime_decoding/build/lib/realtime_decoding/decoder.py +104 -0
- packages/realtime_decoding/build/lib/realtime_decoding/features.py +163 -0
- packages/realtime_decoding/build/lib/realtime_decoding/helpers.py +15 -0
- packages/realtime_decoding/build/lib/realtime_decoding/run_decoding.py +345 -0
- packages/realtime_decoding/build/lib/realtime_decoding/trainer.py +54 -0
- packages/tmsi/build/lib/TMSiFileFormats/__init__.py +37 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/__init__.py +36 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/lsl_stream_writer.py +200 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_file_writer.py +496 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_to_edf_converter.py +236 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/xdf_file_writer.py +977 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/__init__.py +35 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/edf_reader.py +116 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/poly5reader.py +294 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/xdf_reader.py +229 -0
- packages/tmsi/build/lib/TMSiFileFormats/file_writer.py +102 -0
- packages/tmsi/build/lib/TMSiPlotters/__init__.py +2 -0
- packages/tmsi/build/lib/TMSiPlotters/gui/__init__.py +39 -0
- packages/tmsi/build/lib/TMSiPlotters/gui/_plotter_gui.py +234 -0
- packages/tmsi/build/lib/TMSiPlotters/gui/plotting_gui.py +440 -0
- packages/tmsi/build/lib/TMSiPlotters/plotters/__init__.py +44 -0
- packages/tmsi/build/lib/TMSiPlotters/plotters/hd_emg_plotter.py +446 -0
- packages/tmsi/build/lib/TMSiPlotters/plotters/impedance_plotter.py +589 -0
- packages/tmsi/build/lib/TMSiPlotters/plotters/signal_plotter.py +1326 -0
- packages/tmsi/build/lib/TMSiSDK/__init__.py +54 -0
- packages/tmsi/build/lib/TMSiSDK/device.py +588 -0
- packages/tmsi/build/lib/TMSiSDK/devices/__init__.py +34 -0
- packages/tmsi/build/lib/TMSiSDK/devices/saga/TMSi_Device_API.py +1764 -0
- packages/tmsi/build/lib/TMSiSDK/devices/saga/__init__.py +34 -0
- packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_device.py +1366 -0
- packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_types.py +520 -0
- packages/tmsi/build/lib/TMSiSDK/devices/saga/xml_saga_config.py +165 -0
- packages/tmsi/build/lib/TMSiSDK/error.py +95 -0
- packages/tmsi/build/lib/TMSiSDK/sample_data.py +63 -0
- packages/tmsi/build/lib/TMSiSDK/sample_data_server.py +99 -0
- packages/tmsi/build/lib/TMSiSDK/settings.py +45 -0
- packages/tmsi/build/lib/TMSiSDK/tmsi_device.py +111 -0
- packages/tmsi/build/lib/__init__.py +4 -0
- packages/tmsi/build/lib/apex_sdk/__init__.py +34 -0
- packages/tmsi/build/lib/apex_sdk/device/__init__.py +41 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API.py +1009 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_enums.py +239 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_structures.py +668 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_device.py +1611 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_dongle.py +38 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_event_reader.py +57 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_channel.py +44 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_config.py +150 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_const.py +36 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_impedance_channel.py +48 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_info.py +108 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/dongle_info.py +39 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/download_measurement.py +77 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/eeg_measurement.py +150 -0
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/impedance_measurement.py +129 -0
- packages/tmsi/build/lib/apex_sdk/device/threads/conversion_thread.py +59 -0
- packages/tmsi/build/lib/apex_sdk/device/threads/sampling_thread.py +57 -0
- packages/tmsi/build/lib/apex_sdk/device/tmsi_channel.py +83 -0
- packages/tmsi/build/lib/apex_sdk/device/tmsi_device.py +201 -0
- packages/tmsi/build/lib/apex_sdk/device/tmsi_device_enums.py +103 -0
- packages/tmsi/build/lib/apex_sdk/device/tmsi_dongle.py +43 -0
- packages/tmsi/build/lib/apex_sdk/device/tmsi_event_reader.py +50 -0
- packages/tmsi/build/lib/apex_sdk/device/tmsi_measurement.py +118 -0
- packages/tmsi/build/lib/apex_sdk/sample_data_server/__init__.py +33 -0
- packages/tmsi/build/lib/apex_sdk/sample_data_server/event_data.py +44 -0
- packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data.py +50 -0
- packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data_server.py +136 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_errors/error.py +126 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_sdk.py +113 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/apex/apex_structure_generator.py +134 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/decorators.py +60 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/logger_filter.py +42 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/singleton.py +42 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/support_functions.py +72 -0
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/tmsi_logger.py +98 -0
- py_neuromodulation/{helper.py → _write_example_dataset_helper.py} +1 -1
- py_neuromodulation/nm_EpochStream.py +2 -3
- py_neuromodulation/nm_IO.py +43 -70
- py_neuromodulation/nm_RMAP.py +308 -11
- py_neuromodulation/nm_analysis.py +1 -1
- py_neuromodulation/nm_artifacts.py +25 -0
- py_neuromodulation/nm_bispectra.py +64 -29
- py_neuromodulation/nm_bursts.py +44 -30
- py_neuromodulation/nm_coherence.py +2 -1
- py_neuromodulation/nm_features.py +4 -2
- py_neuromodulation/nm_filter.py +63 -32
- py_neuromodulation/nm_filter_preprocessing.py +91 -0
- py_neuromodulation/nm_fooof.py +47 -29
- py_neuromodulation/nm_mne_connectivity.py +1 -1
- py_neuromodulation/nm_normalization.py +50 -74
- py_neuromodulation/nm_oscillatory.py +151 -31
- py_neuromodulation/nm_plots.py +13 -10
- py_neuromodulation/nm_rereference.py +10 -8
- py_neuromodulation/nm_run_analysis.py +28 -13
- py_neuromodulation/nm_sharpwaves.py +103 -136
- py_neuromodulation/nm_stats.py +44 -30
- py_neuromodulation/nm_stream_abc.py +18 -10
- py_neuromodulation/nm_stream_offline.py +181 -40
- py_neuromodulation/utils/_logging.py +24 -0
- {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.3.dist-info}/METADATA +182 -142
- py_neuromodulation-0.0.3.dist-info/RECORD +188 -0
- {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.3.dist-info}/WHEEL +2 -1
- py_neuromodulation-0.0.3.dist-info/top_level.txt +5 -0
- tests/__init__.py +0 -0
- tests/conftest.py +117 -0
- tests/test_all_examples.py +10 -0
- tests/test_all_features.py +63 -0
- tests/test_bispectra.py +70 -0
- tests/test_bursts.py +105 -0
- tests/test_feature_sampling_rates.py +143 -0
- tests/test_fooof.py +16 -0
- tests/test_initalization_offline_stream.py +41 -0
- tests/test_multiprocessing.py +58 -0
- tests/test_nan_values.py +29 -0
- tests/test_nm_filter.py +95 -0
- tests/test_nm_resample.py +63 -0
- tests/test_normalization_settings.py +146 -0
- tests/test_notch_filter.py +31 -0
- tests/test_osc_features.py +424 -0
- tests/test_preprocessing_filter.py +151 -0
- tests/test_rereference.py +171 -0
- tests/test_sampling.py +57 -0
- tests/test_settings_change_after_init.py +76 -0
- tests/test_sharpwave.py +165 -0
- tests/test_target_channel_add.py +100 -0
- tests/test_timing.py +80 -0
- py_neuromodulation/data/README +0 -6
- py_neuromodulation/data/dataset_description.json +0 -8
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/MOV_aligned_features_ch_ECOG_RIGHT_0_all.png +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/all_feature_plt.pdf +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_FEATURES.csv +0 -182
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_LM_ML_RES.p +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_SETTINGS.json +0 -273
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_SIDECAR.json +0 -6
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_decoding_performance.png +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_nm_channels.csv +0 -11
- py_neuromodulation/data/participants.json +0 -32
- py_neuromodulation/data/participants.tsv +0 -2
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +0 -5
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +0 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +0 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.eeg +0 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +0 -18
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +0 -35
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +0 -13
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +0 -2
- py_neuromodulation/grid_cortex.tsv +0 -40
- py_neuromodulation/grid_subcortex.tsv +0 -1429
- py_neuromodulation/nm_settings.json +0 -290
- py_neuromodulation/plots/STN_surf.mat +0 -0
- py_neuromodulation/plots/Vertices.mat +0 -0
- py_neuromodulation/plots/faces.mat +0 -0
- py_neuromodulation/plots/grid.mat +0 -0
- py_neuromodulation/py_neuromodulation.egg-info/PKG-INFO +0 -104
- py_neuromodulation/py_neuromodulation.egg-info/dependency_links.txt +0 -1
- py_neuromodulation/py_neuromodulation.egg-info/requires.txt +0 -26
- py_neuromodulation/py_neuromodulation.egg-info/top_level.txt +0 -1
- py_neuromodulation-0.0.2.dist-info/RECORD +0 -73
- /py_neuromodulation/{py_neuromodulation.egg-info/SOURCES.txt → utils/__init__.py} +0 -0
- {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.3.dist-info}/LICENSE +0 -0
docs/source/conf.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------
import sys
import os
import json
import re

from sphinx_gallery.sorting import FileNameSortKey
import sphinx_gallery
import py_neuromodulation

print("CURRENT WORKING DIRECTORY")
print(os.getcwd())
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
if os.path.basename(SCRIPT_DIR) == "source":
    # this check is necessary, so we can also run the script from the root directory
    SCRIPT_DIR = os.path.join(
        os.path.dirname(os.path.dirname(SCRIPT_DIR)), "py_neuromodulation"
    )
print(f"Script Directory to add: {SCRIPT_DIR}")
sys.path.append(SCRIPT_DIR)

print(sys.path)

# NOTE(review): this value was previously assigned twice; a later
# `exclude_patterns = []` silently clobbered it. Keep a single assignment
# with the intended value.
exclude_patterns = ["_build", "_templates"]

# -- Project information -----------------------------------------------------
project = "py_neuromodulation"
copyright = "2021, Timon Merk"
author = "Timon Merk"

source_parsers = {
    ".md": "recommonmark.parser.CommonMarkParser",
}

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.autosectionlabel",
    "sphinx_gallery.gen_gallery",
    "sphinx_togglebutton",
]

source_suffix = [".rst", ".md"]

autosummary_generate = True

# Fix: `PYDEVD_DISABLE_FILE_VALIDATION = 1` was a plain Python assignment with
# no effect; the flag is an environment variable read by pydevd/debugpy.
os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1"

sphinx_gallery_conf = {
    "examples_dirs": "../../examples",
    "gallery_dirs": "auto_examples",
    # use the explicitly imported FileNameSortKey instead of the long
    # `sphinx_gallery.sorting.FileNameSortKey` attribute path
    "within_subsection_order": FileNameSortKey,
}

templates_path = ["_templates"]

html_theme = "pydata_sphinx_theme"
html_static_path = ["_static"]

html_theme_options = {
    "show_nav_level": 4,
    "icon_links": [
        dict(
            name="GitHub",
            url="https://github.com/neuromodulation/py_neuromodulation",
            icon="fa-brands fa-square-github",
        )
    ],
}

# -- Intersphinx configuration -----------------------------------------------

intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "numpy": ("https://numpy.org/doc/stable", None),
    "scipy": ("https://docs.scipy.org/doc/scipy", None),
    "matplotlib": ("https://matplotlib.org/stable", None),
    "numba": ("https://numba.readthedocs.io/en/latest", None),
    "mne": ("https://mne.tools/stable", None),
    "pandas": ("https://pandas.pydata.org/docs", None),
}
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
"""
|
|
2
|
+
First Demo
|
|
3
|
+
==========
|
|
4
|
+
|
|
5
|
+
This Demo will showcase the feature estimation and
|
|
6
|
+
exemplar analysis using simulated data.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import numpy as np
|
|
10
|
+
from matplotlib import pyplot as plt
|
|
11
|
+
|
|
12
|
+
import py_neuromodulation as nm
|
|
13
|
+
|
|
14
|
+
from py_neuromodulation import nm_analysis, nm_define_nmchannels, nm_plots
|
|
15
|
+
|
|
16
|
+
# %%
|
|
17
|
+
# Data Simulation
|
|
18
|
+
# ---------------
|
|
19
|
+
# We will now generate some exemplar data with 10 second duration for 6 channels with a sample rate of 1 kHz.
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def generate_random_walk(NUM_CHANNELS, TIME_DATA_SAMPLES):
    """Simulate multichannel random-walk data.

    Based on
    https://towardsdatascience.com/random-walks-with-python-8420981bc4bc

    Parameters
    ----------
    NUM_CHANNELS : int
        Number of independent channels (one random walk per channel).
    TIME_DATA_SAMPLES : int
        Number of samples per channel.

    Returns
    -------
    numpy.ndarray
        Array of shape (NUM_CHANNELS, TIME_DATA_SAMPLES); each row is the
        cumulative sum of unit steps drawn from {-1, 0, 1}.
    """
    dims = NUM_CHANNELS
    step_n = TIME_DATA_SAMPLES - 1
    step_set = [-1, 0, 1]
    # random start point per channel, uniform in [-0.5, 0.5)
    # (the original multiplied by a redundant factor of 1, dropped here)
    origin = np.random.random([1, dims]) - 0.5
    step_shape = (step_n, dims)
    steps = np.random.choice(a=step_set, size=step_shape)
    # prepend the origin and accumulate steps along the time axis
    path = np.concatenate([origin, steps]).cumsum(0)
    return path.T
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
NUM_CHANNELS = 6
|
|
35
|
+
sfreq = 1000
|
|
36
|
+
TIME_DATA_SAMPLES = 10 * sfreq
|
|
37
|
+
data = generate_random_walk(NUM_CHANNELS, TIME_DATA_SAMPLES)
|
|
38
|
+
time = np.arange(0, TIME_DATA_SAMPLES / sfreq, 1 / sfreq)
|
|
39
|
+
|
|
40
|
+
plt.figure(figsize=(8, 4), dpi=100)
|
|
41
|
+
for ch_idx in range(data.shape[0]):
|
|
42
|
+
plt.plot(time, data[ch_idx, :])
|
|
43
|
+
plt.xlabel("Time [s]")
|
|
44
|
+
plt.ylabel("Amplitude")
|
|
45
|
+
plt.title("Example random walk data")
|
|
46
|
+
|
|
47
|
+
# %%
|
|
48
|
+
# Now let’s define the necessary setup files we will be using for data
|
|
49
|
+
# preprocessing and feature estimation. Py_neuromodulation is based on two
|
|
50
|
+
# parametrization files: the *nm_channels.tsv* and the *nm_settings.json*.
|
|
51
|
+
#
|
|
52
|
+
# nm_channels
|
|
53
|
+
# ~~~~~~~~~~~
|
|
54
|
+
#
|
|
55
|
+
# The *nm_channel* dataframe. This dataframe contains the columns
|
|
56
|
+
#
|
|
57
|
+
# +-----------------------------------+-----------------------------------+
|
|
58
|
+
# | Column name | Description |
|
|
59
|
+
# +===================================+===================================+
|
|
60
|
+
# | **name** | name of the channel |
|
|
61
|
+
# +-----------------------------------+-----------------------------------+
|
|
62
|
+
# | **rereference** | different channel name for |
|
|
63
|
+
# | | bipolar re-referencing, or |
|
|
64
|
+
# | | average for common average |
|
|
65
|
+
# | | re-referencing |
|
|
66
|
+
# +-----------------------------------+-----------------------------------+
|
|
67
|
+
# | **used** | 0 or 1, channel selection |
|
|
68
|
+
# +-----------------------------------+-----------------------------------+
|
|
69
|
+
# | **target** | 0 or 1, for some decoding |
|
|
70
|
+
# | | applications we can define target |
|
|
71
|
+
# | | channels, e.g. EMG channels |
|
|
72
|
+
# +-----------------------------------+-----------------------------------+
|
|
73
|
+
# | **type** | channel type according to the |
|
|
74
|
+
# | | `mne-python`_ toolbox |
|
|
75
|
+
# | | |
|
|
76
|
+
# | | |
|
|
77
|
+
# | | |
|
|
78
|
+
# | | |
|
|
79
|
+
# | | e.g. ecog, eeg, ecg, emg, dbs, |
|
|
80
|
+
# | | seeg etc. |
|
|
81
|
+
# +-----------------------------------+-----------------------------------+
|
|
82
|
+
# | **status** | good or bad, used for channel |
|
|
83
|
+
# | | quality indication |
|
|
84
|
+
# +-----------------------------------+-----------------------------------+
|
|
85
|
+
# | **new_name** | this keyword can be specified to |
|
|
86
|
+
# | | indicate for example the used |
|
|
87
|
+
# | | rereferncing scheme |
|
|
88
|
+
# +-----------------------------------+-----------------------------------+
|
|
89
|
+
#
|
|
90
|
+
# .. _mne-python: https://mne.tools/stable/auto_tutorials/raw/10_raw_overview.html#sphx-glr-auto-tutorials-raw-10-raw-overview-py
|
|
91
|
+
#
|
|
92
|
+
# The :class:`~nm_stream_abc` can either be created as a *.tsv* text file, or as a pandas
|
|
93
|
+
# DataFrame. There are some helper functions that let you create the
|
|
94
|
+
# nm_channels without much effort:
|
|
95
|
+
|
|
96
|
+
nm_channels = nm_define_nmchannels.get_default_channels_from_data(
|
|
97
|
+
data, car_rereferencing=True
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
nm_channels
|
|
101
|
+
|
|
102
|
+
# %%
|
|
103
|
+
# Using this function default channel names and a common average re-referencing scheme is specified.
|
|
104
|
+
# Alternatively the *nm_define_nmchannels.set_channels* function can be used to pass each column values.
|
|
105
|
+
#
|
|
106
|
+
# nm_settings
|
|
107
|
+
# -----------
|
|
108
|
+
# Next, we will initialize the nm_settings dictionary and use the default settings, reset them, and enable a subset of features:
|
|
109
|
+
|
|
110
|
+
settings = nm.nm_settings.get_default_settings()
|
|
111
|
+
settings = nm.nm_settings.reset_settings(settings)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# %%
|
|
115
|
+
# The setting itself is a .json file which contains the parametrization for preprocessing, feature estimation, postprocessing and
|
|
116
|
+
# definition with which sampling rate features are being calculated.
|
|
117
|
+
# In this example `sampling_rate_features_hz` is specified to be 10 Hz, so every 100ms a new set of features is calculated.
|
|
118
|
+
#
|
|
119
|
+
# For many features the `segment_length_features_ms` specifies the time dimension of the raw signal being used for feature calculation. Here it is specified to be 1000 ms.
|
|
120
|
+
#
|
|
121
|
+
# We will now enable the features:
|
|
122
|
+
#
|
|
123
|
+
# * fft
|
|
124
|
+
# * bursts
|
|
125
|
+
# * sharpwave
|
|
126
|
+
#
|
|
127
|
+
# and stay with the default preprocessing methods:
|
|
128
|
+
#
|
|
129
|
+
# * notch_filter
|
|
130
|
+
# * re_referencing
|
|
131
|
+
#
|
|
132
|
+
# and use *z-score* postprocessing normalization.
|
|
133
|
+
|
|
134
|
+
settings["features"]["fft"] = True
|
|
135
|
+
settings["features"]["bursts"] = True
|
|
136
|
+
settings["features"]["sharpwave_analysis"] = True
|
|
137
|
+
|
|
138
|
+
# %%
|
|
139
|
+
# We are now ready to go to instantiate the *Stream* and call the *run* method for feature estimation:
|
|
140
|
+
|
|
141
|
+
stream = nm.Stream(
|
|
142
|
+
settings=settings,
|
|
143
|
+
nm_channels=nm_channels,
|
|
144
|
+
verbose=True,
|
|
145
|
+
sfreq=sfreq,
|
|
146
|
+
line_noise=50,
|
|
147
|
+
)
|
|
148
|
+
|
|
149
|
+
features = stream.run(data)
|
|
150
|
+
|
|
151
|
+
# %%
|
|
152
|
+
# Feature Analysis
|
|
153
|
+
# ----------------
|
|
154
|
+
#
|
|
155
|
+
# There is a lot of output, which we could omit by verbose being False, but let's have a look what was being computed.
|
|
156
|
+
# We will therefore use the :class:`~nm_analysis` class to showcase some functions. For multi-run -or subject analysis we will pass here the feature_file "sub" as default directory:
|
|
157
|
+
|
|
158
|
+
analyzer = nm_analysis.Feature_Reader(
|
|
159
|
+
feature_dir=stream.PATH_OUT, feature_file=stream.PATH_OUT_folder_name
|
|
160
|
+
)
|
|
161
|
+
|
|
162
|
+
# %%
|
|
163
|
+
# Let's have a look at the resulting "feature_arr" DataFrame:
|
|
164
|
+
|
|
165
|
+
analyzer.feature_arr.iloc[:10, :7]
|
|
166
|
+
|
|
167
|
+
# %%
|
|
168
|
+
# Seems like a lot of features were calculated. The `time` column tells us about each row time index.
|
|
169
|
+
# For the 6 specified channels, it is each 31 features.
|
|
170
|
+
# We can now use some in-built plotting functions for visualization.
|
|
171
|
+
#
|
|
172
|
+
# .. note::
|
|
173
|
+
#
|
|
174
|
+
# Due to the nature of simulated data, some of the features have constant values, which are not displayed through the image normalization.
|
|
175
|
+
#
|
|
176
|
+
#
|
|
177
|
+
|
|
178
|
+
analyzer.plot_all_features(ch_used="ch1")
|
|
179
|
+
|
|
180
|
+
# %%
|
|
181
|
+
nm_plots.plot_corr_matrix(
|
|
182
|
+
figsize=(25, 25),
|
|
183
|
+
show_plot=True,
|
|
184
|
+
feature=analyzer.feature_arr,
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
# %%
|
|
188
|
+
# The upper correlation matrix shows the correlation of every feature of every channel to every other.
|
|
189
|
+
# This notebook demonstrated a first demo how features can quickly be generated. For further feature modalities and decoding applications check out the next notebooks.
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ECoG Movement decoding example
|
|
3
|
+
==============================
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# %%
|
|
8
|
+
# This example notebook read openly accessible data from the publication
|
|
9
|
+
# *Electrocorticography is superior to subthalamic local field potentials
|
|
10
|
+
# for movement decoding in Parkinson’s disease*
|
|
11
|
+
# (`Merk et al. 2022 <https://elifesciences.org/articles/75126>`_).
|
|
12
|
+
# The dataset is available `here <https://doi.org/10.7910/DVN/IO2FLM>`_.
|
|
13
|
+
#
|
|
14
|
+
# For simplicity one example subject is automatically shipped within
|
|
15
|
+
# this repo at the *py_neuromodulation/data* folder, stored in
|
|
16
|
+
# `iEEG BIDS <https://www.nature.com/articles/s41597-019-0105-7>`_ format.
|
|
17
|
+
|
|
18
|
+
# %%
|
|
19
|
+
from sklearn import metrics, model_selection, linear_model
|
|
20
|
+
import matplotlib.pyplot as plt
|
|
21
|
+
|
|
22
|
+
import py_neuromodulation as nm
|
|
23
|
+
from py_neuromodulation import (
|
|
24
|
+
nm_analysis,
|
|
25
|
+
nm_decode,
|
|
26
|
+
nm_define_nmchannels,
|
|
27
|
+
nm_IO,
|
|
28
|
+
nm_plots,
|
|
29
|
+
nm_settings,
|
|
30
|
+
)
|
|
31
|
+
|
|
32
|
+
# %%
|
|
33
|
+
# Let's read the example using `mne_bids <https://mne.tools/mne-bids/stable/index.html>`_.
|
|
34
|
+
# The resulting raw object is of type `mne.RawArray <https://mne.tools/stable/generated/mne.io.RawArray.html>`_.
|
|
35
|
+
# We can use the properties such as sampling frequency, channel names, channel types all from the mne array and create the *nm_channels* DataFrame:
|
|
36
|
+
|
|
37
|
+
(
|
|
38
|
+
RUN_NAME,
|
|
39
|
+
PATH_RUN,
|
|
40
|
+
PATH_BIDS,
|
|
41
|
+
PATH_OUT,
|
|
42
|
+
datatype,
|
|
43
|
+
) = nm_IO.get_paths_example_data()
|
|
44
|
+
|
|
45
|
+
(
|
|
46
|
+
raw,
|
|
47
|
+
data,
|
|
48
|
+
sfreq,
|
|
49
|
+
line_noise,
|
|
50
|
+
coord_list,
|
|
51
|
+
coord_names,
|
|
52
|
+
) = nm_IO.read_BIDS_data(
|
|
53
|
+
PATH_RUN=PATH_RUN, BIDS_PATH=PATH_BIDS, datatype=datatype
|
|
54
|
+
)
|
|
55
|
+
|
|
56
|
+
nm_channels = nm_define_nmchannels.set_channels(
|
|
57
|
+
ch_names=raw.ch_names,
|
|
58
|
+
ch_types=raw.get_channel_types(),
|
|
59
|
+
reference="default",
|
|
60
|
+
bads=raw.info["bads"],
|
|
61
|
+
new_names="default",
|
|
62
|
+
used_types=("ecog", "dbs", "seeg"),
|
|
63
|
+
target_keywords=["MOV_RIGHT"],
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
nm_channels
|
|
67
|
+
|
|
68
|
+
# %%
|
|
69
|
+
# This example contains the grip force movement traces, we'll use the *MOV_RIGHT* channel as a decoding target channel.
|
|
70
|
+
# Let's check some of the raw feature and time series traces:
|
|
71
|
+
|
|
72
|
+
plt.figure(figsize=(12, 4), dpi=300)
|
|
73
|
+
plt.subplot(121)
|
|
74
|
+
plt.plot(raw.times, data[-1, :])
|
|
75
|
+
plt.xlabel("Time [s]")
|
|
76
|
+
plt.ylabel("a.u.")
|
|
77
|
+
plt.title("Movement label")
|
|
78
|
+
plt.xlim(0, 20)
|
|
79
|
+
|
|
80
|
+
plt.subplot(122)
|
|
81
|
+
for idx, ch_name in enumerate(nm_channels.query("used == 1").name):
|
|
82
|
+
plt.plot(raw.times, data[idx, :] + idx * 300, label=ch_name)
|
|
83
|
+
plt.legend(bbox_to_anchor=(1, 0.5), loc="center left")
|
|
84
|
+
plt.title("ECoG + STN-LFP time series")
|
|
85
|
+
plt.xlabel("Time [s]")
|
|
86
|
+
plt.ylabel("Voltage a.u.")
|
|
87
|
+
plt.xlim(0, 20)
|
|
88
|
+
|
|
89
|
+
# %%
|
|
90
|
+
settings = nm_settings.get_default_settings()
|
|
91
|
+
settings = nm_settings.set_settings_fast_compute(settings)
|
|
92
|
+
|
|
93
|
+
settings["features"]["welch"] = True
|
|
94
|
+
settings["features"]["fft"] = True
|
|
95
|
+
settings["features"]["bursts"] = True
|
|
96
|
+
settings["features"]["sharpwave_analysis"] = True
|
|
97
|
+
settings["features"]["coherence"] = True
|
|
98
|
+
settings["coherence"]["channels"] = [["LFP_RIGHT_0", "ECOG_RIGHT_0"]]
|
|
99
|
+
settings["coherence"]["frequency_bands"] = ["high beta", "low gamma"]
|
|
100
|
+
settings["sharpwave_analysis_settings"]["estimator"]["mean"] = []
|
|
101
|
+
for sw_feature in list(
|
|
102
|
+
settings["sharpwave_analysis_settings"]["sharpwave_features"].keys()
|
|
103
|
+
):
|
|
104
|
+
settings["sharpwave_analysis_settings"]["sharpwave_features"][
|
|
105
|
+
sw_feature
|
|
106
|
+
] = True
|
|
107
|
+
settings["sharpwave_analysis_settings"]["estimator"]["mean"].append(
|
|
108
|
+
sw_feature
|
|
109
|
+
)
|
|
110
|
+
|
|
111
|
+
# %%
|
|
112
|
+
stream = nm.Stream(
|
|
113
|
+
sfreq=sfreq,
|
|
114
|
+
nm_channels=nm_channels,
|
|
115
|
+
settings=settings,
|
|
116
|
+
line_noise=line_noise,
|
|
117
|
+
coord_list=coord_list,
|
|
118
|
+
coord_names=coord_names,
|
|
119
|
+
verbose=True,
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
# %%
|
|
123
|
+
features = stream.run(
|
|
124
|
+
data=data,
|
|
125
|
+
out_path_root=PATH_OUT,
|
|
126
|
+
folder_name=RUN_NAME,
|
|
127
|
+
)
|
|
128
|
+
|
|
129
|
+
# %%
|
|
130
|
+
# Feature Analysis Movement
|
|
131
|
+
# -------------------------
|
|
132
|
+
# The obtained performances can now be read and visualized using the :class:`nm_analysis.Feature_Reader`.
|
|
133
|
+
|
|
134
|
+
# initialize analyzer
|
|
135
|
+
feature_reader = nm_analysis.Feature_Reader(
|
|
136
|
+
feature_dir=PATH_OUT,
|
|
137
|
+
feature_file=RUN_NAME,
|
|
138
|
+
)
|
|
139
|
+
feature_reader.label_name = "MOV_RIGHT"
|
|
140
|
+
feature_reader.label = feature_reader.feature_arr["MOV_RIGHT"]
|
|
141
|
+
|
|
142
|
+
# %%
|
|
143
|
+
feature_reader.feature_arr.iloc[100:108, -6:]
|
|
144
|
+
|
|
145
|
+
# %%
|
|
146
|
+
print(feature_reader.feature_arr.shape)
|
|
147
|
+
|
|
148
|
+
# %%
|
|
149
|
+
feature_reader._get_target_ch()
|
|
150
|
+
|
|
151
|
+
# %%
|
|
152
|
+
feature_reader.plot_target_averaged_channel(
|
|
153
|
+
ch="ECOG_RIGHT_0",
|
|
154
|
+
list_feature_keywords=None,
|
|
155
|
+
epoch_len=4,
|
|
156
|
+
threshold=0.5,
|
|
157
|
+
ytick_labelsize=7,
|
|
158
|
+
figsize_x=12,
|
|
159
|
+
figsize_y=12,
|
|
160
|
+
)
|
|
161
|
+
|
|
162
|
+
# %%
|
|
163
|
+
feature_reader.plot_all_features(
|
|
164
|
+
ytick_labelsize=6,
|
|
165
|
+
clim_low=-2,
|
|
166
|
+
clim_high=2,
|
|
167
|
+
ch_used="ECOG_RIGHT_0",
|
|
168
|
+
time_limit_low_s=0,
|
|
169
|
+
time_limit_high_s=20,
|
|
170
|
+
normalize=True,
|
|
171
|
+
save=True,
|
|
172
|
+
)
|
|
173
|
+
|
|
174
|
+
# %%
|
|
175
|
+
nm_plots.plot_corr_matrix(
|
|
176
|
+
feature=feature_reader.feature_arr.filter(regex="ECOG_RIGHT_0"),
|
|
177
|
+
ch_name="ECOG_RIGHT_0-avgref",
|
|
178
|
+
feature_names=feature_reader.feature_arr.filter(
|
|
179
|
+
regex="ECOG_RIGHT_0-avgref"
|
|
180
|
+
).columns,
|
|
181
|
+
feature_file=feature_reader.feature_file,
|
|
182
|
+
show_plot=True,
|
|
183
|
+
figsize=(15, 15),
|
|
184
|
+
)
|
|
185
|
+
|
|
186
|
+
# %%
|
|
187
|
+
# Decoding
|
|
188
|
+
# --------
|
|
189
|
+
#
|
|
190
|
+
# The main focus of the *py_neuromodulation* pipeline is feature estimation.
|
|
191
|
+
# Nevertheless, the user can also use the pipeline for machine learning decoding.
|
|
192
|
+
# It can be used for regression and classification problems and also dimensionality reduction such as PCA and CCA.
|
|
193
|
+
#
|
|
194
|
+
# Here, we show an example using the XGBOOST classifier. The used labels came from a continuous grip force movement target, named "MOV_RIGHT".
|
|
195
|
+
#
|
|
196
|
+
# First we initialize the :class:`~nm_decode.Decoder` class, which the specified *validation method*, here being a simple 3-fold cross validation,
|
|
197
|
+
# the evaluation metric, used machine learning model, and the channels we want to evaluate performances for.
|
|
198
|
+
#
|
|
199
|
+
# There are many more implemented methods, but we will here limit it to the ones presented.
|
|
200
|
+
|
|
201
|
+
model = linear_model.LinearRegression()
|
|
202
|
+
|
|
203
|
+
feature_reader.decoder = nm_decode.Decoder(
|
|
204
|
+
features=feature_reader.feature_arr,
|
|
205
|
+
label=feature_reader.label,
|
|
206
|
+
label_name=feature_reader.label_name,
|
|
207
|
+
used_chs=feature_reader.used_chs,
|
|
208
|
+
model=model,
|
|
209
|
+
eval_method=metrics.r2_score,
|
|
210
|
+
cv_method=model_selection.KFold(n_splits=3, shuffle=True),
|
|
211
|
+
)
|
|
212
|
+
|
|
213
|
+
# %%
|
|
214
|
+
performances = feature_reader.run_ML_model(
|
|
215
|
+
estimate_channels=True,
|
|
216
|
+
estimate_gridpoints=False,
|
|
217
|
+
estimate_all_channels_combined=True,
|
|
218
|
+
save_results=True,
|
|
219
|
+
)
|
|
220
|
+
|
|
221
|
+
# %%
|
|
222
|
+
# The performances are a dictionary that can be transformed into a DataFrame:
|
|
223
|
+
|
|
224
|
+
df_per = feature_reader.get_dataframe_performances(performances)
|
|
225
|
+
|
|
226
|
+
df_per
|
|
227
|
+
|
|
228
|
+
# %%
|
|
229
|
+
ax = nm_plots.plot_df_subjects(
|
|
230
|
+
df_per,
|
|
231
|
+
x_col="sub",
|
|
232
|
+
y_col="performance_test",
|
|
233
|
+
hue="ch_type",
|
|
234
|
+
PATH_SAVE=PATH_OUT / RUN_NAME / (RUN_NAME + "_decoding_performance.png"),
|
|
235
|
+
figsize_tuple=(8, 5),
|
|
236
|
+
)
|
|
237
|
+
ax.set_ylabel(r"$R^2$ Correlation")
|
|
238
|
+
ax.set_xlabel("Subject 000")
|
|
239
|
+
ax.set_title("Performance comparison Movement decoding")
|
|
240
|
+
plt.tight_layout()
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"""
|
|
2
|
+
===================
|
|
3
|
+
Adding New Features
|
|
4
|
+
===================
|
|
5
|
+
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import py_neuromodulation as nm
|
|
9
|
+
from py_neuromodulation import nm_features_abc
|
|
10
|
+
import numpy as np
|
|
11
|
+
from typing import Iterable
|
|
12
|
+
|
|
13
|
+
# %%
|
|
14
|
+
# In this example we will demonstrate how a new feature can be added to the existing feature pipeline.
|
|
15
|
+
# This can be done simply by adding an object of the inherited :class:`~nm_features_abc.Feature`
|
|
16
|
+
# class to the stream `stream.run_analysis.features.features` list.
|
|
17
|
+
|
|
18
|
+
data = np.random.random([1, 1000])
|
|
19
|
+
|
|
20
|
+
stream = nm.Stream(
|
|
21
|
+
sfreq=1000,
|
|
22
|
+
data=data,
|
|
23
|
+
sampling_rate_features_hz=10,
|
|
24
|
+
verbose=False,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class NewFeature(nm_features_abc.Feature):
|
|
29
|
+
def __init__(
|
|
30
|
+
self, settings: dict, ch_names: Iterable[str], sfreq: float
|
|
31
|
+
) -> None:
|
|
32
|
+
self.s = settings
|
|
33
|
+
self.ch_names = ch_names
|
|
34
|
+
|
|
35
|
+
def calc_feature(self, data: np.array, features_compute: dict) -> dict:
|
|
36
|
+
for ch_idx, ch in enumerate(self.ch_names):
|
|
37
|
+
features_compute[f"new_feature_{ch}"] = np.mean(data[ch_idx, :])
|
|
38
|
+
|
|
39
|
+
return features_compute
|
|
40
|
+
|
|
41
|
+
def test_settings():
|
|
42
|
+
pass
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
newFeature = NewFeature(
|
|
46
|
+
stream.settings, list(stream.nm_channels["name"]), stream.sfreq
|
|
47
|
+
)
|
|
48
|
+
stream.run_analysis.features.features.append(newFeature)
|
|
49
|
+
|
|
50
|
+
features = stream.run_analysis.process(data)
|
|
51
|
+
feature_name = f"new_feature_{stream.nm_channels['name'][0]}"
|
|
52
|
+
|
|
53
|
+
print(f"{feature_name}: {features[feature_name]}")
|
|
54
|
+
|
|
55
|
+
# %%
|
|
56
|
+
# This example shows a simple newly instantiated feature class called `NewFeature`.
|
|
57
|
+
# The instantiated `newFeature` object could then be added to the existing feature list by calling
|
|
58
|
+
# `stream.run_analysis.features.features.append(newFeature)`.
|
|
59
|
+
#
|
|
60
|
+
# To permanently add a novel feature, the new feature class needs to be added to
|
|
61
|
+
# the :class:`~nm_features` class. This can be done by inserting the feature_name in
|
|
62
|
+
# in the :class:`~nm_features.Feature` init function:
|
|
63
|
+
#
|
|
64
|
+
# .. code-block:: python
|
|
65
|
+
#
|
|
66
|
+
# for feature in s["features"]:
|
|
67
|
+
# if s["features"][feature] is False:
|
|
68
|
+
# continue
|
|
69
|
+
# match feature:
|
|
70
|
+
# case "new_feature":
|
|
71
|
+
# FeatureClass = nm_new_feature.NewFeature
|
|
72
|
+
# ...
|
|
73
|
+
#
|
|
74
|
+
# The new feature class can then be used by setting the `settings["feature"]["new_feature"]` value in the
|
|
75
|
+
# settings to true.
|
|
76
|
+
#
|