py-neuromodulation 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry; it is provided for informational purposes only.
- py_neuromodulation/ConnectivityDecoding/Automated Anatomical Labeling 3 (Rolls 2020).nii +0 -0
- py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -0
- py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -0
- py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -0
- py_neuromodulation/ConnectivityDecoding/mni_coords_cortical_surface.mat +0 -0
- py_neuromodulation/ConnectivityDecoding/mni_coords_whole_brain.mat +0 -0
- py_neuromodulation/ConnectivityDecoding/rmap_func_all.nii +0 -0
- py_neuromodulation/ConnectivityDecoding/rmap_struc.nii +0 -0
- py_neuromodulation/FieldTrip.py +589 -589
- py_neuromodulation/__init__.py +74 -13
- py_neuromodulation/_write_example_dataset_helper.py +83 -65
- py_neuromodulation/data/README +6 -0
- py_neuromodulation/data/dataset_description.json +8 -0
- py_neuromodulation/data/participants.json +32 -0
- py_neuromodulation/data/participants.tsv +2 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.eeg +0 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -0
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -0
- py_neuromodulation/grid_cortex.tsv +40 -0
- py_neuromodulation/grid_subcortex.tsv +1429 -0
- py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
- py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
- py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
- py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/nm_IO.py +413 -417
- py_neuromodulation/nm_RMAP.py +496 -531
- py_neuromodulation/nm_analysis.py +993 -1074
- py_neuromodulation/nm_artifacts.py +30 -25
- py_neuromodulation/nm_bispectra.py +154 -168
- py_neuromodulation/nm_bursts.py +292 -198
- py_neuromodulation/nm_coherence.py +251 -205
- py_neuromodulation/nm_database.py +149 -0
- py_neuromodulation/nm_decode.py +918 -992
- py_neuromodulation/nm_define_nmchannels.py +300 -302
- py_neuromodulation/nm_features.py +144 -116
- py_neuromodulation/nm_filter.py +219 -219
- py_neuromodulation/nm_filter_preprocessing.py +79 -91
- py_neuromodulation/nm_fooof.py +139 -159
- py_neuromodulation/nm_generator.py +45 -37
- py_neuromodulation/nm_hjorth_raw.py +52 -73
- py_neuromodulation/nm_kalmanfilter.py +71 -58
- py_neuromodulation/nm_linelength.py +21 -33
- py_neuromodulation/nm_logger.py +66 -0
- py_neuromodulation/nm_mne_connectivity.py +149 -112
- py_neuromodulation/nm_mnelsl_generator.py +90 -0
- py_neuromodulation/nm_mnelsl_stream.py +116 -0
- py_neuromodulation/nm_nolds.py +96 -93
- py_neuromodulation/nm_normalization.py +173 -214
- py_neuromodulation/nm_oscillatory.py +423 -448
- py_neuromodulation/nm_plots.py +585 -612
- py_neuromodulation/nm_preprocessing.py +83 -0
- py_neuromodulation/nm_projection.py +370 -394
- py_neuromodulation/nm_rereference.py +97 -95
- py_neuromodulation/nm_resample.py +59 -50
- py_neuromodulation/nm_run_analysis.py +325 -435
- py_neuromodulation/nm_settings.py +289 -68
- py_neuromodulation/nm_settings.yaml +244 -0
- py_neuromodulation/nm_sharpwaves.py +423 -401
- py_neuromodulation/nm_stats.py +464 -480
- py_neuromodulation/nm_stream.py +398 -0
- py_neuromodulation/nm_stream_abc.py +166 -218
- py_neuromodulation/nm_types.py +193 -0
- py_neuromodulation/plots/STN_surf.mat +0 -0
- py_neuromodulation/plots/Vertices.mat +0 -0
- py_neuromodulation/plots/faces.mat +0 -0
- py_neuromodulation/plots/grid.mat +0 -0
- {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info}/METADATA +185 -182
- py_neuromodulation-0.0.5.dist-info/RECORD +83 -0
- {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info}/WHEEL +1 -2
- {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info/licenses}/LICENSE +21 -21
- docs/build/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -68
- docs/build/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -233
- docs/build/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
- docs/build/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -97
- docs/build/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
- docs/build/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -192
- docs/build/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
- docs/build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -68
- docs/build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -239
- docs/build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
- docs/build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -97
- docs/build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
- docs/build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -192
- docs/build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
- docs/source/_build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -76
- docs/source/_build/html/_downloads/0d0d0a76e8f648d5d3cbc47da6351932/plot_real_time_demo.py +0 -97
- docs/source/_build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -240
- docs/source/_build/html/_downloads/5d73cadc59a8805c47e3b84063afc157/plot_example_BIDS.py +0 -233
- docs/source/_build/html/_downloads/7660317fa5a6bfbd12fcca9961457fc4/plot_example_rmap_computing.py +0 -63
- docs/source/_build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
- docs/source/_build/html/_downloads/839e5b319379f7fd9e867deb00fd797f/plot_example_gridPointProjection.py +0 -210
- docs/source/_build/html/_downloads/ae8be19afe5e559f011fc9b138968ba0/plot_first_demo.py +0 -192
- docs/source/_build/html/_downloads/b8b06cacc17969d3725a0b6f1d7741c5/plot_example_sharpwave_analysis.py +0 -219
- docs/source/_build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -121
- docs/source/_build/html/_downloads/c31a86c0b68cb4167d968091ace8080d/plot_example_add_feature.py +0 -68
- docs/source/_build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
- docs/source/_build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -189
- docs/source/_build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
- docs/source/auto_examples/plot_0_first_demo.py +0 -189
- docs/source/auto_examples/plot_1_example_BIDS.py +0 -240
- docs/source/auto_examples/plot_2_example_add_feature.py +0 -76
- docs/source/auto_examples/plot_3_example_sharpwave_analysis.py +0 -219
- docs/source/auto_examples/plot_4_example_gridPointProjection.py +0 -210
- docs/source/auto_examples/plot_5_example_rmap_computing.py +0 -64
- docs/source/auto_examples/plot_6_real_time_demo.py +0 -121
- docs/source/conf.py +0 -105
- examples/plot_0_first_demo.py +0 -189
- examples/plot_1_example_BIDS.py +0 -240
- examples/plot_2_example_add_feature.py +0 -76
- examples/plot_3_example_sharpwave_analysis.py +0 -219
- examples/plot_4_example_gridPointProjection.py +0 -210
- examples/plot_5_example_rmap_computing.py +0 -64
- examples/plot_6_real_time_demo.py +0 -121
- packages/realtime_decoding/build/lib/realtime_decoding/__init__.py +0 -4
- packages/realtime_decoding/build/lib/realtime_decoding/decoder.py +0 -104
- packages/realtime_decoding/build/lib/realtime_decoding/features.py +0 -163
- packages/realtime_decoding/build/lib/realtime_decoding/helpers.py +0 -15
- packages/realtime_decoding/build/lib/realtime_decoding/run_decoding.py +0 -345
- packages/realtime_decoding/build/lib/realtime_decoding/trainer.py +0 -54
- packages/tmsi/build/lib/TMSiFileFormats/__init__.py +0 -37
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/__init__.py +0 -36
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/lsl_stream_writer.py +0 -200
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_file_writer.py +0 -496
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_to_edf_converter.py +0 -236
- packages/tmsi/build/lib/TMSiFileFormats/file_formats/xdf_file_writer.py +0 -977
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/__init__.py +0 -35
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/edf_reader.py +0 -116
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/poly5reader.py +0 -294
- packages/tmsi/build/lib/TMSiFileFormats/file_readers/xdf_reader.py +0 -229
- packages/tmsi/build/lib/TMSiFileFormats/file_writer.py +0 -102
- packages/tmsi/build/lib/TMSiPlotters/__init__.py +0 -2
- packages/tmsi/build/lib/TMSiPlotters/gui/__init__.py +0 -39
- packages/tmsi/build/lib/TMSiPlotters/gui/_plotter_gui.py +0 -234
- packages/tmsi/build/lib/TMSiPlotters/gui/plotting_gui.py +0 -440
- packages/tmsi/build/lib/TMSiPlotters/plotters/__init__.py +0 -44
- packages/tmsi/build/lib/TMSiPlotters/plotters/hd_emg_plotter.py +0 -446
- packages/tmsi/build/lib/TMSiPlotters/plotters/impedance_plotter.py +0 -589
- packages/tmsi/build/lib/TMSiPlotters/plotters/signal_plotter.py +0 -1326
- packages/tmsi/build/lib/TMSiSDK/__init__.py +0 -54
- packages/tmsi/build/lib/TMSiSDK/device.py +0 -588
- packages/tmsi/build/lib/TMSiSDK/devices/__init__.py +0 -34
- packages/tmsi/build/lib/TMSiSDK/devices/saga/TMSi_Device_API.py +0 -1764
- packages/tmsi/build/lib/TMSiSDK/devices/saga/__init__.py +0 -34
- packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_device.py +0 -1366
- packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_types.py +0 -520
- packages/tmsi/build/lib/TMSiSDK/devices/saga/xml_saga_config.py +0 -165
- packages/tmsi/build/lib/TMSiSDK/error.py +0 -95
- packages/tmsi/build/lib/TMSiSDK/sample_data.py +0 -63
- packages/tmsi/build/lib/TMSiSDK/sample_data_server.py +0 -99
- packages/tmsi/build/lib/TMSiSDK/settings.py +0 -45
- packages/tmsi/build/lib/TMSiSDK/tmsi_device.py +0 -111
- packages/tmsi/build/lib/__init__.py +0 -4
- packages/tmsi/build/lib/apex_sdk/__init__.py +0 -34
- packages/tmsi/build/lib/apex_sdk/device/__init__.py +0 -41
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API.py +0 -1009
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_enums.py +0 -239
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_structures.py +0 -668
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_device.py +0 -1611
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_dongle.py +0 -38
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_event_reader.py +0 -57
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_channel.py +0 -44
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_config.py +0 -150
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_const.py +0 -36
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_impedance_channel.py +0 -48
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_info.py +0 -108
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/dongle_info.py +0 -39
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/download_measurement.py +0 -77
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/eeg_measurement.py +0 -150
- packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/impedance_measurement.py +0 -129
- packages/tmsi/build/lib/apex_sdk/device/threads/conversion_thread.py +0 -59
- packages/tmsi/build/lib/apex_sdk/device/threads/sampling_thread.py +0 -57
- packages/tmsi/build/lib/apex_sdk/device/tmsi_channel.py +0 -83
- packages/tmsi/build/lib/apex_sdk/device/tmsi_device.py +0 -201
- packages/tmsi/build/lib/apex_sdk/device/tmsi_device_enums.py +0 -103
- packages/tmsi/build/lib/apex_sdk/device/tmsi_dongle.py +0 -43
- packages/tmsi/build/lib/apex_sdk/device/tmsi_event_reader.py +0 -50
- packages/tmsi/build/lib/apex_sdk/device/tmsi_measurement.py +0 -118
- packages/tmsi/build/lib/apex_sdk/sample_data_server/__init__.py +0 -33
- packages/tmsi/build/lib/apex_sdk/sample_data_server/event_data.py +0 -44
- packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data.py +0 -50
- packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data_server.py +0 -136
- packages/tmsi/build/lib/apex_sdk/tmsi_errors/error.py +0 -126
- packages/tmsi/build/lib/apex_sdk/tmsi_sdk.py +0 -113
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/apex/apex_structure_generator.py +0 -134
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/decorators.py +0 -60
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/logger_filter.py +0 -42
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/singleton.py +0 -42
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/support_functions.py +0 -72
- packages/tmsi/build/lib/apex_sdk/tmsi_utilities/tmsi_logger.py +0 -98
- py_neuromodulation/nm_EpochStream.py +0 -92
- py_neuromodulation/nm_across_patient_decoding.py +0 -927
- py_neuromodulation/nm_cohortwrapper.py +0 -435
- py_neuromodulation/nm_eval_timing.py +0 -239
- py_neuromodulation/nm_features_abc.py +0 -39
- py_neuromodulation/nm_stream_offline.py +0 -358
- py_neuromodulation/utils/_logging.py +0 -24
- py_neuromodulation-0.0.3.dist-info/RECORD +0 -188
- py_neuromodulation-0.0.3.dist-info/top_level.txt +0 -5
- tests/__init__.py +0 -0
- tests/conftest.py +0 -117
- tests/test_all_examples.py +0 -10
- tests/test_all_features.py +0 -63
- tests/test_bispectra.py +0 -70
- tests/test_bursts.py +0 -105
- tests/test_feature_sampling_rates.py +0 -143
- tests/test_fooof.py +0 -16
- tests/test_initalization_offline_stream.py +0 -41
- tests/test_multiprocessing.py +0 -58
- tests/test_nan_values.py +0 -29
- tests/test_nm_filter.py +0 -95
- tests/test_nm_resample.py +0 -63
- tests/test_normalization_settings.py +0 -146
- tests/test_notch_filter.py +0 -31
- tests/test_osc_features.py +0 -424
- tests/test_preprocessing_filter.py +0 -151
- tests/test_rereference.py +0 -171
- tests/test_sampling.py +0 -57
- tests/test_settings_change_after_init.py +0 -76
- tests/test_sharpwave.py +0 -165
- tests/test_target_channel_add.py +0 -100
- tests/test_timing.py +0 -80

py_neuromodulation/nm_run_analysis.py
@@ -1,435 +1,325 @@
-"""This module contains the class to process a given batch of data."""
[removed lines 2-326 (imports and class initialization) were not preserved in this diff rendering]
-    def process(self, data: np.ndarray) -> pd.Series:
-        """Given a new data batch, calculate and return features.
-
-        Parameters
-        ----------
-        data : np.ndarray
-            Current batch of raw data
-
-        Returns
-        -------
-        pandas Series
-            Features calculated from current data
-        """
-        start_time = time()
-
-        nan_channels = np.isnan(data).any(axis=1)
-
-        data = np.nan_to_num(data)[self.feature_idx, :]
-
-        for processor in self.preprocessors:
-            data = processor.process(data)
-
-        # calculate features
-        features_dict = self.features.estimate_features(data)
-
-        # normalize features
-        if self.settings["postprocessing"]["feature_normalization"]:
-            normed_features = self.feature_normalizer.process(
-                np.fromiter(features_dict.values(), dtype="float")
-            )
-            features_dict = {
-                key: normed_features[idx]
-                for idx, key in enumerate(features_dict.keys())
-            }
-
-        features_current = pd.Series(
-            data=list(features_dict.values()),
-            index=list(features_dict.keys()),
-            dtype=np.float64,
-        )
-
-        # project features to grid
-        if self.projection:
-            features_current = self.projection.project_features(
-                features_current
-            )
-
-        # check for all features, where the channel had a NaN, that the feature is also put to NaN
-        if nan_channels.sum() > 0:
-            for ch in list(np.array(self.ch_names_used)[nan_channels]):
-                features_current.loc[
-                    features_current.index.str.contains(ch)
-                ] = np.nan
-
-        if self.verbose is True:
-            logger.info(
-                "Last batch took: "
-                + str(np.round(time() - start_time, 2))
-                + " seconds"
-            )
-
-        return features_current
-
-    def save_sidecar(
-        self,
-        out_path_root: _PathLike,
-        folder_name: str,
-        additional_args: dict | None = None,
-    ) -> None:
-        """Save sidecar incuding fs, coords, sess_right to
-        out_path_root and subfolder 'folder_name'.
-        """
-        sidecar = {
-            "original_fs": self._sfreq_raw_orig,
-            "final_fs": self.sfreq_raw,
-            "sfreq": self.sfreq_features,
-        }
-        if self.projection:
-            sidecar["coords"] = self.projection.coords
-            if self.settings["postprocessing"]["project_cortex"]:
-                sidecar["grid_cortex"] = self.projection.grid_cortex
-                sidecar["proj_matrix_cortex"] = (
-                    self.projection.proj_matrix_cortex
-                )
-            if self.settings["postprocessing"]["project_subcortex"]:
-                sidecar["grid_subcortex"] = self.projection.grid_subcortex
-                sidecar["proj_matrix_subcortex"] = (
-                    self.projection.proj_matrix_subcortex
-                )
-        if additional_args is not None:
-            sidecar = sidecar | additional_args
-
-        nm_IO.save_sidecar(sidecar, out_path_root, folder_name)
-
-    def save_settings(self, out_path_root: _PathLike, folder_name: str) -> None:
-        nm_IO.save_settings(self.settings, out_path_root, folder_name)
-
-    def save_nm_channels(
-        self, out_path_root: _PathLike, folder_name: str
-    ) -> None:
-        nm_IO.save_nm_channels(self.nm_channels, out_path_root, folder_name)
-
-    def save_features(
-        self,
-        out_path_root: _PathLike,
-        folder_name: str,
-        feature_arr: pd.DataFrame,
-    ) -> None:
-        nm_IO.save_features(feature_arr, out_path_root, folder_name)
+"""This module contains the class to process a given batch of data."""
+
+from time import time
+import numpy as np
+import pandas as pd
+
+from py_neuromodulation import nm_IO, logger
+from py_neuromodulation.nm_types import _PathLike
+from py_neuromodulation.nm_features import FeatureProcessors
+from py_neuromodulation.nm_preprocessing import NMPreprocessors
+from py_neuromodulation.nm_projection import Projection
+from py_neuromodulation.nm_settings import NMSettings
+
+
+class DataProcessor:
+    def __init__(
+        self,
+        sfreq: float,
+        settings: NMSettings | _PathLike,
+        nm_channels: pd.DataFrame | _PathLike,
+        coord_names: list | None = None,
+        coord_list: list | None = None,
+        line_noise: float | None = None,
+        path_grids: _PathLike | None = None,
+        verbose: bool = True,
+    ) -> None:
+        """Initialize run class.
+
+        Parameters
+        ----------
+        settings : nm_settings.NMSettings object
+        nm_channels : pd.DataFrame | _PathLike
+            Initialized pd.DataFrame with channel specific information.
+            The path to a nm_channels.csv can be also passed.
+        coord_names : list | None
+            list of coordinate names
+        coord_list : list | None
+            list of list of 3D coordinates
+        path_grids : _PathLike | None
+            List to grid_cortex.tsv and grid_subcortex.tsv for grid point projection
+        verbose : boolean
+            if True, log signal processed and computation time
+        """
+        self.settings = NMSettings.load(settings)
+        self.nm_channels = self._load_nm_channels(nm_channels)
+
+        self.sfreq_features: float = self.settings.sampling_rate_features_hz
+        self._sfreq_raw_orig: float = sfreq
+        self.sfreq_raw: float = sfreq // 1
+        self.line_noise: float | None = line_noise
+        self.path_grids: _PathLike | None = path_grids
+        self.verbose: bool = verbose
+
+        self.features_previous = None
+
+        (self.ch_names_used, _, self.feature_idx, _) = self._get_ch_info()
+
+        self.preprocessors = NMPreprocessors(
+            settings=self.settings,
+            nm_channels=self.nm_channels,
+            sfreq=self.sfreq_raw,
+            line_noise=self.line_noise,
+        )
+
+        if self.settings.postprocessing.feature_normalization:
+            from py_neuromodulation.nm_normalization import FeatureNormalizer
+
+            self.feature_normalizer = FeatureNormalizer(self.settings)
+
+        self.features = FeatureProcessors(
+            settings=self.settings,
+            ch_names=self.ch_names_used,
+            sfreq=self.sfreq_raw,
+        )
+
+        if coord_list is not None and coord_names is not None:
+            self.coords = self._set_coords(
+                coord_names=coord_names, coord_list=coord_list
+            )
+
+        self.projection = self._get_projection(self.settings, self.nm_channels)
+
+        self.cnt_samples = 0
+
+    @staticmethod
+    def _add_coordinates(coord_names: list[str], coord_list: list) -> dict:
+        """Write cortical and subcortical coordinate information in joint dictionary
+
+        Parameters
+        ----------
+        coord_names : list[str]
+            list of coordinate names
+        coord_list : list
+            list of list of 3D coordinates
+
+        Returns
+        -------
+        dict with (sub)cortex_left and (sub)cortex_right ch_names and positions
+        """
+
+        def is_left_coord(val: float, coord_region: str) -> bool:
+            if coord_region.split("_")[1] == "left":
+                return val < 0
+            return val > 0
+
+        coords: dict[str, dict[str, list | np.ndarray]] = {}
+
+        for coord_region in [
+            coord_loc + "_" + lat
+            for coord_loc in ["cortex", "subcortex"]
+            for lat in ["left", "right"]
+        ]:
+            coords[coord_region] = {}
+
+            ch_type = "ECOG" if "cortex" == coord_region.split("_")[0] else "LFP"
+
+            coords[coord_region]["ch_names"] = [
+                coord_name
+                for coord_name, ch in zip(coord_names, coord_list)
+                if is_left_coord(ch[0], coord_region) and (ch_type in coord_name)
+            ]
+
+            # multiply by 1000 to get m instead of mm
+            positions = []
+            for coord, coord_name in zip(coord_list, coord_names):
+                if is_left_coord(coord[0], coord_region) and (ch_type in coord_name):
+                    positions.append(coord)
+            coords[coord_region]["positions"] = (
+                np.array(positions, dtype=np.float64) * 1000
+            )
+
+        return coords
+
+    def _get_ch_info(
+        self,
+    ) -> tuple[list[str], list[str], list[int], np.ndarray]:
+        """Get used feature and label info from nm_channels"""
+        nm_channels = self.nm_channels
+        ch_names_used = nm_channels[nm_channels["used"] == 1]["new_name"].tolist()
+        ch_types_used = nm_channels[nm_channels["used"] == 1]["type"].tolist()
+
+        # used channels for feature estimation
+        feature_idx = np.where(nm_channels["used"] & ~nm_channels["target"])[0].tolist()
+
+        # If multiple targets exist, select only the first
+        label_idx = np.where(nm_channels["target"] == 1)[0]
+
+        return ch_names_used, ch_types_used, feature_idx, label_idx
+
+    @staticmethod
+    def _get_grids(
+        settings: "NMSettings",
+        path_grids: _PathLike | None,
+    ) -> tuple[pd.DataFrame | None, pd.DataFrame | None]:
+        """Read settings specified grids
+
+        Parameters
+        ----------
+        settings : nm_settings.NMSettings object
+        path_grids : _PathLike | str
+
+        Returns
+        -------
+        Tuple
+            grid_cortex, grid_subcortex,
+            might be None if not specified in settings
+        """
+        if settings.postprocessing.project_cortex:
+            grid_cortex = nm_IO.read_grid(path_grids, "cortex")
+        else:
+            grid_cortex = None
+        if settings.postprocessing.project_subcortex:
+            grid_subcortex = nm_IO.read_grid(path_grids, "subcortex")
+        else:
+            grid_subcortex = None
+        return grid_cortex, grid_subcortex
+
+    def _get_projection(
+        self, settings: "NMSettings", nm_channels: pd.DataFrame
+    ) -> Projection | None:
+        """Return projection of used coordinated and grids"""
+
+        if not any(
+            (
+                settings.postprocessing.project_cortex,
+                settings.postprocessing.project_subcortex,
+            )
+        ):
+            return None
+
+        grid_cortex, grid_subcortex = self._get_grids(self.settings, self.path_grids)
+        projection = Projection(
+            settings=settings,
+            grid_cortex=grid_cortex,
+            grid_subcortex=grid_subcortex,
+            coords=self.coords,
+            nm_channels=nm_channels,
+            plot_projection=False,
+        )
+        return projection
+
+    @staticmethod
+    def _load_nm_channels(
+        nm_channels: pd.DataFrame | _PathLike,
+    ) -> pd.DataFrame:
+        if not isinstance(nm_channels, pd.DataFrame):
+            return nm_IO.load_nm_channels(nm_channels)
+        return nm_channels
+
+    def _set_coords(
+        self, coord_names: list[str] | None, coord_list: list | None
+    ) -> dict:
+        if not any(
+            (
+                self.settings.postprocessing.project_cortex,
+                self.settings.postprocessing.project_subcortex,
+            )
+        ):
+            return {}
+
+        if any((coord_list is None, coord_names is None)):
+            raise ValueError(
+                "No coordinates could be loaded. Please provide coord_list and"
+                f" coord_names. Got: {coord_list=}, {coord_names=}."
+            )
+
+        return self._add_coordinates(
+            coord_names=coord_names,
+            coord_list=coord_list,  # type: ignore # None case handled above
+        )
+
+    def process(self, data: np.ndarray) -> dict[str, float]:
+        """Given a new data batch, calculate and return features.
+
+        Parameters
+        ----------
+        data : np.ndarray
+            Current batch of raw data
+
+        Returns
+        -------
+        pandas Series
+            Features calculated from current data
+        """
+        start_time = time()
+
+        nan_channels = np.isnan(data).any(axis=1)
+
+        data = np.nan_to_num(data)[self.feature_idx, :]
+
+        data = self.preprocessors.process_data(data)
+
+        # calculate features
+        features_dict = self.features.estimate_features(data)
+
+        # normalize features
+        if self.settings.postprocessing.feature_normalization:
+            normed_features = self.feature_normalizer.process(
+                np.fromiter(features_dict.values(), dtype=np.float64)
+            )
+            features_dict = {
+                key: normed_features[idx]
+                for idx, key in enumerate(features_dict.keys())
+            }
+
+        # project features to grid
+        if self.projection:
+            self.projection.project_features(features_dict)
+
+        # check for all features, where the channel had a NaN, that the feature is also put to NaN
+        if nan_channels.sum() > 0:
+            # TONI: no need to do this if we store both old and new names for the channels
+            new_nan_channels = []
+            for ch in list(np.array(self.ch_names_used)[nan_channels]):
+                for key in features_dict.keys():
+                    if ch in key:
+                        new_nan_channels.append(key)
+
+            for ch in new_nan_channels:
+                features_dict[ch] = np.nan
+
+        if self.verbose:
+            logger.info("Last batch took: %.3f seconds to process", time() - start_time)
+
+        return features_dict
+
+    def save_sidecar(
+        self,
+        out_dir: _PathLike,
+        prefix: str = "",
+        additional_args: dict | None = None,
+    ) -> None:
+        """Save sidecar incuding fs, coords, sess_right to out_dir."""
+
+        sidecar: dict = {
+            "original_fs": self._sfreq_raw_orig,
+            "final_fs": self.sfreq_raw,
+            "sfreq": self.sfreq_features,
+        }
+        if self.projection:
+            sidecar["coords"] = self.projection.coords
+            if self.settings.postprocessing.project_cortex:
+                sidecar["grid_cortex"] = self.projection.grid_cortex
+                sidecar["proj_matrix_cortex"] = self.projection.proj_matrix_cortex
+            if self.settings.postprocessing.project_subcortex:
+                sidecar["grid_subcortex"] = self.projection.grid_subcortex
+                sidecar["proj_matrix_subcortex"] = self.projection.proj_matrix_subcortex
+        if additional_args is not None:
+            sidecar = sidecar | additional_args
+
+        nm_IO.save_sidecar(sidecar, out_dir, prefix)
+
+    def save_settings(self, out_dir: _PathLike, prefix: str = "") -> None:
+        self.settings.save(out_dir, prefix)
+
+    def save_nm_channels(self, out_dir: _PathLike, prefix: str = "") -> None:
+        nm_IO.save_nm_channels(self.nm_channels, out_dir, prefix)
+
+    def save_features(
+        self,
+        feature_arr: pd.DataFrame,
+        out_dir: _PathLike = "",
+        prefix: str = "",
+    ) -> None:
+        nm_IO.save_features(feature_arr, out_dir, prefix)