py-neuromodulation 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (233) hide show
  1. py_neuromodulation/ConnectivityDecoding/Automated Anatomical Labeling 3 (Rolls 2020).nii +0 -0
  2. py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -0
  3. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -0
  4. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -0
  5. py_neuromodulation/ConnectivityDecoding/mni_coords_cortical_surface.mat +0 -0
  6. py_neuromodulation/ConnectivityDecoding/mni_coords_whole_brain.mat +0 -0
  7. py_neuromodulation/ConnectivityDecoding/rmap_func_all.nii +0 -0
  8. py_neuromodulation/ConnectivityDecoding/rmap_struc.nii +0 -0
  9. py_neuromodulation/FieldTrip.py +589 -589
  10. py_neuromodulation/__init__.py +74 -13
  11. py_neuromodulation/_write_example_dataset_helper.py +83 -65
  12. py_neuromodulation/data/README +6 -0
  13. py_neuromodulation/data/dataset_description.json +8 -0
  14. py_neuromodulation/data/participants.json +32 -0
  15. py_neuromodulation/data/participants.tsv +2 -0
  16. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -0
  17. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -0
  18. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -0
  19. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.eeg +0 -0
  20. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -0
  21. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -0
  22. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -0
  23. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -0
  24. py_neuromodulation/grid_cortex.tsv +40 -0
  25. py_neuromodulation/grid_subcortex.tsv +1429 -0
  26. py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
  27. py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
  28. py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
  29. py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
  30. py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
  31. py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
  32. py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
  33. py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
  34. py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
  35. py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
  36. py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
  37. py_neuromodulation/nm_IO.py +413 -417
  38. py_neuromodulation/nm_RMAP.py +496 -531
  39. py_neuromodulation/nm_analysis.py +993 -1074
  40. py_neuromodulation/nm_artifacts.py +30 -25
  41. py_neuromodulation/nm_bispectra.py +154 -168
  42. py_neuromodulation/nm_bursts.py +292 -198
  43. py_neuromodulation/nm_coherence.py +251 -205
  44. py_neuromodulation/nm_database.py +149 -0
  45. py_neuromodulation/nm_decode.py +918 -992
  46. py_neuromodulation/nm_define_nmchannels.py +300 -302
  47. py_neuromodulation/nm_features.py +144 -116
  48. py_neuromodulation/nm_filter.py +219 -219
  49. py_neuromodulation/nm_filter_preprocessing.py +79 -91
  50. py_neuromodulation/nm_fooof.py +139 -159
  51. py_neuromodulation/nm_generator.py +45 -37
  52. py_neuromodulation/nm_hjorth_raw.py +52 -73
  53. py_neuromodulation/nm_kalmanfilter.py +71 -58
  54. py_neuromodulation/nm_linelength.py +21 -33
  55. py_neuromodulation/nm_logger.py +66 -0
  56. py_neuromodulation/nm_mne_connectivity.py +149 -112
  57. py_neuromodulation/nm_mnelsl_generator.py +90 -0
  58. py_neuromodulation/nm_mnelsl_stream.py +116 -0
  59. py_neuromodulation/nm_nolds.py +96 -93
  60. py_neuromodulation/nm_normalization.py +173 -214
  61. py_neuromodulation/nm_oscillatory.py +423 -448
  62. py_neuromodulation/nm_plots.py +585 -612
  63. py_neuromodulation/nm_preprocessing.py +83 -0
  64. py_neuromodulation/nm_projection.py +370 -394
  65. py_neuromodulation/nm_rereference.py +97 -95
  66. py_neuromodulation/nm_resample.py +59 -50
  67. py_neuromodulation/nm_run_analysis.py +325 -435
  68. py_neuromodulation/nm_settings.py +289 -68
  69. py_neuromodulation/nm_settings.yaml +244 -0
  70. py_neuromodulation/nm_sharpwaves.py +423 -401
  71. py_neuromodulation/nm_stats.py +464 -480
  72. py_neuromodulation/nm_stream.py +398 -0
  73. py_neuromodulation/nm_stream_abc.py +166 -218
  74. py_neuromodulation/nm_types.py +193 -0
  75. py_neuromodulation/plots/STN_surf.mat +0 -0
  76. py_neuromodulation/plots/Vertices.mat +0 -0
  77. py_neuromodulation/plots/faces.mat +0 -0
  78. py_neuromodulation/plots/grid.mat +0 -0
  79. {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info}/METADATA +185 -182
  80. py_neuromodulation-0.0.5.dist-info/RECORD +83 -0
  81. {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info}/WHEEL +1 -2
  82. {py_neuromodulation-0.0.3.dist-info → py_neuromodulation-0.0.5.dist-info/licenses}/LICENSE +21 -21
  83. docs/build/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -68
  84. docs/build/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -233
  85. docs/build/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
  86. docs/build/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -97
  87. docs/build/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
  88. docs/build/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -192
  89. docs/build/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
  90. docs/build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -68
  91. docs/build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -239
  92. docs/build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
  93. docs/build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -97
  94. docs/build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
  95. docs/build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -192
  96. docs/build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
  97. docs/source/_build/html/_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py +0 -76
  98. docs/source/_build/html/_downloads/0d0d0a76e8f648d5d3cbc47da6351932/plot_real_time_demo.py +0 -97
  99. docs/source/_build/html/_downloads/3b4900a2b2818ff30362215b76f7d5eb/plot_1_example_BIDS.py +0 -240
  100. docs/source/_build/html/_downloads/5d73cadc59a8805c47e3b84063afc157/plot_example_BIDS.py +0 -233
  101. docs/source/_build/html/_downloads/7660317fa5a6bfbd12fcca9961457fc4/plot_example_rmap_computing.py +0 -63
  102. docs/source/_build/html/_downloads/7e92dd2e6cc86b239d14cafad972ae4f/plot_3_example_sharpwave_analysis.py +0 -219
  103. docs/source/_build/html/_downloads/839e5b319379f7fd9e867deb00fd797f/plot_example_gridPointProjection.py +0 -210
  104. docs/source/_build/html/_downloads/ae8be19afe5e559f011fc9b138968ba0/plot_first_demo.py +0 -192
  105. docs/source/_build/html/_downloads/b8b06cacc17969d3725a0b6f1d7741c5/plot_example_sharpwave_analysis.py +0 -219
  106. docs/source/_build/html/_downloads/c2db0bf2b334d541b00662b991682256/plot_6_real_time_demo.py +0 -121
  107. docs/source/_build/html/_downloads/c31a86c0b68cb4167d968091ace8080d/plot_example_add_feature.py +0 -68
  108. docs/source/_build/html/_downloads/ce3914826f782cbd1ea8fd024eaf0ac3/plot_5_example_rmap_computing.py +0 -64
  109. docs/source/_build/html/_downloads/da36848a41e6a3235d91fb7cfb6d59b4/plot_0_first_demo.py +0 -189
  110. docs/source/_build/html/_downloads/eaa4305c75b19a1e2eea941f742a6331/plot_4_example_gridPointProjection.py +0 -210
  111. docs/source/auto_examples/plot_0_first_demo.py +0 -189
  112. docs/source/auto_examples/plot_1_example_BIDS.py +0 -240
  113. docs/source/auto_examples/plot_2_example_add_feature.py +0 -76
  114. docs/source/auto_examples/plot_3_example_sharpwave_analysis.py +0 -219
  115. docs/source/auto_examples/plot_4_example_gridPointProjection.py +0 -210
  116. docs/source/auto_examples/plot_5_example_rmap_computing.py +0 -64
  117. docs/source/auto_examples/plot_6_real_time_demo.py +0 -121
  118. docs/source/conf.py +0 -105
  119. examples/plot_0_first_demo.py +0 -189
  120. examples/plot_1_example_BIDS.py +0 -240
  121. examples/plot_2_example_add_feature.py +0 -76
  122. examples/plot_3_example_sharpwave_analysis.py +0 -219
  123. examples/plot_4_example_gridPointProjection.py +0 -210
  124. examples/plot_5_example_rmap_computing.py +0 -64
  125. examples/plot_6_real_time_demo.py +0 -121
  126. packages/realtime_decoding/build/lib/realtime_decoding/__init__.py +0 -4
  127. packages/realtime_decoding/build/lib/realtime_decoding/decoder.py +0 -104
  128. packages/realtime_decoding/build/lib/realtime_decoding/features.py +0 -163
  129. packages/realtime_decoding/build/lib/realtime_decoding/helpers.py +0 -15
  130. packages/realtime_decoding/build/lib/realtime_decoding/run_decoding.py +0 -345
  131. packages/realtime_decoding/build/lib/realtime_decoding/trainer.py +0 -54
  132. packages/tmsi/build/lib/TMSiFileFormats/__init__.py +0 -37
  133. packages/tmsi/build/lib/TMSiFileFormats/file_formats/__init__.py +0 -36
  134. packages/tmsi/build/lib/TMSiFileFormats/file_formats/lsl_stream_writer.py +0 -200
  135. packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_file_writer.py +0 -496
  136. packages/tmsi/build/lib/TMSiFileFormats/file_formats/poly5_to_edf_converter.py +0 -236
  137. packages/tmsi/build/lib/TMSiFileFormats/file_formats/xdf_file_writer.py +0 -977
  138. packages/tmsi/build/lib/TMSiFileFormats/file_readers/__init__.py +0 -35
  139. packages/tmsi/build/lib/TMSiFileFormats/file_readers/edf_reader.py +0 -116
  140. packages/tmsi/build/lib/TMSiFileFormats/file_readers/poly5reader.py +0 -294
  141. packages/tmsi/build/lib/TMSiFileFormats/file_readers/xdf_reader.py +0 -229
  142. packages/tmsi/build/lib/TMSiFileFormats/file_writer.py +0 -102
  143. packages/tmsi/build/lib/TMSiPlotters/__init__.py +0 -2
  144. packages/tmsi/build/lib/TMSiPlotters/gui/__init__.py +0 -39
  145. packages/tmsi/build/lib/TMSiPlotters/gui/_plotter_gui.py +0 -234
  146. packages/tmsi/build/lib/TMSiPlotters/gui/plotting_gui.py +0 -440
  147. packages/tmsi/build/lib/TMSiPlotters/plotters/__init__.py +0 -44
  148. packages/tmsi/build/lib/TMSiPlotters/plotters/hd_emg_plotter.py +0 -446
  149. packages/tmsi/build/lib/TMSiPlotters/plotters/impedance_plotter.py +0 -589
  150. packages/tmsi/build/lib/TMSiPlotters/plotters/signal_plotter.py +0 -1326
  151. packages/tmsi/build/lib/TMSiSDK/__init__.py +0 -54
  152. packages/tmsi/build/lib/TMSiSDK/device.py +0 -588
  153. packages/tmsi/build/lib/TMSiSDK/devices/__init__.py +0 -34
  154. packages/tmsi/build/lib/TMSiSDK/devices/saga/TMSi_Device_API.py +0 -1764
  155. packages/tmsi/build/lib/TMSiSDK/devices/saga/__init__.py +0 -34
  156. packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_device.py +0 -1366
  157. packages/tmsi/build/lib/TMSiSDK/devices/saga/saga_types.py +0 -520
  158. packages/tmsi/build/lib/TMSiSDK/devices/saga/xml_saga_config.py +0 -165
  159. packages/tmsi/build/lib/TMSiSDK/error.py +0 -95
  160. packages/tmsi/build/lib/TMSiSDK/sample_data.py +0 -63
  161. packages/tmsi/build/lib/TMSiSDK/sample_data_server.py +0 -99
  162. packages/tmsi/build/lib/TMSiSDK/settings.py +0 -45
  163. packages/tmsi/build/lib/TMSiSDK/tmsi_device.py +0 -111
  164. packages/tmsi/build/lib/__init__.py +0 -4
  165. packages/tmsi/build/lib/apex_sdk/__init__.py +0 -34
  166. packages/tmsi/build/lib/apex_sdk/device/__init__.py +0 -41
  167. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API.py +0 -1009
  168. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_enums.py +0 -239
  169. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_API_structures.py +0 -668
  170. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_device.py +0 -1611
  171. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_dongle.py +0 -38
  172. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_event_reader.py +0 -57
  173. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_channel.py +0 -44
  174. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_config.py +0 -150
  175. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_const.py +0 -36
  176. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_impedance_channel.py +0 -48
  177. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/apex_info.py +0 -108
  178. packages/tmsi/build/lib/apex_sdk/device/devices/apex/apex_structures/dongle_info.py +0 -39
  179. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/download_measurement.py +0 -77
  180. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/eeg_measurement.py +0 -150
  181. packages/tmsi/build/lib/apex_sdk/device/devices/apex/measurements/impedance_measurement.py +0 -129
  182. packages/tmsi/build/lib/apex_sdk/device/threads/conversion_thread.py +0 -59
  183. packages/tmsi/build/lib/apex_sdk/device/threads/sampling_thread.py +0 -57
  184. packages/tmsi/build/lib/apex_sdk/device/tmsi_channel.py +0 -83
  185. packages/tmsi/build/lib/apex_sdk/device/tmsi_device.py +0 -201
  186. packages/tmsi/build/lib/apex_sdk/device/tmsi_device_enums.py +0 -103
  187. packages/tmsi/build/lib/apex_sdk/device/tmsi_dongle.py +0 -43
  188. packages/tmsi/build/lib/apex_sdk/device/tmsi_event_reader.py +0 -50
  189. packages/tmsi/build/lib/apex_sdk/device/tmsi_measurement.py +0 -118
  190. packages/tmsi/build/lib/apex_sdk/sample_data_server/__init__.py +0 -33
  191. packages/tmsi/build/lib/apex_sdk/sample_data_server/event_data.py +0 -44
  192. packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data.py +0 -50
  193. packages/tmsi/build/lib/apex_sdk/sample_data_server/sample_data_server.py +0 -136
  194. packages/tmsi/build/lib/apex_sdk/tmsi_errors/error.py +0 -126
  195. packages/tmsi/build/lib/apex_sdk/tmsi_sdk.py +0 -113
  196. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/apex/apex_structure_generator.py +0 -134
  197. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/decorators.py +0 -60
  198. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/logger_filter.py +0 -42
  199. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/singleton.py +0 -42
  200. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/support_functions.py +0 -72
  201. packages/tmsi/build/lib/apex_sdk/tmsi_utilities/tmsi_logger.py +0 -98
  202. py_neuromodulation/nm_EpochStream.py +0 -92
  203. py_neuromodulation/nm_across_patient_decoding.py +0 -927
  204. py_neuromodulation/nm_cohortwrapper.py +0 -435
  205. py_neuromodulation/nm_eval_timing.py +0 -239
  206. py_neuromodulation/nm_features_abc.py +0 -39
  207. py_neuromodulation/nm_stream_offline.py +0 -358
  208. py_neuromodulation/utils/_logging.py +0 -24
  209. py_neuromodulation-0.0.3.dist-info/RECORD +0 -188
  210. py_neuromodulation-0.0.3.dist-info/top_level.txt +0 -5
  211. tests/__init__.py +0 -0
  212. tests/conftest.py +0 -117
  213. tests/test_all_examples.py +0 -10
  214. tests/test_all_features.py +0 -63
  215. tests/test_bispectra.py +0 -70
  216. tests/test_bursts.py +0 -105
  217. tests/test_feature_sampling_rates.py +0 -143
  218. tests/test_fooof.py +0 -16
  219. tests/test_initalization_offline_stream.py +0 -41
  220. tests/test_multiprocessing.py +0 -58
  221. tests/test_nan_values.py +0 -29
  222. tests/test_nm_filter.py +0 -95
  223. tests/test_nm_resample.py +0 -63
  224. tests/test_normalization_settings.py +0 -146
  225. tests/test_notch_filter.py +0 -31
  226. tests/test_osc_features.py +0 -424
  227. tests/test_preprocessing_filter.py +0 -151
  228. tests/test_rereference.py +0 -171
  229. tests/test_sampling.py +0 -57
  230. tests/test_settings_change_after_init.py +0 -76
  231. tests/test_sharpwave.py +0 -165
  232. tests/test_target_channel_add.py +0 -100
  233. tests/test_timing.py +0 -80
@@ -1,93 +1,96 @@
1
- import numpy as np
2
- from typing import Iterable
3
- import nolds
4
- import warnings
5
-
6
- from py_neuromodulation import nm_features_abc, nm_oscillatory
7
-
8
-
9
- class Nolds(nm_features_abc.Feature):
10
- def __init__(
11
- self, settings: dict, ch_names: Iterable[str], sfreq: float
12
- ) -> None:
13
- self.s = settings
14
- self.ch_names = ch_names
15
-
16
- if len(self.s["nolds_features"]["data"]["frequency_bands"]) > 0:
17
- self.bp_filter = nm_oscillatory.BandPower(
18
- settings, ch_names, sfreq, use_kf=False
19
- )
20
-
21
- @staticmethod
22
- def test_settings(
23
- s: dict,
24
- ch_names: Iterable[str],
25
- sfreq: int | float,
26
- ):
27
- nolds_feature_cols = [
28
- "sample_entropy",
29
- "correlation_dimension",
30
- "lyapunov_exponent",
31
- "hurst_exponent",
32
- "detrended_fluctutaion_analysis",
33
- ]
34
- if sum([s["nolds_features"][f] for f in nolds_feature_cols]) == 0:
35
- warnings.warn(
36
- "nolds feature enabled, but no nolds_feature type selected"
37
- )
38
-
39
- for fb in s["nolds_features"]["data"]["frequency_bands"]:
40
- assert fb in list(
41
- s["frequency_ranges_hz"].keys()
42
- ), f"{fb} selected in nolds_features, but not defined in s['frequency_ranges_hz']"
43
-
44
- def calc_feature(
45
- self,
46
- data: np.array,
47
- features_compute: dict,
48
- ) -> dict:
49
-
50
- data = np.nan_to_num(data)
51
- if self.s["nolds_features"]["data"]["raw"]:
52
- features_compute = self.calc_nolds(data, features_compute)
53
- if len(self.s["nolds_features"]["data"]["frequency_bands"]) > 0:
54
- data_filt = self.bp_filter.bandpass_filter.filter_data(data)
55
-
56
- for f_band_idx, f_band in enumerate(
57
- self.s["nolds_features"]["data"]["frequency_bands"]
58
- ):
59
- # filter data now for a specific fband and pass to calc_nolds
60
- features_compute = self.calc_nolds(
61
- data_filt[:, f_band_idx, :], features_compute, f_band
62
- ) # ch, bands, samples
63
- return features_compute
64
-
65
- def calc_nolds(
66
- self, data: np.array, features_compute: dict, data_str: str = "raw"
67
- ) -> dict:
68
-
69
- for ch_idx, ch_name in enumerate(self.ch_names):
70
- dat = data[ch_idx, :]
71
- empty_arr = dat.sum() == 0
72
- if self.s["nolds_features"]["sample_entropy"]:
73
- features_compute[
74
- f"{ch_name}_nolds_sample_entropy"
75
- ] = nolds.sampen(dat) if not empty_arr else 0
76
- if self.s["nolds_features"]["correlation_dimension"]:
77
- features_compute[
78
- f"{ch_name}_nolds_correlation_dimension_{data_str}"
79
- ] = nolds.corr_dim(dat, emb_dim=2) if not empty_arr else 0
80
- if self.s["nolds_features"]["lyapunov_exponent"]:
81
- features_compute[
82
- f"{ch_name}_nolds_lyapunov_exponent_{data_str}"
83
- ] = nolds.lyap_r(dat) if not empty_arr else 0
84
- if self.s["nolds_features"]["hurst_exponent"]:
85
- features_compute[
86
- f"{ch_name}_nolds_hurst_exponent_{data_str}"
87
- ] = nolds.hurst_rs(dat) if not empty_arr else 0
88
- if self.s["nolds_features"]["detrended_fluctutaion_analysis"]:
89
- features_compute[
90
- f"{ch_name}_nolds_detrended_fluctutaion_analysis_{data_str}"
91
- ] = nolds.dfa(dat) if not empty_arr else 0
92
-
93
- return features_compute
1
+ import numpy as np
2
+ from collections.abc import Iterable
3
+
4
+ from py_neuromodulation.nm_types import NMBaseModel
5
+ from typing import TYPE_CHECKING
6
+
7
+ from py_neuromodulation.nm_features import NMFeature
8
+ from py_neuromodulation.nm_types import BoolSelector
9
+
10
+ from pydantic import field_validator
11
+
12
+ if TYPE_CHECKING:
13
+ from py_neuromodulation.nm_settings import NMSettings
14
+
15
+
16
+ class NoldsFeatures(BoolSelector):
17
+ sample_entropy: bool = False
18
+ correlation_dimension: bool = False
19
+ lyapunov_exponent: bool = True
20
+ hurst_exponent: bool = False
21
+ detrended_fluctuation_analysis: bool = False
22
+
23
+
24
+ class NoldsSettings(NMBaseModel):
25
+ raw: bool = True
26
+ frequency_bands: list[str] = ["low_beta"]
27
+ features: NoldsFeatures = NoldsFeatures()
28
+
29
+ @field_validator("frequency_bands")
30
+ def fbands_spaces_to_underscores(cls, frequency_bands):
31
+ return [f.replace(" ", "_") for f in frequency_bands]
32
+
33
+
34
+ class Nolds(NMFeature):
35
+ def __init__(
36
+ self, settings: "NMSettings", ch_names: Iterable[str], sfreq: float
37
+ ) -> None:
38
+ self.settings = settings.nolds_features
39
+ self.ch_names = ch_names
40
+
41
+ if len(self.settings.frequency_bands) > 0:
42
+ from py_neuromodulation.nm_oscillatory import BandPower
43
+
44
+ self.bp_filter = BandPower(settings, ch_names, sfreq, use_kf=False)
45
+
46
+ # Check if the selected frequency bands are defined in the global settings
47
+ for fb in settings.nolds_features.frequency_bands:
48
+ assert (
49
+ fb in settings.frequency_ranges_hz
50
+ ), f"{fb} selected in nolds_features, but not defined in s['frequency_ranges_hz']"
51
+
52
+ def calc_feature(self, data: np.ndarray) -> dict:
53
+ feature_results = {}
54
+ data = np.nan_to_num(data)
55
+ if self.settings.raw:
56
+ feature_results = self.calc_nolds(data, feature_results)
57
+ if len(self.settings.frequency_bands) > 0:
58
+ data_filt = self.bp_filter.bandpass_filter.filter_data(data)
59
+
60
+ for f_band_idx, f_band in enumerate(self.settings.frequency_bands):
61
+ # filter data now for a specific fband and pass to calc_nolds
62
+ feature_results = self.calc_nolds(
63
+ data_filt[:, f_band_idx, :], feature_results, f_band
64
+ ) # ch, bands, samples
65
+ return feature_results
66
+
67
+ def calc_nolds(
68
+ self, data: np.ndarray, feature_results: dict, data_str: str = "raw"
69
+ ) -> dict:
70
+ for ch_idx, ch_name in enumerate(self.ch_names):
71
+ for f_name in self.settings.features.get_enabled():
72
+ feature_results[f"{ch_name}_nolds_{f_name}_{data_str}"] = (
73
+ self.calc_nolds_feature(f_name, data[ch_idx, :])
74
+ if data[ch_idx, :].sum()
75
+ else 0
76
+ )
77
+
78
+ return feature_results
79
+
80
+ @staticmethod
81
+ def calc_nolds_feature(f_name: str, dat: np.ndarray):
82
+ import nolds
83
+
84
+ match f_name:
85
+ case "sample_entropy":
86
+ return nolds.sampen(dat)
87
+ case "correlation_dimension":
88
+ return nolds.corr_dim(dat, emb_dim=2)
89
+ case "lyapunov_exponent":
90
+ return nolds.lyap_r(dat)
91
+ case "hurst_exponent":
92
+ return nolds.hurst_rs(dat)
93
+ case "detrended_fluctuation_analysis":
94
+ return nolds.dfa(dat)
95
+ case _:
96
+ raise ValueError(f"Invalid nolds feature name: {f_name}")
@@ -1,214 +1,173 @@
1
- """Module for real-time data normalization."""
2
- from enum import Enum
3
-
4
- from sklearn import preprocessing
5
- import numpy as np
6
- class NORM_METHODS(Enum):
7
- MEAN = "mean"
8
- MEDIAN = "median"
9
- ZSCORE = "zscore"
10
- ZSCORE_MEDIAN = "zscore-median"
11
- QUANTILE = "quantile"
12
- POWER = "power"
13
- ROBUST = "robust"
14
- MINMAX = "minmax"
15
-
16
-
17
- def test_normalization_settings(
18
- normalization_time_s: int | float, normalization_method: str, clip: bool
19
- ):
20
- assert isinstance(
21
- normalization_time_s,
22
- (float, int),
23
- )
24
-
25
- assert isinstance(
26
- normalization_method, str
27
- ), "normalization method needs to be of type string"
28
-
29
- assert normalization_method in [e.value for e in NORM_METHODS], (
30
- f"select a valid normalization method, got {normalization_method}, "
31
- f"valid options are {[e.value for e in NORM_METHODS]}"
32
- )
33
-
34
- assert isinstance(clip, (float, int, bool))
35
-
36
-
37
- class RawNormalizer:
38
- def __init__(
39
- self,
40
- sfreq: int | float,
41
- sampling_rate_features_hz: int,
42
- normalization_method: str = "zscore",
43
- normalization_time_s: int | float = 30,
44
- clip: bool | int | float = False,
45
- ) -> None:
46
- """Normalize raw data.
47
-
48
- normalize_samples : int
49
- number of past samples considered for normalization
50
- sample_add : int
51
- number of samples to add to previous
52
- method : str | default is 'mean'
53
- data is normalized via subtraction of the 'mean' or 'median' and
54
- subsequent division by the 'mean' or 'median'. For z-scoring enter
55
- 'zscore'.
56
- clip : int | float, optional
57
- value at which to clip after normalization
58
- """
59
-
60
- test_normalization_settings(normalization_time_s, normalization_method, clip)
61
-
62
- self.method = normalization_method
63
- self.clip = clip
64
- self.num_samples_normalize = int(normalization_time_s * sfreq)
65
- self.add_samples = int(sfreq / sampling_rate_features_hz)
66
- self.previous = None
67
-
68
- def process(self, data: np.ndarray) -> np.ndarray:
69
- data = data.T
70
- if self.previous is None:
71
- self.previous = data
72
- return data.T
73
-
74
- self.previous = np.vstack((self.previous, data[-self.add_samples :]))
75
-
76
- data, self.previous = _normalize_and_clip(
77
- current=data,
78
- previous=self.previous,
79
- method=self.method,
80
- clip=self.clip,
81
- description="raw",
82
- )
83
- if self.previous.shape[0] >= self.num_samples_normalize:
84
- self.previous = self.previous[1:]
85
-
86
- return data.T
87
-
88
-
89
- class FeatureNormalizer:
90
- def __init__(
91
- self,
92
- sampling_rate_features_hz: int,
93
- normalization_method: str = "zscore",
94
- normalization_time_s: int | float = 30,
95
- clip: bool | int | float = False,
96
- ) -> None:
97
- """Normalize raw data.
98
-
99
- normalize_samples : int
100
- number of past samples considered for normalization
101
- sample_add : int
102
- number of samples to add to previous
103
- method : str | default is 'mean'
104
- data is normalized via subtraction of the 'mean' or 'median' and
105
- subsequent division by the 'mean' or 'median'. For z-scoring enter
106
- 'zscore'.
107
- clip : int | float, optional
108
- value at which to clip after normalization
109
- """
110
-
111
- test_normalization_settings(normalization_time_s, normalization_method, clip)
112
-
113
- self.method = normalization_method
114
- self.clip = clip
115
- self.num_samples_normalize = int(
116
- normalization_time_s * sampling_rate_features_hz
117
- )
118
- self.previous = None
119
-
120
- def process(self, data: np.ndarray) -> np.ndarray:
121
- if self.previous is None:
122
- self.previous = data
123
- return data
124
-
125
- self.previous = np.vstack((self.previous, data))
126
-
127
- data, self.previous = _normalize_and_clip(
128
- current=data,
129
- previous=self.previous,
130
- method=self.method,
131
- clip=self.clip,
132
- description="feature",
133
- )
134
- if self.previous.shape[0] >= self.num_samples_normalize:
135
- self.previous = self.previous[1:]
136
-
137
- return data
138
-
139
- """
140
- Functions to check for NaN's before deciding which Numpy function to call
141
- """
142
- def nan_mean(data, axis):
143
- return np.nanmean(data, axis=axis) if np.any(np.isnan(sum(data))) else np.mean(data, axis=axis)
144
-
145
- def nan_std(data, axis):
146
- return np.nanstd(data, axis=axis) if np.any(np.isnan(sum(data))) else np.std(data, axis=axis)
147
-
148
- def nan_median(data, axis):
149
- return np.nanmedian(data, axis=axis) if np.any(np.isnan(sum(data))) else np.median(data, axis=axis)
150
-
151
- def _normalize_and_clip(
152
- current: np.ndarray,
153
- previous: np.ndarray,
154
- method: str,
155
- clip: int | float | bool,
156
- description: str,
157
- ) -> tuple[np.ndarray, np.ndarray]:
158
- """Normalize data."""
159
- match method:
160
- case NORM_METHODS.MEAN.value:
161
- mean = nan_mean(previous, axis=0)
162
- current = (current - mean) / mean
163
- case NORM_METHODS.MEDIAN.value:
164
- median = nan_median(previous, axis=0)
165
- current = (current - median) / median
166
- case NORM_METHODS.ZSCORE.value:
167
- current = (current - nan_mean(previous, axis=0)) / nan_std(previous, axis=0)
168
- case NORM_METHODS.ZSCORE_MEDIAN.value:
169
- current = (current - nan_median(previous, axis=0)) / nan_std(previous, axis=0)
170
- # For the following methods we check for the shape of current
171
- # when current is a 1D array, then it is the post-processing normalization,
172
- # and we need to expand, and remove the extra dimension afterwards
173
- # When current is a 2D array, then it is pre-processing normalization, and
174
- # there's no need for expanding.
175
- case (NORM_METHODS.QUANTILE.value |
176
- NORM_METHODS.ROBUST.value |
177
- NORM_METHODS.MINMAX.value |
178
- NORM_METHODS.POWER.value):
179
-
180
- norm_methods = {
181
- NORM_METHODS.QUANTILE.value : lambda: preprocessing.QuantileTransformer(n_quantiles=300),
182
- NORM_METHODS.ROBUST.value : preprocessing.RobustScaler,
183
- NORM_METHODS.MINMAX.value : preprocessing.MinMaxScaler,
184
- NORM_METHODS.POWER.value : preprocessing.PowerTransformer
185
- }
186
-
187
- current = (
188
- norm_methods[method]()
189
- .fit(np.nan_to_num(previous))
190
- .transform(
191
- # if post-processing: pad dimensions to 2
192
- np.reshape(current, (2-len(current.shape))*(1,) + current.shape)
193
- )
194
- .squeeze() # if post-processing: remove extra dimension
195
- )
196
-
197
- case _:
198
- raise ValueError(
199
- f"Only {[e.value for e in NORM_METHODS]} are supported as "
200
- f"{description} normalization methods. Got {method}."
201
- )
202
-
203
- if clip:
204
- current = _clip(data=current, clip=clip)
205
- return current, previous
206
-
207
-
208
- def _clip(data: np.ndarray, clip: bool | int | float) -> np.ndarray:
209
- """Clip data."""
210
- if clip is True:
211
- clip = 3.0 # default value
212
- else:
213
- clip = float(clip)
214
- return np.nan_to_num(data).clip(min=-clip, max=clip)
1
+ """Module for real-time data normalization."""
2
+
3
+ import numpy as np
4
+ from typing import TYPE_CHECKING, Callable, Literal, get_args
5
+
6
+ from py_neuromodulation.nm_types import NMBaseModel, Field, NormMethod
7
+ from py_neuromodulation.nm_preprocessing import NMPreprocessor
8
+
9
+ if TYPE_CHECKING:
10
+ from py_neuromodulation.nm_settings import NMSettings
11
+
12
+ NormalizerType = Literal["raw", "feature"]
13
+
14
+
15
+ class NormalizationSettings(NMBaseModel):
16
+ normalization_time_s: float = 30
17
+ normalization_method: NormMethod = "zscore"
18
+ clip: float = Field(default=3, ge=0)
19
+
20
+ @staticmethod
21
+ def list_normalization_methods() -> list[NormMethod]:
22
+ return list(get_args(NormMethod))
23
+
24
+
25
class Normalizer(NMPreprocessor):
    """Rolling-window normalizer for raw data ("raw") or features ("feature").

    Maintains a buffer of past samples (``self.previous``) covering at most
    ``normalization_time_s`` seconds and normalizes each incoming batch
    against the statistics of that buffer.
    """

    def __init__(
        self,
        sfreq: float,
        settings: "NMSettings",
        type: NormalizerType,
    ) -> None:
        # sfreq: sampling frequency of the incoming data; for
        #   type="feature" the callers pass sampling_rate_features_hz.
        # settings: global settings object from which the raw/feature
        #   normalization sub-settings are selected based on `type`.
        # type: "raw" (pre-processing) or "feature" (post-processing).

        self.type = type
        self.settings: NormalizationSettings

        match self.type:
            case "raw":
                self.settings = settings.raw_normalization_settings.validate()
                # Number of new raw samples arriving per feature-computation
                # step; only this tail of each batch extends the buffer.
                self.add_samples = int(sfreq / settings.sampling_rate_features_hz)
            case "feature":
                self.settings = settings.feature_normalization_settings.validate()
                # add_samples = 0 makes data[-0:] below select the whole batch.
                self.add_samples = 0

        # For type = "feature" sfreq = sampling_rate_features_hz
        self.num_samples_normalize = int(self.settings.normalization_time_s * sfreq)

        self.previous: np.ndarray = np.empty((0, 0))  # Default empty array

        self.method = self.settings.normalization_method
        self.using_sklearn = self.method in ["quantile", "power", "robust", "minmax"]

        if self.using_sklearn:
            # Imported lazily so sklearn is only needed for these methods.
            import sklearn.preprocessing as skpp

            NORM_METHODS_SKLEARN: dict[NormMethod, Callable] = {
                "quantile": lambda: skpp.QuantileTransformer(n_quantiles=300),
                "robust": skpp.RobustScaler,
                "minmax": skpp.MinMaxScaler,
                "power": skpp.PowerTransformer,
            }

            # norm_sklearn wraps the instantiated transformer into a
            # (current, previous) -> normalized-array callable.
            self.normalizer = norm_sklearn(NORM_METHODS_SKLEARN[self.method]())

        else:
            NORM_FUNCTIONS = {
                "mean": norm_mean,
                "median": norm_median,
                "zscore": norm_zscore,
                "zscore-median": norm_zscore_median,
            }
            self.normalizer = NORM_FUNCTIONS[self.method]

    def process(self, data: np.ndarray) -> np.ndarray:
        """Normalize *data* against the rolling buffer and update the buffer.

        NOTE(review): for type="raw" the input is transposed up front and the
        result is returned in that transposed orientation (not transposed
        back), while for type="feature" the result is transposed on return —
        presumably raw data arrives as (channels, samples); confirm against
        the calling pipeline.
        """
        # TODO: does feature normalization need to be transposed too?
        if self.type == "raw":
            data = data.T

        if self.previous.size == 0:  # Check if empty
            # First batch: no history to normalize against — seed the
            # buffer and pass the data through unchanged.
            self.previous = data
            return data if self.type == "raw" else data.T

        self.previous = np.vstack((self.previous, data[-self.add_samples :]))

        data = self.normalizer(data, self.previous)

        if self.settings.clip:
            # clip == 0 is falsy, so it disables clipping entirely.
            data = data.clip(min=-self.settings.clip, max=self.settings.clip)


        # Trim the buffer back to the normalization window.
        self.previous = self.previous[-self.num_samples_normalize + 1 :]

        data = np.nan_to_num(data)

        return data if self.type == "raw" else data.T
95
+
96
+
97
class RawNormalizer(Normalizer):
    """Normalizer preconfigured for raw-data ("raw") pre-processing."""

    def __init__(self, sfreq: float, settings: "NMSettings") -> None:
        super().__init__(sfreq=sfreq, settings=settings, type="raw")
100
+
101
+
102
class FeatureNormalizer(Normalizer):
    """Normalizer preconfigured for feature ("feature") post-processing.

    For features, the effective sampling frequency is the feature
    sampling rate from the settings.
    """

    def __init__(self, settings: "NMSettings") -> None:
        super().__init__(
            sfreq=settings.sampling_rate_features_hz,
            settings=settings,
            type="feature",
        )
105
+
106
+
107
+ """Helpers that check for NaNs before deciding which NumPy function to call."""
108
+
109
+
110
def nan_mean(data: np.ndarray, axis: int) -> np.ndarray:
    """Mean of *data* along *axis*, NaN-aware only when NaNs are present.

    ``np.mean`` is cheaper than ``np.nanmean``, so the NaN-aware variant
    is used only when the data actually contains a NaN.
    """
    # Fix: the previous probe `np.any(np.isnan(sum(data)))` summed the
    # array row-by-row in a Python-level loop and could report a false
    # NaN when inf and -inf cancel in the sum; `np.isnan(data).any()`
    # checks every element exactly and runs entirely in C.
    if np.isnan(data).any():
        return np.nanmean(data, axis=axis)
    return np.mean(data, axis=axis)
116
+
117
+
118
def nan_std(data: np.ndarray, axis: int) -> np.ndarray:
    """Standard deviation of *data* along *axis*, NaN-aware only when needed.

    ``np.std`` is cheaper than ``np.nanstd``, so the NaN-aware variant is
    used only when the data actually contains a NaN.
    """
    # Fix: the previous probe `np.any(np.isnan(sum(data)))` summed the
    # array row-by-row in a Python-level loop and could report a false
    # NaN when inf and -inf cancel; `np.isnan(data).any()` is exact.
    if np.isnan(data).any():
        return np.nanstd(data, axis=axis)
    return np.std(data, axis=axis)
124
+
125
+
126
def nan_median(data: np.ndarray, axis: int) -> np.ndarray:
    """Median of *data* along *axis*, NaN-aware only when NaNs are present.

    ``np.median`` is cheaper than ``np.nanmedian``, so the NaN-aware
    variant is used only when the data actually contains a NaN.
    """
    # Fix: the previous probe `np.any(np.isnan(sum(data)))` summed the
    # array row-by-row in a Python-level loop and could report a false
    # NaN when inf and -inf cancel; `np.isnan(data).any()` is exact.
    if np.isnan(data).any():
        return np.nanmedian(data, axis=axis)
    return np.median(data, axis=axis)
132
+
133
+
134
def norm_mean(current, previous):
    """Normalize *current* as its relative deviation from the column-wise
    mean of *previous* (i.e. (x - mean) / mean)."""
    baseline = nan_mean(previous, axis=0)
    deviation = current - baseline
    return deviation / baseline
137
+
138
+
139
def norm_median(current, previous):
    """Normalize *current* as its relative deviation from the column-wise
    median of *previous* (i.e. (x - median) / median)."""
    baseline = nan_median(previous, axis=0)
    deviation = current - baseline
    return deviation / baseline
142
+
143
+
144
def norm_zscore(current, previous):
    """Z-score *current* against the column-wise mean/std of *previous*."""
    scale = nan_std(previous, axis=0)
    # Zero-variance columns divide by 1 instead — same behavior as sklearn.
    scale = np.where(scale == 0, 1, scale)
    center = nan_mean(previous, axis=0)
    return (current - center) / scale
148
+
149
+
150
def norm_zscore_median(current, previous):
    """Z-score *current* using the column-wise median of *previous* as the
    center and its std as the scale."""
    scale = nan_std(previous, axis=0)
    # Zero-variance columns divide by 1 instead — same behavior as sklearn.
    scale = np.where(scale == 0, 1, scale)
    center = nan_median(previous, axis=0)
    return (current - center) / scale
154
+
155
+
156
def norm_sklearn(sknormalizer):
    """Wrap a scikit-learn transformer as a (current, previous) normalizer.

    The returned closure fits the transformer on the NaN-sanitized history
    (``previous``) and transforms ``current``. A 1D ``current``
    (post-processing normalization) is padded to 2D for the sklearn API and
    the extra dimension is removed again afterwards; a 2D ``current``
    (pre-processing normalization) needs no padding.
    """

    def sk_normalizer(current, previous):
        fitted = sknormalizer.fit(np.nan_to_num(previous))
        # atleast_2d pads a 1D vector to shape (1, n) — equivalent to the
        # shape-arithmetic reshape — and leaves 2D input untouched.
        transformed = fitted.transform(np.atleast_2d(current))
        return transformed.squeeze()  # drop the padding dimension again

    return sk_normalizer