py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
- py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
- py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
- py_neuromodulation/__init__.py +80 -13
- py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +496 -531
- py_neuromodulation/analysis/__init__.py +4 -0
- py_neuromodulation/{nm_decode.py → analysis/decode.py} +918 -992
- py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +994 -1074
- py_neuromodulation/{nm_plots.py → analysis/plots.py} +627 -612
- py_neuromodulation/{nm_stats.py → analysis/stats.py} +458 -480
- py_neuromodulation/data/README +6 -6
- py_neuromodulation/data/dataset_description.json +8 -8
- py_neuromodulation/data/participants.json +32 -32
- py_neuromodulation/data/participants.tsv +2 -2
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
- py_neuromodulation/default_settings.yaml +241 -0
- py_neuromodulation/features/__init__.py +31 -0
- py_neuromodulation/features/bandpower.py +165 -0
- py_neuromodulation/features/bispectra.py +157 -0
- py_neuromodulation/features/bursts.py +297 -0
- py_neuromodulation/features/coherence.py +255 -0
- py_neuromodulation/features/feature_processor.py +121 -0
- py_neuromodulation/features/fooof.py +142 -0
- py_neuromodulation/features/hjorth_raw.py +57 -0
- py_neuromodulation/features/linelength.py +21 -0
- py_neuromodulation/features/mne_connectivity.py +148 -0
- py_neuromodulation/features/nolds.py +94 -0
- py_neuromodulation/features/oscillatory.py +249 -0
- py_neuromodulation/features/sharpwaves.py +432 -0
- py_neuromodulation/filter/__init__.py +3 -0
- py_neuromodulation/filter/kalman_filter.py +67 -0
- py_neuromodulation/filter/kalman_filter_external.py +1890 -0
- py_neuromodulation/filter/mne_filter.py +128 -0
- py_neuromodulation/filter/notch_filter.py +93 -0
- py_neuromodulation/grid_cortex.tsv +40 -40
- py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
- py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
- py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
- py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/processing/__init__.py +10 -0
- py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +29 -25
- py_neuromodulation/processing/data_preprocessor.py +77 -0
- py_neuromodulation/processing/filter_preprocessing.py +78 -0
- py_neuromodulation/processing/normalization.py +175 -0
- py_neuromodulation/{nm_projection.py → processing/projection.py} +370 -394
- py_neuromodulation/{nm_rereference.py → processing/rereference.py} +97 -95
- py_neuromodulation/{nm_resample.py → processing/resample.py} +56 -50
- py_neuromodulation/stream/__init__.py +3 -0
- py_neuromodulation/stream/data_processor.py +325 -0
- py_neuromodulation/stream/generator.py +53 -0
- py_neuromodulation/stream/mnelsl_player.py +94 -0
- py_neuromodulation/stream/mnelsl_stream.py +120 -0
- py_neuromodulation/stream/settings.py +292 -0
- py_neuromodulation/stream/stream.py +427 -0
- py_neuromodulation/utils/__init__.py +2 -0
- py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +305 -302
- py_neuromodulation/utils/database.py +149 -0
- py_neuromodulation/utils/io.py +378 -0
- py_neuromodulation/utils/keyboard.py +52 -0
- py_neuromodulation/utils/logging.py +66 -0
- py_neuromodulation/utils/types.py +251 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +28 -33
- py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +1 -1
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +21 -21
- py_neuromodulation/FieldTrip.py +0 -589
- py_neuromodulation/_write_example_dataset_helper.py +0 -65
- py_neuromodulation/nm_EpochStream.py +0 -92
- py_neuromodulation/nm_IO.py +0 -417
- py_neuromodulation/nm_across_patient_decoding.py +0 -927
- py_neuromodulation/nm_bispectra.py +0 -168
- py_neuromodulation/nm_bursts.py +0 -198
- py_neuromodulation/nm_coherence.py +0 -205
- py_neuromodulation/nm_cohortwrapper.py +0 -435
- py_neuromodulation/nm_eval_timing.py +0 -239
- py_neuromodulation/nm_features.py +0 -116
- py_neuromodulation/nm_features_abc.py +0 -39
- py_neuromodulation/nm_filter.py +0 -219
- py_neuromodulation/nm_filter_preprocessing.py +0 -91
- py_neuromodulation/nm_fooof.py +0 -159
- py_neuromodulation/nm_generator.py +0 -37
- py_neuromodulation/nm_hjorth_raw.py +0 -73
- py_neuromodulation/nm_kalmanfilter.py +0 -58
- py_neuromodulation/nm_linelength.py +0 -33
- py_neuromodulation/nm_mne_connectivity.py +0 -112
- py_neuromodulation/nm_nolds.py +0 -93
- py_neuromodulation/nm_normalization.py +0 -214
- py_neuromodulation/nm_oscillatory.py +0 -448
- py_neuromodulation/nm_run_analysis.py +0 -435
- py_neuromodulation/nm_settings.json +0 -338
- py_neuromodulation/nm_settings.py +0 -68
- py_neuromodulation/nm_sharpwaves.py +0 -401
- py_neuromodulation/nm_stream_abc.py +0 -218
- py_neuromodulation/nm_stream_offline.py +0 -359
- py_neuromodulation/utils/_logging.py +0 -24
- py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
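The listing shows the flat nm_* modules of 0.0.4 reorganized into subpackages (analysis, features, filter, processing, stream, utils) in 0.0.6, with nm_settings.json replaced by default_settings.yaml. Code that imported the old module names has to follow the renames. A minimal migration sketch, assuming only that the renamed modules are importable under the paths listed above (the public names inside each module are not visible in this listing):

# Hypothetical import migration for py-neuromodulation 0.0.4 -> 0.0.6.
# 0.0.4 used flat modules, e.g.:
#   from py_neuromodulation import nm_analysis, nm_decode, nm_stats
# 0.0.6 groups them into subpackages, following the renames above:
from py_neuromodulation.analysis import feature_reader   # was nm_analysis.py
from py_neuromodulation.analysis import decode, stats    # were nm_decode.py and nm_stats.py
from py_neuromodulation.processing import normalization  # replaces nm_normalization.py (see hunk below)
from py_neuromodulation import NMSettings                 # top-level import referenced inside normalization.py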
py_neuromodulation/processing/normalization.py (new file in 0.0.6)
@@ -0,0 +1,175 @@
+"""Module for real-time data normalization."""
+
+import numpy as np
+from typing import TYPE_CHECKING, Callable, Literal, get_args
+
+from py_neuromodulation.utils.types import (
+    NMBaseModel,
+    Field,
+    NormMethod,
+    NMPreprocessor,
+)
+
+if TYPE_CHECKING:
+    from py_neuromodulation import NMSettings
+
+NormalizerType = Literal["raw", "feature"]
+
+
+class NormalizationSettings(NMBaseModel):
+    normalization_time_s: float = 30
+    normalization_method: NormMethod = "zscore"
+    clip: float = Field(default=3, ge=0)
+
+    @staticmethod
+    def list_normalization_methods() -> list[NormMethod]:
+        return list(get_args(NormMethod))
+
+
+class Normalizer(NMPreprocessor):
+    def __init__(
+        self,
+        sfreq: float,
+        settings: "NMSettings",
+        type: NormalizerType,
+    ) -> None:
+        self.type = type
+        self.settings: NormalizationSettings
+
+        match self.type:
+            case "raw":
+                self.settings = settings.raw_normalization_settings.validate()
+                self.add_samples = int(sfreq / settings.sampling_rate_features_hz)
+            case "feature":
+                self.settings = settings.feature_normalization_settings.validate()
+                self.add_samples = 0
+
+        # For type = "feature" sfreq = sampling_rate_features_hz
+        self.num_samples_normalize = int(self.settings.normalization_time_s * sfreq)
+
+        self.previous: np.ndarray = np.empty((0, 0))  # Default empty array
+
+        self.method = self.settings.normalization_method
+        self.using_sklearn = self.method in ["quantile", "power", "robust", "minmax"]
+
+        if self.using_sklearn:
+            import sklearn.preprocessing as skpp
+
+            NORM_METHODS_SKLEARN: dict[NormMethod, Callable] = {
+                "quantile": lambda: skpp.QuantileTransformer(n_quantiles=300),
+                "robust": skpp.RobustScaler,
+                "minmax": skpp.MinMaxScaler,
+                "power": skpp.PowerTransformer,
+            }
+
+            self.normalizer = norm_sklearn(NORM_METHODS_SKLEARN[self.method]())
+
+        else:
+            NORM_FUNCTIONS = {
+                "mean": norm_mean,
+                "median": norm_median,
+                "zscore": norm_zscore,
+                "zscore-median": norm_zscore_median,
+            }
+            self.normalizer = NORM_FUNCTIONS[self.method]
+
+    def process(self, data: np.ndarray) -> np.ndarray:
+        # TODO: does feature normalization need to be transposed too?
+        if self.type == "raw":
+            data = data.T
+
+        if self.previous.size == 0:  # Check if empty
+            self.previous = data
+            return data if self.type == "raw" else data.T
+
+        self.previous = np.vstack((self.previous, data[-self.add_samples :]))
+
+        data = self.normalizer(data, self.previous)
+
+        if self.settings.clip:
+            data = data.clip(min=-self.settings.clip, max=self.settings.clip)
+
+        self.previous = self.previous[-self.num_samples_normalize + 1 :]
+
+        data = np.nan_to_num(data)
+
+        return data if self.type == "raw" else data.T
+
+
+class RawNormalizer(Normalizer):
+    def __init__(self, sfreq: float, settings: "NMSettings") -> None:
+        super().__init__(sfreq, settings, "raw")
+
+
+class FeatureNormalizer(Normalizer):
+    def __init__(self, settings: "NMSettings") -> None:
+        super().__init__(settings.sampling_rate_features_hz, settings, "feature")
+
+
+""" Functions to check for NaN's before deciding which Numpy function to call """
+
+
+def nan_mean(data: np.ndarray, axis: int) -> np.ndarray:
+    return (
+        np.nanmean(data, axis=axis)
+        if np.any(np.isnan(sum(data)))
+        else np.mean(data, axis=axis)
+    )
+
+
+def nan_std(data: np.ndarray, axis: int) -> np.ndarray:
+    return (
+        np.nanstd(data, axis=axis)
+        if np.any(np.isnan(sum(data)))
+        else np.std(data, axis=axis)
+    )
+
+
+def nan_median(data: np.ndarray, axis: int) -> np.ndarray:
+    return (
+        np.nanmedian(data, axis=axis)
+        if np.any(np.isnan(sum(data)))
+        else np.median(data, axis=axis)
+    )
+
+
+def norm_mean(current, previous):
+    mean = nan_mean(previous, axis=0)
+    return (current - mean) / mean
+
+
+def norm_median(current, previous):
+    median = nan_median(previous, axis=0)
+    return (current - median) / median
+
+
+def norm_zscore(current, previous):
+    std = nan_std(previous, axis=0)
+    std[std == 0] = 1  # same behavior as sklearn
+    return (current - nan_mean(previous, axis=0)) / std
+
+
+def norm_zscore_median(current, previous):
+    std = nan_std(previous, axis=0)
+    std[std == 0] = 1  # same behavior as sklearn
+    return (current - nan_median(previous, axis=0)) / std
+
+
+def norm_sklearn(sknormalizer):
+    # For the following methods we check for the shape of current
+    # when current is a 1D array, then it is the post-processing normalization,
+    # and we need to expand, and remove the extra dimension afterwards
+    # When current is a 2D array, then it is pre-processing normalization, and
+    # there's no need for expanding.
+
+    def sk_normalizer(current, previous):
+        return (
+            sknormalizer.fit(np.nan_to_num(previous))
+            .transform(
+                # if post-processing: pad dimensions to 2
+                np.reshape(current, (2 - len(current.shape)) * (1,) + current.shape)
+            )
+            .squeeze()  # if post-processing: remove extra dimension  # type: ignore
+        )
+
+    return sk_normalizer
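In the new module, Normalizer.process appends each incoming block to a rolling buffer (self.previous), normalizes the block against that buffer, optionally clips the result, trims the buffer to roughly normalization_time_s worth of samples, and replaces NaNs with zeros. The sketch below replays that logic for the "feature" path with plain NumPy, outside the package; the buffer length and channel count are illustrative, while clip = 3 matches the NormalizationSettings default shown above.

import numpy as np

def rolling_zscore(current, previous):
    # Same arithmetic as norm_zscore above: z-score the current block against buffer statistics.
    std = np.nanstd(previous, axis=0)
    std[std == 0] = 1  # avoid division by zero ("same behavior as sklearn")
    return (current - np.nanmean(previous, axis=0)) / std

num_samples_normalize = 10  # illustrative; the class computes normalization_time_s * sfreq
clip = 3                    # default clip value of NormalizationSettings
buffer = np.empty((0, 4))   # rolling buffer for 4 hypothetical feature channels

for _ in range(20):
    features = np.random.randn(4)              # one feature vector per call ("feature" path)
    if buffer.size == 0:                       # the first call only seeds the buffer
        buffer = features
        continue
    buffer = np.vstack((buffer, features))     # grow the buffer before normalizing
    out = rolling_zscore(features, buffer)
    out = out.clip(min=-clip, max=clip)        # clip outliers to +/- clip
    buffer = buffer[-num_samples_normalize + 1:]  # trim to the normalization window
    out = np.nan_to_num(out)                   # replace NaNs with 0, as in process()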