py-neuromodulation 0.0.4-py3-none-any.whl → 0.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
  2. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
  3. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
  4. py_neuromodulation/FieldTrip.py +589 -589
  5. py_neuromodulation/__init__.py +74 -13
  6. py_neuromodulation/_write_example_dataset_helper.py +83 -65
  7. py_neuromodulation/data/README +6 -6
  8. py_neuromodulation/data/dataset_description.json +8 -8
  9. py_neuromodulation/data/participants.json +32 -32
  10. py_neuromodulation/data/participants.tsv +2 -2
  11. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
  12. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
  13. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
  14. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
  15. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
  16. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
  17. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
  18. py_neuromodulation/grid_cortex.tsv +40 -40
  19. py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
  20. py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
  21. py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
  22. py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
  23. py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
  24. py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
  25. py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
  26. py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
  27. py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
  28. py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
  29. py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
  30. py_neuromodulation/nm_IO.py +413 -417
  31. py_neuromodulation/nm_RMAP.py +496 -531
  32. py_neuromodulation/nm_analysis.py +993 -1074
  33. py_neuromodulation/nm_artifacts.py +30 -25
  34. py_neuromodulation/nm_bispectra.py +154 -168
  35. py_neuromodulation/nm_bursts.py +292 -198
  36. py_neuromodulation/nm_coherence.py +251 -205
  37. py_neuromodulation/nm_database.py +149 -0
  38. py_neuromodulation/nm_decode.py +918 -992
  39. py_neuromodulation/nm_define_nmchannels.py +300 -302
  40. py_neuromodulation/nm_features.py +144 -116
  41. py_neuromodulation/nm_filter.py +219 -219
  42. py_neuromodulation/nm_filter_preprocessing.py +79 -91
  43. py_neuromodulation/nm_fooof.py +139 -159
  44. py_neuromodulation/nm_generator.py +45 -37
  45. py_neuromodulation/nm_hjorth_raw.py +52 -73
  46. py_neuromodulation/nm_kalmanfilter.py +71 -58
  47. py_neuromodulation/nm_linelength.py +21 -33
  48. py_neuromodulation/nm_logger.py +66 -0
  49. py_neuromodulation/nm_mne_connectivity.py +149 -112
  50. py_neuromodulation/nm_mnelsl_generator.py +90 -0
  51. py_neuromodulation/nm_mnelsl_stream.py +116 -0
  52. py_neuromodulation/nm_nolds.py +96 -93
  53. py_neuromodulation/nm_normalization.py +173 -214
  54. py_neuromodulation/nm_oscillatory.py +423 -448
  55. py_neuromodulation/nm_plots.py +585 -612
  56. py_neuromodulation/nm_preprocessing.py +83 -0
  57. py_neuromodulation/nm_projection.py +370 -394
  58. py_neuromodulation/nm_rereference.py +97 -95
  59. py_neuromodulation/nm_resample.py +59 -50
  60. py_neuromodulation/nm_run_analysis.py +325 -435
  61. py_neuromodulation/nm_settings.py +289 -68
  62. py_neuromodulation/nm_settings.yaml +244 -0
  63. py_neuromodulation/nm_sharpwaves.py +423 -401
  64. py_neuromodulation/nm_stats.py +464 -480
  65. py_neuromodulation/nm_stream.py +398 -0
  66. py_neuromodulation/nm_stream_abc.py +166 -218
  67. py_neuromodulation/nm_types.py +193 -0
  68. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.5.dist-info}/METADATA +29 -26
  69. py_neuromodulation-0.0.5.dist-info/RECORD +83 -0
  70. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.5.dist-info}/WHEEL +1 -1
  71. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.5.dist-info}/licenses/LICENSE +21 -21
  72. py_neuromodulation/nm_EpochStream.py +0 -92
  73. py_neuromodulation/nm_across_patient_decoding.py +0 -927
  74. py_neuromodulation/nm_cohortwrapper.py +0 -435
  75. py_neuromodulation/nm_eval_timing.py +0 -239
  76. py_neuromodulation/nm_features_abc.py +0 -39
  77. py_neuromodulation/nm_settings.json +0 -338
  78. py_neuromodulation/nm_stream_offline.py +0 -359
  79. py_neuromodulation/utils/_logging.py +0 -24
  80. py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
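
Since wheels are ordinary zip archives, a diff like the one rendered below can be approximated locally with the Python standard library alone. The following is a minimal sketch, not the registry's own tooling; the local wheel filenames are assumptions based on the versions compared here, and binary members (such as the bundled liblsl libraries) are simply skipped.

import difflib
import zipfile


def diff_wheels(old_path: str, new_path: str) -> str:
    """Approximate a registry-style wheel diff (text members only)."""
    out: list[str] = []
    with zipfile.ZipFile(old_path) as old, zipfile.ZipFile(new_path) as new:
        old_names, new_names = set(old.namelist()), set(new.namelist())
        for name in sorted(old_names | new_names):
            try:
                a = old.read(name).decode().splitlines() if name in old_names else []
                b = new.read(name).decode().splitlines() if name in new_names else []
            except UnicodeDecodeError:
                continue  # binary member (e.g. liblsl .so/.dll/.dylib): skip
            out += difflib.unified_diff(a, b, fromfile=name, tofile=name, lineterm="")
    return "\n".join(out)


# Assumed local filenames for the two versions compared above:
print(diff_wheels("py_neuromodulation-0.0.4-py3-none-any.whl",
                  "py_neuromodulation-0.0.5-py3-none-any.whl"))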
py_neuromodulation/nm_nolds.py
@@ -1,93 +1,96 @@
- import numpy as np
- from typing import Iterable
- import nolds
- import warnings
-
- from py_neuromodulation import nm_features_abc, nm_oscillatory
-
-
- class Nolds(nm_features_abc.Feature):
-     def __init__(
-         self, settings: dict, ch_names: Iterable[str], sfreq: float
-     ) -> None:
-         self.s = settings
-         self.ch_names = ch_names
-
-         if len(self.s["nolds_features"]["data"]["frequency_bands"]) > 0:
-             self.bp_filter = nm_oscillatory.BandPower(
-                 settings, ch_names, sfreq, use_kf=False
-             )
-
-     @staticmethod
-     def test_settings(
-         s: dict,
-         ch_names: Iterable[str],
-         sfreq: int | float,
-     ):
-         nolds_feature_cols = [
-             "sample_entropy",
-             "correlation_dimension",
-             "lyapunov_exponent",
-             "hurst_exponent",
-             "detrended_fluctutaion_analysis",
-         ]
-         if sum([s["nolds_features"][f] for f in nolds_feature_cols]) == 0:
-             warnings.warn(
-                 "nolds feature enabled, but no nolds_feature type selected"
-             )
-
-         for fb in s["nolds_features"]["data"]["frequency_bands"]:
-             assert fb in list(
-                 s["frequency_ranges_hz"].keys()
-             ), f"{fb} selected in nolds_features, but not defined in s['frequency_ranges_hz']"
-
-     def calc_feature(
-         self,
-         data: np.array,
-         features_compute: dict,
-     ) -> dict:
-
-         data = np.nan_to_num(data)
-         if self.s["nolds_features"]["data"]["raw"]:
-             features_compute = self.calc_nolds(data, features_compute)
-         if len(self.s["nolds_features"]["data"]["frequency_bands"]) > 0:
-             data_filt = self.bp_filter.bandpass_filter.filter_data(data)
-
-             for f_band_idx, f_band in enumerate(
-                 self.s["nolds_features"]["data"]["frequency_bands"]
-             ):
-                 # filter data now for a specific fband and pass to calc_nolds
-                 features_compute = self.calc_nolds(
-                     data_filt[:, f_band_idx, :], features_compute, f_band
-                 )  # ch, bands, samples
-         return features_compute
-
-     def calc_nolds(
-         self, data: np.array, features_compute: dict, data_str: str = "raw"
-     ) -> dict:
-
-         for ch_idx, ch_name in enumerate(self.ch_names):
-             dat = data[ch_idx, :]
-             empty_arr = dat.sum() == 0
-             if self.s["nolds_features"]["sample_entropy"]:
-                 features_compute[
-                     f"{ch_name}_nolds_sample_entropy"
-                 ] = nolds.sampen(dat) if not empty_arr else 0
-             if self.s["nolds_features"]["correlation_dimension"]:
-                 features_compute[
-                     f"{ch_name}_nolds_correlation_dimension_{data_str}"
-                 ] = nolds.corr_dim(dat, emb_dim=2) if not empty_arr else 0
-             if self.s["nolds_features"]["lyapunov_exponent"]:
-                 features_compute[
-                     f"{ch_name}_nolds_lyapunov_exponent_{data_str}"
-                 ] = nolds.lyap_r(dat) if not empty_arr else 0
-             if self.s["nolds_features"]["hurst_exponent"]:
-                 features_compute[
-                     f"{ch_name}_nolds_hurst_exponent_{data_str}"
-                 ] = nolds.hurst_rs(dat) if not empty_arr else 0
-             if self.s["nolds_features"]["detrended_fluctutaion_analysis"]:
-                 features_compute[
-                     f"{ch_name}_nolds_detrended_fluctutaion_analysis_{data_str}"
-                 ] = nolds.dfa(dat) if not empty_arr else 0
-
-         return features_compute
+ import numpy as np
+ from collections.abc import Iterable
+
+ from py_neuromodulation.nm_types import NMBaseModel
+ from typing import TYPE_CHECKING
+
+ from py_neuromodulation.nm_features import NMFeature
+ from py_neuromodulation.nm_types import BoolSelector
+
+ from pydantic import field_validator
+
+ if TYPE_CHECKING:
+     from py_neuromodulation.nm_settings import NMSettings
+
+
+ class NoldsFeatures(BoolSelector):
+     sample_entropy: bool = False
+     correlation_dimension: bool = False
+     lyapunov_exponent: bool = True
+     hurst_exponent: bool = False
+     detrended_fluctuation_analysis: bool = False
+
+
+ class NoldsSettings(NMBaseModel):
+     raw: bool = True
+     frequency_bands: list[str] = ["low_beta"]
+     features: NoldsFeatures = NoldsFeatures()
+
+     @field_validator("frequency_bands")
+     def fbands_spaces_to_underscores(cls, frequency_bands):
+         return [f.replace(" ", "_") for f in frequency_bands]
+
+
+ class Nolds(NMFeature):
+     def __init__(
+         self, settings: "NMSettings", ch_names: Iterable[str], sfreq: float
+     ) -> None:
+         self.settings = settings.nolds_features
+         self.ch_names = ch_names
+
+         if len(self.settings.frequency_bands) > 0:
+             from py_neuromodulation.nm_oscillatory import BandPower
+
+             self.bp_filter = BandPower(settings, ch_names, sfreq, use_kf=False)
+
+         # Check if the selected frequency bands are defined in the global settings
+         for fb in settings.nolds_features.frequency_bands:
+             assert (
+                 fb in settings.frequency_ranges_hz
+             ), f"{fb} selected in nolds_features, but not defined in s['frequency_ranges_hz']"
+
+     def calc_feature(self, data: np.ndarray) -> dict:
+         feature_results = {}
+         data = np.nan_to_num(data)
+         if self.settings.raw:
+             feature_results = self.calc_nolds(data, feature_results)
+         if len(self.settings.frequency_bands) > 0:
+             data_filt = self.bp_filter.bandpass_filter.filter_data(data)
+
+             for f_band_idx, f_band in enumerate(self.settings.frequency_bands):
+                 # filter data now for a specific fband and pass to calc_nolds
+                 feature_results = self.calc_nolds(
+                     data_filt[:, f_band_idx, :], feature_results, f_band
+                 )  # ch, bands, samples
+         return feature_results
+
+     def calc_nolds(
+         self, data: np.ndarray, feature_results: dict, data_str: str = "raw"
+     ) -> dict:
+         for ch_idx, ch_name in enumerate(self.ch_names):
+             for f_name in self.settings.features.get_enabled():
+                 feature_results[f"{ch_name}_nolds_{f_name}_{data_str}"] = (
+                     self.calc_nolds_feature(f_name, data[ch_idx, :])
+                     if data[ch_idx, :].sum()
+                     else 0
+                 )
+
+         return feature_results
+
+     @staticmethod
+     def calc_nolds_feature(f_name: str, dat: np.ndarray):
+         import nolds
+
+         match f_name:
+             case "sample_entropy":
+                 return nolds.sampen(dat)
+             case "correlation_dimension":
+                 return nolds.corr_dim(dat, emb_dim=2)
+             case "lyapunov_exponent":
+                 return nolds.lyap_r(dat)
+             case "hurst_exponent":
+                 return nolds.hurst_rs(dat)
+             case "detrended_fluctuation_analysis":
+                 return nolds.dfa(dat)
+             case _:
+                 raise ValueError(f"Invalid nolds feature name: {f_name}")
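
The 0.0.5 rewrite above drops the dict-driven settings and the chain of per-feature if-blocks in favor of a declarative BoolSelector subclass plus a single match-based dispatch in calc_nolds_feature. BoolSelector itself lives in py_neuromodulation.nm_types and is not shown in this diff; the sketch below assumes get_enabled() simply returns the names of the boolean fields currently set to True.

from pydantic import BaseModel


class BoolSelector(BaseModel):
    # Assumed behavior of py_neuromodulation.nm_types.BoolSelector:
    # list the names of all boolean fields that are enabled.
    def get_enabled(self) -> list[str]:
        return [name for name, value in self.model_dump().items() if value is True]


class NoldsFeatures(BoolSelector):
    sample_entropy: bool = False
    correlation_dimension: bool = False
    lyapunov_exponent: bool = True
    hurst_exponent: bool = False
    detrended_fluctuation_analysis: bool = False


features = NoldsFeatures(sample_entropy=True)
print(features.get_enabled())  # ['sample_entropy', 'lyapunov_exponent']

Under this scheme each enabled name is routed through Nolds.calc_nolds_feature, so adding a new nolds metric means one new field and one new match arm rather than another if-block.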
py_neuromodulation/nm_normalization.py
@@ -1,214 +1,173 @@
- """Module for real-time data normalization."""
- from enum import Enum
-
- from sklearn import preprocessing
- import numpy as np
- class NORM_METHODS(Enum):
-     MEAN = "mean"
-     MEDIAN = "median"
-     ZSCORE = "zscore"
-     ZSCORE_MEDIAN = "zscore-median"
-     QUANTILE = "quantile"
-     POWER = "power"
-     ROBUST = "robust"
-     MINMAX = "minmax"
-
-
- def test_normalization_settings(
-     normalization_time_s: int | float, normalization_method: str, clip: bool
- ):
-     assert isinstance(
-         normalization_time_s,
-         (float, int),
-     )
-
-     assert isinstance(
-         normalization_method, str
-     ), "normalization method needs to be of type string"
-
-     assert normalization_method in [e.value for e in NORM_METHODS], (
-         f"select a valid normalization method, got {normalization_method}, "
-         f"valid options are {[e.value for e in NORM_METHODS]}"
-     )
-
-     assert isinstance(clip, (float, int, bool))
-
-
- class RawNormalizer:
-     def __init__(
-         self,
-         sfreq: int | float,
-         sampling_rate_features_hz: int,
-         normalization_method: str = "zscore",
-         normalization_time_s: int | float = 30,
-         clip: bool | int | float = False,
-     ) -> None:
-         """Normalize raw data.
-
-         normalize_samples : int
-             number of past samples considered for normalization
-         sample_add : int
-             number of samples to add to previous
-         method : str | default is 'mean'
-             data is normalized via subtraction of the 'mean' or 'median' and
-             subsequent division by the 'mean' or 'median'. For z-scoring enter
-             'zscore'.
-         clip : int | float, optional
-             value at which to clip after normalization
-         """
-
-         test_normalization_settings(normalization_time_s, normalization_method, clip)
-
-         self.method = normalization_method
-         self.clip = clip
-         self.num_samples_normalize = int(normalization_time_s * sfreq)
-         self.add_samples = int(sfreq / sampling_rate_features_hz)
-         self.previous = None
-
-     def process(self, data: np.ndarray) -> np.ndarray:
-         data = data.T
-         if self.previous is None:
-             self.previous = data
-             return data.T
-
-         self.previous = np.vstack((self.previous, data[-self.add_samples :]))
-
-         data, self.previous = _normalize_and_clip(
-             current=data,
-             previous=self.previous,
-             method=self.method,
-             clip=self.clip,
-             description="raw",
-         )
-         if self.previous.shape[0] >= self.num_samples_normalize:
-             self.previous = self.previous[1:]
-
-         return data.T
-
-
- class FeatureNormalizer:
-     def __init__(
-         self,
-         sampling_rate_features_hz: int,
-         normalization_method: str = "zscore",
-         normalization_time_s: int | float = 30,
-         clip: bool | int | float = False,
-     ) -> None:
-         """Normalize raw data.
-
-         normalize_samples : int
-             number of past samples considered for normalization
-         sample_add : int
-             number of samples to add to previous
-         method : str | default is 'mean'
-             data is normalized via subtraction of the 'mean' or 'median' and
-             subsequent division by the 'mean' or 'median'. For z-scoring enter
-             'zscore'.
-         clip : int | float, optional
-             value at which to clip after normalization
-         """
-
-         test_normalization_settings(normalization_time_s, normalization_method, clip)
-
-         self.method = normalization_method
-         self.clip = clip
-         self.num_samples_normalize = int(
-             normalization_time_s * sampling_rate_features_hz
-         )
-         self.previous = None
-
-     def process(self, data: np.ndarray) -> np.ndarray:
-         if self.previous is None:
-             self.previous = data
-             return data
-
-         self.previous = np.vstack((self.previous, data))
-
-         data, self.previous = _normalize_and_clip(
-             current=data,
-             previous=self.previous,
-             method=self.method,
-             clip=self.clip,
-             description="feature",
-         )
-         if self.previous.shape[0] >= self.num_samples_normalize:
-             self.previous = self.previous[1:]
-
-         return data
-
- """
- Functions to check for NaN's before deciding which Numpy function to call
- """
- def nan_mean(data, axis):
-     return np.nanmean(data, axis=axis) if np.any(np.isnan(sum(data))) else np.mean(data, axis=axis)
-
- def nan_std(data, axis):
-     return np.nanstd(data, axis=axis) if np.any(np.isnan(sum(data))) else np.std(data, axis=axis)
-
- def nan_median(data, axis):
-     return np.nanmedian(data, axis=axis) if np.any(np.isnan(sum(data))) else np.median(data, axis=axis)
-
- def _normalize_and_clip(
-     current: np.ndarray,
-     previous: np.ndarray,
-     method: str,
-     clip: int | float | bool,
-     description: str,
- ) -> tuple[np.ndarray, np.ndarray]:
-     """Normalize data."""
-     match method:
-         case NORM_METHODS.MEAN.value:
-             mean = nan_mean(previous, axis=0)
-             current = (current - mean) / mean
-         case NORM_METHODS.MEDIAN.value:
-             median = nan_median(previous, axis=0)
-             current = (current - median) / median
-         case NORM_METHODS.ZSCORE.value:
-             current = (current - nan_mean(previous, axis=0)) / nan_std(previous, axis=0)
-         case NORM_METHODS.ZSCORE_MEDIAN.value:
-             current = (current - nan_median(previous, axis=0)) / nan_std(previous, axis=0)
-         # For the following methods we check for the shape of current
-         # when current is a 1D array, then it is the post-processing normalization,
-         # and we need to expand, and remove the extra dimension afterwards
-         # When current is a 2D array, then it is pre-processing normalization, and
-         # there's no need for expanding.
-         case (NORM_METHODS.QUANTILE.value |
-               NORM_METHODS.ROBUST.value |
-               NORM_METHODS.MINMAX.value |
-               NORM_METHODS.POWER.value):
-
-             norm_methods = {
-                 NORM_METHODS.QUANTILE.value: lambda: preprocessing.QuantileTransformer(n_quantiles=300),
-                 NORM_METHODS.ROBUST.value: preprocessing.RobustScaler,
-                 NORM_METHODS.MINMAX.value: preprocessing.MinMaxScaler,
-                 NORM_METHODS.POWER.value: preprocessing.PowerTransformer
-             }
-
-             current = (
-                 norm_methods[method]()
-                 .fit(np.nan_to_num(previous))
-                 .transform(
-                     # if post-processing: pad dimensions to 2
-                     np.reshape(current, (2 - len(current.shape)) * (1,) + current.shape)
-                 )
-                 .squeeze()  # if post-processing: remove extra dimension
-             )
-
-         case _:
-             raise ValueError(
-                 f"Only {[e.value for e in NORM_METHODS]} are supported as "
-                 f"{description} normalization methods. Got {method}."
-             )
-
-     if clip:
-         current = _clip(data=current, clip=clip)
-     return current, previous
-
-
- def _clip(data: np.ndarray, clip: bool | int | float) -> np.ndarray:
-     """Clip data."""
-     if clip is True:
-         clip = 3.0  # default value
-     else:
-         clip = float(clip)
-     return np.nan_to_num(data).clip(min=-clip, max=clip)
+ """Module for real-time data normalization."""
+
+ import numpy as np
+ from typing import TYPE_CHECKING, Callable, Literal, get_args
+
+ from py_neuromodulation.nm_types import NMBaseModel, Field, NormMethod
+ from py_neuromodulation.nm_preprocessing import NMPreprocessor
+
+ if TYPE_CHECKING:
+     from py_neuromodulation.nm_settings import NMSettings
+
+ NormalizerType = Literal["raw", "feature"]
+
+
+ class NormalizationSettings(NMBaseModel):
+     normalization_time_s: float = 30
+     normalization_method: NormMethod = "zscore"
+     clip: float = Field(default=3, ge=0)
+
+     @staticmethod
+     def list_normalization_methods() -> list[NormMethod]:
+         return list(get_args(NormMethod))
+
+
+ class Normalizer(NMPreprocessor):
+     def __init__(
+         self,
+         sfreq: float,
+         settings: "NMSettings",
+         type: NormalizerType,
+     ) -> None:
+
+         self.type = type
+         self.settings: NormalizationSettings
+
+         match self.type:
+             case "raw":
+                 self.settings = settings.raw_normalization_settings.validate()
+                 self.add_samples = int(sfreq / settings.sampling_rate_features_hz)
+             case "feature":
+                 self.settings = settings.feature_normalization_settings.validate()
+                 self.add_samples = 0
+
+         # For type = "feature" sfreq = sampling_rate_features_hz
+         self.num_samples_normalize = int(self.settings.normalization_time_s * sfreq)
+
+         self.previous: np.ndarray = np.empty((0, 0))  # Default empty array
+
+         self.method = self.settings.normalization_method
+         self.using_sklearn = self.method in ["quantile", "power", "robust", "minmax"]
+
+         if self.using_sklearn:
+             import sklearn.preprocessing as skpp
+
+             NORM_METHODS_SKLEARN: dict[NormMethod, Callable] = {
+                 "quantile": lambda: skpp.QuantileTransformer(n_quantiles=300),
+                 "robust": skpp.RobustScaler,
+                 "minmax": skpp.MinMaxScaler,
+                 "power": skpp.PowerTransformer,
+             }
+
+             self.normalizer = norm_sklearn(NORM_METHODS_SKLEARN[self.method]())
+
+         else:
+             NORM_FUNCTIONS = {
+                 "mean": norm_mean,
+                 "median": norm_median,
+                 "zscore": norm_zscore,
+                 "zscore-median": norm_zscore_median,
+             }
+             self.normalizer = NORM_FUNCTIONS[self.method]
+
+     def process(self, data: np.ndarray) -> np.ndarray:
+         # TODO: does feature normalization need to be transposed too?
+         if self.type == "raw":
+             data = data.T
+
+         if self.previous.size == 0:  # Check if empty
+             self.previous = data
+             return data if self.type == "raw" else data.T
+
+         self.previous = np.vstack((self.previous, data[-self.add_samples :]))
+
+         data = self.normalizer(data, self.previous)
+
+         if self.settings.clip:
+             data = data.clip(min=-self.settings.clip, max=self.settings.clip)
+
+
+         self.previous = self.previous[-self.num_samples_normalize + 1 :]
+
+         data = np.nan_to_num(data)
+
+         return data if self.type == "raw" else data.T
+
+
+ class RawNormalizer(Normalizer):
+     def __init__(self, sfreq: float, settings: "NMSettings") -> None:
+         super().__init__(sfreq, settings, "raw")
+
+
+ class FeatureNormalizer(Normalizer):
+     def __init__(self, settings: "NMSettings") -> None:
+         super().__init__(settings.sampling_rate_features_hz, settings, "feature")
+
+
+ """ Functions to check for NaN's before deciding which Numpy function to call """
+
+
+ def nan_mean(data: np.ndarray, axis: int) -> np.ndarray:
+     return (
+         np.nanmean(data, axis=axis)
+         if np.any(np.isnan(sum(data)))
+         else np.mean(data, axis=axis)
+     )
+
+
+ def nan_std(data: np.ndarray, axis: int) -> np.ndarray:
+     return (
+         np.nanstd(data, axis=axis)
+         if np.any(np.isnan(sum(data)))
+         else np.std(data, axis=axis)
+     )
+
+
+ def nan_median(data: np.ndarray, axis: int) -> np.ndarray:
+     return (
+         np.nanmedian(data, axis=axis)
+         if np.any(np.isnan(sum(data)))
+         else np.median(data, axis=axis)
+     )
+
+
+ def norm_mean(current, previous):
+     mean = nan_mean(previous, axis=0)
+     return (current - mean) / mean
+
+
+ def norm_median(current, previous):
+     median = nan_median(previous, axis=0)
+     return (current - median) / median
+
+
+ def norm_zscore(current, previous):
+     std = nan_std(previous, axis=0)
+     std[std == 0] = 1  # same behavior as sklearn
+     return (current - nan_mean(previous, axis=0)) / std
+
+
+ def norm_zscore_median(current, previous):
+     std = nan_std(previous, axis=0)
+     std[std == 0] = 1  # same behavior as sklearn
+     return (current - nan_median(previous, axis=0)) / std
+
+
+ def norm_sklearn(sknormalizer):
+     # For the following methods we check for the shape of current
+     # when current is a 1D array, then it is the post-processing normalization,
+     # and we need to expand, and remove the extra dimension afterwards
+     # When current is a 2D array, then it is pre-processing normalization, and
+     # there's no need for expanding.
+
+     def sk_normalizer(current, previous):
+         return (
+             sknormalizer.fit(np.nan_to_num(previous))
+             .transform(
+                 # if post-processing: pad dimensions to 2
+                 np.reshape(current, (2 - len(current.shape)) * (1,) + current.shape)
+             )
+             .squeeze()  # if post-processing: remove extra dimension  # type: ignore
+         )
+
+     return sk_normalizer
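
One detail of norm_sklearn worth unpacking is the reshape that its comment block describes: feature normalization hands the transformer a 1D vector, raw normalization a 2D (samples × channels) block, and sklearn transformers only accept 2D input. A stand-alone check of that padding arithmetic (pad_to_2d is a hypothetical name used only for illustration):

import numpy as np


def pad_to_2d(current: np.ndarray) -> np.ndarray:
    # (2 - ndim) * (1,) prepends one singleton axis for 1D input and is an
    # empty tuple for 2D input, so 2D data passes through unchanged.
    return np.reshape(current, (2 - len(current.shape)) * (1,) + current.shape)


print(pad_to_2d(np.zeros(5)).shape)         # (1, 5): feature (post-processing) case
print(pad_to_2d(np.zeros((100, 5))).shape)  # (100, 5): raw (pre-processing) case

In the 1D case the trailing .squeeze() in sk_normalizer then removes the padding axis again, so callers always get back an array shaped like the one they passed in.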