py-neuromodulation 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py_neuromodulation/ConnectivityDecoding/Automated Anatomical Labeling 3 (Rolls 2020).nii +0 -0
- py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -0
- py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +106 -0
- py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +119 -0
- py_neuromodulation/ConnectivityDecoding/mni_coords_cortical_surface.mat +0 -0
- py_neuromodulation/ConnectivityDecoding/mni_coords_whole_brain.mat +0 -0
- py_neuromodulation/ConnectivityDecoding/rmap_func_all.nii +0 -0
- py_neuromodulation/ConnectivityDecoding/rmap_struc.nii +0 -0
- py_neuromodulation/{helper.py → _write_example_dataset_helper.py} +1 -1
- py_neuromodulation/nm_EpochStream.py +2 -3
- py_neuromodulation/nm_IO.py +43 -70
- py_neuromodulation/nm_RMAP.py +308 -11
- py_neuromodulation/nm_analysis.py +1 -1
- py_neuromodulation/nm_artifacts.py +25 -0
- py_neuromodulation/nm_bispectra.py +64 -29
- py_neuromodulation/nm_bursts.py +44 -30
- py_neuromodulation/nm_coherence.py +2 -1
- py_neuromodulation/nm_features.py +4 -2
- py_neuromodulation/nm_filter.py +63 -32
- py_neuromodulation/nm_filter_preprocessing.py +91 -0
- py_neuromodulation/nm_fooof.py +47 -29
- py_neuromodulation/nm_mne_connectivity.py +1 -1
- py_neuromodulation/nm_normalization.py +50 -74
- py_neuromodulation/nm_oscillatory.py +151 -31
- py_neuromodulation/nm_plots.py +13 -10
- py_neuromodulation/nm_rereference.py +10 -8
- py_neuromodulation/nm_run_analysis.py +28 -13
- py_neuromodulation/nm_settings.json +51 -3
- py_neuromodulation/nm_sharpwaves.py +103 -136
- py_neuromodulation/nm_stats.py +44 -30
- py_neuromodulation/nm_stream_abc.py +18 -10
- py_neuromodulation/nm_stream_offline.py +188 -46
- py_neuromodulation/utils/_logging.py +24 -0
- {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.4.dist-info}/METADATA +72 -32
- py_neuromodulation-0.0.4.dist-info/RECORD +72 -0
- {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.4.dist-info}/WHEEL +1 -1
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/MOV_aligned_features_ch_ECOG_RIGHT_0_all.png +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/all_feature_plt.pdf +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_FEATURES.csv +0 -182
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_LM_ML_RES.p +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_SETTINGS.json +0 -273
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_SIDECAR.json +0 -6
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_decoding_performance.png +0 -0
- py_neuromodulation/data/derivatives/sub-testsub_ses-EphysMedOff_task-gripforce_run-0/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_nm_channels.csv +0 -11
- py_neuromodulation/py_neuromodulation.egg-info/PKG-INFO +0 -104
- py_neuromodulation/py_neuromodulation.egg-info/dependency_links.txt +0 -1
- py_neuromodulation/py_neuromodulation.egg-info/requires.txt +0 -26
- py_neuromodulation/py_neuromodulation.egg-info/top_level.txt +0 -1
- py_neuromodulation-0.0.2.dist-info/RECORD +0 -73
- /py_neuromodulation/{py_neuromodulation.egg-info/SOURCES.txt → utils/__init__.py} +0 -0
- {py_neuromodulation-0.0.2.dist-info → py_neuromodulation-0.0.4.dist-info/licenses}/LICENSE +0 -0

py_neuromodulation/nm_run_analysis.py CHANGED

@@ -1,9 +1,13 @@
 """This module contains the class to process a given batch of data."""
+
 from enum import Enum
 import math
 import os
 from time import time
 from typing import Protocol, Type
+import logging
+
+logger = logging.getLogger("PynmLogger")
 
 import numpy as np
 import pandas as pd
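
The processing module now logs through the named logger "PynmLogger" instead of printing. A minimal consumer-side sketch (not part of the package) of attaching a handler so those messages become visible:

    import logging

    # Hypothetical setup in user code: route the package's named logger to stderr.
    logger = logging.getLogger("PynmLogger")
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
    logger.addHandler(handler)

    logger.info("Last batch took: 0.12 seconds")  # example of the message shape logged below
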
@@ -16,6 +20,7 @@ from py_neuromodulation import (
     nm_projection,
     nm_rereference,
     nm_resample,
+    nm_filter_preprocessing,
 )
 
 _PathLike = str | os.PathLike
@@ -25,8 +30,7 @@ class Preprocessor(Protocol):
     def process(self, data: np.ndarray) -> np.ndarray:
         pass
 
-    def test_settings(self, settings: dict):
-        ...
+    def test_settings(self, settings: dict): ...
 
 
 _PREPROCESSING_CONSTRUCTORS = [
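
Preprocessor is a typing.Protocol, so the one-line `def test_settings(self, settings: dict): ...` is only a structural signature: any object exposing process() and test_settings() qualifies, with no inheritance required. A small illustrative sketch (CenterChannels and run are hypothetical names, not package API):

    from typing import Protocol
    import numpy as np

    class Preprocessor(Protocol):
        def process(self, data: np.ndarray) -> np.ndarray: ...
        def test_settings(self, settings: dict): ...

    # Satisfies the protocol structurally, without subclassing it.
    class CenterChannels:
        def process(self, data: np.ndarray) -> np.ndarray:
            # subtract each channel's mean; data is (channels, samples)
            return data - data.mean(axis=1, keepdims=True)

        def test_settings(self, settings: dict):
            pass

    def run(p: Preprocessor, data: np.ndarray) -> np.ndarray:
        return p.process(data)

    print(run(CenterChannels(), np.random.randn(2, 5)).shape)  # (2, 5)
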
@@ -73,7 +77,7 @@ class DataProcessor:
         notch_filter : nm_filter.NotchFilter,
             Notch Filter object, needs to be instantiated beforehand
         verbose : boolean
-            if True,
+            if True, log signal processed and computation time
         """
         self.settings = self._load_settings(settings)
         self.nm_channels = self._load_nm_channels(nm_channels)
@@ -119,6 +123,12 @@ class DataProcessor:
                     **self.settings.get(settings_str, {}),
                 )
                 self.preprocessors.append(preprocessor)
+            case "preprocessing_filter":
+                preprocessor = nm_filter_preprocessing.PreprocessingFilter(
+                    settings=self.settings,
+                    sfreq=self.sfreq_raw,
+                )
+                self.preprocessors.append(preprocessor)
             case _:
                 raise ValueError(
                     "Invalid preprocessing method. Must be one of"
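
The dispatch above walks the settings["preprocessing"] list and instantiates one preprocessor per entry, with "preprocessing_filter" now recognized. A toy sketch of the same settings-driven match/case pattern (the constructors here are placeholders, not the package's classes):

    # Illustrative only: build a pipeline from a settings dict the way DataProcessor does.
    def build_preprocessors(settings: dict) -> list:
        preprocessors = []
        for method in settings["preprocessing"]:
            match method:
                case "raw_normalization":
                    preprocessors.append(("normalize", settings.get("raw_normalization_settings", {})))
                case "preprocessing_filter":
                    preprocessors.append(("filter", settings.get("preprocessing_filter", {})))
                case _:
                    raise ValueError(f"Invalid preprocessing method: {method}")
        return preprocessors

    print(build_preprocessors({"preprocessing": ["raw_normalization", "preprocessing_filter"]}))
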
@@ -331,7 +341,7 @@ class DataProcessor:
 
         nan_channels = np.isnan(data).any(axis=1)
 
-        data = np.nan_to_num(data)
+        data = np.nan_to_num(data)[self.feature_idx, :]
 
         for processor in self.preprocessors:
             data = processor.process(data)
@@ -341,8 +351,13 @@ class DataProcessor:
 
         # normalize features
         if self.settings["postprocessing"]["feature_normalization"]:
-            normed_features = self.feature_normalizer.process(
-
+            normed_features = self.feature_normalizer.process(
+                np.fromiter(features_dict.values(), dtype="float")
+            )
+            features_dict = {
+                key: normed_features[idx]
+                for idx, key in enumerate(features_dict.keys())
+            }
 
         features_current = pd.Series(
             data=list(features_dict.values()),
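
Feature normalization now packs the scalar features into an array with np.fromiter, normalizes, and rebuilds the dict in insertion order (guaranteed for dicts since Python 3.7). A self-contained sketch with a stand-in z-score normalizer in place of feature_normalizer.process:

    import numpy as np

    # Hypothetical stand-in for feature_normalizer.process
    def process(feature_vector: np.ndarray) -> np.ndarray:
        return (feature_vector - feature_vector.mean()) / (feature_vector.std() + 1e-12)

    features_dict = {"ch1_fft_mean": 4.2, "ch1_welch_mean": 1.3, "ch2_fft_mean": -0.7}
    normed = process(np.fromiter(features_dict.values(), dtype="float"))
    features_dict = {key: normed[idx] for idx, key in enumerate(features_dict.keys())}
    print(features_dict)
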
@@ -364,7 +379,7 @@ class DataProcessor:
             ] = np.nan
 
         if self.verbose is True:
-
+            logger.info(
                 "Last batch took: "
                 + str(np.round(time() - start_time, 2))
                 + " seconds"
@@ -390,14 +405,14 @@ class DataProcessor:
             sidecar["coords"] = self.projection.coords
             if self.settings["postprocessing"]["project_cortex"]:
                 sidecar["grid_cortex"] = self.projection.grid_cortex
-                sidecar[
-
-
+                sidecar["proj_matrix_cortex"] = (
+                    self.projection.proj_matrix_cortex
+                )
             if self.settings["postprocessing"]["project_subcortex"]:
                 sidecar["grid_subcortex"] = self.projection.grid_subcortex
-                sidecar[
-
-
+                sidecar["proj_matrix_subcortex"] = (
+                    self.projection.proj_matrix_subcortex
+                )
         if additional_args is not None:
             sidecar = sidecar | additional_args
 

py_neuromodulation/nm_settings.json CHANGED

@@ -10,7 +10,8 @@
         "raw_resampling",
         "notch_filter",
         "re_referencing",
-        "raw_normalization"
+        "raw_normalization",
+        "preprocessing_filter"
     ],
     "features": {
         "raw_hjorth": true,
@@ -18,6 +19,7 @@
         "bandpass_filter": false,
         "stft": false,
         "fft": true,
+        "welch": true,
         "sharpwave_analysis": true,
         "fooof": false,
         "bursts": true,
@@ -35,6 +37,29 @@
     "raw_resampling_settings": {
         "resample_freq_hz": 1000
     },
+    "preprocessing_filter": {
+        "bandstop_filter": true,
+        "lowpass_filter": true,
+        "highpass_filter": true,
+        "bandpass_filter": true,
+        "bandstop_filter_settings": {
+            "frequency_low_hz": 100,
+            "frequency_high_hz": 160
+        },
+        "lowpass_filter_settings": {
+            "frequency_cutoff_hz": 200
+        },
+        "highpass_filter_settings": {
+            "frequency_cutoff_hz": 3
+        },
+        "bandpass_filter_settings": {
+            "frequency_low_hz": 3,
+            "frequency_high_hz": 200
+        }
+    },
+    "re_referencing_settings": {
+        "reference": "common"
+    },
     "documentation_normalization_options": [
         "mean",
         "median",
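
The new "preprocessing_filter" block enables independent bandstop (100-160 Hz), lowpass (200 Hz), highpass (3 Hz), and bandpass (3-200 Hz) stages. As a rough illustration only, these cutoffs could be realized with SciPy Butterworth designs at the 1000 Hz resampling rate; the package's PreprocessingFilter may differ in filter type and order:

    import numpy as np
    from scipy import signal

    sfreq = 1000  # matches "resample_freq_hz" above
    # Hypothetical SciPy designs for the cutoffs in "preprocessing_filter"
    sos_bandstop = signal.butter(4, [100, 160], btype="bandstop", fs=sfreq, output="sos")
    sos_lowpass = signal.butter(4, 200, btype="lowpass", fs=sfreq, output="sos")
    sos_highpass = signal.butter(4, 3, btype="highpass", fs=sfreq, output="sos")
    sos_bandpass = signal.butter(4, [3, 200], btype="bandpass", fs=sfreq, output="sos")

    data = np.random.randn(2, sfreq)  # (channels, samples)
    filtered = signal.sosfiltfilt(sos_bandstop, data, axis=1)
    print(filtered.shape)  # (2, 1000)
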
@@ -88,12 +113,35 @@
     "fft_settings": {
         "windowlength_ms": 1000,
         "log_transform": true,
-        "
+        "features": {
+            "mean": true,
+            "median": false,
+            "std": false,
+            "max": false
+        },
+        "return_spectrum": false
+    },
+    "welch_settings": {
+        "windowlength_ms": 1000,
+        "log_transform": true,
+        "features": {
+            "mean": true,
+            "median": false,
+            "std": false,
+            "max": false
+        },
+        "return_spectrum": false
     },
     "stft_settings": {
         "windowlength_ms": 500,
         "log_transform": true,
-        "
+        "features": {
+            "mean": true,
+            "median": false,
+            "std": false,
+            "max": false
+        },
+        "return_spectrum": false
     },
     "bandpass_filter_settings": {
         "segment_lengths_ms": {
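
fft_settings, the new welch_settings, and stft_settings now share a "features" block selecting which aggregations (mean, median, std, max) are computed, plus an optional raw spectrum. A hedged sketch of one plausible reading of welch_settings; the band edges and nperseg choice here are illustrative assumptions, not taken from the package:

    import numpy as np
    from scipy import signal

    sfreq = 1000
    x = np.random.randn(sfreq)  # one 1000 ms batch, matching "windowlength_ms": 1000

    f, pxx = signal.welch(x, fs=sfreq, nperseg=sfreq // 4)
    pxx = np.log10(pxx + 1e-12)            # "log_transform": true
    band = (f >= 4) & (f < 8)              # an example frequency band
    print({"welch_band_mean": pxx[band].mean()})  # only "mean" is enabled by default
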

py_neuromodulation/nm_sharpwaves.py CHANGED

@@ -105,23 +105,12 @@ class SharpwaveAnalyzer(nm_features_abc.Feature):
             peak_right_val (np.ndarray): value of righ peak
         """
 
-
-
-            raise NoValidTroughException("No valid trough")
-        val_ind_greater = arr_ind_peaks[ind_greater]
-        peak_right_idx = arr_ind_peaks[
-            ind_greater[np.argsort(val_ind_greater)[0]]
-        ]
-
-        ind_smaller = np.where(arr_ind_peaks < trough_ind)[0]
-        if ind_smaller.shape[0] == 0:
-            raise NoValidTroughException("No valid trough")
-
-        val_ind_smaller = arr_ind_peaks[ind_smaller]
-        peak_left_idx = arr_ind_peaks[
-            ind_smaller[np.argsort(val_ind_smaller)[-1]]
-        ]
+        try: peak_right_idx = arr_ind_peaks[arr_ind_peaks > trough_ind][0]
+        except IndexError: raise NoValidTroughException("No valid trough")
 
+        try: peak_left_idx = arr_ind_peaks[arr_ind_peaks < trough_ind][-1]
+        except IndexError: raise NoValidTroughException("No valid trough")
+
         return (
             peak_left_idx,
             peak_right_idx,
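
The rewritten lookup relies on arr_ind_peaks being sorted ascending (as returned by scipy.signal.find_peaks): the first index greater than the trough is the right peak, the last index smaller is the left peak, and an empty selection raises IndexError, which the method maps to NoValidTroughException. A tiny numpy illustration:

    import numpy as np

    arr_ind_peaks = np.array([3, 10, 25, 40])   # peak indices, ascending
    trough_ind = 18

    peak_right_idx = arr_ind_peaks[arr_ind_peaks > trough_ind][0]   # -> 25
    peak_left_idx = arr_ind_peaks[arr_ind_peaks < trough_ind][-1]   # -> 10
    print(peak_left_idx, peak_right_idx)
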
@@ -150,12 +139,10 @@ class SharpwaveAnalyzer(nm_features_abc.Feature):
         """
         for ch_idx, ch_name in enumerate(self.ch_names):
             for filter_name, filter in self.list_filter:
-
-
-
-
-                    data[ch_idx, :], filter, mode="same"
-                )
+                self.data_process_sw = (data[ch_idx, :]
+                    if filter_name == "no_filter"
+                    else signal.fftconvolve(data[ch_idx, :], filter, mode="same")
+                )
 
                 # check settings if troughs and peaks are analyzed
 
@@ -267,123 +254,103 @@ class SharpwaveAnalyzer(nm_features_abc.Feature):
             distance=self.sw_settings["detect_troughs"]["distance_troughs_ms"],
         )[0]
 
-        for
-
-
-
-
-
-
-
-
-
-
-
-
-        #
+        """ Find left and right peak indexes for each trough """
+        peak_pointer = 0
+        peak_idx_left = []
+        peak_idx_right = []
+        first_valid = last_valid = 0
+
+        for i, trough_idx in enumerate(troughs):
+
+            # Locate peak right of current trough
+            while peak_pointer < peaks.size and peaks[peak_pointer] < trough_idx:
+                peak_pointer += 1
+
+            if peak_pointer - 1 < 0:
+                # If trough has no peak to it's left, it's not valid
+                first_valid = i + 1  # Try with next one
                 continue
 
-
-
-
-
-            if self.sw_settings["sharpwave_features"]["interval"] is True:
-                if len(self.troughs_idx) > 1:
-                    # take the last identified trough idx
-                    # corresponds here to second last trough_idx
-
-                    interval = (trough_idx - self.troughs_idx[-2]) * (
-                        1000 / self.sfreq
-                    )
-                else:
-                    # set first interval to zero
-                    interval = 0
-                self.interval.append(interval)
-
-            if self.sw_settings["sharpwave_features"]["peak_left"] is True:
-                self.peak_left.append(peak_left)
-
-            if self.sw_settings["sharpwave_features"]["peak_right"] is True:
-                self.peak_right.append(peak_right)
-
-            if self.sw_settings["sharpwave_features"]["sharpness"] is True:
-                # check if sharpness can be calculated
-                # trough_idx 5 ms need to be consistent
-                if (trough_idx - int(5 * (1000 / self.sfreq)) <= 0) or (
-                    trough_idx + int(5 * (1000 / self.sfreq))
-                    >= self.data_process_sw.shape[0]
-                ):
-                    continue
-
-                sharpness = (
-                    (
-                        self.data_process_sw[trough_idx]
-                        - self.data_process_sw[
-                            trough_idx - int(5 * (1000 / self.sfreq))
-                        ]
-                    )
-                    + (
-                        self.data_process_sw[trough_idx]
-                        - self.data_process_sw[
-                            trough_idx + int(5 * (1000 / self.sfreq))
-                        ]
-                    )
-                ) / 2
-
-                self.sharpness.append(sharpness)
-
-            if self.sw_settings["sharpwave_features"]["rise_steepness"] is True:
-                # steepness is calculated as the first derivative
-                # from peak/trough to trough/peak
-                # here + 1 due to python syntax, s.t. the last element is included
-                rise_steepness = np.max(
-                    np.diff(
-                        self.data_process_sw[peak_idx_left : trough_idx + 1]
-                    )
-                )
-                self.rise_steepness.append(rise_steepness)
-
-            if (
-                self.sw_settings["sharpwave_features"]["decay_steepness"]
-                is True
-            ):
-                decay_steepness = np.max(
-                    np.diff(
-                        self.data_process_sw[trough_idx : peak_idx_right + 1]
-                    )
-                )
-                self.decay_steepness.append(decay_steepness)
-
-            if (
-                self.sw_settings["sharpwave_features"]["rise_steepness"] is True
-                and self.sw_settings["sharpwave_features"]["decay_steepness"]
-                is True
-                and self.sw_settings["sharpwave_features"]["slope_ratio"]
-                is True
-            ):
-                self.slope_ratio.append(rise_steepness - decay_steepness)
-
-            if self.sw_settings["sharpwave_features"]["prominence"] is True:
-                self.prominence.append(
-                    np.abs(
-                        (peak_right + peak_left) / 2
-                        - self.data_process_sw[trough_idx]
-                    )
-                )
-
-            if self.sw_settings["sharpwave_features"]["decay_time"] is True:
-                self.decay_time.append(
-                    (peak_idx_left - trough_idx) * (1000 / self.sfreq)
-                )  # ms
-
-            if self.sw_settings["sharpwave_features"]["rise_time"] is True:
-                self.rise_time.append(
-                    (peak_idx_right - trough_idx) * (1000 / self.sfreq)
-                )  # ms
-
-            if self.sw_settings["sharpwave_features"]["width"] is True:
-                self.width.append(peak_idx_right - peak_idx_left)  # ms
+            if peak_pointer == peaks.size:
+                # If we went past the end of the peaks list, trough had no peak to its right
+                continue
 
+            last_valid = i
+            peak_idx_left.append(peaks[peak_pointer - 1])
+            peak_idx_right.append(peaks[peak_pointer])
+
+        troughs = troughs[first_valid:last_valid + 1]  # Remove non valid troughs
+
+        peak_idx_left = np.array(peak_idx_left, dtype=np.integer)
+        peak_idx_right = np.array(peak_idx_right, dtype=np.integer)
+
+        peak_left = self.data_process_sw[peak_idx_left]
+        peak_right = self.data_process_sw[peak_idx_right]
+        trough_values = self.data_process_sw[troughs]
+
+        # No need to store trough data as it is not used anywhere else in the program
+        # self.trough.append(trough)
+        # self.troughs_idx.append(trough_idx)
+
+        """ Calculate features (vectorized) """
+
+        if self.sw_settings["sharpwave_features"]["interval"]:
+            self.interval = np.concatenate(([0], np.diff(troughs))) * (1000 / self.sfreq)
+
+        if self.sw_settings["sharpwave_features"]["peak_left"]:
+            self.peak_left = peak_left
+
+        if self.sw_settings["sharpwave_features"]["peak_right"]:
+            self.peak_right = peak_right
+
+        if self.sw_settings["sharpwave_features"]["sharpness"]:
+            # sharpess is calculated on a +- 5 ms window
+            # valid troughs need 5 ms of margin on both siddes
+            troughs_valid = troughs[np.logical_and(
+                troughs - int(5 * (1000 / self.sfreq)) > 0,
+                troughs + int(5 * (1000 / self.sfreq)) < self.data_process_sw.shape[0])]
+
+            self.sharpness = (
+                (self.data_process_sw[troughs_valid] - self.data_process_sw[troughs_valid - int(5 * (1000 / self.sfreq))]) +
+                (self.data_process_sw[troughs_valid] - self.data_process_sw[troughs_valid + int(5 * (1000 / self.sfreq))])
+            ) / 2
+
+        if (self.sw_settings["sharpwave_features"]["rise_steepness"] or
+                self.sw_settings["sharpwave_features"]["decay_steepness"]):
+
+            # steepness is calculated as the first derivative
+            steepness = np.concatenate(([0], np.diff(self.data_process_sw)))
+
+            if self.sw_settings["sharpwave_features"]["rise_steepness"]:  # left peak -> trough
+                # + 1 due to python syntax, s.t. the last element is included
+                self.rise_steepness = np.array([
+                    np.max(np.abs(steepness[peak_idx_left[i] : troughs[i] + 1]))
+                    for i in range(trough_idx.size)
+                ])
+
+            if self.sw_settings["sharpwave_features"]["decay_steepness"]:  # trough -> right peak
+                self.decay_steepness = np.array([
+                    np.max(np.abs(steepness[troughs[i] : peak_idx_right[i] + 1]))
+                    for i in range(trough_idx.size)
+                ])
+
+            if (self.sw_settings["sharpwave_features"]["rise_steepness"] and
+                    self.sw_settings["sharpwave_features"]["decay_steepness"] and
+                    self.sw_settings["sharpwave_features"]["slope_ratio"]):
+                self.slope_ratio = self.rise_steepness - self.decay_steepness
+
+        if self.sw_settings["sharpwave_features"]["prominence"]:
+            self.prominence = np.abs((peak_right + peak_left) / 2 - trough_values)
+
+        if self.sw_settings["sharpwave_features"]["decay_time"]:
+            self.decay_time = (peak_idx_left - troughs) * (1000 / self.sfreq)  # ms
+
+        if self.sw_settings["sharpwave_features"]["rise_time"]:
+            self.rise_time = (peak_idx_right - troughs) * (1000 / self.sfreq)  # ms
+
+        if self.sw_settings["sharpwave_features"]["width"]:
+            self.width = peak_idx_right - peak_idx_left  # ms
+
     @staticmethod
     def test_settings(
         s: dict,
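
The rewritten analyzer replaces the per-trough Python loop with a single pointer walk over the sorted peak array, pairing each trough with its nearest left and right peaks, and then computes the features on whole arrays. A standalone sketch of that pairing step on synthetic data (the signal, peak picking, and the two derived features are illustrative, not the package's exact pipeline):

    import numpy as np
    from scipy import signal

    rng = np.random.default_rng(0)
    x = np.sin(np.linspace(0, 20 * np.pi, 2000)) + 0.3 * rng.standard_normal(2000)
    peaks = signal.find_peaks(x)[0]
    troughs = signal.find_peaks(-x)[0]

    # For each trough, find the nearest peak to its left and right;
    # troughs without both neighbours are dropped.
    peak_pointer, left, right, valid = 0, [], [], []
    for t in troughs:
        while peak_pointer < peaks.size and peaks[peak_pointer] < t:
            peak_pointer += 1
        if peak_pointer == 0 or peak_pointer == peaks.size:
            continue  # no left or no right neighbour
        valid.append(t)
        left.append(peaks[peak_pointer - 1])
        right.append(peaks[peak_pointer])

    troughs, left, right = np.array(valid), np.array(left), np.array(right)
    width = right - left                                  # samples; multiply by 1000/sfreq for ms
    prominence = np.abs((x[left] + x[right]) / 2 - x[troughs])
    print(width[:3], prominence[:3])
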

py_neuromodulation/nm_stats.py CHANGED
@@ -2,6 +2,7 @@ import random
 import copy
 
 import matplotlib.pyplot as plt
+
 # from numba import njit
 import numpy as np
 import pandas as pd
@@ -33,7 +34,7 @@ def fitlm_kfold(x, y, kfold_splits=5):
         scores.append(score)
         coeffs = np.vstack((coeffs, model.coef_))
     coeffs = list(np.delete(coeffs, 0))
-    return scores, coeffs, model, [
+    return scores, coeffs, model, ["scores", "coeffs", "model"]
 
 
 def zscore(data):
@@ -76,14 +77,14 @@ def permutationTestSpearmansRho(x, y, plot_distr=True, x_unit=None, p=5000):
 
     # calculate p value
     if gT < 0:
-        p_val = len(np.where(pD <= gT)[0])/p
+        p_val = len(np.where(pD <= gT)[0]) / p
     else:
-        p_val = len(np.where(pD >= gT)[0])/p
+        p_val = len(np.where(pD >= gT)[0]) / p
 
     if plot_distr is True:
         plt.hist(pD, bins=30, label="permutation results")
         plt.axvline(gT, color="orange", label="ground truth")
-        plt.title("ground truth " + x_unit + "="+str(gT) + " p=" + str(p_val))
+        plt.title("ground truth " + x_unit + "=" + str(gT) + " p=" + str(p_val))
         plt.xlabel(x_unit)
         plt.legend()
         plt.show()
@@ -120,19 +121,23 @@ def permutationTest(x, y, plot_distr=True, x_unit=None, p=5000):
         random.shuffle(pS)
         # Compute permuted absolute difference of your two sampled
         # distributions and store it in pD:
-        pD.append(
-
+        pD.append(
+            np.abs(
+                np.average(pS[0 : int(len(pS) / 2)])
+                - np.average(pS[int(len(pS) / 2) :])
+            )
+        )
 
     # Calculate p-value
     if gT < 0:
-        p_val = len(np.where(pD <= gT)[0])/p
+        p_val = len(np.where(pD <= gT)[0]) / p
     else:
-        p_val = len(np.where(pD >= gT)[0])/p
+        p_val = len(np.where(pD >= gT)[0]) / p
 
     if plot_distr is True:
         plt.hist(pD, bins=30, label="permutation results")
         plt.axvline(gT, color="orange", label="ground truth")
-        plt.title("ground truth "+x_unit+"="+str(gT)+" p="+str(p_val))
+        plt.title("ground truth " + x_unit + "=" + str(gT) + " p=" + str(p_val))
         plt.xlabel(x_unit)
         plt.legend()
         plt.show()
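
The restored pD.append shows the null statistic used in permutationTest: after shuffling the pooled samples, the absolute difference between the means of the two halves. A compact sketch of the same idea with the conventional one-sided p-value (the package compares gT against pD with a sign-dependent rule, which differs slightly):

    import numpy as np

    def permutation_test_two_sample(x, y, n_perm=5000, seed=0):
        """Absolute difference of means under random relabeling (sketch, not the package function)."""
        rng = np.random.default_rng(seed)
        pooled = np.concatenate([x, y])
        observed = abs(np.mean(x) - np.mean(y))
        null = np.empty(n_perm)
        for i in range(n_perm):
            perm = rng.permutation(pooled)
            null[i] = abs(perm[: len(x)].mean() - perm[len(x):].mean())
        return observed, (null >= observed).mean()

    x = np.random.normal(0.0, 1.0, 50)
    y = np.random.normal(0.5, 1.0, 50)
    print(permutation_test_two_sample(x, y))
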
@@ -165,17 +170,20 @@ def permutationTest_relative(x, y, plot_distr=True, x_unit=None, p=5000):
             l_.append((x[i], y[i]))
         else:
             l_.append((y[i], x[i]))
-        pD.append(
-            np.
+        pD.append(
+            np.abs(
+                np.average(np.array(l_)[:, 0]) - np.average(np.array(l_)[:, 1])
+            )
+        )
     if gT < 0:
-        p_val = len(np.where(pD <= gT)[0])/p
+        p_val = len(np.where(pD <= gT)[0]) / p
     else:
-        p_val = len(np.where(pD >= gT)[0])/p
+        p_val = len(np.where(pD >= gT)[0]) / p
 
     if plot_distr is True:
         plt.hist(pD, bins=30, label="permutation results")
         plt.axvline(gT, color="orange", label="ground truth")
-        plt.title("ground truth "+x_unit+"="+str(gT)+" p="+str(p_val))
+        plt.title("ground truth " + x_unit + "=" + str(gT) + " p=" + str(p_val))
         plt.xlabel(x_unit)
         plt.legend()
         plt.show()
@@ -211,13 +219,13 @@ def permutation_numba_onesample(x, y, n_perm, two_tailed=True):
     """
     if two_tailed is True:
         zeroed = x - y
-        print(zeroed)
         z = np.abs(np.mean(zeroed))
         p = np.empty(n_perm)
         # Run the simulation n_perm times
         for i in np.arange(n_perm):
             sign = np.random.choice(
-                a=np.array([-1
+                a=np.array([-1.0, 1.0]), size=len(x), replace=True
+            )
             p[i] = np.abs(np.mean(zeroed * sign))
     else:
         zeroed = x - y
@@ -226,7 +234,8 @@ def permutation_numba_onesample(x, y, n_perm, two_tailed=True):
         # Run the simulation n_perm times
         for i in np.arange(n_perm):
             sign = np.random.choice(
-                a=np.array([-1
+                a=np.array([-1.0, 1.0]), size=len(x), replace=True
+            )
             p[i] = np.mean(zeroed * sign)
     # Return p-value
     return z, (np.sum(p >= z)) / n_perm
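
permutation_numba_onesample builds its null distribution by flipping the sign of each paired difference, now drawing from the float array [-1.0, 1.0] with the call fully closed. A sketch of the same sign-flip test using numpy's Generator API (the function name and defaults here are mine, not the package's):

    import numpy as np

    def permutation_onesample(diff, n_perm=5000, seed=0):
        """Sign-flip permutation test for paired differences (sketch)."""
        rng = np.random.default_rng(seed)
        z = np.abs(np.mean(diff))
        p = np.empty(n_perm)
        for i in range(n_perm):
            sign = rng.choice(np.array([-1.0, 1.0]), size=diff.size, replace=True)
            p[i] = np.abs(np.mean(diff * sign))
        return z, np.sum(p >= z) / n_perm

    diff = np.random.normal(0.3, 1.0, 40)  # paired differences x - y
    print(permutation_onesample(diff))
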
@@ -311,7 +320,8 @@ def cluster_wise_p_val_correction(p_arr, p_sig=0.05, num_permutations=10000):
         # first cluster is assigned to be 1 from measure.label
         index_cluster[cluster_i] = np.where(labels == cluster_i + 1)[0]
         p_cluster_sum[cluster_i] = np.sum(
-            np.array(1 - p_arr)[index_cluster[cluster_i]]
+            np.array(1 - p_arr)[index_cluster[cluster_i]]
+        )
     # p_min corresponds to the most unlikely cluster
     p_min = np.max(p_cluster_sum)
 
@@ -320,11 +330,13 @@ def cluster_wise_p_val_correction(p_arr, p_sig=0.05, num_permutations=10000):
     # loop through random permutation cycles
     r_per_arr = np.zeros(num_permutations)
     for r in range(num_permutations):
-        r_per = np.random.randint(
-
+        r_per = np.random.randint(
+            low=0, high=p_arr.shape[0], size=p_arr.shape[0]
+        )
 
-        labels, num_clusters = measure.label(
-
+        labels, num_clusters = measure.label(
+            p_arr[r_per] <= p_sig, return_num=True
+        )
 
         index_cluster = {}
         if num_clusters == 0:
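
cluster_wise_p_val_correction scores each contiguous run of p <= p_sig by the sum of (1 - p) inside it, then compares the largest observed cluster mass against the same statistic computed on resampled p-value arrays. A hedged sketch of that logic, using scipy.ndimage.label in place of the skimage measure.label call the package uses:

    import numpy as np
    from scipy import ndimage

    def max_cluster_mass(p_arr, p_sig=0.05):
        """Largest sum of (1 - p) over contiguous runs of p <= p_sig (sketch)."""
        labels, num_clusters = ndimage.label(p_arr <= p_sig)  # scipy stand-in for measure.label
        if num_clusters == 0:
            return 0.0
        return max(np.sum((1 - p_arr)[labels == i + 1]) for i in range(num_clusters))

    rng = np.random.default_rng(0)
    p_arr = rng.uniform(0, 1, 200)
    observed = max_cluster_mass(p_arr)

    # Null distribution from index-resampled p-values, as in the permutation loop above
    null = np.array([
        max_cluster_mass(p_arr[rng.integers(0, p_arr.shape[0], p_arr.shape[0])])
        for _ in range(1000)
    ])
    print(observed, (null >= observed).mean())
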
@@ -332,11 +344,12 @@ def cluster_wise_p_val_correction(p_arr, p_sig=0.05, num_permutations=10000):
         else:
             p_cluster_sum = np.zeros(num_clusters)
             for cluster_i in np.arange(num_clusters):
-                index_cluster[cluster_i] = np.where(
-
-
+                index_cluster[cluster_i] = np.where(labels == cluster_i + 1)[
+                    0
+                ]  # first cluster is assigned to be 1 from measure.label
                 p_cluster_sum[cluster_i] = np.sum(
-                    np.array(1 - p_arr[r_per])[index_cluster[cluster_i]]
+                    np.array(1 - p_arr[r_per])[index_cluster[cluster_i]]
+                )
         # corresponds to the most unlikely cluster
         r_per_arr[r] = np.max(p_cluster_sum)
 
@@ -432,7 +445,8 @@ def cluster_wise_p_val_correction_numba(p_arr, p_sig, n_perm):
     r_per_arr = np.zeros(n_perm_)
     for r in range(n_perm_):
         r_per = np.random.randint(
-            low=0, high=p_arr_.shape[0], size=p_arr_.shape[0]
+            low=0, high=p_arr_.shape[0], size=p_arr_.shape[0]
+        )
         labels_, n_clusters = cluster(p_arr_[r_per] <= p_sig_)
 
         cluster_ind = {}
@@ -441,10 +455,10 @@ def cluster_wise_p_val_correction_numba(p_arr, p_sig, n_perm):
         else:
             p_sum = np.zeros(n_clusters)
             for ind in range(n_clusters):
-                cluster_ind[ind] =
-                    np.where(labels_ == ind + 1)[0]
+                cluster_ind[ind] = np.where(labels_ == ind + 1)[0]
                 p_sum[ind] = np.sum(
-                    np.asarray(1 - p_arr_[r_per])[cluster_ind[ind]]
+                    np.asarray(1 - p_arr_[r_per])[cluster_ind[ind]]
+                )
         r_per_arr[r] = np.max(p_sum)
     return r_per_arr