eegdash 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of eegdash might be problematic.

The diff consists entirely of newly added files. Judging from their imports (`from .signal import *`, `from ..extractors import ...`), they form a feature-extraction subpackage with signal, spectral, complexity, dimensionality, connectivity, and CSP modules.

@@ -0,0 +1,6 @@
+from .signal import *
+from .spectral import *
+from .complexity import *
+from .dimensionality import *
+from .connectivity import *
+from .csp import *
@@ -0,0 +1,97 @@
+import numpy as np
+import numba as nb
+from sklearn.neighbors import KDTree
+
+from ..extractors import FeatureExtractor
+from ..decorators import FeaturePredecessor, univariate_feature
+
+
+__all__ = [
+    "EntropyFeatureExtractor",
+    "complexity_approx_entropy",
+    "complexity_sample_entropy",
+    "complexity_svd_entropy",
+    "complexity_lempel_ziv",
+]
+
+
+@nb.njit(cache=True, fastmath=True)
+def _create_embedding(x, dim, lag):
+    # one row per length-`dim` window, consecutive windows `lag` samples apart;
+    # the row index must advance by 1 per window, not by `lag`
+    y = np.empty(((x.shape[-1] - dim + 1) // lag, dim))
+    for j in range(y.shape[0]):
+        y[j] = x[j * lag : j * lag + dim]
+    return y
+
+
+def _channel_app_samp_entropy_counts(x, m, r, l):
+    x_emb = _create_embedding(x, m, l)
+    kdtree = KDTree(x_emb, metric="chebyshev")
+    return kdtree.query_radius(x_emb, r, count_only=True)
+
+
+class EntropyFeatureExtractor(FeatureExtractor):
+    def preprocess(self, x, m=2, r=0.2, l=1):
+        rr = r * x.std(axis=-1)
+        counts_m = np.empty((*x.shape[:-1], (x.shape[-1] - m + 1) // l))
+        counts_mp1 = np.empty((*x.shape[:-1], (x.shape[-1] - m) // l))
+        for i in np.ndindex(x.shape[:-1]):
+            counts_m[*i, :] = _channel_app_samp_entropy_counts(x[i], m, rr[i], l)
+            counts_mp1[*i, :] = _channel_app_samp_entropy_counts(x[i], m + 1, rr[i], l)
+        return counts_m, counts_mp1
+
+
+@FeaturePredecessor(EntropyFeatureExtractor)
+@univariate_feature
+def complexity_approx_entropy(counts_m, counts_mp1):
+    phi_m = np.log(counts_m / counts_m.shape[-1]).mean(axis=-1)
+    phi_mp1 = np.log(counts_mp1 / counts_mp1.shape[-1]).mean(axis=-1)
+    return phi_m - phi_mp1
+
+
+@FeaturePredecessor(EntropyFeatureExtractor)
+@univariate_feature
+def complexity_sample_entropy(counts_m, counts_mp1):
+    A = np.sum(counts_mp1 - 1, axis=-1)
+    B = np.sum(counts_m - 1, axis=-1)
+    return -np.log(A / B)
+
+
+@univariate_feature
+def complexity_svd_entropy(x, m=10, tau=1):
+    x_emb = np.empty((*x.shape[:-1], (x.shape[-1] - m + 1) // tau, m))
+    for i in np.ndindex(x.shape[:-1]):
+        x_emb[*i, :, :] = _create_embedding(x[i], m, tau)
+    s = np.linalg.svdvals(x_emb)
+    s /= s.sum(axis=-1, keepdims=True)
+    return -np.sum(s * np.log(s), axis=-1)
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def complexity_lempel_ziv(x, threshold=None):
+    lzc = np.empty(x.shape[:-1])
+    for i in np.ndindex(x.shape[:-1]):
+        t = np.median(x[i]) if threshold is None else threshold
+        s = x[i] > t
+        n = s.shape[0]
+        j, k, l = 0, 1, 1
+        k_max = 1
+        lzc[i] = 1
+        while True:
+            if s[j + k - 1] == s[l + k - 1]:
+                k += 1
+                if l + k > n:
+                    lzc[i] += 1
+                    break
+            else:
+                k_max = np.maximum(k, k_max)
+                j += 1
+                if j == l:
+                    lzc[i] += 1
+                    l += k_max
+                    if l + 1 > n:
+                        break
+                    j, k, k_max = 0, 1, 1
+                else:
+                    k = 1
+    return lzc
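
As a sanity check on the entropy pipeline, the same KDTree-based counting can be run outside the package's extractor/decorator machinery. A minimal single-channel sketch (the `sample_entropy` helper and the demo signals are illustrative, not part of the package):

```python
import numpy as np
from sklearn.neighbors import KDTree

def sample_entropy(x, m=2, r=0.2):
    # tolerance scaled by the signal's standard deviation, as in EntropyFeatureExtractor
    tol = r * x.std()

    def counts(dim):
        emb = np.lib.stride_tricks.sliding_window_view(x, dim)  # lag-1 delay embedding
        tree = KDTree(emb, metric="chebyshev")
        return tree.query_radius(emb, tol, count_only=True)

    # subtract 1 to discard each template's self-match, as complexity_sample_entropy does
    A = np.sum(counts(m + 1) - 1)
    B = np.sum(counts(m) - 1)
    return -np.log(A / B)

rng = np.random.default_rng(0)
print(sample_entropy(rng.standard_normal(1000)))                 # white noise: high (irregular)
print(sample_entropy(np.sin(np.linspace(0, 20 * np.pi, 1000))))  # sine: low (regular)
```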
@@ -0,0 +1,99 @@
+from itertools import chain
+import numpy as np
+from scipy.signal import csd
+
+from ..extractors import FeatureExtractor, BivariateFeature
+from ..decorators import FeaturePredecessor, bivariate_feature
+
+
+__all__ = [
+    "CoherenceFeatureExtractor",
+    "connectivity_magnitude_square_coherence",
+    "connectivity_imaginary_coherence",
+    "connectivity_lagged_coherence",
+]
+
+
+class CoherenceFeatureExtractor(FeatureExtractor):
+    def preprocess(self, x, **kwargs):
+        f_min = kwargs.pop("f_min", None)
+        f_max = kwargs.pop("f_max", None)
+        kwargs["axis"] = -1
+        n = x.shape[1]
+        idx_x, idx_y = BivariateFeature.get_pair_iterators(n)
+        ix, iy = list(chain(range(n), idx_x)), list(chain(range(n), idx_y))
+        f, s = csd(x[:, ix], x[:, iy], **kwargs)
+        if f_min is not None or f_max is not None:
+            f_min_idx = f > f_min if f_min is not None else True
+            f_max_idx = f < f_max if f_max is not None else True
+            idx = np.logical_and(f_min_idx, f_max_idx)
+            f = f[idx]
+            s = s[..., idx]
+        sx, sxy = np.split(s, [n], axis=1)
+        sxx, syy = sx[:, idx_x].real, sx[:, idx_y].real
+        c = sxy / np.sqrt(sxx * syy)
+        return f, c
+
+
+def _avg_over_bands(f, x, bands):
+    bands_avg = dict()
+    for k, v in bands.items():
+        assert isinstance(k, str)
+        assert isinstance(v, tuple)
+        assert len(v) == 2
+        assert v[0] < v[1]
+        mask = np.logical_and(f > v[0], f < v[1])
+        avg = x[..., mask].mean(axis=-1)
+        bands_avg[k] = avg
+    return bands_avg
+
+
+@FeaturePredecessor(CoherenceFeatureExtractor)
+@bivariate_feature
+def connectivity_magnitude_square_coherence(
+    f,
+    c,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    # https://neuroimage.usc.edu/brainstorm/Tutorials/Connectivity
+    coher = c.real**2 + c.imag**2
+    return _avg_over_bands(f, coher, bands)
+
+
+@FeaturePredecessor(CoherenceFeatureExtractor)
+@bivariate_feature
+def connectivity_imaginary_coherence(
+    f,
+    c,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    # https://neuroimage.usc.edu/brainstorm/Tutorials/Connectivity
+    coher = c.imag
+    return _avg_over_bands(f, coher, bands)
+
+
+@FeaturePredecessor(CoherenceFeatureExtractor)
+@bivariate_feature
+def connectivity_lagged_coherence(
+    f,
+    c,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    # https://neuroimage.usc.edu/brainstorm/Tutorials/Connectivity
+    coher = c.imag / np.sqrt(1 - c.real**2)  # Im(C) / sqrt(1 - Re(C)^2), Pascual-Marqui's lagged coherence
+    return _avg_over_bands(f, coher, bands)
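
The preprocess() above builds complex coherency from scipy.signal.csd; its magnitude-squared form can be cross-checked against scipy.signal.coherence on two synthetic channels that share a common source. A standalone sketch (signal construction and variable names are illustrative):

```python
import numpy as np
from scipy.signal import csd, coherence

fs = 256.0
rng = np.random.default_rng(0)
common = rng.standard_normal(4096)
x = common + 0.5 * rng.standard_normal(4096)  # two channels driven by one source
y = common + 0.5 * rng.standard_normal(4096)

f, sxy = csd(x, y, fs=fs, nperseg=512)        # cross-spectral density
_, sxx = csd(x, x, fs=fs, nperseg=512)        # auto-spectra (imaginary part is zero)
_, syy = csd(y, y, fs=fs, nperseg=512)
c = sxy / np.sqrt(sxx.real * syy.real)        # complex coherency, as in preprocess()
msc = c.real**2 + c.imag**2                   # magnitude-squared coherence

_, msc_ref = coherence(x, y, fs=fs, nperseg=512)
print(np.allclose(msc, msc_ref))              # both routes agree
```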
@@ -0,0 +1,102 @@
+import numpy as np
+import numba as nb
+import scipy
+import scipy.linalg
+
+from ..extractors import FitableFeature
+from ..decorators import multivariate_feature
+
+
+__all__ = [
+    "CommonSpatialPattern",
+]
+
+
+@nb.njit(cache=True, fastmath=True, parallel=True)
+def _update_mean_cov(count, mean, cov, x_count, x_mean, x_cov):
+    alpha2 = x_count / count
+    alpha1 = 1 - alpha2
+    cov[:] = alpha1 * (cov + np.outer(mean, mean))
+    cov[:] += alpha2 * (x_cov + np.outer(x_mean, x_mean))
+    mean[:] = alpha1 * mean + alpha2 * x_mean
+    cov[:] -= np.outer(mean, mean)
+
+
+@multivariate_feature
+class CommonSpatialPattern(FitableFeature):
+    def __init__(self):
+        super().__init__()
+
+    def clear(self):
+        self._labels = None
+        self._counts = np.array([0, 0])
+        self._means = np.array([None, None])
+        self._covs = np.array([None, None])
+        self._mean = None
+        self._eigvals = None
+        self._weights = None
+
+    def _update_labels(self, labels):
+        if self._labels is None:
+            self._labels = labels
+        else:
+            for label in labels:
+                if label not in self._labels:
+                    self._labels = np.append(self._labels, label)
+        assert self._labels.shape[0] < 3
+        return self._labels
+
+    def _update_stats(self, l, x):
+        x_count, x_mean, x_cov = x.shape[0], x.mean(axis=0), np.cov(x.T, ddof=0)
+        if self._counts[l] == 0:
+            self._counts[l] = x_count
+            self._means[l] = x_mean
+            self._covs[l] = x_cov
+        else:
+            self._counts[l] += x_count
+            _update_mean_cov(
+                self._counts[l], self._means[l], self._covs[l], x_count, x_mean, x_cov
+            )
+
+    def partial_fit(self, x, y=None):
+        labels = self._update_labels(np.unique(y))
+        for i, l in enumerate(labels):
+            ind = (y == l).nonzero()[0]
+            if ind.shape[0] > 0:
+                xl = self.transform_input(x[ind])
+                self._update_stats(i, xl)
+
+    @staticmethod
+    def transform_input(x):
+        return x.swapaxes(1, 2).reshape(-1, x.shape[1])
+
+    def fit(self):
+        alphas = self._counts / self._counts.sum()
+        self._mean = np.sum(alphas * self._means)
+        for l in range(len(self._labels)):
+            # unbias each class covariance by its own sample count
+            self._covs[l] *= self._counts[l] / (self._counts[l] - 1)
+        l, w = scipy.linalg.eig(self._covs[0], self._covs[0] + self._covs[1])
+        l = l.real
+        ind = l > 0
+        l, w = l[ind], w[:, ind]
+        ord = np.abs(l - 0.5).argsort()[::-1]
+        self._eigvals = l[ord]
+        self._weights = w[:, ord]
+        super().fit()
+
+    def __call__(self, x, n_select=None, crit_select=None):
+        super().__call__()
+        w = self._weights
+        if n_select:
+            w = w[:, :n_select]
+        if crit_select:
+            sel = 0.5 - np.abs(self._eigvals - 0.5) < crit_select
+            w = w[:, sel]
+        if w.shape[-1] == 0:
+            raise RuntimeError(
+                "CSP weights selection criterion is too strict, "
+                "all weights were filtered out."
+            )
+        proj = (self.transform_input(x) - self._mean) @ w
+        proj = proj.reshape(x.shape[0], x.shape[2], -1).mean(axis=1)
+        return {f"{i}": proj[:, i] for i in range(proj.shape[-1])}
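
fit() solves the generalized eigenproblem cov0·w = λ·(cov0 + cov1)·w, so eigenvalues lie in (0, 1) and the most discriminative spatial filters sit at either extreme, which is why the weights are ordered by |λ − 0.5|. A standalone sketch using scipy.linalg.eigh, the symmetric counterpart of the scipy.linalg.eig call above (toy data and names are illustrative):

```python
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(0)
# toy two-class data, shaped (trials, channels, times); class b has extra power on channel 0
xa = rng.standard_normal((40, 4, 200))
xb = rng.standard_normal((40, 4, 200))
xb[:, 0] *= 3.0

def class_cov(x):
    x2d = x.swapaxes(1, 2).reshape(-1, x.shape[1])  # stack trials and times, as transform_input does
    return np.cov(x2d.T)

ca, cb = class_cov(xa), class_cov(xb)
evals, w = eigh(ca, ca + cb)                   # generalized eigenproblem, eigenvalues in (0, 1)
order = np.abs(evals - 0.5).argsort()[::-1]    # most discriminative filters first
evals, w = evals[order], w[:, order]
print(evals.round(2))                          # values near 0 or 1 indicate strong class contrast
```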
@@ -0,0 +1,108 @@
+import numpy as np
+import numba as nb
+from scipy import special
+
+from ..decorators import univariate_feature
+from .signal import signal_zero_crossings
+
+
+__all__ = [
+    "dimensionality_higuchi_fractal_dim",
+    "dimensionality_petrosian_fractal_dim",
+    "dimensionality_katz_fractal_dim",
+    "dimensionality_hurst_exp",
+    "dimensionality_detrended_fluctuation_analysis",
+]
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def dimensionality_higuchi_fractal_dim(x, k_max=10, eps=1e-7):
+    N = x.shape[-1]
+    hfd = np.empty(x.shape[:-1])
+    log_k = np.vstack((-np.log(np.arange(1, k_max + 1)), np.ones(k_max))).T
+    L_km = np.empty(k_max)
+    L_k = np.empty(k_max)
+    for i in np.ndindex(x.shape[:-1]):
+        for k in range(1, k_max + 1):
+            for m in range(k):
+                # Higuchi uses lag-k increments from offset m, not the k-th order difference
+                L_km[m] = np.mean(np.abs(np.diff(x[*i, m::k])))
+            L_k[k - 1] = (N - 1) * np.sum(L_km[:k]) / (k**3)
+        L_k = np.maximum(L_k, eps)
+        hfd[i] = np.linalg.lstsq(log_k, np.log(L_k))[0][0]
+    return hfd
+
+
+@univariate_feature
+def dimensionality_petrosian_fractal_dim(x):
+    nd = signal_zero_crossings(np.diff(x, axis=-1))
+    log_n = np.log(x.shape[-1])
+    return log_n / (np.log(nd) + log_n)
+
+
+@univariate_feature
+def dimensionality_katz_fractal_dim(x):
+    dists = np.abs(np.diff(x, axis=-1))
+    L = dists.sum(axis=-1)
+    a = dists.mean(axis=-1)
+    log_n = np.log(L / a)
+    d = np.abs(x[..., 1:] - x[..., 0, None]).max(axis=-1)
+    return log_n / (np.log(d / L) + log_n)
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def _hurst_exp(x, ns, a, gamma_ratios, log_n):
+    h = np.empty(x.shape[:-1])
+    rs = np.empty((ns.shape[0], x.shape[-1] // ns[0]))
+    log_rs = np.empty(ns.shape[0])
+    for i in np.ndindex(x.shape[:-1]):
+        for j, n in enumerate(ns):
+            for k, t0 in enumerate(range(0, x.shape[-1], n)):
+                xj = x[i][t0 : t0 + n]
+                m = np.mean(xj)
+                y = xj - m
+                z = np.cumsum(y)
+                r = np.ptp(z)
+                s = np.sqrt(np.mean(y**2))
+                if s == 0.0:
+                    rs[j, k] = np.nan
+                else:
+                    rs[j, k] = r / s
+            log_rs[j] = np.log(np.nanmean(rs[j, : x.shape[-1] // n]))
+            # Anis-Lloyd correction for the expected R/S of short series
+            log_rs[j] -= np.log(np.sum(np.sqrt((n - a[:n]) / a[:n])) * gamma_ratios[j])
+        h[i] = 0.5 + np.linalg.lstsq(log_n, log_rs)[0][0]
+    return h
+
+
+@univariate_feature
+def dimensionality_hurst_exp(x):
+    ns = np.unique(np.power(2, np.arange(2, np.log2(x.shape[-1]) - 1)).astype(int))
+    idx = ns > 340
+    gamma_ratios = np.empty(ns.shape[0])
+    gamma_ratios[idx] = 1 / np.sqrt(ns[idx] / 2)
+    gamma_ratios[~idx] = special.gamma((ns[~idx] - 1) / 2) / special.gamma(ns[~idx] / 2)
+    gamma_ratios /= np.sqrt(np.pi)
+    log_n = np.vstack((np.log(ns), np.ones(ns.shape[0]))).T
+    a = np.arange(1, ns[-1], dtype=float)
+    return _hurst_exp(x, ns, a, gamma_ratios, log_n)
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def dimensionality_detrended_fluctuation_analysis(x):
+    ns = np.unique(np.floor(np.power(2, np.arange(2, np.log2(x.shape[-1]) - 1))))
+    a = np.vstack((np.arange(ns[-1]), np.ones(int(ns[-1])))).T
+    log_n = np.vstack((np.log(ns), np.ones(ns.shape[0]))).T
+    Fn = np.empty(ns.shape[0])
+    alpha = np.empty(x.shape[:-1])
+    for i in np.ndindex(x.shape[:-1]):
+        X = np.cumsum(x[i] - np.mean(x[i]))
+        for j, n in enumerate(ns):
+            n = int(n)
+            # columns of Z are contiguous length-n windows of the profile X
+            Z = np.reshape(X[: n * (X.shape[0] // n)], (X.shape[0] // n, n)).T
+            # lstsq residuals give the squared detrending error per window
+            Fni2 = np.linalg.lstsq(a[:n], Z)[1] / n
+            Fn[j] = np.sqrt(np.mean(Fni2))
+        alpha[i] = np.linalg.lstsq(log_n, np.log(Fn))[0][0]
+    return alpha
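
Higuchi's method measures curve length at coarser and coarser lags k and reads the fractal dimension off the slope of log L(k) against -log k. A standalone single-channel sketch with the same normalization as the routine above (the helper name and demo signals are illustrative); white noise should land near 2 and a straight line near 1:

```python
import numpy as np

def higuchi_fd(x, k_max=10):
    N = x.size
    L_k = np.empty(k_max)
    for k in range(1, k_max + 1):
        # average lag-k curve length over the k possible starting offsets m
        L_m = [np.abs(np.diff(x[m::k])).mean() for m in range(k)]
        L_k[k - 1] = (N - 1) * np.sum(L_m) / k**3
    # slope of log L(k) vs. -log k gives the dimension
    A = np.vstack((-np.log(np.arange(1, k_max + 1)), np.ones(k_max))).T
    return np.linalg.lstsq(A, np.log(L_k), rcond=None)[0][0]

rng = np.random.default_rng(0)
print(higuchi_fd(rng.standard_normal(2000)))    # white noise: close to 2
print(higuchi_fd(np.linspace(0.0, 1.0, 2000)))  # straight line: close to 1
```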
@@ -0,0 +1,103 @@
+import numbers
+import numpy as np
+from scipy import stats
+
+from ..decorators import univariate_feature
+
+
+__all__ = [
+    "signal_mean",
+    "signal_variance",
+    "signal_skewness",
+    "signal_kurtosis",
+    "signal_std",
+    "signal_root_mean_square",
+    "signal_peak_to_peak",
+    "signal_quantile",
+    "signal_zero_crossings",
+    "signal_line_length",
+    "signal_hjorth_activity",
+    "signal_hjorth_mobility",
+    "signal_hjorth_complexity",
+    "signal_decorrelation_time",
+]
+
+
+@univariate_feature
+def signal_mean(x):
+    return x.mean(axis=-1)
+
+
+@univariate_feature
+def signal_variance(x, **kwargs):
+    return x.var(axis=-1, **kwargs)
+
+
+@univariate_feature
+def signal_std(x, **kwargs):
+    return x.std(axis=-1, **kwargs)
+
+
+@univariate_feature
+def signal_skewness(x, **kwargs):
+    return stats.skew(x, axis=x.ndim - 1, **kwargs)
+
+
+@univariate_feature
+def signal_kurtosis(x, **kwargs):
+    return stats.kurtosis(x, axis=x.ndim - 1, **kwargs)
+
+
+@univariate_feature
+def signal_root_mean_square(x):
+    return np.sqrt(np.power(x, 2).mean(axis=-1))
+
+
+@univariate_feature
+def signal_peak_to_peak(x, **kwargs):
+    return np.ptp(x, axis=-1, **kwargs)
+
+
+@univariate_feature
+def signal_quantile(x, q: numbers.Number = 0.5, **kwargs):
+    return np.quantile(x, q=q, axis=-1, **kwargs)
+
+
+@univariate_feature
+def signal_line_length(x):
+    return np.abs(np.diff(x, axis=-1)).mean(axis=-1)
+
+
+@univariate_feature
+def signal_zero_crossings(x, threshold=1e-15):
+    zero_ind = np.logical_and(x > -threshold, x < threshold)
+    zero_cross = np.diff(zero_ind, axis=-1).astype(int).sum(axis=-1)
+    y = x.copy()
+    y[zero_ind] = 0
+    zero_cross += np.sum(np.signbit(y[..., :-1]) != np.signbit(y[..., 1:]), axis=-1)
+    return zero_cross
+
+
+@univariate_feature
+def signal_hjorth_mobility(x):
+    return np.diff(x, axis=-1).std(axis=-1) / x.std(axis=-1)
+
+
+@univariate_feature
+def signal_hjorth_complexity(x):
+    return np.diff(x, 2, axis=-1).std(axis=-1) / x.std(axis=-1)
+
+
+@univariate_feature
+def signal_decorrelation_time(x, fs=1):
+    f = np.fft.fft(x - x.mean(axis=-1, keepdims=True), axis=-1)
+    # autocorrelation via the Wiener-Khinchin theorem; keep only the real part
+    ac = np.fft.ifft(f.real**2 + f.imag**2, axis=-1).real[..., : x.shape[-1] // 2]
+    dct = np.empty(x.shape[:-1])
+    for i in np.ndindex(x.shape[:-1]):
+        dct[i] = np.argmax(ac[i] <= 0)  # first lag at which the autocorrelation is nonpositive
+    return dct / fs
+
+
+# ================================= Aliases =================================
+
+signal_hjorth_activity = signal_variance
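
signal_decorrelation_time estimates the first lag at which the autocorrelation, computed through the FFT via the Wiener-Khinchin theorem, drops to zero. A quick standalone check on a noisy 5 Hz sine, whose autocorrelation first crosses zero a quarter period in (sampling rate and amplitudes are illustrative):

```python
import numpy as np

fs = 100.0
rng = np.random.default_rng(0)
t = np.arange(1000) / fs
x = np.sin(2 * np.pi * 5 * t) + 0.1 * rng.standard_normal(t.size)

xc = x - x.mean()
f = np.fft.fft(xc)
ac = np.fft.ifft(f.real**2 + f.imag**2).real[: x.size // 2]  # autocorrelation via FFT
lag = np.argmax(ac <= 0)   # first nonpositive lag
print(lag / fs)            # ~0.05 s, a quarter period of the 5 Hz sine
```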
@@ -0,0 +1,134 @@
+import numpy as np
+import numba as nb
+from scipy.signal import welch
+
+from ..extractors import FeatureExtractor
+from ..decorators import FeaturePredecessor, univariate_feature
+
+
+__all__ = [
+    "SpectralFeatureExtractor",
+    "NormalizedSpectralFeatureExtractor",
+    "DBSpectralFeatureExtractor",
+    "spectral_root_total_power",
+    "spectral_moment",
+    "spectral_entropy",
+    "spectral_edge",
+    "spectral_slope",
+    "spectral_bands_power",
+    "spectral_hjorth_activity",
+    "spectral_hjorth_mobility",
+    "spectral_hjorth_complexity",
+]
+
+
+class SpectralFeatureExtractor(FeatureExtractor):
+    def preprocess(self, x, **kwargs):
+        f_min = kwargs.pop("f_min", None)
+        f_max = kwargs.pop("f_max", None)
+        kwargs["axis"] = -1
+        f, p = welch(x, **kwargs)
+        if f_min is not None or f_max is not None:
+            f_min_idx = f > f_min if f_min is not None else True
+            f_max_idx = f < f_max if f_max is not None else True
+            idx = np.logical_and(f_min_idx, f_max_idx)
+            f = f[idx]
+            p = p[..., idx]
+        return f, p
+
+
+@FeaturePredecessor(SpectralFeatureExtractor)
+class NormalizedSpectralFeatureExtractor(FeatureExtractor):
+    def preprocess(self, *x):
+        return (*x[:-1], x[-1] / x[-1].sum(axis=-1, keepdims=True))
+
+
+@FeaturePredecessor(SpectralFeatureExtractor)
+class DBSpectralFeatureExtractor(FeatureExtractor):
+    def preprocess(self, *x, eps=1e-15):
+        return (*x[:-1], 10 * np.log10(x[-1] + eps))
+
+
+@FeaturePredecessor(SpectralFeatureExtractor)
+@univariate_feature
+def spectral_root_total_power(f, p):
+    return np.sqrt(p.sum(axis=-1))
+
+
+@FeaturePredecessor(NormalizedSpectralFeatureExtractor)
+@univariate_feature
+def spectral_moment(f, p):
+    return np.sum(f * p, axis=-1)
+
+
+@FeaturePredecessor(SpectralFeatureExtractor)
+@univariate_feature
+def spectral_hjorth_activity(f, p):
+    return np.sum(p, axis=-1)
+
+
+@FeaturePredecessor(NormalizedSpectralFeatureExtractor)
+@univariate_feature
+def spectral_hjorth_mobility(f, p):
+    return np.sqrt(np.sum(np.power(f, 2) * p, axis=-1))
+
+
+@FeaturePredecessor(NormalizedSpectralFeatureExtractor)
+@univariate_feature
+def spectral_hjorth_complexity(f, p):
+    return np.sqrt(np.sum(np.power(f, 4) * p, axis=-1))
+
+
+@FeaturePredecessor(NormalizedSpectralFeatureExtractor)
+@univariate_feature
+def spectral_entropy(f, p):
+    idx = p > 0
+    plogp = np.zeros_like(p)
+    plogp[idx] = p[idx] * np.log(p[idx])
+    return -np.sum(plogp, axis=-1)
+
+
+@FeaturePredecessor(NormalizedSpectralFeatureExtractor)
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def spectral_edge(f, p, edge=0.9):
+    se = np.empty(p.shape[:-1])
+    for i in np.ndindex(p.shape[:-1]):
+        se[i] = f[np.searchsorted(np.cumsum(p[i]), edge)]
+    return se
+
+
+@FeaturePredecessor(DBSpectralFeatureExtractor)
+@univariate_feature
+def spectral_slope(f, p):
+    log_f = np.vstack((np.log(f), np.ones(f.shape[0]))).T
+    r = np.linalg.lstsq(log_f, p.reshape(-1, p.shape[-1]).T)[0]
+    r = r.reshape(2, *p.shape[:-1])
+    return {"exp": r[0], "int": r[1]}
+
+
+@FeaturePredecessor(
+    SpectralFeatureExtractor,
+    NormalizedSpectralFeatureExtractor,
+    DBSpectralFeatureExtractor,
+)
+@univariate_feature
+def spectral_bands_power(
+    f,
+    p,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    bands_power = dict()
+    for k, v in bands.items():
+        assert isinstance(k, str)
+        assert isinstance(v, tuple)
+        assert len(v) == 2
+        mask = np.logical_and(f > v[0], f < v[1])
+        power = p[..., mask].sum(axis=-1)
+        bands_power[k] = power
+    return bands_power
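
spectral_bands_power sums the Welch periodogram inside each band's open frequency interval. The same computation in standalone form, on a synthetic signal with a dominant 10 Hz (alpha-band) rhythm (sampling rate and band edges mirror the defaults above; the demo values are illustrative):

```python
import numpy as np
from scipy.signal import welch

fs = 250.0
rng = np.random.default_rng(0)
t = np.arange(2500) / fs
x = np.sin(2 * np.pi * 10 * t) + 0.5 * rng.standard_normal(t.size)  # 10 Hz rhythm + noise

f, p = welch(x, fs=fs, nperseg=512)
bands = {"delta": (1, 4.5), "theta": (4.5, 8), "alpha": (8, 12), "beta": (12, 30)}
power = {k: p[np.logical_and(f > lo, f < hi)].sum() for k, (lo, hi) in bands.items()}
total = sum(power.values())
print({k: round(v / total, 3) for k, v in power.items()})  # alpha dominates
```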