eegdash 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of eegdash might be problematic.

@@ -0,0 +1,43 @@
+from typing import List, Type
+from collections.abc import Callable
+
+from .extractors import (
+    FeatureExtractor,
+    UnivariateFeature,
+    BivariateFeature,
+    DirectedBivariateFeature,
+    MultivariateFeature,
+)
+from .extractors import _get_underlying_func
+
+
+class FeaturePredecessor:
+    def __init__(self, *parent_extractor_type: List[Type]):
+        parent_cls = parent_extractor_type
+        if not parent_cls:
+            parent_cls = [FeatureExtractor]
+        for p_cls in parent_cls:
+            assert issubclass(p_cls, FeatureExtractor)
+        self.parent_extractor_type = parent_cls
+
+    def __call__(self, func: Callable):
+        f = _get_underlying_func(func)
+        f.parent_extractor_type = self.parent_extractor_type
+        return func
+
+
+class FeatureKind:
+    def __init__(self, feature_kind: MultivariateFeature):
+        self.feature_kind = feature_kind
+
+    def __call__(self, func):
+        f = _get_underlying_func(func)
+        f.feature_kind = self.feature_kind
+        return func
+
+
+# Syntax sugar
+univariate_feature = FeatureKind(UnivariateFeature())
+bivariate_feature = FeatureKind(BivariateFeature())
+directed_bivariate_feature = FeatureKind(DirectedBivariateFeature())
+multivariate_feature = FeatureKind(MultivariateFeature())
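
For orientation, a minimal usage sketch (not part of the diff; the import paths and the signal_mean feature are illustrative assumptions, since file names are not shown in the hunk headers). Both decorators return the wrapped callable unchanged and only attach metadata, which FeatureExtractor later reads to validate the execution tree and to map raw outputs onto channel names:

import numpy as np
from eegdash.features.decorators import FeaturePredecessor, univariate_feature  # path assumed
from eegdash.features.extractors import FeatureExtractor, UnivariateFeature     # path assumed

@FeaturePredecessor(FeatureExtractor)  # may run under a plain FeatureExtractor
@univariate_feature                    # one output value per input channel
def signal_mean(x):                    # hypothetical feature
    return x.mean(axis=-1)

assert signal_mean.parent_extractor_type == (FeatureExtractor,)
assert isinstance(signal_mean.feature_kind, UnivariateFeature)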
@@ -0,0 +1,209 @@
+from abc import ABC, abstractmethod
+from typing import Dict
+from collections.abc import Callable
+from functools import partial
+import numpy as np
+from numba.core.dispatcher import Dispatcher
+
+
+def _get_underlying_func(func):
+    f = func
+    if isinstance(f, partial):
+        f = f.func
+    if isinstance(f, Dispatcher):
+        f = f.py_func
+    return f
+
+
+class FitableFeature(ABC):
+    def __init__(self):
+        self._is_fitted = False
+        self.clear()
+
+    @abstractmethod
+    def clear(self):
+        pass
+
+    @abstractmethod
+    def partial_fit(self, *x, y=None):
+        pass
+
+    def fit(self):
+        self._is_fitted = True
+
+    def __call__(self, *args, **kwargs):
+        if not self._is_fitted:
+            raise RuntimeError(
+                f"{self.__class__} cannot be called, it has to be fitted first."
+            )
+
+
+class FeatureExtractor(FitableFeature):
+    def __init__(
+        self, feature_extractors: Dict[str, Callable], **preprocess_kwargs: Dict
+    ):
+        self.feature_extractors_dict = self._validate_execution_tree(feature_extractors)
+        self._is_fitable = self._check_is_fitable(feature_extractors)
+        super().__init__()
+
+        # bypassing FeaturePredecessor to avoid circular import
+        if not hasattr(self, "parent_extractor_type"):
+            self.parent_extractor_type = [FeatureExtractor]
+
+        self.preprocess_kwargs = preprocess_kwargs
+        if self.preprocess_kwargs is None:
+            self.preprocess_kwargs = dict()
+        self.features_kwargs = {
+            "preprocess_kwargs": preprocess_kwargs,
+        }
+        for fn, fe in feature_extractors.items():
+            if isinstance(fe, FeatureExtractor):
+                self.features_kwargs[fn] = fe.features_kwargs
+            if isinstance(fe, partial):
+                self.features_kwargs[fn] = fe.keywords
+
+    def _validate_execution_tree(self, feature_extractors):
+        for fname, f in feature_extractors.items():
+            f = _get_underlying_func(f)
+            pe_type = getattr(f, "parent_extractor_type", [FeatureExtractor])
+            assert type(self) in pe_type
+        return feature_extractors
+
+    def _check_is_fitable(self, feature_extractors):
+        is_fitable = False
+        for fname, f in feature_extractors.items():
+            if isinstance(f, FeatureExtractor):
+                is_fitable = f._is_fitable
+            else:
+                f = _get_underlying_func(f)
+                if isinstance(f, FitableFeature):
+                    is_fitable = True
+            if is_fitable:
+                break
+        return is_fitable
+
+    def preprocess(self, *x, **kwargs):
+        return (*x,)
+
+    def feature_channel_names(self, ch_names):
+        return [""]
+
+    def __call__(self, *x, _batch_size=None, _ch_names=None):
+        assert _batch_size is not None
+        assert _ch_names is not None
+        if self._is_fitable:
+            super().__call__()
+        results_dict = dict()
+        z = self.preprocess(*x, **self.preprocess_kwargs)
+        for fname, f in self.feature_extractors_dict.items():
+            if isinstance(f, FeatureExtractor):
+                r = f(*z, _batch_size=_batch_size, _ch_names=_ch_names)
+            else:
+                r = f(*z)
+            f = _get_underlying_func(f)
+            if hasattr(f, "feature_kind"):
+                r = f.feature_kind(r, _ch_names=_ch_names)
+            if not isinstance(fname, str) or not fname:
+                if isinstance(f, FeatureExtractor) or not hasattr(f, "__name__"):
+                    fname = ""
+                else:
+                    fname = f.__name__
+            if isinstance(r, dict):
+                if fname:
+                    fname += "_"
+                for k, v in r.items():
+                    self._add_feature_to_dict(results_dict, fname + k, v, _batch_size)
+            else:
+                self._add_feature_to_dict(results_dict, fname, r, _batch_size)
+        return results_dict
+
+    def _add_feature_to_dict(self, results_dict, name, value, batch_size):
+        if not isinstance(value, np.ndarray):
+            results_dict[name] = value
+        else:
+            assert value.shape[0] == batch_size
+            results_dict[name] = value
+
+    def clear(self):
+        if not self._is_fitable:
+            return
+        for fname, f in self.feature_extractors_dict.items():
+            f = _get_underlying_func(f)
+            if isinstance(f, FitableFeature):
+                f.clear()
+
+    def partial_fit(self, *x, y=None):
+        if not self._is_fitable:
+            return
+        z = self.preprocess(*x, **self.preprocess_kwargs)
+        for fname, f in self.feature_extractors_dict.items():
+            f = _get_underlying_func(f)
+            if isinstance(f, FitableFeature):
+                f.partial_fit(*z, y=y)
+
+    def fit(self):
+        if not self._is_fitable:
+            return
+        for fname, f in self.feature_extractors_dict.items():
+            f = _get_underlying_func(f)
+            if isinstance(f, FitableFeature):
+                f.fit()
+        super().fit()
+
+
+class MultivariateFeature:
+    def __call__(self, x, _ch_names=None):
+        assert _ch_names is not None
+        f_channels = self.feature_channel_names(_ch_names)
+        if isinstance(x, dict):
+            r = dict()
+            for k, v in x.items():
+                r.update(self._array_to_dict(v, f_channels, k))
+            return r
+        return self._array_to_dict(x, f_channels)
+
+    @staticmethod
+    def _array_to_dict(x, f_channels, name=""):
+        assert isinstance(x, np.ndarray)
+        if len(f_channels) == 0:
+            assert x.ndim == 1
+            if name:
+                return {name: x}
+            return x
+        assert x.shape[1] == len(f_channels)
+        x = x.swapaxes(0, 1)
+        names = [f"{name}_{ch}" for ch in f_channels] if name else f_channels
+        return dict(zip(names, x))
+
+    def feature_channel_names(self, ch_names):
+        return []
+
+
+class UnivariateFeature(MultivariateFeature):
+    def feature_channel_names(self, ch_names):
+        return ch_names
+
+
+class BivariateFeature(MultivariateFeature):
+    def __init__(self, *args, channel_pair_format="{}<>{}"):
+        super().__init__(*args)
+        self.channel_pair_format = channel_pair_format
+
+    @staticmethod
+    def get_pair_iterators(n):
+        return np.triu_indices(n, 1)
+
+    def feature_channel_names(self, ch_names):
+        return [
+            self.channel_pair_format.format(ch_names[i], ch_names[j])
+            for i, j in zip(*self.get_pair_iterators(len(ch_names)))
+        ]
+
+
+class DirectedBivariateFeature(BivariateFeature):
+    @staticmethod
+    def get_pair_iterators(n):
+        return [
+            np.append(a, b)
+            for a, b in zip(np.tril_indices(n, -1), np.triu_indices(n, 1))
+        ]
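
A minimal end-to-end sketch of the extractor protocol (illustrative, not from the package; the import path and the (batch, channels, times) layout are assumptions). A FeatureExtractor is built from a dict of named callables; calling it requires the private _batch_size and _ch_names arguments, and it returns a flat dict whose array values carry the batch dimension first:

import numpy as np
from eegdash.features.extractors import FeatureExtractor  # path assumed

x = np.random.randn(8, 4, 256)  # assumed (batch, channels, times)
fx = FeatureExtractor({"var": lambda z: z.var(axis=-1)})
out = fx(x, _batch_size=8, _ch_names=["C3", "C4", "Cz", "Pz"])
print(out["var"].shape)  # (8, 4): one row per batch element, one column per channel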
@@ -0,0 +1,6 @@
+from .signal import *
+from .spectral import *
+from .complexity import *
+from .dimensionality import *
+from .connectivity import *
+from .csp import *
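
If this file is the feature subpackage's __init__.py (file paths are not shown in this diff), the star imports flatten all feature modules into a single namespace, so downstream code can write, e.g., from eegdash.features import complexity_sample_entropy (path assumed).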
@@ -0,0 +1,97 @@
+import numpy as np
+import numba as nb
+from sklearn.neighbors import KDTree
+
+from ..extractors import FeatureExtractor
+from ..decorators import FeaturePredecessor, univariate_feature
+
+
+__all__ = [
+    "EntropyFeatureExtractor",
+    "complexity_approx_entropy",
+    "complexity_sample_entropy",
+    "complexity_svd_entropy",
+    "complexity_lempel_ziv",
+]
+
+
+@nb.njit(cache=True, fastmath=True)
+def _create_embedding(x, dim, lag):
+    y = np.empty(((x.shape[-1] - dim + 1) // lag, dim))
+    for i in range(y.shape[0]):
+        y[i] = x[i * lag : i * lag + dim]
+    return y
+
+
+def _channel_app_samp_entropy_counts(x, m, r, l):
+    x_emb = _create_embedding(x, m, l)
+    kdtree = KDTree(x_emb, metric="chebyshev")
+    return kdtree.query_radius(x_emb, r, count_only=True)
+
+
+class EntropyFeatureExtractor(FeatureExtractor):
+    def preprocess(self, x, m=2, r=0.2, l=1):
+        rr = r * x.std(axis=-1)
+        counts_m = np.empty((*x.shape[:-1], (x.shape[-1] - m + 1) // l))
+        counts_mp1 = np.empty((*x.shape[:-1], (x.shape[-1] - m) // l))
+        for i in np.ndindex(x.shape[:-1]):
+            counts_m[*i, :] = _channel_app_samp_entropy_counts(x[i], m, rr[i], l)
+            counts_mp1[*i, :] = _channel_app_samp_entropy_counts(x[i], m + 1, rr[i], l)
+        return counts_m, counts_mp1
+
+
+@FeaturePredecessor(EntropyFeatureExtractor)
+@univariate_feature
+def complexity_approx_entropy(counts_m, counts_mp1):
+    phi_m = np.log(counts_m / counts_m.shape[-1]).mean(axis=-1)
+    phi_mp1 = np.log(counts_mp1 / counts_mp1.shape[-1]).mean(axis=-1)
+    return phi_m - phi_mp1
+
+
+@FeaturePredecessor(EntropyFeatureExtractor)
+@univariate_feature
+def complexity_sample_entropy(counts_m, counts_mp1):
+    A = np.sum(counts_mp1 - 1, axis=-1)
+    B = np.sum(counts_m - 1, axis=-1)
+    return -np.log(A / B)
+
+
+@univariate_feature
+def complexity_svd_entropy(x, m=10, tau=1):
+    x_emb = np.empty((*x.shape[:-1], (x.shape[-1] - m + 1) // tau, m))
+    for i in np.ndindex(x.shape[:-1]):
+        x_emb[*i, :, :] = _create_embedding(x[i], m, tau)
+    s = np.linalg.svdvals(x_emb)
+    s /= s.sum(axis=-1, keepdims=True)
+    return -np.sum(s * np.log(s), axis=-1)
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def complexity_lempel_ziv(x, threshold=None):
+    lzc = np.empty(x.shape[:-1])
+    for i in np.ndindex(x.shape[:-1]):
+        t = np.median(x[i]) if threshold is None else threshold
+        s = x[i] > t
+        n = s.shape[0]
+        j, k, l = 0, 1, 1
+        k_max = 1
+        lzc[i] = 1
+        while True:
+            if s[j + k - 1] == s[l + k - 1]:
+                k += 1
+                if l + k > n:
+                    lzc[i] += 1
+                    break
+            else:
+                k_max = np.maximum(k, k_max)
+                j += 1
+                if j == l:
+                    lzc[i] += 1
+                    l += k_max
+                    if l + 1 > n:
+                        break
+                    j, k, k_max = 0, 1, 1
+                else:
+                    k = 1
+    return lzc
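
A sketch of how the entropy pipeline composes (illustrative; import path assumed). EntropyFeatureExtractor.preprocess computes the Chebyshev neighbor counts once per channel with a KDTree, and both entropy features reuse those counts:

import numpy as np
from eegdash.features import (  # path assumed
    EntropyFeatureExtractor,
    complexity_approx_entropy,
    complexity_sample_entropy,
)

x = np.random.randn(2, 3, 500)  # assumed (batch, channels, times)
fx = EntropyFeatureExtractor(
    {"apen": complexity_approx_entropy, "sampen": complexity_sample_entropy},
    m=2, r=0.2,  # forwarded to preprocess: embedding dimension and tolerance
)
out = fx(x, _batch_size=2, _ch_names=["Fz", "Cz", "Pz"])
print(sorted(out))  # ['apen_Cz', 'apen_Fz', 'apen_Pz', 'sampen_Cz', ...]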
@@ -0,0 +1,99 @@
+from itertools import chain
+import numpy as np
+from scipy.signal import csd
+
+from ..extractors import FeatureExtractor, BivariateFeature
+from ..decorators import FeaturePredecessor, bivariate_feature
+
+
+__all__ = [
+    "CoherenceFeatureExtractor",
+    "connectivity_magnitude_square_coherence",
+    "connectivity_imaginary_coherence",
+    "connectivity_lagged_coherence",
+]
+
+
+class CoherenceFeatureExtractor(FeatureExtractor):
+    def preprocess(self, x, **kwargs):
+        f_min = kwargs.pop("f_min") if "f_min" in kwargs else None
+        f_max = kwargs.pop("f_max") if "f_max" in kwargs else None
+        kwargs["axis"] = -1
+        n = x.shape[1]
+        idx_x, idx_y = BivariateFeature.get_pair_iterators(n)
+        ix, iy = list(chain(range(n), idx_x)), list(chain(range(n), idx_y))
+        f, s = csd(x[:, ix], x[:, iy], **kwargs)
+        if f_min is not None or f_max is not None:
+            f_min_idx = f > f_min if f_min is not None else True
+            f_max_idx = f < f_max if f_max is not None else True
+            idx = np.logical_and(f_min_idx, f_max_idx)
+            f = f[idx]
+            s = s[..., idx]
+        sx, sxy = np.split(s, [n], axis=1)
+        sxx, syy = sx[:, idx_x].real, sx[:, idx_y].real
+        c = sxy / np.sqrt(sxx * syy)
+        return f, c
+
+
+def _avg_over_bands(f, x, bands):
+    bands_avg = dict()
+    for k, v in bands.items():
+        assert isinstance(k, str)
+        assert isinstance(v, tuple)
+        assert len(v) == 2
+        assert v[0] < v[1]
+        mask = np.logical_and(f > v[0], f < v[1])
+        avg = x[..., mask].mean(axis=-1)
+        bands_avg[k] = avg
+    return bands_avg
+
+
+@FeaturePredecessor(CoherenceFeatureExtractor)
+@bivariate_feature
+def connectivity_magnitude_square_coherence(
+    f,
+    c,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    # https://neuroimage.usc.edu/brainstorm/Tutorials/Connectivity
+    coher = c.real**2 + c.imag**2
+    return _avg_over_bands(f, coher, bands)
+
+
+@FeaturePredecessor(CoherenceFeatureExtractor)
+@bivariate_feature
+def connectivity_imaginary_coherence(
+    f,
+    c,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    # https://neuroimage.usc.edu/brainstorm/Tutorials/Connectivity
+    coher = c.imag
+    return _avg_over_bands(f, coher, bands)
+
+
+@FeaturePredecessor(CoherenceFeatureExtractor)
+@bivariate_feature
+def connectivity_lagged_coherence(
+    f,
+    c,
+    bands={
+        "delta": (1, 4.5),
+        "theta": (4.5, 8),
+        "alpha": (8, 12),
+        "beta": (12, 30),
+    },
+):
+    # https://neuroimage.usc.edu/brainstorm/Tutorials/Connectivity
+    coher = c.imag / np.sqrt(1 - c.real)
+    return _avg_over_bands(f, coher, bands)
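
A sketch of the coherence pipeline (illustrative; import path, sampling rate, and csd settings are assumptions). Keyword arguments other than f_min and f_max are forwarded to scipy.signal.csd, and each feature returns one value per frequency band and channel pair:

import numpy as np
from eegdash.features import (  # path assumed
    CoherenceFeatureExtractor,
    connectivity_magnitude_square_coherence,
)

x = np.random.randn(4, 3, 1000)  # assumed (batch, channels, times), e.g. 250 Hz
fx = CoherenceFeatureExtractor(
    {"msc": connectivity_magnitude_square_coherence},
    fs=250, nperseg=500,    # forwarded to scipy.signal.csd
    f_min=1.0, f_max=30.0,  # consumed by preprocess to crop the spectrum
)
out = fx(x, _batch_size=4, _ch_names=["C3", "Cz", "C4"])
print(sorted(out))  # e.g. 'msc_alpha_C3<>Cz': one key per band and channel pair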
@@ -0,0 +1,102 @@
+import numpy as np
+import numba as nb
+import scipy
+import scipy.linalg
+
+from ..extractors import FitableFeature
+from ..decorators import multivariate_feature
+
+
+__all__ = [
+    "CommonSpatialPattern",
+]
+
+
+@nb.njit(cache=True, fastmath=True, parallel=True)
+def _update_mean_cov(count, mean, cov, x_count, x_mean, x_cov):
+    alpha2 = x_count / count
+    alpha1 = 1 - alpha2
+    cov[:] = alpha1 * (cov + np.outer(mean, mean))
+    cov[:] += alpha2 * (x_cov + np.outer(x_mean, x_mean))
+    mean[:] = alpha1 * mean + alpha2 * x_mean
+    cov[:] -= np.outer(mean, mean)
+
+
+@multivariate_feature
+class CommonSpatialPattern(FitableFeature):
+    def __init__(self):
+        super().__init__()
+
+    def clear(self):
+        self._labels = None
+        self._counts = np.array([0, 0])
+        self._means = np.array([None, None])
+        self._covs = np.array([None, None])
+        self._mean = None
+        self._eigvals = None
+        self._weights = None
+
+    def _update_labels(self, labels):
+        if self._labels is None:
+            self._labels = labels
+        else:
+            for label in labels:
+                if label not in self._labels:
+                    self._labels = np.append(self._labels, label)
+        assert self._labels.shape[0] < 3
+        return self._labels
+
+    def _update_stats(self, l, x):
+        x_count, x_mean, x_cov = x.shape[0], x.mean(axis=0), np.cov(x.T, ddof=0)
+        if self._counts[l] == 0:
+            self._counts[l] = x_count
+            self._means[l] = x_mean
+            self._covs[l] = x_cov
+        else:
+            self._counts[l] += x_count
+            _update_mean_cov(
+                self._counts[l], self._means[l], self._covs[l], x_count, x_mean, x_cov
+            )
+
+    def partial_fit(self, x, y=None):
+        labels = self._update_labels(np.unique(y))
+        for i, l in enumerate(labels):
+            ind = (y == l).nonzero()[0]
+            if ind.shape[0] > 0:
+                xl = self.transform_input(x[ind])
+                self._update_stats(i, xl)
+
+    @staticmethod
+    def transform_input(x):
+        return x.swapaxes(1, 2).reshape(-1, x.shape[1])
+
+    def fit(self):
+        alphas = self._counts / self._counts.sum()
+        self._mean = np.sum(alphas * self._means)
+        for l in range(len(self._labels)):
+            self._covs[l] *= self._counts[l] / (self._counts[l] - 1)
+        l, w = scipy.linalg.eig(self._covs[0], self._covs[0] + self._covs[1])
+        l = l.real
+        ind = l > 0
+        l, w = l[ind], w[:, ind]
+        ord = np.abs(l - 0.5).argsort()[::-1]
+        self._eigvals = l[ord]
+        self._weights = w[:, ord]
+        super().fit()
+
+    def __call__(self, x, n_select=None, crit_select=None):
+        super().__call__()
+        w = self._weights
+        if n_select:
+            w = w[:, :n_select]
+        if crit_select:
+            sel = 0.5 - np.abs(self._eigvals - 0.5) < crit_select
+            w = w[:, sel]
+        if w.shape[-1] == 0:
+            raise RuntimeError(
+                "CSP weights selection criterion is too strict, "
+                + "all weights were filtered out."
+            )
+        proj = (self.transform_input(x) - self._mean) @ w
+        proj = proj.reshape(x.shape[0], x.shape[2], -1).mean(axis=1)
+        return {f"{i}": proj[:, i] for i in range(proj.shape[-1])}
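
A sketch of the incremental CSP workflow (illustrative; import path assumed). Per-class means and covariances are accumulated batch by batch with partial_fit, the spatial filters are solved once in fit via the generalized eigendecomposition, and subsequent calls project new data:

import numpy as np
from eegdash.features import CommonSpatialPattern  # path assumed

rng = np.random.default_rng(0)
csp = CommonSpatialPattern()
for _ in range(5):  # stream of training batches
    xb = rng.standard_normal((16, 8, 128))  # assumed (batch, channels, times)
    yb = rng.integers(0, 2, 16)             # binary labels (the class asserts < 3)
    csp.partial_fit(xb, y=yb)
csp.fit()

feats = csp(rng.standard_normal((4, 8, 128)), n_select=4)
print(sorted(feats))  # ['0', '1', '2', '3']: one projected feature per kept filter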
@@ -0,0 +1,108 @@
+import numpy as np
+import numba as nb
+from scipy import special
+
+from ..decorators import univariate_feature
+from .signal import signal_zero_crossings
+
+
+__all__ = [
+    "dimensionality_higuchi_fractal_dim",
+    "dimensionality_petrosian_fractal_dim",
+    "dimensionality_katz_fractal_dim",
+    "dimensionality_hurst_exp",
+    "dimensionality_detrended_fluctuation_analysis",
+]
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def dimensionality_higuchi_fractal_dim(x, k_max=10, eps=1e-7):
+    N = x.shape[-1]
+    hfd = np.empty(x.shape[:-1])
+    log_k = np.vstack((-np.log(np.arange(1, k_max + 1)), np.ones(k_max))).T
+    L_km = np.empty(k_max)
+    L_k = np.empty(k_max)
+    for i in np.ndindex(x.shape[:-1]):
+        for k in range(1, k_max + 1):
+            for m in range(k):
+                L_km[m] = np.mean(np.abs(np.diff(x[*i, m:], n=k)))
+            L_k[k - 1] = (N - 1) * np.sum(L_km[:k]) / (k**3)
+        L_k = np.maximum(L_k, eps)
+        hfd[i] = np.linalg.lstsq(log_k, np.log(L_k))[0][0]
+    return hfd
+
+
+@univariate_feature
+def dimensionality_petrosian_fractal_dim(x):
+    nd = signal_zero_crossings(np.diff(x, axis=-1))
+    log_n = np.log(x.shape[-1])
+    return log_n / (np.log(nd) + log_n)
+
+
+@univariate_feature
+def dimensionality_katz_fractal_dim(x):
+    dists = np.abs(np.diff(x, axis=-1))
+    L = dists.sum(axis=-1)
+    a = dists.mean(axis=-1)
+    log_n = np.log(L / a)
+    d = np.abs(x[..., 1:] - x[..., 0, None]).max(axis=-1)
+    return log_n / (np.log(d / L) + log_n)
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def _hurst_exp(x, ns, a, gamma_ratios, log_n):
+    h = np.empty(x.shape[:-1])
+    rs = np.empty((ns.shape[0], x.shape[-1] // ns[0]))
+    log_rs = np.empty(ns.shape[0])
+    for i in np.ndindex(x.shape[:-1]):
+        t0 = 0
+        for j, n in enumerate(ns):
+            for k, t0 in enumerate(range(0, x.shape[-1], n)):
+                xj = x[i][t0 : t0 + n]
+                m = np.mean(xj)
+                y = xj - m
+                z = np.cumsum(y)
+                r = np.ptp(z)
+                s = np.sqrt(np.mean(y**2))
+                if s == 0.0:
+                    rs[j, k] = np.nan
+                else:
+                    rs[j, k] = r / s
+            log_rs[j] = np.log(np.nanmean(rs[j, : x.shape[1] // n]))
+            log_rs[j] -= np.log(np.sum(np.sqrt((n - a[:n]) / a[:n])) * gamma_ratios[j])
+        h[i] = 0.5 + np.linalg.lstsq(log_n, log_rs)[0][0]
+    return h
+
+
+@univariate_feature
+def dimensionality_hurst_exp(x):
+    ns = np.unique(np.power(2, np.arange(2, np.log2(x.shape[-1]) - 1)).astype(int))
+    idx = ns > 340
+    gamma_ratios = np.empty(ns.shape[0])
+    gamma_ratios[idx] = 1 / np.sqrt(ns[idx] / 2)
+    gamma_ratios[~idx] = special.gamma((ns[~idx] - 1) / 2) / special.gamma(ns[~idx] / 2)
+    gamma_ratios /= np.sqrt(np.pi)
+    log_n = np.vstack((np.log(ns), np.ones(ns.shape[0]))).T
+    a = np.arange(1, ns[-1], dtype=float)
+    return _hurst_exp(x, ns, a, gamma_ratios, log_n)
+
+
+@univariate_feature
+@nb.njit(cache=True, fastmath=True)
+def dimensionality_detrended_fluctuation_analysis(x):
+    ns = np.unique(np.floor(np.power(2, np.arange(2, np.log2(x.shape[-1]) - 1))))
+    a = np.vstack((np.arange(ns[-1]), np.ones(int(ns[-1])))).T
+    log_n = np.vstack((np.log(ns), np.ones(ns.shape[0]))).T
+    Fn = np.empty(ns.shape[0])
+    alpha = np.empty(x.shape[:-1])
+    for i in np.ndindex(x.shape[:-1]):
+        X = np.cumsum(x[i] - np.mean(x[i]))
+        for j, n in enumerate(ns):
+            n = int(n)
+            Z = np.reshape(X[: n * (X.shape[0] // n)], (n, X.shape[0] // n))
+            Fni2 = np.linalg.lstsq(a[:n], Z)[1] / n
+            Fn[j] = np.sqrt(np.mean(Fni2))
+        alpha[i] = np.linalg.lstsq(log_n, np.log(Fn))[0][0]
+    return alpha
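
Because these features declare no FeaturePredecessor, they can run under a plain FeatureExtractor; and since the decorators return the functions unchanged, they can also be called directly (illustrative; import path assumed):

import numpy as np
from eegdash.features import (  # path assumed
    dimensionality_katz_fractal_dim,
    dimensionality_petrosian_fractal_dim,
)

x = np.random.randn(3, 1024)  # assumed (channels, times); any leading shape works
print(dimensionality_katz_fractal_dim(x).shape)       # (3,): one value per channel
print(dimensionality_petrosian_fractal_dim(x).shape)  # (3,)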