ezmsg-sigproc 2.2.0__py3-none-any.whl → 2.4.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- ezmsg/sigproc/__version__.py +16 -3
- ezmsg/sigproc/aggregate.py +69 -0
- ezmsg/sigproc/denormalize.py +86 -0
- ezmsg/sigproc/fbcca.py +332 -0
- ezmsg/sigproc/filter.py +16 -0
- ezmsg/sigproc/filterbankdesign.py +136 -0
- ezmsg/sigproc/firfilter.py +119 -0
- ezmsg/sigproc/kaiser.py +110 -0
- ezmsg/sigproc/resample.py +186 -185
- ezmsg/sigproc/sampler.py +71 -83
- ezmsg/sigproc/util/axisarray_buffer.py +379 -0
- ezmsg/sigproc/util/buffer.py +470 -0
- ezmsg/sigproc/window.py +12 -10
- {ezmsg_sigproc-2.2.0.dist-info → ezmsg_sigproc-2.4.0.dist-info}/METADATA +1 -1
- {ezmsg_sigproc-2.2.0.dist-info → ezmsg_sigproc-2.4.0.dist-info}/RECORD +17 -10
- {ezmsg_sigproc-2.2.0.dist-info → ezmsg_sigproc-2.4.0.dist-info}/WHEEL +1 -1
- {ezmsg_sigproc-2.2.0.dist-info → ezmsg_sigproc-2.4.0.dist-info}/licenses/LICENSE.txt +0 -0
ezmsg/sigproc/__version__.py
CHANGED
@@ -1,7 +1,14 @@
 # file generated by setuptools-scm
 # don't change, don't track in version control

-__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]

 TYPE_CHECKING = False
 if TYPE_CHECKING:
@@ -9,13 +16,19 @@ if TYPE_CHECKING:
     from typing import Union

     VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
 else:
     VERSION_TUPLE = object
+    COMMIT_ID = object

 version: str
 __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID

-__version__ = version = '2.2.0'
-__version_tuple__ = version_tuple = (2, 2, 0)
+__version__ = version = '2.4.0'
+__version_tuple__ = version_tuple = (2, 4, 0)
+
+__commit_id__ = commit_id = None
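The version module now exports commit metadata (`__commit_id__` / `commit_id`) alongside the existing version attributes. A minimal sketch of reading the new attributes from an installed wheel; the printed values simply restate what the generated file above shows for this release:

```python
# Minimal sketch: read the version metadata generated by setuptools-scm.
from ezmsg.sigproc.__version__ import __version__, __version_tuple__, __commit_id__

print(__version__)        # "2.4.0"
print(__version_tuple__)  # (2, 4, 0)
print(__commit_id__)      # None in this wheel, per the generated file above
```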
ezmsg/sigproc/aggregate.py
CHANGED
@@ -1,3 +1,4 @@
+from array_api_compat import get_namespace
 import typing

 import numpy as np
@@ -12,6 +13,7 @@ from ezmsg.util.messages.axisarray import (

 from .spectral import OptionsEnum
 from .base import (
+    BaseTransformer,
     BaseStatefulTransformer,
     BaseTransformerUnit,
     processor_state,
@@ -213,3 +215,70 @@ def ranged_aggregate(
     return RangedAggregateTransformer(
         RangedAggregateSettings(axis=axis, bands=bands, operation=operation)
     )
+
+
+class AggregateSettings(ez.Settings):
+    """Settings for :obj:`Aggregate`."""
+
+    axis: str
+    """The name of the axis to aggregate over. This axis will be removed from the output."""
+
+    operation: AggregationFunction = AggregationFunction.MEAN
+    """:obj:`AggregationFunction` to apply."""
+
+
+class AggregateTransformer(BaseTransformer[AggregateSettings, AxisArray, AxisArray]):
+    """
+    Transformer that aggregates an entire axis using a specified operation.
+
+    Unlike :obj:`RangedAggregateTransformer` which aggregates over specific ranges/bands
+    and preserves the axis (with one value per band), this transformer aggregates the
+    entire axis and removes it from the output, reducing dimensionality by one.
+    """
+
+    def _process(self, message: AxisArray) -> AxisArray:
+        xp = get_namespace(message.data)
+        axis_idx = message.get_axis_idx(self.settings.axis)
+        op = self.settings.operation
+
+        if op == AggregationFunction.NONE:
+            raise ValueError(
+                "AggregationFunction.NONE is not supported for full-axis aggregation"
+            )
+
+        if op == AggregationFunction.TRAPEZOID:
+            # Trapezoid integration requires x-coordinates
+            target_axis = message.get_axis(self.settings.axis)
+            if hasattr(target_axis, "data"):
+                x = target_axis.data
+            else:
+                x = target_axis.value(np.arange(message.data.shape[axis_idx]))
+            agg_data = np.trapezoid(np.asarray(message.data), x=x, axis=axis_idx)
+        else:
+            # Try array-API compatible function first, fall back to numpy
+            func_name = op.value
+            if hasattr(xp, func_name):
+                agg_data = getattr(xp, func_name)(message.data, axis=axis_idx)
+            else:
+                agg_data = AGGREGATORS[op](message.data, axis=axis_idx)
+
+        new_dims = list(message.dims)
+        new_dims.pop(axis_idx)
+
+        new_axes = dict(message.axes)
+        new_axes.pop(self.settings.axis, None)
+
+        return replace(
+            message,
+            data=agg_data,
+            dims=new_dims,
+            axes=new_axes,
+        )
+
+
+class AggregateUnit(
+    BaseTransformerUnit[AggregateSettings, AxisArray, AxisArray, AggregateTransformer]
+):
+    """Unit that aggregates an entire axis using a specified operation."""
+
+    SETTINGS = AggregateSettings
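A rough usage sketch of the new full-axis aggregation follows. It is not taken from the package; the `AxisArray.TimeAxis` helper and calling the transformer instance directly are assumptions that may vary across ezmsg versions, but it illustrates the intended effect: the named axis is aggregated away and the output loses one dimension.

```python
# Hedged usage sketch for AggregateTransformer (not from the package).
# Assumes AxisArray.TimeAxis exists and that transformer instances are callable.
import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.sigproc.aggregate import (
    AggregateSettings,
    AggregateTransformer,
    AggregationFunction,
)

msg = AxisArray(
    data=np.random.randn(100, 8),  # 100 time samples x 8 channels
    dims=["time", "ch"],
    axes={"time": AxisArray.TimeAxis(fs=100.0)},
)

agg = AggregateTransformer(
    AggregateSettings(axis="ch", operation=AggregationFunction.MEAN)
)
out = agg(msg)
print(out.dims)        # ["time"] -- the "ch" axis has been removed
print(out.data.shape)  # (100,)
```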
ezmsg/sigproc/denormalize.py
ADDED

@@ -0,0 +1,86 @@
+import ezmsg.core as ez
+import numpy as np
+import numpy.typing as npt
+from ezmsg.sigproc.base import (
+    BaseTransformerUnit,
+    BaseStatefulTransformer,
+    processor_state,
+)
+from ezmsg.util.messages.axisarray import AxisArray
+from ezmsg.util.messages.util import replace
+
+
+class DenormalizeSettings(ez.Settings):
+    low_rate: float = 2.0
+    """Low end of probable rate after denormalization (Hz)."""
+
+    high_rate: float = 40.0
+    """High end of probable rate after denormalization (Hz)."""
+
+    distribution: str = "uniform"
+    """Distribution to sample rates from. Options are 'uniform', 'normal', or 'constant'."""
+
+
+@processor_state
+class DenormalizeRateState:
+    gains: npt.NDArray | None = None
+    offsets: npt.NDArray | None = None
+
+
+class DenormalizeTransformer(
+    BaseStatefulTransformer[
+        DenormalizeSettings, AxisArray, AxisArray, DenormalizeRateState
+    ]
+):
+    """
+    Scales data from a normalized distribution (mean=0, std=1) to a denormalized
+    distribution using random per-channel offsets and gains designed to keep the
+    99.9% CIs between 0 and 2x the offset.
+
+    This is useful for simulating realistic firing rates from normalized data.
+    """
+
+    def _reset_state(self, message: AxisArray) -> None:
+        ax_ix = message.get_axis_idx("ch")
+        nch = message.data.shape[ax_ix]
+        arr_size = (nch, 1) if ax_ix == 0 else (1, nch)
+        if self.settings.distribution == "uniform":
+            self.state.offsets = np.random.uniform(2.0, 40.0, size=arr_size)
+        elif self.settings.distribution == "normal":
+            self.state.offsets = np.random.normal(
+                loc=(self.settings.low_rate + self.settings.high_rate) / 2.0,
+                scale=(self.settings.high_rate - self.settings.low_rate) / 6.0,
+                size=arr_size,
+            )
+            self.state.offsets = np.clip(
+                self.state.offsets,
+                a_min=self.settings.low_rate,
+                a_max=self.settings.high_rate,
+            )
+        elif self.settings.distribution == "constant":
+            self.state.offsets = np.full(
+                shape=arr_size,
+                fill_value=(self.settings.low_rate + self.settings.high_rate) / 2.0,
+            )
+        else:
+            raise ValueError(f"Invalid distribution: {self.settings.distribution}")
+        # Input has std == 1
+        # Desired output has range from 0 to 2*self.state.offsets within 99.9% confidence interval
+        # For a standard normal distribution, 99.9% of data is within +/- 3.29 std devs.
+        # So, gain = offset / 3.29 to scale the std dev appropriately.
+        self.state.gains = self.state.offsets / 3.29
+
+    def _process(self, message: AxisArray) -> AxisArray:
+        denorm = message.data * self.state.gains + self.state.offsets
+        return replace(
+            message,
+            data=np.clip(denorm, a_min=0.0, a_max=None),
+        )
+
+
+class DenormalizeRateUnit(
+    BaseTransformerUnit[
+        DenormalizeSettings, AxisArray, AxisArray, DenormalizeTransformer
+    ]
+):
+    SETTINGS = DenormalizeSettings
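The `3.29` constant in `_reset_state` is the two-sided 99.9% point of the standard normal distribution: roughly 99.9% of unit-variance samples fall within ±3.29 standard deviations, so a gain of `offset / 3.29` keeps about 99.9% of the denormalized values inside `offset ± offset`, i.e. between 0 and 2x the offset. A standalone numeric check of that arithmetic (plain NumPy, independent of ezmsg):

```python
# Standalone check of the gain = offset / 3.29 rule used by DenormalizeTransformer.
import numpy as np

rng = np.random.default_rng(0)
z = rng.standard_normal(1_000_000)  # normalized input: mean 0, std 1

offset = 20.0                       # an example per-channel rate offset (Hz)
gain = offset / 3.29                # 99.9% of N(0, 1) lies within +/- 3.29
rate = np.clip(z * gain + offset, 0.0, None)

inside = np.mean((rate > 0.0) & (rate < 2.0 * offset))
print(f"fraction within (0, 2*offset): {inside:.4f}")  # ~0.999
```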
ezmsg/sigproc/fbcca.py
ADDED
@@ -0,0 +1,332 @@
+import typing
+import math
+from dataclasses import field
+
+import numpy as np
+
+import ezmsg.core as ez
+from ezmsg.util.messages.axisarray import AxisArray
+from ezmsg.util.messages.util import replace
+
+from .sampler import SampleTriggerMessage
+from .window import WindowTransformer, WindowSettings
+
+from .base import (
+    BaseTransformer,
+    BaseTransformerUnit,
+    CompositeProcessor,
+    BaseProcessor,
+    BaseStatefulProcessor,
+)
+
+from .kaiser import KaiserFilterSettings
+from .filterbankdesign import (
+    FilterbankDesignSettings,
+    FilterbankDesignTransformer,
+)
+
+
+class FBCCASettings(ez.Settings):
+    """
+    Settings for :obj:`FBCCATransformer`
+    """
+
+    time_dim: str
+    """
+    The time dim in the data array.
+    """
+
+    ch_dim: str
+    """
+    The channels dim in the data array.
+    """
+
+    filterbank_dim: str | None = None
+    """
+    The filter bank subband dim in the data array. If unspecified, method falls back to CCA
+    None (default): the input has no subbands; just use CCA
+    """
+
+    harmonics: int = 5
+    """
+    The number of additional harmonics beyond the fundamental to use for the 'design' matrix.
+    5 (default): Evaluate 5 harmonics of the base frequency.
+    Many periodic signals are not pure sinusoids, and inclusion of higher harmonics can help evaluate the
+    presence of signals with higher frequency harmonic content
+    """
+
+    freqs: typing.List[float] = field(default_factory=list)
+    """
+    Frequencies (in hz) to evaluate the presence of within the input signal.
+    [] (default): an empty list; frequencies will be found within the input SampleMessages.
+    AxisArrays have no good place to put this metadata, so specify frequencies here if only AxisArrays
+    will be passed as input to the generator. If the input has a `trigger` attr of type :obj:`SampleTriggerMessage`,
+    the processor looks for the `freqs` attribute within that trigger for a list of frequencies to evaluate.
+    This field is present in the :obj:`SSVEPSampleTriggerMessage` defined in ezmsg.tasks.ssvep from the ezmsg-tasks package.
+    NOTE: Avoid frequencies that have line-noise (60 Hz/50 Hz) as a harmonic.
+    """
+
+    softmax_beta: float = 1.0
+    """
+    Beta parameter for softmax on output --> "probabilities".
+    1.0 (default): Use the shifted softmax transformation to output 0-1 probabilities.
+    If 0.0, the maximum singular value of the SVD for each design matrix is output
+    """
+
+    target_freq_dim: str = "target_freq"
+    """
+    Name for dim to put target frequency outputs on.
+    'target_freq' (default)
+    """
+
+    max_int_time: float = 0.0
+    """
+    Maximum integration time (in seconds) to use for calculation.
+    0 (default): Use all time provided for the calculation.
+    Useful for artificially limiting the amount of data used for the CCA method to evaluate
+    the necessary integration time for good decoding performance
+    """
+
+
+class FBCCATransformer(BaseTransformer[FBCCASettings, AxisArray, AxisArray]):
+    """
+    A canonical-correlation (CCA) signal decoder for detection of periodic activity in multi-channel timeseries
+    recordings. It is particularly useful for detecting the presence of steady-state evoked responses in multi-channel
+    EEG data. Please see Lin et. al. 2007 for a description on the use of CCA to detect the presence of SSVEP in EEG
+    data.
+    This implementation also includes the "Filterbank" extension of the CCA decoding approach which utilizes a
+    filterbank to decompose input multi-channel EEG data into several frequency sub-bands; each of which is analyzed
+    with CCA, then combined using a weighted sum; allowing CCA to more readily identify harmonic content in EEG data.
+    Read more about this approach in Chen et. al. 2015.
+
+    ## Further reading:
+    * [Lin et. al. 2007](https://ieeexplore.ieee.org/document/4015614)
+    * [Nakanishi et. al. 2015](https://doi.org/10.1371%2Fjournal.pone.0140703)
+    * [Chen et. al. 2015](http://dx.doi.org/10.1088/1741-2560/12/4/046008)
+    """
+
+    def _process(self, message: AxisArray) -> AxisArray:
+        """
+        Input: AxisArray with at least a time_dim, and ch_dim
+        Output: AxisArray with time_dim, ch_dim, (and filterbank_dim if specified)
+        collapsed, with a new 'target_freq' dim of length 'freqs'
+        """
+
+        test_freqs: list[float] = self.settings.freqs
+        trigger = message.attrs.get("trigger", None)
+        if isinstance(trigger, SampleTriggerMessage):
+            if len(test_freqs) == 0:
+                test_freqs = getattr(trigger, "freqs", [])
+
+        if len(test_freqs) == 0:
+            raise ValueError("no frequencies to test")
+
+        time_dim_idx = message.get_axis_idx(self.settings.time_dim)
+        ch_dim_idx = message.get_axis_idx(self.settings.ch_dim)
+
+        filterbank_dim_idx = None
+        if self.settings.filterbank_dim is not None:
+            filterbank_dim_idx = message.get_axis_idx(self.settings.filterbank_dim)
+
+        # Move (filterbank_dim), time, ch to end of array
+        rm_dims = [self.settings.time_dim, self.settings.ch_dim]
+        if self.settings.filterbank_dim is not None:
+            rm_dims = [self.settings.filterbank_dim] + rm_dims
+        new_order = [i for i, dim in enumerate(message.dims) if dim not in rm_dims]
+        if filterbank_dim_idx is not None:
+            new_order.append(filterbank_dim_idx)
+        new_order.extend([time_dim_idx, ch_dim_idx])
+        out_dims = [
+            message.dims[i] for i in new_order if message.dims[i] not in rm_dims
+        ]
+        data_arr = message.data.transpose(new_order)
+
+        # Add a singleton dim for filterbank dim if we don't have one
+        if filterbank_dim_idx is None:
+            data_arr = data_arr[..., None, :, :]
+            filterbank_dim_idx = data_arr.ndim - 3
+
+        # data_arr is now (..., filterbank, time, ch)
+        # Get output shape for remaining dims and reshape data_arr for iterative processing
+        out_shape = list(data_arr.shape[:-3])
+        data_arr = data_arr.reshape([math.prod(out_shape), *data_arr.shape[-3:]])
+
+        # Create output dims and axes with added target_freq_dim
+        out_shape.append(len(test_freqs))
+        out_dims.append(self.settings.target_freq_dim)
+        out_axes = {
+            axis_name: axis
+            for axis_name, axis in message.axes.items()
+            if axis_name not in rm_dims
+            and not (
+                isinstance(axis, AxisArray.CoordinateAxis)
+                and any(d in rm_dims for d in axis.dims)
+            )
+        }
+        out_axes[self.settings.target_freq_dim] = AxisArray.CoordinateAxis(
+            np.array(test_freqs), [self.settings.target_freq_dim]
+        )
+
+        if message.data.size == 0:
+            out_data = message.data.reshape(out_shape)
+            output = replace(message, data=out_data, dims=out_dims, axes=out_axes)
+            return output
+
+        # Get time axis
+        t_ax_info = message.ax(self.settings.time_dim)
+        t = t_ax_info.values
+        t -= t[0]
+        max_samp = len(t)
+        if self.settings.max_int_time > 0:
+            max_samp = int(abs(t_ax_info.values - self.settings.max_int_time).argmin())
+            t = t[:max_samp]
+
+        calc_output = np.zeros((*data_arr.shape[:-2], len(test_freqs)))
+
+        for test_freq_idx, test_freq in enumerate(test_freqs):
+            # Create the design matrix of base frequency and requested harmonics
+            Y = np.column_stack(
+                [
+                    fn(2.0 * np.pi * k * test_freq * t)
+                    for k in range(1, self.settings.harmonics + 1)
+                    for fn in (np.sin, np.cos)
+                ]
+            )
+
+            for test_idx, arr in enumerate(
+                data_arr
+            ):  # iterate over first dim; arr is (filterbank x time x ch)
+                for band_idx, band in enumerate(
+                    arr
+                ):  # iterate over second dim: arr is (time x ch)
+                    calc_output[test_idx, band_idx, test_freq_idx] = cca_rho_max(
+                        band[:max_samp, ...], Y
+                    )
+
+        # Combine per-subband canonical correlations using a weighted sum
+        # https://iopscience.iop.org/article/10.1088/1741-2560/12/4/046008
+        freq_weights = (np.arange(1, calc_output.shape[1] + 1) ** -1.25) + 0.25
+        calc_output = ((calc_output**2) * freq_weights[None, :, None]).sum(axis=1)
+
+        if self.settings.softmax_beta != 0:
+            calc_output = calc_softmax(
+                calc_output, axis=-1, beta=self.settings.softmax_beta
+            )
+
+        output = replace(
+            message,
+            data=calc_output.reshape(out_shape),
+            dims=out_dims,
+            axes=out_axes,
+        )
+
+        return output
+
+
+class FBCCA(BaseTransformerUnit[FBCCASettings, AxisArray, AxisArray, FBCCATransformer]):
+    SETTINGS = FBCCASettings
+
+
+class StreamingFBCCASettings(FBCCASettings):
+    """
+    Perform rolling/streaming FBCCA on incoming EEG.
+    Decomposes the input multi-channel timeseries data into multiple sub-bands using a FilterbankDesign Transformer,
+    then accumulates data using Window into short-time observations for analysis using an FBCCA Transformer.
+    """
+
+    window_dur: float = 4.0  # sec
+    window_shift: float = 0.5  # sec
+    window_dim: str = "fbcca_window"
+    filter_bw: float = 7.0  # Hz
+    filter_low: float = 7.0  # Hz
+    trans_bw: float = 2.0  # Hz
+    ripple_db: float = 20.0  # dB
+    subbands: int = 12
+
+
+class StreamingFBCCATransformer(
+    CompositeProcessor[StreamingFBCCASettings, AxisArray, AxisArray]
+):
+    @staticmethod
+    def _initialize_processors(
+        settings: StreamingFBCCASettings,
+    ) -> dict[str, BaseProcessor | BaseStatefulProcessor]:
+        pipeline = {}
+
+        if settings.filterbank_dim is not None:
+            cut_freqs = (
+                np.arange(settings.subbands + 1) * settings.filter_bw
+            ) + settings.filter_low
+            filters = [
+                KaiserFilterSettings(
+                    axis=settings.time_dim,
+                    cutoff=(c - settings.trans_bw, cut_freqs[-1]),
+                    ripple=settings.ripple_db,
+                    width=settings.trans_bw,
+                    pass_zero=False,
+                )
+                for c in cut_freqs[:-1]
+            ]
+
+            pipeline["filterbank"] = FilterbankDesignTransformer(
+                FilterbankDesignSettings(
+                    filters=filters, new_axis=settings.filterbank_dim
+                )
+            )
+
+        pipeline["window"] = WindowTransformer(
+            WindowSettings(
+                axis=settings.time_dim,
+                newaxis=settings.window_dim,
+                window_dur=settings.window_dur,
+                window_shift=settings.window_shift,
+                zero_pad_until="shift",
+            )
+        )
+
+        pipeline["fbcca"] = FBCCATransformer(settings)
+
+        return pipeline
+
+
+class StreamingFBCCA(
+    BaseTransformerUnit[
+        StreamingFBCCASettings, AxisArray, AxisArray, StreamingFBCCATransformer
+    ]
+):
+    SETTINGS = StreamingFBCCASettings
+
+
+def cca_rho_max(X: np.ndarray, Y: np.ndarray) -> float:
+    """
+    X: (n_time, n_ch)
+    Y: (n_time, n_ref)  # design matrix for one frequency
+    returns: largest canonical correlation in [0,1]
+    """
+    # Center columns
+    Xc = X - X.mean(axis=0, keepdims=True)
+    Yc = Y - Y.mean(axis=0, keepdims=True)
+
+    # Drop any zero-variance columns to avoid rank issues
+    Xc = Xc[:, Xc.std(axis=0) > 1e-12]
+    Yc = Yc[:, Yc.std(axis=0) > 1e-12]
+    if Xc.size == 0 or Yc.size == 0:
+        return 0.0
+
+    # Orthonormal bases
+    Qx, _ = np.linalg.qr(Xc, mode="reduced")  # (n_time, r_x)
+    Qy, _ = np.linalg.qr(Yc, mode="reduced")  # (n_time, r_y)
+
+    # Canonical correlations are the singular values of Qx^T Qy
+    with np.errstate(divide="ignore", over="ignore", invalid="ignore"):
+        s = np.linalg.svd(Qx.T @ Qy, compute_uv=False)
+    return float(s[0]) if s.size else 0.0
+
+
+def calc_softmax(cv: np.ndarray, axis: int, beta: float = 1.0):
+    # Calculate softmax with shifting to avoid overflow
+    # (https://doi.org/10.1093/imanum/draa038)
+    cv = cv - cv.max(axis=axis, keepdims=True)
+    cv = np.exp(beta * cv)
+    cv = cv / np.sum(cv, axis=axis, keepdims=True)
+    return cv
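The mathematical core of the file is `cca_rho_max`: after centering, the largest canonical correlation between the recorded window `X` and the sine/cosine reference `Y` equals the largest singular value of `Qx.T @ Qy`, where `Qx` and `Qy` are orthonormal bases (from QR) for the two column spaces. A self-contained demo of that identity on synthetic data; the frequencies, channel count, and noise level are arbitrary choices and `rho_max` is a local re-implementation for illustration, not code from the package:

```python
# Demo of the QR + SVD canonical-correlation computation used by cca_rho_max.
import numpy as np

def rho_max(X: np.ndarray, Y: np.ndarray) -> float:
    Xc = X - X.mean(axis=0, keepdims=True)
    Yc = Y - Y.mean(axis=0, keepdims=True)
    Qx, _ = np.linalg.qr(Xc)  # orthonormal basis for the data columns
    Qy, _ = np.linalg.qr(Yc)  # orthonormal basis for the reference columns
    return float(np.linalg.svd(Qx.T @ Qy, compute_uv=False)[0])

fs, dur = 250.0, 4.0
t = np.arange(int(fs * dur)) / fs
rng = np.random.default_rng(1)

# 8-channel recording containing a noisy 12 Hz steady-state response
X = np.sin(2 * np.pi * 12.0 * t)[:, None] * rng.uniform(0.5, 1.5, 8)
X = X + rng.standard_normal((t.size, 8))

for f in (10.0, 12.0, 15.0):
    # Sine/cosine design matrix: fundamental plus two extra harmonics
    Y = np.column_stack(
        [fn(2 * np.pi * k * f * t) for k in range(1, 4) for fn in (np.sin, np.cos)]
    )
    print(f"{f:5.1f} Hz -> rho_max = {rho_max(X, Y):.3f}")
# The 12 Hz reference should yield the largest correlation.
```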
ezmsg/sigproc/filter.py
CHANGED
@@ -263,6 +263,14 @@ class FilterByDesignTransformer(
         axis = self.state.filter.settings.axis
         fs = 1 / message.axes[axis].gain
         coefs = design_fun(fs)
+
+        # Convert BA to SOS if requested
+        if coefs is not None and self.settings.coef_type == "sos":
+            if isinstance(coefs, tuple) and len(coefs) == 2:
+                # It's BA format, convert to SOS
+                b, a = coefs
+                coefs = scipy.signal.tf2sos(b, a)
+
         self.state.filter.update_coefficients(
             coefs, coef_type=self.settings.coef_type
         )
@@ -282,6 +290,14 @@ class FilterByDesignTransformer(
         axis = message.dims[0] if self.settings.axis is None else self.settings.axis
         fs = 1 / message.axes[axis].gain
         coefs = design_fun(fs)
+
+        # Convert BA to SOS if requested
+        if coefs is not None and self.settings.coef_type == "sos":
+            if isinstance(coefs, tuple) and len(coefs) == 2:
+                # It's BA format, convert to SOS
+                b, a = coefs
+                coefs = scipy.signal.tf2sos(b, a)
+
         new_settings = FilterSettings(
             axis=axis, coef_type=self.settings.coef_type, coefs=coefs
         )
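The new branch only fires when the designed coefficients arrive as a `(b, a)` tuple and `coef_type == "sos"`; the conversion itself is standard SciPy. A standalone sketch with an arbitrary example filter (the Butterworth design here is illustrative, not something the diff prescribes):

```python
# Standalone illustration of the BA -> SOS conversion added in filter.py.
import scipy.signal

fs = 500.0  # example sampling rate
coefs = scipy.signal.butter(4, 30.0, btype="low", fs=fs)  # returns (b, a)

if isinstance(coefs, tuple) and len(coefs) == 2:
    # Same check as the diff: a 2-tuple is treated as BA and converted to
    # second-order sections, which are more numerically stable for IIR filtering.
    b, a = coefs
    coefs = scipy.signal.tf2sos(b, a)

print(coefs.shape)  # (2, 6): two biquad sections with 6 coefficients each
```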