ezmsg-sigproc 1.2.2__py3-none-any.whl → 2.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. ezmsg/sigproc/__init__.py +1 -1
  2. ezmsg/sigproc/__version__.py +34 -1
  3. ezmsg/sigproc/activation.py +78 -0
  4. ezmsg/sigproc/adaptive_lattice_notch.py +212 -0
  5. ezmsg/sigproc/affinetransform.py +235 -0
  6. ezmsg/sigproc/aggregate.py +276 -0
  7. ezmsg/sigproc/bandpower.py +80 -0
  8. ezmsg/sigproc/base.py +149 -0
  9. ezmsg/sigproc/butterworthfilter.py +129 -39
  10. ezmsg/sigproc/butterworthzerophase.py +305 -0
  11. ezmsg/sigproc/cheby.py +125 -0
  12. ezmsg/sigproc/combfilter.py +160 -0
  13. ezmsg/sigproc/coordinatespaces.py +159 -0
  14. ezmsg/sigproc/decimate.py +46 -18
  15. ezmsg/sigproc/denormalize.py +78 -0
  16. ezmsg/sigproc/detrend.py +28 -0
  17. ezmsg/sigproc/diff.py +82 -0
  18. ezmsg/sigproc/downsample.py +97 -49
  19. ezmsg/sigproc/ewma.py +217 -0
  20. ezmsg/sigproc/ewmfilter.py +45 -19
  21. ezmsg/sigproc/extract_axis.py +39 -0
  22. ezmsg/sigproc/fbcca.py +307 -0
  23. ezmsg/sigproc/filter.py +282 -117
  24. ezmsg/sigproc/filterbank.py +292 -0
  25. ezmsg/sigproc/filterbankdesign.py +129 -0
  26. ezmsg/sigproc/fir_hilbert.py +336 -0
  27. ezmsg/sigproc/fir_pmc.py +209 -0
  28. ezmsg/sigproc/firfilter.py +117 -0
  29. ezmsg/sigproc/gaussiansmoothing.py +89 -0
  30. ezmsg/sigproc/kaiser.py +106 -0
  31. ezmsg/sigproc/linear.py +120 -0
  32. ezmsg/sigproc/math/__init__.py +0 -0
  33. ezmsg/sigproc/math/abs.py +35 -0
  34. ezmsg/sigproc/math/add.py +120 -0
  35. ezmsg/sigproc/math/clip.py +48 -0
  36. ezmsg/sigproc/math/difference.py +143 -0
  37. ezmsg/sigproc/math/invert.py +28 -0
  38. ezmsg/sigproc/math/log.py +57 -0
  39. ezmsg/sigproc/math/scale.py +39 -0
  40. ezmsg/sigproc/messages.py +3 -6
  41. ezmsg/sigproc/quantize.py +68 -0
  42. ezmsg/sigproc/resample.py +278 -0
  43. ezmsg/sigproc/rollingscaler.py +232 -0
  44. ezmsg/sigproc/sampler.py +232 -241
  45. ezmsg/sigproc/scaler.py +165 -0
  46. ezmsg/sigproc/signalinjector.py +70 -0
  47. ezmsg/sigproc/slicer.py +138 -0
  48. ezmsg/sigproc/spectral.py +6 -132
  49. ezmsg/sigproc/spectrogram.py +90 -0
  50. ezmsg/sigproc/spectrum.py +277 -0
  51. ezmsg/sigproc/transpose.py +134 -0
  52. ezmsg/sigproc/util/__init__.py +0 -0
  53. ezmsg/sigproc/util/asio.py +25 -0
  54. ezmsg/sigproc/util/axisarray_buffer.py +365 -0
  55. ezmsg/sigproc/util/buffer.py +449 -0
  56. ezmsg/sigproc/util/message.py +17 -0
  57. ezmsg/sigproc/util/profile.py +23 -0
  58. ezmsg/sigproc/util/sparse.py +115 -0
  59. ezmsg/sigproc/util/typeresolution.py +17 -0
  60. ezmsg/sigproc/wavelets.py +187 -0
  61. ezmsg/sigproc/window.py +301 -117
  62. ezmsg_sigproc-2.10.0.dist-info/METADATA +60 -0
  63. ezmsg_sigproc-2.10.0.dist-info/RECORD +65 -0
  64. {ezmsg_sigproc-1.2.2.dist-info → ezmsg_sigproc-2.10.0.dist-info}/WHEEL +1 -2
  65. ezmsg/sigproc/synth.py +0 -411
  66. ezmsg_sigproc-1.2.2.dist-info/METADATA +0 -36
  67. ezmsg_sigproc-1.2.2.dist-info/RECORD +0 -17
  68. ezmsg_sigproc-1.2.2.dist-info/top_level.txt +0 -1
  69. /ezmsg_sigproc-1.2.2.dist-info/LICENSE.txt → /ezmsg_sigproc-2.10.0.dist-info/licenses/LICENSE +0 -0
ezmsg/sigproc/math/log.py ADDED
@@ -0,0 +1,57 @@
+ """
+ Take the logarithm of the data.
+
+ .. note::
+     This module supports the :doc:`Array API standard </guides/explanations/array_api>`,
+     enabling use with NumPy, CuPy, PyTorch, and other compatible array libraries.
+ """
+
+ import ezmsg.core as ez
+ from array_api_compat import get_namespace
+ from ezmsg.baseproc import BaseTransformer, BaseTransformerUnit
+ from ezmsg.util.messages.axisarray import AxisArray
+ from ezmsg.util.messages.util import replace
+
+
+ class LogSettings(ez.Settings):
+     base: float = 10.0
+     """The base of the logarithm. Default is 10."""
+
+     clip_zero: bool = False
+     """If True, clip the data to the minimum positive value of the data type before taking the log."""
+
+
+ class LogTransformer(BaseTransformer[LogSettings, AxisArray, AxisArray]):
+     def _process(self, message: AxisArray) -> AxisArray:
+         xp = get_namespace(message.data)
+         data = message.data
+         if self.settings.clip_zero:
+             # Check if any values are <= 0 and dtype is floating point
+             has_non_positive = bool(xp.any(data <= 0))
+             is_floating = xp.isdtype(data.dtype, "real floating")
+             if has_non_positive and is_floating:
+                 # Use smallest_normal (Array API equivalent of numpy's finfo.tiny)
+                 min_val = xp.finfo(data.dtype).smallest_normal
+                 data = xp.clip(data, min_val, None)
+         return replace(message, data=xp.log(data) / xp.log(self.settings.base))
+
+
+ class Log(BaseTransformerUnit[LogSettings, AxisArray, AxisArray, LogTransformer]):
+     SETTINGS = LogSettings
+
+
+ def log(
+     base: float = 10.0,
+     clip_zero: bool = False,
+ ) -> LogTransformer:
+     """
+     Take the logarithm of the data. See :obj:`np.log` for more details.
+
+     Args:
+         base: The base of the logarithm. Default is 10.
+         clip_zero: If True, clip the data to the minimum positive value of the data type before taking the log.
+
+     Returns: :obj:`LogTransformer`.
+
+     """
+     return LogTransformer(LogSettings(base=base, clip_zero=clip_zero))
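
For orientation, a minimal usage sketch of the new log module (not part of the package diff; the sample data is invented, and the private _process hook is called directly because it is the only entry point visible in this diff — in a pipeline the Log unit would be used instead):

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.sigproc.math.log import log

# Hypothetical 1-second, 2-channel message for illustration only.
msg = AxisArray(
    data=np.random.rand(100, 2),
    dims=["time", "ch"],
    axes={"time": AxisArray.TimeAxis(100.0, offset=0.0)},
)

# Base-10 log; clip_zero would clamp non-positive values to the dtype's smallest normal.
xf = log(base=10.0, clip_zero=True)
out = xf._process(msg)  # same shape and axes; data = log(x) / log(10)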
ezmsg/sigproc/math/scale.py ADDED
@@ -0,0 +1,39 @@
+ """
+ Scale the data by a constant factor.
+
+ .. note::
+     This module supports the :doc:`Array API standard </guides/explanations/array_api>`,
+     enabling use with NumPy, CuPy, PyTorch, and other compatible array libraries.
+ """
+
+ import ezmsg.core as ez
+ from ezmsg.baseproc import BaseTransformer, BaseTransformerUnit
+ from ezmsg.util.messages.axisarray import AxisArray
+ from ezmsg.util.messages.util import replace
+
+
+ class ScaleSettings(ez.Settings):
+     scale: float = 1.0
+     """Factor by which to scale the data magnitude."""
+
+
+ class ScaleTransformer(BaseTransformer[ScaleSettings, AxisArray, AxisArray]):
+     def _process(self, message: AxisArray) -> AxisArray:
+         return replace(message, data=self.settings.scale * message.data)
+
+
+ class Scale(BaseTransformerUnit[ScaleSettings, AxisArray, AxisArray, ScaleTransformer]):
+     SETTINGS = ScaleSettings
+
+
+ def scale(scale: float = 1.0) -> ScaleTransformer:
+     """
+     Scale the data by a constant factor.
+
+     Args:
+         scale: Factor by which to scale the data magnitude.
+
+     Returns: :obj:`ScaleTransformer`
+
+     """
+     return ScaleTransformer(ScaleSettings(scale=scale))
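
A similarly minimal sketch for the new scale module (illustration only, again invoking the private _process hook; the Scale unit wraps the same transformer for use in an ezmsg pipeline):

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.sigproc.math.scale import scale

msg = AxisArray(
    data=np.arange(4, dtype=float),
    dims=["time"],
    axes={"time": AxisArray.TimeAxis(4.0, offset=0.0)},
)

xf = scale(scale=2.5)
out = xf._process(msg)  # out.data == [0.0, 2.5, 5.0, 7.5]; axes unchanged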
ezmsg/sigproc/messages.py CHANGED
@@ -1,12 +1,9 @@
- import warnings
  import time
+ import warnings
 
  import numpy.typing as npt
- 
  from ezmsg.util.messages.axisarray import AxisArray
 
- from typing import Optional
- 
  # UPCOMING: TSMessage Deprecation
  # TSMessage is deprecated because it doesn't handle multiple time axes well.
  # AxisArray has an incompatible API but supports a superset of functionality.
@@ -21,11 +18,11 @@ def TSMessage(
      data: npt.NDArray,
      fs: float = 1.0,
      time_dim: int = 0,
-     timestamp: Optional[float] = None,
+     timestamp: float | None = None,
  ) -> AxisArray:
      dims = [f"dim_{i}" for i in range(data.ndim)]
      dims[time_dim] = "time"
      offset = time.time() if timestamp is None else timestamp
      offset_adj = data.shape[time_dim] / fs # offset corresponds to idx[0] on time_dim
-     axis = AxisArray.Axis.TimeAxis(fs, offset=offset - offset_adj)
+     axis = AxisArray.TimeAxis(fs, offset=offset - offset_adj)
      return AxisArray(data, dims=dims, axes=dict(time=axis))
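
The TSMessage changes are mechanical (PEP 604 optional syntax and the relocated TimeAxis constructor), but for readers migrating off the deprecated helper, this sketch shows the equivalent direct AxisArray construction implied by the updated function body; the sample shape is arbitrary and not from the diff:

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.sigproc.messages import TSMessage

data = np.zeros((100, 8))  # 100 samples x 8 channels, made up for illustration
msg = TSMessage(data, fs=500.0)  # deprecated convenience wrapper

# Equivalent construction without TSMessage, mirroring the new code path:
axis = AxisArray.TimeAxis(500.0, offset=msg.axes["time"].offset)
same = AxisArray(data, dims=["time", "dim_1"], axes=dict(time=axis))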
ezmsg/sigproc/quantize.py ADDED
@@ -0,0 +1,68 @@
+ import ezmsg.core as ez
+ import numpy as np
+ from ezmsg.baseproc import BaseTransformer, BaseTransformerUnit
+ from ezmsg.util.messages.axisarray import AxisArray, replace
+
+
+ class QuantizeSettings(ez.Settings):
+     """
+     Settings for the Quantizer.
+     """
+
+     max_val: float
+     """
+     Clip the data to this maximum value before quantization and map the [min_val max_val] range to the quantized range.
+     """
+
+     min_val: float = 0.0
+     """
+     Clip the data to this minimum value before quantization and map the [min_val max_val] range to the quantized range.
+     Default: 0
+     """
+
+     bits: int = 8
+     """
+     Number of bits for quantization.
+     Note: The data type will be integer of the next power of 2 greater than or equal to this value.
+     Default: 8
+     """
+
+
+ class QuantizeTransformer(BaseTransformer[QuantizeSettings, AxisArray, AxisArray]):
+     def _process(
+         self,
+         message: AxisArray,
+     ) -> AxisArray:
+         expected_range = self.settings.max_val - self.settings.min_val
+         scale_factor = 2**self.settings.bits - 1
+         clip_max = self.settings.max_val
+
+         # Determine appropriate integer type based on bits
+         if self.settings.bits <= 1:
+             dtype = bool
+         elif self.settings.bits <= 8:
+             dtype = np.uint8
+         elif self.settings.bits <= 16:
+             dtype = np.uint16
+         elif self.settings.bits <= 32:
+             dtype = np.uint32
+         else:
+             dtype = np.uint64
+             if self.settings.bits == 64:
+                 # The practical upper bound before converting to int is: 2**64 - 1025
+                 # Anything larger will wrap around to 0.
+                 #
+                 clip_max *= 1 - 2e-16
+
+         data = message.data.clip(self.settings.min_val, clip_max)
+         data = (data - self.settings.min_val) / expected_range
+
+         # Scale to the quantized range [0, 2^bits - 1]
+         data = np.rint(scale_factor * data).astype(dtype)
+
+         # Create a new AxisArray with the quantized data
+         return replace(message, data=data)
+
+
+ class QuantizerUnit(BaseTransformerUnit[QuantizeSettings, AxisArray, AxisArray, QuantizeTransformer]):
+     SETTINGS = QuantizeSettings
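
The quantizer clips values to [min_val, max_val], maps that range onto the integer range [0, 2**bits - 1], and picks the smallest unsigned dtype that holds it. A worked sketch (made-up data, calling the private _process hook directly for illustration):

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.sigproc.quantize import QuantizeSettings, QuantizeTransformer

msg = AxisArray(
    data=np.array([-0.5, 0.0, 0.5, 1.0, 1.5]),
    dims=["time"],
    axes={"time": AxisArray.TimeAxis(5.0, offset=0.0)},
)

xf = QuantizeTransformer(QuantizeSettings(min_val=0.0, max_val=1.0, bits=8))
out = xf._process(msg)
# Clip to [0, 1], rescale to [0, 255], round:
# [-0.5, 0.0, 0.5, 1.0, 1.5] -> uint8 [0, 0, 128, 255, 255]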
ezmsg/sigproc/resample.py ADDED
@@ -0,0 +1,278 @@
+ import asyncio
+ import math
+ import time
+
+ import ezmsg.core as ez
+ import numpy as np
+ import scipy.interpolate
+ from ezmsg.baseproc import (
+     BaseConsumerUnit,
+     BaseStatefulProcessor,
+     processor_state,
+ )
+ from ezmsg.util.messages.axisarray import AxisArray, LinearAxis
+ from ezmsg.util.messages.util import replace
+
+ from .util.axisarray_buffer import HybridAxisArrayBuffer, HybridAxisBuffer
+ from .util.buffer import UpdateStrategy
+
+
+ class ResampleSettings(ez.Settings):
+     axis: str = "time"
+
+     resample_rate: float | None = None
+     """target resample rate in Hz. If None, the resample rate will be determined by the reference signal."""
+
+     max_chunk_delay: float = np.inf
+     """Maximum delay between outputs in seconds. If the delay exceeds this value, the transformer will extrapolate."""
+
+     fill_value: str = "extrapolate"
+     """
+     Value to use for out-of-bounds samples.
+     If 'extrapolate', the transformer will extrapolate.
+     If 'last', the transformer will use the last sample.
+     See scipy.interpolate.interp1d for more options.
+     """
+
+     buffer_duration: float = 2.0
+
+     buffer_update_strategy: UpdateStrategy = "immediate"
+     """
+     The buffer update strategy. See :obj:`ezmsg.sigproc.util.buffer.UpdateStrategy`.
+     If you expect to push data much more frequently than it is resampled, then "on_demand"
+     might be more efficient. For most other scenarios, "immediate" is best.
+     """
+
+
+ @processor_state
+ class ResampleState:
+     src_buffer: HybridAxisArrayBuffer | None = None
+     """
+     Buffer for the incoming signal data. This is the source for training the interpolation function.
+     Its contents are rarely empty because we usually hold back some data to allow for accurate
+     interpolation and optionally extrapolation.
+     """
+
+     ref_axis_buffer: HybridAxisBuffer | None = None
+     """
+     The buffer for the reference axis (usually a time axis). The interpolation function
+     will be evaluated at the reference axis values.
+     When resample_rate is None, this buffer will be filled with the axis from incoming
+     _reference_ messages.
+     When resample_rate is not None (i.e., prescribed float resample_rate), this buffer
+     is filled with a synthetic axis that is generated from the incoming signal messages.
+     """
+
+     last_ref_ax_val: float | None = None
+     """
+     The last value of the reference axis that was returned. This helps us to know
+     what the _next_ returned value should be, and to avoid returning the same value.
+     TODO: We can eliminate this variable if we maintain "by convention" that the
+     reference axis always has 1 value at its start that we exclude from the resampling.
+     """
+
+     last_write_time: float = -np.inf
+     """
+     Wall clock time of the last write to the signal buffer.
+     This is used to determine if we need to extrapolate the reference axis
+     if we have not received an update within max_chunk_delay.
+     """
+
+
+ class ResampleProcessor(BaseStatefulProcessor[ResampleSettings, AxisArray, AxisArray, ResampleState]):
+     def _hash_message(self, message: AxisArray) -> int:
+         ax_idx: int = message.get_axis_idx(self.settings.axis)
+         sample_shape = message.data.shape[:ax_idx] + message.data.shape[ax_idx + 1 :]
+         ax = message.axes[self.settings.axis]
+         gain = ax.gain if hasattr(ax, "gain") else None
+         return hash((message.key, gain) + sample_shape)
+
+     def _reset_state(self, message: AxisArray) -> None:
+         """
+         Reset the internal state based on the incoming message.
+         """
+         self.state.src_buffer = HybridAxisArrayBuffer(
+             duration=self.settings.buffer_duration,
+             axis=self.settings.axis,
+             update_strategy=self.settings.buffer_update_strategy,
+             overflow_strategy="grow",
+         )
+         if self.settings.resample_rate is not None:
+             # If we are resampling at a prescribed rate, then we synthesize a reference axis
+             self.state.ref_axis_buffer = HybridAxisBuffer(
+                 duration=self.settings.buffer_duration,
+             )
+             in_ax = message.axes[self.settings.axis]
+             out_gain = 1 / self.settings.resample_rate
+             t0 = in_ax.data[0] if hasattr(in_ax, "data") else in_ax.value(0)
+             self.state.last_ref_ax_val = t0 - out_gain
+         self.state.last_write_time = -np.inf
+
+     def push_reference(self, message: AxisArray) -> None:
+         ax = message.axes[self.settings.axis]
+         ax_idx = message.get_axis_idx(self.settings.axis)
+         if self.state.ref_axis_buffer is None:
+             self.state.ref_axis_buffer = HybridAxisBuffer(
+                 duration=self.settings.buffer_duration,
+                 update_strategy=self.settings.buffer_update_strategy,
+                 overflow_strategy="grow",
+             )
+             t0 = ax.data[0] if hasattr(ax, "data") else ax.value(0)
+             self.state.last_ref_ax_val = t0 - ax.gain
+         self.state.ref_axis_buffer.write(ax, n_samples=message.data.shape[ax_idx])
+
+     def _process(self, message: AxisArray) -> None:
+         """
+         Add a new data message to the buffer and update the reference axis if needed.
+         """
+         # Note: The src_buffer will copy and permute message if ax_idx != 0
+         self.state.src_buffer.write(message)
+
+         # If we are resampling at a prescribed rate (i.e., not by reference msgs),
+         # then we use this opportunity to extend our synthetic reference axis.
+         ax_idx = message.get_axis_idx(self.settings.axis)
+         if self.settings.resample_rate is not None and message.data.shape[ax_idx] > 0:
+             in_ax = message.axes[self.settings.axis]
+             in_t_end = in_ax.data[-1] if hasattr(in_ax, "data") else in_ax.value(message.data.shape[ax_idx] - 1)
+             out_gain = 1 / self.settings.resample_rate
+             prev_t_end = self.state.last_ref_ax_val
+             n_synth = math.ceil((in_t_end - prev_t_end) * self.settings.resample_rate)
+             synth_ref_axis = LinearAxis(unit="s", gain=out_gain, offset=prev_t_end + out_gain)
+             self.state.ref_axis_buffer.write(synth_ref_axis, n_samples=n_synth)
+
+         self.state.last_write_time = time.time()
+
+     def __next__(self) -> AxisArray:
+         if self.state.src_buffer is None or self.state.ref_axis_buffer is None:
+             # If we have not received any data, or we require reference data
+             # that we do not yet have, then return an empty template.
+             return AxisArray(data=np.array([]), dims=[""], axes={}, key="null")
+
+         src = self.state.src_buffer
+         ref = self.state.ref_axis_buffer
+
+         # If we have no reference or the source is insufficient for interpolation
+         # then return the empty template
+         if ref.is_empty() or src.available() < 3:
+             src_axarr = src.peek(0)
+             return replace(
+                 src_axarr,
+                 axes={
+                     **src_axarr.axes,
+                     self.settings.axis: ref.peek(0),
+                 },
+             )
+
+         # Build the reference xvec.
+         # Note: The reference axis buffer may grow upon `.peek()`
+         # as it flushes data from its deque to its buffer.
+         ref_ax = ref.peek()
+         if hasattr(ref_ax, "data"):
+             ref_xvec = ref_ax.data
+         else:
+             ref_xvec = ref_ax.value(np.arange(ref.available()))
+
+         # If we do not rely on an external reference, and we have not received new data in a while,
+         # then extrapolate our reference vector out beyond the delay limit.
+         b_project = self.settings.resample_rate is not None and time.time() > (
+             self.state.last_write_time + self.settings.max_chunk_delay
+         )
+         if b_project:
+             n_append = math.ceil(self.settings.max_chunk_delay / ref_ax.gain)
+             xvec_append = ref_xvec[-1] + np.arange(1, n_append + 1) * ref_ax.gain
+             ref_xvec = np.hstack((ref_xvec, xvec_append))
+
+         # Get source to train interpolation
+         src_axarr = src.peek()
+         src_axis = src_axarr.axes[self.settings.axis]
+         x = src_axis.data if hasattr(src_axis, "data") else src_axis.value(np.arange(src_axarr.data.shape[0]))
+
+         # Only resample at reference values that have not been interpolated over previously.
+         b_ref = ref_xvec > self.state.last_ref_ax_val
+         if not b_project:
+             # Not extrapolating -- Do not resample beyond the end of the source buffer.
+             b_ref = np.logical_and(b_ref, ref_xvec <= x[-1])
+         ref_idx = np.where(b_ref)[0]
+
+         if len(ref_idx) == 0:
+             # Nothing to interpolate over; return empty data
+             null_ref = replace(ref_ax, data=ref_ax.data[:0]) if hasattr(ref_ax, "data") else ref_ax
+             return replace(
+                 src_axarr,
+                 data=src_axarr.data[:0, ...],
+                 axes={**src_axarr.axes, self.settings.axis: null_ref},
+             )
+
+         xnew = ref_xvec[ref_idx]
+
+         # Identify source data indices around ref tvec with some padding for better interpolation.
+         src_start_ix = max(0, np.where(x > xnew[0])[0][0] - 2 if np.any(x > xnew[0]) else 0)
+
+         x = x[src_start_ix:]
+         y = src_axarr.data[src_start_ix:]
+
+         if isinstance(self.settings.fill_value, str) and self.settings.fill_value == "last":
+             fill_value = (y[0], y[-1])
+         else:
+             fill_value = self.settings.fill_value
+         f = scipy.interpolate.interp1d(
+             x,
+             y,
+             kind="linear",
+             axis=0,
+             copy=False,
+             bounds_error=False,
+             fill_value=fill_value,
+             assume_sorted=True,
+         )
+
+         # Calculate output
+         resampled_data = f(xnew)
+
+         # Create output message
+         if hasattr(ref_ax, "data"):
+             out_ax = replace(ref_ax, data=xnew)
+         else:
+             out_ax = replace(ref_ax, offset=xnew[0])
+         result = replace(
+             src_axarr,
+             data=resampled_data,
+             axes={
+                 **src_axarr.axes,
+                 self.settings.axis: out_ax,
+             },
+         )
+
+         # Update the state. For state buffers, seek beyond samples that are no longer needed.
+         # src: keep at least 1 sample before the final resampled value
+         seek_ix = np.where(x >= xnew[-1])[0]
+         if len(seek_ix) > 0:
+             self.state.src_buffer.seek(max(0, src_start_ix + seek_ix[0] - 1))
+         # ref: remove samples that have been sent to output
+         self.state.ref_axis_buffer.seek(ref_idx[-1] + 1)
+         self.state.last_ref_ax_val = xnew[-1]
+
+         return result
+
+     def send(self, message: AxisArray) -> AxisArray:
+         self(message)
+         return next(self)
+
+
+ class ResampleUnit(BaseConsumerUnit[ResampleSettings, AxisArray, ResampleProcessor]):
+     SETTINGS = ResampleSettings
+     INPUT_REFERENCE = ez.InputStream(AxisArray)
+     OUTPUT_SIGNAL = ez.OutputStream(AxisArray)
+
+     @ez.subscriber(INPUT_REFERENCE, zero_copy=True)
+     async def on_reference(self, message: AxisArray):
+         self.processor.push_reference(message)
+
+     @ez.publisher(OUTPUT_SIGNAL)
+     async def gen_resampled(self):
+         while True:
+             result: AxisArray = next(self.processor)
+             if np.prod(result.data.shape) > 0:
+                 yield self.OUTPUT_SIGNAL, result
+             else:
+                 await asyncio.sleep(0.001)
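
The resampler works in two modes: interpolate the buffered signal onto reference-message timestamps pushed via push_reference (INPUT_REFERENCE on the unit), or onto a synthetic grid when resample_rate is set. A fixed-rate sketch using the processor directly, assuming BaseStatefulProcessor takes its settings instance in the constructor like the transformers above; the input data is invented:

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.sigproc.resample import ResampleSettings, ResampleProcessor

# Resample a 100 Hz stream onto a fixed 40 Hz grid (no external reference stream).
proc = ResampleProcessor(ResampleSettings(axis="time", resample_rate=40.0))

chunk = AxisArray(
    data=np.random.randn(100, 4),
    dims=["time", "ch"],
    axes={"time": AxisArray.TimeAxis(100.0, offset=0.0)},
)

out = proc.send(chunk)  # buffers the chunk, then pulls the next resampled block
# Expect roughly (40, 4): one output sample per 25 ms covered by the source so far.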