ezmsg-sigproc 2.2.0__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,470 @@
1
+ import collections
2
+ import math
3
+ import typing
4
+ import warnings
5
+
6
+ Array = typing.TypeVar("Array")
7
+ ArrayNamespace = typing.Any
8
+ DType = typing.Any
9
+ UpdateStrategy = typing.Literal["immediate", "threshold", "on_demand"]
10
+ OverflowStrategy = typing.Literal["grow", "raise", "drop", "warn-overwrite"]
11
+
12
+
13
+ class HybridBuffer:
14
+ """A stateful, FIFO buffer that combines a deque for fast appends with a
15
+ contiguous circular buffer for efficient, advancing reads.
16
+
17
+ This buffer is designed to be agnostic to the array library used (e.g., NumPy,
18
+ CuPy, PyTorch) via the Python Array API standard.
19
+
20
+ Args:
21
+ array_namespace: The array library (e.g., numpy, cupy) that conforms to the Array API.
22
+ capacity: The initial maximum number of samples to store in the circular buffer.
23
+ other_shape: A tuple defining the shape of the non-sample dimensions.
24
+ dtype: The data type of the samples, belonging to the provided array_namespace.
25
+ update_strategy: The strategy for synchronizing the deque to the circular buffer (flushing).
26
+ threshold: The number of samples to accumulate in the deque before flushing.
27
+ Ignored if update_strategy is "immediate" or "on_demand".
28
+ overflow_strategy: The strategy for handling overflow when the buffer is full.
29
+ Options are "grow", "raise", "drop", or "warn-overwrite". If "grow" (default), the buffer will
30
+ increase its capacity to accommodate new samples up to max_size. If "raise", an error will be
31
+ raised when the buffer is full. If "drop", the overflowing samples will be ignored.
32
+ If "warn-overwrite", a warning will be logged then the overflowing samples will
33
+ overwrite previously-unread samples.
34
+ max_size: The maximum size of the buffer in bytes.
35
+ If the buffer exceeds this size, it will raise an error.
36
+ warn_once: If True, will only warn once on overflow when using "warn-overwrite" strategy.
37
+ """
38
+
39
+ def __init__(
40
+ self,
41
+ array_namespace: ArrayNamespace,
42
+ capacity: int,
43
+ other_shape: tuple[int, ...],
44
+ dtype: DType,
45
+ update_strategy: UpdateStrategy = "on_demand",
46
+ threshold: int = 0,
47
+ overflow_strategy: OverflowStrategy = "grow",
48
+ max_size: int = 1024**3, # 1 GB default max size
49
+ warn_once: bool = True,
50
+ ):
51
+ self.xp = array_namespace
52
+ self._capacity = capacity
53
+ self._deque = collections.deque()
54
+ self._update_strategy = update_strategy
55
+ self._threshold = threshold
56
+ self._overflow_strategy = overflow_strategy
57
+ self._max_size = max_size
58
+ self._warn_once = warn_once
59
+
60
+ self._buffer = self.xp.empty((capacity, *other_shape), dtype=dtype)
61
+ self._head = 0 # Write pointer
62
+ self._tail = 0 # Read pointer
63
+ self._buff_unread = 0 # Number of unread samples in the circular buffer
64
+ self._buff_read = 0 # Tracks samples read and still in buffer
65
+ self._deque_len = 0 # Number of unread samples in the deque
66
+ self._last_overflow = (
67
+ 0 # Tracks the last overflow count, overwritten or skipped
68
+ )
69
+ self._warned = False # Tracks if we've warned already (for warn_once)
70
+
71
+ @property
72
+ def capacity(self) -> int:
73
+ """The maximum number of samples that can be stored in the buffer."""
74
+ return self._capacity
75
+
76
+ def available(self) -> int:
77
+ """The total number of unread samples available (in buffer and deque)."""
78
+ return self._buff_unread + self._deque_len
79
+
80
+ def is_empty(self) -> bool:
81
+ """Returns True if there are no unread samples in the buffer or deque."""
82
+ return self.available() == 0
83
+
84
+ def is_full(self) -> bool:
85
+ """Returns True if the buffer is full and cannot _flush_ more samples without overwriting."""
86
+ return self._buff_unread == self._capacity
87
+
88
+ def tell(self) -> int:
89
+ """Returns the number of samples that have been read and are still in the buffer."""
90
+ return self._buff_read
91
+
92
+ def write(self, block: Array):
93
+ """Appends a new block (an array of samples) to the internal deque."""
94
+ other_shape = self._buffer.shape[1:]
95
+ if other_shape == (1,) and block.ndim == 1:
96
+ block = block[:, self.xp.newaxis]
97
+
98
+ if block.shape[1:] != other_shape:
99
+ raise ValueError(
100
+ f"Block shape {block.shape[1:]} does not match buffer's other_shape {other_shape}"
101
+ )
102
+
103
+ # Most overflow strategies are handled during flush, but there are a couple
104
+ # scenarios that can be evaluated on write to give immediate feedback.
105
+ new_len = self._deque_len + block.shape[0]
106
+ if new_len > self._capacity and self._overflow_strategy == "raise":
107
+ raise OverflowError(
108
+ f"Buffer overflow: {new_len} samples awaiting in deque exceeds buffer capacity {self._capacity}."
109
+ )
110
+ elif new_len * block.dtype.itemsize > self._max_size:
111
+ raise OverflowError(
112
+ f"deque contents would exceed max_size ({self._max_size}) on subsequent flush."
113
+ "Are you reading samples frequently enough?"
114
+ )
115
+
116
+ self._deque.append(block)
117
+ self._deque_len += block.shape[0]
118
+
119
+ if self._update_strategy == "immediate" or (
120
+ self._update_strategy == "threshold"
121
+ and (0 < self._threshold <= self._deque_len)
122
+ ):
123
+ self.flush()
124
+
125
+ def _estimate_overflow(self, n_samples: int) -> int:
126
+ """
127
+ Estimates the number of samples that would overflow we requested n_samples
128
+ from the buffer.
129
+ """
130
+ if n_samples > self.available():
131
+ raise ValueError(
132
+ f"Requested {n_samples} samples, but only {self.available()} are available."
133
+ )
134
+ n_overflow = 0
135
+ if self._deque and (n_samples > self._buff_unread):
136
+ # We would cause a flush, but would that cause an overflow?
137
+ n_free = self._capacity - self._buff_unread
138
+ n_overflow = max(0, self._deque_len - n_free)
139
+ return n_overflow
140
+
141
+ def read(
142
+ self,
143
+ n_samples: int | None = None,
144
+ ) -> Array:
145
+ """
146
+ Retrieves the oldest unread samples from the buffer
147
+ and advances the read head.
148
+
149
+ Args:
150
+ n_samples: The number of samples to retrieve. If None, returns all
151
+ unread samples.
152
+
153
+ Returns:
154
+ An array containing the requested samples. This may be a view or a copy.
155
+ Note: The result may contain more samples than the buffer capacity, as it
156
+ may include samples taken from the deque.
157
+ """
158
+ n_samples = n_samples if n_samples is not None else self.available()
159
+ data = None
160
+ offset = 0
161
+ n_overflow = self._estimate_overflow(n_samples)
162
+ if n_overflow > 0:
163
+ first_read = self._buff_unread
164
+ if (n_overflow - first_read) < self.capacity or (
165
+ self._overflow_strategy == "drop"
166
+ ):
167
+ # We can prevent the overflow (or at least *some* if using "drop"
168
+ # strategy) by reading the samples in the buffer first to make room.
169
+ data = self.xp.empty(
170
+ (n_samples, *self._buffer.shape[1:]), dtype=self._buffer.dtype
171
+ )
172
+ self.peek(first_read, out=data[:first_read])
173
+ offset += first_read
174
+ self.seek(first_read)
175
+ n_samples -= first_read
176
+ if data is None:
177
+ data = self.peek(n_samples)
178
+ self.seek(data.shape[0])
179
+ else:
180
+ d2 = self.peek(n_samples, out=data[offset:])
181
+ self.seek(d2.shape[0])
182
+
183
+ return data
184
+
185
+ def peek(self, n_samples: int | None = None, out: Array | None = None) -> Array:
186
+ """
187
+ Retrieves the oldest unread samples from the buffer without
188
+ advancing the read head.
189
+
190
+ Args:
191
+ n_samples: The number of samples to retrieve. If None, returns all
192
+ unread samples.
193
+ out: Optionally, a destination array to store the samples.
194
+ If provided, must have shape (n_samples, *other_shape) where
195
+ other_shape matches the shape of the samples in the buffer.
196
+ If `out` is provided then the data will always be copied into it,
197
+ even if the samples are contiguous in the buffer.
198
+
199
+ Returns:
200
+ An array containing the requested samples. This may be a view or a copy.
201
+ Note: The result may contain more samples than the buffer capacity, as it
202
+ may include samples taken from the deque.
203
+ """
204
+ if n_samples is None:
205
+ n_samples = self.available()
206
+ elif n_samples > self.available():
207
+ raise ValueError(
208
+ f"Requested to peek {n_samples} samples, but only {self.available()} are available."
209
+ )
210
+ if out is not None and out.shape[0] < n_samples:
211
+ raise ValueError(
212
+ f"Output array shape {out.shape} is smaller than requested {n_samples} samples."
213
+ )
214
+
215
+ if n_samples == 0:
216
+ return self._buffer[:0]
217
+
218
+ self._flush_if_needed(n_samples=n_samples)
219
+
220
+ if self._tail + n_samples > self._capacity:
221
+ # discontiguous read (wraps around)
222
+ part1_len = self._capacity - self._tail
223
+ part2_len = n_samples - part1_len
224
+ out = (
225
+ out
226
+ if out is not None
227
+ else self.xp.empty(
228
+ (n_samples, *self._buffer.shape[1:]), dtype=self._buffer.dtype
229
+ )
230
+ )
231
+ out[:part1_len] = self._buffer[self._tail :]
232
+ out[part1_len:] = self._buffer[:part2_len]
233
+ else:
234
+ if out is not None:
235
+ out[:] = self._buffer[self._tail : self._tail + n_samples]
236
+ else:
237
+ # No output array provided, just return a view
238
+ out = self._buffer[self._tail : self._tail + n_samples]
239
+
240
+ return out
241
+
242
+ def peek_at(self, idx: int, allow_flush: bool = False) -> Array:
243
+ """
244
+ Retrieves a specific sample from the buffer without advancing the read head.
245
+
246
+ Args:
247
+ idx: The index of the sample to retrieve, relative to the read head.
248
+ allow_flush: If True, allows flushing the deque to the buffer if the
249
+ requested sample is not in the buffer. If False and the sample is
250
+ in the deque, the sample will be retrieved from the deque (slow!).
251
+
252
+ Returns:
253
+ An array containing the requested sample. This may be a view or a copy.
254
+ """
255
+ if idx < 0 or idx >= self.available():
256
+ raise IndexError(f"Index {idx} out of bounds for unread samples.")
257
+
258
+ if not allow_flush and idx >= self._buff_unread:
259
+ # The requested sample is in the deque.
260
+ idx -= self._buff_unread
261
+ deq_splits = self.xp.cumsum(
262
+ [0] + [_.shape[0] for _ in self._deque], dtype=int
263
+ )
264
+ arr_idx = self.xp.searchsorted(deq_splits, idx, side="right") - 1
265
+ idx -= deq_splits[arr_idx]
266
+ return self._deque[arr_idx][idx : idx + 1]
267
+
268
+ self._flush_if_needed(n_samples=idx + 1)
269
+
270
+ # The requested sample is within the unread samples in the buffer.
271
+ idx = (self._tail + idx) % self._capacity
272
+ return self._buffer[idx : idx + 1]
273
+
274
+ def peek_last(self) -> Array:
275
+ """
276
+ Retrieves the last sample in the buffer without advancing the read head.
277
+ """
278
+ if self._deque:
279
+ return self._deque[-1][-1:]
280
+ elif self._buff_unread > 0:
281
+ idx = (self._head - 1 + self._capacity) % self._capacity
282
+ return self._buffer[idx : idx + 1]
283
+ else:
284
+ raise IndexError("Cannot peek last from an empty buffer.")
285
+
286
+ def seek(self, n_samples: int) -> int:
287
+ """
288
+ Advances the read head by n_samples.
289
+
290
+ Args:
291
+ n_samples: The number of samples to seek.
292
+ Will seek forward if positive or backward if negative.
293
+
294
+ Returns:
295
+ The number of samples actually skipped.
296
+ """
297
+ self._flush_if_needed(n_samples=n_samples)
298
+
299
+ n_to_seek = max(min(n_samples, self._buff_unread), -self._buff_read)
300
+
301
+ if n_to_seek == 0:
302
+ return 0
303
+
304
+ self._tail = (self._tail + n_to_seek) % self._capacity
305
+ self._buff_unread -= n_to_seek
306
+ self._buff_read += n_to_seek
307
+
308
+ return n_to_seek
309
+
310
+ def _flush_if_needed(self, n_samples: int | None = None):
311
+ if (
312
+ self._update_strategy == "on_demand"
313
+ and self._deque
314
+ and (n_samples is None or n_samples > self._buff_unread)
315
+ ):
316
+ self.flush()
317
+
318
+ def flush(self):
319
+ """
320
+ Transfers all data from the deque to the circular buffer.
321
+ Note: This may overwrite data depending on the overflow strategy,
322
+ which will invalidate previous state variables.
323
+ """
324
+ if not self._deque:
325
+ return
326
+
327
+ n_new = self._deque_len
328
+ n_free = self._capacity - self._buff_unread
329
+ n_overflow = max(0, n_new - n_free)
330
+
331
+ # If new data is larger than buffer and overflow strategy is "warn-overwrite",
332
+ # then we can take a shortcut and replace the entire buffer.
333
+ if n_new >= self._capacity and self._overflow_strategy == "warn-overwrite":
334
+ if n_overflow > 0 and (not self._warn_once or not self._warned):
335
+ self._warned = True
336
+ warnings.warn(
337
+ f"Buffer overflow: {n_new} samples received, but only {self._capacity - self._buff_unread} available. "
338
+ f"Overwriting {n_overflow} previous samples.",
339
+ RuntimeWarning,
340
+ )
341
+
342
+ # We need to grab the last `self._capacity` samples from the deque
343
+ samples_to_copy = self._capacity
344
+ copied_samples = 0
345
+ for block in reversed(self._deque):
346
+ if copied_samples >= samples_to_copy:
347
+ break
348
+ n_to_copy = min(block.shape[0], samples_to_copy - copied_samples)
349
+ start_idx = block.shape[0] - n_to_copy
350
+ self._buffer[
351
+ samples_to_copy - copied_samples - n_to_copy : samples_to_copy
352
+ - copied_samples
353
+ ] = block[start_idx:]
354
+ copied_samples += n_to_copy
355
+
356
+ self._head = 0
357
+ self._tail = 0
358
+ self._buff_unread = self._capacity
359
+ self._buff_read = 0
360
+ self._last_overflow = n_overflow
361
+
362
+ else:
363
+ if n_overflow > 0:
364
+ if self._overflow_strategy == "raise":
365
+ raise OverflowError(
366
+ f"Buffer overflow: {n_new} samples received, but only {n_free} available."
367
+ )
368
+ elif self._overflow_strategy == "warn-overwrite":
369
+ if not self._warn_once or not self._warned:
370
+ self._warned = True
371
+ warnings.warn(
372
+ f"Buffer overflow: {n_new} samples received, but only {n_free} available. "
373
+ f"Overwriting {n_overflow} previous samples.",
374
+ RuntimeWarning,
375
+ )
376
+ # Move the tail forward to make room for the new data.
377
+ self.seek(n_overflow)
378
+ # Adjust the read pointer to account for the overflow. Should always be 0.
379
+ self._buff_read = max(0, self._buff_read - n_overflow)
380
+ self._last_overflow = n_overflow
381
+ elif self._overflow_strategy == "drop":
382
+ # Drop the overflow samples from the deque
383
+ samples_to_drop = n_overflow
384
+ while samples_to_drop > 0 and self._deque:
385
+ block = self._deque[-1]
386
+ if samples_to_drop >= block.shape[0]:
387
+ samples_to_drop -= block.shape[0]
388
+ self._deque.pop()
389
+ else:
390
+ block = self._deque.pop()
391
+ self._deque.append(block[:-samples_to_drop])
392
+ samples_to_drop = 0
393
+ n_new -= n_overflow
394
+ self._last_overflow = n_overflow
395
+
396
+ elif self._overflow_strategy == "grow":
397
+ self._grow_buffer(self._capacity + n_new)
398
+ self._last_overflow = 0
399
+
400
+ # Copy data to buffer by iterating over the deque
401
+ for block in self._deque:
402
+ n_block = block.shape[0]
403
+ space_til_end = self._capacity - self._head
404
+ if n_block > space_til_end:
405
+ # Two-part copy (wraps around)
406
+ part1_len = space_til_end
407
+ part2_len = n_block - part1_len
408
+ self._buffer[self._head :] = block[:part1_len]
409
+ self._buffer[:part2_len] = block[part1_len:]
410
+ else:
411
+ # Single-part copy
412
+ self._buffer[self._head : self._head + n_block] = block
413
+ self._head = (self._head + n_block) % self._capacity
414
+
415
+ self._buff_unread += n_new
416
+ if (self._buff_read > self._tail) or (self._tail > self._head):
417
+ # We have wrapped around the buffer; our count of read samples
418
+ # is simply the buffer capacity minus the count of unread samples.
419
+ self._buff_read = self._capacity - self._buff_unread
420
+
421
+ self._deque.clear()
422
+ self._deque_len = 0
423
+
424
+ def _grow_buffer(self, min_capacity: int):
425
+ """
426
+ Grows the buffer to at least min_capacity.
427
+ This is a helper method for the overflow strategy "grow".
428
+ """
429
+ if self._capacity >= min_capacity:
430
+ return
431
+
432
+ other_shape = self._buffer.shape[1:]
433
+ max_capacity = self._max_size // (
434
+ self._buffer.dtype.itemsize * math.prod(other_shape)
435
+ )
436
+ if min_capacity > max_capacity:
437
+ raise OverflowError(
438
+ f"Cannot grow buffer to {min_capacity} samples, "
439
+ f"maximum capacity is {max_capacity} samples ({self._max_size} bytes)."
440
+ )
441
+
442
+ new_capacity = min(max_capacity, max(self._capacity * 2, min_capacity))
443
+ new_buffer = self.xp.empty(
444
+ (new_capacity, *other_shape), dtype=self._buffer.dtype
445
+ )
446
+
447
+ # Copy existing data to new buffer
448
+ total_samples = self._buff_read + self._buff_unread
449
+ if total_samples > 0:
450
+ start_idx = (self._tail - self._buff_read) % self._capacity
451
+ stop_idx = (self._tail + self._buff_unread) % self._capacity
452
+ if stop_idx > start_idx:
453
+ # Data is contiguous
454
+ new_buffer[:total_samples] = self._buffer[start_idx:stop_idx]
455
+ else:
456
+ # Data wraps around. We write it in 2 parts.
457
+ part1_len = self._capacity - start_idx
458
+ part2_len = stop_idx
459
+ new_buffer[:part1_len] = self._buffer[start_idx:]
460
+ new_buffer[part1_len : part1_len + part2_len] = self._buffer[:stop_idx]
461
+ # self._buff_read stays the same
462
+ self._tail = self._buff_read
463
+ # self._buff_unread stays the same
464
+ self._head = self._tail + self._buff_unread
465
+ else:
466
+ self._tail = 0
467
+ self._head = 0
468
+
469
+ self._buffer = new_buffer
470
+ self._capacity = new_capacity
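
For orientation, here is a minimal usage sketch of the HybridBuffer added above (not part of the package diff). It assumes NumPy as the array namespace and an import path of ezmsg.sigproc.util.buffer, which is inferred from the RECORD entries further below; parameter values are illustrative only.

import numpy as np

# Assumed import path; the wheel adds both util/buffer.py and util/axisarray_buffer.py.
from ezmsg.sigproc.util.buffer import HybridBuffer

buf = HybridBuffer(
    array_namespace=np,
    capacity=1024,                # circular-buffer capacity, in samples
    other_shape=(8,),             # e.g. 8 channels per sample
    dtype=np.float32,
    update_strategy="on_demand",  # flush deque -> circular buffer only when a read needs it
    overflow_strategy="grow",     # grow capacity (up to max_size) instead of raising/dropping
)

# Writes are cheap appends onto the internal deque.
buf.write(np.zeros((256, 8), dtype=np.float32))
buf.write(np.ones((256, 8), dtype=np.float32))
assert buf.available() == 512     # unread samples across deque + circular buffer

chunk = buf.read(128)             # triggers an on-demand flush, then advances the read head
assert chunk.shape == (128, 8)
assert buf.tell() == 128          # samples already read but still resident in the buffer
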
ezmsg/sigproc/window.py CHANGED
@@ -209,13 +209,15 @@ class WindowTransformer(
209
209
  )
210
210
 
211
211
  # Create a vector of buffer timestamps to track axis `offset` in output(s)
212
- buffer_tvec = xp.asarray(range(self._state.buffer.shape[axis_idx]), dtype=float)
212
+ buffer_t0 = 0.0
213
+ buffer_tlen = self._state.buffer.shape[axis_idx]
213
214
 
214
215
  # Adjust so first _new_ sample at index 0.
215
- buffer_tvec -= buffer_tvec[-message.data.shape[axis_idx]]
216
+ buffer_t0 -= self._state.buffer.shape[axis_idx] - message.data.shape[axis_idx]
217
+
216
218
  # Convert from indices to 'units' (probably seconds).
217
- buffer_tvec *= axis_info.gain
218
- buffer_tvec += axis_info.offset
219
+ buffer_t0 *= axis_info.gain
220
+ buffer_t0 += axis_info.offset
219
221
 
220
222
  if self.settings.window_shift is not None and self._state.shift_deficit > 0:
221
223
  n_skip = min(self._state.buffer.shape[axis_idx], self._state.shift_deficit)
@@ -223,7 +225,8 @@ class WindowTransformer(
223
225
  self._state.buffer = slice_along_axis(
224
226
  self._state.buffer, slice(n_skip, None), axis_idx
225
227
  )
226
- buffer_tvec = buffer_tvec[n_skip:]
228
+ buffer_t0 += n_skip * axis_info.gain
229
+ buffer_tlen -= n_skip
227
230
  self._state.shift_deficit -= n_skip
228
231
 
229
232
  # Generate outputs.
@@ -250,7 +253,9 @@ class WindowTransformer(
250
253
  + (1,)
251
254
  + self._state.buffer.shape[axis_idx:]
252
255
  )
253
- win_offset = buffer_tvec[-self._state.window_samples]
256
+ win_offset = buffer_t0 + axis_info.gain * (
257
+ buffer_tlen - self._state.window_samples
258
+ )
254
259
  elif self._state.buffer.shape[axis_idx] >= self._state.window_samples:
255
260
  # Deterministic window shifts.
256
261
  sliding_win_fun = (
@@ -264,10 +269,7 @@ class WindowTransformer(
264
269
  axis_idx,
265
270
  step=self._state.window_shift_samples,
266
271
  )
267
- offset_view = sliding_win_fun(buffer_tvec, self._state.window_samples, 0)[
268
- :: self._state.window_shift_samples
269
- ]
270
- win_offset = offset_view[0, 0]
272
+ win_offset = buffer_t0
271
273
 
272
274
  # Drop expired beginning of buffer and update shift_deficit
273
275
  multi_shift = self._state.window_shift_samples * out_dat.shape[axis_idx]
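
The window.py change above replaces the per-sample timestamp vector (buffer_tvec) with scalar bookkeeping (buffer_t0 plus the axis gain). The following standalone sketch (illustrative values only, not code from the package) shows that the two formulations yield the same window offset.

import numpy as np

gain = 1.0 / 500.0     # seconds per sample (axis_info.gain)
offset = 12.0          # timestamp of the first *new* sample (axis_info.offset)
buffer_len = 700       # samples currently in the window buffer
new_len = 200          # samples in the incoming message
window_samples = 500

# Old approach: a full vector of per-sample timestamps, shifted so the first
# new sample sits at index time 0 before applying gain/offset.
tvec = np.arange(buffer_len, dtype=float)
tvec -= tvec[-new_len]
tvec = tvec * gain + offset

# New approach: track only the timestamp of the first buffered sample.
buffer_t0 = -(buffer_len - new_len) * gain + offset

# The offset of a window ending at the newest sample is identical either way.
old_win_offset = tvec[-window_samples]
new_win_offset = buffer_t0 + gain * (buffer_len - window_samples)
assert np.isclose(old_win_offset, new_win_offset)
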
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ezmsg-sigproc
3
- Version: 2.2.0
3
+ Version: 2.4.0
4
4
  Summary: Timeseries signal processing implementations in ezmsg
5
5
  Author-email: Griffin Milsap <griffin.milsap@gmail.com>, Preston Peranich <pperanich@gmail.com>, Chadwick Boulay <chadwick.boulay@gmail.com>
6
6
  License-Expression: MIT
@@ -1,28 +1,33 @@
1
1
  ezmsg/sigproc/__init__.py,sha256=8K4IcOA3-pfzadoM6s2Sfg5460KlJUocGgyTJTJl96U,52
2
- ezmsg/sigproc/__version__.py,sha256=wdFcRBeaWRZXknL-E8RTk9hV9M-OMto6dfJ90sc1i9A,511
2
+ ezmsg/sigproc/__version__.py,sha256=69_lBCO99qjONN2phoPwQ0THjdLm1VftCSvDKqELbGk,704
3
3
  ezmsg/sigproc/activation.py,sha256=qWAhpbFBxSoqbGy4P9JKE5LY-5v8rQI1U81OvNxBG2Y,2820
4
4
  ezmsg/sigproc/adaptive_lattice_notch.py,sha256=3M65PrZpdgBlQtE7Ph4Gu2ISIyWw4j8Xxhm5PpSkLFw,9102
5
5
  ezmsg/sigproc/affinetransform.py,sha256=WU495KoDKZfHPS3Dumh65rgf639koNlfDIx_torIByg,8662
6
- ezmsg/sigproc/aggregate.py,sha256=KR3u9D9jx9KcOQlvI10I6krSxbZCIerG2i4u5Wu5qMI,6754
6
+ ezmsg/sigproc/aggregate.py,sha256=wHUP_aS9NgnOxBCPN1_tSxCqMMb8UPBEoKwGKX7-ASk,9199
7
7
  ezmsg/sigproc/bandpower.py,sha256=j-Y6iWjD2xkggfi-4HAFJVBPJHHBGvAZy1uM4murZkQ,2319
8
8
  ezmsg/sigproc/base.py,sha256=PQr03O2P1v9LzcSR0GJLvPpBCLtnmGaz76gUeXphcH4,48753
9
9
  ezmsg/sigproc/butterworthfilter.py,sha256=7ZP4CRsXBt3-5dzyUjD45vc0J3Fhpm4CLrk-ps28jhc,5305
10
10
  ezmsg/sigproc/cheby.py,sha256=-aSauAwxJmmSSiRaw5qGY9rvYFOmk1bZlS4gGrS0jls,3737
11
11
  ezmsg/sigproc/combfilter.py,sha256=5UCfzGESpS5LSx6rxZv8_n25ZUvOOmws-mM_gpTZNhU,4777
12
12
  ezmsg/sigproc/decimate.py,sha256=Lz46fBllWagu17QeQzgklm6GWCV-zPysiydiby2IElU,2347
13
+ ezmsg/sigproc/denormalize.py,sha256=qMXkxpNoEACHzEfluA0wV4716HQyGE_1tcFAa8uzhIc,3091
13
14
  ezmsg/sigproc/detrend.py,sha256=7bpjFKdk2b6FdVn2GEtMbWtCuk7ToeiYKEBHVbN4Gd0,903
14
15
  ezmsg/sigproc/diff.py,sha256=P5BBjR7KdaCL9aD3GG09cmC7a-3cxDeEUw4nKdQ1HY8,2895
15
16
  ezmsg/sigproc/downsample.py,sha256=0X6EwPZ_XTwA2-nx5w-2HmMZUEDFuGAYF5EmPSuuVj8,3721
16
17
  ezmsg/sigproc/ewma.py,sha256=W_VS2MxiO1J7z2XS6rtnLnCEXxdRPQbMKtZduBwqTEQ,6369
17
18
  ezmsg/sigproc/ewmfilter.py,sha256=EPlocRdKORj575VV1YUzcNsVcq-pYgdEJ7_m9WfpVnY,4795
18
19
  ezmsg/sigproc/extract_axis.py,sha256=Gl8Hl_Ho2pPzchPjfseVHVRAqxj6eOvUQZlzfYRA7eI,1603
19
- ezmsg/sigproc/filter.py,sha256=SfptCJFVxYL4sTNoMCsn2NYr66bo6ea1w9PqzWZmUBY,11299
20
+ ezmsg/sigproc/fbcca.py,sha256=8NTJAOpHIvNFwQepui2_ZaJV4SMDFgXrqoWJyiQdF5U,12362
21
+ ezmsg/sigproc/filter.py,sha256=1MQUZDFIf6HAHuuhGQEvH4Yd6Jv_vv12PM25YaHjdxc,11921
20
22
  ezmsg/sigproc/filterbank.py,sha256=pJzv_G6chgWa1ARmRjMAMgt9eEGnA-ZbMSge4EWrcYY,13633
23
+ ezmsg/sigproc/filterbankdesign.py,sha256=OfIXM0ushSqbdSQG9DZB1Mh57d-lqdJQX8aqfxNN67E,4734
24
+ ezmsg/sigproc/firfilter.py,sha256=MCrwY3DLq-uMLX04JswVB9oHBSYJGbdUiQYW6eRdkxE,3805
21
25
  ezmsg/sigproc/gaussiansmoothing.py,sha256=NaVezgNwdvp-kam1I_7lSID4Obi0UCxZshH7A2afaVg,2692
26
+ ezmsg/sigproc/kaiser.py,sha256=WsZB8a4DP7WwrYLlGczHS61L86TiH6qEStAB6zxODhY,3502
22
27
  ezmsg/sigproc/messages.py,sha256=y_twVPK7TxRj8ajmuSuBuxwvLTgyv9OF7Y7v9bw1tfs,926
23
28
  ezmsg/sigproc/quantize.py,sha256=VzaqE6PatibEjkk7XrGO-ubAXYurAed9FYOn4bcQZQk,2193
24
- ezmsg/sigproc/resample.py,sha256=XQzEbUq44qTx5tXX2QXd14hkMb7C3LXT3CqbC161X1M,11600
25
- ezmsg/sigproc/sampler.py,sha256=qrw-7US3mqrGS7lOio7P_za0MSPgBhSxinIrMf1P3Os,11026
29
+ ezmsg/sigproc/resample.py,sha256=wqSM7g3QrcrklCeGVNN4l_qZLSXRUPHXCUxl1L47300,11654
30
+ ezmsg/sigproc/sampler.py,sha256=D5oMIZHAJS6XIKMdOHsDw97d4ZxfNP7iZwpc6J8Jmpk,10898
26
31
  ezmsg/sigproc/scaler.py,sha256=fCLHvCNUSgv0XChf8iS9s5uHCSCVjCasM2TCvyG5BwQ,4111
27
32
  ezmsg/sigproc/signalinjector.py,sha256=hGC837JyDLtAGrfsdMwzEoOqWXiwP7r7sGlUC9nahTY,2948
28
33
  ezmsg/sigproc/slicer.py,sha256=QKiq8wOTXf3kwWSCiZEGn9rA9HaM_q6PbXvtfpgjTXw,5417
@@ -32,7 +37,7 @@ ezmsg/sigproc/spectrum.py,sha256=xTSP8QFCG9M3NHveFkcks_wI-RzD7kM_fR1dmaLtiEQ,973
32
37
  ezmsg/sigproc/synth.py,sha256=DdE9yEXGrDRb745cOgKNpY2frI5uM2VHmCsaZO-UkBk,24547
33
38
  ezmsg/sigproc/transpose.py,sha256=AIwz8X2AS7Tf1_aidND132uDuB04M4f3-0iRYq0ysC8,4530
34
39
  ezmsg/sigproc/wavelets.py,sha256=g9nYRF4oVov2uLC0tfzPOLjaQah_HhM0ckhQ4m23mms,7507
35
- ezmsg/sigproc/window.py,sha256=VAFqZsHu-J3hfBnbbUk9d7MIsbbPIgrqg3OSz7uhl_o,16242
40
+ ezmsg/sigproc/window.py,sha256=DjfD-4zd_phUbJbGTGijAm4H7O-bNVPdyCrSfvXJ_HQ,16192
36
41
  ezmsg/sigproc/math/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
37
42
  ezmsg/sigproc/math/abs.py,sha256=9E0A-p_Qa1SVzqbr1sesjgpu6-XGUZkcRJVK5jcNc0U,685
38
43
  ezmsg/sigproc/math/clip.py,sha256=bBGfy45CKsUIhrznYZdpgUa0Lz7GsOuFl87IeL0qNA8,1057
@@ -42,11 +47,13 @@ ezmsg/sigproc/math/log.py,sha256=bx0om3Qi3ZShExEZ-IH5Xrg3XFjNEmjVygWlXWjyrv8,147
42
47
  ezmsg/sigproc/math/scale.py,sha256=kMQRPYnm1o_9lC1EtIkoZOWaAWOWWbeT4ri1q7Hs7Fc,898
43
48
  ezmsg/sigproc/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
44
49
  ezmsg/sigproc/util/asio.py,sha256=PQew73hB1oRmp7pfTqx-c4uo1zqgjxvZcTZCROQrEP4,5270
50
+ ezmsg/sigproc/util/axisarray_buffer.py,sha256=NEGOYxf1TN8_QQ6Vo-iIf_ZuxlqB-0v0_mgGAqJzlwM,14203
51
+ ezmsg/sigproc/util/buffer.py,sha256=wbTyj0p-Ib17rJ_ooI7x0VT_f8nhn9i3fpIuDdJQnic,19899
45
52
  ezmsg/sigproc/util/message.py,sha256=l_b1b6bXX8N6VF9RbUELzsHs73cKkDURBdIr0lt3CY0,909
46
53
  ezmsg/sigproc/util/profile.py,sha256=KNJ_QkKelQHNEp2C8MhqzdhYydMNULc_NQq3ccMfzIk,5775
47
54
  ezmsg/sigproc/util/sparse.py,sha256=mE64p1tYb5A1shaRE1D-VnH-RshbLb8g8kXSXxnA-J4,4842
48
55
  ezmsg/sigproc/util/typeresolution.py,sha256=5R7xmG-F4CkdqQ5aoQnqM-htQb-VwAJl58jJgxtClys,3146
49
- ezmsg_sigproc-2.2.0.dist-info/METADATA,sha256=KfMyPnQipTaX6puyJxEaKu-JDT0tvbxLnhwKeEY_kKw,4977
50
- ezmsg_sigproc-2.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
51
- ezmsg_sigproc-2.2.0.dist-info/licenses/LICENSE.txt,sha256=seu0tKhhAMPCUgc1XpXGGaCxY1YaYvFJwqFuQZAl2go,1100
52
- ezmsg_sigproc-2.2.0.dist-info/RECORD,,
56
+ ezmsg_sigproc-2.4.0.dist-info/METADATA,sha256=FcsrFuRHBBbdrHsdlVGJjU7hUGkX-ql3xYWGAPdkD1M,4977
57
+ ezmsg_sigproc-2.4.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
58
+ ezmsg_sigproc-2.4.0.dist-info/licenses/LICENSE.txt,sha256=seu0tKhhAMPCUgc1XpXGGaCxY1YaYvFJwqFuQZAl2go,1100
59
+ ezmsg_sigproc-2.4.0.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: hatchling 1.27.0
2
+ Generator: hatchling 1.28.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any