ezmsg-sigproc 1.2.2__py3-none-any.whl → 2.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. ezmsg/sigproc/__init__.py +1 -1
  2. ezmsg/sigproc/__version__.py +34 -1
  3. ezmsg/sigproc/activation.py +78 -0
  4. ezmsg/sigproc/adaptive_lattice_notch.py +212 -0
  5. ezmsg/sigproc/affinetransform.py +235 -0
  6. ezmsg/sigproc/aggregate.py +276 -0
  7. ezmsg/sigproc/bandpower.py +80 -0
  8. ezmsg/sigproc/base.py +149 -0
  9. ezmsg/sigproc/butterworthfilter.py +129 -39
  10. ezmsg/sigproc/butterworthzerophase.py +305 -0
  11. ezmsg/sigproc/cheby.py +125 -0
  12. ezmsg/sigproc/combfilter.py +160 -0
  13. ezmsg/sigproc/coordinatespaces.py +159 -0
  14. ezmsg/sigproc/decimate.py +46 -18
  15. ezmsg/sigproc/denormalize.py +78 -0
  16. ezmsg/sigproc/detrend.py +28 -0
  17. ezmsg/sigproc/diff.py +82 -0
  18. ezmsg/sigproc/downsample.py +97 -49
  19. ezmsg/sigproc/ewma.py +217 -0
  20. ezmsg/sigproc/ewmfilter.py +45 -19
  21. ezmsg/sigproc/extract_axis.py +39 -0
  22. ezmsg/sigproc/fbcca.py +307 -0
  23. ezmsg/sigproc/filter.py +282 -117
  24. ezmsg/sigproc/filterbank.py +292 -0
  25. ezmsg/sigproc/filterbankdesign.py +129 -0
  26. ezmsg/sigproc/fir_hilbert.py +336 -0
  27. ezmsg/sigproc/fir_pmc.py +209 -0
  28. ezmsg/sigproc/firfilter.py +117 -0
  29. ezmsg/sigproc/gaussiansmoothing.py +89 -0
  30. ezmsg/sigproc/kaiser.py +106 -0
  31. ezmsg/sigproc/linear.py +120 -0
  32. ezmsg/sigproc/math/__init__.py +0 -0
  33. ezmsg/sigproc/math/abs.py +35 -0
  34. ezmsg/sigproc/math/add.py +120 -0
  35. ezmsg/sigproc/math/clip.py +48 -0
  36. ezmsg/sigproc/math/difference.py +143 -0
  37. ezmsg/sigproc/math/invert.py +28 -0
  38. ezmsg/sigproc/math/log.py +57 -0
  39. ezmsg/sigproc/math/scale.py +39 -0
  40. ezmsg/sigproc/messages.py +3 -6
  41. ezmsg/sigproc/quantize.py +68 -0
  42. ezmsg/sigproc/resample.py +278 -0
  43. ezmsg/sigproc/rollingscaler.py +232 -0
  44. ezmsg/sigproc/sampler.py +232 -241
  45. ezmsg/sigproc/scaler.py +165 -0
  46. ezmsg/sigproc/signalinjector.py +70 -0
  47. ezmsg/sigproc/slicer.py +138 -0
  48. ezmsg/sigproc/spectral.py +6 -132
  49. ezmsg/sigproc/spectrogram.py +90 -0
  50. ezmsg/sigproc/spectrum.py +277 -0
  51. ezmsg/sigproc/transpose.py +134 -0
  52. ezmsg/sigproc/util/__init__.py +0 -0
  53. ezmsg/sigproc/util/asio.py +25 -0
  54. ezmsg/sigproc/util/axisarray_buffer.py +365 -0
  55. ezmsg/sigproc/util/buffer.py +449 -0
  56. ezmsg/sigproc/util/message.py +17 -0
  57. ezmsg/sigproc/util/profile.py +23 -0
  58. ezmsg/sigproc/util/sparse.py +115 -0
  59. ezmsg/sigproc/util/typeresolution.py +17 -0
  60. ezmsg/sigproc/wavelets.py +187 -0
  61. ezmsg/sigproc/window.py +301 -117
  62. ezmsg_sigproc-2.10.0.dist-info/METADATA +60 -0
  63. ezmsg_sigproc-2.10.0.dist-info/RECORD +65 -0
  64. {ezmsg_sigproc-1.2.2.dist-info → ezmsg_sigproc-2.10.0.dist-info}/WHEEL +1 -2
  65. ezmsg/sigproc/synth.py +0 -411
  66. ezmsg_sigproc-1.2.2.dist-info/METADATA +0 -36
  67. ezmsg_sigproc-1.2.2.dist-info/RECORD +0 -17
  68. ezmsg_sigproc-1.2.2.dist-info/top_level.txt +0 -1
  69. /ezmsg_sigproc-1.2.2.dist-info/LICENSE.txt → /ezmsg_sigproc-2.10.0.dist-info/licenses/LICENSE +0 -0
@@ -0,0 +1,449 @@
1
+ """A stateful, FIFO buffer that combines a deque for fast appends with a
2
+ contiguous circular buffer for efficient, advancing reads.
3
+ """
4
+
5
+ import collections
6
+ import math
7
+ import typing
8
+ import warnings
9
+
10
+ Array = typing.TypeVar("Array")
11
+ ArrayNamespace = typing.Any
12
+ DType = typing.Any
13
+ UpdateStrategy = typing.Literal["immediate", "threshold", "on_demand"]
14
+ OverflowStrategy = typing.Literal["grow", "raise", "drop", "warn-overwrite"]
15
+
16
+
17
+ class HybridBuffer:
18
+ """A stateful, FIFO buffer that combines a deque for fast appends with a
19
+ contiguous circular buffer for efficient, advancing reads.
20
+
21
+ This buffer is designed to be agnostic to the array library used (e.g., NumPy,
22
+ CuPy, PyTorch) via the Python Array API standard.
23
+
24
+ Args:
25
+ array_namespace: The array library (e.g., numpy, cupy) that conforms to the Array API.
26
+ capacity: The current maximum number of samples to store in the circular buffer.
27
+ other_shape: A tuple defining the shape of the non-sample dimensions.
28
+ dtype: The data type of the samples, belonging to the provided array_namespace.
29
+ update_strategy: The strategy for synchronizing the deque to the circular buffer (flushing).
30
+ threshold: The number of samples to accumulate in the deque before flushing.
31
+ Ignored if update_strategy is "immediate" or "on_demand".
32
+ overflow_strategy: The strategy for handling overflow when the buffer is full.
33
+ Options are "grow", "raise", "drop", or "warn-overwrite". If "grow" (default), the buffer will
34
+ increase its capacity to accommodate new samples up to max_size. If "raise", an error will be
35
+ raised when the buffer is full. If "drop", the overflowing samples will be ignored.
36
+ If "warn-overwrite", a warning will be logged then the overflowing samples will
37
+ overwrite previously-unread samples.
38
+ max_size: The maximum size of the buffer in bytes.
39
+ If the buffer exceeds this size, it will raise an error.
40
+ warn_once: If True, will only warn once on overflow when using "warn-overwrite" strategy.
41
+ """
42
+
43
+ def __init__(
44
+ self,
45
+ array_namespace: ArrayNamespace,
46
+ capacity: int,
47
+ other_shape: tuple[int, ...],
48
+ dtype: DType,
49
+ update_strategy: UpdateStrategy = "on_demand",
50
+ threshold: int = 0,
51
+ overflow_strategy: OverflowStrategy = "grow",
52
+ max_size: int = 1024**3, # 1 GB default max size
53
+ warn_once: bool = True,
54
+ ):
55
+ self.xp = array_namespace
56
+ self._capacity = capacity
57
+ self._deque = collections.deque()
58
+ self._update_strategy = update_strategy
59
+ self._threshold = threshold
60
+ self._overflow_strategy = overflow_strategy
61
+ self._max_size = max_size
62
+ self._warn_once = warn_once
63
+
64
+ self._buffer = self.xp.empty((capacity, *other_shape), dtype=dtype)
65
+ self._head = 0 # Write pointer
66
+ self._tail = 0 # Read pointer
67
+ self._buff_unread = 0 # Number of unread samples in the circular buffer
68
+ self._buff_read = 0 # Tracks samples read and still in buffer
69
+ self._deque_len = 0 # Number of unread samples in the deque
70
+ self._last_overflow = 0 # Tracks the last overflow count, overwritten or skipped
71
+ self._warned = False # Tracks if we've warned already (for warn_once)
72
+
73
+ @property
74
+ def capacity(self) -> int:
75
+ """The maximum number of samples that can be stored in the buffer."""
76
+ return self._capacity
77
+
78
+ def available(self) -> int:
79
+ """The total number of unread samples available (in buffer and deque)."""
80
+ return self._buff_unread + self._deque_len
81
+
82
+ def is_empty(self) -> bool:
83
+ """Returns True if there are no unread samples in the buffer or deque."""
84
+ return self.available() == 0
85
+
86
+ def is_full(self) -> bool:
87
+ """Returns True if the buffer is full and cannot _flush_ more samples without overwriting."""
88
+ return self._buff_unread == self._capacity
89
+
90
+ def tell(self) -> int:
91
+ """Returns the number of samples that have been read and are still in the buffer."""
92
+ return self._buff_read
93
+
94
+ def write(self, block: Array):
95
+ """Appends a new block (an array of samples) to the internal deque."""
96
+ other_shape = self._buffer.shape[1:]
97
+ if other_shape == (1,) and block.ndim == 1:
98
+ block = block[:, self.xp.newaxis]
99
+
100
+ if block.shape[1:] != other_shape:
101
+ raise ValueError(f"Block shape {block.shape[1:]} does not match buffer's other_shape {other_shape}")
102
+
103
+ # Most overflow strategies are handled during flush, but there are a couple
104
+ # scenarios that can be evaluated on write to give immediate feedback.
105
+ new_len = self._deque_len + block.shape[0]
106
+ if new_len > self._capacity and self._overflow_strategy == "raise":
107
+ raise OverflowError(
108
+ f"Buffer overflow: {new_len} samples awaiting in deque exceeds buffer capacity {self._capacity}."
109
+ )
110
+ elif new_len * block.dtype.itemsize > self._max_size:
111
+ raise OverflowError(
112
+ f"deque contents would exceed max_size ({self._max_size}) on subsequent flush."
113
+ "Are you reading samples frequently enough?"
114
+ )
115
+
116
+ self._deque.append(block)
117
+ self._deque_len += block.shape[0]
118
+
119
+ if self._update_strategy == "immediate" or (
120
+ self._update_strategy == "threshold" and (0 < self._threshold <= self._deque_len)
121
+ ):
122
+ self.flush()
123
+
124
+ def _estimate_overflow(self, n_samples: int) -> int:
125
+ """
126
+ Estimates the number of samples that would overflow we requested n_samples
127
+ from the buffer.
128
+ """
129
+ if n_samples > self.available():
130
+ raise ValueError(f"Requested {n_samples} samples, but only {self.available()} are available.")
131
+ n_overflow = 0
132
+ if self._deque and (n_samples > self._buff_unread):
133
+ # We would cause a flush, but would that cause an overflow?
134
+ n_free = self._capacity - self._buff_unread
135
+ n_overflow = max(0, self._deque_len - n_free)
136
+ return n_overflow
137
+
138
+ def read(
139
+ self,
140
+ n_samples: int | None = None,
141
+ ) -> Array:
142
+ """
143
+ Retrieves the oldest unread samples from the buffer with padding
144
+ and advances the read head.
145
+
146
+ Args:
147
+ n_samples: The number of samples to retrieve. If None, returns all
148
+ unread samples.
149
+
150
+ Returns:
151
+ An array containing the requested samples. This may be a view or a copy.
152
+ Note: The result may have more samples than the buffer.capacity as it
153
+ may include samples from the deque in the output.
154
+ """
155
+ n_samples = n_samples if n_samples is not None else self.available()
156
+ data = None
157
+ offset = 0
158
+ n_overflow = self._estimate_overflow(n_samples)
159
+ if n_overflow > 0:
160
+ first_read = self._buff_unread
161
+ if (n_overflow - first_read) < self.capacity or (self._overflow_strategy == "drop"):
162
+ # We can prevent the overflow (or at least *some* if using "drop"
163
+ # strategy) by reading the samples in the buffer first to make room.
164
+ data = self.xp.empty((n_samples, *self._buffer.shape[1:]), dtype=self._buffer.dtype)
165
+ self.peek(first_read, out=data[:first_read])
166
+ offset += first_read
167
+ self.seek(first_read)
168
+ n_samples -= first_read
169
+ if data is None:
170
+ data = self.peek(n_samples)
171
+ self.seek(data.shape[0])
172
+ else:
173
+ d2 = self.peek(n_samples, out=data[offset:])
174
+ self.seek(d2.shape[0])
175
+
176
+ return data
177
+
178
+ def peek(self, n_samples: int | None = None, out: Array | None = None) -> Array:
179
+ """
180
+ Retrieves the oldest unread samples from the buffer with padding without
181
+ advancing the read head.
182
+
183
+ Args:
184
+ n_samples: The number of samples to retrieve. If None, returns all
185
+ unread samples.
186
+ out: Optionally, a destination array to store the samples.
187
+ If provided, must have shape (n_samples, *other_shape) where
188
+ other_shape matches the shape of the samples in the buffer.
189
+ If `out` is provided then the data will always be copied into it,
190
+ even if they are contiguous in the buffer.
191
+
192
+ Returns:
193
+ An array containing the requested samples. This may be a view or a copy.
194
+ Note: The result may have more samples than the buffer.capacity as it
195
+ may include samples from the deque in the output.
196
+ """
197
+ if n_samples is None:
198
+ n_samples = self.available()
199
+ elif n_samples > self.available():
200
+ raise ValueError(f"Requested to peek {n_samples} samples, but only {self.available()} are available.")
201
+ if out is not None and out.shape[0] < n_samples:
202
+ raise ValueError(f"Output array shape {out.shape} is smaller than requested {n_samples} samples.")
203
+
204
+ if n_samples == 0:
205
+ return self._buffer[:0]
206
+
207
+ self._flush_if_needed(n_samples=n_samples)
208
+
209
+ if self._tail + n_samples > self._capacity:
210
+ # discontiguous read (wraps around)
211
+ part1_len = self._capacity - self._tail
212
+ part2_len = n_samples - part1_len
213
+ out = (
214
+ out
215
+ if out is not None
216
+ else self.xp.empty((n_samples, *self._buffer.shape[1:]), dtype=self._buffer.dtype)
217
+ )
218
+ out[:part1_len] = self._buffer[self._tail :]
219
+ out[part1_len:] = self._buffer[:part2_len]
220
+ else:
221
+ if out is not None:
222
+ out[:] = self._buffer[self._tail : self._tail + n_samples]
223
+ else:
224
+ # No output array provided, just return a view
225
+ out = self._buffer[self._tail : self._tail + n_samples]
226
+
227
+ return out
228
+
229
+ def peek_at(self, idx: int, allow_flush: bool = False) -> Array:
230
+ """
231
+ Retrieves a specific sample from the buffer without advancing the read head.
232
+
233
+ Args:
234
+ idx: The index of the sample to retrieve, relative to the read head.
235
+ allow_flush: If True, allows flushing the deque to the buffer if the
236
+ requested sample is not in the buffer. If False and the sample is
237
+ in the deque, the sample will be retrieved from the deque (slow!).
238
+
239
+ Returns:
240
+ An array containing the requested sample. This may be a view or a copy.
241
+ """
242
+ if idx < 0 or idx >= self.available():
243
+ raise IndexError(f"Index {idx} out of bounds for unread samples.")
244
+
245
+ if not allow_flush and idx >= self._buff_unread:
246
+ # The requested sample is in the deque.
247
+ idx -= self._buff_unread
248
+ deq_splits = self.xp.cumsum([0] + [_.shape[0] for _ in self._deque], dtype=int)
249
+ arr_idx = self.xp.searchsorted(deq_splits, idx, side="right") - 1
250
+ idx -= deq_splits[arr_idx]
251
+ return self._deque[arr_idx][idx : idx + 1]
252
+
253
+ self._flush_if_needed(n_samples=idx + 1)
254
+
255
+ # The requested sample is within the unread samples in the buffer.
256
+ idx = (self._tail + idx) % self._capacity
257
+ return self._buffer[idx : idx + 1]
258
+
259
+ def peek_last(self) -> Array:
260
+ """
261
+ Retrieves the last sample in the buffer without advancing the read head.
262
+ """
263
+ if self._deque:
264
+ return self._deque[-1][-1:]
265
+ elif self._buff_unread > 0:
266
+ idx = (self._head - 1 + self._capacity) % self._capacity
267
+ return self._buffer[idx : idx + 1]
268
+ else:
269
+ raise IndexError("Cannot peek last from an empty buffer.")
270
+
271
+ def seek(self, n_samples: int) -> int:
272
+ """
273
+ Advances the read head by n_samples.
274
+
275
+ Args:
276
+ n_samples: The number of samples to seek.
277
+ Will seek forward if positive or backward if negative.
278
+
279
+ Returns:
280
+ The number of samples actually skipped.
281
+ """
282
+ self._flush_if_needed(n_samples=n_samples)
283
+
284
+ n_to_seek = max(min(n_samples, self._buff_unread), -self._buff_read)
285
+
286
+ if n_to_seek == 0:
287
+ return 0
288
+
289
+ self._tail = (self._tail + n_to_seek) % self._capacity
290
+ self._buff_unread -= n_to_seek
291
+ self._buff_read += n_to_seek
292
+
293
+ return n_to_seek
294
+
295
+ def _flush_if_needed(self, n_samples: int | None = None):
296
+ if (
297
+ self._update_strategy == "on_demand"
298
+ and self._deque
299
+ and (n_samples is None or n_samples > self._buff_unread)
300
+ ):
301
+ self.flush()
302
+
303
+ def flush(self):
304
+ """
305
+ Transfers all data from the deque to the circular buffer.
306
+ Note: This may overwrite data depending on the overflow strategy,
307
+ which will invalidate previous state variables.
308
+ """
309
+ if not self._deque:
310
+ return
311
+
312
+ n_new = self._deque_len
313
+ n_free = self._capacity - self._buff_unread
314
+ n_overflow = max(0, n_new - n_free)
315
+
316
+ # If new data is larger than buffer and overflow strategy is "warn-overwrite",
317
+ # then we can take a shortcut and replace the entire buffer.
318
+ if n_new >= self._capacity and self._overflow_strategy == "warn-overwrite":
319
+ if n_overflow > 0 and (not self._warn_once or not self._warned):
320
+ self._warned = True
321
+ warnings.warn(
322
+ f"Buffer overflow: {n_new} samples received, "
323
+ f"but only {self._capacity - self._buff_unread} available. "
324
+ f"Overwriting {n_overflow} previous samples.",
325
+ RuntimeWarning,
326
+ )
327
+
328
+ # We need to grab the last `self._capacity` samples from the deque
329
+ samples_to_copy = self._capacity
330
+ copied_samples = 0
331
+ for block in reversed(self._deque):
332
+ if copied_samples >= samples_to_copy:
333
+ break
334
+ n_to_copy = min(block.shape[0], samples_to_copy - copied_samples)
335
+ start_idx = block.shape[0] - n_to_copy
336
+ self._buffer[samples_to_copy - copied_samples - n_to_copy : samples_to_copy - copied_samples] = block[
337
+ start_idx:
338
+ ]
339
+ copied_samples += n_to_copy
340
+
341
+ self._head = 0
342
+ self._tail = 0
343
+ self._buff_unread = self._capacity
344
+ self._buff_read = 0
345
+ self._last_overflow = n_overflow
346
+
347
+ else:
348
+ if n_overflow > 0:
349
+ if self._overflow_strategy == "raise":
350
+ raise OverflowError(f"Buffer overflow: {n_new} samples received, but only {n_free} available.")
351
+ elif self._overflow_strategy == "warn-overwrite":
352
+ if not self._warn_once or not self._warned:
353
+ self._warned = True
354
+ warnings.warn(
355
+ f"Buffer overflow: {n_new} samples received, but only {n_free} available. "
356
+ f"Overwriting {n_overflow} previous samples.",
357
+ RuntimeWarning,
358
+ )
359
+ # Move the tail forward to make room for the new data.
360
+ self.seek(n_overflow)
361
+ # Adjust the read pointer to account for the overflow. Should always be 0.
362
+ self._buff_read = max(0, self._buff_read - n_overflow)
363
+ self._last_overflow = n_overflow
364
+ elif self._overflow_strategy == "drop":
365
+ # Drop the overflow samples from the deque
366
+ samples_to_drop = n_overflow
367
+ while samples_to_drop > 0 and self._deque:
368
+ block = self._deque[-1]
369
+ if samples_to_drop >= block.shape[0]:
370
+ samples_to_drop -= block.shape[0]
371
+ self._deque.pop()
372
+ else:
373
+ block = self._deque.pop()
374
+ self._deque.append(block[:-samples_to_drop])
375
+ samples_to_drop = 0
376
+ n_new -= n_overflow
377
+ self._last_overflow = n_overflow
378
+
379
+ elif self._overflow_strategy == "grow":
380
+ self._grow_buffer(self._capacity + n_new)
381
+ self._last_overflow = 0
382
+
383
+ # Copy data to buffer by iterating over the deque
384
+ for block in self._deque:
385
+ n_block = block.shape[0]
386
+ space_til_end = self._capacity - self._head
387
+ if n_block > space_til_end:
388
+ # Two-part copy (wraps around)
389
+ part1_len = space_til_end
390
+ part2_len = n_block - part1_len
391
+ self._buffer[self._head :] = block[:part1_len]
392
+ self._buffer[:part2_len] = block[part1_len:]
393
+ else:
394
+ # Single-part copy
395
+ self._buffer[self._head : self._head + n_block] = block
396
+ self._head = (self._head + n_block) % self._capacity
397
+
398
+ self._buff_unread += n_new
399
+ if (self._buff_read > self._tail) or (self._tail > self._head):
400
+ # We have wrapped around the buffer; our count of read samples
401
+ # is simply the buffer capacity minus the count of unread samples.
402
+ self._buff_read = self._capacity - self._buff_unread
403
+
404
+ self._deque.clear()
405
+ self._deque_len = 0
406
+
407
+ def _grow_buffer(self, min_capacity: int):
408
+ """
409
+ Grows the buffer to at least min_capacity.
410
+ This is a helper method for the overflow strategy "grow".
411
+ """
412
+ if self._capacity >= min_capacity:
413
+ return
414
+
415
+ other_shape = self._buffer.shape[1:]
416
+ max_capacity = self._max_size / (self._buffer.dtype.itemsize * math.prod(other_shape))
417
+ if min_capacity > max_capacity:
418
+ raise OverflowError(
419
+ f"Cannot grow buffer to {min_capacity} samples, "
420
+ f"maximum capacity is {max_capacity} samples ({self._max_size} bytes)."
421
+ )
422
+
423
+ new_capacity = min(max_capacity, max(self._capacity * 2, min_capacity))
424
+ new_buffer = self.xp.empty((new_capacity, *other_shape), dtype=self._buffer.dtype)
425
+
426
+ # Copy existing data to new buffer
427
+ total_samples = self._buff_read + self._buff_unread
428
+ if total_samples > 0:
429
+ start_idx = (self._tail - self._buff_read) % self._capacity
430
+ stop_idx = (self._tail + self._buff_unread) % self._capacity
431
+ if stop_idx > start_idx:
432
+ # Data is contiguous
433
+ new_buffer[:total_samples] = self._buffer[start_idx:stop_idx]
434
+ else:
435
+ # Data wraps around. We write it in 2 parts.
436
+ part1_len = self._capacity - start_idx
437
+ part2_len = stop_idx
438
+ new_buffer[:part1_len] = self._buffer[start_idx:]
439
+ new_buffer[part1_len : part1_len + part2_len] = self._buffer[:stop_idx]
440
+ # self._buff_read stays the same
441
+ self._tail = self._buff_read
442
+ # self._buff_unread stays the same
443
+ self._head = self._tail + self._buff_unread
444
+ else:
445
+ self._tail = 0
446
+ self._head = 0
447
+
448
+ self._buffer = new_buffer
449
+ self._capacity = new_capacity
@@ -0,0 +1,17 @@
1
+ """
2
+ Backwards-compatible re-exports from ezmsg.baseproc.util.message.
3
+
4
+ New code should import directly from ezmsg.baseproc instead.
5
+ """
6
+
7
+ from ezmsg.baseproc.util.message import (
8
+ SampleMessage,
9
+ SampleTriggerMessage,
10
+ is_sample_message,
11
+ )
12
+
13
+ __all__ = [
14
+ "SampleMessage",
15
+ "SampleTriggerMessage",
16
+ "is_sample_message",
17
+ ]
@@ -0,0 +1,23 @@
1
+ """
2
+ Backwards-compatible re-exports from ezmsg.baseproc.util.profile.
3
+
4
+ New code should import directly from ezmsg.baseproc instead.
5
+ """
6
+
7
+ from ezmsg.baseproc.util.profile import (
8
+ HEADER,
9
+ _setup_logger,
10
+ get_logger_path,
11
+ logger,
12
+ profile_method,
13
+ profile_subpub,
14
+ )
15
+
16
+ __all__ = [
17
+ "HEADER",
18
+ "get_logger_path",
19
+ "logger",
20
+ "profile_method",
21
+ "profile_subpub",
22
+ "_setup_logger",
23
+ ]
@@ -0,0 +1,115 @@
1
+ """Methods for sparse array signal processing operations."""
2
+
3
+ import numpy as np
4
+ import sparse
5
+
6
+
7
def sliding_win_oneaxis_old(s: sparse.SparseArray, nwin: int, axis: int, step: int = 1) -> sparse.SparseArray:
    """
    Like `ezmsg.util.messages.axisarray.sliding_win_oneaxis` but for sparse arrays.
    This approach is about 4x slower than the version that uses coordinate arithmetic below.

    Args:
        s: The input sparse array.
        nwin: The size of the sliding window.
        axis: The axis along which the sliding window will be applied.
        step: The size of the step between windows; applied when generating the window slices.

    Returns:
        A sparse array with a new windows axis inserted before the original axis.
    """
    # Normalize negative axis indices.
    if -s.ndim <= axis < 0:
        axis += s.ndim

    # Window start positions along the target axis of the *original* array.
    win_starts = range(0, s.shape[axis] - nwin + 1, step)

    # Insert a singleton "windows" axis immediately before the target axis; the
    # original axis is now at position axis + 1.
    expanded = s.reshape(s.shape[:axis] + (1,) + s.shape[axis:])

    # Slice each window along the (shifted) original axis, then stack the
    # windows along the new singleton axis.
    lead = (slice(None),) * (axis + 1)
    trail = (slice(None),) * (expanded.ndim - axis - 2)
    windows = [expanded[lead + (slice(start, start + nwin),) + trail] for start in win_starts]
    return sparse.concatenate(windows, axis=axis)
29
+
30
+
31
def sliding_win_oneaxis(s: sparse.SparseArray, nwin: int, axis: int, step: int = 1) -> sparse.SparseArray:
    """
    Generates a view-like sparse array using a sliding window of specified length along a specified axis.
    Sparse analog of an optimized dense as_strided-based implementation with these properties:

    - Accepts a single `nwin` and a single `axis`.
    - Inserts a new 'win' axis immediately BEFORE the original target axis.
      Output shape:
        s.shape[:axis] + (W,) + (nwin,) + s.shape[axis+1:]
      where W is the number of windows: len(range(0, s.shape[axis] - nwin + 1, step)).
    - If `step > 1`, stepping is applied by generating only every `step`-th window start
      (same observable behavior as slicing the windows axis of the step=1 result).

    Args:
        s: Input sparse array (pydata/sparse COO-compatible).
        nwin: Sliding window size (must be > 0).
        axis: Axis of `s` along which the window slides (supports negative indexing).
        step: Stride between windows. If > 1, only every `step`-th window start is emitted.

    Returns:
        A sparse array with a new windows axis inserted before the original axis.

    Notes:
        - Mirrors the dense function's known edge case: when nwin == shape[axis] + 1, W becomes 0 and
          an empty windows axis is returned.
        - Built by coordinate arithmetic on the COO representation: one pass of vectorized
          masking/stacking per window, so cost scales with (number of windows) * nnz.
          No dense materialization and no per-element indexing.
    """
    # Normalize negative axis and validate inputs.
    if -s.ndim <= axis < 0:
        axis = s.ndim + axis
    if not (0 <= axis < s.ndim):
        raise ValueError(f"Invalid axis {axis} for array with {s.ndim} dimensions")
    if nwin <= 0:
        raise ValueError("nwin must be > 0")
    dim = s.shape[axis]

    last_win_start = dim - nwin
    win_starts = list(range(0, last_win_start + 1, step))
    n_win_out = len(win_starts)
    if n_win_out <= 0:
        # Return array with proper shape except empty along windows axis
        return sparse.zeros(s.shape[:axis] + (0,) + (nwin,) + s.shape[axis + 1 :], dtype=s.dtype)

    coo = s.asformat("coo")
    coords = coo.coords  # shape: (ndim, nnz)
    data = coo.data  # shape: (nnz,)
    ia = coords[axis]  # indices along sliding axis, shape: (nnz,)

    # For each window start `ws`, a nonzero at index i along `axis` falls inside
    # the window iff its offset w = i - ws satisfies 0 <= w < nwin. We emit one
    # coords/data block per window and concatenate at the end.
    out_coords_blocks = []
    out_data_blocks = []

    # Reuse the input coordinate dtype for the new windows-axis row.
    idx_dtype = coords.dtype

    for win_ix, win_start in enumerate(win_starts):
        w = ia - win_start
        # Valid offsets within this window lie in [0, nwin).
        mask = (w >= 0) & (w < nwin)
        if not mask.any():
            continue

        sel = np.nonzero(mask)[0]
        w_sel = w[sel]

        # Build new coords with the windows axis inserted at `axis` (constant
        # value win_ix for this block) and the original axis replaced by the
        # in-window offsets w_sel.
        # Output ndim = s.ndim + 1
        before = coords[:axis, sel]  # unchanged
        after_other = coords[axis + 1 :, sel]  # dims after original axis
        win_idx_row = np.full((1, sel.size), win_ix, dtype=idx_dtype)

        new_coords = np.vstack([before, win_idx_row, w_sel[None, :], after_other])

        out_coords_blocks.append(new_coords)
        out_data_blocks.append(data[sel])

    # All-zero input (or no nonzeros fell inside any window): return an
    # all-zero array of the correct output shape.
    if not out_coords_blocks:
        return sparse.zeros(s.shape[:axis] + (n_win_out,) + (nwin,) + s.shape[axis + 1 :], dtype=s.dtype)

    out_coords = np.hstack(out_coords_blocks)
    out_data = np.hstack(out_data_blocks)
    out_shape = s.shape[:axis] + (n_win_out,) + (nwin,) + s.shape[axis + 1 :]

    return sparse.COO(out_coords, out_data, shape=out_shape)
@@ -0,0 +1,17 @@
1
+ """
2
+ Backwards-compatible re-exports from ezmsg.baseproc.util.typeresolution.
3
+
4
+ New code should import directly from ezmsg.baseproc instead.
5
+ """
6
+
7
+ from ezmsg.baseproc.util.typeresolution import (
8
+ TypeLike,
9
+ check_message_type_compatibility,
10
+ resolve_typevar,
11
+ )
12
+
13
+ __all__ = [
14
+ "TypeLike",
15
+ "check_message_type_compatibility",
16
+ "resolve_typevar",
17
+ ]