ezmsg-sigproc 2.12.0__py3-none-any.whl → 2.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
28
28
  commit_id: COMMIT_ID
29
29
  __commit_id__: COMMIT_ID
30
30
 
31
- __version__ = version = '2.12.0'
32
- __version_tuple__ = version_tuple = (2, 12, 0)
31
+ __version__ = version = '2.13.0'
32
+ __version_tuple__ = version_tuple = (2, 13, 0)
33
33
 
34
34
  __commit_id__ = commit_id = None
@@ -17,7 +17,6 @@ import numpy.typing as npt
17
17
  from array_api_compat import get_namespace
18
18
  from ezmsg.baseproc import (
19
19
  BaseStatefulTransformer,
20
- BaseTransformer,
21
20
  BaseTransformerUnit,
22
21
  processor_state,
23
22
  )
@@ -25,6 +24,117 @@ from ezmsg.util.messages.axisarray import AxisArray, AxisBase
25
24
  from ezmsg.util.messages.util import replace
26
25
 
27
26
 
27
+ def _find_block_diagonal_clusters(weights: np.ndarray) -> list[tuple[np.ndarray, np.ndarray]] | None:
28
+ """Detect block-diagonal structure in a weight matrix.
29
+
30
+ Finds connected components in the bipartite graph of non-zero weights,
31
+ where input channels and output channels are separate node sets.
32
+
33
+ Args:
34
+ weights: 2-D weight matrix of shape (n_in, n_out).
35
+
36
+ Returns:
37
+ List of (input_indices, output_indices) tuples, one per block, or
38
+ None if the matrix is not block-diagonal (single connected component).
39
+ """
40
+ if weights.ndim != 2:
41
+ return None
42
+
43
+ n_in, n_out = weights.shape
44
+ if n_in + n_out <= 2:
45
+ return None
46
+
47
+ from scipy.sparse import coo_matrix
48
+ from scipy.sparse.csgraph import connected_components
49
+
50
+ rows, cols = np.nonzero(weights)
51
+ if len(rows) == 0:
52
+ return None
53
+
54
+ # Bipartite graph: input nodes [0, n_in), output nodes [n_in, n_in + n_out)
55
+ shifted_cols = cols + n_in
56
+ adj_rows = np.concatenate([rows, shifted_cols])
57
+ adj_cols = np.concatenate([shifted_cols, rows])
58
+ adj_data = np.ones(len(adj_rows), dtype=bool)
59
+ n_nodes = n_in + n_out
60
+ adj = coo_matrix((adj_data, (adj_rows, adj_cols)), shape=(n_nodes, n_nodes))
61
+
62
+ n_components, labels = connected_components(adj, directed=False)
63
+
64
+ if n_components <= 1:
65
+ return None
66
+
67
+ clusters = []
68
+ for comp in range(n_components):
69
+ members = np.where(labels == comp)[0]
70
+ in_idx = np.sort(members[members < n_in])
71
+ out_idx = np.sort(members[members >= n_in] - n_in)
72
+ if len(in_idx) > 0 and len(out_idx) > 0:
73
+ clusters.append((in_idx, out_idx))
74
+
75
+ return clusters if len(clusters) > 1 else None
76
+
77
+
78
+ def _max_cross_cluster_weight(weights: np.ndarray, clusters: list[tuple[np.ndarray, np.ndarray]]) -> float:
79
+ """Return the maximum absolute weight between different clusters."""
80
+ mask = np.zeros(weights.shape, dtype=bool)
81
+ for in_idx, out_idx in clusters:
82
+ mask[np.ix_(in_idx, out_idx)] = True
83
+ cross = np.abs(weights[~mask])
84
+ return float(cross.max()) if cross.size > 0 else 0.0
85
+
86
+
87
+ def _merge_small_clusters(
88
+ clusters: list[tuple[np.ndarray, np.ndarray]], min_size: int
89
+ ) -> list[tuple[np.ndarray, np.ndarray]]:
90
+ """Merge clusters smaller than *min_size* into combined groups.
91
+
92
+ Small clusters are greedily concatenated until each merged group has
93
+ at least *min_size* channels (measured as ``max(n_in, n_out)``).
94
+ Any leftover small clusters that don't reach the threshold are
95
+ combined into a final group.
96
+
97
+ The merged group's sub-weight-matrix will contain the original small
98
+ diagonal blocks with zeros between them — a dense matmul on that
99
+ sub-matrix is cheaper than iterating over many tiny matmuls.
100
+ """
101
+ if min_size <= 1:
102
+ return clusters
103
+
104
+ large = []
105
+ small = []
106
+ for cluster in clusters:
107
+ in_idx, out_idx = cluster
108
+ if max(len(in_idx), len(out_idx)) >= min_size:
109
+ large.append(cluster)
110
+ else:
111
+ small.append(cluster)
112
+
113
+ if not small:
114
+ return clusters
115
+
116
+ current_in: list[np.ndarray] = []
117
+ current_out: list[np.ndarray] = []
118
+ current_in_size = 0
119
+ current_out_size = 0
120
+ for in_idx, out_idx in small:
121
+ current_in.append(in_idx)
122
+ current_out.append(out_idx)
123
+ current_in_size += len(in_idx)
124
+ current_out_size += len(out_idx)
125
+ if max(current_in_size, current_out_size) >= min_size:
126
+ large.append((np.sort(np.concatenate(current_in)), np.sort(np.concatenate(current_out))))
127
+ current_in = []
128
+ current_out = []
129
+ current_in_size = 0
130
+ current_out_size = 0
131
+
132
+ if current_in:
133
+ large.append((np.sort(np.concatenate(current_in)), np.sort(np.concatenate(current_out))))
134
+
135
+ return large
136
+
137
+
28
138
  class AffineTransformSettings(ez.Settings):
29
139
  """
30
140
  Settings for :obj:`AffineTransform`.
@@ -39,11 +149,32 @@ class AffineTransformSettings(ez.Settings):
39
149
  right_multiply: bool = True
40
150
  """Set False to transpose the weights before applying."""
41
151
 
152
+ channel_clusters: list[list[int]] | None = None
153
+ """Optional explicit input channel cluster specification for block-diagonal optimization.
154
+
155
+ Each element is a list of input channel indices forming one cluster. The
156
+ corresponding output indices are derived automatically from the non-zero
157
+ columns of the weight matrix for those input rows.
158
+
159
+ When provided, the weight matrix is decomposed into per-cluster sub-matrices
160
+ and multiplied separately, which is faster when cross-cluster weights are zero.
161
+
162
+ If None, block-diagonal structure is auto-detected from the zero pattern
163
+ of the weights."""
164
+
165
+ min_cluster_size: int = 32
166
+ """Minimum number of channels per cluster for the block-diagonal optimization.
167
+ Clusters smaller than this are greedily merged together to avoid excessive
168
+ Python loop overhead. Set to 1 to disable merging."""
169
+
42
170
 
43
171
@processor_state
class AffineTransformState:
    # Dense weight matrix; set to None when the block-diagonal path is active.
    weights: npt.NDArray | None = None
    # Replacement axis carrying derived output-channel labels, when one was built.
    new_axis: AxisBase | None = None
    # Number of output channels (sizes the pre-allocated block-diagonal output).
    n_out: int = 0
    # list of (in_indices_xp, out_indices_xp, sub_weights_xp) tuples when block-diagonal.
    clusters: list | None = None
47
178
 
48
179
 
49
180
  class AffineTransformTransformer(
@@ -86,11 +217,60 @@ class AffineTransformTransformer(
86
217
 
87
218
  self._state.weights = weights
88
219
 
220
+ # Note: If weights were scipy.sparse BSR then maybe we could use automate this next part.
221
+ # However, that would break compatibility with Array API.
222
+
223
+ # --- Block-diagonal cluster detection ---
224
+ # Clusters are a list of (input_indices, output_indices) tuples.
225
+ n_in, n_out = weights.shape
226
+ if self.settings.channel_clusters is not None:
227
+ # Validate input index bounds
228
+ all_in = np.concatenate([np.asarray(group) for group in self.settings.channel_clusters])
229
+ if np.any((all_in < 0) | (all_in >= n_in)):
230
+ raise ValueError(
231
+ "channel_clusters contains out-of-range input indices " f"(valid range: 0..{n_in - 1})"
232
+ )
233
+
234
+ # Derive output indices from non-zero weights for each input cluster
235
+ clusters = []
236
+ for group in self.settings.channel_clusters:
237
+ in_idx = np.asarray(group)
238
+ out_idx = np.where(np.any(weights[in_idx, :] != 0, axis=0))[0]
239
+ clusters.append((in_idx, out_idx))
240
+
241
+ max_cross = _max_cross_cluster_weight(weights, clusters)
242
+ if max_cross > 0:
243
+ ez.logger.warning(
244
+ f"Non-zero cross-cluster weights detected (max abs: {max_cross:.2e}). "
245
+ "These will be ignored in block-diagonal multiplication."
246
+ )
247
+ else:
248
+ clusters = _find_block_diagonal_clusters(weights)
249
+ if clusters is not None:
250
+ ez.logger.info(
251
+ f"Auto-detected {len(clusters)} block-diagonal clusters "
252
+ f"(sizes: {[(len(i), len(o)) for i, o in clusters]})"
253
+ )
254
+
255
+ # Merge small clusters to avoid excessive loop overhead
256
+ if clusters is not None:
257
+ clusters = _merge_small_clusters(clusters, self.settings.min_cluster_size)
258
+
259
+ if clusters is not None and len(clusters) > 1:
260
+ self._state.n_out = n_out
261
+ self._state.clusters = [
262
+ (in_idx, out_idx, np.ascontiguousarray(weights[np.ix_(in_idx, out_idx)]))
263
+ for in_idx, out_idx in clusters
264
+ ]
265
+ self._state.weights = None
266
+ else:
267
+ self._state.clusters = None
268
+
269
+ # --- Axis label handling (for non-square transforms, non-cluster path) ---
89
270
  axis = self.settings.axis or message.dims[-1]
90
- if axis in message.axes and hasattr(message.axes[axis], "data") and weights.shape[0] != weights.shape[1]:
271
+ if axis in message.axes and hasattr(message.axes[axis], "data") and n_in != n_out:
91
272
  in_labels = message.axes[axis].data
92
273
  new_labels = []
93
- n_in, n_out = weights.shape
94
274
  if len(in_labels) != n_in:
95
275
  ez.logger.warning(f"Received {len(in_labels)} for {n_in} inputs. Check upstream labels.")
96
276
  else:
@@ -112,10 +292,44 @@ class AffineTransformTransformer(
112
292
 
113
293
  self._state.new_axis = replace(message.axes[axis], data=np.array(new_labels))
114
294
 
115
- # Convert weights to match message.data namespace for efficient operations in _process
295
+ # Convert to match message.data namespace for efficient operations in _process
116
296
  xp = get_namespace(message.data)
117
297
  if self._state.weights is not None:
118
298
  self._state.weights = xp.asarray(self._state.weights)
299
+ if self._state.clusters is not None:
300
+ self._state.clusters = [
301
+ (xp.asarray(in_idx), xp.asarray(out_idx), xp.asarray(sub_w))
302
+ for in_idx, out_idx, sub_w in self._state.clusters
303
+ ]
304
+
305
+ def _block_diagonal_matmul(self, xp, data, axis_idx):
306
+ """Perform matmul using block-diagonal decomposition.
307
+
308
+ For each cluster, gathers input channels via ``xp.take``, performs a
309
+ matmul with the cluster's sub-weight matrix, and writes the result
310
+ directly into the pre-allocated output at the cluster's output indices.
311
+ Omitted output channels naturally remain zero.
312
+ """
313
+ needs_permute = axis_idx not in [-1, data.ndim - 1]
314
+ if needs_permute:
315
+ dim_perm = list(range(data.ndim))
316
+ dim_perm.append(dim_perm.pop(axis_idx))
317
+ data = xp.permute_dims(data, dim_perm)
318
+
319
+ # Pre-allocate output (omitted channels stay zero)
320
+ out_shape = data.shape[:-1] + (self._state.n_out,)
321
+ result = xp.zeros(out_shape, dtype=data.dtype)
322
+
323
+ for in_idx, out_idx, sub_weights in self._state.clusters:
324
+ chunk = xp.take(data, in_idx, axis=data.ndim - 1)
325
+ result[..., out_idx] = xp.matmul(chunk, sub_weights)
326
+
327
+ if needs_permute:
328
+ inv_dim_perm = list(range(result.ndim))
329
+ inv_dim_perm.insert(axis_idx, inv_dim_perm.pop(-1))
330
+ result = xp.permute_dims(result, inv_dim_perm)
331
+
332
+ return result
119
333
 
120
334
  def _process(self, message: AxisArray) -> AxisArray:
121
335
  xp = get_namespace(message.data)
@@ -123,22 +337,25 @@ class AffineTransformTransformer(
123
337
  axis_idx = message.get_axis_idx(axis)
124
338
  data = message.data
125
339
 
126
- if data.shape[axis_idx] == (self._state.weights.shape[0] - 1):
127
- # The weights are stacked A|B where A is the transform and B is a single row
128
- # in the equation y = Ax + B. This supports NeuroKey's weights matrices.
129
- sample_shape = data.shape[:axis_idx] + (1,) + data.shape[axis_idx + 1 :]
130
- data = xp.concat((data, xp.ones(sample_shape, dtype=data.dtype)), axis=axis_idx)
131
-
132
- if axis_idx in [-1, len(message.dims) - 1]:
133
- data = xp.matmul(data, self._state.weights)
340
+ if self._state.clusters is not None:
341
+ data = self._block_diagonal_matmul(xp, data, axis_idx)
134
342
  else:
135
- perm = list(range(data.ndim))
136
- perm.append(perm.pop(axis_idx))
137
- data = xp.permute_dims(data, perm)
138
- data = xp.matmul(data, self._state.weights)
139
- inv_perm = list(range(data.ndim))
140
- inv_perm.insert(axis_idx, inv_perm.pop(-1))
141
- data = xp.permute_dims(data, inv_perm)
343
+ if data.shape[axis_idx] == (self._state.weights.shape[0] - 1):
344
+ # The weights are stacked A|B where A is the transform and B is a single row
345
+ # in the equation y = Ax + B. This supports NeuroKey's weights matrices.
346
+ sample_shape = data.shape[:axis_idx] + (1,) + data.shape[axis_idx + 1 :]
347
+ data = xp.concat((data, xp.ones(sample_shape, dtype=data.dtype)), axis=axis_idx)
348
+
349
+ if axis_idx in [-1, len(message.dims) - 1]:
350
+ data = xp.matmul(data, self._state.weights)
351
+ else:
352
+ perm = list(range(data.ndim))
353
+ perm.append(perm.pop(axis_idx))
354
+ data = xp.permute_dims(data, perm)
355
+ data = xp.matmul(data, self._state.weights)
356
+ inv_perm = list(range(data.ndim))
357
+ inv_perm.insert(axis_idx, inv_perm.pop(-1))
358
+ data = xp.permute_dims(data, inv_perm)
142
359
 
143
360
  replace_kwargs = {"data": data}
144
361
  if self._state.new_axis is not None:
@@ -155,6 +372,8 @@ def affine_transform(
155
372
  weights: np.ndarray | str | Path,
156
373
  axis: str | None = None,
157
374
  right_multiply: bool = True,
375
+ channel_clusters: list[list[int]] | None = None,
376
+ min_cluster_size: int = 32,
158
377
  ) -> AffineTransformTransformer:
159
378
  """
160
379
  Perform affine transformations on streaming data.
@@ -163,20 +382,25 @@ def affine_transform(
163
382
  weights: An array of weights or a path to a file with weights compatible with np.loadtxt.
164
383
  axis: The name of the axis to apply the transformation to. Defaults to the leading (0th) axis in the array.
165
384
  right_multiply: Set False to transpose the weights before applying.
385
+ channel_clusters: Optional explicit channel cluster specification. See
386
+ :attr:`AffineTransformSettings.channel_clusters`.
387
+ min_cluster_size: Minimum channels per cluster; smaller clusters are merged. See
388
+ :attr:`AffineTransformSettings.min_cluster_size`.
166
389
 
167
390
  Returns:
168
391
  :obj:`AffineTransformTransformer`.
169
392
  """
170
393
  return AffineTransformTransformer(
171
- AffineTransformSettings(weights=weights, axis=axis, right_multiply=right_multiply)
394
+ AffineTransformSettings(
395
+ weights=weights,
396
+ axis=axis,
397
+ right_multiply=right_multiply,
398
+ channel_clusters=channel_clusters,
399
+ min_cluster_size=min_cluster_size,
400
+ )
172
401
  )
173
402
 
174
403
 
175
- def zeros_for_noop(data, **ignore_kwargs):
176
- xp = get_namespace(data)
177
- return xp.zeros_like(data)
178
-
179
-
180
404
  class CommonRereferenceSettings(ez.Settings):
181
405
  """
182
406
  Settings for :obj:`CommonRereference`
@@ -191,8 +415,37 @@ class CommonRereferenceSettings(ez.Settings):
191
415
  include_current: bool = True
192
416
  """Set False to exclude each channel from participating in the calculation of its reference."""
193
417
 
418
+ channel_clusters: list[list[int]] | None = None
419
+ """Optional channel clusters for per-cluster rereferencing. Each element is a
420
+ list of channel indices forming one cluster. The common reference is computed
421
+ independently within each cluster. If None, all channels form a single cluster."""
422
+
423
+
424
+ @processor_state
425
+ class CommonRereferenceState:
426
+ clusters: list | None = None
427
+ """list of xp arrays of channel indices, one per cluster."""
428
+
429
+
430
+ class CommonRereferenceTransformer(
431
+ BaseStatefulTransformer[CommonRereferenceSettings, AxisArray, AxisArray, CommonRereferenceState]
432
+ ):
433
+ def _hash_message(self, message: AxisArray) -> int:
434
+ axis = self.settings.axis or message.dims[-1]
435
+ axis_idx = message.get_axis_idx(axis)
436
+ return hash((message.key, message.data.shape[axis_idx]))
437
+
438
+ def _reset_state(self, message: AxisArray) -> None:
439
+ xp = get_namespace(message.data)
440
+ axis = self.settings.axis or message.dims[-1]
441
+ axis_idx = message.get_axis_idx(axis)
442
+ n_chans = message.data.shape[axis_idx]
443
+
444
+ if self.settings.channel_clusters is not None:
445
+ self._state.clusters = [xp.asarray(group) for group in self.settings.channel_clusters]
446
+ else:
447
+ self._state.clusters = [xp.arange(n_chans)]
194
448
 
195
- class CommonRereferenceTransformer(BaseTransformer[CommonRereferenceSettings, AxisArray, AxisArray]):
196
449
  def _process(self, message: AxisArray) -> AxisArray:
197
450
  if self.settings.mode == "passthrough":
198
451
  return message
@@ -200,27 +453,26 @@ class CommonRereferenceTransformer(BaseTransformer[CommonRereferenceSettings, Ax
200
453
  xp = get_namespace(message.data)
201
454
  axis = self.settings.axis or message.dims[-1]
202
455
  axis_idx = message.get_axis_idx(axis)
456
+ func = {"mean": xp.mean, "median": np.median}[self.settings.mode]
203
457
 
204
- func = {"mean": xp.mean, "median": np.median, "passthrough": zeros_for_noop}[self.settings.mode]
458
+ # Use result_type to match dtype promotion from data - float operations.
459
+ out_dtype = np.result_type(message.data.dtype, np.float64)
460
+ output = xp.zeros(message.data.shape, dtype=out_dtype)
205
461
 
206
- ref_data = func(message.data, axis=axis_idx, keepdims=True)
462
+ for cluster_idx in self._state.clusters:
463
+ cluster_data = xp.take(message.data, cluster_idx, axis=axis_idx)
464
+ ref_data = func(cluster_data, axis=axis_idx, keepdims=True)
207
465
 
208
- if not self.settings.include_current:
209
- # Typical `CAR = x[0]/N + x[1]/N + ... x[i-1]/N + x[i]/N + x[i+1]/N + ... + x[N-1]/N`
210
- # and is the same for all i, so it is calculated only once in `ref_data`.
211
- # However, if we had excluded the current channel,
212
- # then we would have omitted the contribution of the current channel:
213
- # `CAR[i] = x[0]/(N-1) + x[1]/(N-1) + ... x[i-1]/(N-1) + x[i+1]/(N-1) + ... + x[N-1]/(N-1)`
214
- # The majority of the calculation is the same as when the current channel is included;
215
- # we need only rescale CAR so the divisor is `N-1` instead of `N`, then subtract the contribution
216
- # from the current channel (i.e., `x[i] / (N-1)`)
217
- # i.e., `CAR[i] = (N / (N-1)) * common_CAR - x[i]/(N-1)`
218
- # We can use broadcasting subtraction instead of looping over channels.
219
- N = message.data.shape[axis_idx]
220
- ref_data = (N / (N - 1)) * ref_data - message.data / (N - 1)
221
- # Note: I profiled using AffineTransformTransformer; it's ~30x slower than this implementation.
466
+ if not self.settings.include_current:
467
+ N = cluster_data.shape[axis_idx]
468
+ ref_data = (N / (N - 1)) * ref_data - cluster_data / (N - 1)
222
469
 
223
- return replace(message, data=message.data - ref_data)
470
+ # Write per-cluster result into output at the correct axis position
471
+ idx = [slice(None)] * output.ndim
472
+ idx[axis_idx] = cluster_idx
473
+ output[tuple(idx)] = cluster_data - ref_data
474
+
475
+ return replace(message, data=output)
224
476
 
225
477
 
226
478
  class CommonRereference(
@@ -230,19 +482,26 @@ class CommonRereference(
230
482
 
231
483
 
232
484
def common_rereference(
    mode: str = "mean",
    axis: str | None = None,
    include_current: bool = True,
    channel_clusters: list[list[int]] | None = None,
) -> CommonRereferenceTransformer:
    """
    Perform common average referencing (CAR) on streaming data.

    Args:
        mode: The statistical mode to apply -- either "mean" or "median"
        axis: The name of the axis to apply the transformation to.
        include_current: Set False to exclude each channel from participating in the calculation of its reference.
        channel_clusters: Optional channel clusters for per-cluster rereferencing. See
            :attr:`CommonRereferenceSettings.channel_clusters`.

    Returns:
        :obj:`CommonRereferenceTransformer`
    """
    settings = CommonRereferenceSettings(
        mode=mode,
        axis=axis,
        include_current=include_current,
        channel_clusters=channel_clusters,
    )
    return CommonRereferenceTransformer(settings)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ezmsg-sigproc
3
- Version: 2.12.0
3
+ Version: 2.13.0
4
4
  Summary: Timeseries signal processing implementations in ezmsg
5
5
  Author-email: Griffin Milsap <griffin.milsap@gmail.com>, Preston Peranich <pperanich@gmail.com>, Chadwick Boulay <chadwick.boulay@gmail.com>, Kyle McGraw <kmcgraw@blackrockneuro.com>
6
6
  License-Expression: MIT
@@ -1,8 +1,8 @@
1
1
  ezmsg/sigproc/__init__.py,sha256=8K4IcOA3-pfzadoM6s2Sfg5460KlJUocGgyTJTJl96U,52
2
- ezmsg/sigproc/__version__.py,sha256=wFoDXARGO4HXo-ocBvYv-8vU4Kqf2IkZye_YzWivyoI,706
2
+ ezmsg/sigproc/__version__.py,sha256=_4LOjlEcfZzfuqIlglDZmVBPO4LyQ8P97qO716YoUL8,706
3
3
  ezmsg/sigproc/activation.py,sha256=83vnTa3ZcC4Q3VSWcGfaqhCEqYRNySUOyVpMHZXfz-c,2755
4
4
  ezmsg/sigproc/adaptive_lattice_notch.py,sha256=ThUR48mbSHuThkimtD0j4IXNMrOVcpZgGhE7PCYfXhU,8818
5
- ezmsg/sigproc/affinetransform.py,sha256=PQ0nSSELrfEpaMtvS2FJLLxZGfpLVOgoSpj4sXNUB7Q,9985
5
+ ezmsg/sigproc/affinetransform.py,sha256=ZugiQg89Ly1I9SDgf0ZzgU2XdwVDmPrU7-orO9yrt7w,20210
6
6
  ezmsg/sigproc/aggregate.py,sha256=7Hdz1m-S6Cl9h0oRQHeS_UTGBemhOB4XdFyX6cGcdHo,9362
7
7
  ezmsg/sigproc/bandpower.py,sha256=dAhH56sUrXNhcRFymTTwjdM_KcU5OxFzrR_sxIPAxyw,2264
8
8
  ezmsg/sigproc/base.py,sha256=SJvKEb8gw6mUMwlV5sH0iPG0bXrgS8tvkPwhI-j89MQ,3672
@@ -62,7 +62,7 @@ ezmsg/sigproc/util/message.py,sha256=ppN3IYtIAwrxWG9JOvgWFn1wDdIumkEzYFfqpH9VQkY
62
62
  ezmsg/sigproc/util/profile.py,sha256=eVOo9pXgusrnH1yfRdd2RsM7Dbe2UpyC0LJ9MfGpB08,416
63
63
  ezmsg/sigproc/util/sparse.py,sha256=NjbJitCtO0B6CENTlyd9c-lHEJwoCan-T3DIgPyeShw,4834
64
64
  ezmsg/sigproc/util/typeresolution.py,sha256=fMFzLi63dqCIclGFLcMdM870OYxJnkeWw6aWKNMk718,362
65
- ezmsg_sigproc-2.12.0.dist-info/METADATA,sha256=jSgl8ORZbzctTMX2jPI2Si9F8ywv8UY9ZA1H9Bbq_GI,1909
66
- ezmsg_sigproc-2.12.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
67
- ezmsg_sigproc-2.12.0.dist-info/licenses/LICENSE,sha256=seu0tKhhAMPCUgc1XpXGGaCxY1YaYvFJwqFuQZAl2go,1100
68
- ezmsg_sigproc-2.12.0.dist-info/RECORD,,
65
+ ezmsg_sigproc-2.13.0.dist-info/METADATA,sha256=RXENX541lABAic8oUDuT8vQwx9nlWY9JETyXYKxdeTQ,1909
66
+ ezmsg_sigproc-2.13.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
67
+ ezmsg_sigproc-2.13.0.dist-info/licenses/LICENSE,sha256=seu0tKhhAMPCUgc1XpXGGaCxY1YaYvFJwqFuQZAl2go,1100
68
+ ezmsg_sigproc-2.13.0.dist-info/RECORD,,