ezmsg-sigproc 2.0.0__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ezmsg/sigproc/__version__.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE
 
- __version__ = version = '2.0.0'
- __version_tuple__ = version_tuple = (2, 0, 0)
+ __version__ = version = '2.2.0'
+ __version_tuple__ = version_tuple = (2, 2, 0)
ezmsg/sigproc/aggregate.py CHANGED
@@ -36,6 +36,7 @@ class AggregationFunction(OptionsEnum):
      NANSUM = "nansum"
      ARGMIN = "argmin"
      ARGMAX = "argmax"
+     TRAPEZOID = "trapezoid"
 
 
  AGGREGATORS = {
@@ -54,6 +55,9 @@ AGGREGATORS = {
      AggregationFunction.NANSUM: np.nansum,
      AggregationFunction.ARGMIN: np.argmin,
      AggregationFunction.ARGMAX: np.argmax,
+     # Note: Some methods require x-coordinates and
+     # are handled specially in `_process`.
+     AggregationFunction.TRAPEZOID: np.trapezoid,
  }
 
 
@@ -144,10 +148,23 @@ class RangedAggregateTransformer(
          ax_idx = message.get_axis_idx(axis)
          agg_func = AGGREGATORS[self.settings.operation]
 
-         out_data = [
-             agg_func(slice_along_axis(message.data, sl, axis=ax_idx), axis=ax_idx)
-             for sl in self._state.slices
-         ]
+         if self.settings.operation in [
+             AggregationFunction.TRAPEZOID,
+         ]:
+             # Special handling for methods that require x-coordinates.
+             out_data = [
+                 agg_func(
+                     slice_along_axis(message.data, sl, axis=ax_idx),
+                     x=self._state.ax_vec[sl],
+                     axis=ax_idx,
+                 )
+                 for sl in self._state.slices
+             ]
+         else:
+             out_data = [
+                 agg_func(slice_along_axis(message.data, sl, axis=ax_idx), axis=ax_idx)
+                 for sl in self._state.slices
+             ]
 
          msg_out = replace(
              message,
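The new `TRAPEZOID` option integrates across the aggregation axis rather than reducing values pointwise, which is why `_process` passes the axis coordinate vector as `x`. A minimal numpy sketch of the underlying call, with illustrative channel counts and frequencies that are not taken from the package:

```python
import numpy as np

# Hypothetical spectrum: 3 channels x 7 frequency bins spaced 1 Hz apart.
freqs = np.arange(8.0, 15.0)
psd = np.random.default_rng(0).random((3, freqs.size))

# Plain reductions only need the values...
band_mean = np.mean(psd, axis=1)

# ...but trapezoidal integration also needs the x-coordinates, mirroring
# x=self._state.ax_vec[sl] in the transformer. np.trapezoid requires NumPy >= 2.0
# (older versions expose it as np.trapz).
band_area = np.trapezoid(psd, x=freqs, axis=1)
print(band_mean.shape, band_area.shape)  # (3,) (3,)
```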
ezmsg/sigproc/bandpower.py CHANGED
@@ -36,6 +36,9 @@ class BandPowerSettings(ez.Settings):
      (min, max) tuples of band limits in Hz.
      """
 
+     aggregation: AggregationFunction = AggregationFunction.MEAN
+     """:obj:`AggregationFunction` to apply to each band."""
+
 
  class BandPowerTransformer(CompositeProcessor[BandPowerSettings, AxisArray, AxisArray]):
      @staticmethod
@@ -50,7 +53,7 @@ class BandPowerTransformer(CompositeProcessor[BandPowerSettings, AxisArray, Axis
              settings=RangedAggregateSettings(
                  axis="freq",
                  bands=settings.bands,
-                 operation=AggregationFunction.MEAN,
+                 operation=settings.aggregation,
              )
          ),
      }
@@ -68,6 +71,7 @@ def bandpower(
          (17, 30),
          (70, 170),
      ],
+     aggregation: AggregationFunction = AggregationFunction.MEAN,
  ) -> BandPowerTransformer:
      """
      Calculate the average spectral power in each band.
@@ -77,6 +81,8 @@ def bandpower(
      """
      return BandPowerTransformer(
          settings=BandPowerSettings(
-             spectrogram_settings=spectrogram_settings, bands=bands
+             spectrogram_settings=spectrogram_settings,
+             bands=bands,
+             aggregation=aggregation,
          )
      )
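With the new `aggregation` setting, callers can select, for example, trapezoidal integration over each band instead of the default mean. A hedged usage sketch follows; the import paths mirror the module names in this package, but constructing `SpectrogramSettings()` with defaults is an assumption that is not verified here:

```python
from ezmsg.sigproc.aggregate import AggregationFunction
from ezmsg.sigproc.bandpower import bandpower
from ezmsg.sigproc.spectrogram import SpectrogramSettings  # assumed name/location

bp = bandpower(
    spectrogram_settings=SpectrogramSettings(),  # assumed to be default-constructible
    bands=[(17, 30), (70, 170)],
    aggregation=AggregationFunction.TRAPEZOID,   # integrate each band instead of averaging
)
```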
ezmsg/sigproc/filter.py CHANGED
@@ -32,17 +32,19 @@ FilterCoefsType = typing.TypeVar("FilterCoefsType", BACoeffs, SOSCoeffs)
 
 
  def _normalize_coefs(
-     coefs: FilterCoefficients | tuple[npt.NDArray, npt.NDArray] | npt.NDArray,
- ) -> tuple[str, tuple[npt.NDArray, ...]]:
+     coefs: FilterCoefficients | tuple[npt.NDArray, npt.NDArray] | npt.NDArray | None,
+ ) -> tuple[str, tuple[npt.NDArray, ...] | None]:
      coef_type = "ba"
      if coefs is not None:
          # scipy.signal functions called with first arg `*coefs`.
          # Make sure we have a tuple of coefficients.
-         if isinstance(coefs, npt.NDArray):
+         if isinstance(coefs, np.ndarray):
              coef_type = "sos"
              coefs = (coefs,)  # sos funcs just want a single ndarray.
          elif isinstance(coefs, FilterCoefficients):
-             coefs = (FilterCoefficients.b, FilterCoefficients.a)
+             coefs = (coefs.b, coefs.a)
+         elif not isinstance(coefs, tuple):
+             coefs = (coefs,)
      return coef_type, coefs
 
 
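The reworked `_normalize_coefs` accepts a `(b, a)` tuple, a `FilterCoefficients` object, a bare SOS ndarray, or `None`, and always hands back a tuple that downstream code can splat into scipy calls. A standalone scipy illustration of the two coefficient forms it distinguishes (not the package's code):

```python
import numpy as np
import scipy.signal

b, a = scipy.signal.butter(4, 0.2)                # BA form -> ("ba", (b, a))
sos = scipy.signal.butter(4, 0.2, output="sos")   # 2-D ndarray -> ("sos", (sos,))
x = np.zeros(16)

# Because coefficients are normalized to a tuple, downstream calls can always
# be written as func(*coefs, ...):
y_ba = scipy.signal.lfilter(*(b, a), x)    # BA path
y_sos = scipy.signal.sosfilt(*(sos,), x)   # SOS path: the bare array was wrapped in a 1-tuple
```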
@@ -91,16 +93,20 @@ class FilterTransformer(
          axis = message.dims[0] if self.settings.axis is None else self.settings.axis
          axis_idx = message.get_axis_idx(axis)
          n_tail = message.data.ndim - axis_idx - 1
-         coefs = (
-             (self.settings.coefs,)
-             if self.settings.coefs is not None
-             and not isinstance(self.settings.coefs, tuple)
-             else self.settings.coefs
-         )
-         zi_func = {"ba": scipy.signal.lfilter_zi, "sos": scipy.signal.sosfilt_zi}[
-             self.settings.coef_type
-         ]
-         zi = zi_func(*coefs)
+         _, coefs = _normalize_coefs(self.settings.coefs)
+
+         if self.settings.coef_type == "ba":
+             b, a = coefs
+             if len(a) == 1 or np.allclose(a[1:], 0):
+                 # For FIR filters, use lfiltic with zero initial conditions
+                 zi = scipy.signal.lfiltic(b, a, [])
+             else:
+                 # For IIR filters...
+                 zi = scipy.signal.lfilter_zi(b, a)
+         else:
+             # For second-order sections (SOS) filters, use sosfilt_zi
+             zi = scipy.signal.sosfilt_zi(*coefs)
+
          zi_expand = (None,) * axis_idx + (slice(None),) + (None,) * n_tail
          n_tile = (
              message.data.shape[:axis_idx] + (1,) + message.data.shape[axis_idx + 1 :]
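The new branch distinguishes FIR filters (denominator effectively `[1.0]`) from IIR filters: `lfilter_zi` solves for steady-state initial conditions, which is not meaningful for a pure FIR kernel, so FIR filters get zero initial state from `lfiltic` instead. A small scipy check of what the two calls return, for illustration only:

```python
import numpy as np
import scipy.signal

# FIR: 5-tap moving average, a == [1.0]
b_fir = np.ones(5) / 5.0
zi_fir = scipy.signal.lfiltic(b_fir, [1.0], [])   # zero initial conditions
print(zi_fir)                                     # [0. 0. 0. 0.]

# IIR: 2nd-order Butterworth -> steady-state initial conditions
b_iir, a_iir = scipy.signal.butter(2, 0.25)
zi_iir = scipy.signal.lfilter_zi(b_iir, a_iir)
print(zi_iir.shape)                               # (2,)
```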
@@ -166,12 +172,7 @@ class FilterTransformer(
          if message.data.size > 0:
              axis = message.dims[0] if self.settings.axis is None else self.settings.axis
              axis_idx = message.get_axis_idx(axis)
-             coefs = (
-                 (self.settings.coefs,)
-                 if self.settings.coefs is not None
-                 and not isinstance(self.settings.coefs, tuple)
-                 else self.settings.coefs
-             )
+             _, coefs = _normalize_coefs(self.settings.coefs)
              filt_func = {"ba": scipy.signal.lfilter, "sos": scipy.signal.sosfilt}[
                  self.settings.coef_type
              ]
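For context, this chunk-wise filtering relies on scipy's `zi` mechanism so that streaming output matches filtering the full signal at once. A generic scipy sketch of that pattern (illustrative, not the package's code):

```python
import numpy as np
import scipy.signal

b, a = scipy.signal.butter(4, 0.1)
x = np.random.default_rng(0).standard_normal(1000)

y_full = scipy.signal.lfilter(b, a, x)            # one-shot filtering

# Chunk-by-chunk filtering, carrying the filter state between calls.
zi = np.zeros(max(len(a), len(b)) - 1)            # zero initial conditions
y_chunks = []
for chunk in np.array_split(x, 10):
    y, zi = scipy.signal.lfilter(b, a, chunk, zi=zi)
    y_chunks.append(y)

print(np.allclose(y_full, np.concatenate(y_chunks)))  # True: identical results
```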
@@ -0,0 +1,93 @@
+ from typing import Callable
+ import warnings
+
+ import numpy as np
+
+ from .filter import (
+     FilterBaseSettings,
+     BACoeffs,
+     FilterByDesignTransformer,
+     BaseFilterByDesignTransformerUnit,
+ )
+
+
+ class GaussianSmoothingSettings(FilterBaseSettings):
+     sigma: float | None = 1.0
+     """
+     sigma : float
+         Standard deviation of the Gaussian kernel.
+     """
+
+     width: int | None = 4
+     """
+     width : int
+         Number of standard deviations covered by the kernel window if kernel_size is not provided.
+     """
+
+     kernel_size: int | None = None
+     """
+     kernel_size : int | None
+         Length of the kernel in samples. If provided, overrides automatic calculation.
+     """
+
+
+ def gaussian_smoothing_filter_design(
+     sigma: float = 1.0,
+     width: int = 4,
+     kernel_size: int | None = None,
+ ) -> BACoeffs | None:
+     # Parameter checks
+     if sigma <= 0:
+         raise ValueError(f"sigma must be positive. Received: {sigma}")
+
+     if width <= 0:
+         raise ValueError(f"width must be positive. Received: {width}")
+
+     if kernel_size is not None:
+         if kernel_size < 1:
+             raise ValueError(f"kernel_size must be >= 1. Received: {kernel_size}")
+     else:
+         kernel_size = int(2 * width * sigma + 1)
+
+     # Warn if kernel_size is smaller than recommended but don't fail
+     expected_kernel_size = int(2 * width * sigma + 1)
+     if kernel_size < expected_kernel_size:
+         ## TODO: Either add a warning or determine appropriate kernel size and raise an error
+         warnings.warn(
+             f"Provided kernel_size {kernel_size} is smaller than recommended "
+             f"size {expected_kernel_size} for sigma={sigma} and width={width}. "
+             "The kernel may be truncated."
+         )
+
+     from scipy.signal.windows import gaussian
+
+     b = gaussian(kernel_size, std=sigma)
+     b /= np.sum(b)  # Ensure normalization
+     a = np.array([1.0])
+
+     return b, a
+
+
+ class GaussianSmoothingFilterTransformer(
+     FilterByDesignTransformer[GaussianSmoothingSettings, BACoeffs]
+ ):
+     def get_design_function(
+         self,
+     ) -> Callable[[float], BACoeffs]:
+         # Create a wrapper function that ignores fs parameter since gaussian smoothing doesn't need it
+         def design_wrapper(fs: float) -> BACoeffs:
+             return gaussian_smoothing_filter_design(
+                 sigma=self.settings.sigma,
+                 width=self.settings.width,
+                 kernel_size=self.settings.kernel_size,
+             )
+
+         return design_wrapper
+
+
+ class GaussianSmoothingFilter(
+     BaseFilterByDesignTransformerUnit[
+         GaussianSmoothingSettings, GaussianSmoothingFilterTransformer
+     ]
+ ):
+     SETTINGS = GaussianSmoothingSettings
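The design function in the new `gaussiansmoothing.py` module (listed in the RECORD below) returns a normalized Gaussian FIR kernel `b` with `a = [1.0]`. A standalone re-derivation of that kernel, shown without importing the new module, to illustrate the default sizing rule and the smoothing it produces:

```python
import numpy as np
import scipy.signal

sigma, width = 2.0, 4
kernel_size = int(2 * width * sigma + 1)          # default: 2 * width * sigma + 1 taps
b = scipy.signal.windows.gaussian(kernel_size, std=sigma)
b /= b.sum()                                      # unit-sum FIR numerator
a = np.array([1.0])

x = np.random.default_rng(0).standard_normal(500)
smoothed = scipy.signal.lfilter(b, a, x)          # causal FIR smoothing
print(kernel_size, np.isclose(b.sum(), 1.0), smoothed.shape)  # 17 True (500,)
```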
ezmsg/sigproc/util/sparse.py CHANGED
@@ -1,11 +1,13 @@
+ import numpy as np
  import sparse
 
 
- def sliding_win_oneaxis(
+ def sliding_win_oneaxis_old(
      s: sparse.SparseArray, nwin: int, axis: int, step: int = 1
  ) -> sparse.SparseArray:
      """
      Like `ezmsg.util.messages.axisarray.sliding_win_oneaxis` but for sparse arrays.
+     This approach is about 4x slower than the version that uses coordinate arithmetic below.
 
      Args:
          s: The input sparse array.
@@ -25,5 +27,97 @@ def sliding_win_oneaxis(
          full_slices[: axis + 1] + (sl,) + full_slices[axis + 2 :] for sl in targ_slices
      ]
      result = sparse.concatenate([s[_] for _ in full_slices], axis=axis)
-     # TODO: Profile this approach vs modifying coords only.
      return result
+
+
+ def sliding_win_oneaxis(
+     s: sparse.SparseArray, nwin: int, axis: int, step: int = 1
+ ) -> sparse.SparseArray:
+     """
+     Generates a view-like sparse array using a sliding window of specified length along a specified axis.
+     Sparse analog of an optimized dense as_strided-based implementation with these properties:
+
+     - Accepts a single `nwin` and a single `axis`.
+     - Inserts a new 'win' axis immediately BEFORE the original target axis.
+       Output shape:
+           s.shape[:axis] + (W,) + (nwin,) + s.shape[axis+1:]
+       where W = s.shape[axis] - (nwin - 1).
+     - If `step > 1`, stepping is applied by slicing along the new windows axis (same observable behavior
+       as doing `slice_along_axis(result, slice(None, None, step), axis)` in the dense version).
+
+     Args:
+         s: Input sparse array (pydata/sparse COO-compatible).
+         nwin: Sliding window size (must be > 0).
+         axis: Axis of `s` along which the window slides (supports negative indexing).
+         step: Stride between windows. If > 1, applied by slicing the windows axis after construction.
+
+     Returns:
+         A sparse array with a new windows axis inserted before the original axis.
+
+     Notes:
+         - Mirrors the dense function's known edge case: when nwin == shape[axis] + 1, W becomes 0 and
+           an empty windows axis is returned.
+         - Built by coordinate arithmetic; no per-window indexing or concatenation.
+     """
+     if -s.ndim <= axis < 0:
+         axis = s.ndim + axis
+     if not (0 <= axis < s.ndim):
+         raise ValueError(f"Invalid axis {axis} for array with {s.ndim} dimensions")
+     if nwin <= 0:
+         raise ValueError("nwin must be > 0")
+     dim = s.shape[axis]
+
+     last_win_start = dim - nwin
+     win_starts = list(range(0, last_win_start + 1, step))
+     n_win_out = len(win_starts)
+     if n_win_out <= 0:
+         # Return array with proper shape except empty along windows axis
+         return sparse.zeros(
+             s.shape[:axis] + (0,) + (nwin,) + s.shape[axis + 1 :], dtype=s.dtype
+         )
+
+     coo = s.asformat("coo")
+     coords = coo.coords  # shape: (ndim, nnz)
+     data = coo.data  # shape: (nnz,)
+     ia = coords[axis]  # indices along sliding axis, shape: (nnz,)
+
+     # For each window start, a nonzero at index i along the axis lands at offset
+     # w = i - win_start within that window when 0 <= w < nwin.
+     out_coords_blocks = []
+     out_data_blocks = []
+
+     # Reuse the input coords dtype when building the new windows-axis row.
+     idx_dtype = coords.dtype
+
+     for win_ix, win_start in enumerate(win_starts):
+         w = ia - win_start
+         # Keep nonzeros whose within-window offset falls in [0, nwin).
+         mask = (w >= 0) & (w < nwin)
+         if not mask.any():
+             continue
+
+         sel = np.nonzero(mask)[0]
+         w_sel = w[sel]
+
+         # Build new coords with the windows axis inserted at `axis`; the original axis
+         # becomes the next axis and holds the within-window offsets `w_sel`.
+         # Output ndim = s.ndim + 1
+         before = coords[:axis, sel]  # unchanged
+         after_other = coords[axis + 1 :, sel]  # dims after original axis
+         win_idx_row = np.full((1, sel.size), win_ix, dtype=idx_dtype)
+
+         new_coords = np.vstack([before, win_idx_row, w_sel[None, :], after_other])
+
+         out_coords_blocks.append(new_coords)
+         out_data_blocks.append(data[sel])
+
+     if not out_coords_blocks:
+         return sparse.zeros(
+             s.shape[:axis] + (n_win_out,) + (nwin,) + s.shape[axis + 1 :], dtype=s.dtype
+         )
+
+     out_coords = np.hstack(out_coords_blocks)
+     out_data = np.hstack(out_data_blocks)
+     out_shape = s.shape[:axis] + (n_win_out,) + (nwin,) + s.shape[axis + 1 :]
+
+     return sparse.COO(out_coords, out_data, shape=out_shape)
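A hedged usage sketch of the new coordinate-arithmetic `sliding_win_oneaxis`, assuming the module path listed in the RECORD (`ezmsg/sigproc/util/sparse.py`) and the behavior documented above (windows axis inserted before the target axis):

```python
import numpy as np
import sparse

from ezmsg.sigproc.util.sparse import sliding_win_oneaxis

rng = np.random.default_rng(0)
dense = rng.random((4, 10, 3))
dense[dense < 0.8] = 0.0                          # mostly zeros -> worth keeping sparse
s = sparse.COO.from_numpy(dense)

win = sliding_win_oneaxis(s, nwin=5, axis=1)
print(win.shape)                                  # (4, 6, 5, 3): 10 - (5 - 1) = 6 windows

# Window 2 should match the corresponding dense slice along the original axis.
print(np.allclose(win[:, 2].todense(), dense[:, 2:7, :]))  # True
```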
@@ -0,0 +1,72 @@
+ Metadata-Version: 2.4
+ Name: ezmsg-sigproc
+ Version: 2.2.0
+ Summary: Timeseries signal processing implementations in ezmsg
+ Author-email: Griffin Milsap <griffin.milsap@gmail.com>, Preston Peranich <pperanich@gmail.com>, Chadwick Boulay <chadwick.boulay@gmail.com>
+ License-Expression: MIT
+ License-File: LICENSE.txt
+ Requires-Python: >=3.10.15
+ Requires-Dist: array-api-compat>=1.11.1
+ Requires-Dist: ezmsg>=3.6.0
+ Requires-Dist: numba>=0.61.0
+ Requires-Dist: numpy>=1.26.0
+ Requires-Dist: pywavelets>=1.6.0
+ Requires-Dist: scipy>=1.13.1
+ Requires-Dist: sparse>=0.15.4
+ Description-Content-Type: text/markdown
+
+ # ezmsg.sigproc
+
+ ## Overview
+
+ ezmsg-sigproc offers timeseries signal‑processing primitives built atop the ezmsg message‑passing framework. Core dependencies include ezmsg, numpy, scipy, pywavelets, and sparse; the project itself is managed through hatchling and uses VCS hooks to populate `__version__.py`.
+
+ ## Installation
+
+ Install the latest release from PyPI with: `pip install ezmsg-sigproc` (or `uv add ...` or `poetry add ...`).
+
+ You can install pre-release versions directly from GitHub:
+
+ * Using `pip`: `pip install git+https://github.com/ezmsg-org/ezmsg-sigproc.git@dev`
+ * Using `uv`: `uv add git+https://github.com/ezmsg-org/ezmsg-sigproc --branch dev`
+ * Using `poetry`: `poetry add "git+https://github.com/ezmsg-org/ezmsg-sigproc.git@dev"`
+
+ > See the [Development](#development) section below if you intend to install for development.
+
+ ## Source layout & key modules
+ * All source resides under `src/ezmsg/sigproc`, which contains a suite of processors (for example, `filter.py`, `spectrogram.py`, `spectrum.py`, `sampler.py`) and `math` and `util` subpackages.
+ * The framework’s backbone is `base.py`, defining standard protocols—Processor, Producer, Consumer, and Transformer—that enable both stateless and stateful processing chains.
+ * Filtering is implemented in `filter.py`, providing settings dataclasses and a stateful transformer that applies supplied coefficients to incoming data.
+ * Spectral analysis uses a composite spectrogram transformer chaining windowing, spectrum computation, and axis adjustments.
+
+ ## Operating styles: Standalone processors vs. ezmsg pipelines
+ While each processor is designed to be assembled into an ezmsg pipeline, the components are also well‑suited for offline, ad‑hoc analysis. You can instantiate processors directly in scripts or notebooks for quick prototyping or to validate results from other code. The companion Unit wrappers, however, are meant for assembling processors into a full ezmsg pipeline.
+
+ A fully defined ezmsg pipeline shines in online streaming scenarios where message routing, scheduling, and latency handling are crucial. Nevertheless, you can run the same pipeline offline—say, within a Jupyter notebook—if your analysis benefits from ezmsg’s structured execution model. Deciding between a standalone processor and a full pipeline comes down to the trade‑off between simplicity and the operational overhead of the pipeline:
+
+ * Standalone processors: Low overhead, ideal for one‑off or exploratory offline tasks.
+ * Pipeline + Unit wrappers: Additional setup cost but bring concurrency, standardized interfaces, and automatic message flow—useful when your offline experiment mirrors a live system or when you require fine‑grained pipeline behavior.
+
+ ## Documentation & tests
+ * `docs/ProcessorsBase.md` details the processor hierarchy and generic type patterns, providing a solid foundation for custom components.
+ * Unit tests (e.g., `tests/unit/test_sampler.py`) offer concrete examples of usage, showcasing sampler generation, windowing, and message handling.
+
+ ## Where to learn next
+ * Study `docs/ProcessorsBase.md` to master the processor architecture.
+ * Explore unit tests for hands‑on examples of composing processors and Units.
+ * Review the project's dependencies, including the ezmsg framework, in `pyproject.toml` to understand the surrounding ecosystem.
+ * Experiment with the code—try running processors standalone and then integrate them into a small pipeline to observe the trade‑offs firsthand.
+
+ This approach equips newcomers to choose the right level of abstraction—raw processor, Unit wrapper, or full pipeline—based on the demands of their analysis or streaming application.
+
+ ## Development
+
+ We use [`uv`](https://docs.astral.sh/uv/getting-started/installation/) for development. It is not strictly required, but if you intend to contribute to ezmsg-sigproc then using `uv` will lead to the smoothest collaboration.
+
+ 1. Install [`uv`](https://docs.astral.sh/uv/getting-started/installation/) if not already installed.
+ 2. Fork ezmsg-sigproc and clone your fork to your local computer.
+ 3. Open a terminal and `cd` to the cloned folder.
+ 4. `uv sync` to create a `.venv` and install dependencies.
+ 5. `uv run pre-commit install` to install pre-commit hooks to do linting and formatting.
+ 6. Run the test suite before finalizing your edits: `uv run pytest tests`
+ 7. Make a PR against the `dev` branch of the main repo.
@@ -1,10 +1,10 @@
  ezmsg/sigproc/__init__.py,sha256=8K4IcOA3-pfzadoM6s2Sfg5460KlJUocGgyTJTJl96U,52
- ezmsg/sigproc/__version__.py,sha256=2thmcF9DS_Zp1zHI3N0kjBeMAuCP9mdDGnL0clqQpS8,511
+ ezmsg/sigproc/__version__.py,sha256=wdFcRBeaWRZXknL-E8RTk9hV9M-OMto6dfJ90sc1i9A,511
  ezmsg/sigproc/activation.py,sha256=qWAhpbFBxSoqbGy4P9JKE5LY-5v8rQI1U81OvNxBG2Y,2820
  ezmsg/sigproc/adaptive_lattice_notch.py,sha256=3M65PrZpdgBlQtE7Ph4Gu2ISIyWw4j8Xxhm5PpSkLFw,9102
  ezmsg/sigproc/affinetransform.py,sha256=WU495KoDKZfHPS3Dumh65rgf639koNlfDIx_torIByg,8662
- ezmsg/sigproc/aggregate.py,sha256=sdVzSXDg9BUNT-ljyvrWLeoZtRTlfisP0OxEchbgyMM,6111
- ezmsg/sigproc/bandpower.py,sha256=N9pDz1X6ZTNP6VpCcfoXhj32j_9KpMaMuVYimoS6Jpc,2083
+ ezmsg/sigproc/aggregate.py,sha256=KR3u9D9jx9KcOQlvI10I6krSxbZCIerG2i4u5Wu5qMI,6754
+ ezmsg/sigproc/bandpower.py,sha256=j-Y6iWjD2xkggfi-4HAFJVBPJHHBGvAZy1uM4murZkQ,2319
  ezmsg/sigproc/base.py,sha256=PQr03O2P1v9LzcSR0GJLvPpBCLtnmGaz76gUeXphcH4,48753
  ezmsg/sigproc/butterworthfilter.py,sha256=7ZP4CRsXBt3-5dzyUjD45vc0J3Fhpm4CLrk-ps28jhc,5305
  ezmsg/sigproc/cheby.py,sha256=-aSauAwxJmmSSiRaw5qGY9rvYFOmk1bZlS4gGrS0jls,3737
@@ -16,8 +16,9 @@ ezmsg/sigproc/downsample.py,sha256=0X6EwPZ_XTwA2-nx5w-2HmMZUEDFuGAYF5EmPSuuVj8,3
  ezmsg/sigproc/ewma.py,sha256=W_VS2MxiO1J7z2XS6rtnLnCEXxdRPQbMKtZduBwqTEQ,6369
  ezmsg/sigproc/ewmfilter.py,sha256=EPlocRdKORj575VV1YUzcNsVcq-pYgdEJ7_m9WfpVnY,4795
  ezmsg/sigproc/extract_axis.py,sha256=Gl8Hl_Ho2pPzchPjfseVHVRAqxj6eOvUQZlzfYRA7eI,1603
- ezmsg/sigproc/filter.py,sha256=i5adfND0NATrk2RewkWQ0C3RKRGiElr5AIB2eZE4Dr8,11225
+ ezmsg/sigproc/filter.py,sha256=SfptCJFVxYL4sTNoMCsn2NYr66bo6ea1w9PqzWZmUBY,11299
  ezmsg/sigproc/filterbank.py,sha256=pJzv_G6chgWa1ARmRjMAMgt9eEGnA-ZbMSge4EWrcYY,13633
+ ezmsg/sigproc/gaussiansmoothing.py,sha256=NaVezgNwdvp-kam1I_7lSID4Obi0UCxZshH7A2afaVg,2692
  ezmsg/sigproc/messages.py,sha256=y_twVPK7TxRj8ajmuSuBuxwvLTgyv9OF7Y7v9bw1tfs,926
  ezmsg/sigproc/quantize.py,sha256=VzaqE6PatibEjkk7XrGO-ubAXYurAed9FYOn4bcQZQk,2193
  ezmsg/sigproc/resample.py,sha256=XQzEbUq44qTx5tXX2QXd14hkMb7C3LXT3CqbC161X1M,11600
@@ -43,9 +44,9 @@ ezmsg/sigproc/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
  ezmsg/sigproc/util/asio.py,sha256=PQew73hB1oRmp7pfTqx-c4uo1zqgjxvZcTZCROQrEP4,5270
  ezmsg/sigproc/util/message.py,sha256=l_b1b6bXX8N6VF9RbUELzsHs73cKkDURBdIr0lt3CY0,909
  ezmsg/sigproc/util/profile.py,sha256=KNJ_QkKelQHNEp2C8MhqzdhYydMNULc_NQq3ccMfzIk,5775
- ezmsg/sigproc/util/sparse.py,sha256=8Ke0jh3jRPi_TwIdLTwLdojQiaqPs6QV-Edqpx81VoI,1036
+ ezmsg/sigproc/util/sparse.py,sha256=mE64p1tYb5A1shaRE1D-VnH-RshbLb8g8kXSXxnA-J4,4842
  ezmsg/sigproc/util/typeresolution.py,sha256=5R7xmG-F4CkdqQ5aoQnqM-htQb-VwAJl58jJgxtClys,3146
- ezmsg_sigproc-2.0.0.dist-info/METADATA,sha256=6QpdeS-5kMO5DogCy7LpB6ho0sQN5rnpfRwF7TwgXg4,2479
- ezmsg_sigproc-2.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- ezmsg_sigproc-2.0.0.dist-info/licenses/LICENSE.txt,sha256=seu0tKhhAMPCUgc1XpXGGaCxY1YaYvFJwqFuQZAl2go,1100
- ezmsg_sigproc-2.0.0.dist-info/RECORD,,
+ ezmsg_sigproc-2.2.0.dist-info/METADATA,sha256=KfMyPnQipTaX6puyJxEaKu-JDT0tvbxLnhwKeEY_kKw,4977
+ ezmsg_sigproc-2.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ ezmsg_sigproc-2.2.0.dist-info/licenses/LICENSE.txt,sha256=seu0tKhhAMPCUgc1XpXGGaCxY1YaYvFJwqFuQZAl2go,1100
+ ezmsg_sigproc-2.2.0.dist-info/RECORD,,
@@ -1,62 +0,0 @@
- Metadata-Version: 2.4
- Name: ezmsg-sigproc
- Version: 2.0.0
- Summary: Timeseries signal processing implementations in ezmsg
- Author-email: Griffin Milsap <griffin.milsap@gmail.com>, Preston Peranich <pperanich@gmail.com>, Chadwick Boulay <chadwick.boulay@gmail.com>
- License-Expression: MIT
- License-File: LICENSE.txt
- Requires-Python: >=3.10.15
- Requires-Dist: array-api-compat>=1.11.1
- Requires-Dist: ezmsg>=3.6.0
- Requires-Dist: numba>=0.61.0
- Requires-Dist: numpy>=1.26.0
- Requires-Dist: pywavelets>=1.6.0
- Requires-Dist: scipy>=1.13.1
- Requires-Dist: sparse>=0.15.4
- Provides-Extra: test
- Requires-Dist: flake8>=7.1.1; extra == 'test'
- Requires-Dist: frozendict>=2.4.4; extra == 'test'
- Requires-Dist: pytest-asyncio>=0.24.0; extra == 'test'
- Requires-Dist: pytest-cov>=5.0.0; extra == 'test'
- Requires-Dist: pytest>=8.3.3; extra == 'test'
- Description-Content-Type: text/markdown
-
- # ezmsg.sigproc
-
- Timeseries signal processing implementations for ezmsg
-
- ## Dependencies
-
- * `ezmsg`
- * `numpy`
- * `scipy`
- * `pywavelets`
-
- ## Installation
-
- ### Release
-
- Install the latest release from pypi with: `pip install ezmsg-sigproc` (or `uv add ...` or `poetry add ...`).
-
- ### Development Version
-
- You can add the development version of `ezmsg-sigproc` to your project's dependencies in one of several ways.
-
- You can clone it and add its path to your project dependencies. You may wish to do this if you intend to edit `ezmsg-sigproc`. If so, please refer to the [Developers](#developers) section below.
-
- You can also add it directly from GitHub:
-
- * Using `pip`: `pip install git+https://github.com/ezmsg-org/ezmsg-sigproc.git@dev`
- * Using `poetry`: `poetry add "git+https://github.com/ezmsg-org/ezmsg-sigproc.git@dev"`
- * Using `uv`: `uv add git+https://github.com/ezmsg-org/ezmsg-sigproc --branch dev`
-
- ## Developers
-
- We use [`uv`](https://docs.astral.sh/uv/getting-started/installation/) for development. It is not strictly required, but if you intend to contribute to ezmsg-sigproc then using `uv` will lead to the smoothest collaboration.
-
- 1. Install [`uv`](https://docs.astral.sh/uv/getting-started/installation/) if not already installed.
- 2. Fork ezmsg-sigproc and clone your fork to your local computer.
- 3. Open a terminal and `cd` to the cloned folder.
- 4. `uv sync` to create a .venv and install dependencies.
- 5. `uv run pre-commit install` to install pre-commit hooks to do linting and formatting.
- 6. After editing code and making commits, Run the test suite before making a PR: `uv run pytest tests`