sigima 0.0.1.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sigima/__init__.py +142 -2
- sigima/client/__init__.py +105 -0
- sigima/client/base.py +780 -0
- sigima/client/remote.py +469 -0
- sigima/client/stub.py +814 -0
- sigima/client/utils.py +90 -0
- sigima/config.py +444 -0
- sigima/data/logo/Sigima.svg +135 -0
- sigima/data/tests/annotations.json +798 -0
- sigima/data/tests/curve_fitting/exponential_fit.txt +511 -0
- sigima/data/tests/curve_fitting/gaussian_fit.txt +100 -0
- sigima/data/tests/curve_fitting/piecewiseexponential_fit.txt +1022 -0
- sigima/data/tests/curve_fitting/polynomial_fit.txt +100 -0
- sigima/data/tests/curve_fitting/twohalfgaussian_fit.txt +1000 -0
- sigima/data/tests/curve_formats/bandwidth.txt +201 -0
- sigima/data/tests/curve_formats/boxcar.npy +0 -0
- sigima/data/tests/curve_formats/datetime.txt +1001 -0
- sigima/data/tests/curve_formats/dynamic_parameters.txt +4000 -0
- sigima/data/tests/curve_formats/fw1e2.txt +301 -0
- sigima/data/tests/curve_formats/fwhm.txt +319 -0
- sigima/data/tests/curve_formats/multiple_curves.csv +29 -0
- sigima/data/tests/curve_formats/noised_saw.mat +0 -0
- sigima/data/tests/curve_formats/oscilloscope.csv +111 -0
- sigima/data/tests/curve_formats/other/other2/recursive2.txt +5 -0
- sigima/data/tests/curve_formats/other/recursive1.txt +5 -0
- sigima/data/tests/curve_formats/paracetamol.npy +0 -0
- sigima/data/tests/curve_formats/paracetamol.txt +1010 -0
- sigima/data/tests/curve_formats/paracetamol_dx_dy.csv +1000 -0
- sigima/data/tests/curve_formats/paracetamol_dy.csv +1001 -0
- sigima/data/tests/curve_formats/pulse1.npy +0 -0
- sigima/data/tests/curve_formats/pulse2.npy +0 -0
- sigima/data/tests/curve_formats/simple.txt +5 -0
- sigima/data/tests/curve_formats/spectrum.mca +2139 -0
- sigima/data/tests/curve_formats/square2.npy +0 -0
- sigima/data/tests/curve_formats/step.npy +0 -0
- sigima/data/tests/fabry-perot1.jpg +0 -0
- sigima/data/tests/fabry-perot2.jpg +0 -0
- sigima/data/tests/flower.npy +0 -0
- sigima/data/tests/image_formats/NF 180338201.scor-data +11003 -0
- sigima/data/tests/image_formats/binary_image.npy +0 -0
- sigima/data/tests/image_formats/binary_image.png +0 -0
- sigima/data/tests/image_formats/centroid_test.npy +0 -0
- sigima/data/tests/image_formats/coordinated_text/complex_image.txt +10011 -0
- sigima/data/tests/image_formats/coordinated_text/complex_ref_image.txt +10010 -0
- sigima/data/tests/image_formats/coordinated_text/image.txt +15 -0
- sigima/data/tests/image_formats/coordinated_text/image2.txt +14 -0
- sigima/data/tests/image_formats/coordinated_text/image_no_unit_no_label.txt +14 -0
- sigima/data/tests/image_formats/coordinated_text/image_with_nan.txt +15 -0
- sigima/data/tests/image_formats/coordinated_text/image_with_unit.txt +14 -0
- sigima/data/tests/image_formats/fiber.csv +480 -0
- sigima/data/tests/image_formats/fiber.jpg +0 -0
- sigima/data/tests/image_formats/fiber.png +0 -0
- sigima/data/tests/image_formats/fiber.txt +480 -0
- sigima/data/tests/image_formats/gaussian_spot_with_noise.npy +0 -0
- sigima/data/tests/image_formats/mr-brain.dcm +0 -0
- sigima/data/tests/image_formats/noised_gaussian.mat +0 -0
- sigima/data/tests/image_formats/sif_reader/nd_lum_image_no_glue.sif +0 -0
- sigima/data/tests/image_formats/sif_reader/raman1.sif +0 -0
- sigima/data/tests/image_formats/tiling.txt +10 -0
- sigima/data/tests/image_formats/uint16.tiff +0 -0
- sigima/data/tests/image_formats/uint8.tiff +0 -0
- sigima/data/tests/laser_beam/TEM00_z_13.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_18.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_23.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_30.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_35.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_40.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_45.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_50.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_55.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_60.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_65.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_70.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_75.jpg +0 -0
- sigima/data/tests/laser_beam/TEM00_z_80.jpg +0 -0
- sigima/enums.py +195 -0
- sigima/io/__init__.py +123 -0
- sigima/io/base.py +311 -0
- sigima/io/common/__init__.py +5 -0
- sigima/io/common/basename.py +164 -0
- sigima/io/common/converters.py +189 -0
- sigima/io/common/objmeta.py +181 -0
- sigima/io/common/textreader.py +58 -0
- sigima/io/convenience.py +157 -0
- sigima/io/enums.py +17 -0
- sigima/io/ftlab.py +395 -0
- sigima/io/image/__init__.py +9 -0
- sigima/io/image/base.py +177 -0
- sigima/io/image/formats.py +1016 -0
- sigima/io/image/funcs.py +414 -0
- sigima/io/signal/__init__.py +9 -0
- sigima/io/signal/base.py +129 -0
- sigima/io/signal/formats.py +290 -0
- sigima/io/signal/funcs.py +723 -0
- sigima/objects/__init__.py +260 -0
- sigima/objects/base.py +937 -0
- sigima/objects/image/__init__.py +88 -0
- sigima/objects/image/creation.py +556 -0
- sigima/objects/image/object.py +524 -0
- sigima/objects/image/roi.py +904 -0
- sigima/objects/scalar/__init__.py +57 -0
- sigima/objects/scalar/common.py +215 -0
- sigima/objects/scalar/geometry.py +502 -0
- sigima/objects/scalar/table.py +784 -0
- sigima/objects/shape.py +290 -0
- sigima/objects/signal/__init__.py +133 -0
- sigima/objects/signal/constants.py +27 -0
- sigima/objects/signal/creation.py +1428 -0
- sigima/objects/signal/object.py +444 -0
- sigima/objects/signal/roi.py +274 -0
- sigima/params.py +405 -0
- sigima/proc/__init__.py +96 -0
- sigima/proc/base.py +381 -0
- sigima/proc/decorator.py +330 -0
- sigima/proc/image/__init__.py +513 -0
- sigima/proc/image/arithmetic.py +335 -0
- sigima/proc/image/base.py +260 -0
- sigima/proc/image/detection.py +519 -0
- sigima/proc/image/edges.py +329 -0
- sigima/proc/image/exposure.py +406 -0
- sigima/proc/image/extraction.py +458 -0
- sigima/proc/image/filtering.py +219 -0
- sigima/proc/image/fourier.py +147 -0
- sigima/proc/image/geometry.py +661 -0
- sigima/proc/image/mathops.py +340 -0
- sigima/proc/image/measurement.py +195 -0
- sigima/proc/image/morphology.py +155 -0
- sigima/proc/image/noise.py +107 -0
- sigima/proc/image/preprocessing.py +182 -0
- sigima/proc/image/restoration.py +235 -0
- sigima/proc/image/threshold.py +217 -0
- sigima/proc/image/transformations.py +393 -0
- sigima/proc/signal/__init__.py +376 -0
- sigima/proc/signal/analysis.py +206 -0
- sigima/proc/signal/arithmetic.py +551 -0
- sigima/proc/signal/base.py +262 -0
- sigima/proc/signal/extraction.py +60 -0
- sigima/proc/signal/features.py +310 -0
- sigima/proc/signal/filtering.py +484 -0
- sigima/proc/signal/fitting.py +276 -0
- sigima/proc/signal/fourier.py +259 -0
- sigima/proc/signal/mathops.py +420 -0
- sigima/proc/signal/processing.py +580 -0
- sigima/proc/signal/stability.py +175 -0
- sigima/proc/title_formatting.py +227 -0
- sigima/proc/validation.py +272 -0
- sigima/tests/__init__.py +7 -0
- sigima/tests/common/__init__.py +0 -0
- sigima/tests/common/arithmeticparam_unit_test.py +26 -0
- sigima/tests/common/basename_unit_test.py +126 -0
- sigima/tests/common/client_unit_test.py +412 -0
- sigima/tests/common/converters_unit_test.py +77 -0
- sigima/tests/common/decorator_unit_test.py +176 -0
- sigima/tests/common/examples_unit_test.py +104 -0
- sigima/tests/common/kernel_normalization_unit_test.py +242 -0
- sigima/tests/common/roi_basic_unit_test.py +73 -0
- sigima/tests/common/roi_geometry_unit_test.py +171 -0
- sigima/tests/common/scalar_builder_unit_test.py +142 -0
- sigima/tests/common/scalar_unit_test.py +991 -0
- sigima/tests/common/shape_unit_test.py +183 -0
- sigima/tests/common/stat_unit_test.py +138 -0
- sigima/tests/common/title_formatting_unit_test.py +338 -0
- sigima/tests/common/tools_coordinates_unit_test.py +60 -0
- sigima/tests/common/transformations_unit_test.py +178 -0
- sigima/tests/common/validation_unit_test.py +205 -0
- sigima/tests/conftest.py +129 -0
- sigima/tests/data.py +998 -0
- sigima/tests/env.py +280 -0
- sigima/tests/guiutils.py +163 -0
- sigima/tests/helpers.py +532 -0
- sigima/tests/image/__init__.py +28 -0
- sigima/tests/image/binning_unit_test.py +128 -0
- sigima/tests/image/blob_detection_unit_test.py +312 -0
- sigima/tests/image/centroid_unit_test.py +170 -0
- sigima/tests/image/check_2d_array_unit_test.py +63 -0
- sigima/tests/image/contour_unit_test.py +172 -0
- sigima/tests/image/convolution_unit_test.py +178 -0
- sigima/tests/image/datatype_unit_test.py +67 -0
- sigima/tests/image/edges_unit_test.py +155 -0
- sigima/tests/image/enclosingcircle_unit_test.py +88 -0
- sigima/tests/image/exposure_unit_test.py +223 -0
- sigima/tests/image/fft2d_unit_test.py +189 -0
- sigima/tests/image/filtering_unit_test.py +166 -0
- sigima/tests/image/geometry_unit_test.py +654 -0
- sigima/tests/image/hough_circle_unit_test.py +147 -0
- sigima/tests/image/imageobj_unit_test.py +737 -0
- sigima/tests/image/morphology_unit_test.py +71 -0
- sigima/tests/image/noise_unit_test.py +57 -0
- sigima/tests/image/offset_correction_unit_test.py +72 -0
- sigima/tests/image/operation_unit_test.py +518 -0
- sigima/tests/image/peak2d_limits_unit_test.py +41 -0
- sigima/tests/image/peak2d_unit_test.py +133 -0
- sigima/tests/image/profile_unit_test.py +159 -0
- sigima/tests/image/projections_unit_test.py +121 -0
- sigima/tests/image/restoration_unit_test.py +141 -0
- sigima/tests/image/roi2dparam_unit_test.py +53 -0
- sigima/tests/image/roi_advanced_unit_test.py +588 -0
- sigima/tests/image/roi_grid_unit_test.py +279 -0
- sigima/tests/image/spectrum2d_unit_test.py +40 -0
- sigima/tests/image/threshold_unit_test.py +91 -0
- sigima/tests/io/__init__.py +0 -0
- sigima/tests/io/addnewformat_unit_test.py +125 -0
- sigima/tests/io/convenience_funcs_unit_test.py +470 -0
- sigima/tests/io/coordinated_text_format_unit_test.py +495 -0
- sigima/tests/io/datetime_csv_unit_test.py +198 -0
- sigima/tests/io/imageio_formats_test.py +41 -0
- sigima/tests/io/ioregistry_unit_test.py +69 -0
- sigima/tests/io/objmeta_unit_test.py +87 -0
- sigima/tests/io/readobj_unit_test.py +130 -0
- sigima/tests/io/readwriteobj_unit_test.py +67 -0
- sigima/tests/signal/__init__.py +0 -0
- sigima/tests/signal/analysis_unit_test.py +135 -0
- sigima/tests/signal/check_1d_arrays_unit_test.py +169 -0
- sigima/tests/signal/convolution_unit_test.py +404 -0
- sigima/tests/signal/datetime_unit_test.py +176 -0
- sigima/tests/signal/fft1d_unit_test.py +303 -0
- sigima/tests/signal/filters_unit_test.py +403 -0
- sigima/tests/signal/fitting_unit_test.py +929 -0
- sigima/tests/signal/fwhm_unit_test.py +111 -0
- sigima/tests/signal/noise_unit_test.py +128 -0
- sigima/tests/signal/offset_correction_unit_test.py +34 -0
- sigima/tests/signal/operation_unit_test.py +489 -0
- sigima/tests/signal/peakdetection_unit_test.py +145 -0
- sigima/tests/signal/processing_unit_test.py +657 -0
- sigima/tests/signal/pulse/__init__.py +112 -0
- sigima/tests/signal/pulse/crossing_times_unit_test.py +123 -0
- sigima/tests/signal/pulse/plateau_detection_unit_test.py +102 -0
- sigima/tests/signal/pulse/pulse_unit_test.py +1824 -0
- sigima/tests/signal/roi_advanced_unit_test.py +392 -0
- sigima/tests/signal/signalobj_unit_test.py +603 -0
- sigima/tests/signal/stability_unit_test.py +431 -0
- sigima/tests/signal/uncertainty_unit_test.py +611 -0
- sigima/tests/vistools.py +1030 -0
- sigima/tools/__init__.py +59 -0
- sigima/tools/checks.py +290 -0
- sigima/tools/coordinates.py +308 -0
- sigima/tools/datatypes.py +26 -0
- sigima/tools/image/__init__.py +97 -0
- sigima/tools/image/detection.py +451 -0
- sigima/tools/image/exposure.py +77 -0
- sigima/tools/image/extraction.py +48 -0
- sigima/tools/image/fourier.py +260 -0
- sigima/tools/image/geometry.py +190 -0
- sigima/tools/image/preprocessing.py +165 -0
- sigima/tools/signal/__init__.py +86 -0
- sigima/tools/signal/dynamic.py +254 -0
- sigima/tools/signal/features.py +135 -0
- sigima/tools/signal/filtering.py +171 -0
- sigima/tools/signal/fitting.py +1171 -0
- sigima/tools/signal/fourier.py +466 -0
- sigima/tools/signal/interpolation.py +70 -0
- sigima/tools/signal/peakdetection.py +126 -0
- sigima/tools/signal/pulse.py +1626 -0
- sigima/tools/signal/scaling.py +50 -0
- sigima/tools/signal/stability.py +258 -0
- sigima/tools/signal/windowing.py +90 -0
- sigima/worker.py +79 -0
- sigima-1.0.0.dist-info/METADATA +233 -0
- sigima-1.0.0.dist-info/RECORD +262 -0
- {sigima-0.0.1.dev0.dist-info → sigima-1.0.0.dist-info}/licenses/LICENSE +29 -29
- sigima-0.0.1.dev0.dist-info/METADATA +0 -60
- sigima-0.0.1.dev0.dist-info/RECORD +0 -6
- {sigima-0.0.1.dev0.dist-info → sigima-1.0.0.dist-info}/WHEEL +0 -0
- {sigima-0.0.1.dev0.dist-info → sigima-1.0.0.dist-info}/top_level.txt +0 -0
sigima/tools/signal/fourier.py
@@ -0,0 +1,466 @@
# Copyright (c) DataLab Platform Developers, BSD 3-Clause license, see LICENSE file.

"""
.. Fourier Analysis (see parent package :mod:`sigima.tools.signal`).

"""

# pylint: disable=invalid-name  # Allows short reference names like x, y, ...

from __future__ import annotations

from typing import Literal

import numpy as np
import scipy.signal  # type: ignore[import]

from sigima.tools.checks import check_1d_arrays, normalize_kernel
from sigima.tools.signal.dynamic import sampling_rate


@check_1d_arrays(x_evenly_spaced=True)
def zero_padding(
    x: np.ndarray, y: np.ndarray, n_prepend: int = 0, n_append: int = 0
) -> tuple[np.ndarray, np.ndarray]:
    """Prepend and append zeros.

    This function pads the input signal with zeros at the beginning and end.

    Args:
        x: X data.
        y: Y data.
        n_prepend: Number of zeros to prepend.
        n_append: Number of zeros to append.

    Returns:
        Tuple (xnew, ynew): Padded x and y.
    """
    if n_prepend < 0:
        raise ValueError("Number of zeros to prepend must be non-negative.")
    if n_append < 0:
        raise ValueError("Number of zeros to append must be non-negative.")

    dx = np.mean(np.diff(x))
    xnew = np.linspace(
        x[0] - n_prepend * dx,
        x[-1] + n_append * dx,
        y.size + n_prepend + n_append,
    )
    ynew = np.pad(y, (n_prepend, n_append), mode="constant")
    return xnew, ynew


@check_1d_arrays(x_evenly_spaced=True)
def fft1d(
    x: np.ndarray, y: np.ndarray, shift: bool = True
) -> tuple[np.ndarray, np.ndarray]:
    """Compute the Fast Fourier Transform (FFT) of a 1D real signal.

    Args:
        x: Time domain axis (evenly spaced).
        y: Signal values.
        shift: If True, shift zero frequency and its corresponding FFT component to the
            center.

    Returns:
        Tuple (f, sp): Frequency axis and corresponding FFT values.
    """
    dt = np.mean(np.diff(x))
    f = np.fft.fftfreq(x.size, d=dt)  # Frequency axis
    sp = np.fft.fft(y)  # Spectrum values
    if shift:
        f = np.fft.fftshift(f)
        sp = np.fft.fftshift(sp)
    return f, sp


@check_1d_arrays(x_evenly_spaced=False, x_sorted=False, y_dtype=np.complexfloating)
def ifft1d(
    f: np.ndarray, sp: np.ndarray, initial: float = 0.0
) -> tuple[np.ndarray, np.ndarray]:
    """Compute the inverse Fast Fourier Transform (FFT) of a 1D complex spectrum.

    Args:
        f: Frequency axis (evenly spaced).
        sp: FFT values.
        initial: Starting value for the time axis.

    Returns:
        Tuple (x, y): Time axis and real signal.

    Raises:
        ValueError: If frequency array is not evenly spaced or has fewer than 2 points.
    """
    if f.size < 2:
        raise ValueError("Frequency array must have at least two elements.")

    if np.all(np.diff(f) >= 0.0):
        # If frequencies are sorted, assume input is shifted.
        # The spectrum needs to be unshifted.
        sp = np.fft.ifftshift(sp)
    else:
        # Otherwise assume input is not shifted.
        # The frequencies need to be shifted.
        f = np.fft.fftshift(f)

    diff_f = np.diff(f)
    df = np.mean(diff_f)
    if not np.allclose(diff_f, df):
        raise ValueError("Frequency array must be evenly spaced.")

    y = np.fft.ifft(sp)
    dt = 1.0 / (f.size * df)
    x = np.linspace(initial, initial + (y.size - 1) * dt, y.size)

    return x, y.real


@check_1d_arrays(x_evenly_spaced=True)
def magnitude_spectrum(
    x: np.ndarray, y: np.ndarray, decibel: bool = False
) -> tuple[np.ndarray, np.ndarray]:
    """Compute magnitude spectrum.

    Args:
        x: X data.
        y: Y data.
        decibel: Compute the magnitude spectrum root-power level in decibel (dB).

    Returns:
        Tuple (f, mag_spectrum): Frequency values and magnitude spectrum.
    """
    f, spectrum = fft1d(x, y)
    mag_spectrum = np.abs(spectrum)
    if decibel:
        mag_spectrum = 20 * np.log10(mag_spectrum)
    return f, mag_spectrum


@check_1d_arrays(x_evenly_spaced=True)
def phase_spectrum(x: np.ndarray, y: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Compute phase spectrum.

    Args:
        x: X data.
        y: Y data.

    Returns:
        Tuple (f, phase): Frequency values and phase spectrum in degrees.
    """
    f, spectrum = fft1d(x, y)
    phase = np.rad2deg(np.angle(spectrum))
    return f, phase


@check_1d_arrays(x_evenly_spaced=True)
def psd(
    x: np.ndarray, y: np.ndarray, decibel: bool = False
) -> tuple[np.ndarray, np.ndarray]:
    """Estimate the Power Spectral Density (PSD) using Welch's method.

    Args:
        x: X data.
        y: Y data.
        decibel: Compute the power spectral density power level in decibel (dB).

    Returns:
        Tuple (f, welch_psd): Frequency values and PSD.
    """
    f, welch_psd = scipy.signal.welch(y, fs=sampling_rate(x))
    if decibel:
        welch_psd = 10 * np.log10(welch_psd)
    return f, welch_psd


@check_1d_arrays(x_evenly_spaced=True)
def convolve(
    x: np.ndarray,
    y: np.ndarray,
    h: np.ndarray,
    boundary: Literal["reflect", "symmetric", "edge", "wrap"] = "reflect",
    normalize_kernel_flag: bool = True,
    method: Literal["auto", "direct", "fft"] = "auto",
    correct_group_delay: bool = True,
) -> np.ndarray:
    """Convolve a 1D signal with a kernel, avoiding border artifacts and x-shift.

    The input signal is padded before convolution, then a 'valid' extraction is
    used to return exactly len(y) samples. Non-zero padding (e.g. "reflect")
    prevents the typical edge attenuation caused by implicit zero-padding.
    If the kernel is asymmetric, an optional group-delay correction recenters the
    output on the same x-grid (no shift), using sub-sample interpolation.

    Args:
        x: 1D monotonically increasing and uniformly spaced axis (same length as y).
        y: 1D input signal.
        h: 1D convolution kernel (impulse response).
        boundary: Padding mode passed to ``np.pad`` ("reflect" recommended).
        normalize_kernel_flag: If True, normalize kernel so that ``h.sum() == 1`` to
            preserve DC level.
        method: Convolution method for ``scipy.signal.convolve``.
        correct_group_delay: If True, compensate the kernel center-of-mass shift
            (group delay) to avoid any x-shift in the output.

    Returns:
        Convolved signal with the same length as ``y``, aligned on ``x``.

    Raises:
        ValueError: If inputs are not 1D, empty, or shapes are inconsistent.

    Notes:
        Precondition: ``x`` is strictly increasing with constant spacing. This is
        required for standard discrete convolution to represent a physical LTI
        filtering on the given grid.
    """
    if h.size != y.size:
        raise ValueError("X data and Y data of the filter must have the same size.")

    # ---- Optional DC preservation
    if normalize_kernel_flag:
        h = normalize_kernel(h)

    M = int(h.size)
    if M == 1:
        # With normalization, h == [1]; otherwise scale by h[0]
        return y.copy() if normalize_kernel_flag else y * h[0]

    # ---- Compute asymmetric pad widths so that 'valid' returns exactly len(y)
    w_left = M // 2
    w_right = (M - 1) - w_left

    # ---- Pad the signal to mitigate border artifacts during convolution
    y_pad = np.pad(y, (w_left, w_right), mode=boundary)

    # ---- Linear convolution with 'valid' to get back exactly N samples
    y_conv = scipy.signal.convolve(y_pad, h, mode="valid", method=method)

    if correct_group_delay:
        # Center-of-mass of the kernel in sample units relative to w_left.
        # n runs from -w_left ... +w_right (integer sample offsets).
        n = np.arange(M, dtype=float) - w_left
        denom = h.sum() if h.sum() != 0.0 else 1.0
        mu_samples = float(np.dot(n, h) / denom)  # may be fractional

        if np.isfinite(mu_samples) and mu_samples != 0.0:
            # Sub-sample compensation on the *x-axis* to keep alignment.
            # Positive mu_samples means the effective kernel center is to the right
            # (additional delay); compensate by advancing the output.
            dx = float(x[1] - x[0])  # uniform spacing guaranteed by your decorator
            x_shifted = x + mu_samples * dx
            # Interpolate with edge holding to maintain length and alignment
            y_conv = np.interp(x, x_shifted, y_conv, left=y_conv[0], right=y_conv[-1])

    return y_conv


def _psf_to_otf_1d(h: np.ndarray, L: int) -> np.ndarray:
    """Convert a centered 1D PSF h to an OTF (RFFT length L).

    The PSF center (index floor((M-1)/2)) is shifted to index 0 before FFT so that
    the convolution geometry matches 'same' with a centered kernel.

    Args:
        h: 1D convolution kernel (PSF).
        L: Length of the output OTF (RFFT length, power of two recommended).

    Returns:
        OTF as a 1D complex array of length L//2 + 1 (RFFT output).
    """
    M = h.size
    w_left = M // 2
    h0 = np.roll(h, -w_left)  # center -> index 0
    h_z = np.zeros(L, dtype=float)
    h_z[:M] = h0
    return np.fft.rfft(h_z)


@check_1d_arrays(x_evenly_spaced=True)
def deconvolve(
    x: np.ndarray,
    y: np.ndarray,
    h: np.ndarray,
    *,
    boundary: Literal["reflect", "symmetric", "edge", "wrap"] = "reflect",
    normalize_kernel_flag: bool = True,
    # regularized inverse with derivative prior (recommended):
    method: Literal["wiener", "fft"] = "wiener",
    reg: float = 5e-2,  # increase to reduce ringing (e.g. 5e-2, 1e-1)
    gain_max: float | None = 10.0,  # clamp max |gain| in frequency (None to disable)
    dc_lock: bool = True,  # force exact DC gain (preserve plateau)
    auto_scale: bool = True,  # auto-correct amplitude scaling after deconvolution
) -> np.ndarray:
    """Deconvolve a 1D signal with frequency-dependent regularization and DC lock.

    Strategy:
    1) Pad y with the same geometry as the ``convolve`` step (x-uniform grid).
    2) Build OTF ``H(f)`` from the centered PSF ``h``.
    3) Compute inverse filter:
        - ``wiener`` (recommended): ``H*(f) / (|H|^2 + reg * |D|^2)``, with
          ``|D|^2 = (2 sin(ω/2))^2`` (1st-derivative prior).
        - ``fft``: bare inverse ``1/H(f)`` (unstable; only for noise-free data).
        - Optionally clamp ``|G(f)| ≤ gain_max`` and lock DC gain.
    4) IFFT, then extract the central unpadded segment (``len == len(y)``).
    5) Optionally auto-scale the result to correct amplitude bias from regularization.

    Args:
        x: Strictly increasing, uniformly spaced axis (same length as y).
        y: Observed signal (result of ``y_true ⊛ h``, plus noise).
        h: Centered convolution kernel (PSF).
        boundary: Padding mode (should match your convolution).
        normalize_kernel_flag: If True, normalize ``h`` to preserve DC.
        method: ``"wiener"`` (regularized inverse) or ``"fft"`` (bare inverse).
        reg: Regularization strength for the derivative prior.
        gain_max: Optional clamp on ``|G(f)|`` to avoid wild amplification.
        dc_lock: If True, enforce exact DC gain (preserve mean/plateau).
        auto_scale: If True, auto-correct amplitude scaling after deconvolution.

    Returns:
        Deconvolved signal with the same length as y, x-aligned.
    """
    if x.ndim != 1 or y.ndim != 1 or h.ndim != 1:
        raise ValueError("`x`, `y`, and `h` must be 1D arrays.")
    if y.size == 0 or h.size == 0 or x.size != y.size:
        raise ValueError("Non-empty arrays required and `x` length must match `y`.")
    if y.size != h.size:
        raise ValueError("X data and Y data of the filter must have the same size.")
    if np.all(h == 0.0):
        raise ValueError("Filter is all zeros, cannot be used to deconvolve.")

    y = np.asarray(y, dtype=float)
    h = np.asarray(h, dtype=float)

    # Check if kernel normalization is requested
    if normalize_kernel_flag:
        h = normalize_kernel(h)

    M = int(h.size)
    if M == 1:
        return y.copy()  # normalized h == [1]

    # Padding identical to your convolve() geometry
    w_left = M // 2
    w_right = (M - 1) - w_left
    y_pad = np.pad(y, (w_left, w_right), mode=boundary)

    N = y.size
    Npad = y_pad.size  # N + (M - 1)

    # FFT size for linear convolution equivalence
    L_needed = Npad + M - 1
    L = 1 << int(np.ceil(np.log2(L_needed)))

    # Build spectra
    y_z = np.zeros(L, dtype=float)
    y_z[:Npad] = y_pad
    Y = np.fft.rfft(y_z)

    H = _psf_to_otf_1d(h, L)

    if method == "wiener":
        # Derivative prior: |D(ω)|^2 = (2 sin(ω/2))^2
        k = np.arange(H.size, dtype=float)
        omega = 2.0 * np.pi * k / L
        D2 = (2.0 * np.sin(omega / 2.0)) ** 2

        Hc = np.conjugate(H)
        H2 = (H * Hc).real
        denom = H2 + float(reg) * D2
        # Lock exact DC gain (avoid plateau bias)
        if dc_lock:
            denom[0] = H2[0]  # since D2[0] = 0, this already holds; keep explicit

        G = Hc / denom
    elif method == "fft":
        eps = 1e-12
        G = 1.0 / (H + eps)
    else:
        raise ValueError("Unknown method. Use 'wiener' or 'fft'.")

    # Clamp frequency gain (safety net against spikes)
    if gain_max is not None and gain_max > 0:
        mag = np.abs(G)
        too_big = mag > gain_max
        if np.any(too_big):
            G[too_big] *= gain_max / mag[too_big]

    X = Y * G
    y_true_pad = np.fft.irfft(X, n=L)[:Npad]

    # Extract central segment (same slicing as convolve)
    y_deconv = y_true_pad[w_left : w_left + N]

    # Auto-scale to correct amplitude bias from regularization
    if auto_scale and method == "wiener" and reg > 0:
        # Use energy conservation principle for scaling correction
        # The idea: compare input energy to output energy and adjust

        # Calculate RMS (root mean square) of input and output
        y_rms = np.sqrt(np.mean(y**2)) if len(y) > 0 else 0.0
        y_deconv_rms = np.sqrt(np.mean(y_deconv**2)) if len(y_deconv) > 0 else 0.0

        if y_rms > 1e-12 and y_deconv_rms > 1e-12:
            # Calculate the energy-based scaling factor
            energy_ratio = y_rms / y_deconv_rms

            # Apply scaling if the ratio is reasonable
            # (regularization typically reduces energy)
            if 0.5 < energy_ratio < 5.0:  # Conservative bounds
                y_deconv *= energy_ratio

    return y_deconv


@check_1d_arrays(x_evenly_spaced=True)
def brickwall_filter(
    x: np.ndarray,
    y: np.ndarray,
    mode: Literal["lowpass", "highpass", "bandpass", "bandstop"],
    cut0: float,
    cut1: float | None = None,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Apply an ideal frequency filter ("brick wall" filter) to a signal.

    Args:
        x: Time domain axis (evenly spaced).
        y: Signal values (same length as x).
        mode: Type of filter to apply.
        cut0: First cutoff frequency.
        cut1: Second cutoff frequency, required for band-pass and band-stop filters.

    Returns:
        Tuple (x, y_filtered), where y_filtered is the filtered signal.

    Raises:
        ValueError: If mode is unknown.
        ValueError: If cut0 is not positive.
        ValueError: If cut1 is missing for band-pass and band-stop filters.
        ValueError: If cut0 > cut1 for band-pass and band-stop filters.
    """
    if mode not in ("lowpass", "highpass", "bandpass", "bandstop"):
        raise ValueError(f"Unknown filter mode: {mode!r}")

    if cut0 <= 0.0:
        raise ValueError("Cutoff frequency must be positive.")

    if mode in ("bandpass", "bandstop"):
        if cut1 is None:
            raise ValueError(f"cut1 must be specified for mode '{mode}'")
        if cut0 > cut1:
            raise ValueError("cut0 must be less than or equal to cut1.")

    freqs, ffty = fft1d(x, y, shift=False)

    if mode == "lowpass":
        frequency_mask = np.abs(freqs) <= cut0
    elif mode == "highpass":
        frequency_mask = np.abs(freqs) >= cut0
    elif mode == "bandpass":
        frequency_mask = (np.abs(freqs) >= cut0) & (np.abs(freqs) <= cut1)
    else:  # bandstop
        frequency_mask = (np.abs(freqs) <= cut0) | (np.abs(freqs) >= cut1)

    ffty_filtered = ffty * frequency_mask
    _, y_filtered = ifft1d(freqs, ffty_filtered)
    y_filtered = y_filtered.real
    return x.copy(), y_filtered
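For orientation, a minimal usage sketch of the helpers added in this module, assuming it imports as sigima.tools.signal.fourier (matching the +466 entry in the file list above); the snippet is illustrative and not part of the diff:

import numpy as np

from sigima.tools.signal.fourier import brickwall_filter, magnitude_spectrum, psd

# 1 kHz sine sampled at 10 kHz (evenly spaced x, as the decorators require)
x = np.linspace(0.0, 0.1, 1000, endpoint=False)
y = np.sin(2 * np.pi * 1e3 * x) + 0.05 * np.random.default_rng(0).normal(size=x.size)

f, mag = magnitude_spectrum(x, y)    # shifted frequency axis and |FFT|
f_w, p_db = psd(x, y, decibel=True)  # Welch PSD in dB
_, y_lp = brickwall_filter(x, y, mode="lowpass", cut0=2e3)  # ideal low-pass at 2 kHz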
sigima/tools/signal/interpolation.py
@@ -0,0 +1,70 @@
# Copyright (c) DataLab Platform Developers, BSD 3-Clause license, see LICENSE file.

"""
.. Interpolation (see parent package :mod:`sigima.algorithms.signal`)

"""

from __future__ import annotations

import numpy as np
import scipy.interpolate

from sigima.enums import Interpolation1DMethod
from sigima.tools.checks import check_1d_arrays


@check_1d_arrays(x_sorted=True)
def interpolate(
    x: np.ndarray,
    y: np.ndarray,
    xnew: np.ndarray,
    method: Interpolation1DMethod,
    fill_value: float | None = None,
) -> np.ndarray:
    """Interpolate data.

    Args:
        x: X data
        y: Y data
        xnew: New X data
        method: Interpolation method
        fill_value: Fill value. Defaults to None.
            This value is used to fill in for requested points outside of the
            X data range. It is only used if the method argument is 'linear',
            'cubic' or 'pchip'.

    Returns:
        Interpolated Y data
    """
    interpolator_extrap = None
    if method == Interpolation1DMethod.LINEAR:
        # Linear interpolation using NumPy's interp function:
        ynew = np.interp(xnew, x, y, left=fill_value, right=fill_value)
    elif method == Interpolation1DMethod.SPLINE:
        # Spline using 1-D interpolation with SciPy's interpolate package:
        # pylint: disable=unbalanced-tuple-unpacking
        knots, coeffs, degree = scipy.interpolate.splrep(x, y, s=0)
        ynew = scipy.interpolate.splev(xnew, (knots, coeffs, degree), der=0)
    elif method == Interpolation1DMethod.QUADRATIC:
        # Quadratic interpolation using NumPy's polyval function:
        coeffs = np.polyfit(x, y, 2)
        ynew = np.polyval(coeffs, xnew)
    elif method == Interpolation1DMethod.CUBIC:
        # Cubic interpolation using SciPy's Akima1DInterpolator class:
        interpolator_extrap = scipy.interpolate.Akima1DInterpolator(x, y)
    elif method == Interpolation1DMethod.BARYCENTRIC:
        # Barycentric interpolation using SciPy's BarycentricInterpolator class:
        interpolator = scipy.interpolate.BarycentricInterpolator(x, y)
        ynew = interpolator(xnew)
    elif method == Interpolation1DMethod.PCHIP:
        # PCHIP interpolation using SciPy's PchipInterpolator class:
        interpolator_extrap = scipy.interpolate.PchipInterpolator(x, y)
    else:
        raise ValueError(f"Invalid interpolation method {method}")
    if interpolator_extrap is not None:
        ynew = interpolator_extrap(xnew, extrapolate=fill_value is None)
    if fill_value is not None:
        ynew[xnew < x[0]] = fill_value
        ynew[xnew > x[-1]] = fill_value
    return ynew
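A minimal sketch of the interpolate entry point, assuming the module imports as sigima.tools.signal.interpolation and that Interpolation1DMethod exposes the members referenced above; illustrative only, not part of the diff:

import numpy as np

from sigima.enums import Interpolation1DMethod
from sigima.tools.signal.interpolation import interpolate

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = x**2
xnew = np.linspace(-1.0, 5.0, 13)

# Linear interpolation; points outside [0, 4] are filled with 0.0
y_lin = interpolate(x, y, xnew, Interpolation1DMethod.LINEAR, fill_value=0.0)

# PCHIP interpolation; fill_value=None enables extrapolation instead
y_pchip = interpolate(x, y, xnew, Interpolation1DMethod.PCHIP)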
sigima/tools/signal/peakdetection.py
@@ -0,0 +1,126 @@
# Copyright (c) DataLab Platform Developers, BSD 3-Clause license, see LICENSE file.

"""
.. Peak Detection (see parent package :mod:`sigima.algorithms.signal`)

"""

from __future__ import annotations

import numpy as np

from sigima.tools.checks import check_1d_arrays


def peak_indices(
    y, thres: float = 0.3, min_dist: int = 1, thres_abs: bool = False
) -> np.ndarray:
    # Copyright (c) 2014 Lucas Hermann Negri
    # Unmodified code snippet from PeakUtils 1.3.0
    """Peak detection routine.

    Finds the numeric index of the peaks in *y* by taking its first order
    difference. By using *thres* and *min_dist* parameters, it is possible
    to reduce the number of detected peaks. *y* must be signed.

    Parameters
    ----------
    y : ndarray (signed)
        1D amplitude data to search for peaks.
    thres : float between [0., 1.]
        Normalized threshold. Only the peaks with amplitude higher than the
        threshold will be detected.
    min_dist : int
        Minimum distance between each detected peak. The peak with the highest
        amplitude is preferred to satisfy this constraint.
    thres_abs: boolean
        If True, the thres value will be interpreted as an absolute value,
        instead of a normalized threshold.

    Returns
    -------
    ndarray
        Array containing the numeric indices of the peaks that were detected
    """
    if isinstance(y, np.ndarray) and np.issubdtype(y.dtype, np.unsignedinteger):
        raise ValueError("y must be signed")

    if not thres_abs:
        thres = thres * (np.max(y) - np.min(y)) + np.min(y)

    # compute first order difference
    dy = np.diff(y)

    # propagate left and right values successively to fill all plateau pixels
    # (0-value)
    (zeros,) = np.where(dy == 0)

    # check if the signal is totally flat
    if len(zeros) == len(y) - 1:
        return np.array([])

    if len(zeros):
        # compute first order difference of zero indices
        zeros_diff = np.diff(zeros)
        # check when zeros are not chained together
        (zeros_diff_not_one,) = np.add(np.where(zeros_diff != 1), 1)
        # make an array of the chained zero indices
        zero_plateaus = np.split(zeros, zeros_diff_not_one)

        # fix if leftmost value in dy is zero
        if zero_plateaus[0][0] == 0:
            dy[zero_plateaus[0]] = dy[zero_plateaus[0][-1] + 1]
            zero_plateaus.pop(0)

        # fix if rightmost value of dy is zero
        if len(zero_plateaus) > 0 and zero_plateaus[-1][-1] == len(dy) - 1:
            dy[zero_plateaus[-1]] = dy[zero_plateaus[-1][0] - 1]
            zero_plateaus.pop(-1)

        # for each chain of zero indices
        for plateau in zero_plateaus:
            median = np.median(plateau)
            # set leftmost values to leftmost non zero values
            dy[plateau[plateau < median]] = dy[plateau[0] - 1]
            # set rightmost and middle values to rightmost non zero values
            dy[plateau[plateau >= median]] = dy[plateau[-1] + 1]

    # find the peaks by using the first order difference
    peaks = np.where(
        (np.hstack([dy, 0.0]) < 0.0)
        & (np.hstack([0.0, dy]) > 0.0)
        & (np.greater(y, thres))
    )[0]

    # handle multiple peaks, respecting the minimum distance
    if peaks.size > 1 and min_dist > 1:
        highest = peaks[np.argsort(y[peaks])][::-1]
        rem = np.ones(y.size, dtype=bool)
        rem[peaks] = False

        for peak in highest:
            if not rem[peak]:
                sl = slice(max(0, peak - min_dist), peak + min_dist + 1)
                rem[sl] = True
                rem[peak] = False

        peaks = np.arange(y.size)[~rem]

    return peaks


@check_1d_arrays
def xpeak(x: np.ndarray, y: np.ndarray) -> float:
    """Return default peak X-position (assuming a single peak).

    Args:
        x: X data
        y: Y data

    Returns:
        Peak X-position
    """
    peaks = peak_indices(y)
    if peaks.size == 1:
        return x[peaks[0]]
    return np.average(x, weights=y)
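A minimal sketch of the peak-detection helpers, assuming the module imports as sigima.tools.signal.peakdetection per the file list; illustrative only, not part of the diff:

import numpy as np

from sigima.tools.signal.peakdetection import peak_indices, xpeak

x = np.linspace(0.0, 10.0, 1001)
y = np.exp(-((x - 3.0) ** 2)) + 0.5 * np.exp(-((x - 7.0) ** 2) / 0.5)

idx = peak_indices(y, thres=0.3, min_dist=50)  # indices of the two local maxima
# x[idx] is approximately [3.0, 7.0]

# With a single dominant peak, xpeak returns the x-position of that sample
x0 = xpeak(x, np.exp(-((x - 3.0) ** 2)))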