ringdownanalysis 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ringdownanalysis/__init__.py +105 -0
- ringdownanalysis/analyzer.py +581 -0
- ringdownanalysis/batch_analyzer.py +737 -0
- ringdownanalysis/compat.py +140 -0
- ringdownanalysis/crlb.py +256 -0
- ringdownanalysis/data_loader.py +327 -0
- ringdownanalysis/estimators.py +782 -0
- ringdownanalysis/legacy_ring_down_mc.py +905 -0
- ringdownanalysis/monte_carlo.py +582 -0
- ringdownanalysis/plots.py +511 -0
- ringdownanalysis/signal.py +152 -0
- ringdownanalysis-0.1.0.dist-info/METADATA +453 -0
- ringdownanalysis-0.1.0.dist-info/RECORD +15 -0
- ringdownanalysis-0.1.0.dist-info/WHEEL +5 -0
- ringdownanalysis-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Ring-down analysis package for frequency estimation of ring-down signals.
|
|
3
|
+
|
|
4
|
+
This package provides both object-oriented and function-based APIs for:
|
|
5
|
+
- Ring-down signal generation
|
|
6
|
+
- Frequency estimation (NLS and DFT methods)
|
|
7
|
+
- CRLB calculation
|
|
8
|
+
- Monte Carlo analysis
|
|
9
|
+
- Real data analysis
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
|
|
14
|
+
from .analyzer import RingDownAnalyzer
|
|
15
|
+
from .batch_analyzer import BatchRingDownAnalyzer, ProcessResult
|
|
16
|
+
from .compat import (
|
|
17
|
+
crlb_var_f_ringdown_explicit,
|
|
18
|
+
db_to_lin,
|
|
19
|
+
estimate_freq_dft,
|
|
20
|
+
estimate_freq_dft_optimized,
|
|
21
|
+
estimate_freq_nls_ringdown,
|
|
22
|
+
generate_ringdown,
|
|
23
|
+
monte_carlo_analysis,
|
|
24
|
+
)
|
|
25
|
+
from .crlb import CRLBCalculator
|
|
26
|
+
from .data_loader import RingDownDataLoader
|
|
27
|
+
from .estimators import (
|
|
28
|
+
DFTFrequencyEstimator,
|
|
29
|
+
EstimationResult,
|
|
30
|
+
FrequencyEstimator,
|
|
31
|
+
NLSFrequencyEstimator,
|
|
32
|
+
)
|
|
33
|
+
from .monte_carlo import MonteCarloAnalyzer
|
|
34
|
+
from .plots import (
|
|
35
|
+
plot_aggregate_results,
|
|
36
|
+
plot_individual_results,
|
|
37
|
+
plot_performance_comparison,
|
|
38
|
+
plot_q_individual_results,
|
|
39
|
+
plot_q_performance_comparison,
|
|
40
|
+
)
|
|
41
|
+
from .signal import RingDownSignal
|
|
42
|
+
|
|
43
|
+
# Configure package logger.
# Per the stdlib logging convention for libraries, attach a NullHandler so the
# package emits nothing unless the application opts in (e.g. via
# configure_logging() or its own logging setup).
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.NullHandler())  # Default: no output unless configured
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def configure_logging(
    level: int = logging.INFO,
    format_string: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
) -> None:
    """
    Enable console logging for the ringdownanalysis package.

    The package installs a NullHandler by default, so nothing is printed until
    an application opts in. Calling this once at startup delegates to
    ``logging.basicConfig`` with the given level and format, which attaches a
    stream handler to the root logger (a no-op if the root logger already has
    handlers).

    Parameters:
    -----------
    level : int
        Logging level (default: logging.INFO)
    format_string : str
        Log message format (default includes timestamp, logger name, level, message)

    Example:
    --------
    >>> import logging
    >>> from ringdownanalysis import configure_logging, BatchRingDownAnalyzer
    >>> configure_logging(level=logging.INFO)
    >>> analyzer = BatchRingDownAnalyzer()
    >>> results = analyzer.process_directory("data")
    """
    logging.basicConfig(format=format_string, level=level)
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# Public API surface of the package; controls `from ringdownanalysis import *`
# and documents what is considered stable for external callers.
__all__ = [
    # Logging
    "configure_logging",
    # Classes
    "ProcessResult",
    "RingDownSignal",
    "FrequencyEstimator",
    "NLSFrequencyEstimator",
    "DFTFrequencyEstimator",
    "EstimationResult",
    "CRLBCalculator",
    "RingDownDataLoader",
    "RingDownAnalyzer",
    "MonteCarloAnalyzer",
    "BatchRingDownAnalyzer",
    # Compatibility functions
    "db_to_lin",
    "crlb_var_f_ringdown_explicit",
    "generate_ringdown",
    "estimate_freq_nls_ringdown",
    "estimate_freq_dft",
    "estimate_freq_dft_optimized",
    "monte_carlo_analysis",
    # Plotting functions
    "plot_individual_results",
    "plot_aggregate_results",
    "plot_performance_comparison",
    "plot_q_individual_results",
    "plot_q_performance_comparison",
]
|
|
@@ -0,0 +1,581 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Analysis pipeline for real ring-down measurement data.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
import numpy as np
|
|
11
|
+
from scipy.optimize import least_squares
|
|
12
|
+
|
|
13
|
+
from .crlb import CRLBCalculator
|
|
14
|
+
from .data_loader import RingDownDataLoader
|
|
15
|
+
from .estimators import (
|
|
16
|
+
DFTFrequencyEstimator,
|
|
17
|
+
NLSFrequencyEstimator,
|
|
18
|
+
_estimate_initial_parameters_from_dft,
|
|
19
|
+
_estimate_initial_tau_from_envelope,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
logger = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _parse_array_input(
|
|
26
|
+
t: np.ndarray | None = None,
|
|
27
|
+
data: np.ndarray | None = None,
|
|
28
|
+
fs: float | None = None,
|
|
29
|
+
*,
|
|
30
|
+
time_col: str | int = 0,
|
|
31
|
+
data_col: str | int = 1,
|
|
32
|
+
) -> tuple[np.ndarray, np.ndarray]:
|
|
33
|
+
"""
|
|
34
|
+
Parse array-like inputs into (t, data) numpy arrays.
|
|
35
|
+
|
|
36
|
+
Supports:
|
|
37
|
+
- (t, data): two array-likes
|
|
38
|
+
- (data, fs): data array and sampling frequency (t = arange(len(data))/fs)
|
|
39
|
+
- (data=DataFrame): extract t and data from columns via time_col, data_col
|
|
40
|
+
|
|
41
|
+
Returns:
|
|
42
|
+
--------
|
|
43
|
+
t : np.ndarray
|
|
44
|
+
Time array (s), starting from 0
|
|
45
|
+
data : np.ndarray
|
|
46
|
+
Signal array
|
|
47
|
+
"""
|
|
48
|
+
import pandas as pd
|
|
49
|
+
|
|
50
|
+
if data is None:
|
|
51
|
+
raise ValueError("data is required")
|
|
52
|
+
|
|
53
|
+
# DataFrame: extract time and signal columns
|
|
54
|
+
if isinstance(data, pd.DataFrame):
|
|
55
|
+
t_arr = np.asarray(
|
|
56
|
+
data.iloc[:, time_col] if isinstance(time_col, int) else data[time_col],
|
|
57
|
+
dtype=np.float64,
|
|
58
|
+
)
|
|
59
|
+
data_arr = np.asarray(
|
|
60
|
+
data.iloc[:, data_col] if isinstance(data_col, int) else data[data_col],
|
|
61
|
+
dtype=np.float64,
|
|
62
|
+
)
|
|
63
|
+
t_arr = t_arr - t_arr[0]
|
|
64
|
+
elif t is not None:
|
|
65
|
+
t_arr = np.asarray(t, dtype=np.float64)
|
|
66
|
+
data_arr = np.asarray(data, dtype=np.float64)
|
|
67
|
+
elif fs is not None:
|
|
68
|
+
data_arr = np.asarray(data, dtype=np.float64)
|
|
69
|
+
t_arr = np.arange(len(data_arr), dtype=np.float64) / fs
|
|
70
|
+
else:
|
|
71
|
+
raise ValueError("Either t or fs must be provided when data is not a DataFrame")
|
|
72
|
+
|
|
73
|
+
if len(t_arr) != len(data_arr):
|
|
74
|
+
raise ValueError(f"t and data must have same length, got {len(t_arr)} and {len(data_arr)}")
|
|
75
|
+
if len(t_arr) < 2:
|
|
76
|
+
raise ValueError("At least 2 samples required for analysis")
|
|
77
|
+
|
|
78
|
+
return t_arr, data_arr
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class RingDownAnalyzer:
|
|
82
|
+
"""
|
|
83
|
+
Analyzes real ring-down measurement data.
|
|
84
|
+
|
|
85
|
+
Performs the following pipeline:
|
|
86
|
+
1. Load data from file
|
|
87
|
+
2. Estimate tau from full data using NLS
|
|
88
|
+
3. Crop data to max_tau_multiplier*tau to avoid long noisy tail
|
|
89
|
+
4. Estimate frequency using NLS and DFT methods
|
|
90
|
+
5. Estimate noise parameters for CRLB calculation
|
|
91
|
+
6. Calculate CRLB
|
|
92
|
+
"""
|
|
93
|
+
|
|
94
|
+
def __init__(
|
|
95
|
+
self,
|
|
96
|
+
nls_estimator: NLSFrequencyEstimator | None = None,
|
|
97
|
+
dft_estimator: DFTFrequencyEstimator | None = None,
|
|
98
|
+
):
|
|
99
|
+
"""
|
|
100
|
+
Initialize analyzer.
|
|
101
|
+
|
|
102
|
+
Parameters:
|
|
103
|
+
-----------
|
|
104
|
+
nls_estimator : NLSFrequencyEstimator, optional
|
|
105
|
+
NLS frequency estimator. If None, creates default (tau unknown).
|
|
106
|
+
dft_estimator : DFTFrequencyEstimator, optional
|
|
107
|
+
DFT frequency estimator. If None, creates default (rectangular window).
|
|
108
|
+
"""
|
|
109
|
+
self.nls_estimator = nls_estimator or NLSFrequencyEstimator(tau_known=None)
|
|
110
|
+
self.dft_estimator = dft_estimator or DFTFrequencyEstimator(window="rect")
|
|
111
|
+
self.crlb_calc = CRLBCalculator()
|
|
112
|
+
|
|
113
|
+
    def estimate_tau(
        self,
        data: np.ndarray,
        t: np.ndarray,
        fs: float,
        initial_params: tuple | None = None,
    ) -> float:
        """
        Estimate tau from full data using NLS fit.

        Fits the 5-parameter model A0*exp(-t/tau)*cos(2*pi*f*t + phi) + c with
        scipy.optimize.least_squares. If the optimizer fails, or the fitted tau
        fails a sanity check, the envelope-based initial guess is returned
        instead, so this method always yields a usable value.

        Parameters:
        -----------
        data : np.ndarray
            Signal data
        t : np.ndarray
            Time array (s)
        fs : float
            Sampling frequency (Hz)
        initial_params : tuple, optional
            Cached (f0, phi0, A0, c) initial estimates; when None they are
            recomputed from the DFT of the data.

        Returns:
        --------
        float
            Estimated tau value in seconds
        """
        N = len(data)
        # Shift the time axis to start at 0 so tau and phase are well defined.
        t_norm = t - t[0]

        # Get initial parameter estimates (use cached if provided)
        if initial_params is not None:
            f0_init, phi0_init, A0_init, c0 = initial_params
        else:
            f0_init, phi0_init, A0_init, c0 = _estimate_initial_parameters_from_dft(data, fs)

        # Initial tau guess from envelope decay
        tau_init = _estimate_initial_tau_from_envelope(data, t_norm)

        # NLS fit to estimate tau: fit (A0, f, phi, tau, c)
        def residuals_tau(p):
            A0, f, phi, tau, c = p
            return (A0 * np.exp(-t_norm / tau) * np.cos(2.0 * np.pi * f * t_norm + phi) + c) - data

        # Confine f to a window around the DFT estimate: +/- max(20% of f0,
        # 2 DFT bins), clipped to [0, Nyquist].
        df = fs / N
        f_low = max(0.0, f0_init - max(0.2 * f0_init, 2 * df))
        f_high = min(0.5 * fs, f0_init + max(0.2 * f0_init, 2 * df))

        # Bounds in parameter order (A0, f, phi, tau, c); tau is constrained to
        # [one sample period, 10x the record length].
        lb = [0.0, f_low, -np.pi, t_norm[1], -np.inf]
        ub = [10.0 * A0_init, f_high, np.pi, 10.0 * t_norm[-1], np.inf]

        res_tau = least_squares(
            residuals_tau,
            x0=np.array([A0_init, f0_init, phi0_init, tau_init, c0]),
            bounds=(lb, ub),
            method="trf",
            ftol=1e-8,
            xtol=1e-8,
            gtol=1e-8,
            max_nfev=150,  # Optimized: typical convergence in 6-12 nfev, 150 provides safety margin
            verbose=0,
        )

        if res_tau.success:
            _, _, _, tau_est, _ = res_tau.x
            # Sanity check: reject non-finite, non-positive, or implausibly
            # short/long decay times and fall back to the envelope guess.
            if (
                tau_est <= 0
                or not np.isfinite(tau_est)
                or tau_est > 10.0 * t_norm[-1]
                or tau_est < t_norm[1]
            ):
                if logger.isEnabledFor(logging.WARNING):
                    logger.warning(
                        "tau_sanity_check_failed",
                        extra={
                            "event": "tau_sanity_check_failed",
                            "tau_est": float(tau_est),
                            "tau_init": float(tau_init),
                            "t_max": float(t_norm[-1]),
                        },
                    )
                return tau_init
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "tau_estimated",
                    extra={
                        "event": "tau_estimated",
                        "tau_est": float(tau_est),
                        "tau_init": float(tau_init),
                        "nfev": res_tau.nfev,
                    },
                )
            return tau_est
        else:
            # Optimizer did not converge; fall back to the envelope-based guess.
            if logger.isEnabledFor(logging.WARNING):
                logger.warning(
                    "tau_estimation_failed",
                    extra={
                        "event": "tau_estimation_failed",
                        "tau_init": float(tau_init),
                        "message": res_tau.message,
                        "nfev": res_tau.nfev,
                    },
                )
            return tau_init
|
|
216
|
+
|
|
217
|
+
def crop_data_to_tau(
|
|
218
|
+
self,
|
|
219
|
+
t: np.ndarray,
|
|
220
|
+
data: np.ndarray,
|
|
221
|
+
tau_est: float,
|
|
222
|
+
min_samples: int = 100,
|
|
223
|
+
max_tau_multiplier: float = 1.0,
|
|
224
|
+
) -> tuple[np.ndarray, np.ndarray]:
|
|
225
|
+
"""
|
|
226
|
+
Crop data to max_tau_multiplier*tau_est to avoid long noisy tail affecting frequency estimation.
|
|
227
|
+
|
|
228
|
+
Parameters:
|
|
229
|
+
-----------
|
|
230
|
+
t : np.ndarray
|
|
231
|
+
Time array
|
|
232
|
+
data : np.ndarray
|
|
233
|
+
Signal array
|
|
234
|
+
tau_est : float
|
|
235
|
+
Estimated tau value in seconds
|
|
236
|
+
min_samples : int
|
|
237
|
+
Minimum number of samples required. If cropped data is shorter, return original.
|
|
238
|
+
max_tau_multiplier : float
|
|
239
|
+
Multiplier for tau_est to determine maximum record length. Default is 1.0.
|
|
240
|
+
|
|
241
|
+
Returns:
|
|
242
|
+
--------
|
|
243
|
+
(t_crop, data_cropped) : tuple
|
|
244
|
+
Cropped time and signal arrays
|
|
245
|
+
"""
|
|
246
|
+
t_crop_max = max_tau_multiplier * tau_est
|
|
247
|
+
crop_idx = t <= t_crop_max
|
|
248
|
+
t_crop = t[crop_idx]
|
|
249
|
+
data_cropped = data[crop_idx]
|
|
250
|
+
|
|
251
|
+
# If cropped data is too short, return original
|
|
252
|
+
# Use views instead of copies when possible
|
|
253
|
+
if len(t_crop) < min_samples:
|
|
254
|
+
if logger.isEnabledFor(logging.WARNING):
|
|
255
|
+
logger.warning(
|
|
256
|
+
"data_crop_too_short",
|
|
257
|
+
extra={
|
|
258
|
+
"event": "data_crop_too_short",
|
|
259
|
+
"n_cropped": len(t_crop),
|
|
260
|
+
"min_samples": min_samples,
|
|
261
|
+
"tau_est": float(tau_est),
|
|
262
|
+
},
|
|
263
|
+
)
|
|
264
|
+
return t, data
|
|
265
|
+
|
|
266
|
+
if logger.isEnabledFor(logging.DEBUG):
|
|
267
|
+
logger.debug(
|
|
268
|
+
"data_cropped",
|
|
269
|
+
extra={
|
|
270
|
+
"event": "data_cropped",
|
|
271
|
+
"n_original": len(t),
|
|
272
|
+
"n_cropped": len(t_crop),
|
|
273
|
+
"tau_est": float(tau_est),
|
|
274
|
+
"crop_time": float(t_crop[-1]) if len(t_crop) > 0 else 0.0,
|
|
275
|
+
},
|
|
276
|
+
)
|
|
277
|
+
|
|
278
|
+
return t_crop, data_cropped
|
|
279
|
+
|
|
280
|
+
    def estimate_noise_parameters(
        self,
        data_cropped: np.ndarray,
        t_crop: np.ndarray,
        tau_est: float,
        fs: float,
        initial_params: tuple | None = None,
    ) -> tuple[float, float]:
        """
        Estimate A0 (initial amplitude) and sigma (noise std) from cropped data.

        Fits the 4-parameter model A0*exp(-t/tau_est)*cos(2*pi*f*t + phi) + c
        (tau held fixed at tau_est); sigma is taken as the std of the fit
        residuals. If the fit fails, sigma is estimated from the record tail
        and A0 from the RMS of the first portion.

        Parameters:
        -----------
        data_cropped : np.ndarray
            Cropped signal data
        t_crop : np.ndarray
            Cropped time array
        tau_est : float
            Estimated tau value in seconds (held fixed during the fit)
        fs : float
            Sampling frequency (Hz)
        initial_params : tuple, optional
            Cached (f0, phi0, A0, c) initial estimates; when None they are
            recomputed from the DFT of the cropped data.

        Returns:
        --------
        (A0_est, sigma_est) : tuple
            Estimated A0 and sigma
        """
        N_crop = len(data_cropped)
        t_crop_norm = t_crop - t_crop[0]

        # Initial estimate from first portion: a sinusoid of amplitude A has
        # std A/sqrt(2), hence the sqrt(2) factor on the sample std.
        n_init = min(1000, N_crop // 10)
        A0_est = np.sqrt(2.0) * np.std(data_cropped[:n_init])

        # Fit model to get residuals for noise estimation
        def model_residuals(p):
            A0, f, phi, c = p
            return (
                A0 * np.exp(-t_crop_norm / tau_est) * np.cos(2.0 * np.pi * f * t_crop_norm + phi)
                + c
            ) - data_cropped

        # Get initial guesses (use cached if provided)
        if initial_params is not None:
            f0_init, phi0_init, A0_init, c0 = initial_params
        else:
            f0_init, phi0_init, A0_init, c0 = _estimate_initial_parameters_from_dft(
                data_cropped, fs
            )

        # Quick fit to get residuals; f is confined to a window around the DFT
        # estimate (+/- max(20%, 2 bins)), clipped to [0, Nyquist].
        df = fs / N_crop
        f_low = max(0.0, f0_init - max(0.2 * f0_init, 2 * df))
        f_high = min(0.5 * fs, f0_init + max(0.2 * f0_init, 2 * df))

        res_fit = least_squares(
            model_residuals,
            x0=np.array([A0_init, f0_init, phi0_init, c0]),
            bounds=([0.0, f_low, -np.pi, -np.inf], [10.0 * A0_init, f_high, np.pi, np.inf]),
            method="trf",
            ftol=1e-6,
            max_nfev=100,  # Optimized: typical convergence in 5-10 nfev, 100 provides safety margin
            verbose=0,
        )

        if res_fit.success:
            residuals = res_fit.fun
            sigma_est = np.std(residuals)
            A0_est = res_fit.x[0]
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "noise_parameters_estimated",
                    extra={
                        "event": "noise_parameters_estimated",
                        "A0_est": float(A0_est),
                        "sigma_est": float(sigma_est),
                        "nfev": res_fit.nfev,
                    },
                )
        else:
            # Fallback: estimate noise from tail — the last 20% of the record
            # or the last 1000 samples, whichever is shorter — where the
            # ring-down has mostly decayed.
            if logger.isEnabledFor(logging.WARNING):
                logger.warning(
                    "noise_estimation_fallback",
                    extra={
                        "event": "noise_estimation_fallback",
                        "message": res_fit.message,
                        "nfev": res_fit.nfev,
                    },
                )
            tail_start = max(int(0.8 * len(data_cropped)), len(data_cropped) - 1000)
            sigma_est = np.std(data_cropped[tail_start:])
            A0_est = np.sqrt(2.0) * np.std(data_cropped[:n_init])

        return A0_est, sigma_est
|
|
375
|
+
|
|
376
|
+
    def _run_analysis_pipeline(
        self,
        t: np.ndarray,
        data: np.ndarray,
        fs: float,
        max_tau_multiplier: float,
    ) -> dict:
        """
        Run the full analysis pipeline on (t, data) arrays.

        Steps: estimate tau from the full record, crop to
        max_tau_multiplier * tau, estimate frequency with both NLS and DFT,
        estimate noise parameters, and evaluate the CRLB on the cropped record.

        Parameters:
        -----------
        t : np.ndarray
            Time array (s)
        data : np.ndarray
            Signal array
        fs : float
            Sampling frequency (Hz)
        max_tau_multiplier : float
            Multiplier for tau_est used when cropping the record.

        Returns:
        --------
        dict
            Results dictionary. "V2" is always None here; analyze_file fills
            it in for file-based inputs.
        """
        # DFT-based initial (f, phi, A, c) estimates, shared with the tau fit
        # to avoid recomputing them.
        initial_params_full = _estimate_initial_parameters_from_dft(data, fs)

        tau_est = self.estimate_tau(data, t, fs, initial_params=initial_params_full)

        t_crop, data_cropped = self.crop_data_to_tau(
            t, data, tau_est, min_samples=1000, max_tau_multiplier=max_tau_multiplier
        )

        # Guard against an over-aggressive crop: use the full record if fewer
        # than 1000 samples remain.
        min_samples_for_analysis = 1000
        if len(t_crop) < min_samples_for_analysis:
            t_crop = t
            data_cropped = data

        # Re-derive initial estimates on the cropped record for the frequency
        # and noise fits below.
        initial_params_cropped = _estimate_initial_parameters_from_dft(data_cropped, fs)

        result_nls = self.nls_estimator.estimate_full(
            data_cropped, fs, initial_params=initial_params_cropped
        )
        result_dft = self.dft_estimator.estimate_full(data_cropped, fs)

        f_nls = result_nls.f
        f_dft = result_dft.f
        Q_nls = result_nls.Q
        Q_dft = result_dft.Q
        tau_nls = result_nls.tau

        A0_est, sigma_est = self.estimate_noise_parameters(
            data_cropped, t_crop, tau_est, fs, initial_params=initial_params_cropped
        )

        # CRLB for the frequency estimate, evaluated on the cropped record.
        N_crop = len(data_cropped)
        crlb_var_f = self.crlb_calc.variance(A0_est, sigma_est, fs, N_crop, tau_est)
        crlb_std_f = np.sqrt(crlb_var_f) if np.isfinite(crlb_var_f) else np.inf

        return {
            "t": t,
            "data": data,
            "V2": None,
            "t_crop": t_crop,
            "data_cropped": data_cropped,
            "fs": fs,
            "tau_est": tau_est,
            "tau_nls": tau_nls,
            "f_nls": f_nls,
            "f_dft": f_dft,
            "Q_nls": Q_nls,
            "Q_dft": Q_dft,
            "A0_est": A0_est,
            "sigma_est": sigma_est,
            "crlb_std_f": crlb_std_f,
            "N": len(t),
            "N_crop": len(t_crop),
            "T": t[-1],
            "T_crop": t_crop[-1] if len(t_crop) > 0 else 0,
        }
|
|
439
|
+
|
|
440
|
+
    def analyze_array(
        self,
        t: np.ndarray | None = None,
        data: np.ndarray | None = None,
        fs: float | None = None,
        *,
        time_col: str | int = 0,
        data_col: str | int = 1,
        max_tau_multiplier: float = 1.0,
    ) -> dict:
        """
        Analyze ring-down data from numpy arrays or pandas Series/DataFrame.

        Parameters:
        -----------
        t : np.ndarray or pd.Series, optional
            Time array (s). Required unless data is a DataFrame or fs is provided.
        data : np.ndarray, pd.Series, or pd.DataFrame
            Signal data. Required.
        fs : float, optional
            Sampling frequency (Hz). If provided with t=None, time is inferred as
            t = np.arange(len(data)) / fs.
        time_col : str or int, optional
            Column index or name for time when data is a DataFrame. Default 0.
        data_col : str or int, optional
            Column index or name for signal when data is a DataFrame. Default 1.
        max_tau_multiplier : float
            Multiplier for tau_est when cropping data. Default 1.0.

        Returns:
        --------
        dict
            Results dictionary (same structure as analyze_file, minus filename/type).

        Raises:
        -------
        ValueError
            If data is invalid, lengths mismatch, or neither t nor fs provided.

        Examples:
        ---------
        >>> # From numpy arrays
        >>> result = analyzer.analyze_array(t, data)
        >>> # From data and sampling rate
        >>> result = analyzer.analyze_array(data=data, fs=1000.0)
        >>> # From pandas DataFrame
        >>> result = analyzer.analyze_array(data=df, time_col="time", data_col="phase")
        """
        t_arr, data_arr = _parse_array_input(
            t=t, data=data, fs=fs, time_col=time_col, data_col=data_col
        )

        # Re-derive fs from the (possibly synthesized) time axis so the
        # pipeline always sees a value consistent with t_arr, even if a
        # caller-supplied fs disagrees with the time stamps.
        fs = 1.0 / np.mean(np.diff(t_arr))

        if logger.isEnabledFor(logging.INFO):
            logger.info(
                "analysis_start",
                extra={
                    "event": "analysis_start",
                    "source": "array",
                    "n_samples": len(t_arr),
                },
            )

        result = self._run_analysis_pipeline(t_arr, data_arr, fs, max_tau_multiplier)

        if logger.isEnabledFor(logging.INFO):
            logger.info(
                "analysis_complete",
                extra={
                    "event": "analysis_complete",
                    "source": "array",
                    "f_nls": float(result["f_nls"]),
                    "f_dft": float(result["f_dft"]),
                    "tau_est": float(result["tau_est"]),
                    # CRLB may be inf when the variance is not finite; log None then.
                    "crlb_std_f": float(result["crlb_std_f"])
                    if np.isfinite(result["crlb_std_f"])
                    else None,
                },
            )

        return result
|
|
522
|
+
|
|
523
|
+
    def analyze_file(self, filepath: str, max_tau_multiplier: float = 1.0) -> dict:
        """
        Process a single data file and return analysis results.

        Loads the file via RingDownDataLoader, derives fs from the time stamps,
        and runs the standard pipeline; the result gains "filename", "type",
        and "V2" keys on top of what _run_analysis_pipeline returns.

        Parameters:
        -----------
        filepath : str
            Path to the data file
        max_tau_multiplier : float
            Multiplier for tau_est to determine maximum record length when cropping data.
            Default is 1.0.

        Returns:
        --------
        dict
            Results dictionary with all analysis data

        Raises:
        -------
        FileNotFoundError
            If the file does not exist
        ValueError
            If the file format is unsupported, data is invalid (e.g., empty CSV,
            malformed MAT structure), or file exceeds size limit
        """
        if logger.isEnabledFor(logging.INFO):
            logger.info(
                "analysis_start",
                extra={
                    "event": "analysis_start",
                    "filepath": str(filepath),
                },
            )

        t, data, V2, file_type = RingDownDataLoader.load(filepath)
        # Sampling frequency from the mean sample spacing of the loaded time axis.
        fs = 1.0 / np.mean(np.diff(t))

        result = self._run_analysis_pipeline(t, data, fs, max_tau_multiplier)

        # File-specific metadata on top of the pipeline output.
        result["filename"] = Path(filepath).name
        result["type"] = file_type
        result["V2"] = V2

        if logger.isEnabledFor(logging.INFO):
            logger.info(
                "analysis_complete",
                extra={
                    "event": "analysis_complete",
                    "filepath": str(filepath),
                    "f_nls": float(result["f_nls"]),
                    "f_dft": float(result["f_dft"]),
                    "tau_est": float(result["tau_est"]),
                    # CRLB may be inf when the variance is not finite; log None then.
                    "crlb_std_f": float(result["crlb_std_f"])
                    if np.isfinite(result["crlb_std_f"])
                    else None,
                },
            )

        return result
|