oscura-0.6.0-py3-none-any.whl → oscura-0.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +1 -1
- oscura/analyzers/eye/__init__.py +5 -1
- oscura/analyzers/eye/generation.py +501 -0
- oscura/analyzers/jitter/__init__.py +6 -6
- oscura/analyzers/jitter/timing.py +419 -0
- oscura/analyzers/patterns/__init__.py +28 -0
- oscura/analyzers/patterns/reverse_engineering.py +991 -0
- oscura/analyzers/power/__init__.py +35 -12
- oscura/analyzers/statistics/__init__.py +4 -0
- oscura/analyzers/statistics/basic.py +149 -0
- oscura/analyzers/statistics/correlation.py +47 -6
- oscura/analyzers/waveform/__init__.py +2 -0
- oscura/analyzers/waveform/measurements.py +145 -23
- oscura/analyzers/waveform/spectral.py +361 -8
- oscura/automotive/__init__.py +1 -1
- oscura/core/config/loader.py +0 -1
- oscura/core/types.py +108 -0
- oscura/loaders/__init__.py +12 -4
- oscura/loaders/tss.py +456 -0
- oscura/reporting/__init__.py +88 -1
- oscura/reporting/automation.py +348 -0
- oscura/reporting/citations.py +374 -0
- oscura/reporting/core.py +54 -0
- oscura/reporting/formatting/__init__.py +11 -0
- oscura/reporting/formatting/measurements.py +279 -0
- oscura/reporting/html.py +57 -0
- oscura/reporting/interpretation.py +431 -0
- oscura/reporting/summary.py +329 -0
- oscura/reporting/visualization.py +542 -0
- oscura/visualization/__init__.py +2 -1
- oscura/visualization/batch.py +521 -0
- oscura/workflows/__init__.py +2 -0
- oscura/workflows/waveform.py +783 -0
- {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/METADATA +37 -19
- {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/RECORD +38 -26
- {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/WHEEL +0 -0
- {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/entry_points.txt +0 -0
- {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/licenses/LICENSE +0 -0
oscura/analyzers/power/__init__.py
@@ -1,12 +1,13 @@
-"""Power analysis module for Oscura.
+"""Power analysis module for Oscura - IEEE 1459-2010 compliant.
 
 Provides comprehensive power analysis capabilities including:
-- Basic power measurements (instantaneous, average, RMS, peak)
-- AC power analysis (reactive, apparent, power factor)
+- Basic power measurements (instantaneous, average, RMS, peak) per IEEE 1459
+- AC power analysis (reactive, apparent, power factor, harmonics) per IEEE 1459
 - Switching loss analysis for power electronics
+- Conduction loss analysis (MOSFET, IGBT, diode)
 - Safe Operating Area (SOA) analysis
-- Ripple measurement
-- Efficiency calculations
+- Ripple measurement and analysis
+- Efficiency calculations (single and multi-output)
 
 
 Example:
@@ -14,6 +15,9 @@ Example:
     >>> power_trace = instantaneous_power(voltage_trace, current_trace)
     >>> stats = power_statistics(power_trace)
     >>> print(f"Average power: {stats['average']:.2f} W")
+
+References:
+    IEEE 1459-2010: Standard for Power Quality Definitions
 """
 
 from oscura.analyzers.power.ac_power import (
@@ -23,6 +27,7 @@ from oscura.analyzers.power.ac_power import (
     phase_angle,
     power_factor,
     reactive_power,
+    three_phase_power,
     total_harmonic_distortion_power,
 )
 from oscura.analyzers.power.basic import (
@@ -30,39 +35,51 @@ from oscura.analyzers.power.basic import (
     energy,
     instantaneous_power,
     peak_power,
+    power_profile,
     power_statistics,
     rms_power,
 )
 from oscura.analyzers.power.conduction import (
     conduction_loss,
+    diode_conduction_loss,
     duty_cycle_weighted_loss,
     forward_voltage,
+    igbt_conduction_loss,
     mosfet_conduction_loss,
     on_resistance,
+    temperature_derating,
 )
 from oscura.analyzers.power.efficiency import (
     efficiency,
+    efficiency_vs_load,
+    loss_breakdown,
     multi_output_efficiency,
     power_conversion_efficiency,
+    thermal_efficiency,
 )
 from oscura.analyzers.power.ripple import (
     extract_ripple,
     ripple,
+    ripple_envelope,
     ripple_frequency,
+    ripple_harmonics,
     ripple_percentage,
     ripple_statistics,
 )
 from oscura.analyzers.power.soa import (
     SOALimit,
+    SOAViolation,
     check_soa_violations,
     create_mosfet_soa,
     plot_soa,
     soa_analysis,
 )
 from oscura.analyzers.power.switching import (
+    SwitchingEvent,
     switching_energy,
     switching_frequency,
     switching_loss,
+    switching_times,
     total_switching_loss,
     turn_off_loss,
     turn_on_loss,
@@ -70,22 +87,25 @@ from oscura.analyzers.power.switching import (
 
 __all__ = [
     "SOALimit",
+    "SOAViolation",
+    "SwitchingEvent",
     "apparent_power",
     "average_power",
     "check_soa_violations",
-    # Conduction
     "conduction_loss",
     "create_mosfet_soa",
+    "diode_conduction_loss",
     "displacement_power_factor",
     "distortion_power_factor",
     "duty_cycle_weighted_loss",
-    # Efficiency
     "efficiency",
+    "efficiency_vs_load",
     "energy",
     "extract_ripple",
     "forward_voltage",
-
+    "igbt_conduction_loss",
     "instantaneous_power",
+    "loss_breakdown",
     "mosfet_conduction_loss",
     "multi_output_efficiency",
     "on_resistance",
@@ -94,21 +114,24 @@ __all__ = [
     "plot_soa",
     "power_conversion_efficiency",
     "power_factor",
+    "power_profile",
     "power_statistics",
-    # AC power
     "reactive_power",
-    # Ripple
     "ripple",
+    "ripple_envelope",
     "ripple_frequency",
+    "ripple_harmonics",
     "ripple_percentage",
     "ripple_statistics",
     "rms_power",
-    # SOA
     "soa_analysis",
     "switching_energy",
     "switching_frequency",
-    # Switching
     "switching_loss",
+    "switching_times",
+    "temperature_derating",
+    "thermal_efficiency",
+    "three_phase_power",
     "total_harmonic_distortion_power",
     "total_switching_loss",
     "turn_off_loss",
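The power hunks above only widen the module's public surface; the documented workflow itself is unchanged. For context, here is a minimal standalone sketch of the quantities the docstring now ties to IEEE 1459, computed per the textbook definitions on a synthetic 50 Hz waveform pair. It deliberately uses plain NumPy rather than oscura's `WaveformTrace` type, so nothing here depends on package internals; all signal parameters are illustrative.

```python
import numpy as np

# Synthetic 50 Hz voltage/current pair: ten full cycles at 10 kS/s.
fs = 10_000.0
t = np.arange(0, 0.2, 1 / fs)
v = 325.0 * np.sin(2 * np.pi * 50 * t)                # mains-like voltage [V]
i = 10.0 * np.sin(2 * np.pi * 50 * t - 0.5)           # current lagging 0.5 rad

p = v * i                                             # instantaneous power [W]
p_avg = p.mean()                                      # active (average) power
s = np.sqrt(np.mean(v**2)) * np.sqrt(np.mean(i**2))   # apparent power = Vrms * Irms
pf = p_avg / s                                        # power factor, ~cos(0.5) ~ 0.878

print(f"P = {p_avg:.0f} W, S = {s:.0f} VA, PF = {pf:.3f}")
```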
oscura/analyzers/statistics/__init__.py
@@ -20,11 +20,13 @@ from oscura.analyzers.statistics.advanced import (
 )
 from oscura.analyzers.statistics.basic import (
     basic_stats,
+    measure,
     percentiles,
     quartiles,
     running_stats,
     summary_stats,
     weighted_mean,
+    weighted_std,
 )
 from oscura.analyzers.statistics.correlation import (
     CrossCorrelationResult,
@@ -99,6 +101,7 @@ __all__ = [
     "kernel_density",
     # Advanced (STAT-012)
     "local_outlier_factor",
+    "measure",
     "modified_zscore_outliers",
     "moment",
     "moving_average",
@@ -114,6 +117,7 @@ __all__ = [
     "seasonal_decompose",
     "summary_stats",
     "weighted_mean",
+    "weighted_std",
     # Outlier detection
     "zscore_outliers",
 ]
oscura/analyzers/statistics/basic.py
@@ -171,6 +171,75 @@ def weighted_mean(
     return float(np.average(data, weights=weights))
 
 
+def weighted_std(
+    trace: WaveformTrace | NDArray[np.floating[Any]],
+    weights: NDArray[np.floating[Any]] | None = None,
+    *,
+    ddof: int = 0,
+) -> float:
+    """Compute weighted standard deviation.
+
+    Uses the reliability weights formula for weighted variance.
+
+    Args:
+        trace: Input trace or numpy array.
+        weights: Weight array (same length as data). If None, equal weights (unweighted std).
+        ddof: Delta degrees of freedom for bias correction (default 0).
+            - ddof=0: Maximum likelihood estimate (biased)
+            - ddof=1: Sample standard deviation (unbiased for normal distribution)
+
+    Returns:
+        Weighted standard deviation.
+
+    Raises:
+        ValueError: If weights and data have different lengths.
+        ValueError: If weights contain negative values.
+
+    Example:
+        >>> weights = np.linspace(0.5, 1.0, len(trace.data))
+        >>> wstd = weighted_std(trace, weights)
+        >>> print(f"Weighted std: {wstd:.6f}")
+
+        >>> # Sample standard deviation (Bessel's correction)
+        >>> wstd_unbiased = weighted_std(trace, weights, ddof=1)
+
+    References:
+        Wikipedia: Weighted arithmetic mean
+        https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
+    """
+    data = trace.data if isinstance(trace, WaveformTrace) else trace
+
+    if weights is None:
+        return float(np.std(data, ddof=ddof))
+
+    if len(weights) != len(data):
+        raise ValueError(f"Weights and data must have same length: {len(weights)} != {len(data)}")
+
+    if np.any(weights < 0):
+        raise ValueError("Weights must be non-negative")
+
+    # Handle edge cases
+    if len(data) == 0:
+        return float("nan")
+
+    if len(data) == 1:
+        return 0.0
+
+    # Compute weighted mean
+    w_sum = np.sum(weights)
+    if w_sum <= 0:
+        return float("nan")
+
+    w_mean = np.sum(weights * data) / w_sum
+
+    # Compute weighted variance with bias correction
+    # Reliability weights formula: var = sum(w * (x - mean)^2) / (sum(w) - ddof)
+    weighted_sq_deviations = weights * (data - w_mean) ** 2
+    variance = np.sum(weighted_sq_deviations) / (w_sum - ddof) if w_sum > ddof else 0.0
+
+    return float(np.sqrt(max(0.0, variance)))
+
+
 def running_stats(
     trace: WaveformTrace | NDArray[np.floating[Any]],
     window_size: int,
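A short usage sketch for the new `weighted_std`. Its signature and edge cases are fully visible in the hunk above, and the re-export from `oscura.analyzers.statistics` is shown in the `__init__` hunk earlier; the settling-transient framing is just an illustrative use case.

```python
import numpy as np
from oscura.analyzers.statistics import weighted_std  # new in 0.8.0

rng = np.random.default_rng(0)
data = rng.normal(loc=1.0, scale=0.2, size=1000)

# With uniform weights the default ddof=0 result must match plain numpy.
assert np.isclose(weighted_std(data, np.ones_like(data)), np.std(data))

# Down-weight early samples, e.g. to de-emphasise a settling transient.
weights = np.linspace(0.5, 1.0, data.size)
print(f"weighted std:             {weighted_std(data, weights):.4f}")
print(f"with Bessel's correction: {weighted_std(data, weights, ddof=1):.4f}")
```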
@@ -253,11 +322,91 @@ def summary_stats(
     return basic
 
 
+def measure(
+    trace: WaveformTrace | NDArray[np.floating[Any]],
+    *,
+    parameters: list[str] | None = None,
+    include_units: bool = True,
+) -> dict[str, Any]:
+    """Compute statistical measurements with consistent format.
+
+    Unified function matching the API pattern of waveform.measure() and spectral.measure().
+    Returns measurements with units for easy formatting and display.
+
+    Args:
+        trace: Input trace or numpy array.
+        parameters: List of measurement names to compute. If None, compute all.
+            Valid names: mean, variance, std, min, max, range, count, p1, p5, p25, p50, p75, p95, p99
+        include_units: If True, return {value, unit} dicts. If False, return flat values.
+
+    Returns:
+        Dictionary mapping measurement names to values (with units if requested).
+
+    Example:
+        >>> from oscura.analyzers.statistics import measure
+        >>> results = measure(trace)
+        >>> print(f"Mean: {results['mean']['value']} {results['mean']['unit']}")
+        >>> print(f"Std: {results['std']['value']} {results['std']['unit']}")
+
+        >>> # Get specific measurements only
+        >>> results = measure(trace, parameters=["mean", "std"])
+
+        >>> # Get flat values without units
+        >>> results = measure(trace, include_units=False)
+        >>> mean_value = results["mean"]  # Just the float
+    """
+    data = trace.data if isinstance(trace, WaveformTrace) else trace
+
+    # Define unit mappings for statistical measurements
+    # For generic signals we use voltage units, but this could be parameterized
+    unit_map = {
+        "mean": "V",
+        "variance": "V²",
+        "std": "V",
+        "min": "V",
+        "max": "V",
+        "range": "dimensionless",
+        "count": "samples",
+        "p1": "dimensionless",
+        "p5": "dimensionless",
+        "p25": "dimensionless",
+        "p50": "dimensionless",
+        "p75": "dimensionless",
+        "p95": "dimensionless",
+        "p99": "dimensionless",
+    }
+
+    # Get basic stats
+    basic = basic_stats(trace)
+
+    # Get percentiles
+    percentile_values = percentiles(data, [1, 5, 25, 50, 75, 95, 99])
+
+    # Combine into single dict
+    all_measurements = {**basic, **percentile_values}
+
+    # Select requested measurements or all
+    if parameters is not None:
+        all_measurements = {k: v for k, v in all_measurements.items() if k in parameters}
+
+    # Format results
+    if include_units:
+        results = {}
+        for name, value in all_measurements.items():
+            unit = unit_map.get(name, "")
+            results[name] = {"value": value, "unit": unit}
+        return results
+    else:
+        return all_measurements
+
+
 __all__ = [
     "basic_stats",
+    "measure",
     "percentiles",
     "quartiles",
     "running_stats",
     "summary_stats",
     "weighted_mean",
+    "weighted_std",
 ]
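And a sketch of the new statistics `measure()` entry point. The `{value, unit}` envelope and the `parameters`/`include_units` switches are exactly as in the hunk above; the specific result keys assume `basic_stats` returns the names listed in the unit map (`mean`, `std`, `max`, ...), which this diff does not show directly.

```python
import numpy as np
from oscura.analyzers.statistics import measure  # new in 0.8.0

data = np.sin(np.linspace(0, 4 * np.pi, 500))

# Default: {name: {"value": ..., "unit": ...}}, ready for report formatting.
results = measure(data, parameters=["mean", "std", "p50"])
for name, entry in results.items():
    print(f"{name}: {entry['value']:.4f} {entry['unit']}")

# Flat floats when units are not wanted.
flat = measure(data, include_units=False)
print(flat["max"])
```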
oscura/analyzers/statistics/correlation.py
@@ -338,24 +338,48 @@ def cross_correlation(
 def correlation_coefficient(
     trace1: WaveformTrace | NDArray[np.floating[Any]],
     trace2: WaveformTrace | NDArray[np.floating[Any]],
+    *,
+    method: Literal["pearson", "spearman", "kendall"] = "pearson",
 ) -> float:
-    """Compute
+    """Compute correlation coefficient between two signals.
 
-
+    Supports Pearson (linear), Spearman (monotonic), and Kendall (rank) correlations.
 
     Args:
         trace1: First input trace or numpy array.
         trace2: Second input trace or numpy array.
+        method: Correlation method to use:
+            - "pearson": Linear correlation (default, parametric)
+            - "spearman": Monotonic correlation (non-parametric, robust to outliers)
+            - "kendall": Rank correlation (non-parametric, tau-b coefficient)
 
     Returns:
         Correlation coefficient in range [-1, 1].
 
+    Raises:
+        ValueError: If method is not one of the supported types.
+
     Example:
+        >>> # Linear correlation (default)
         >>> r = correlation_coefficient(trace1, trace2)
-        >>> print(f"
+        >>> print(f"Pearson correlation: {r:.3f}")
+
+        >>> # Monotonic correlation (robust to outliers)
+        >>> rho = correlation_coefficient(trace1, trace2, method="spearman")
+        >>> print(f"Spearman correlation: {rho:.3f}")
+
+        >>> # Rank correlation (best for ordinal data)
+        >>> tau = correlation_coefficient(trace1, trace2, method="kendall")
+        >>> print(f"Kendall correlation: {tau:.3f}")
+
+    References:
+        Pearson, K. (1895). Correlation coefficient
+        Spearman, C. (1904). Rank correlation
+        Kendall, M. G. (1938). Tau rank correlation
     """
-
+    from scipy import stats as sp_stats
 
+    data1 = trace1.data if isinstance(trace1, WaveformTrace) else trace1
     data2 = trace2.data if isinstance(trace2, WaveformTrace) else trace2
 
     # Ensure same length
@@ -363,8 +387,25 @@ def correlation_coefficient(
     data1 = data1[:n]
     data2 = data2[:n]
 
-    # Compute correlation
-
+    # Compute correlation based on method
+    if method == "pearson":
+        # Pearson linear correlation (parametric)
+        return float(np.corrcoef(data1, data2)[0, 1])
+
+    elif method == "spearman":
+        # Spearman rank correlation (non-parametric, monotonic)
+        corr, _p_value = sp_stats.spearmanr(data1, data2)
+        return float(corr)
+
+    elif method == "kendall":
+        # Kendall tau-b rank correlation (non-parametric)
+        corr, _p_value = sp_stats.kendalltau(data1, data2)
+        return float(corr)
+
+    else:
+        raise ValueError(
+            f"Unknown correlation method: {method}. Available: 'pearson', 'spearman', 'kendall'"
+        )
 
 
 def _extract_periodicity_data(
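Why the new `method` parameter earns its keep: for a monotonic but nonlinear relationship, Pearson under-reports the association while the rank-based methods saturate at 1.0. The snippet below replays the three dispatch branches directly with NumPy/SciPy, independent of oscura; the exponential test data is illustrative.

```python
import numpy as np
from scipy import stats as sp_stats

x = np.linspace(0.1, 10.0, 200)
y = np.exp(x)  # strictly increasing, strongly nonlinear

pearson = float(np.corrcoef(x, y)[0, 1])    # the "pearson" branch above
spearman, _ = sp_stats.spearmanr(x, y)      # the "spearman" branch above
kendall, _ = sp_stats.kendalltau(x, y)      # the "kendall" branch above

print(f"pearson={pearson:.3f}  spearman={spearman:.3f}  kendall={kendall:.3f}")
# pearson lands well below 1.0; spearman and kendall are exactly 1.0
```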
oscura/analyzers/waveform/__init__.py
@@ -4,6 +4,7 @@ Provides timing and amplitude measurements for analog waveforms.
 """
 
 from oscura.analyzers.waveform.measurements import (
+    MEASUREMENT_METADATA,
     amplitude,
     duty_cycle,
     fall_time,
@@ -20,6 +21,7 @@ from oscura.analyzers.waveform.measurements import (
 )
 
 __all__ = [
+    "MEASUREMENT_METADATA",
     "amplitude",
     "duty_cycle",
     "fall_time",
oscura/analyzers/waveform/measurements.py
@@ -28,6 +28,47 @@ if TYPE_CHECKING:
     from oscura.core.types import WaveformTrace
 
 
+# Measurement metadata: unit information for all waveform measurements
+MEASUREMENT_METADATA: dict[str, dict[str, str]] = {
+    # Time-domain measurements
+    "rise_time": {"unit": "s", "description": "Rise time (10%-90%)"},
+    "fall_time": {"unit": "s", "description": "Fall time (90%-10%)"},
+    "period": {"unit": "s", "description": "Signal period"},
+    "pulse_width": {"unit": "s", "description": "Pulse width"},
+    "jitter": {"unit": "s", "description": "Period jitter"},
+    # Frequency measurements
+    "frequency": {"unit": "Hz", "description": "Signal frequency"},
+    "clock_frequency": {"unit": "Hz", "description": "Clock frequency"},
+    "dominant_freq": {"unit": "Hz", "description": "Dominant frequency"},
+    # Voltage measurements
+    "amplitude": {"unit": "V", "description": "Peak-to-peak amplitude"},
+    "mean": {"unit": "V", "description": "Mean voltage"},
+    "rms": {"unit": "V", "description": "RMS voltage"},
+    "threshold": {"unit": "V", "description": "Logic threshold"},
+    "min": {"unit": "V", "description": "Minimum voltage"},
+    "max": {"unit": "V", "description": "Maximum voltage"},
+    "std": {"unit": "V", "description": "Standard deviation"},
+    "median": {"unit": "V", "description": "Median voltage"},
+    # Ratio measurements (0-1, displayed as percentage)
+    "duty_cycle": {"unit": "ratio", "description": "Duty cycle"},
+    # Percentage measurements (already 0-100)
+    "overshoot": {"unit": "%", "description": "Overshoot percentage"},
+    "undershoot": {"unit": "%", "description": "Undershoot percentage"},
+    "thd": {"unit": "%", "description": "Total harmonic distortion"},
+    # Decibel measurements
+    "snr": {"unit": "dB", "description": "Signal-to-noise ratio"},
+    "sinad": {"unit": "dB", "description": "SINAD"},
+    "sfdr": {"unit": "dB", "description": "Spurious-free dynamic range"},
+    # Dimensionless measurements
+    "enob": {"unit": "", "description": "Effective number of bits"},
+    "rising_edges": {"unit": "", "description": "Rising edge count"},
+    "falling_edges": {"unit": "", "description": "Falling edge count"},
+    "outliers": {"unit": "", "description": "Outlier count"},
+    # Statistical measurements (squared units)
+    "variance": {"unit": "V²", "description": "Variance"},
+}
+
+
 def rise_time(
     trace: WaveformTrace,
     *,
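The new `MEASUREMENT_METADATA` table is plain data, so the obvious consumer is display code. A small sketch follows; the raw values are made-up examples, while the dict layout and the re-export from `oscura.analyzers.waveform` are exactly as shown in the hunks above.

```python
from oscura.analyzers.waveform import MEASUREMENT_METADATA  # new in 0.8.0

raw = {"rise_time": 2.4e-9, "frequency": 100e6, "duty_cycle": 0.48}  # example values
for name, value in raw.items():
    meta = MEASUREMENT_METADATA.get(name, {"unit": "", "description": name})
    print(f"{meta['description']}: {value} {meta['unit']}")
```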
@@ -255,12 +296,14 @@ def frequency(
     """Measure signal frequency.
 
     Computes frequency either from edge-to-edge period or using FFT.
+    The "edge" method automatically falls back to FFT if edge detection fails
+    (e.g., for sine or triangle waves without clear rising edges).
 
     Args:
         trace: Input waveform trace.
         method: Measurement method:
-            - "edge": 1/period from edge timing (default
-            - "fft": Peak of FFT magnitude spectrum
+            - "edge": 1/period from edge timing with automatic FFT fallback (default)
+            - "fft": Peak of FFT magnitude spectrum (always use FFT)
 
     Returns:
         Frequency in Hz, or np.nan if measurement not possible.
@@ -272,32 +315,65 @@ def frequency(
         >>> f = frequency(trace)
         >>> print(f"Frequency: {f / 1e6:.3f} MHz")
 
+        >>> # Force FFT method for smooth waveforms
+        >>> f = frequency(trace, method="fft")
+
     References:
         IEEE 181-2011 Section 5.3
     """
     if method == "edge":
+        # Try edge detection first (faster and more accurate for square waves)
         T = period(trace, edge_type="rising", return_all=False)
+
+        # Fall back to FFT if edge detection fails
         if np.isnan(T) or T <= 0:
-
+            # Try FFT fallback for smooth waveforms (sine, triangle)
+            return _frequency_fft(trace)
+
         return 1.0 / T
 
     elif method == "fft":
-
-
+        return _frequency_fft(trace)
+
+    else:
+        raise ValueError(f"Unknown method: {method}")
 
-    data = trace.data - np.mean(trace.data)  # Remove DC
-    n = len(data)
-    fft_mag = np.abs(np.fft.rfft(data))
 
-
-
+def _frequency_fft(trace: WaveformTrace) -> float | np_floating[Any]:
+    """Compute frequency using FFT peak detection.
 
-
-    freq_resolution = trace.metadata.sample_rate / n
-    return float(peak_idx * freq_resolution)
+    Internal helper function for FFT-based frequency measurement.
 
-
-
+    Args:
+        trace: Input waveform trace.
+
+    Returns:
+        Frequency in Hz, or np.nan if measurement not possible.
+    """
+    if len(trace.data) < 16:
+        return np.nan
+
+    # Remove DC offset before FFT
+    data = trace.data - np.mean(trace.data)
+
+    # Check if signal is essentially constant (DC only)
+    if np.std(data) < 1e-10:
+        return np.nan
+
+    n = len(data)
+    fft_mag = np.abs(np.fft.rfft(data))
+
+    # Find peak (skip DC component at index 0)
+    peak_idx = np.argmax(fft_mag[1:]) + 1
+
+    # Verify peak is significant (SNR check)
+    # If the peak is not at least 3x the mean, it's likely noise
+    if fft_mag[peak_idx] < 3.0 * np.mean(fft_mag[1:]):
+        return np.nan
+
+    # Calculate frequency from peak index
+    freq_resolution = trace.metadata.sample_rate / n
+    return float(peak_idx * freq_resolution)
 
 
 def duty_cycle(
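The `_frequency_fft` helper above is self-contained enough to replay outside the package. Here is a standalone NumPy version of the same steps on a synthetic 12.5 kHz sine, exactly the kind of smooth waveform the edge method struggles with; the sample rate and length are illustrative.

```python
import numpy as np

fs = 1e6                                    # sample rate [Hz]
n = 4096
t = np.arange(n) / fs
sig = np.sin(2 * np.pi * 12_500 * t)        # 12.5 kHz sine, no clean edges

data = sig - np.mean(sig)                   # remove DC, as in the helper
fft_mag = np.abs(np.fft.rfft(data))
peak_idx = int(np.argmax(fft_mag[1:])) + 1  # skip the DC bin

if fft_mag[peak_idx] < 3.0 * np.mean(fft_mag[1:]):
    freq = float("nan")                     # peak not significant -> noise
else:
    freq = peak_idx * fs / n                # bin index * frequency resolution

print(f"estimated frequency: {freq:.0f} Hz")  # ~12.5 kHz, quantised to ~244 Hz bins
```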
@@ -308,13 +384,17 @@ def duty_cycle(
     """Measure duty cycle.
 
     Computes duty cycle as the ratio of positive pulse width to period.
+    Uses robust algorithm that handles extreme duty cycles (1%-99%) and
+    incomplete waveforms (fewer than 2 complete cycles visible).
+
+    Falls back to time-domain calculation when edge-based measurement fails.
 
     Args:
         trace: Input waveform trace.
         percentage: If True, return as percentage (0-100). If False, return ratio (0-1).
 
     Returns:
-        Duty cycle as ratio or percentage.
+        Duty cycle as ratio or percentage, or np.nan if measurement not possible.
 
     Example:
         >>> dc = duty_cycle(trace, percentage=True)
@@ -323,13 +403,49 @@ def duty_cycle(
     References:
         IEEE 181-2011 Section 5.4
     """
+    # Strategy: Use multiple methods depending on what edges are available
+    # Method 1 (best): period + pulse width (needs 2+ rising edges, 1+ falling edge)
+    # Method 2 (fallback): time-based calculation from data samples
+
     pw_pos = pulse_width(trace, polarity="positive", return_all=False)
     T = period(trace, edge_type="rising", return_all=False)
 
-
+    # Method 1: Standard period-based calculation
+    if not np.isnan(pw_pos) and not np.isnan(T) and T > 0:
+        dc = pw_pos / T
+        if percentage:
+            return dc * 100
+        return dc
+
+    # Method 2: Fallback for incomplete waveforms - time-domain calculation
+    # Calculate fraction of time signal spends above midpoint threshold
+    data = trace.data
+    if len(data) < 3:
+        return np.nan
+
+    # Convert boolean data to float if needed
+    if data.dtype == bool:
+        data = data.astype(np.float64)
+
+    low, high = _find_levels(data)
+    amplitude = high - low
+
+    # Check for invalid levels
+    if amplitude <= 0 or np.isnan(amplitude):
         return np.nan
 
-
+    # Calculate threshold at 50% of amplitude
+    mid = low + 0.5 * amplitude
+
+    # Count samples above threshold
+    above_threshold = data >= mid
+    samples_high = np.sum(above_threshold)
+    total_samples = len(data)
+
+    if total_samples == 0:
+        return np.nan
+
+    dc = float(samples_high) / total_samples
 
     if percentage:
         return dc * 100
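The Method 2 fallback reduces to "fraction of samples at or above the mid-level", which is easy to sanity-check standalone. Note the trade-off it accepts: on a truncated capture the estimate is weighted by whatever the window happens to contain. The sketch below (synthetic 1 kHz, 30% duty square wave, illustrative parameters) makes that visible: with only 0.8 of a cycle captured, the fallback reports 37.5%.

```python
import numpy as np

fs = 1e6
t = np.arange(0, 0.8e-3, 1 / fs)                    # 0.8 of one 1 kHz cycle
data = (np.mod(t * 1e3, 1.0) < 0.3).astype(float)   # 30 % duty square wave

low, high = np.percentile(data, [1, 99])            # crude level estimate
mid = low + 0.5 * (high - low)
dc = float(np.sum(data >= mid)) / len(data)         # time fraction above mid

print(f"duty cycle ~ {dc * 100:.1f} %")             # 37.5 % on this window
```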
@@ -779,6 +895,9 @@ def measure(
 def _find_levels(data: NDArray[np_floating[Any]]) -> tuple[float, float]:
     """Find low and high levels using histogram method.
 
+    Robust algorithm that handles extreme duty cycles (1%-99%) by using
+    adaptive percentile-based level detection when histogram method fails.
+
     Args:
         data: Waveform data array.
 
@@ -794,12 +913,13 @@ def _find_levels(data: NDArray[np_floating[Any]]) -> tuple[float, float]:
         return float(np.nan), float(np.nan)
 
     # Use percentiles for robust level detection
-
+    # For extreme duty cycles, use wider percentile range
+    p01, p05, p10, p50, p90, p95, p99 = np.percentile(data, [1, 5, 10, 50, 90, 95, 99])
 
     # Check for constant or near-constant signal
-    data_range =
+    data_range = p99 - p01
     if data_range < 1e-10 or np.isnan(data_range):  # Essentially constant or NaN
-        return float(
+        return float(p50), float(p50)
 
     # Refine using histogram peaks
     hist, bin_edges = np.histogram(data, bins=50)
@@ -813,9 +933,11 @@ def _find_levels(data: NDArray[np_floating[Any]]) -> tuple[float, float]:
     low = bin_centers[low_idx]
     high = bin_centers[high_idx]
 
-    # Sanity check
+    # Sanity check - if histogram method failed, use adaptive percentiles
     if high <= low:
-
+        # For extreme duty cycles, use min/max with small outlier rejection
+        # p01 and p99 remove top/bottom 1% outliers (noise, ringing)
+        return float(p01), float(p99)
 
     return float(low), float(high)
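Finally, a sketch of the two-level detection that `_find_levels` performs, with the percentile fallback wired in as above. The histogram-mode selection (`low_idx`/`high_idx`) is not shown in this diff, so the half-split argmax below is an assumption for illustration, not oscura's actual choice.

```python
import numpy as np

def find_levels(data: np.ndarray) -> tuple[float, float]:
    p01, p50, p99 = np.percentile(data, [1, 50, 99])
    if p99 - p01 < 1e-10 or np.isnan(p99 - p01):
        return float(p50), float(p50)            # essentially constant signal
    hist, bin_edges = np.histogram(data, bins=50)
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    # Assumption: strongest mode in each half of the histogram; the diff
    # does not show how oscura picks low_idx/high_idx.
    half = len(hist) // 2
    low = float(bin_centers[int(np.argmax(hist[:half]))])
    high = float(bin_centers[half + int(np.argmax(hist[half:]))])
    if high <= low:                              # histogram failed -> percentiles
        return float(p01), float(p99)
    return low, high

square = np.where(np.arange(1000) % 100 < 2, 3.3, 0.0)  # 2 % duty cycle signal
print(find_levels(square))                                # ~ (0.03, 3.27)
```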