oscura-0.7.0-py3-none-any.whl → oscura-0.10.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175)
  1. oscura/__init__.py +19 -19
  2. oscura/analyzers/__init__.py +2 -0
  3. oscura/analyzers/digital/extraction.py +2 -3
  4. oscura/analyzers/digital/quality.py +1 -1
  5. oscura/analyzers/digital/timing.py +1 -1
  6. oscura/analyzers/eye/__init__.py +5 -1
  7. oscura/analyzers/eye/generation.py +501 -0
  8. oscura/analyzers/jitter/__init__.py +6 -6
  9. oscura/analyzers/jitter/timing.py +419 -0
  10. oscura/analyzers/patterns/__init__.py +94 -0
  11. oscura/analyzers/patterns/reverse_engineering.py +991 -0
  12. oscura/analyzers/power/__init__.py +35 -12
  13. oscura/analyzers/power/basic.py +3 -3
  14. oscura/analyzers/power/soa.py +1 -1
  15. oscura/analyzers/power/switching.py +3 -3
  16. oscura/analyzers/signal_classification.py +529 -0
  17. oscura/analyzers/signal_integrity/sparams.py +3 -3
  18. oscura/analyzers/statistics/__init__.py +4 -0
  19. oscura/analyzers/statistics/basic.py +152 -0
  20. oscura/analyzers/statistics/correlation.py +47 -6
  21. oscura/analyzers/validation.py +1 -1
  22. oscura/analyzers/waveform/__init__.py +2 -0
  23. oscura/analyzers/waveform/measurements.py +329 -163
  24. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  25. oscura/analyzers/waveform/spectral.py +498 -54
  26. oscura/api/dsl/commands.py +15 -6
  27. oscura/api/server/templates/base.html +137 -146
  28. oscura/api/server/templates/export.html +84 -110
  29. oscura/api/server/templates/home.html +248 -267
  30. oscura/api/server/templates/protocols.html +44 -48
  31. oscura/api/server/templates/reports.html +27 -35
  32. oscura/api/server/templates/session_detail.html +68 -78
  33. oscura/api/server/templates/sessions.html +62 -72
  34. oscura/api/server/templates/waveforms.html +54 -64
  35. oscura/automotive/__init__.py +1 -1
  36. oscura/automotive/can/session.py +1 -1
  37. oscura/automotive/dbc/generator.py +638 -23
  38. oscura/automotive/dtc/data.json +102 -17
  39. oscura/automotive/uds/decoder.py +99 -6
  40. oscura/cli/analyze.py +8 -2
  41. oscura/cli/batch.py +36 -5
  42. oscura/cli/characterize.py +18 -4
  43. oscura/cli/export.py +47 -5
  44. oscura/cli/main.py +2 -0
  45. oscura/cli/onboarding/wizard.py +10 -6
  46. oscura/cli/pipeline.py +585 -0
  47. oscura/cli/visualize.py +6 -4
  48. oscura/convenience.py +400 -32
  49. oscura/core/config/loader.py +0 -1
  50. oscura/core/measurement_result.py +286 -0
  51. oscura/core/progress.py +1 -1
  52. oscura/core/schemas/device_mapping.json +8 -2
  53. oscura/core/schemas/packet_format.json +24 -4
  54. oscura/core/schemas/protocol_definition.json +12 -2
  55. oscura/core/types.py +300 -199
  56. oscura/correlation/multi_protocol.py +1 -1
  57. oscura/export/legacy/__init__.py +11 -0
  58. oscura/export/legacy/wav.py +75 -0
  59. oscura/exporters/__init__.py +19 -0
  60. oscura/exporters/wireshark.py +809 -0
  61. oscura/hardware/acquisition/file.py +5 -19
  62. oscura/hardware/acquisition/saleae.py +10 -10
  63. oscura/hardware/acquisition/socketcan.py +4 -6
  64. oscura/hardware/acquisition/synthetic.py +1 -5
  65. oscura/hardware/acquisition/visa.py +6 -6
  66. oscura/hardware/security/side_channel_detector.py +5 -508
  67. oscura/inference/message_format.py +686 -1
  68. oscura/jupyter/display.py +2 -2
  69. oscura/jupyter/magic.py +3 -3
  70. oscura/loaders/__init__.py +17 -12
  71. oscura/loaders/binary.py +1 -1
  72. oscura/loaders/chipwhisperer.py +1 -2
  73. oscura/loaders/configurable.py +1 -1
  74. oscura/loaders/csv_loader.py +2 -2
  75. oscura/loaders/hdf5_loader.py +1 -1
  76. oscura/loaders/lazy.py +6 -1
  77. oscura/loaders/mmap_loader.py +0 -1
  78. oscura/loaders/numpy_loader.py +8 -7
  79. oscura/loaders/preprocessing.py +3 -5
  80. oscura/loaders/rigol.py +21 -7
  81. oscura/loaders/sigrok.py +2 -5
  82. oscura/loaders/tdms.py +3 -2
  83. oscura/loaders/tektronix.py +38 -32
  84. oscura/loaders/tss.py +20 -27
  85. oscura/loaders/vcd.py +13 -8
  86. oscura/loaders/wav.py +1 -6
  87. oscura/pipeline/__init__.py +76 -0
  88. oscura/pipeline/handlers/__init__.py +165 -0
  89. oscura/pipeline/handlers/analyzers.py +1045 -0
  90. oscura/pipeline/handlers/decoders.py +899 -0
  91. oscura/pipeline/handlers/exporters.py +1103 -0
  92. oscura/pipeline/handlers/filters.py +891 -0
  93. oscura/pipeline/handlers/loaders.py +640 -0
  94. oscura/pipeline/handlers/transforms.py +768 -0
  95. oscura/reporting/__init__.py +88 -1
  96. oscura/reporting/automation.py +348 -0
  97. oscura/reporting/citations.py +374 -0
  98. oscura/reporting/core.py +54 -0
  99. oscura/reporting/formatting/__init__.py +11 -0
  100. oscura/reporting/formatting/measurements.py +320 -0
  101. oscura/reporting/html.py +57 -0
  102. oscura/reporting/interpretation.py +431 -0
  103. oscura/reporting/summary.py +329 -0
  104. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  105. oscura/reporting/visualization.py +542 -0
  106. oscura/side_channel/__init__.py +38 -57
  107. oscura/utils/builders/signal_builder.py +5 -5
  108. oscura/utils/comparison/compare.py +7 -9
  109. oscura/utils/comparison/golden.py +1 -1
  110. oscura/utils/filtering/convenience.py +2 -2
  111. oscura/utils/math/arithmetic.py +38 -62
  112. oscura/utils/math/interpolation.py +20 -20
  113. oscura/utils/pipeline/__init__.py +4 -17
  114. oscura/utils/progressive.py +1 -4
  115. oscura/utils/triggering/edge.py +1 -1
  116. oscura/utils/triggering/pattern.py +2 -2
  117. oscura/utils/triggering/pulse.py +2 -2
  118. oscura/utils/triggering/window.py +3 -3
  119. oscura/validation/hil_testing.py +11 -11
  120. oscura/visualization/__init__.py +47 -284
  121. oscura/visualization/batch.py +160 -0
  122. oscura/visualization/plot.py +542 -53
  123. oscura/visualization/styles.py +184 -318
  124. oscura/workflows/__init__.py +2 -0
  125. oscura/workflows/batch/advanced.py +1 -1
  126. oscura/workflows/batch/aggregate.py +7 -8
  127. oscura/workflows/complete_re.py +251 -23
  128. oscura/workflows/digital.py +27 -4
  129. oscura/workflows/multi_trace.py +136 -17
  130. oscura/workflows/waveform.py +788 -0
  131. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/METADATA +59 -79
  132. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/RECORD +135 -149
  133. oscura/side_channel/dpa.py +0 -1025
  134. oscura/utils/optimization/__init__.py +0 -19
  135. oscura/utils/optimization/parallel.py +0 -443
  136. oscura/utils/optimization/search.py +0 -532
  137. oscura/utils/pipeline/base.py +0 -338
  138. oscura/utils/pipeline/composition.py +0 -248
  139. oscura/utils/pipeline/parallel.py +0 -449
  140. oscura/utils/pipeline/pipeline.py +0 -375
  141. oscura/utils/search/__init__.py +0 -16
  142. oscura/utils/search/anomaly.py +0 -424
  143. oscura/utils/search/context.py +0 -294
  144. oscura/utils/search/pattern.py +0 -288
  145. oscura/utils/storage/__init__.py +0 -61
  146. oscura/utils/storage/database.py +0 -1166
  147. oscura/visualization/accessibility.py +0 -526
  148. oscura/visualization/annotations.py +0 -371
  149. oscura/visualization/axis_scaling.py +0 -305
  150. oscura/visualization/colors.py +0 -451
  151. oscura/visualization/digital.py +0 -436
  152. oscura/visualization/eye.py +0 -571
  153. oscura/visualization/histogram.py +0 -281
  154. oscura/visualization/interactive.py +0 -1035
  155. oscura/visualization/jitter.py +0 -1042
  156. oscura/visualization/keyboard.py +0 -394
  157. oscura/visualization/layout.py +0 -400
  158. oscura/visualization/optimization.py +0 -1079
  159. oscura/visualization/palettes.py +0 -446
  160. oscura/visualization/power.py +0 -508
  161. oscura/visualization/power_extended.py +0 -955
  162. oscura/visualization/presets.py +0 -469
  163. oscura/visualization/protocols.py +0 -1246
  164. oscura/visualization/render.py +0 -223
  165. oscura/visualization/rendering.py +0 -444
  166. oscura/visualization/reverse_engineering.py +0 -838
  167. oscura/visualization/signal_integrity.py +0 -989
  168. oscura/visualization/specialized.py +0 -643
  169. oscura/visualization/spectral.py +0 -1226
  170. oscura/visualization/thumbnails.py +0 -340
  171. oscura/visualization/time_axis.py +0 -351
  172. oscura/visualization/waveform.py +0 -454
  173. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/WHEEL +0 -0
  174. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/entry_points.txt +0 -0
  175. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/licenses/LICENSE +0 -0
oscura/visualization/optimization.py
@@ -1,1079 +0,0 @@
1
- """Visualization optimization functions for automatic plot parameter selection.
2
-
3
- This module provides intelligent optimization algorithms for plot parameters
4
- including axis ranges, decimation, grid spacing, dynamic range, and
5
- interesting region detection.
6
-
7
-
8
- Example:
9
- >>> from oscura.visualization.optimization import calculate_optimal_y_range
10
- >>> y_min, y_max = calculate_optimal_y_range(signal_data)
11
- >>> ax.set_ylim(y_min, y_max)
12
-
13
- References:
14
- - Wilkinson's tick placement algorithm (1999)
15
- - LTTB (Largest Triangle Three Buckets) decimation
16
- - Percentile-based outlier detection
17
- - Edge detection using Sobel operators
18
- - Statistical outlier detection using MAD (Median Absolute Deviation)
19
- """
20
-
21
- from __future__ import annotations
22
-
23
- from dataclasses import dataclass
24
- from typing import TYPE_CHECKING, Literal
25
-
26
- import numpy as np
27
- from scipy import signal as sp_signal
28
-
29
- if TYPE_CHECKING:
30
- from numpy.typing import NDArray
31
-
32
-
33
- def calculate_optimal_y_range(
34
- data: NDArray[np.float64],
35
- *,
36
- outlier_threshold: float = 3.0,
37
- margin_percent: float = 5.0,
38
- symmetric: bool = False,
39
- clip_warning_threshold: float = 0.01,
40
- ) -> tuple[float, float]:
41
- """Calculate optimal Y-axis range with outlier exclusion.
42
-
43
- Uses percentile-based outlier detection and smart margins to ensure
44
- data visibility without wasted space. Detects clipping when too many
45
- samples are excluded.
46
-
47
- Args:
48
- data: Signal data array.
49
- outlier_threshold: Number of standard deviations for outlier exclusion (default 3.0).
50
- margin_percent: Margin as percentage of data range (default 5%, auto-adjusts).
51
- symmetric: If True, use symmetric range ±max for bipolar signals.
52
- clip_warning_threshold: Warn if this fraction of samples are clipped (default 1%).
53
-
54
- Returns:
55
- Tuple of (y_min, y_max) for axis limits.
56
-
57
- Raises:
58
- ValueError: If data is empty or all NaN.
59
-
60
- Example:
61
- >>> signal = np.random.randn(1000)
62
- >>> y_min, y_max = calculate_optimal_y_range(signal, symmetric=True)
63
- >>> # For bipolar signal: y_min ≈ -y_max
64
-
65
- References:
66
- VIS-013: Auto Y-Axis Range Optimization
67
- """
68
- clean_data = _validate_and_clean_data(data)
69
- filtered_data = _filter_outliers(clean_data, outlier_threshold)
70
- _check_clipping(clean_data, filtered_data, clip_warning_threshold)
71
-
72
- # Calculate data range
73
- data_min, data_max = float(np.min(filtered_data)), float(np.max(filtered_data))
74
- margin = _select_smart_margin(len(filtered_data), margin_percent)
75
-
76
- # Apply range mode
77
- if symmetric:
78
- return _symmetric_range(data_min, data_max, margin)
79
- else:
80
- return _asymmetric_range(data_min, data_max, margin)
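For context, a minimal usage sketch of this removed function as it existed in 0.7.0 (the import path is gone in 0.10.0); matplotlib and the injected outlier value are illustrative assumptions, not part of the package:

import numpy as np
import matplotlib.pyplot as plt
from oscura.visualization.optimization import calculate_optimal_y_range  # 0.7.0 only; removed in 0.10.0

rng = np.random.default_rng(0)
signal = rng.normal(size=1000)
signal[500] = 25.0  # single outlier; the MAD-based filtering above should exclude it from the range

y_min, y_max = calculate_optimal_y_range(signal, symmetric=True)
fig, ax = plt.subplots()
ax.plot(signal)
ax.set_ylim(y_min, y_max)  # axis hugs the bulk of the data instead of stretching to the spike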
81
-
82
-
83
- def _validate_and_clean_data(data: NDArray[np.float64]) -> NDArray[np.float64]:
84
- """Validate data and remove NaN values."""
85
- if len(data) == 0:
86
- raise ValueError("Data array is empty")
87
-
88
- clean_data = data[~np.isnan(data)]
89
- if len(clean_data) == 0:
90
- raise ValueError("Data contains only NaN values")
91
-
92
- return clean_data
93
-
94
-
95
- def _filter_outliers(data: NDArray[np.float64], outlier_threshold: float) -> NDArray[np.float64]:
96
- """Filter outliers using robust MAD-based z-scores.
97
-
98
- Falls back to standard deviation when MAD = 0 (highly concentrated data).
99
- """
100
- median = np.median(data)
101
- mad = np.median(np.abs(data - median))
102
- robust_std = 1.4826 * mad # MAD to std conversion
103
-
104
- if robust_std > 0:
105
- z_scores = np.abs(data - median) / robust_std
106
- filtered: NDArray[np.float64] = data[z_scores <= outlier_threshold]
107
- return filtered
108
-
109
- # Fallback to standard deviation when MAD = 0
110
- mean = np.mean(data)
111
- std = np.std(data)
112
- if std > 0:
113
- z_scores = np.abs(data - mean) / std
114
- filtered = data[z_scores <= outlier_threshold]
115
- return filtered
116
-
117
- return data
118
-
119
-
120
- def _check_clipping(
121
- clean_data: NDArray[np.float64],
122
- filtered_data: NDArray[np.float64],
123
- clip_warning_threshold: float,
124
- ) -> None:
125
- """Check and warn if too many samples are clipped."""
126
- clipped_fraction = 1.0 - (len(filtered_data) / len(clean_data))
127
- if clipped_fraction > clip_warning_threshold:
128
- import warnings
129
-
130
- warnings.warn(
131
- f"Clipping detected: {clipped_fraction * 100:.1f}% of samples "
132
- f"excluded by range limits (threshold: {clip_warning_threshold * 100:.1f}%)",
133
- UserWarning,
134
- stacklevel=2,
135
- )
136
-
137
-
138
- def _select_smart_margin(n_samples: int, margin_percent: float) -> float:
139
- """Select margin based on data density.
140
-
141
- Only applies smart margin when using default value (5.0%).
142
- Otherwise respects user's explicit margin_percent.
143
- """
144
- # Always respect explicit user values (non-default)
145
- if margin_percent != 5.0:
146
- return margin_percent / 100.0
147
-
148
- # Apply smart margin only for default value
149
- if n_samples > 10000:
150
- return 0.02 # Dense data: smaller margin
151
- elif n_samples < 100:
152
- return 0.10 # Sparse data: larger margin
153
- return margin_percent / 100.0
154
-
155
-
156
- def _symmetric_range(data_min: float, data_max: float, margin: float) -> tuple[float, float]:
157
- """Calculate symmetric range for bipolar signals."""
158
- max_abs = max(abs(data_min), abs(data_max))
159
-
160
- # Handle constant data
161
- if max_abs == 0:
162
- return (-0.5, 0.5) # Default range for constant zero
163
-
164
- margin_value = max_abs * margin
165
- return (-(max_abs + margin_value), max_abs + margin_value)
166
-
167
-
168
- def _asymmetric_range(data_min: float, data_max: float, margin: float) -> tuple[float, float]:
169
- """Calculate asymmetric range."""
170
- data_range = data_max - data_min
171
-
172
- # Handle constant data (range = 0)
173
- if data_range == 0:
174
- # Add fixed margin for constant data
175
- default_margin = 0.5 if data_min == 0 else abs(data_min) * 0.1
176
- return (data_min - default_margin, data_max + default_margin)
177
-
178
- margin_value = data_range * margin
179
- return (data_min - margin_value, data_max + margin_value)
180
-
181
-
182
- def calculate_optimal_x_window(
183
- time: NDArray[np.float64],
184
- data: NDArray[np.float64],
185
- *,
186
- target_features: int = 5,
187
- samples_per_pixel: float = 2.0,
188
- screen_width: int = 1000,
189
- activity_threshold: float = 0.1,
190
- ) -> tuple[float, float]:
191
- """Calculate optimal X-axis time window with activity detection.
192
-
193
- Intelligently determines time window based on signal activity and features.
194
- Detects regions with significant activity and zooms to show N complete features.
195
-
196
- Args:
197
- time: Time axis array in seconds.
198
- data: Signal data array.
199
-        target_features: Number of complete features to display (default 5; typically 5-10).
200
- samples_per_pixel: Threshold for decimation (default >2 samples/pixel).
201
- screen_width: Screen width in pixels for decimation calculation.
202
- activity_threshold: Relative threshold for activity detection (0-1).
203
-
204
- Returns:
205
- Tuple of (t_start, t_end) for time window in seconds.
206
-
207
- Raises:
208
- ValueError: If arrays are empty or mismatched.
209
-
210
- Example:
211
- >>> time = np.linspace(0, 1e-3, 10000)
212
- >>> signal = np.sin(2 * np.pi * 1000 * time)
213
- >>> t_start, t_end = calculate_optimal_x_window(time, signal, target_features=5)
214
-
215
- References:
216
- VIS-014: Adaptive X-Axis Time Window
217
- """
218
- if len(time) == 0 or len(data) == 0:
219
- raise ValueError("Time or data array is empty")
220
-
221
- if len(time) != len(data):
222
- raise ValueError(f"Time and data arrays must match: {len(time)} vs {len(data)}")
223
-
224
- # Detect signal activity using RMS windowing
225
- window_size = max(10, len(data) // 100)
226
- rms = np.sqrt(np.convolve(data**2, np.ones(window_size) / window_size, mode="same"))
227
-
228
- # Find activity threshold
229
- rms_threshold = activity_threshold * np.max(rms)
230
- active_regions = rms > rms_threshold
231
-
232
- if not np.any(active_regions):
233
- # No significant activity, return padded full range
234
- time_range = time[-1] - time[0]
235
- padding = time_range * 0.05 # 5% padding on each side
236
- return (float(time[0] - padding), float(time[-1] + padding))
237
-
238
- # Find first active region
239
- active_indices = np.where(active_regions)[0]
240
- first_active = active_indices[0]
241
-
242
- # Detect features using autocorrelation for periodic signals
243
- # Use a subset for efficiency
244
- subset_size = min(5000, len(data) - first_active)
245
- subset_start = first_active
246
- subset_end = subset_start + subset_size
247
-
248
- if subset_end > len(data):
249
- subset_end = len(data)
250
- subset_start = max(0, subset_end - subset_size)
251
-
252
- subset = data[subset_start:subset_end]
253
-
254
- # Try to detect periodicity
255
- if len(subset) > 20:
256
- # Use zero-crossing to detect period
257
- mean_val = np.mean(subset)
258
- crossings = np.where(np.diff(np.sign(subset - mean_val)))[0]
259
-
260
- if len(crossings) >= 4:
261
- # Estimate period from crossings (two crossings per cycle)
262
- # crossings[::2] already gives full periods (every other crossing)
263
- periods = np.diff(crossings[::2])
264
- if len(periods) > 0:
265
- median_period = np.median(periods)
266
- samples_per_feature = int(median_period) # Already full cycle from [::2]
267
-
268
- # Calculate window to show target_features
269
- total_samples = samples_per_feature * target_features
270
-
271
- # Respect decimation constraint
272
- max_window_samples = int(screen_width * samples_per_pixel)
273
- total_samples = min(total_samples, max_window_samples)
274
-
275
- window_start = first_active
276
- window_end = min(window_start + total_samples, len(time) - 1)
277
-
278
- return (float(time[window_start]), float(time[window_end]))
279
-
280
- # Fallback: zoom to respect decimation threshold
281
- # Limit window to screen_width * samples_per_pixel samples
282
- max_window_samples = int(screen_width * samples_per_pixel)
283
- active_duration = len(active_indices)
284
- zoom_samples = min(active_duration, max_window_samples)
285
- window_end = min(first_active + zoom_samples, len(time) - 1)
286
-
287
- return (float(time[first_active]), float(time[window_end]))
288
-
289
-
290
- def calculate_grid_spacing(
291
- axis_min: float,
292
- axis_max: float,
293
- *,
294
- target_major_ticks: int = 7,
295
- log_scale: bool = False,
296
- time_axis: bool = False,
297
- ) -> tuple[float, float]:
298
- """Calculate optimal grid spacing using nice numbers.
299
-
300
- Implements Wilkinson's tick placement algorithm to generate
301
- aesthetically pleasing major and minor grid line spacing.
302
-
303
- Args:
304
- axis_min: Minimum axis value.
305
- axis_max: Maximum axis value.
306
-        target_major_ticks: Target number of major gridlines (default 7; typically 5-10).
307
- log_scale: Use logarithmic spacing for log-scale axes.
308
- time_axis: Use time-unit alignment (ms, μs, ns).
309
-
310
- Returns:
311
- Tuple of (major_spacing, minor_spacing).
312
-
313
- Raises:
314
- ValueError: If axis_max <= axis_min.
315
-
316
- Example:
317
- >>> major, minor = calculate_grid_spacing(0, 100, target_major_ticks=5)
318
- >>> # Returns nice numbers like (20.0, 4.0)
319
-
320
- References:
321
- VIS-019: Grid Auto-Spacing
322
- Wilkinson (1999): The Grammar of Graphics
323
- """
324
- if axis_max <= axis_min:
325
- raise ValueError(f"Invalid axis range: [{axis_min}, {axis_max}]")
326
-
327
- if log_scale:
328
- # Logarithmic spacing: major grids at decade boundaries
329
- log_min = np.log10(max(axis_min, 1e-100))
330
- log_max = np.log10(axis_max)
331
- n_decades = log_max - log_min
332
-
333
- if n_decades < 1:
334
- # Less than one decade: use linear spacing
335
- major_spacing = _calculate_nice_number((axis_max - axis_min) / target_major_ticks)
336
- minor_spacing = major_spacing / 5
337
- else:
338
- # Major grids at decades, minors at 2, 5
339
- major_spacing = 10.0 ** np.ceil(n_decades / target_major_ticks)
340
- minor_spacing = major_spacing / 5
341
-
342
- return (float(major_spacing), float(minor_spacing))
343
-
344
- # Linear spacing with nice numbers
345
- axis_range = axis_max - axis_min
346
- rough_spacing = axis_range / target_major_ticks
347
-
348
- # Find nice number for major spacing
349
- major_spacing = _calculate_nice_number(rough_spacing)
350
-
351
- # Minor spacing: 1/5 or 1/2 of major
352
- # Use 1/5 for spacings ending in 5 or 10, 1/2 otherwise
353
- if major_spacing % 5 == 0 or major_spacing % 10 == 0:
354
- minor_spacing = major_spacing / 5
355
- else:
356
- minor_spacing = major_spacing / 2
357
-
358
- # Time axis alignment
359
- if time_axis:
360
- # Align to natural time units
361
- time_units = [
362
- 1e-9,
363
- 2e-9,
364
- 5e-9, # ns
365
- 1e-6,
366
- 2e-6,
367
- 5e-6, # μs
368
- 1e-3,
369
- 2e-3,
370
- 5e-3, # ms
371
- 1.0,
372
- 2.0,
373
- 5.0,
374
- ] # s
375
-
376
- # Find closest time unit
377
- closest_idx = np.argmin(np.abs(np.array(time_units) - major_spacing))
378
- major_spacing = time_units[closest_idx]
379
- minor_spacing = major_spacing / 5
380
-
381
- # Check if grid would be too dense
382
- actual_major_ticks = axis_range / major_spacing
383
- if actual_major_ticks > 15:
384
- # Disable minor grids (set equal to major)
385
- minor_spacing = major_spacing
386
-
387
- return (float(major_spacing), float(minor_spacing))
388
-
389
-
390
- def _calculate_nice_number(value: float) -> float:
391
- """Calculate nice number using powers of 10 × (1, 2, 5). # noqa: RUF002
392
-
393
- Args:
394
- value: Input value.
395
-
396
- Returns:
397
- Nice number (1, 2, or 5 × 10^n). # noqa: RUF002
398
- """
399
- if value <= 0:
400
- return 1.0
401
-
402
- # Find exponent
403
- exponent = np.floor(np.log10(value))
404
- fraction = value / (10**exponent)
405
-
406
- # Round to nice fraction (1, 2, 5)
407
- if fraction < 1.5:
408
- nice_fraction = 1.0
409
- elif fraction < 3.5:
410
- nice_fraction = 2.0
411
- elif fraction < 7.5:
412
- nice_fraction = 5.0
413
- else:
414
- nice_fraction = 10.0
415
-
416
- return nice_fraction * (10**exponent) # type: ignore[no-any-return]
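A standalone re-sketch of the 1-2-5 rounding described above, written independently of the removed helper (the function name here is illustrative, not the package API):

import math

def nice_number(value: float) -> float:
    # Round to 1, 2, 5, or 10 times a power of ten, mirroring the thresholds above.
    exponent = math.floor(math.log10(value))
    fraction = value / 10**exponent
    if fraction < 1.5:
        nice = 1.0
    elif fraction < 3.5:
        nice = 2.0
    elif fraction < 7.5:
        nice = 5.0
    else:
        nice = 10.0
    return nice * 10**exponent

print(nice_number(100 / 5))  # 20.0 -> a 0..100 axis with ~5 ticks lands on a step of 20
print(nice_number(0.0132))   # 0.01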
417
-
418
-
419
- def optimize_db_range(
420
- spectrum: NDArray[np.float64],
421
- *,
422
- noise_floor_percentile: float = 5.0,
423
- peak_threshold_db: float = 10.0,
424
- margin_db: float = 10.0,
425
- max_dynamic_range_db: float = 100.0,
426
- ) -> tuple[float, float]:
427
- """Optimize dB range for spectrum plots with noise floor detection.
428
-
429
- Automatically detects noise floor and calculates optimal dynamic range
430
- for maximum information visibility in frequency-domain plots.
431
-
432
- Args:
433
- spectrum: Spectrum magnitude array (linear or dB).
434
- noise_floor_percentile: Percentile for noise floor estimation (default 5%).
435
- peak_threshold_db: Threshold above noise floor for peak detection (default 10 dB).
436
- margin_db: Margin below noise floor (default 10 dB).
437
- max_dynamic_range_db: Maximum dynamic range to display (default 100 dB).
438
-
439
- Returns:
440
- Tuple of (db_min, db_max) for spectrum plot limits.
441
-
442
- Raises:
443
- ValueError: If spectrum is empty or all zero.
444
-
445
- Example:
446
- >>> spectrum_db = 20 * np.log10(np.abs(fft_result))
447
- >>> db_min, db_max = optimize_db_range(spectrum_db)
448
- >>> ax.set_ylim(db_min, db_max)
449
-
450
- References:
451
- VIS-022: Spectrum dB Range Optimization
452
- """
453
- if len(spectrum) == 0:
454
- raise ValueError("Spectrum array is empty")
455
-
456
- # Convert to dB if needed (check if values are in linear scale)
457
- if np.max(spectrum) > 100:
458
- # Likely linear, convert to dB
459
- spectrum_db = 20 * np.log10(np.maximum(spectrum, 1e-100))
460
- else:
461
- # Assume already in dB
462
- spectrum_db = spectrum
463
-
464
- # Detect noise floor using percentile method
465
- noise_floor = np.percentile(spectrum_db, noise_floor_percentile)
466
-
467
- # Find signal peaks using scipy
468
- peak_indices, peak_properties = sp_signal.find_peaks(
469
- spectrum_db,
470
- height=noise_floor + peak_threshold_db,
471
- prominence=peak_threshold_db / 2,
472
- )
473
-
474
- if len(peak_indices) > 0:
475
- peak_max = np.max(peak_properties["peak_heights"])
476
- else:
477
- # No peaks detected, use maximum value
478
- peak_max = np.max(spectrum_db)
479
-
480
- # Calculate dB range
481
- db_max = float(peak_max)
482
- db_min = float(noise_floor - margin_db)
483
-
484
- # Apply dynamic range compression if too wide
485
- dynamic_range = db_max - db_min
486
- if dynamic_range > max_dynamic_range_db:
487
- db_min = db_max - max_dynamic_range_db
488
-
489
- return (db_min, db_max)
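A self-contained sketch of the percentile noise-floor idea used above, independent of the removed function (the signal parameters are arbitrary assumptions):

import numpy as np

rng = np.random.default_rng(1)
fs = 10_000.0
t = np.arange(4096) / fs
x = np.sin(2 * np.pi * 1_000 * t) + 0.01 * rng.normal(size=t.size)  # one tone well above the noise

spectrum_db = 20 * np.log10(np.abs(np.fft.rfft(x)) + 1e-12)
noise_floor = np.percentile(spectrum_db, 5.0)  # a low percentile approximates the noise floor
db_max = float(spectrum_db.max())
db_min = float(noise_floor) - 10.0             # keep a 10 dB margin below the floor
print(f"display range: {db_min:.1f} dB to {db_max:.1f} dB")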
490
-
491
-
492
- def decimate_for_display(
493
- time: NDArray[np.float64],
494
- data: NDArray[np.float64],
495
- *,
496
- max_points: int = 2000,
497
- method: Literal["lttb", "minmax", "uniform"] = "lttb",
498
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
499
- """Decimate signal data for display using LTTB or min-max envelope.
500
-
501
- Intelligently reduces number of points while preserving visual appearance
502
- and important features like edges and peaks.
503
-
504
- Args:
505
- time: Time axis array.
506
- data: Signal data array.
507
- max_points: Maximum number of points to retain.
508
- method: Decimation method ("lttb", "minmax", "uniform").
509
-
510
- Returns:
511
- Tuple of (decimated_time, decimated_data).
512
-
513
- Raises:
514
- ValueError: If arrays are empty or method is invalid.
515
-
516
- Example:
517
- >>> time_dec, data_dec = decimate_for_display(time, data, max_points=1000)
518
- >>> # Reduced from 100k to 1k points while preserving shape
519
-
520
- References:
521
- VIS-014: Adaptive X-Axis Time Window
522
- LTTB: Largest Triangle Three Buckets algorithm
523
- """
524
- if len(time) == 0 or len(data) == 0:
525
- raise ValueError("Time or data array is empty")
526
-
527
- if len(time) != len(data):
528
- raise ValueError(f"Time and data arrays must match: {len(time)} vs {len(data)}")
529
-
530
- # Don't decimate if already below threshold
531
- if len(data) <= max_points:
532
- return (time, data)
533
-
534
- # Never decimate very small signals
535
- if len(data) < 10:
536
- return (time, data)
537
-
538
- if method == "uniform":
539
- # Simple uniform stride decimation
540
- stride = len(data) // max_points
541
- indices = np.arange(0, len(data), stride)
542
- return (time[indices], data[indices])
543
-
544
- elif method == "minmax":
545
- # Min-max envelope: preserve peaks and valleys
546
- return _decimate_minmax(time, data, max_points)
547
-
548
- elif method == "lttb":
549
- # Largest Triangle Three Buckets
550
- return _decimate_lttb(time, data, max_points)
551
-
552
- else:
553
- raise ValueError(f"Invalid decimation method: {method}")
554
-
555
-
556
- def _decimate_minmax(
557
- time: NDArray[np.float64],
558
- data: NDArray[np.float64],
559
- max_points: int,
560
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
561
- """Decimate using min-max envelope to preserve peaks/valleys.
562
-
563
- Args:
564
- time: Time array.
565
- data: Signal data array.
566
- max_points: Maximum number of points to retain.
567
-
568
- Returns:
569
- Tuple of (decimated_time, decimated_data).
570
- """
571
- # Calculate bucket size
572
- bucket_size = len(data) // (max_points // 2)
573
-
574
- decimated_time = []
575
- decimated_data = []
576
-
577
- for i in range(0, len(data), bucket_size):
578
- bucket = data[i : i + bucket_size]
579
- time_bucket = time[i : i + bucket_size]
580
-
581
- if len(bucket) > 0:
582
- # Add min and max from each bucket
583
- min_idx = np.argmin(bucket)
584
- max_idx = np.argmax(bucket)
585
-
586
- # Add in chronological order
587
- if min_idx < max_idx:
588
- decimated_time.extend([time_bucket[min_idx], time_bucket[max_idx]])
589
- decimated_data.extend([bucket[min_idx], bucket[max_idx]])
590
- else:
591
- decimated_time.extend([time_bucket[max_idx], time_bucket[min_idx]])
592
- decimated_data.extend([bucket[max_idx], bucket[min_idx]])
593
-
594
- return (np.array(decimated_time), np.array(decimated_data))
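A standalone sketch of the same min-max envelope idea (one minimum and one maximum kept per bucket); the helper name and sizes are illustrative assumptions, not the package code:

import numpy as np

def minmax_envelope(t, x, max_points=2000):
    # Keep the min and max sample of each bucket, in chronological order.
    bucket = max(1, len(x) // (max_points // 2))
    keep = []
    for start in range(0, len(x), bucket):
        chunk = slice(start, start + bucket)
        lo = start + int(np.argmin(x[chunk]))
        hi = start + int(np.argmax(x[chunk]))
        keep.extend(sorted((lo, hi)))
    idx = np.asarray(keep)
    return t[idx], x[idx]

t = np.linspace(0.0, 1.0, 100_000)
x = np.sin(2 * np.pi * 50 * t) + 0.05 * np.random.randn(t.size)
td, xd = minmax_envelope(t, x, max_points=1000)
print(td.size)  # ~1000 points, with peaks and valleys of the original trace preserved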
595
-
596
-
597
- def _decimate_lttb(
598
- time: NDArray[np.float64],
599
- data: NDArray[np.float64],
600
- max_points: int,
601
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
602
- """Decimate using Largest Triangle Three Buckets algorithm.
603
-
604
- Preserves visual appearance by selecting points that maximize
605
- the area of triangles formed with neighboring buckets.
606
-
607
- Args:
608
- time: Time array.
609
- data: Signal data array.
610
- max_points: Maximum number of points to retain.
611
-
612
- Returns:
613
- Tuple of (decimated_time, decimated_data).
614
- """
615
- if len(data) <= max_points:
616
- return (time, data)
617
-
618
- # Always include first and last points
619
- sampled_time = [time[0]]
620
- sampled_data = [data[0]]
621
-
622
- # Calculate bucket size
623
- bucket_size = (len(data) - 2) / (max_points - 2)
624
-
625
- # Previous selected point
626
- prev_idx = 0
627
-
628
- for i in range(max_points - 2):
629
- # Calculate average point of next bucket (for triangle area calculation)
630
- avg_range_start = int((i + 1) * bucket_size) + 1
631
- avg_range_end = int((i + 2) * bucket_size) + 1
632
- avg_range_end = min(avg_range_end, len(data))
633
-
634
- if avg_range_start < avg_range_end:
635
- avg_time = np.mean(time[avg_range_start:avg_range_end])
636
- avg_data = np.mean(data[avg_range_start:avg_range_end])
637
- else:
638
- avg_time = time[-1]
639
- avg_data = data[-1]
640
-
641
- # Current bucket range
642
- range_start = int(i * bucket_size) + 1
643
- range_end = int((i + 1) * bucket_size) + 1
644
- range_end = min(range_end, len(data) - 1)
645
-
646
- # Find point in current bucket that forms largest triangle
647
- prev_time = time[prev_idx]
648
- prev_data = data[prev_idx]
649
-
650
- max_area = -1.0
651
- max_idx = range_start
652
-
653
- for idx in range(range_start, range_end):
654
- # Calculate triangle area
655
- area = abs(
656
- (prev_time - avg_time) * (data[idx] - prev_data)
657
- - (prev_time - time[idx]) * (avg_data - prev_data)
658
- )
659
-
660
- if area > max_area:
661
- max_area = area
662
- max_idx = idx
663
-
664
- sampled_time.append(time[max_idx])
665
- sampled_data.append(data[max_idx])
666
- prev_idx = max_idx
667
-
668
- # Always include last point
669
- sampled_time.append(time[-1])
670
- sampled_data.append(data[-1])
671
-
672
- return (np.array(sampled_time), np.array(sampled_data))
673
-
674
-
675
- @dataclass
676
- class InterestingRegion:
677
- """Represents an interesting region in a signal.
678
-
679
- Attributes:
680
- start_idx: Starting sample index
681
- end_idx: Ending sample index
682
- start_time: Starting time in seconds
683
- end_time: Ending time in seconds
684
- type: Type of interesting feature
685
- significance: Significance score (0-1, higher is more significant)
686
- metadata: Additional metadata about the region
687
- """
688
-
689
- start_idx: int
690
- end_idx: int
691
- start_time: float
692
- end_time: float
693
- type: Literal["edge", "glitch", "anomaly", "pattern_change"]
694
- significance: float
695
- metadata: dict # type: ignore[type-arg]
696
-
697
-
698
- def detect_interesting_regions(
699
- signal: NDArray[np.float64],
700
- sample_rate: float,
701
- *,
702
- edge_threshold: float | None = None,
703
- glitch_sigma: float = 3.0,
704
- anomaly_threshold: float = 3.0,
705
- min_region_samples: int = 1,
706
- max_regions: int = 10,
707
- ) -> list[InterestingRegion]:
708
- """Detect interesting regions in a signal for automatic zoom/focus.
709
-
710
-    Automatically detect and zoom to interesting signal
711
- regions such as edges, glitches, anomalies, or pattern changes.
712
-
713
- Args:
714
- signal: Input signal array
715
- sample_rate: Sample rate in Hz
716
- edge_threshold: Edge detection threshold (default: auto from signal stddev)
717
- glitch_sigma: Sigma threshold for glitch detection (default: 3.0)
718
- anomaly_threshold: Threshold for anomaly detection in sigma (default: 3.0)
719
- min_region_samples: Minimum samples per region (default: 1)
720
- max_regions: Maximum number of regions to return (default: 10)
721
-
722
- Returns:
723
- List of InterestingRegion objects, sorted by significance (descending)
724
-
725
- Raises:
726
- ValueError: If signal is empty or sample_rate is invalid
727
-
728
- Example:
729
- >>> signal = np.sin(2*np.pi*1000*t) + 0.1*np.random.randn(len(t))
730
- >>> regions = detect_interesting_regions(signal, 1e6)
731
- >>> print(f"Found {len(regions)} interesting regions")
732
-
733
- References:
734
- VIS-020: Zoom to Interesting Regions
735
- Edge detection: Sobel operator on signal derivative
736
- Glitch detection: MAD-based outlier detection
737
- """
738
- if len(signal) == 0:
739
- raise ValueError("Signal cannot be empty")
740
- if sample_rate <= 0:
741
- raise ValueError("Sample rate must be positive")
742
- if min_region_samples < 1:
743
- raise ValueError("min_region_samples must be >= 1")
744
-
745
- regions: list[InterestingRegion] = []
746
-
747
- # 1. Edge detection using first derivative
748
- edges = _detect_edges(signal, sample_rate, edge_threshold)
749
- regions.extend(edges)
750
-
751
- # 2. Glitch detection using statistical outliers
752
- glitches = _detect_glitches(signal, sample_rate, glitch_sigma)
753
- regions.extend(glitches)
754
-
755
- # 3. Anomaly detection using MAD
756
- anomalies = _detect_anomalies(signal, sample_rate, anomaly_threshold)
757
- regions.extend(anomalies)
758
-
759
- # 4. Pattern change detection (simplified using variance changes)
760
- pattern_changes = _detect_pattern_changes(signal, sample_rate)
761
- regions.extend(pattern_changes)
762
-
763
- # Filter out regions that are too small
764
- regions = [r for r in regions if (r.end_idx - r.start_idx) >= min_region_samples]
765
-
766
- # Sort by significance (descending)
767
- regions.sort(key=lambda r: r.significance, reverse=True)
768
-
769
- # Return top N regions
770
- return regions[:max_regions]
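An illustrative call against the 0.7.0 API (the module is removed in 0.10.0); the synthetic spike and print format are assumptions for demonstration only:

import numpy as np
from oscura.visualization.optimization import detect_interesting_regions  # 0.7.0 only

fs = 1e6
t = np.arange(10_000) / fs
sig = np.sin(2 * np.pi * 1_000 * t) + 0.05 * np.random.randn(t.size)
sig[4_000] += 5.0  # single-sample spike, expected to be reported as a "glitch"

for r in detect_interesting_regions(sig, fs, max_regions=5):
    print(r.type, f"{r.start_time * 1e6:.1f}-{r.end_time * 1e6:.1f} us", f"significance={r.significance:.2f}")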
771
-
772
-
773
- def _detect_edges(
774
- signal: NDArray[np.float64],
775
- sample_rate: float,
776
- threshold: float | None,
777
- ) -> list[InterestingRegion]:
778
- """Detect edge transitions using first derivative.
779
-
780
- Args:
781
- signal: Input signal
782
- sample_rate: Sample rate in Hz
783
- threshold: Edge threshold (auto if None)
784
-
785
- Returns:
786
- List of edge regions
787
- """
788
- # Calculate first derivative (gradient)
789
- gradient = np.gradient(signal)
790
-
791
- # Auto threshold based on signal statistics
792
- if threshold is None:
793
- threshold = np.std(gradient) * 2.0
794
-
795
- # Find where gradient exceeds threshold
796
- edge_mask = np.abs(gradient) > threshold
797
-
798
- # Find continuous edge regions
799
- regions: list[InterestingRegion] = []
800
- in_edge = False
801
- start_idx = 0
802
-
803
- for i, is_edge in enumerate(edge_mask):
804
- if is_edge and not in_edge:
805
- # Start of edge
806
- start_idx = i
807
- in_edge = True
808
- elif not is_edge and in_edge:
809
- # End of edge
810
- end_idx = i
811
-
812
- # Calculate significance based on gradient magnitude
813
- edge_gradient = gradient[start_idx:end_idx]
814
- significance = min(1.0, np.max(np.abs(edge_gradient)) / (threshold * 5))
815
-
816
- time_base = 1.0 / sample_rate
817
- regions.append(
818
- InterestingRegion(
819
- start_idx=start_idx,
820
- end_idx=end_idx,
821
- start_time=start_idx * time_base,
822
- end_time=end_idx * time_base,
823
- type="edge",
824
- significance=significance,
825
- metadata={
826
- "max_gradient": float(np.max(np.abs(edge_gradient))),
827
- "threshold": threshold,
828
- },
829
- )
830
- )
831
- in_edge = False
832
-
833
- # Handle edge at end of signal
834
- if in_edge:
835
- end_idx = len(signal)
836
- edge_gradient = gradient[start_idx:end_idx]
837
- significance = min(1.0, np.max(np.abs(edge_gradient)) / (threshold * 5))
838
- time_base = 1.0 / sample_rate
839
- regions.append(
840
- InterestingRegion(
841
- start_idx=start_idx,
842
- end_idx=end_idx,
843
- start_time=start_idx * time_base,
844
- end_time=end_idx * time_base,
845
- type="edge",
846
- significance=significance,
847
- metadata={
848
- "max_gradient": float(np.max(np.abs(edge_gradient))),
849
- "threshold": threshold,
850
- },
851
- )
852
- )
853
-
854
- return regions
855
-
856
-
857
- def _detect_glitches(
858
- signal: NDArray[np.float64],
859
- sample_rate: float,
860
- sigma_threshold: float,
861
- ) -> list[InterestingRegion]:
862
- """Detect isolated spikes (glitches) using z-score.
863
-
864
- Args:
865
- signal: Input signal
866
- sample_rate: Sample rate in Hz
867
- sigma_threshold: Sigma threshold for outlier detection
868
-
869
- Returns:
870
- List of glitch regions
871
- """
872
- # Calculate z-scores
873
- mean = np.mean(signal)
874
- std = np.std(signal)
875
-
876
- if std == 0:
877
- return []
878
-
879
- z_scores = np.abs((signal - mean) / std)
880
-
881
- # Find outliers
882
- outlier_mask = z_scores > sigma_threshold
883
-
884
- # Find isolated glitches (single sample or very short bursts)
885
- regions: list[InterestingRegion] = []
886
- time_base = 1.0 / sample_rate
887
-
888
- i = 0
889
- while i < len(outlier_mask):
890
- if outlier_mask[i]:
891
- # Start of potential glitch
892
- start_idx = i
893
-
894
- # Find end of glitch (max 5 samples to be considered a glitch)
895
- while i < len(outlier_mask) and outlier_mask[i] and (i - start_idx) < 5:
896
- i += 1
897
-
898
- end_idx = i
899
-
900
- # Calculate significance based on z-score
901
- glitch_z_scores = z_scores[start_idx:end_idx]
902
- significance = min(1.0, np.max(glitch_z_scores) / (sigma_threshold * 3))
903
-
904
- regions.append(
905
- InterestingRegion(
906
- start_idx=start_idx,
907
- end_idx=end_idx,
908
- start_time=start_idx * time_base,
909
- end_time=end_idx * time_base,
910
- type="glitch",
911
- significance=significance,
912
- metadata={
913
- "max_z_score": float(np.max(glitch_z_scores)),
914
- "threshold_sigma": sigma_threshold,
915
- },
916
- )
917
- )
918
- else:
919
- i += 1
920
-
921
- return regions
922
-
923
-
924
- def _detect_anomalies(
925
- signal: NDArray[np.float64],
926
- sample_rate: float,
927
- threshold_sigma: float,
928
- ) -> list[InterestingRegion]:
929
- """Detect anomalies using MAD (Median Absolute Deviation).
930
-
931
- Args:
932
- signal: Input signal
933
- sample_rate: Sample rate in Hz
934
- threshold_sigma: Sigma threshold for MAD
935
-
936
- Returns:
937
- List of anomaly regions
938
- """
939
- # Calculate MAD
940
- median = np.median(signal)
941
- mad = np.median(np.abs(signal - median))
942
-
943
- if mad == 0:
944
- return []
945
-
946
- # Modified z-score using MAD (more robust than standard z-score)
947
- modified_z_scores = 0.6745 * (signal - median) / mad
948
-
949
- # Find anomalies
950
- anomaly_mask = np.abs(modified_z_scores) > threshold_sigma
951
-
952
- # Find continuous anomaly regions
953
- regions: list[InterestingRegion] = []
954
- in_anomaly = False
955
- start_idx = 0
956
- time_base = 1.0 / sample_rate
957
-
958
- for i, is_anomaly in enumerate(anomaly_mask):
959
- if is_anomaly and not in_anomaly:
960
- start_idx = i
961
- in_anomaly = True
962
- elif not is_anomaly and in_anomaly:
963
- end_idx = i
964
-
965
- # Calculate significance
966
- anomaly_scores = modified_z_scores[start_idx:end_idx]
967
- significance = min(1.0, np.max(np.abs(anomaly_scores)) / (threshold_sigma * 3))
968
-
969
- regions.append(
970
- InterestingRegion(
971
- start_idx=start_idx,
972
- end_idx=end_idx,
973
- start_time=start_idx * time_base,
974
- end_time=end_idx * time_base,
975
- type="anomaly",
976
- significance=significance,
977
- metadata={
978
- "max_mad_score": float(np.max(np.abs(anomaly_scores))),
979
- "threshold_sigma": threshold_sigma,
980
- },
981
- )
982
- )
983
- in_anomaly = False
984
-
985
- # Handle anomaly at end
986
- if in_anomaly:
987
- end_idx = len(signal)
988
- anomaly_scores = modified_z_scores[start_idx:end_idx]
989
- significance = min(1.0, np.max(np.abs(anomaly_scores)) / (threshold_sigma * 3))
990
- regions.append(
991
- InterestingRegion(
992
- start_idx=start_idx,
993
- end_idx=end_idx,
994
- start_time=start_idx * time_base,
995
- end_time=end_idx * time_base,
996
- type="anomaly",
997
- significance=significance,
998
- metadata={
999
- "max_mad_score": float(np.max(np.abs(anomaly_scores))),
1000
- "threshold_sigma": threshold_sigma,
1001
- },
1002
- )
1003
- )
1004
-
1005
- return regions
1006
-
1007
-
1008
- def _detect_pattern_changes(
1009
- signal: NDArray[np.float64],
1010
- sample_rate: float,
1011
- ) -> list[InterestingRegion]:
1012
- """Detect pattern changes using windowed variance analysis.
1013
-
1014
- Args:
1015
- signal: Input signal
1016
- sample_rate: Sample rate in Hz
1017
-
1018
- Returns:
1019
- List of pattern change regions
1020
- """
1021
- # Use sliding window to detect variance changes
1022
- window_size = min(100, len(signal) // 10)
1023
-
1024
- if window_size < 10:
1025
- return []
1026
-
1027
- # Calculate windowed variance
1028
- variances = np.zeros(len(signal) - window_size + 1)
1029
- for i in range(len(variances)):
1030
- variances[i] = np.var(signal[i : i + window_size])
1031
-
1032
- # Detect changes in variance
1033
- if len(variances) < 2:
1034
- return []
1035
-
1036
- variance_gradient = np.gradient(variances)
1037
- threshold = np.std(variance_gradient) * 2.0
1038
-
1039
- change_mask = np.abs(variance_gradient) > threshold
1040
-
1041
- # Find change regions
1042
- regions: list[InterestingRegion] = []
1043
- time_base = 1.0 / sample_rate
1044
-
1045
- for i in range(len(change_mask)):
1046
- if change_mask[i]:
1047
- start_idx = i
1048
- end_idx = min(i + window_size, len(signal))
1049
-
1050
- # Calculate significance
1051
- significance = min(1.0, np.abs(variance_gradient[i]) / (threshold * 5))
1052
-
1053
- regions.append(
1054
- InterestingRegion(
1055
- start_idx=start_idx,
1056
- end_idx=end_idx,
1057
- start_time=start_idx * time_base,
1058
- end_time=end_idx * time_base,
1059
- type="pattern_change",
1060
- significance=significance,
1061
- metadata={
1062
- "variance_change": float(variance_gradient[i]),
1063
- "threshold": threshold,
1064
- },
1065
- )
1066
- )
1067
-
1068
- return regions
1069
-
1070
-
1071
- __all__ = [
1072
- "InterestingRegion",
1073
- "calculate_grid_spacing",
1074
- "calculate_optimal_x_window",
1075
- "calculate_optimal_y_range",
1076
- "decimate_for_display",
1077
- "detect_interesting_regions",
1078
- "optimize_db_range",
1079
- ]