oscura-0.8.0-py3-none-any.whl → oscura-0.11.0-py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (161)
  1. oscura/__init__.py +19 -19
  2. oscura/__main__.py +4 -0
  3. oscura/analyzers/__init__.py +2 -0
  4. oscura/analyzers/digital/extraction.py +2 -3
  5. oscura/analyzers/digital/quality.py +1 -1
  6. oscura/analyzers/digital/timing.py +1 -1
  7. oscura/analyzers/ml/signal_classifier.py +6 -0
  8. oscura/analyzers/patterns/__init__.py +66 -0
  9. oscura/analyzers/power/basic.py +3 -3
  10. oscura/analyzers/power/soa.py +1 -1
  11. oscura/analyzers/power/switching.py +3 -3
  12. oscura/analyzers/signal_classification.py +529 -0
  13. oscura/analyzers/signal_integrity/sparams.py +3 -3
  14. oscura/analyzers/statistics/basic.py +10 -7
  15. oscura/analyzers/validation.py +1 -1
  16. oscura/analyzers/waveform/measurements.py +200 -156
  17. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  18. oscura/analyzers/waveform/spectral.py +182 -84
  19. oscura/api/dsl/commands.py +15 -6
  20. oscura/api/server/templates/base.html +137 -146
  21. oscura/api/server/templates/export.html +84 -110
  22. oscura/api/server/templates/home.html +248 -267
  23. oscura/api/server/templates/protocols.html +44 -48
  24. oscura/api/server/templates/reports.html +27 -35
  25. oscura/api/server/templates/session_detail.html +68 -78
  26. oscura/api/server/templates/sessions.html +62 -72
  27. oscura/api/server/templates/waveforms.html +54 -64
  28. oscura/automotive/__init__.py +1 -1
  29. oscura/automotive/can/session.py +1 -1
  30. oscura/automotive/dbc/generator.py +638 -23
  31. oscura/automotive/dtc/data.json +17 -102
  32. oscura/automotive/flexray/fibex.py +9 -1
  33. oscura/automotive/uds/decoder.py +99 -6
  34. oscura/cli/analyze.py +8 -2
  35. oscura/cli/batch.py +36 -5
  36. oscura/cli/characterize.py +18 -4
  37. oscura/cli/export.py +47 -5
  38. oscura/cli/main.py +2 -0
  39. oscura/cli/onboarding/wizard.py +10 -6
  40. oscura/cli/pipeline.py +585 -0
  41. oscura/cli/visualize.py +6 -4
  42. oscura/convenience.py +400 -32
  43. oscura/core/measurement_result.py +286 -0
  44. oscura/core/progress.py +1 -1
  45. oscura/core/schemas/device_mapping.json +2 -8
  46. oscura/core/schemas/packet_format.json +4 -24
  47. oscura/core/schemas/protocol_definition.json +2 -12
  48. oscura/core/types.py +232 -239
  49. oscura/correlation/multi_protocol.py +1 -1
  50. oscura/export/legacy/__init__.py +11 -0
  51. oscura/export/legacy/wav.py +75 -0
  52. oscura/exporters/__init__.py +19 -0
  53. oscura/exporters/wireshark.py +809 -0
  54. oscura/hardware/acquisition/file.py +5 -19
  55. oscura/hardware/acquisition/saleae.py +10 -10
  56. oscura/hardware/acquisition/socketcan.py +4 -6
  57. oscura/hardware/acquisition/synthetic.py +1 -5
  58. oscura/hardware/acquisition/visa.py +6 -6
  59. oscura/hardware/security/side_channel_detector.py +5 -508
  60. oscura/inference/message_format.py +686 -1
  61. oscura/jupyter/display.py +2 -2
  62. oscura/jupyter/magic.py +3 -3
  63. oscura/loaders/__init__.py +17 -12
  64. oscura/loaders/binary.py +1 -1
  65. oscura/loaders/chipwhisperer.py +1 -2
  66. oscura/loaders/configurable.py +1 -1
  67. oscura/loaders/csv_loader.py +2 -2
  68. oscura/loaders/hdf5_loader.py +1 -1
  69. oscura/loaders/lazy.py +6 -1
  70. oscura/loaders/mmap_loader.py +0 -1
  71. oscura/loaders/numpy_loader.py +8 -7
  72. oscura/loaders/preprocessing.py +3 -5
  73. oscura/loaders/rigol.py +21 -7
  74. oscura/loaders/sigrok.py +2 -5
  75. oscura/loaders/tdms.py +3 -2
  76. oscura/loaders/tektronix.py +38 -32
  77. oscura/loaders/tss.py +20 -27
  78. oscura/loaders/validation.py +17 -10
  79. oscura/loaders/vcd.py +13 -8
  80. oscura/loaders/wav.py +1 -6
  81. oscura/pipeline/__init__.py +76 -0
  82. oscura/pipeline/handlers/__init__.py +165 -0
  83. oscura/pipeline/handlers/analyzers.py +1045 -0
  84. oscura/pipeline/handlers/decoders.py +899 -0
  85. oscura/pipeline/handlers/exporters.py +1103 -0
  86. oscura/pipeline/handlers/filters.py +891 -0
  87. oscura/pipeline/handlers/loaders.py +640 -0
  88. oscura/pipeline/handlers/transforms.py +768 -0
  89. oscura/reporting/formatting/measurements.py +55 -14
  90. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  91. oscura/sessions/legacy.py +49 -1
  92. oscura/side_channel/__init__.py +38 -57
  93. oscura/utils/builders/signal_builder.py +5 -5
  94. oscura/utils/comparison/compare.py +7 -9
  95. oscura/utils/comparison/golden.py +1 -1
  96. oscura/utils/filtering/convenience.py +2 -2
  97. oscura/utils/math/arithmetic.py +38 -62
  98. oscura/utils/math/interpolation.py +20 -20
  99. oscura/utils/pipeline/__init__.py +4 -17
  100. oscura/utils/progressive.py +1 -4
  101. oscura/utils/triggering/edge.py +1 -1
  102. oscura/utils/triggering/pattern.py +2 -2
  103. oscura/utils/triggering/pulse.py +2 -2
  104. oscura/utils/triggering/window.py +3 -3
  105. oscura/validation/hil_testing.py +11 -11
  106. oscura/visualization/__init__.py +46 -284
  107. oscura/visualization/batch.py +72 -433
  108. oscura/visualization/plot.py +542 -53
  109. oscura/visualization/styles.py +184 -318
  110. oscura/workflows/batch/advanced.py +1 -1
  111. oscura/workflows/batch/aggregate.py +12 -9
  112. oscura/workflows/complete_re.py +251 -23
  113. oscura/workflows/digital.py +27 -4
  114. oscura/workflows/multi_trace.py +136 -17
  115. oscura/workflows/waveform.py +11 -6
  116. oscura-0.11.0.dist-info/METADATA +460 -0
  117. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/RECORD +120 -145
  118. oscura/side_channel/dpa.py +0 -1025
  119. oscura/utils/optimization/__init__.py +0 -19
  120. oscura/utils/optimization/parallel.py +0 -443
  121. oscura/utils/optimization/search.py +0 -532
  122. oscura/utils/pipeline/base.py +0 -338
  123. oscura/utils/pipeline/composition.py +0 -248
  124. oscura/utils/pipeline/parallel.py +0 -449
  125. oscura/utils/pipeline/pipeline.py +0 -375
  126. oscura/utils/search/__init__.py +0 -16
  127. oscura/utils/search/anomaly.py +0 -424
  128. oscura/utils/search/context.py +0 -294
  129. oscura/utils/search/pattern.py +0 -288
  130. oscura/utils/storage/__init__.py +0 -61
  131. oscura/utils/storage/database.py +0 -1166
  132. oscura/visualization/accessibility.py +0 -526
  133. oscura/visualization/annotations.py +0 -371
  134. oscura/visualization/axis_scaling.py +0 -305
  135. oscura/visualization/colors.py +0 -451
  136. oscura/visualization/digital.py +0 -436
  137. oscura/visualization/eye.py +0 -571
  138. oscura/visualization/histogram.py +0 -281
  139. oscura/visualization/interactive.py +0 -1035
  140. oscura/visualization/jitter.py +0 -1042
  141. oscura/visualization/keyboard.py +0 -394
  142. oscura/visualization/layout.py +0 -400
  143. oscura/visualization/optimization.py +0 -1079
  144. oscura/visualization/palettes.py +0 -446
  145. oscura/visualization/power.py +0 -508
  146. oscura/visualization/power_extended.py +0 -955
  147. oscura/visualization/presets.py +0 -469
  148. oscura/visualization/protocols.py +0 -1246
  149. oscura/visualization/render.py +0 -223
  150. oscura/visualization/rendering.py +0 -444
  151. oscura/visualization/reverse_engineering.py +0 -838
  152. oscura/visualization/signal_integrity.py +0 -989
  153. oscura/visualization/specialized.py +0 -643
  154. oscura/visualization/spectral.py +0 -1226
  155. oscura/visualization/thumbnails.py +0 -340
  156. oscura/visualization/time_axis.py +0 -351
  157. oscura/visualization/waveform.py +0 -454
  158. oscura-0.8.0.dist-info/METADATA +0 -661
  159. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/WHEEL +0 -0
  160. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/entry_points.txt +0 -0
  161. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/licenses/LICENSE +0 -0
oscura/utils/search/anomaly.py (removed in 0.11.0)
@@ -1,424 +0,0 @@
- """Anomaly detection in signal traces.
-
- This module provides automated detection of glitches, timing violations,
- and protocol errors with context extraction for debugging.
- """
-
- from __future__ import annotations
-
- from typing import Any
-
- import numpy as np
- from numpy.typing import NDArray
-
-
- def find_anomalies(
-     trace: NDArray[np.float64],
-     anomaly_type: str = "glitch",
-     *,
-     threshold: float | None = None,
-     min_width: float | None = None,
-     max_width: float | None = None,
-     sample_rate: float | None = None,
-     context_samples: int = 100,
-     **kwargs: Any,
- ) -> list[dict[str, Any]]:
-     """Find glitches, timing violations, or protocol errors in traces.
-
-     Anomaly detection with context extraction.
-     Integrates with QUAL-005 glitch detection for signal quality analysis.
-
-     Args:
-         trace: Input signal trace
-         anomaly_type: Type of anomaly to detect:
-             - 'glitch': Short-duration voltage spikes/dips
-             - 'timing': Edge timing violations (requires sample_rate)
-             - 'protocol': Protocol-level errors (requires decoded data)
-         threshold: Detection threshold. Meaning depends on anomaly_type:
-             - glitch: Voltage deviation from expected level
-             - timing: Timing violation threshold in seconds
-         min_width: Minimum anomaly width in seconds (requires sample_rate)
-         max_width: Maximum anomaly width in seconds (requires sample_rate)
-         sample_rate: Sample rate in Hz (required for timing analysis)
-         context_samples: Number of samples to include before/after anomaly
-             for context extraction (default: 100)
-         **kwargs: Additional type-specific parameters
-
-     Returns:
-         List of anomaly dictionaries, each containing:
-             - index: Sample index where anomaly occurs
-             - type: Anomaly type
-             - severity: Severity score (0-1, higher is worse)
-             - duration: Duration in samples
-             - amplitude: Amplitude deviation (for glitches)
-             - context: ±context_samples around anomaly
-             - description: Human-readable description
-
-     Raises:
-         ValueError: If invalid anomaly_type or missing required parameters
-
-     Examples:
-         >>> # Detect voltage glitches
-         >>> trace = np.array([0, 0, 0, 0.8, 0, 0, 0])  # Spike at index 3
-         >>> anomalies = find_anomalies(
-         ...     trace,
-         ...     anomaly_type='glitch',
-         ...     threshold=0.5,
-         ...     sample_rate=1e6
-         ... )
-         >>> print(f"Found {len(anomalies)} glitches")
-
-         >>> # Detect timing violations
-         >>> anomalies = find_anomalies(
-         ...     trace,
-         ...     anomaly_type='timing',
-         ...     min_width=10e-9,  # 10 ns minimum
-         ...     max_width=100e-9,  # 100 ns maximum
-         ...     sample_rate=1e9
-         ... )
-
-     Notes:
-         - Glitch detection uses derivative and threshold methods
-         - Timing detection requires sample_rate for width calculations
-         - Context extraction handles edge cases at trace boundaries
-         - Integrates with QUAL-005 for comprehensive signal quality analysis
-
-     References:
-         SRCH-002: Anomaly Search
-         QUAL-005: Glitch Detection
-     """
-     if trace.size == 0:
-         return []
-
-     valid_types = {"glitch", "timing", "protocol"}
-     if anomaly_type not in valid_types:
-         raise ValueError(f"Invalid anomaly_type '{anomaly_type}'. Must be one of: {valid_types}")
-
-     return _dispatch_anomaly_detection(
-         trace, anomaly_type, threshold, min_width, max_width, sample_rate, context_samples
-     )
-
-
- def _dispatch_anomaly_detection(
-     trace: NDArray[np.float64],
-     anomaly_type: str,
-     threshold: float | None,
-     min_width: float | None,
-     max_width: float | None,
-     sample_rate: float | None,
-     context_samples: int,
- ) -> list[dict[str, Any]]:
-     """Dispatch to appropriate anomaly detection method."""
-     if anomaly_type == "glitch":
-         return _detect_glitches(
-             trace, threshold, min_width, max_width, sample_rate, context_samples
-         )
-
-     if anomaly_type == "timing":
-         if sample_rate is None:
-             raise ValueError("sample_rate required for timing anomaly detection")
-         return _detect_timing_violations(trace, sample_rate, min_width, max_width, context_samples)
-
-     # Protocol error detection would integrate with protocol decoders
-     return []
-
-
- def _detect_glitches(
-     trace: NDArray[np.float64],
-     threshold: float | None,
-     min_width: float | None,
-     max_width: float | None,
-     sample_rate: float | None,
-     context_samples: int,
- ) -> list[dict[str, Any]]:
-     """Detect voltage glitches using derivative method.
-
-     Uses median absolute deviation (MAD) for robust auto-thresholding,
-     groups consecutive derivative spikes, and filters by duration.
-
-     Example:
-         >>> trace = np.array([1.0, 1.0, 5.0, 1.0, 1.0])  # Glitch at index 2
-         >>> glitches = _detect_glitches(trace, None, None, None, 1000.0, 5)
-         >>> len(glitches) > 0
-         True
-     """
-     if len(trace) < 2:
-         return []
-
-     # Compute derivative to find rapid changes
-     abs_derivative = np.abs(np.diff(trace))
-
-     # Determine threshold
-     threshold_value = _compute_glitch_threshold(threshold, abs_derivative)
-
-     # Find glitch candidate points
-     glitch_candidates = np.where(abs_derivative > threshold_value)[0]
-     if len(glitch_candidates) == 0:
-         return []
-
-     # Group consecutive points into glitch events
-     glitch_groups = _group_consecutive_indices(glitch_candidates)
-
-     # Compute baseline once for performance
-     baseline = _compute_baseline(trace)
-
-     # Convert groups to glitch dictionaries
-     return _build_glitch_results(
-         glitch_groups,
-         trace,
-         baseline,
-         threshold_value,
-         min_width,
-         max_width,
-         sample_rate,
-         context_samples,
-     )
-
-
- def _compute_glitch_threshold(
-     threshold: float | None, abs_derivative: NDArray[np.float64]
- ) -> float:
-     """Compute threshold for glitch detection using MAD.
-
-     Args:
-         threshold: User-provided threshold, or None for auto-threshold.
-         abs_derivative: Absolute derivative of signal.
-
-     Returns:
-         Threshold value for glitch detection.
-     """
-     if threshold is not None:
-         return threshold
-
-     # Use median absolute deviation (MAD) for robust auto-thresholding
-     median_deriv = np.median(abs_derivative)
-     mad = np.median(np.abs(abs_derivative - median_deriv))
-
-     # Convert MAD to equivalent std (1.4826 is the constant for normal distribution)
-     if mad > 0:
-         return float(median_deriv + 3 * 1.4826 * mad)
-
-     # Fallback: use 75th percentile to avoid catching glitches in threshold
-     p75 = np.percentile(abs_derivative, 75)
-     if p75 > 0:
-         return float(p75)
-
-     # Last resort: use any non-zero derivative
-     return 0.0
-
-
- def _group_consecutive_indices(indices: NDArray[np.int64]) -> list[list[int]]:
-     """Group consecutive indices into separate lists.
-
-     Args:
-         indices: Sorted array of indices.
-
-     Returns:
-         List of groups, where each group contains consecutive indices.
-
-     Example:
-         >>> indices = np.array([1, 2, 3, 7, 8, 10])
-         >>> _group_consecutive_indices(indices)
-         [[1, 2, 3], [7, 8], [10]]
-     """
-     if len(indices) == 0:
-         return []
-
-     groups = []
-     current_group = [int(indices[0])]
-
-     for idx in indices[1:]:
-         if idx == current_group[-1] + 1:
-             current_group.append(int(idx))
-         else:
-             groups.append(current_group)
-             current_group = [int(idx)]
-
-     if current_group:
-         groups.append(current_group)
-
-     return groups
-
-
- def _compute_baseline(trace: NDArray[np.float64]) -> float:
-     """Compute baseline value using median.
-
-     For very large arrays (>1M samples), uses percentile approximation
-     for performance.
-
-     Args:
-         trace: Signal trace.
-
-     Returns:
-         Baseline value (median).
-     """
-     if len(trace) > 1_000_000:
-         # Fast approximation: 50th percentile with linear interpolation
-         return float(np.percentile(trace, 50, method="linear"))
-     return float(np.median(trace))
-
-
- def _build_glitch_results(
-     glitch_groups: list[list[int]],
-     trace: NDArray[np.float64],
-     baseline: float,
-     threshold_value: float,
-     min_width: float | None,
-     max_width: float | None,
-     sample_rate: float | None,
-     context_samples: int,
- ) -> list[dict[str, Any]]:
-     """Build glitch result dictionaries from detected groups.
-
-     Args:
-         glitch_groups: Groups of consecutive glitch indices.
-         trace: Original signal trace.
-         baseline: Signal baseline value.
-         threshold_value: Detection threshold.
-         min_width: Minimum glitch duration (seconds), or None.
-         max_width: Maximum glitch duration (seconds), or None.
-         sample_rate: Sample rate (Hz), or None.
-         context_samples: Number of context samples to include.
-
-     Returns:
-         List of glitch dictionaries with metadata.
-     """
-     glitches: list[dict[str, Any]] = []
-
-     for group in glitch_groups:
-         start_idx = group[0]
-         end_idx = group[-1] + 1
-         duration_samples = end_idx - start_idx
-
-         # Check width constraints
-         if not _check_width_constraints(duration_samples, min_width, max_width, sample_rate):
-             continue
-
-         # Extract context
-         ctx_start = max(0, start_idx - context_samples)
-         ctx_end = min(len(trace), end_idx + context_samples)
-         context = trace[ctx_start:ctx_end].copy()
-
-         # Compute amplitude deviation
-         amplitude = np.max(np.abs(trace[start_idx:end_idx] - baseline))
-
-         # Severity: normalized amplitude
-         severity = min(1.0, amplitude / (threshold_value * 3))
-
-         glitches.append(
-             {
-                 "index": start_idx,
-                 "type": "glitch",
-                 "severity": float(severity),
-                 "duration": duration_samples,
-                 "amplitude": float(amplitude),
-                 "context": context,
-                 "description": f"Glitch at sample {start_idx}, amplitude {amplitude:.3g}",
-             }
-         )
-
-     return glitches
-
-
- def _check_width_constraints(
-     duration_samples: int,
-     min_width: float | None,
-     max_width: float | None,
-     sample_rate: float | None,
- ) -> bool:
-     """Check if glitch duration meets width constraints.
-
-     Args:
-         duration_samples: Glitch duration in samples.
-         min_width: Minimum duration (seconds), or None.
-         max_width: Maximum duration (seconds), or None.
-         sample_rate: Sample rate (Hz), or None.
-
-     Returns:
-         True if glitch meets constraints, False otherwise.
-     """
-     if sample_rate is None:
-         return True
-
-     duration_seconds = duration_samples / sample_rate
-
-     if min_width is not None and duration_seconds < min_width:
-         return False
-
-     return not (max_width is not None and duration_seconds > max_width)
-
-
- def _detect_timing_violations(
-     trace: NDArray[np.float64],
-     sample_rate: float,
-     min_width: float | None,
-     max_width: float | None,
-     context_samples: int,
- ) -> list[dict[str, Any]]:
-     """Detect timing violations (pulse width violations)."""
-     violations = []
-
-     # Simple threshold for digital signal
-     threshold = (np.max(trace) + np.min(trace)) / 2
-     digital = (trace >= threshold).astype(int)
-
-     # Find edges
-     edges = np.diff(digital)
-     rising_edges = np.where(edges == 1)[0]
-     falling_edges = np.where(edges == -1)[0]
-
-     # Measure pulse widths
-     for rise in rising_edges:
-         # Find next falling edge
-         next_fall = falling_edges[falling_edges > rise]
-         if len(next_fall) == 0:
-             continue
-
-         fall = next_fall[0]
-         pulse_width_samples = fall - rise
-         pulse_width_seconds = pulse_width_samples / sample_rate
-
-         # Check violations
-         violated = False
-         violation_type = ""
-
-         if min_width is not None and pulse_width_seconds < min_width:
-             violated = True
-             violation_type = "too_short"
-
-         if max_width is not None and pulse_width_seconds > max_width:
-             violated = True
-             violation_type = "too_long"
-
-         if violated:
-             # Extract context
-             ctx_start = max(0, rise - context_samples)
-             ctx_end = min(len(trace), fall + context_samples)
-             context = trace[ctx_start:ctx_end].copy()
-
-             # Severity based on deviation
-             if min_width is not None:
-                 deviation = abs(pulse_width_seconds - min_width) / min_width
-             elif max_width is not None:
-                 deviation = abs(pulse_width_seconds - max_width) / max_width
-             else:
-                 deviation = 0.0
-
-             severity = min(1.0, deviation)
-
-             violations.append(
-                 {
-                     "index": rise,
-                     "type": f"timing_{violation_type}",
-                     "severity": float(severity),
-                     "duration": pulse_width_samples,
-                     "amplitude": float(pulse_width_seconds),
-                     "context": context,
-                     "description": (
-                         f"Timing violation at sample {rise}: "
-                         f"pulse width {pulse_width_seconds * 1e9:.1f} ns ({violation_type})"
-                     ),
-                 }
-             )
-
-     return violations
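
The removed module is a small, self-contained NumPy utility, so code that depended on it can be vendored forward. For reference, `find_anomalies` was its only public entry point; everything else above is a private helper. A minimal usage sketch against the 0.8.0 API as documented in the docstring above (the `oscura.utils.search.anomaly` import path is inferred from the file path in this diff, and the module is gone in 0.11.0, so any such code must be migrated before upgrading):

    import numpy as np

    from oscura.utils.search.anomaly import find_anomalies  # removed in 0.11.0

    # Flat trace with a single spike, mirroring the docstring example.
    trace = np.array([0.0, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0])

    # With an explicit threshold, any sample-to-sample |derivative| above 0.5
    # becomes a glitch candidate; omit threshold to get the MAD auto-threshold
    # (median + 3 * 1.4826 * MAD of the absolute derivative, per the code above).
    glitches = find_anomalies(
        trace,
        anomaly_type="glitch",
        threshold=0.5,
        sample_rate=1e6,    # Hz; also enables min_width/max_width filtering
        context_samples=2,  # samples kept on each side of the event
    )
    for g in glitches:
        print(g["index"], f"{g['severity']:.2f}", g["description"])

Per `_build_glitch_results` above, severity is the event's peak deviation from the median baseline, normalized by three times the detection threshold and clipped to 1.0, so a severity of 1.0 means the excursion is at least 3x the threshold.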
oscura/utils/search/context.py (removed in 0.11.0)
@@ -1,294 +0,0 @@
- """Context extraction around points of interest.
-
-
- This module provides efficient extraction of signal context around
- events, maintaining original time references for debugging workflows.
- """
-
- from typing import Any
-
- import numpy as np
- from numpy.typing import NDArray
-
-
- def extract_context(
-     trace: NDArray[np.float64],
-     index: int | list[int] | NDArray[np.int_],
-     *,
-     before: int = 100,
-     after: int = 100,
-     sample_rate: float | None = None,
-     include_metadata: bool = True,
- ) -> dict[str, Any] | list[dict[str, Any]]:
-     """Extract signal context around a point of interest.
-
-     SRCH-003: Context extraction with time reference preservation.
-     Supports batch extraction for multiple indices and optional protocol data.
-
-     Args:
-         trace: Input signal trace
-         index: Sample index or list of indices to extract context around.
-             Can be int, list of ints, or numpy array.
-         before: Number of samples to include before index (default: 100)
-         after: Number of samples to include after index (default: 100)
-         sample_rate: Optional sample rate in Hz for time calculations
-         include_metadata: Include metadata dict with context info (default: True)
-
-     Returns:
-         If index is scalar: Single context dictionary
-         If index is list/array: List of context dictionaries
-
-         Each context dictionary contains:
-             - data: Extracted sub-trace array
-             - start_index: Starting index in original trace
-             - end_index: Ending index in original trace
-             - center_index: Center index (original query index)
-             - time_reference: Time offset if sample_rate provided
-             - length: Number of samples in context
-
-     Raises:
-         ValueError: If index is out of bounds
-         ValueError: If before or after are negative
-
-     Examples:
-         >>> # Extract context around a glitch
-         >>> trace = np.random.randn(1000)
-         >>> glitch_index = 500
-         >>> context = extract_context(
-         ...     trace,
-         ...     glitch_index,
-         ...     before=50,
-         ...     after=50,
-         ...     sample_rate=1e6
-         ... )
-         >>> print(f"Context length: {len(context['data'])}")
-         >>> print(f"Time reference: {context['time_reference']*1e6:.2f} µs")
-
-         >>> # Batch extraction for multiple events
-         >>> event_indices = [100, 200, 300]
-         >>> contexts = extract_context(
-         ...     trace,
-         ...     event_indices,
-         ...     before=25,
-         ...     after=25
-         ... )
-         >>> print(f"Extracted {len(contexts)} contexts")
-
-     Notes:
-         - Handles edge cases at trace boundaries automatically
-         - Context may be shorter than before+after at boundaries
-         - Time reference is relative to start of extracted context
-         - Original trace is not modified
-
-     References:
-         SRCH-003: Context Extraction
-     """
-     # Phase 1: Input validation
-     _validate_context_params(before, after, trace)
-
-     # Phase 2: Normalize indices
-     indices, return_single = _normalize_indices(index, trace)
-
-     # Phase 3: Extract contexts
-     contexts = [
-         _extract_single_context(trace, idx, before, after, sample_rate, include_metadata)
-         for idx in indices
-     ]
-
-     # Return single context or list
-     return contexts[0] if return_single else contexts
-
-
- def _validate_context_params(before: int, after: int, trace: NDArray[np.float64]) -> None:
-     """Validate context extraction parameters.
-
-     Args:
-         before: Samples before index.
-         after: Samples after index.
-         trace: Input trace.
-
-     Raises:
-         ValueError: If parameters are invalid.
-
-     Example:
-         >>> trace = np.array([1.0, 2.0, 3.0])
-         >>> _validate_context_params(10, 10, trace)
-         >>> _validate_context_params(-1, 10, trace)
-         Traceback (most recent call last):
-         ValueError: before and after must be non-negative
-     """
-     if before < 0 or after < 0:
-         raise ValueError("before and after must be non-negative")
-
-     if trace.size == 0:
-         raise ValueError("Trace cannot be empty")
-
-
- def _normalize_indices(
-     index: int | list[int] | NDArray[np.int_], trace: NDArray[np.float64]
- ) -> tuple[list[int], bool]:
-     """Normalize index input to list of integers.
-
-     Args:
-         index: Input index (int, list, or array).
-         trace: Trace to validate against.
-
-     Returns:
-         Tuple of (normalized_indices, return_single_flag).
-
-     Raises:
-         ValueError: If any index is out of bounds.
-
-     Example:
-         >>> trace = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
-         >>> _normalize_indices(2, trace)
-         ([2], True)
-         >>> _normalize_indices([1, 3], trace)
-         ([1, 3], False)
-     """
-     # Handle single index vs multiple indices
-     if isinstance(index, int | np.integer):
-         indices = [int(index)]
-         return_single = True
-     else:
-         indices = [int(i) for i in index]
-         return_single = False
-
-     # Validate indices
-     for idx in indices:
-         if idx < 0 or idx >= len(trace):
-             raise ValueError(f"Index {idx} out of bounds for trace of length {len(trace)}")
-
-     return indices, return_single
-
-
- def _extract_single_context(
-     trace: NDArray[np.float64],
-     idx: int,
-     before: int,
-     after: int,
-     sample_rate: float | None,
-     include_metadata: bool,
- ) -> dict[str, Any]:
-     """Extract context for a single index.
-
-     Args:
-         trace: Input signal trace.
-         idx: Center index to extract around.
-         before: Samples before index.
-         after: Samples after index.
-         sample_rate: Optional sample rate.
-         include_metadata: Include metadata dict.
-
-     Returns:
-         Context dictionary with extracted data and metadata.
-
-     Example:
-         >>> trace = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
-         >>> ctx = _extract_single_context(trace, 2, 1, 1, None, False)
-         >>> ctx['data']
-         array([2., 3., 4.])
-     """
-     # Calculate window bounds with boundary handling
-     start_idx, end_idx = _calculate_window_bounds(idx, before, after, len(trace))
-
-     # Extract data
-     data = trace[start_idx:end_idx].copy()
-
-     # Build context dictionary
-     context: dict[str, Any] = {
-         "data": data,
-         "start_index": start_idx,
-         "end_index": end_idx,
-         "center_index": idx,
-         "length": len(data),
-     }
-
-     # Add time information if requested
-     if sample_rate is not None:
-         _add_time_information(context, start_idx, sample_rate, len(data))
-
-     # Add metadata if requested
-     if include_metadata:
-         _add_boundary_metadata(context, idx, start_idx, end_idx, len(trace))
-
-     return context
-
-
- def _calculate_window_bounds(idx: int, before: int, after: int, trace_len: int) -> tuple[int, int]:
-     """Calculate window boundaries with edge handling.
-
-     Args:
-         idx: Center index.
-         before: Samples before center.
-         after: Samples after center.
-         trace_len: Length of trace.
-
-     Returns:
-         Tuple of (start_idx, end_idx).
-
-     Example:
-         >>> _calculate_window_bounds(50, 10, 10, 100)
-         (40, 61)
-         >>> _calculate_window_bounds(5, 10, 10, 100)
-         (0, 16)
-     """
-     start_idx = max(0, idx - before)
-     end_idx = min(trace_len, idx + after + 1)
-     return start_idx, end_idx
-
-
- def _add_time_information(
-     context: dict[str, Any], start_idx: int, sample_rate: float, data_len: int
- ) -> None:
-     """Add time reference information to context.
-
-     Args:
-         context: Context dictionary to update.
-         start_idx: Start index in original trace.
-         sample_rate: Sample rate in Hz.
-         data_len: Length of extracted data.
-
-     Example:
-         >>> ctx = {}
-         >>> _add_time_information(ctx, 100, 1e6, 50)
-         >>> ctx['time_reference']
-         0.0001
-         >>> ctx['sample_rate']
-         1000000.0
-     """
-     time_offset = start_idx / sample_rate
-     context["time_reference"] = time_offset
-     context["sample_rate"] = sample_rate
-
-     # Time array for the context
-     dt = 1.0 / sample_rate
-     context["time_array"] = np.arange(data_len) * dt + time_offset
-
-
- def _add_boundary_metadata(
-     context: dict[str, Any], idx: int, start_idx: int, end_idx: int, trace_len: int
- ) -> None:
-     """Add boundary metadata to context.
-
-     Args:
-         context: Context dictionary to update.
-         idx: Center index.
-         start_idx: Window start index.
-         end_idx: Window end index.
-         trace_len: Total trace length.
-
-     Example:
-         >>> ctx = {}
-         >>> _add_boundary_metadata(ctx, 5, 0, 11, 100)
-         >>> ctx['metadata']['at_start_boundary']
-         True
-         >>> ctx['metadata']['samples_before']
-         5
-     """
-     context["metadata"] = {
-         "samples_before": idx - start_idx,
-         "samples_after": end_idx - idx - 1,
-         "at_start_boundary": start_idx == 0,
-         "at_end_boundary": end_idx == trace_len,
-     }
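
`extract_context` was likewise the single public entry point of the removed `context.py`. A sketch of both calling modes under the same caveat (import path inferred from the file path in this diff; the module is gone in 0.11.0):

    import numpy as np

    from oscura.utils.search.context import extract_context  # removed in 0.11.0

    rng = np.random.default_rng(0)
    trace = rng.standard_normal(1000)

    # A scalar index returns one dict. The window here is trace[450:551]:
    # `before` + center + `after` samples, clipped at the trace boundaries
    # rather than padded.
    ctx = extract_context(trace, 500, before=50, after=50, sample_rate=1e6)
    print(ctx["start_index"], ctx["end_index"], ctx["length"])  # 450 551 101
    print(f"window starts {ctx['time_reference'] * 1e6:.0f} us into the capture")

    # A list (or array) of indices returns a list of dicts in the same order.
    contexts = extract_context(trace, [100, 200, 300], before=25, after=25)
    print(len(contexts), contexts[0]["metadata"]["samples_before"])  # 3 25

Because `time_reference` is derived as `start_index / sample_rate`, the returned `time_array` stays aligned with the original capture, which is what makes the extracted snippets usable for debugging without recomputing offsets.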