oscura-0.7.0-py3-none-any.whl → oscura-0.10.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175)
  1. oscura/__init__.py +19 -19
  2. oscura/analyzers/__init__.py +2 -0
  3. oscura/analyzers/digital/extraction.py +2 -3
  4. oscura/analyzers/digital/quality.py +1 -1
  5. oscura/analyzers/digital/timing.py +1 -1
  6. oscura/analyzers/eye/__init__.py +5 -1
  7. oscura/analyzers/eye/generation.py +501 -0
  8. oscura/analyzers/jitter/__init__.py +6 -6
  9. oscura/analyzers/jitter/timing.py +419 -0
  10. oscura/analyzers/patterns/__init__.py +94 -0
  11. oscura/analyzers/patterns/reverse_engineering.py +991 -0
  12. oscura/analyzers/power/__init__.py +35 -12
  13. oscura/analyzers/power/basic.py +3 -3
  14. oscura/analyzers/power/soa.py +1 -1
  15. oscura/analyzers/power/switching.py +3 -3
  16. oscura/analyzers/signal_classification.py +529 -0
  17. oscura/analyzers/signal_integrity/sparams.py +3 -3
  18. oscura/analyzers/statistics/__init__.py +4 -0
  19. oscura/analyzers/statistics/basic.py +152 -0
  20. oscura/analyzers/statistics/correlation.py +47 -6
  21. oscura/analyzers/validation.py +1 -1
  22. oscura/analyzers/waveform/__init__.py +2 -0
  23. oscura/analyzers/waveform/measurements.py +329 -163
  24. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  25. oscura/analyzers/waveform/spectral.py +498 -54
  26. oscura/api/dsl/commands.py +15 -6
  27. oscura/api/server/templates/base.html +137 -146
  28. oscura/api/server/templates/export.html +84 -110
  29. oscura/api/server/templates/home.html +248 -267
  30. oscura/api/server/templates/protocols.html +44 -48
  31. oscura/api/server/templates/reports.html +27 -35
  32. oscura/api/server/templates/session_detail.html +68 -78
  33. oscura/api/server/templates/sessions.html +62 -72
  34. oscura/api/server/templates/waveforms.html +54 -64
  35. oscura/automotive/__init__.py +1 -1
  36. oscura/automotive/can/session.py +1 -1
  37. oscura/automotive/dbc/generator.py +638 -23
  38. oscura/automotive/dtc/data.json +102 -17
  39. oscura/automotive/uds/decoder.py +99 -6
  40. oscura/cli/analyze.py +8 -2
  41. oscura/cli/batch.py +36 -5
  42. oscura/cli/characterize.py +18 -4
  43. oscura/cli/export.py +47 -5
  44. oscura/cli/main.py +2 -0
  45. oscura/cli/onboarding/wizard.py +10 -6
  46. oscura/cli/pipeline.py +585 -0
  47. oscura/cli/visualize.py +6 -4
  48. oscura/convenience.py +400 -32
  49. oscura/core/config/loader.py +0 -1
  50. oscura/core/measurement_result.py +286 -0
  51. oscura/core/progress.py +1 -1
  52. oscura/core/schemas/device_mapping.json +8 -2
  53. oscura/core/schemas/packet_format.json +24 -4
  54. oscura/core/schemas/protocol_definition.json +12 -2
  55. oscura/core/types.py +300 -199
  56. oscura/correlation/multi_protocol.py +1 -1
  57. oscura/export/legacy/__init__.py +11 -0
  58. oscura/export/legacy/wav.py +75 -0
  59. oscura/exporters/__init__.py +19 -0
  60. oscura/exporters/wireshark.py +809 -0
  61. oscura/hardware/acquisition/file.py +5 -19
  62. oscura/hardware/acquisition/saleae.py +10 -10
  63. oscura/hardware/acquisition/socketcan.py +4 -6
  64. oscura/hardware/acquisition/synthetic.py +1 -5
  65. oscura/hardware/acquisition/visa.py +6 -6
  66. oscura/hardware/security/side_channel_detector.py +5 -508
  67. oscura/inference/message_format.py +686 -1
  68. oscura/jupyter/display.py +2 -2
  69. oscura/jupyter/magic.py +3 -3
  70. oscura/loaders/__init__.py +17 -12
  71. oscura/loaders/binary.py +1 -1
  72. oscura/loaders/chipwhisperer.py +1 -2
  73. oscura/loaders/configurable.py +1 -1
  74. oscura/loaders/csv_loader.py +2 -2
  75. oscura/loaders/hdf5_loader.py +1 -1
  76. oscura/loaders/lazy.py +6 -1
  77. oscura/loaders/mmap_loader.py +0 -1
  78. oscura/loaders/numpy_loader.py +8 -7
  79. oscura/loaders/preprocessing.py +3 -5
  80. oscura/loaders/rigol.py +21 -7
  81. oscura/loaders/sigrok.py +2 -5
  82. oscura/loaders/tdms.py +3 -2
  83. oscura/loaders/tektronix.py +38 -32
  84. oscura/loaders/tss.py +20 -27
  85. oscura/loaders/vcd.py +13 -8
  86. oscura/loaders/wav.py +1 -6
  87. oscura/pipeline/__init__.py +76 -0
  88. oscura/pipeline/handlers/__init__.py +165 -0
  89. oscura/pipeline/handlers/analyzers.py +1045 -0
  90. oscura/pipeline/handlers/decoders.py +899 -0
  91. oscura/pipeline/handlers/exporters.py +1103 -0
  92. oscura/pipeline/handlers/filters.py +891 -0
  93. oscura/pipeline/handlers/loaders.py +640 -0
  94. oscura/pipeline/handlers/transforms.py +768 -0
  95. oscura/reporting/__init__.py +88 -1
  96. oscura/reporting/automation.py +348 -0
  97. oscura/reporting/citations.py +374 -0
  98. oscura/reporting/core.py +54 -0
  99. oscura/reporting/formatting/__init__.py +11 -0
  100. oscura/reporting/formatting/measurements.py +320 -0
  101. oscura/reporting/html.py +57 -0
  102. oscura/reporting/interpretation.py +431 -0
  103. oscura/reporting/summary.py +329 -0
  104. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  105. oscura/reporting/visualization.py +542 -0
  106. oscura/side_channel/__init__.py +38 -57
  107. oscura/utils/builders/signal_builder.py +5 -5
  108. oscura/utils/comparison/compare.py +7 -9
  109. oscura/utils/comparison/golden.py +1 -1
  110. oscura/utils/filtering/convenience.py +2 -2
  111. oscura/utils/math/arithmetic.py +38 -62
  112. oscura/utils/math/interpolation.py +20 -20
  113. oscura/utils/pipeline/__init__.py +4 -17
  114. oscura/utils/progressive.py +1 -4
  115. oscura/utils/triggering/edge.py +1 -1
  116. oscura/utils/triggering/pattern.py +2 -2
  117. oscura/utils/triggering/pulse.py +2 -2
  118. oscura/utils/triggering/window.py +3 -3
  119. oscura/validation/hil_testing.py +11 -11
  120. oscura/visualization/__init__.py +47 -284
  121. oscura/visualization/batch.py +160 -0
  122. oscura/visualization/plot.py +542 -53
  123. oscura/visualization/styles.py +184 -318
  124. oscura/workflows/__init__.py +2 -0
  125. oscura/workflows/batch/advanced.py +1 -1
  126. oscura/workflows/batch/aggregate.py +7 -8
  127. oscura/workflows/complete_re.py +251 -23
  128. oscura/workflows/digital.py +27 -4
  129. oscura/workflows/multi_trace.py +136 -17
  130. oscura/workflows/waveform.py +788 -0
  131. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/METADATA +59 -79
  132. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/RECORD +135 -149
  133. oscura/side_channel/dpa.py +0 -1025
  134. oscura/utils/optimization/__init__.py +0 -19
  135. oscura/utils/optimization/parallel.py +0 -443
  136. oscura/utils/optimization/search.py +0 -532
  137. oscura/utils/pipeline/base.py +0 -338
  138. oscura/utils/pipeline/composition.py +0 -248
  139. oscura/utils/pipeline/parallel.py +0 -449
  140. oscura/utils/pipeline/pipeline.py +0 -375
  141. oscura/utils/search/__init__.py +0 -16
  142. oscura/utils/search/anomaly.py +0 -424
  143. oscura/utils/search/context.py +0 -294
  144. oscura/utils/search/pattern.py +0 -288
  145. oscura/utils/storage/__init__.py +0 -61
  146. oscura/utils/storage/database.py +0 -1166
  147. oscura/visualization/accessibility.py +0 -526
  148. oscura/visualization/annotations.py +0 -371
  149. oscura/visualization/axis_scaling.py +0 -305
  150. oscura/visualization/colors.py +0 -451
  151. oscura/visualization/digital.py +0 -436
  152. oscura/visualization/eye.py +0 -571
  153. oscura/visualization/histogram.py +0 -281
  154. oscura/visualization/interactive.py +0 -1035
  155. oscura/visualization/jitter.py +0 -1042
  156. oscura/visualization/keyboard.py +0 -394
  157. oscura/visualization/layout.py +0 -400
  158. oscura/visualization/optimization.py +0 -1079
  159. oscura/visualization/palettes.py +0 -446
  160. oscura/visualization/power.py +0 -508
  161. oscura/visualization/power_extended.py +0 -955
  162. oscura/visualization/presets.py +0 -469
  163. oscura/visualization/protocols.py +0 -1246
  164. oscura/visualization/render.py +0 -223
  165. oscura/visualization/rendering.py +0 -444
  166. oscura/visualization/reverse_engineering.py +0 -838
  167. oscura/visualization/signal_integrity.py +0 -989
  168. oscura/visualization/specialized.py +0 -643
  169. oscura/visualization/spectral.py +0 -1226
  170. oscura/visualization/thumbnails.py +0 -340
  171. oscura/visualization/time_axis.py +0 -351
  172. oscura/visualization/waveform.py +0 -454
  173. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/WHEEL +0 -0
  174. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/entry_points.txt +0 -0
  175. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/licenses/LICENSE +0 -0
oscura/visualization/render.py (deleted)
@@ -1,223 +0,0 @@
- """Visualization rendering functions for DPI-aware output.
-
- This module provides DPI-aware rendering configuration for adapting
- plot quality and parameters based on target output device (screen vs print).
-
-
- Example:
-     >>> from oscura.visualization.render import configure_dpi_rendering
-     >>> config = configure_dpi_rendering("publication")
-     >>> fig = plt.figure(dpi=config['dpi'], figsize=config['figsize'])
-
- References:
-     - matplotlib DPI scaling best practices
-     - Print quality standards (300-600 DPI)
- """
-
- from __future__ import annotations
-
- from typing import Any, Literal
-
- RenderPreset = Literal["screen", "print", "publication"]
-
-
- def configure_dpi_rendering(
-     preset: RenderPreset = "screen",
-     *,
-     custom_dpi: int | None = None,
-     dpi: int | None = None,
-     figsize: tuple[float, float] = (10, 6),
-     baseline_dpi: float = 96.0,
- ) -> dict[str, Any]:
-     """Configure DPI-aware rendering parameters.
-
-     Adapts plot rendering quality and parameters based on target DPI
-     for print (300-600 DPI) versus screen (72-96 DPI) with export presets.
-
-     Args:
-         preset: Rendering preset ("screen", "print", "publication").
-         custom_dpi: Custom DPI override (ignores preset).
-         dpi: Alias for custom_dpi.
-         figsize: Figure size in inches (width, height).
-         baseline_dpi: Baseline DPI for scaling calculations (default 96).
-
-     Returns:
-         Dictionary with rendering configuration:
-         - dpi: Target DPI
-         - figsize: Figure size
-         - font_scale: Font size scale factor
-         - line_scale: Line width scale factor
-         - marker_scale: Marker size scale factor
-         - antialias: Whether to enable anti-aliasing
-         - format: Recommended file format
-         - style_params: Additional matplotlib rcParams
-
-     Raises:
-         ValueError: If preset is invalid.
-
-     Example:
-         >>> config = configure_dpi_rendering("print")
-         >>> plt.rcParams.update(config['style_params'])
-         >>> fig = plt.figure(dpi=config['dpi'], figsize=config['figsize'])
-
-     References:
-         VIS-017: DPI-Aware Rendering
-     """
-     presets = _get_dpi_presets()
-
-     # Handle dpi alias and resolve preset
-     if dpi is not None and custom_dpi is None:
-         custom_dpi = dpi
-
-     if preset not in presets and custom_dpi is None:
-         raise ValueError(f"Invalid preset: {preset}. Must be one of {list(presets.keys())}")
-
-     # Get configuration and DPI
-     target_dpi, preset_config = _resolve_dpi_config(preset, custom_dpi, presets)
-
-     # Calculate scale factors
-     scale = target_dpi / baseline_dpi
-     font_scale = scale
-     line_scale = scale
-     marker_scale = scale
-
-     # Build style parameters
-     style_params = _build_style_params(
-         target_dpi, font_scale, line_scale, marker_scale, preset_config
-     )
-
-     _apply_antialias_settings(style_params, preset_config)
-
-     if preset == "publication":
-         _apply_publication_settings(style_params)
-
-     return {
-         "dpi": target_dpi,
-         "figsize": figsize,
-         "font_scale": font_scale,
-         "line_scale": line_scale,
-         "marker_scale": marker_scale,
-         "antialias": preset_config["antialias"],
-         "format": preset_config["format"],
-         "style_params": style_params,
-         "description": preset_config["description"],
-         "preset": preset if custom_dpi is None else "custom",
-     }
-
-
- def _get_dpi_presets() -> dict[str, dict[str, Any]]:
-     """Get DPI preset configurations."""
-     return {
-         "screen": {
-             "dpi": 96,
-             "font_family": "sans-serif",
-             "antialias": True,
-             "format": "png",
-             "description": "Screen display (96 DPI)",
-         },
-         "print": {
-             "dpi": 300,
-             "font_family": "sans-serif",
-             "antialias": False,
-             "format": "pdf",
-             "description": "Print output (300 DPI)",
-         },
-         "publication": {
-             "dpi": 600,
-             "font_family": "serif",
-             "antialias": False,
-             "format": "pdf",
-             "description": "Publication quality (600 DPI)",
-         },
-     }
-
-
- def _resolve_dpi_config(
-     preset: str, custom_dpi: int | None, presets: dict[str, dict[str, Any]]
- ) -> tuple[int, dict[str, Any]]:
-     """Resolve target DPI and configuration from preset or custom value."""
-     if custom_dpi is not None:
-         return custom_dpi, {
-             "font_family": "sans-serif",
-             "antialias": True,
-             "format": "png" if custom_dpi <= 150 else "pdf",
-             "description": f"Custom ({custom_dpi} DPI)",
-         }
-
-     preset_config = presets[preset]
-     return preset_config["dpi"], preset_config
-
-
- def _build_style_params(
-     target_dpi: int,
-     font_scale: float,
-     line_scale: float,
-     marker_scale: float,
-     preset_config: dict[str, Any],
- ) -> dict[str, Any]:
-     """Build matplotlib rcParams dictionary."""
-     return {
-         "figure.dpi": target_dpi,
-         "savefig.dpi": target_dpi,
-         "font.family": preset_config["font_family"],
-         "font.size": 10 * font_scale,
-         "axes.titlesize": 12 * font_scale,
-         "axes.labelsize": 10 * font_scale,
-         "xtick.labelsize": 9 * font_scale,
-         "ytick.labelsize": 9 * font_scale,
-         "legend.fontsize": 9 * font_scale,
-         "lines.linewidth": 1.0 * line_scale,
-         "lines.markersize": 6.0 * marker_scale,
-         "patch.linewidth": 1.0 * line_scale,
-         "grid.linewidth": 0.5 * line_scale,
-         "axes.linewidth": 0.8 * line_scale,
-         "xtick.major.width": 0.8 * line_scale,
-         "ytick.major.width": 0.8 * line_scale,
-         "xtick.minor.width": 0.6 * line_scale,
-         "ytick.minor.width": 0.6 * line_scale,
-     }
-
-
- def _apply_antialias_settings(style_params: dict[str, Any], preset_config: dict[str, Any]) -> None:
-     """Apply anti-aliasing settings to style params (modifies in-place)."""
-     antialias = preset_config["antialias"]
-     style_params["lines.antialiased"] = antialias
-     style_params["patch.antialiased"] = antialias
-     style_params["text.antialiased"] = antialias
-
-
- def _apply_publication_settings(style_params: dict[str, Any]) -> None:
-     """Apply publication-specific settings to style params (modifies in-place)."""
-     style_params["font.family"] = "serif"
-     style_params["mathtext.fontset"] = "cm"  # Computer Modern for LaTeX
-     style_params["axes.grid"] = True
-     style_params["grid.alpha"] = 0.3
-     style_params["axes.axisbelow"] = True
-
-
- def apply_rendering_config(config: dict[str, Any]) -> None:
-     """Apply rendering configuration to matplotlib rcParams.
-
-     Args:
-         config: Configuration dictionary from configure_dpi_rendering().
-
-     Raises:
-         ImportError: If matplotlib is not available.
-
-     Example:
-         >>> config = configure_dpi_rendering("print")
-         >>> apply_rendering_config(config)
-     """
-     try:
-         import matplotlib.pyplot as plt
-
-         plt.rcParams.update(config["style_params"])
-     except ImportError:
-         raise ImportError("matplotlib is required for rendering configuration")
-
-
- __all__ = [
-     "RenderPreset",
-     "apply_rendering_config",
-     "configure_dpi_rendering",
- ]
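
Note: the module above (oscura/visualization/render.py, entry 164, +0 -223) is deleted outright, and no direct successor is visible in this file list, so callers must be updated when moving to 0.10.0. For orientation only, a minimal sketch of how the removed 0.7.0 API was typically driven, using just the names defined in the deleted module; the plotting calls and output filename are illustrative assumptions, not taken from the package:

    # Sketch against the removed 0.7.0 module shown above; the module path is gone in 0.10.0.
    import matplotlib.pyplot as plt

    from oscura.visualization.render import apply_rendering_config, configure_dpi_rendering

    # "print" preset: 300 DPI, anti-aliasing off, recommended format "pdf"
    config = configure_dpi_rendering("print", figsize=(8.0, 5.0))
    apply_rendering_config(config)  # pushes config["style_params"] into plt.rcParams

    fig, ax = plt.subplots(figsize=config["figsize"], dpi=config["dpi"])
    ax.plot([0.0, 1.0, 2.0], [0.0, 1.0, 0.5])
    fig.savefig(f"trace.{config['format']}", dpi=config["dpi"])  # hypothetical output path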
oscura/visualization/rendering.py (deleted)
@@ -1,444 +0,0 @@
- """Rendering optimization for large datasets and streaming updates.
-
- This module provides level-of-detail rendering, progressive rendering,
- and memory-efficient plot updates for high-performance visualization.
-
-
- Example:
-     >>> from oscura.visualization.rendering import render_with_lod
-     >>> time_lod, data_lod = render_with_lod(time, data, screen_width=1920)
-
- References:
-     - Level-of-detail (LOD) rendering techniques
-     - Min-max envelope for waveform rendering
-     - Progressive rendering algorithms
-     - Streaming data visualization
- """
-
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, Literal
-
- import numpy as np
-
- if TYPE_CHECKING:
-     from numpy.typing import NDArray
-
-
- def render_with_lod(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     *,
-     screen_width: int = 1920,
-     samples_per_pixel: float = 2.0,
-     max_points: int = 100_000,
-     method: Literal["minmax", "lttb", "uniform"] = "minmax",
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Render signal with level-of-detail decimation./VIS-019.
-
-     Reduces number of points while preserving visual appearance using
-     intelligent downsampling. Target: <100k points at any zoom level.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         screen_width: Screen width in pixels.
-         samples_per_pixel: Target samples per pixel (2.0 recommended).
-         max_points: Maximum points to render (default: 100k).
-         method: Decimation method ("minmax", "lttb", "uniform").
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-
-     Raises:
-         ValueError: If arrays are invalid or method unknown.
-
-     Example:
-         >>> # 1M sample signal decimated for 1920px display
-         >>> time_lod, data_lod = render_with_lod(time, data, screen_width=1920)
-         >>> print(len(data_lod))  # ~3840 samples (2 per pixel)
-
-     References:
-         VIS-017: Performance - LOD Rendering
-         VIS-019: Memory-Efficient Plot Rendering
-     """
-     if len(time) == 0 or len(data) == 0:
-         raise ValueError("Time or data array is empty")
-
-     if len(time) != len(data):
-         raise ValueError(f"Time and data length mismatch: {len(time)} vs {len(data)}")
-
-     # Calculate target point count
-     target_points = min(
-         int(screen_width * samples_per_pixel),
-         max_points,
-     )
-
-     # Skip decimation if already below target
-     if len(data) <= target_points:
-         return (time, data)
-
-     # Apply decimation
-     if method == "uniform":
-         return _decimate_uniform(time, data, target_points)
-     elif method == "minmax":
-         return _decimate_minmax_envelope(time, data, target_points)
-     elif method == "lttb":
-         return _decimate_lttb(time, data, target_points)
-     else:
-         raise ValueError(f"Unknown decimation method: {method}")
-
-
- def _decimate_uniform(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     target_points: int,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Uniform stride decimation (simple but loses peaks).
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_points: Target number of points after decimation.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-     """
-     stride = len(data) // target_points
-     stride = max(stride, 1)
-
-     indices = np.arange(0, len(data), stride)[:target_points]
-     return (time[indices], data[indices])
-
-
- def _decimate_minmax_envelope(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     target_points: int,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Min-max envelope decimation - preserves peaks and valleys.
-
-     This method ensures all signal extrema are preserved in the decimated view.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_points: Target number of points after decimation.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-     """
-     # Calculate bucket size (each bucket contributes 2 points: min and max)
-     bucket_size = len(data) // (target_points // 2)
-
-     if bucket_size < 1:
-         return (time, data)
-
-     decimated_time = []
-     decimated_data = []
-
-     for i in range(0, len(data), bucket_size):
-         bucket_data = data[i : i + bucket_size]
-         bucket_time = time[i : i + bucket_size]
-
-         if len(bucket_data) == 0:
-             continue
-
-         # Find min and max in bucket
-         min_idx = np.argmin(bucket_data)
-         max_idx = np.argmax(bucket_data)
-
-         # Add in chronological order
-         if min_idx < max_idx:
-             decimated_time.extend([bucket_time[min_idx], bucket_time[max_idx]])
-             decimated_data.extend([bucket_data[min_idx], bucket_data[max_idx]])
-         else:
-             decimated_time.extend([bucket_time[max_idx], bucket_time[min_idx]])
-             decimated_data.extend([bucket_data[max_idx], bucket_data[min_idx]])
-
-     return (np.array(decimated_time), np.array(decimated_data))
-
-
- def _decimate_lttb(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     target_points: int,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Largest Triangle Three Buckets decimation.
-
-     Preserves visual shape by maximizing triangle areas.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_points: Target number of points after decimation.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-     """
-     if len(data) <= target_points:
-         return (time, data)
-
-     # Always include first and last points
-     sampled_time = [time[0]]
-     sampled_data = [data[0]]
-
-     bucket_size = (len(data) - 2) / (target_points - 2)
-
-     prev_idx = 0
-
-     for i in range(target_points - 2):
-         # Average point of next bucket
-         avg_range_start = int((i + 1) * bucket_size) + 1
-         avg_range_end = int((i + 2) * bucket_size) + 1
-         avg_range_end = min(avg_range_end, len(data))
-
-         if avg_range_start < avg_range_end:
-             avg_time = np.mean(time[avg_range_start:avg_range_end])
-             avg_data = np.mean(data[avg_range_start:avg_range_end])
-         else:
-             avg_time = time[-1]
-             avg_data = data[-1]
-
-         # Current bucket range
-         range_start = int(i * bucket_size) + 1
-         range_end = int((i + 1) * bucket_size) + 1
-         range_end = min(range_end, len(data) - 1)
-
-         # Find point in bucket that forms largest triangle
-         max_area = -1.0
-         max_idx = range_start
-
-         for idx in range(range_start, range_end):
-             # Calculate triangle area
-             area = abs(
-                 (time[prev_idx] - avg_time) * (data[idx] - data[prev_idx])
-                 - (time[prev_idx] - time[idx]) * (avg_data - data[prev_idx])
-             )
-
-             if area > max_area:
-                 max_area = area
-                 max_idx = idx
-
-         sampled_time.append(time[max_idx])
-         sampled_data.append(data[max_idx])
-         prev_idx = max_idx
-
-     # Always include last point
-     sampled_time.append(time[-1])
-     sampled_data.append(data[-1])
-
-     return (np.array(sampled_time), np.array(sampled_data))
-
-
- def progressive_render(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     *,
-     viewport: tuple[float, float] | None = None,
-     priority: Literal["viewport", "full"] = "viewport",
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Progressive rendering - render visible viewport first.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         viewport: Visible viewport (t_min, t_max). None = full range.
-         priority: Rendering priority ("viewport" = visible first, "full" = all data).
-
-     Returns:
-         Tuple of (time, data) for priority rendering.
-
-     Example:
-         >>> # Render only visible portion for fast initial display
-         >>> time_vis, data_vis = progressive_render(
-         ...     time, data, viewport=(0, 0.001), priority="viewport"
-         ... )
-
-     References:
-         VIS-019: Memory-Efficient Plot Rendering (progressive rendering)
-     """
-     if viewport is None or priority == "full":
-         return (time, data)
-
-     t_min, t_max = viewport
-
-     # Find indices within viewport
-     mask = (time >= t_min) & (time <= t_max)
-     indices = np.where(mask)[0]
-
-     if len(indices) == 0:
-         # Viewport is outside data range
-         return (time, data)
-
-     # Return viewport data first
-     viewport_time = time[indices]
-     viewport_data = data[indices]
-
-     return (viewport_time, viewport_data)
-
-
- def estimate_memory_usage(
-     n_samples: int,
-     n_channels: int = 1,
-     dtype: type = np.float64,
- ) -> float:
-     """Estimate memory usage for plot rendering.
-
-     Args:
-         n_samples: Number of samples per channel.
-         n_channels: Number of channels.
-         dtype: Data type for arrays.
-
-     Returns:
-         Estimated memory usage in MB.
-
-     Example:
-         >>> mem_mb = estimate_memory_usage(1_000_000, n_channels=4)
-         >>> print(f"Memory: {mem_mb:.1f} MB")
-
-     References:
-         VIS-019: Memory-Efficient Plot Rendering
-     """
-     # Bytes per sample
-     if dtype == np.float64:
-         bytes_per_sample = 8
-     elif dtype == np.float32 or dtype == np.int32:
-         bytes_per_sample = 4
-     elif dtype == np.int16:
-         bytes_per_sample = 2
-     else:
-         bytes_per_sample = 8  # Default
-
-     # Total memory: time + data arrays per channel
-     # Time array: n_samples * bytes_per_sample
-     # Data arrays: n_channels * n_samples * bytes_per_sample
-     total_bytes = (1 + n_channels) * n_samples * bytes_per_sample
-
-     # Convert to MB
-     return total_bytes / (1024 * 1024)
-
-
- def downsample_for_memory(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     *,
-     target_memory_mb: float = 50.0,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Downsample signal to meet memory target.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_memory_mb: Target memory usage in MB.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-
-     Example:
-         >>> # Reduce 100MB dataset to 50MB
-         >>> time_ds, data_ds = downsample_for_memory(time, data, target_memory_mb=50.0)
-
-     References:
-         VIS-019: Memory-Efficient Plot Rendering (memory target <50MB per subplot)
-     """
-     current_memory = estimate_memory_usage(len(data), n_channels=1)
-
-     if current_memory <= target_memory_mb:
-         # Already within target
-         return (time, data)
-
-     # Calculate required decimation factor
-     decimation_factor = current_memory / target_memory_mb
-     target_samples = int(len(data) / decimation_factor)
-
-     # Use min-max to preserve features
-     return _decimate_minmax_envelope(time, data, target_samples)
-
-
- class StreamingRenderer:
-     """Streaming plot renderer for real-time data updates.
-
-     Handles incremental data updates without full redraws for performance.
-
-     Example:
-         >>> renderer = StreamingRenderer(max_samples=10000)
-         >>> renderer.append(new_time, new_data)
-         >>> time, data = renderer.get_render_data()
-
-     References:
-         VIS-018: Streaming Plot Updates
-     """
-
-     def __init__(
-         self,
-         *,
-         max_samples: int = 10_000,
-         decimation_method: Literal["minmax", "lttb", "uniform"] = "minmax",
-     ):
-         """Initialize streaming renderer.
-
-         Args:
-             max_samples: Maximum samples to keep in buffer.
-             decimation_method: Decimation method for buffer management.
-         """
-         self.max_samples = max_samples
-         self.decimation_method = decimation_method
-
-         self._time: list[float] = []
-         self._data: list[float] = []
-
-     def append(
-         self,
-         time: NDArray[np.float64],
-         data: NDArray[np.float64],
-     ) -> None:
-         """Append new data to streaming buffer.
-
-         Args:
-             time: New time samples.
-             data: New data samples.
-         """
-         self._time.extend(time.tolist())
-         self._data.extend(data.tolist())
-
-         # Decimate if buffer exceeds limit
-         if len(self._data) > self.max_samples:
-             self._decimate_buffer()
-
-     def _decimate_buffer(self) -> None:
-         """Decimate internal buffer to max_samples."""
-         time_arr = np.array(self._time)
-         data_arr = np.array(self._data)
-
-         time_dec, data_dec = render_with_lod(
-             time_arr,
-             data_arr,
-             max_points=self.max_samples,
-             method=self.decimation_method,
-         )
-
-         self._time = time_dec.tolist()
-         self._data = data_dec.tolist()
-
-     def get_render_data(self) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-         """Get current data for rendering.
-
-         Returns:
-             Tuple of (time, data) arrays.
-         """
-         return (np.array(self._time), np.array(self._data))
-
-     def clear(self) -> None:
-         """Clear streaming buffer."""
-         self._time.clear()
-         self._data.clear()
-
-
- __all__ = [
-     "StreamingRenderer",
-     "downsample_for_memory",
-     "estimate_memory_usage",
-     "progressive_render",
-     "render_with_lod",
- ]
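
Note: oscura/visualization/rendering.py (entry 165, +0 -444) is removed in the same way. Its docstrings above already describe the level-of-detail and streaming behaviour, so the sketch below only ties the pieces together for reference against the 0.7.0 module; it uses nothing beyond the names defined in the deleted module plus NumPy, and the synthetic signal is an illustrative assumption:

    # Sketch against the removed 0.7.0 module shown above; the module path is gone in 0.10.0.
    import numpy as np

    from oscura.visualization.rendering import (
        StreamingRenderer,
        downsample_for_memory,
        render_with_lod,
    )

    rng = np.random.default_rng(0)
    time = np.linspace(0.0, 1.0, 1_000_000)
    data = np.sin(2 * np.pi * 1e3 * time) + 0.05 * rng.standard_normal(time.size)

    # Min-max envelope decimation targeting ~2 samples per pixel on a 1920 px viewport
    time_lod, data_lod = render_with_lod(time, data, screen_width=1920, method="minmax")

    # Cap memory at roughly 50 MB per trace before handing data to a plot backend
    time_small, data_small = downsample_for_memory(time, data, target_memory_mb=50.0)

    # Bounded buffer for incremental updates; the buffer is re-decimated as new data arrives
    renderer = StreamingRenderer(max_samples=10_000)
    renderer.append(time[:4096], data[:4096])
    t_buf, d_buf = renderer.get_render_data()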