oscura-0.8.0-py3-none-any.whl → oscura-0.11.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (161)
  1. oscura/__init__.py +19 -19
  2. oscura/__main__.py +4 -0
  3. oscura/analyzers/__init__.py +2 -0
  4. oscura/analyzers/digital/extraction.py +2 -3
  5. oscura/analyzers/digital/quality.py +1 -1
  6. oscura/analyzers/digital/timing.py +1 -1
  7. oscura/analyzers/ml/signal_classifier.py +6 -0
  8. oscura/analyzers/patterns/__init__.py +66 -0
  9. oscura/analyzers/power/basic.py +3 -3
  10. oscura/analyzers/power/soa.py +1 -1
  11. oscura/analyzers/power/switching.py +3 -3
  12. oscura/analyzers/signal_classification.py +529 -0
  13. oscura/analyzers/signal_integrity/sparams.py +3 -3
  14. oscura/analyzers/statistics/basic.py +10 -7
  15. oscura/analyzers/validation.py +1 -1
  16. oscura/analyzers/waveform/measurements.py +200 -156
  17. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  18. oscura/analyzers/waveform/spectral.py +182 -84
  19. oscura/api/dsl/commands.py +15 -6
  20. oscura/api/server/templates/base.html +137 -146
  21. oscura/api/server/templates/export.html +84 -110
  22. oscura/api/server/templates/home.html +248 -267
  23. oscura/api/server/templates/protocols.html +44 -48
  24. oscura/api/server/templates/reports.html +27 -35
  25. oscura/api/server/templates/session_detail.html +68 -78
  26. oscura/api/server/templates/sessions.html +62 -72
  27. oscura/api/server/templates/waveforms.html +54 -64
  28. oscura/automotive/__init__.py +1 -1
  29. oscura/automotive/can/session.py +1 -1
  30. oscura/automotive/dbc/generator.py +638 -23
  31. oscura/automotive/dtc/data.json +17 -102
  32. oscura/automotive/flexray/fibex.py +9 -1
  33. oscura/automotive/uds/decoder.py +99 -6
  34. oscura/cli/analyze.py +8 -2
  35. oscura/cli/batch.py +36 -5
  36. oscura/cli/characterize.py +18 -4
  37. oscura/cli/export.py +47 -5
  38. oscura/cli/main.py +2 -0
  39. oscura/cli/onboarding/wizard.py +10 -6
  40. oscura/cli/pipeline.py +585 -0
  41. oscura/cli/visualize.py +6 -4
  42. oscura/convenience.py +400 -32
  43. oscura/core/measurement_result.py +286 -0
  44. oscura/core/progress.py +1 -1
  45. oscura/core/schemas/device_mapping.json +2 -8
  46. oscura/core/schemas/packet_format.json +4 -24
  47. oscura/core/schemas/protocol_definition.json +2 -12
  48. oscura/core/types.py +232 -239
  49. oscura/correlation/multi_protocol.py +1 -1
  50. oscura/export/legacy/__init__.py +11 -0
  51. oscura/export/legacy/wav.py +75 -0
  52. oscura/exporters/__init__.py +19 -0
  53. oscura/exporters/wireshark.py +809 -0
  54. oscura/hardware/acquisition/file.py +5 -19
  55. oscura/hardware/acquisition/saleae.py +10 -10
  56. oscura/hardware/acquisition/socketcan.py +4 -6
  57. oscura/hardware/acquisition/synthetic.py +1 -5
  58. oscura/hardware/acquisition/visa.py +6 -6
  59. oscura/hardware/security/side_channel_detector.py +5 -508
  60. oscura/inference/message_format.py +686 -1
  61. oscura/jupyter/display.py +2 -2
  62. oscura/jupyter/magic.py +3 -3
  63. oscura/loaders/__init__.py +17 -12
  64. oscura/loaders/binary.py +1 -1
  65. oscura/loaders/chipwhisperer.py +1 -2
  66. oscura/loaders/configurable.py +1 -1
  67. oscura/loaders/csv_loader.py +2 -2
  68. oscura/loaders/hdf5_loader.py +1 -1
  69. oscura/loaders/lazy.py +6 -1
  70. oscura/loaders/mmap_loader.py +0 -1
  71. oscura/loaders/numpy_loader.py +8 -7
  72. oscura/loaders/preprocessing.py +3 -5
  73. oscura/loaders/rigol.py +21 -7
  74. oscura/loaders/sigrok.py +2 -5
  75. oscura/loaders/tdms.py +3 -2
  76. oscura/loaders/tektronix.py +38 -32
  77. oscura/loaders/tss.py +20 -27
  78. oscura/loaders/validation.py +17 -10
  79. oscura/loaders/vcd.py +13 -8
  80. oscura/loaders/wav.py +1 -6
  81. oscura/pipeline/__init__.py +76 -0
  82. oscura/pipeline/handlers/__init__.py +165 -0
  83. oscura/pipeline/handlers/analyzers.py +1045 -0
  84. oscura/pipeline/handlers/decoders.py +899 -0
  85. oscura/pipeline/handlers/exporters.py +1103 -0
  86. oscura/pipeline/handlers/filters.py +891 -0
  87. oscura/pipeline/handlers/loaders.py +640 -0
  88. oscura/pipeline/handlers/transforms.py +768 -0
  89. oscura/reporting/formatting/measurements.py +55 -14
  90. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  91. oscura/sessions/legacy.py +49 -1
  92. oscura/side_channel/__init__.py +38 -57
  93. oscura/utils/builders/signal_builder.py +5 -5
  94. oscura/utils/comparison/compare.py +7 -9
  95. oscura/utils/comparison/golden.py +1 -1
  96. oscura/utils/filtering/convenience.py +2 -2
  97. oscura/utils/math/arithmetic.py +38 -62
  98. oscura/utils/math/interpolation.py +20 -20
  99. oscura/utils/pipeline/__init__.py +4 -17
  100. oscura/utils/progressive.py +1 -4
  101. oscura/utils/triggering/edge.py +1 -1
  102. oscura/utils/triggering/pattern.py +2 -2
  103. oscura/utils/triggering/pulse.py +2 -2
  104. oscura/utils/triggering/window.py +3 -3
  105. oscura/validation/hil_testing.py +11 -11
  106. oscura/visualization/__init__.py +46 -284
  107. oscura/visualization/batch.py +72 -433
  108. oscura/visualization/plot.py +542 -53
  109. oscura/visualization/styles.py +184 -318
  110. oscura/workflows/batch/advanced.py +1 -1
  111. oscura/workflows/batch/aggregate.py +12 -9
  112. oscura/workflows/complete_re.py +251 -23
  113. oscura/workflows/digital.py +27 -4
  114. oscura/workflows/multi_trace.py +136 -17
  115. oscura/workflows/waveform.py +11 -6
  116. oscura-0.11.0.dist-info/METADATA +460 -0
  117. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/RECORD +120 -145
  118. oscura/side_channel/dpa.py +0 -1025
  119. oscura/utils/optimization/__init__.py +0 -19
  120. oscura/utils/optimization/parallel.py +0 -443
  121. oscura/utils/optimization/search.py +0 -532
  122. oscura/utils/pipeline/base.py +0 -338
  123. oscura/utils/pipeline/composition.py +0 -248
  124. oscura/utils/pipeline/parallel.py +0 -449
  125. oscura/utils/pipeline/pipeline.py +0 -375
  126. oscura/utils/search/__init__.py +0 -16
  127. oscura/utils/search/anomaly.py +0 -424
  128. oscura/utils/search/context.py +0 -294
  129. oscura/utils/search/pattern.py +0 -288
  130. oscura/utils/storage/__init__.py +0 -61
  131. oscura/utils/storage/database.py +0 -1166
  132. oscura/visualization/accessibility.py +0 -526
  133. oscura/visualization/annotations.py +0 -371
  134. oscura/visualization/axis_scaling.py +0 -305
  135. oscura/visualization/colors.py +0 -451
  136. oscura/visualization/digital.py +0 -436
  137. oscura/visualization/eye.py +0 -571
  138. oscura/visualization/histogram.py +0 -281
  139. oscura/visualization/interactive.py +0 -1035
  140. oscura/visualization/jitter.py +0 -1042
  141. oscura/visualization/keyboard.py +0 -394
  142. oscura/visualization/layout.py +0 -400
  143. oscura/visualization/optimization.py +0 -1079
  144. oscura/visualization/palettes.py +0 -446
  145. oscura/visualization/power.py +0 -508
  146. oscura/visualization/power_extended.py +0 -955
  147. oscura/visualization/presets.py +0 -469
  148. oscura/visualization/protocols.py +0 -1246
  149. oscura/visualization/render.py +0 -223
  150. oscura/visualization/rendering.py +0 -444
  151. oscura/visualization/reverse_engineering.py +0 -838
  152. oscura/visualization/signal_integrity.py +0 -989
  153. oscura/visualization/specialized.py +0 -643
  154. oscura/visualization/spectral.py +0 -1226
  155. oscura/visualization/thumbnails.py +0 -340
  156. oscura/visualization/time_axis.py +0 -351
  157. oscura/visualization/waveform.py +0 -454
  158. oscura-0.8.0.dist-info/METADATA +0 -661
  159. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/WHEEL +0 -0
  160. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/entry_points.txt +0 -0
  161. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/licenses/LICENSE +0 -0
oscura/visualization/render.py
@@ -1,223 +0,0 @@
- """Visualization rendering functions for DPI-aware output.
-
- This module provides DPI-aware rendering configuration for adapting
- plot quality and parameters based on target output device (screen vs print).
-
-
- Example:
-     >>> from oscura.visualization.render import configure_dpi_rendering
-     >>> config = configure_dpi_rendering("publication")
-     >>> fig = plt.figure(dpi=config['dpi'], figsize=config['figsize'])
-
- References:
-     - matplotlib DPI scaling best practices
-     - Print quality standards (300-600 DPI)
- """
-
- from __future__ import annotations
-
- from typing import Any, Literal
-
- RenderPreset = Literal["screen", "print", "publication"]
-
-
- def configure_dpi_rendering(
-     preset: RenderPreset = "screen",
-     *,
-     custom_dpi: int | None = None,
-     dpi: int | None = None,
-     figsize: tuple[float, float] = (10, 6),
-     baseline_dpi: float = 96.0,
- ) -> dict[str, Any]:
-     """Configure DPI-aware rendering parameters.
-
-     Adapts plot rendering quality and parameters based on target DPI
-     for print (300-600 DPI) versus screen (72-96 DPI) with export presets.
-
-     Args:
-         preset: Rendering preset ("screen", "print", "publication").
-         custom_dpi: Custom DPI override (ignores preset).
-         dpi: Alias for custom_dpi.
-         figsize: Figure size in inches (width, height).
-         baseline_dpi: Baseline DPI for scaling calculations (default 96).
-
-     Returns:
-         Dictionary with rendering configuration:
-         - dpi: Target DPI
-         - figsize: Figure size
-         - font_scale: Font size scale factor
-         - line_scale: Line width scale factor
-         - marker_scale: Marker size scale factor
-         - antialias: Whether to enable anti-aliasing
-         - format: Recommended file format
-         - style_params: Additional matplotlib rcParams
-
-     Raises:
-         ValueError: If preset is invalid.
-
-     Example:
-         >>> config = configure_dpi_rendering("print")
-         >>> plt.rcParams.update(config['style_params'])
-         >>> fig = plt.figure(dpi=config['dpi'], figsize=config['figsize'])
-
-     References:
-         VIS-017: DPI-Aware Rendering
-     """
-     presets = _get_dpi_presets()
-
-     # Handle dpi alias and resolve preset
-     if dpi is not None and custom_dpi is None:
-         custom_dpi = dpi
-
-     if preset not in presets and custom_dpi is None:
-         raise ValueError(f"Invalid preset: {preset}. Must be one of {list(presets.keys())}")
-
-     # Get configuration and DPI
-     target_dpi, preset_config = _resolve_dpi_config(preset, custom_dpi, presets)
-
-     # Calculate scale factors
-     scale = target_dpi / baseline_dpi
-     font_scale = scale
-     line_scale = scale
-     marker_scale = scale
-
-     # Build style parameters
-     style_params = _build_style_params(
-         target_dpi, font_scale, line_scale, marker_scale, preset_config
-     )
-
-     _apply_antialias_settings(style_params, preset_config)
-
-     if preset == "publication":
-         _apply_publication_settings(style_params)
-
-     return {
-         "dpi": target_dpi,
-         "figsize": figsize,
-         "font_scale": font_scale,
-         "line_scale": line_scale,
-         "marker_scale": marker_scale,
-         "antialias": preset_config["antialias"],
-         "format": preset_config["format"],
-         "style_params": style_params,
-         "description": preset_config["description"],
-         "preset": preset if custom_dpi is None else "custom",
-     }
-
-
- def _get_dpi_presets() -> dict[str, dict[str, Any]]:
-     """Get DPI preset configurations."""
-     return {
-         "screen": {
-             "dpi": 96,
-             "font_family": "sans-serif",
-             "antialias": True,
-             "format": "png",
-             "description": "Screen display (96 DPI)",
-         },
-         "print": {
-             "dpi": 300,
-             "font_family": "sans-serif",
-             "antialias": False,
-             "format": "pdf",
-             "description": "Print output (300 DPI)",
-         },
-         "publication": {
-             "dpi": 600,
-             "font_family": "serif",
-             "antialias": False,
-             "format": "pdf",
-             "description": "Publication quality (600 DPI)",
-         },
-     }
-
-
- def _resolve_dpi_config(
-     preset: str, custom_dpi: int | None, presets: dict[str, dict[str, Any]]
- ) -> tuple[int, dict[str, Any]]:
-     """Resolve target DPI and configuration from preset or custom value."""
-     if custom_dpi is not None:
-         return custom_dpi, {
-             "font_family": "sans-serif",
-             "antialias": True,
-             "format": "png" if custom_dpi <= 150 else "pdf",
-             "description": f"Custom ({custom_dpi} DPI)",
-         }
-
-     preset_config = presets[preset]
-     return preset_config["dpi"], preset_config
-
-
- def _build_style_params(
-     target_dpi: int,
-     font_scale: float,
-     line_scale: float,
-     marker_scale: float,
-     preset_config: dict[str, Any],
- ) -> dict[str, Any]:
-     """Build matplotlib rcParams dictionary."""
-     return {
-         "figure.dpi": target_dpi,
-         "savefig.dpi": target_dpi,
-         "font.family": preset_config["font_family"],
-         "font.size": 10 * font_scale,
-         "axes.titlesize": 12 * font_scale,
-         "axes.labelsize": 10 * font_scale,
-         "xtick.labelsize": 9 * font_scale,
-         "ytick.labelsize": 9 * font_scale,
-         "legend.fontsize": 9 * font_scale,
-         "lines.linewidth": 1.0 * line_scale,
-         "lines.markersize": 6.0 * marker_scale,
-         "patch.linewidth": 1.0 * line_scale,
-         "grid.linewidth": 0.5 * line_scale,
-         "axes.linewidth": 0.8 * line_scale,
-         "xtick.major.width": 0.8 * line_scale,
-         "ytick.major.width": 0.8 * line_scale,
-         "xtick.minor.width": 0.6 * line_scale,
-         "ytick.minor.width": 0.6 * line_scale,
-     }
-
-
- def _apply_antialias_settings(style_params: dict[str, Any], preset_config: dict[str, Any]) -> None:
-     """Apply anti-aliasing settings to style params (modifies in-place)."""
-     antialias = preset_config["antialias"]
-     style_params["lines.antialiased"] = antialias
-     style_params["patch.antialiased"] = antialias
-     style_params["text.antialiased"] = antialias
-
-
- def _apply_publication_settings(style_params: dict[str, Any]) -> None:
-     """Apply publication-specific settings to style params (modifies in-place)."""
-     style_params["font.family"] = "serif"
-     style_params["mathtext.fontset"] = "cm"  # Computer Modern for LaTeX
-     style_params["axes.grid"] = True
-     style_params["grid.alpha"] = 0.3
-     style_params["axes.axisbelow"] = True
-
-
- def apply_rendering_config(config: dict[str, Any]) -> None:
-     """Apply rendering configuration to matplotlib rcParams.
-
-     Args:
-         config: Configuration dictionary from configure_dpi_rendering().
-
-     Raises:
-         ImportError: If matplotlib is not available.
-
-     Example:
-         >>> config = configure_dpi_rendering("print")
-         >>> apply_rendering_config(config)
-     """
-     try:
-         import matplotlib.pyplot as plt
-
-         plt.rcParams.update(config["style_params"])
-     except ImportError:
-         raise ImportError("matplotlib is required for rendering configuration")
-
-
- __all__ = [
-     "RenderPreset",
-     "apply_rendering_config",
-     "configure_dpi_rendering",
- ]
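Since oscura.visualization.render is removed entirely in 0.11.0 (entry 149 above, -223 lines), the usage pattern its docstrings describe only works against the 0.8.0 wheel. A minimal sketch of that pattern, assuming matplotlib is installed:

import matplotlib.pyplot as plt
from oscura.visualization.render import apply_rendering_config, configure_dpi_rendering

# Resolve the 600 DPI "publication" preset and push its rcParams into matplotlib.
config = configure_dpi_rendering("publication")
apply_rendering_config(config)

# Create the figure with the DPI and figure size the preset recommends.
fig = plt.figure(dpi=config["dpi"], figsize=config["figsize"])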
oscura/visualization/rendering.py
@@ -1,444 +0,0 @@
- """Rendering optimization for large datasets and streaming updates.
-
- This module provides level-of-detail rendering, progressive rendering,
- and memory-efficient plot updates for high-performance visualization.
-
-
- Example:
-     >>> from oscura.visualization.rendering import render_with_lod
-     >>> time_lod, data_lod = render_with_lod(time, data, screen_width=1920)
-
- References:
-     - Level-of-detail (LOD) rendering techniques
-     - Min-max envelope for waveform rendering
-     - Progressive rendering algorithms
-     - Streaming data visualization
- """
-
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, Literal
-
- import numpy as np
-
- if TYPE_CHECKING:
-     from numpy.typing import NDArray
-
-
- def render_with_lod(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     *,
-     screen_width: int = 1920,
-     samples_per_pixel: float = 2.0,
-     max_points: int = 100_000,
-     method: Literal["minmax", "lttb", "uniform"] = "minmax",
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Render signal with level-of-detail decimation./VIS-019.
-
-     Reduces number of points while preserving visual appearance using
-     intelligent downsampling. Target: <100k points at any zoom level.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         screen_width: Screen width in pixels.
-         samples_per_pixel: Target samples per pixel (2.0 recommended).
-         max_points: Maximum points to render (default: 100k).
-         method: Decimation method ("minmax", "lttb", "uniform").
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-
-     Raises:
-         ValueError: If arrays are invalid or method unknown.
-
-     Example:
-         >>> # 1M sample signal decimated for 1920px display
-         >>> time_lod, data_lod = render_with_lod(time, data, screen_width=1920)
-         >>> print(len(data_lod))  # ~3840 samples (2 per pixel)
-
-     References:
-         VIS-017: Performance - LOD Rendering
-         VIS-019: Memory-Efficient Plot Rendering
-     """
-     if len(time) == 0 or len(data) == 0:
-         raise ValueError("Time or data array is empty")
-
-     if len(time) != len(data):
-         raise ValueError(f"Time and data length mismatch: {len(time)} vs {len(data)}")
-
-     # Calculate target point count
-     target_points = min(
-         int(screen_width * samples_per_pixel),
-         max_points,
-     )
-
-     # Skip decimation if already below target
-     if len(data) <= target_points:
-         return (time, data)
-
-     # Apply decimation
-     if method == "uniform":
-         return _decimate_uniform(time, data, target_points)
-     elif method == "minmax":
-         return _decimate_minmax_envelope(time, data, target_points)
-     elif method == "lttb":
-         return _decimate_lttb(time, data, target_points)
-     else:
-         raise ValueError(f"Unknown decimation method: {method}")
-
-
- def _decimate_uniform(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     target_points: int,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Uniform stride decimation (simple but loses peaks).
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_points: Target number of points after decimation.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-     """
-     stride = len(data) // target_points
-     stride = max(stride, 1)
-
-     indices = np.arange(0, len(data), stride)[:target_points]
-     return (time[indices], data[indices])
-
-
- def _decimate_minmax_envelope(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     target_points: int,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Min-max envelope decimation - preserves peaks and valleys.
-
-     This method ensures all signal extrema are preserved in the decimated view.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_points: Target number of points after decimation.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-     """
-     # Calculate bucket size (each bucket contributes 2 points: min and max)
-     bucket_size = len(data) // (target_points // 2)
-
-     if bucket_size < 1:
-         return (time, data)
-
-     decimated_time = []
-     decimated_data = []
-
-     for i in range(0, len(data), bucket_size):
-         bucket_data = data[i : i + bucket_size]
-         bucket_time = time[i : i + bucket_size]
-
-         if len(bucket_data) == 0:
-             continue
-
-         # Find min and max in bucket
-         min_idx = np.argmin(bucket_data)
-         max_idx = np.argmax(bucket_data)
-
-         # Add in chronological order
-         if min_idx < max_idx:
-             decimated_time.extend([bucket_time[min_idx], bucket_time[max_idx]])
-             decimated_data.extend([bucket_data[min_idx], bucket_data[max_idx]])
-         else:
-             decimated_time.extend([bucket_time[max_idx], bucket_time[min_idx]])
-             decimated_data.extend([bucket_data[max_idx], bucket_data[min_idx]])
-
-     return (np.array(decimated_time), np.array(decimated_data))
-
-
- def _decimate_lttb(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     target_points: int,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Largest Triangle Three Buckets decimation.
-
-     Preserves visual shape by maximizing triangle areas.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_points: Target number of points after decimation.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-     """
-     if len(data) <= target_points:
-         return (time, data)
-
-     # Always include first and last points
-     sampled_time = [time[0]]
-     sampled_data = [data[0]]
-
-     bucket_size = (len(data) - 2) / (target_points - 2)
-
-     prev_idx = 0
-
-     for i in range(target_points - 2):
-         # Average point of next bucket
-         avg_range_start = int((i + 1) * bucket_size) + 1
-         avg_range_end = int((i + 2) * bucket_size) + 1
-         avg_range_end = min(avg_range_end, len(data))
-
-         if avg_range_start < avg_range_end:
-             avg_time = np.mean(time[avg_range_start:avg_range_end])
-             avg_data = np.mean(data[avg_range_start:avg_range_end])
-         else:
-             avg_time = time[-1]
-             avg_data = data[-1]
-
-         # Current bucket range
-         range_start = int(i * bucket_size) + 1
-         range_end = int((i + 1) * bucket_size) + 1
-         range_end = min(range_end, len(data) - 1)
-
-         # Find point in bucket that forms largest triangle
-         max_area = -1.0
-         max_idx = range_start
-
-         for idx in range(range_start, range_end):
-             # Calculate triangle area
-             area = abs(
-                 (time[prev_idx] - avg_time) * (data[idx] - data[prev_idx])
-                 - (time[prev_idx] - time[idx]) * (avg_data - data[prev_idx])
-             )
-
-             if area > max_area:
-                 max_area = area
-                 max_idx = idx
-
-         sampled_time.append(time[max_idx])
-         sampled_data.append(data[max_idx])
-         prev_idx = max_idx
-
-     # Always include last point
-     sampled_time.append(time[-1])
-     sampled_data.append(data[-1])
-
-     return (np.array(sampled_time), np.array(sampled_data))
-
-
- def progressive_render(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     *,
-     viewport: tuple[float, float] | None = None,
-     priority: Literal["viewport", "full"] = "viewport",
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Progressive rendering - render visible viewport first.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         viewport: Visible viewport (t_min, t_max). None = full range.
-         priority: Rendering priority ("viewport" = visible first, "full" = all data).
-
-     Returns:
-         Tuple of (time, data) for priority rendering.
-
-     Example:
-         >>> # Render only visible portion for fast initial display
-         >>> time_vis, data_vis = progressive_render(
-         ...     time, data, viewport=(0, 0.001), priority="viewport"
-         ... )
-
-     References:
-         VIS-019: Memory-Efficient Plot Rendering (progressive rendering)
-     """
-     if viewport is None or priority == "full":
-         return (time, data)
-
-     t_min, t_max = viewport
-
-     # Find indices within viewport
-     mask = (time >= t_min) & (time <= t_max)
-     indices = np.where(mask)[0]
-
-     if len(indices) == 0:
-         # Viewport is outside data range
-         return (time, data)
-
-     # Return viewport data first
-     viewport_time = time[indices]
-     viewport_data = data[indices]
-
-     return (viewport_time, viewport_data)
-
-
- def estimate_memory_usage(
-     n_samples: int,
-     n_channels: int = 1,
-     dtype: type = np.float64,
- ) -> float:
-     """Estimate memory usage for plot rendering.
-
-     Args:
-         n_samples: Number of samples per channel.
-         n_channels: Number of channels.
-         dtype: Data type for arrays.
-
-     Returns:
-         Estimated memory usage in MB.
-
-     Example:
-         >>> mem_mb = estimate_memory_usage(1_000_000, n_channels=4)
-         >>> print(f"Memory: {mem_mb:.1f} MB")
-
-     References:
-         VIS-019: Memory-Efficient Plot Rendering
-     """
-     # Bytes per sample
-     if dtype == np.float64:
-         bytes_per_sample = 8
-     elif dtype == np.float32 or dtype == np.int32:
-         bytes_per_sample = 4
-     elif dtype == np.int16:
-         bytes_per_sample = 2
-     else:
-         bytes_per_sample = 8  # Default
-
-     # Total memory: time + data arrays per channel
-     # Time array: n_samples * bytes_per_sample
-     # Data arrays: n_channels * n_samples * bytes_per_sample
-     total_bytes = (1 + n_channels) * n_samples * bytes_per_sample
-
-     # Convert to MB
-     return total_bytes / (1024 * 1024)
-
-
- def downsample_for_memory(
-     time: NDArray[np.float64],
-     data: NDArray[np.float64],
-     *,
-     target_memory_mb: float = 50.0,
- ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-     """Downsample signal to meet memory target.
-
-     Args:
-         time: Time array.
-         data: Signal data array.
-         target_memory_mb: Target memory usage in MB.
-
-     Returns:
-         Tuple of (decimated_time, decimated_data).
-
-     Example:
-         >>> # Reduce 100MB dataset to 50MB
-         >>> time_ds, data_ds = downsample_for_memory(time, data, target_memory_mb=50.0)
-
-     References:
-         VIS-019: Memory-Efficient Plot Rendering (memory target <50MB per subplot)
-     """
-     current_memory = estimate_memory_usage(len(data), n_channels=1)
-
-     if current_memory <= target_memory_mb:
-         # Already within target
-         return (time, data)
-
-     # Calculate required decimation factor
-     decimation_factor = current_memory / target_memory_mb
-     target_samples = int(len(data) / decimation_factor)
-
-     # Use min-max to preserve features
-     return _decimate_minmax_envelope(time, data, target_samples)
-
-
- class StreamingRenderer:
-     """Streaming plot renderer for real-time data updates.
-
-     Handles incremental data updates without full redraws for performance.
-
-     Example:
-         >>> renderer = StreamingRenderer(max_samples=10000)
-         >>> renderer.append(new_time, new_data)
-         >>> time, data = renderer.get_render_data()
-
-     References:
-         VIS-018: Streaming Plot Updates
-     """
-
-     def __init__(
-         self,
-         *,
-         max_samples: int = 10_000,
-         decimation_method: Literal["minmax", "lttb", "uniform"] = "minmax",
-     ):
-         """Initialize streaming renderer.
-
-         Args:
-             max_samples: Maximum samples to keep in buffer.
-             decimation_method: Decimation method for buffer management.
-         """
-         self.max_samples = max_samples
-         self.decimation_method = decimation_method
-
-         self._time: list[float] = []
-         self._data: list[float] = []
-
-     def append(
-         self,
-         time: NDArray[np.float64],
-         data: NDArray[np.float64],
-     ) -> None:
-         """Append new data to streaming buffer.
-
-         Args:
-             time: New time samples.
-             data: New data samples.
-         """
-         self._time.extend(time.tolist())
-         self._data.extend(data.tolist())
-
-         # Decimate if buffer exceeds limit
-         if len(self._data) > self.max_samples:
-             self._decimate_buffer()
-
-     def _decimate_buffer(self) -> None:
-         """Decimate internal buffer to max_samples."""
-         time_arr = np.array(self._time)
-         data_arr = np.array(self._data)
-
-         time_dec, data_dec = render_with_lod(
-             time_arr,
-             data_arr,
-             max_points=self.max_samples,
-             method=self.decimation_method,
-         )
-
-         self._time = time_dec.tolist()
-         self._data = data_dec.tolist()
-
-     def get_render_data(self) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
-         """Get current data for rendering.
-
-         Returns:
-             Tuple of (time, data) arrays.
-         """
-         return (np.array(self._time), np.array(self._data))
-
-     def clear(self) -> None:
-         """Clear streaming buffer."""
-         self._time.clear()
-         self._data.clear()
-
-
- __all__ = [
-     "StreamingRenderer",
-     "downsample_for_memory",
-     "estimate_memory_usage",
-     "progressive_render",
-     "render_with_lod",
- ]
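oscura.visualization.rendering (entry 150 above, -444 lines) is likewise gone in 0.11.0. A minimal sketch of its level-of-detail and streaming APIs as the docstrings above describe them, assuming numpy and the 0.8.0 wheel; the sine trace is illustrative only:

import numpy as np
from oscura.visualization.rendering import StreamingRenderer, render_with_lod

# Decimate a 1M-sample trace to roughly two points per pixel on a 1920 px display,
# using the min-max envelope so peaks and valleys survive the downsampling.
time = np.linspace(0.0, 1.0, 1_000_000)
data = np.sin(2 * np.pi * 50.0 * time)
time_lod, data_lod = render_with_lod(time, data, screen_width=1920, method="minmax")

# Streaming buffer that re-decimates itself once it grows past 10,000 samples.
renderer = StreamingRenderer(max_samples=10_000)
renderer.append(time[:1_000], data[:1_000])
buf_time, buf_data = renderer.get_render_data()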