oscura-0.7.0-py3-none-any.whl → oscura-0.10.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (175)
  1. oscura/__init__.py +19 -19
  2. oscura/analyzers/__init__.py +2 -0
  3. oscura/analyzers/digital/extraction.py +2 -3
  4. oscura/analyzers/digital/quality.py +1 -1
  5. oscura/analyzers/digital/timing.py +1 -1
  6. oscura/analyzers/eye/__init__.py +5 -1
  7. oscura/analyzers/eye/generation.py +501 -0
  8. oscura/analyzers/jitter/__init__.py +6 -6
  9. oscura/analyzers/jitter/timing.py +419 -0
  10. oscura/analyzers/patterns/__init__.py +94 -0
  11. oscura/analyzers/patterns/reverse_engineering.py +991 -0
  12. oscura/analyzers/power/__init__.py +35 -12
  13. oscura/analyzers/power/basic.py +3 -3
  14. oscura/analyzers/power/soa.py +1 -1
  15. oscura/analyzers/power/switching.py +3 -3
  16. oscura/analyzers/signal_classification.py +529 -0
  17. oscura/analyzers/signal_integrity/sparams.py +3 -3
  18. oscura/analyzers/statistics/__init__.py +4 -0
  19. oscura/analyzers/statistics/basic.py +152 -0
  20. oscura/analyzers/statistics/correlation.py +47 -6
  21. oscura/analyzers/validation.py +1 -1
  22. oscura/analyzers/waveform/__init__.py +2 -0
  23. oscura/analyzers/waveform/measurements.py +329 -163
  24. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  25. oscura/analyzers/waveform/spectral.py +498 -54
  26. oscura/api/dsl/commands.py +15 -6
  27. oscura/api/server/templates/base.html +137 -146
  28. oscura/api/server/templates/export.html +84 -110
  29. oscura/api/server/templates/home.html +248 -267
  30. oscura/api/server/templates/protocols.html +44 -48
  31. oscura/api/server/templates/reports.html +27 -35
  32. oscura/api/server/templates/session_detail.html +68 -78
  33. oscura/api/server/templates/sessions.html +62 -72
  34. oscura/api/server/templates/waveforms.html +54 -64
  35. oscura/automotive/__init__.py +1 -1
  36. oscura/automotive/can/session.py +1 -1
  37. oscura/automotive/dbc/generator.py +638 -23
  38. oscura/automotive/dtc/data.json +102 -17
  39. oscura/automotive/uds/decoder.py +99 -6
  40. oscura/cli/analyze.py +8 -2
  41. oscura/cli/batch.py +36 -5
  42. oscura/cli/characterize.py +18 -4
  43. oscura/cli/export.py +47 -5
  44. oscura/cli/main.py +2 -0
  45. oscura/cli/onboarding/wizard.py +10 -6
  46. oscura/cli/pipeline.py +585 -0
  47. oscura/cli/visualize.py +6 -4
  48. oscura/convenience.py +400 -32
  49. oscura/core/config/loader.py +0 -1
  50. oscura/core/measurement_result.py +286 -0
  51. oscura/core/progress.py +1 -1
  52. oscura/core/schemas/device_mapping.json +8 -2
  53. oscura/core/schemas/packet_format.json +24 -4
  54. oscura/core/schemas/protocol_definition.json +12 -2
  55. oscura/core/types.py +300 -199
  56. oscura/correlation/multi_protocol.py +1 -1
  57. oscura/export/legacy/__init__.py +11 -0
  58. oscura/export/legacy/wav.py +75 -0
  59. oscura/exporters/__init__.py +19 -0
  60. oscura/exporters/wireshark.py +809 -0
  61. oscura/hardware/acquisition/file.py +5 -19
  62. oscura/hardware/acquisition/saleae.py +10 -10
  63. oscura/hardware/acquisition/socketcan.py +4 -6
  64. oscura/hardware/acquisition/synthetic.py +1 -5
  65. oscura/hardware/acquisition/visa.py +6 -6
  66. oscura/hardware/security/side_channel_detector.py +5 -508
  67. oscura/inference/message_format.py +686 -1
  68. oscura/jupyter/display.py +2 -2
  69. oscura/jupyter/magic.py +3 -3
  70. oscura/loaders/__init__.py +17 -12
  71. oscura/loaders/binary.py +1 -1
  72. oscura/loaders/chipwhisperer.py +1 -2
  73. oscura/loaders/configurable.py +1 -1
  74. oscura/loaders/csv_loader.py +2 -2
  75. oscura/loaders/hdf5_loader.py +1 -1
  76. oscura/loaders/lazy.py +6 -1
  77. oscura/loaders/mmap_loader.py +0 -1
  78. oscura/loaders/numpy_loader.py +8 -7
  79. oscura/loaders/preprocessing.py +3 -5
  80. oscura/loaders/rigol.py +21 -7
  81. oscura/loaders/sigrok.py +2 -5
  82. oscura/loaders/tdms.py +3 -2
  83. oscura/loaders/tektronix.py +38 -32
  84. oscura/loaders/tss.py +20 -27
  85. oscura/loaders/vcd.py +13 -8
  86. oscura/loaders/wav.py +1 -6
  87. oscura/pipeline/__init__.py +76 -0
  88. oscura/pipeline/handlers/__init__.py +165 -0
  89. oscura/pipeline/handlers/analyzers.py +1045 -0
  90. oscura/pipeline/handlers/decoders.py +899 -0
  91. oscura/pipeline/handlers/exporters.py +1103 -0
  92. oscura/pipeline/handlers/filters.py +891 -0
  93. oscura/pipeline/handlers/loaders.py +640 -0
  94. oscura/pipeline/handlers/transforms.py +768 -0
  95. oscura/reporting/__init__.py +88 -1
  96. oscura/reporting/automation.py +348 -0
  97. oscura/reporting/citations.py +374 -0
  98. oscura/reporting/core.py +54 -0
  99. oscura/reporting/formatting/__init__.py +11 -0
  100. oscura/reporting/formatting/measurements.py +320 -0
  101. oscura/reporting/html.py +57 -0
  102. oscura/reporting/interpretation.py +431 -0
  103. oscura/reporting/summary.py +329 -0
  104. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  105. oscura/reporting/visualization.py +542 -0
  106. oscura/side_channel/__init__.py +38 -57
  107. oscura/utils/builders/signal_builder.py +5 -5
  108. oscura/utils/comparison/compare.py +7 -9
  109. oscura/utils/comparison/golden.py +1 -1
  110. oscura/utils/filtering/convenience.py +2 -2
  111. oscura/utils/math/arithmetic.py +38 -62
  112. oscura/utils/math/interpolation.py +20 -20
  113. oscura/utils/pipeline/__init__.py +4 -17
  114. oscura/utils/progressive.py +1 -4
  115. oscura/utils/triggering/edge.py +1 -1
  116. oscura/utils/triggering/pattern.py +2 -2
  117. oscura/utils/triggering/pulse.py +2 -2
  118. oscura/utils/triggering/window.py +3 -3
  119. oscura/validation/hil_testing.py +11 -11
  120. oscura/visualization/__init__.py +47 -284
  121. oscura/visualization/batch.py +160 -0
  122. oscura/visualization/plot.py +542 -53
  123. oscura/visualization/styles.py +184 -318
  124. oscura/workflows/__init__.py +2 -0
  125. oscura/workflows/batch/advanced.py +1 -1
  126. oscura/workflows/batch/aggregate.py +7 -8
  127. oscura/workflows/complete_re.py +251 -23
  128. oscura/workflows/digital.py +27 -4
  129. oscura/workflows/multi_trace.py +136 -17
  130. oscura/workflows/waveform.py +788 -0
  131. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/METADATA +59 -79
  132. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/RECORD +135 -149
  133. oscura/side_channel/dpa.py +0 -1025
  134. oscura/utils/optimization/__init__.py +0 -19
  135. oscura/utils/optimization/parallel.py +0 -443
  136. oscura/utils/optimization/search.py +0 -532
  137. oscura/utils/pipeline/base.py +0 -338
  138. oscura/utils/pipeline/composition.py +0 -248
  139. oscura/utils/pipeline/parallel.py +0 -449
  140. oscura/utils/pipeline/pipeline.py +0 -375
  141. oscura/utils/search/__init__.py +0 -16
  142. oscura/utils/search/anomaly.py +0 -424
  143. oscura/utils/search/context.py +0 -294
  144. oscura/utils/search/pattern.py +0 -288
  145. oscura/utils/storage/__init__.py +0 -61
  146. oscura/utils/storage/database.py +0 -1166
  147. oscura/visualization/accessibility.py +0 -526
  148. oscura/visualization/annotations.py +0 -371
  149. oscura/visualization/axis_scaling.py +0 -305
  150. oscura/visualization/colors.py +0 -451
  151. oscura/visualization/digital.py +0 -436
  152. oscura/visualization/eye.py +0 -571
  153. oscura/visualization/histogram.py +0 -281
  154. oscura/visualization/interactive.py +0 -1035
  155. oscura/visualization/jitter.py +0 -1042
  156. oscura/visualization/keyboard.py +0 -394
  157. oscura/visualization/layout.py +0 -400
  158. oscura/visualization/optimization.py +0 -1079
  159. oscura/visualization/palettes.py +0 -446
  160. oscura/visualization/power.py +0 -508
  161. oscura/visualization/power_extended.py +0 -955
  162. oscura/visualization/presets.py +0 -469
  163. oscura/visualization/protocols.py +0 -1246
  164. oscura/visualization/render.py +0 -223
  165. oscura/visualization/rendering.py +0 -444
  166. oscura/visualization/reverse_engineering.py +0 -838
  167. oscura/visualization/signal_integrity.py +0 -989
  168. oscura/visualization/specialized.py +0 -643
  169. oscura/visualization/spectral.py +0 -1226
  170. oscura/visualization/thumbnails.py +0 -340
  171. oscura/visualization/time_axis.py +0 -351
  172. oscura/visualization/waveform.py +0 -454
  173. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/WHEEL +0 -0
  174. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/entry_points.txt +0 -0
  175. {oscura-0.7.0.dist-info → oscura-0.10.0.dist-info}/licenses/LICENSE +0 -0
oscura/utils/optimization/__init__.py
@@ -1,19 +0,0 @@
- """Parameter optimization and search algorithms.
-
- This module provides grid search and randomized search for finding optimal
- analysis parameters.
- """
-
- from oscura.utils.optimization.search import (
-     GridSearchCV,
-     RandomizedSearchCV,
-     ScoringFunction,
-     SearchResult,
- )
-
- __all__ = [
-     "GridSearchCV",
-     "RandomizedSearchCV",
-     "ScoringFunction",
-     "SearchResult",
- ]
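
Note: the hunk above deletes the package initializer, so `from oscura.utils.optimization import GridSearchCV` stops resolving in 0.10.0 (the backing `oscura/utils/optimization/search.py` is removed as well, file 136 in the list). The diff names no replacement. As a hedged illustration only, a caller could approximate the removed exhaustive search with the standard library; the `score_fn` callable and parameter grid below are hypothetical stand-ins, not oscura APIs.

# Hypothetical stand-in for the removed GridSearchCV-style exhaustive search.
# Sketch only: oscura 0.10.0 does not document an equivalent.
from itertools import product

def grid_search(score_fn, param_grid):
    """Return (best_params, best_score) over the Cartesian product of a grid."""
    names = sorted(param_grid)
    best_params, best_score = None, float("-inf")
    for values in product(*(param_grid[n] for n in names)):
        params = dict(zip(names, values))
        score = score_fn(**params)  # higher is better, by convention here
        if score > best_score:
            best_params, best_score = params, score
    return best_params, best_score

# Usage: maximize a toy scoring function over two illustrative parameters.
best, score = grid_search(
    lambda threshold, window: -(threshold - 0.5) ** 2 - (window - 16) ** 2,
    {"threshold": [0.25, 0.5, 0.75], "window": [8, 16, 32]},
)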
oscura/utils/optimization/parallel.py
@@ -1,443 +0,0 @@
- """Parallel processing utilities for optimization and analysis.
-
- This module provides utilities for efficient parallel execution of analysis tasks
- using both thread and process-based parallelism.
- """
-
- from __future__ import annotations
-
- import logging
- from collections.abc import Callable
- from concurrent.futures import (
-     ProcessPoolExecutor,
-     ThreadPoolExecutor,
-     as_completed,
- )
- from dataclasses import dataclass
- from typing import TYPE_CHECKING, Any, Generic, TypeVar
-
- import numpy as np
-
- from oscura.core.exceptions import AnalysisError
-
- T = TypeVar("T")
- R = TypeVar("R")
-
- if TYPE_CHECKING:
-     from collections.abc import Iterable
-
-     from numpy.typing import NDArray
-
- logger = logging.getLogger(__name__)
-
-
- @dataclass
- class ParallelResult(Generic[R]):
-     """Result from parallel execution.
-
-     Attributes:
-         results: List of results from all tasks.
-         execution_time: Total execution time in seconds.
-         success_count: Number of successfully completed tasks.
-         error_count: Number of failed tasks.
-         errors: List of exceptions encountered.
-
-     Example:
-         >>> result = parallel_map(fn, items)
-         >>> print(f"Completed {result.success_count}/{len(items)}")
-     """
-
-     results: list[R]
-     execution_time: float
-     success_count: int
-     error_count: int
-     errors: list[Exception] | None = None
-
-
- @dataclass
- class WorkerPool:
-     """Configuration for worker pool management.
-
-     Attributes:
-         max_workers: Maximum number of workers.
-         use_threads: Use threads (True) or processes (False).
-         timeout: Timeout per task in seconds.
-         chunk_size: Number of items per worker chunk.
-
-     Example:
-         >>> pool = WorkerPool(max_workers=4, use_threads=True, timeout=30)
-     """
-
-     max_workers: int = 4
-     use_threads: bool = True
-     timeout: float | None = None
-     chunk_size: int = 1
-
-
- def get_optimal_workers(max_workers: int | None = None) -> int:
-     """Get optimal number of workers for current system.
-
-     Uses CPU count by default, respecting max_workers limit.
-
-     Args:
-         max_workers: Maximum workers to use. None for all CPUs.
-
-     Returns:
-         Optimal number of workers.
-
-     Example:
-         >>> workers = get_optimal_workers(max_workers=8)
-     """
-     import os
-
-     cpu_count = os.cpu_count() or 1
-     if max_workers is None:
-         return cpu_count
-     return min(max_workers, cpu_count)
-
-
- def parallel_map(
-     func: Callable[[T], R],
-     iterable: Iterable[T],
-     *,
-     max_workers: int | None = None,
-     use_threads: bool = True,
-     timeout: float | None = None,
-     collect_errors: bool = True,
- ) -> ParallelResult[R]:
-     """Apply function to items in parallel.
-
-     Maps a function over an iterable using either threads or processes.
-
-     Args:
-         func: Function to apply to each item.
-         iterable: Items to process.
-         max_workers: Maximum concurrent workers.
-         use_threads: Use threads (True) or processes (False).
-         timeout: Timeout per task in seconds.
-         collect_errors: Collect errors instead of raising.
-
-     Returns:
-         ParallelResult with results and execution stats.
-
-     Raises:
-         AnalysisError: If collect_errors=False and a task fails.
-
-     Example:
-         >>> def process_item(x):
-         ...     return x * 2
-         >>> result = parallel_map(process_item, range(100))
-         >>> print(f"Completed: {result.success_count}")
-
-     References:
-         OPT-001: Parallel Execution Framework
-     """
-     import time
-
-     items = list(iterable)
-     if not items:
-         return ParallelResult(results=[], execution_time=0.0, success_count=0, error_count=0)
-
-     executor_class = ThreadPoolExecutor if use_threads else ProcessPoolExecutor
-     max_workers = get_optimal_workers(max_workers)
-
-     start_time = time.time()
-     results: list[R] = [None] * len(items)  # type: ignore[list-item]
-     errors: list[Exception] = []
-     success_count = 0
-     error_count = 0
-
-     with executor_class(max_workers=max_workers) as executor:
-         futures = {executor.submit(func, item): i for i, item in enumerate(items)}
-
-         for future in as_completed(futures, timeout=timeout):
-             idx = futures[future]
-             try:
-                 results[idx] = future.result()
-                 success_count += 1
-             except Exception as e:
-                 error_count += 1
-                 errors.append(e)
-
-                 if not collect_errors:
-                     execution_time = time.time() - start_time
-                     raise AnalysisError(f"Task {idx} failed: {e!s}") from e
-
-     execution_time = time.time() - start_time
-
-     return ParallelResult(
-         results=results,
-         execution_time=execution_time,
-         success_count=success_count,
-         error_count=error_count,
-         errors=errors if errors else None,
-     )
-
-
- def parallel_reduce(
-     func: Callable[[T], R],
-     iterable: Iterable[T],
-     reducer: Callable[[list[R]], Any],
-     *,
-     max_workers: int | None = None,
-     use_threads: bool = True,
-     timeout: float | None = None,
- ) -> Any:
-     """Map and reduce results in parallel.
-
-     Applies function to items in parallel, then reduces results.
-
-     Args:
-         func: Function to apply to each item.
-         iterable: Items to process.
-         reducer: Function to reduce list of results.
-         max_workers: Maximum concurrent workers.
-         use_threads: Use threads (True) or processes (False).
-         timeout: Timeout per task in seconds.
-
-     Returns:
-         Reduced result.
-
-     Example:
-         >>> def compute(x):
-         ...     return x * 2
-         >>> result = parallel_reduce(
-         ...     compute,
-         ...     range(100),
-         ...     reducer=lambda x: sum(x)
-         ... )
-
-     References:
-         OPT-001: Parallel Execution Framework
-     """
-     result = parallel_map(
-         func,
-         iterable,
-         max_workers=max_workers,
-         use_threads=use_threads,
-         timeout=timeout,
-         collect_errors=False,
-     )
-
-     return reducer(result.results)
-
-
- def batch_parallel_map(
-     func: Callable[[list[T]], list[R]],
-     iterable: Iterable[T],
-     *,
-     batch_size: int = 100,
-     max_workers: int | None = None,
-     use_threads: bool = True,
-     timeout: float | None = None,
- ) -> ParallelResult[R]:
-     """Apply function to batches of items in parallel.
-
-     Processes items in batches, useful when function benefits from
-     batch processing.
-
-     Args:
-         func: Function accepting list of items.
-         iterable: Items to process.
-         batch_size: Number of items per batch.
-         max_workers: Maximum concurrent workers.
-         use_threads: Use threads (True) or processes (False).
-         timeout: Timeout per batch in seconds.
-
-     Returns:
-         ParallelResult with flattened results.
-
-     Example:
-         >>> def process_batch(items):
-         ...     return [x * 2 for x in items]
-         >>> result = batch_parallel_map(
-         ...     process_batch,
-         ...     range(1000),
-         ...     batch_size=100
-         ... )
-
-     References:
-         OPT-001: Parallel Execution Framework
-     """
-     import time
-
-     items = list(iterable)
-     if not items:
-         return ParallelResult(results=[], execution_time=0.0, success_count=0, error_count=0)
-
-     # Create batches
-     batches = [items[i : i + batch_size] for i in range(0, len(items), batch_size)]
-
-     start_time = time.time()
-     executor_class = ThreadPoolExecutor if use_threads else ProcessPoolExecutor
-     max_workers = get_optimal_workers(max_workers)
-
-     all_results: list[R] = []
-     errors: list[Exception] = []
-     success_count = 0
-     error_count = 0
-
-     with executor_class(max_workers=max_workers) as executor:
-         futures = {executor.submit(func, batch): i for i, batch in enumerate(batches)}
-
-         for future in as_completed(futures, timeout=timeout):
-             try:
-                 batch_results = future.result()
-                 all_results.extend(batch_results)
-                 success_count += 1
-             except Exception as e:
-                 error_count += 1
-                 errors.append(e)
-
-     execution_time = time.time() - start_time
-
-     return ParallelResult(
-         results=all_results,
-         execution_time=execution_time,
-         success_count=success_count,
-         error_count=error_count,
-         errors=errors if errors else None,
-     )
-
-
- def parallel_filter(
-     func: Callable[[T], bool],
-     iterable: Iterable[T],
-     *,
-     max_workers: int | None = None,
-     use_threads: bool = True,
-     timeout: float | None = None,
- ) -> ParallelResult[T]:
-     """Filter items in parallel.
-
-     Applies predicate to items in parallel, filtering results.
-
-     Args:
-         func: Predicate function returning True to keep item.
-         iterable: Items to filter.
-         max_workers: Maximum concurrent workers.
-         use_threads: Use threads (True) or processes (False).
-         timeout: Timeout per task in seconds.
-
-     Returns:
-         ParallelResult with filtered items.
-
-     Example:
-         >>> def is_even(x):
-         ...     return x % 2 == 0
-         >>> result = parallel_filter(is_even, range(100))
-
-     References:
-         OPT-001: Parallel Execution Framework
-     """
-     import time
-
-     items = list(iterable)
-     if not items:
-         return ParallelResult(results=[], execution_time=0.0, success_count=0, error_count=0)
-
-     executor_class = ThreadPoolExecutor if use_threads else ProcessPoolExecutor
-     max_workers = get_optimal_workers(max_workers)
-
-     start_time = time.time()
-     results: list[T] = []
-     errors: list[Exception] = []
-     success_count = 0
-     error_count = 0
-
-     with executor_class(max_workers=max_workers) as executor:
-         futures = {executor.submit(func, item): item for item in items}
-
-         for future in as_completed(futures, timeout=timeout):
-             item = futures[future]
-             try:
-                 if future.result():
-                     results.append(item)
-                 success_count += 1
-             except Exception as e:
-                 error_count += 1
-                 errors.append(e)
-
-     execution_time = time.time() - start_time
-
-     return ParallelResult(
-         results=results,
-         execution_time=execution_time,
-         success_count=success_count,
-         error_count=error_count,
-         errors=errors if errors else None,
-     )
-
-
- def chunked_parallel_map(
-     func: Callable[[NDArray[np.float64]], NDArray[np.float64]],
-     data: NDArray[np.float64],
-     *,
-     chunk_size: int = 10000,
-     max_workers: int | None = None,
-     use_threads: bool = True,
-     timeout: float | None = None,
- ) -> NDArray[np.float64]:
-     """Apply function to chunks of array data in parallel.
-
-     Useful for processing large arrays where parallelization overhead
-     is justified.
-
-     Args:
-         func: Function accepting 1D array chunk.
-         data: Array to process.
-         chunk_size: Number of samples per chunk.
-         max_workers: Maximum concurrent workers.
-         use_threads: Use threads (True) or processes (False).
-         timeout: Timeout per chunk in seconds.
-
-     Returns:
-         Processed array (concatenated chunks).
-
-     Raises:
-         AnalysisError: If processing fails.
-
-     Example:
-         >>> def process_chunk(chunk):
-         ...     return np.fft.fft(chunk)
-         >>> result = chunked_parallel_map(process_chunk, data, chunk_size=1000)
-
-     References:
-         OPT-001: Parallel Execution Framework
-     """
-     if len(data) == 0:
-         return np.array([])
-
-     if len(data) <= chunk_size:
-         return func(data)
-
-     # Create chunks
-     chunks = [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)]
-
-     executor_class = ThreadPoolExecutor if use_threads else ProcessPoolExecutor
-     max_workers = get_optimal_workers(max_workers)
-
-     results: list[NDArray[np.float64]] = []
-
-     with executor_class(max_workers=max_workers) as executor:
-         futures = {executor.submit(func, chunk): i for i, chunk in enumerate(chunks)}
-
-         for future in as_completed(futures, timeout=timeout):
-             try:
-                 results.append(future.result())
-             except Exception as e:
-                 raise AnalysisError(f"Chunk processing failed: {e!s}") from e
-
-     return np.concatenate(results)
-
-
- __all__ = [
-     "ParallelResult",
-     "WorkerPool",
-     "batch_parallel_map",
-     "chunked_parallel_map",
-     "get_optimal_workers",
-     "parallel_filter",
-     "parallel_map",
-     "parallel_reduce",
- ]
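
Note: `oscura/utils/optimization/parallel.py` is removed in 0.10.0 with no replacement visible in this diff, and the hunk above shows it was a thin wrapper over the stdlib `concurrent.futures`. A minimal migration sketch, assuming the caller only needs the ordered results list and not the removed `ParallelResult` bookkeeping (the helper name `parallel_map` here is kept only for familiarity):

# Minimal stdlib replacement for the removed parallel_map(); like the
# original, results are stored by submission index so input order is kept.
from concurrent.futures import ThreadPoolExecutor, as_completed

def parallel_map(func, iterable, *, max_workers=None, timeout=None):
    items = list(iterable)
    results = [None] * len(items)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {executor.submit(func, item): i for i, item in enumerate(items)}
        for future in as_completed(futures, timeout=timeout):
            results[futures[future]] = future.result()  # re-raises task errors
    return results

# Usage mirroring the removed module's docstring example:
doubled = parallel_map(lambda x: x * 2, range(100))

Unlike the deleted wrapper, this raises on the first failed task instead of collecting errors; swap in ProcessPoolExecutor (with a picklable, module-level func) if process-based parallelism is needed.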