oscura-0.3.0-py3-none-any.whl → oscura-0.5.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (59)
  1. oscura/__init__.py +1 -7
  2. oscura/acquisition/__init__.py +147 -0
  3. oscura/acquisition/file.py +255 -0
  4. oscura/acquisition/hardware.py +186 -0
  5. oscura/acquisition/saleae.py +340 -0
  6. oscura/acquisition/socketcan.py +315 -0
  7. oscura/acquisition/streaming.py +38 -0
  8. oscura/acquisition/synthetic.py +229 -0
  9. oscura/acquisition/visa.py +376 -0
  10. oscura/analyzers/__init__.py +3 -0
  11. oscura/analyzers/digital/__init__.py +48 -0
  12. oscura/analyzers/digital/clock.py +9 -1
  13. oscura/analyzers/digital/edges.py +1 -1
  14. oscura/analyzers/digital/extraction.py +195 -0
  15. oscura/analyzers/digital/ic_database.py +498 -0
  16. oscura/analyzers/digital/timing.py +41 -11
  17. oscura/analyzers/digital/timing_paths.py +339 -0
  18. oscura/analyzers/digital/vintage.py +377 -0
  19. oscura/analyzers/digital/vintage_result.py +148 -0
  20. oscura/analyzers/protocols/__init__.py +22 -1
  21. oscura/analyzers/protocols/parallel_bus.py +449 -0
  22. oscura/analyzers/side_channel/__init__.py +52 -0
  23. oscura/analyzers/side_channel/power.py +690 -0
  24. oscura/analyzers/side_channel/timing.py +369 -0
  25. oscura/analyzers/signal_integrity/sparams.py +1 -1
  26. oscura/automotive/__init__.py +4 -2
  27. oscura/automotive/can/patterns.py +3 -1
  28. oscura/automotive/can/session.py +277 -78
  29. oscura/automotive/can/state_machine.py +5 -2
  30. oscura/builders/__init__.py +9 -11
  31. oscura/builders/signal_builder.py +99 -191
  32. oscura/core/exceptions.py +5 -1
  33. oscura/export/__init__.py +12 -0
  34. oscura/export/wavedrom.py +430 -0
  35. oscura/exporters/json_export.py +47 -0
  36. oscura/exporters/vintage_logic_csv.py +247 -0
  37. oscura/loaders/__init__.py +1 -0
  38. oscura/loaders/chipwhisperer.py +393 -0
  39. oscura/loaders/touchstone.py +1 -1
  40. oscura/reporting/__init__.py +7 -0
  41. oscura/reporting/vintage_logic_report.py +523 -0
  42. oscura/session/session.py +54 -46
  43. oscura/sessions/__init__.py +70 -0
  44. oscura/sessions/base.py +323 -0
  45. oscura/sessions/blackbox.py +640 -0
  46. oscura/sessions/generic.py +189 -0
  47. oscura/utils/autodetect.py +5 -1
  48. oscura/visualization/digital_advanced.py +718 -0
  49. oscura/visualization/figure_manager.py +156 -0
  50. {oscura-0.3.0.dist-info → oscura-0.5.0.dist-info}/METADATA +86 -5
  51. {oscura-0.3.0.dist-info → oscura-0.5.0.dist-info}/RECORD +54 -33
  52. oscura/automotive/dtc/data.json +0 -2763
  53. oscura/schemas/bus_configuration.json +0 -322
  54. oscura/schemas/device_mapping.json +0 -182
  55. oscura/schemas/packet_format.json +0 -418
  56. oscura/schemas/protocol_definition.json +0 -363
  57. {oscura-0.3.0.dist-info → oscura-0.5.0.dist-info}/WHEEL +0 -0
  58. {oscura-0.3.0.dist-info → oscura-0.5.0.dist-info}/entry_points.txt +0 -0
  59. {oscura-0.3.0.dist-info → oscura-0.5.0.dist-info}/licenses/LICENSE +0 -0
oscura/exporters/vintage_logic_csv.py
@@ -0,0 +1,247 @@
+ """CSV export functions for vintage logic analysis results.
+
+ This module provides specialized CSV exporters for vintage logic analysis data,
+ including timing measurements, IC identification, and bill of materials.
+
+ Example:
+     >>> from oscura.exporters.vintage_logic_csv import export_bom_csv
+     >>> export_bom_csv(result, "bom.csv")
+ """
+
+ from __future__ import annotations
+
+ import csv
+ from pathlib import Path
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from oscura.analyzers.digital.vintage_result import VintageLogicAnalysisResult
+
+
+ def export_timing_measurements_csv(
+     result: VintageLogicAnalysisResult,
+     path: str | Path,
+ ) -> None:
+     """Export timing measurements to CSV.
+
+     Creates a CSV file with columns: parameter, measured_value_ns, measurement_type.
+
+     Args:
+         result: Vintage logic analysis result.
+         path: Output CSV file path.
+
+     Example:
+         >>> export_timing_measurements_csv(result, "timing.csv")
+     """
+     path = Path(path)
+
+     with path.open("w", newline="") as csvfile:
+         writer = csv.writer(csvfile)
+
+         # Write header
+         writer.writerow(["parameter", "measured_value_ns", "measurement_type"])
+
+         # Write timing measurements
+         for param_name, value in result.timing_measurements.items():
+             # Determine measurement type from parameter name
+             if "_t_pd" in param_name:
+                 meas_type = "propagation_delay"
+             elif "_t_su" in param_name:
+                 meas_type = "setup_time"
+             elif "_t_h" in param_name:
+                 meas_type = "hold_time"
+             elif "_t_w" in param_name:
+                 meas_type = "pulse_width"
+             else:
+                 meas_type = "other"
+
+             writer.writerow([param_name, f"{value * 1e9:.3f}", meas_type])
+
+
+ def export_ic_identification_csv(
+     result: VintageLogicAnalysisResult,
+     path: str | Path,
+ ) -> None:
+     """Export IC identification results to CSV.
+
+     Creates a CSV file with columns: ic_name, confidence, family, t_pd_ns, t_su_ns,
+     t_h_ns, t_w_ns, validation_status.
+
+     Args:
+         result: Vintage logic analysis result.
+         path: Output CSV file path.
+
+     Example:
+         >>> export_ic_identification_csv(result, "ic_identification.csv")
+     """
+     path = Path(path)
+
+     with path.open("w", newline="") as csvfile:
+         writer = csv.writer(csvfile)
+
+         # Write header
+         writer.writerow(
+             [
+                 "ic_name",
+                 "confidence",
+                 "family",
+                 "t_pd_ns",
+                 "t_su_ns",
+                 "t_h_ns",
+                 "t_w_ns",
+                 "validation_status",
+             ]
+         )
+
+         # Write IC identification results
+         for ic_result in result.identified_ics:
+             # Extract timing parameters
+             t_pd = ic_result.timing_params.get("t_pd", 0) * 1e9
+             t_su = ic_result.timing_params.get("t_su", 0) * 1e9
+             t_h = ic_result.timing_params.get("t_h", 0) * 1e9
+             t_w = ic_result.timing_params.get("t_w", 0) * 1e9
+
+             # Determine validation status
+             validation_failed = any(v.get("passes") is False for v in ic_result.validation.values())
+             validation_status = "FAIL" if validation_failed else "PASS"
+
+             writer.writerow(
+                 [
+                     ic_result.ic_name,
+                     f"{ic_result.confidence:.3f}",
+                     ic_result.family,
+                     f"{t_pd:.3f}" if t_pd > 0 else "",
+                     f"{t_su:.3f}" if t_su > 0 else "",
+                     f"{t_h:.3f}" if t_h > 0 else "",
+                     f"{t_w:.3f}" if t_w > 0 else "",
+                     validation_status,
+                 ]
+             )
+
+
+ def export_bom_csv(
+     result: VintageLogicAnalysisResult,
+     path: str | Path,
+ ) -> None:
+     """Export bill of materials to CSV.
+
+     Creates a CSV file compatible with spreadsheet programs and procurement systems.
+     Columns: part_number, description, quantity, category, notes.
+
+     Args:
+         result: Vintage logic analysis result.
+         path: Output CSV file path.
+
+     Example:
+         >>> export_bom_csv(result, "bom.csv")
+     """
+     path = Path(path)
+
+     with path.open("w", newline="") as csvfile:
+         writer = csv.writer(csvfile)
+
+         # Write header
+         writer.writerow(["part_number", "description", "quantity", "category", "notes"])
+
+         # Write BOM entries
+         for entry in result.bom:
+             writer.writerow(
+                 [
+                     entry.part_number,
+                     entry.description,
+                     entry.quantity,
+                     entry.category,
+                     entry.notes or "",
+                 ]
+             )
+
+
+ def export_voltage_levels_csv(
+     result: VintageLogicAnalysisResult,
+     path: str | Path,
+ ) -> None:
+     """Export voltage levels to CSV.
+
+     Creates a CSV file with measured voltage levels for the detected logic family.
+
+     Args:
+         result: Vintage logic analysis result.
+         path: Output CSV file path.
+
+     Example:
+         >>> export_voltage_levels_csv(result, "voltage_levels.csv")
+     """
+     path = Path(path)
+
+     with path.open("w", newline="") as csvfile:
+         writer = csv.writer(csvfile)
+
+         # Write header
+         writer.writerow(["parameter", "voltage_v", "logic_family"])
+
+         # Write voltage levels
+         for param, value in result.voltage_levels.items():
+             writer.writerow([param, f"{value:.3f}", result.detected_family])
+
+
+ def export_all_vintage_logic_csv(
+     result: VintageLogicAnalysisResult,
+     output_dir: str | Path,
+     *,
+     prefix: str = "",
+ ) -> dict[str, Path]:
+     """Export all vintage logic analysis data to CSV files.
+
+     Convenience function that exports all data types to separate CSV files.
+
+     Args:
+         result: Vintage logic analysis result.
+         output_dir: Output directory for CSV files.
+         prefix: Optional prefix for file names.
+
+     Returns:
+         Dictionary mapping data type to output file path.
+
+     Example:
+         >>> paths = export_all_vintage_logic_csv(result, "./output", prefix="analysis_")
+         >>> print(paths["bom"])  # PosixPath('./output/analysis_bom.csv')
+     """
+     output_dir = Path(output_dir)
+     output_dir.mkdir(parents=True, exist_ok=True)
+
+     paths: dict[str, Path] = {}
+
+     # Export timing measurements
+     if result.timing_measurements:
+         timing_path = output_dir / f"{prefix}timing_measurements.csv"
+         export_timing_measurements_csv(result, timing_path)
+         paths["timing_measurements"] = timing_path
+
+     # Export IC identification
+     if result.identified_ics:
+         ic_path = output_dir / f"{prefix}ic_identification.csv"
+         export_ic_identification_csv(result, ic_path)
+         paths["ic_identification"] = ic_path
+
+     # Export BOM
+     if result.bom:
+         bom_path = output_dir / f"{prefix}bom.csv"
+         export_bom_csv(result, bom_path)
+         paths["bom"] = bom_path
+
+     # Export voltage levels
+     if result.voltage_levels:
+         voltage_path = output_dir / f"{prefix}voltage_levels.csv"
+         export_voltage_levels_csv(result, voltage_path)
+         paths["voltage_levels"] = voltage_path
+
+     return paths
+
+
+ __all__ = [
+     "export_all_vintage_logic_csv",
+     "export_bom_csv",
+     "export_ic_identification_csv",
+     "export_timing_measurements_csv",
+     "export_voltage_levels_csv",
+ ]
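For orientation, here is a minimal usage sketch of the exporter module added above. The result object is hypothetical: it stands in for a VintageLogicAnalysisResult produced elsewhere by the vintage logic analyzer, and the output directory and prefix are placeholders; only the exporter function itself comes from this diff.

# Sketch: write every CSV artifact from an existing analysis result.
# `result` is assumed to be a VintageLogicAnalysisResult obtained from the analyzer.
from oscura.exporters.vintage_logic_csv import export_all_vintage_logic_csv

paths = export_all_vintage_logic_csv(result, "./reports", prefix="board_a_")
for kind, csv_path in paths.items():
    print(f"{kind}: {csv_path}")  # e.g. "bom: reports/board_a_bom.csv"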
oscura/loaders/__init__.py
@@ -39,6 +39,7 @@ _LOADER_REGISTRY: dict[str, tuple[str, str]] = {
      "wav": ("oscura.loaders.wav", "load_wav"),
      "tdms": ("oscura.loaders.tdms", "load_tdms"),
      "touchstone": ("oscura.loaders.touchstone", "load_touchstone"),
+     "chipwhisperer": ("oscura.loaders.chipwhisperer", "load_chipwhisperer"),
  }


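The new registry entry only records a dotted module path and a callable name. As an illustration of the lazy-import pattern such a table supports (a sketch of the pattern, not oscura's actual dispatch code), resolution could look like this:

# Illustrative sketch only: resolve a registry entry by importing its module on demand.
from importlib import import_module

def _resolve_loader(fmt: str):
    # _LOADER_REGISTRY maps a format key to (module_path, function_name).
    module_path, func_name = _LOADER_REGISTRY[fmt]
    return getattr(import_module(module_path), func_name)

# loader = _resolve_loader("chipwhisperer")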
oscura/loaders/chipwhisperer.py
@@ -0,0 +1,393 @@
+ """ChipWhisperer trace loader.
+
+ This module loads power/EM traces from ChipWhisperer capture files (.npy, .trs).
+
+ ChipWhisperer is a widely used open-source platform for side-channel analysis
+ and hardware security testing.
+
+ Example:
+     >>> from oscura.loaders.chipwhisperer import load_chipwhisperer
+     >>> traceset = load_chipwhisperer("capture_data.npy")
+     >>> print(f"Loaded {traceset.n_traces} traces")
+
+ References:
+     ChipWhisperer Project: https://github.com/newaetech/chipwhisperer
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+ import numpy as np
+
+ from oscura.core.exceptions import FormatError, LoaderError
+ from oscura.core.types import TraceMetadata, WaveformTrace
+
+ if TYPE_CHECKING:
+     from os import PathLike
+
+     from numpy.typing import NDArray
+
+ __all__ = [
+     "ChipWhispererTraceSet",
+     "load_chipwhisperer",
+     "load_chipwhisperer_npy",
+     "load_chipwhisperer_trs",
+ ]
+
+
+ @dataclass
+ class ChipWhispererTraceSet:
+     """ChipWhisperer trace set container.
+
+     Attributes:
+         traces: Power/EM traces (n_traces, n_samples).
+         plaintexts: Input plaintexts (n_traces, plaintext_size).
+         ciphertexts: Output ciphertexts (n_traces, ciphertext_size).
+         keys: Encryption keys if known (n_traces, key_size).
+         sample_rate: Sample rate in Hz.
+         metadata: Additional metadata.
+     """
+
+     traces: NDArray[np.floating[Any]]
+     plaintexts: NDArray[np.integer[Any]] | None = None
+     ciphertexts: NDArray[np.integer[Any]] | None = None
+     keys: NDArray[np.integer[Any]] | None = None
+     sample_rate: float = 1e6
+     metadata: dict[str, object] | None = None
+
+     @property
+     def n_traces(self) -> int:
+         """Number of traces."""
+         return int(self.traces.shape[0])
+
+     @property
+     def n_samples(self) -> int:
+         """Number of samples per trace."""
+         return int(self.traces.shape[1])
+
+
+ def load_chipwhisperer(
+     path: str | PathLike[str],
+     *,
+     sample_rate: float | None = None,
+ ) -> ChipWhispererTraceSet:
+     """Load ChipWhisperer traces from file.
+
+     Auto-detects the file format (.npy, .trs) and delegates to the appropriate loader.
+
+     Args:
+         path: Path to ChipWhisperer trace file.
+         sample_rate: Override sample rate (if not in file).
+
+     Returns:
+         ChipWhispererTraceSet with traces and metadata.
+
+     Raises:
+         LoaderError: If the file cannot be loaded.
+         FormatError: If the file format is invalid.
+
+     Example:
+         >>> traceset = load_chipwhisperer("traces.npy")
+         >>> print(f"Loaded {traceset.n_traces} traces")
+         >>> print(f"Samples per trace: {traceset.n_samples}")
+     """
+     path = Path(path)
+
+     if not path.exists():
+         raise LoaderError("File not found", file_path=str(path))
+
+     ext = path.suffix.lower()
+
+     if ext == ".npy":
+         return load_chipwhisperer_npy(path, sample_rate=sample_rate)
+     elif ext == ".trs":
+         return load_chipwhisperer_trs(path, sample_rate=sample_rate)
+     else:
+         raise FormatError(
+             f"Unsupported ChipWhisperer format: {ext}",
+             file_path=str(path),
+             expected=".npy or .trs",
+             got=ext,
+         )
+
+
+ def load_chipwhisperer_npy(
+     path: str | PathLike[str],
+     *,
+     sample_rate: float | None = None,
+ ) -> ChipWhispererTraceSet:
+     """Load ChipWhisperer traces from .npy file.
+
+     ChipWhisperer often saves trace data as numpy .npy files with
+     associated metadata in separate .npy files (textin.npy, textout.npy, etc.).
+
+     Args:
+         path: Path to traces .npy file.
+         sample_rate: Override sample rate.
+
+     Returns:
+         ChipWhispererTraceSet with traces and metadata.
+
+     Raises:
+         LoaderError: If the file cannot be loaded.
+
+     Example:
+         >>> traceset = load_chipwhisperer_npy("traces.npy")
+         >>> # Look for associated files
+         >>> if traceset.plaintexts is not None:
+         ...     print("Plaintexts available")
+     """
+     path = Path(path)
+     base_path = path.parent
+     base_name = path.stem
+
+     try:
+         # Load main trace data
+         traces = np.load(path)
+
+         # Ensure 2D array (n_traces, n_samples)
+         if traces.ndim == 1:
+             traces = traces.reshape(1, -1)
+         elif traces.ndim > 2:
+             raise FormatError(
+                 f"Expected 1D or 2D trace array, got {traces.ndim}D",
+                 file_path=str(path),
+             )
+
+     except (OSError, ValueError) as e:
+         # Catch file I/O errors, but let FormatError propagate
+         raise LoaderError(
+             "Failed to load trace file",
+             file_path=str(path),
+             details=str(e),
+         ) from e
+
+     # Try to load associated files (common ChipWhisperer naming)
+     plaintexts = None
+     ciphertexts = None
+     keys = None
+
+     # Look for textin.npy (plaintexts)
+     textin_path = base_path / f"{base_name}_textin.npy"
+     if not textin_path.exists():
+         textin_path = base_path / "textin.npy"
+     if textin_path.exists():
+         try:
+             plaintexts = np.load(textin_path)
+         except Exception:
+             pass  # Optional metadata file; silently ignore if missing or corrupt
+
+     # Look for textout.npy (ciphertexts)
+     textout_path = base_path / f"{base_name}_textout.npy"
+     if not textout_path.exists():
+         textout_path = base_path / "textout.npy"
+     if textout_path.exists():
+         try:
+             ciphertexts = np.load(textout_path)
+         except Exception:
+             pass  # Optional metadata file; silently ignore if corrupt
+
+     # Look for keys.npy
+     keys_path = base_path / f"{base_name}_keys.npy"
+     if not keys_path.exists():
+         keys_path = base_path / "keys.npy"
+     if keys_path.exists():
+         try:
+             keys = np.load(keys_path)
+         except Exception:
+             pass  # Optional metadata file; silently ignore if corrupt
+
+     # Use default sample rate if not specified
+     if sample_rate is None:
+         sample_rate = 1e6  # Default 1 MS/s
+
+     return ChipWhispererTraceSet(
+         traces=traces.astype(np.float64),
+         plaintexts=plaintexts.astype(np.uint8) if plaintexts is not None else None,
+         ciphertexts=ciphertexts.astype(np.uint8) if ciphertexts is not None else None,
+         keys=keys.astype(np.uint8) if keys is not None else None,
+         sample_rate=sample_rate,
+         metadata={
+             "source_file": str(path),
+             "format": "chipwhisperer_npy",
+         },
+     )
+
+
+ def load_chipwhisperer_trs(
+     path: str | PathLike[str],
+     *,
+     sample_rate: float | None = None,
+ ) -> ChipWhispererTraceSet:
+     """Load ChipWhisperer traces from Inspector .trs file.
+
+     The .trs format is used by Riscure Inspector and supported by ChipWhisperer.
+
+     TRS file structure:
+         - Header with metadata
+         - Trace data (interleaved with trace-specific data)
+
+     Args:
+         path: Path to .trs file.
+         sample_rate: Override sample rate.
+
+     Returns:
+         ChipWhispererTraceSet with traces and metadata.
+
+     Raises:
+         LoaderError: If the file cannot be loaded.
+         FormatError: If the TRS format is invalid.
+
+     Example:
+         >>> traceset = load_chipwhisperer_trs("capture.trs")
+         >>> print(f"Loaded {traceset.n_traces} traces")
+
+     References:
+         Inspector Trace Set (.trs) file format specification
+     """
+     path = Path(path)
+
+     try:
+         with open(path, "rb") as f:
+             # Read TRS header
+             # Tag-Length-Value structure
+             tags = {}
+
+             while True:
+                 tag_byte = f.read(1)
+                 if not tag_byte or tag_byte == b"\x5f":  # End of header
+                     break
+
+                 tag = tag_byte[0]
+                 length = int.from_bytes(f.read(1), byteorder="little")
+
+                 # Extended length for large values
+                 if length == 0xFF:
+                     length = int.from_bytes(f.read(4), byteorder="little")
+
+                 value = f.read(length)
+                 tags[tag] = value
+
+             # Parse critical tags
+             # 0x41: Number of traces
+             n_traces = int.from_bytes(tags.get(0x41, b"\x00\x00"), byteorder="little")
+
+             # 0x42: Number of samples per trace
+             n_samples = int.from_bytes(tags.get(0x42, b"\x00\x00"), byteorder="little")
+
+             # 0x43: Sample coding (1=byte, 2=short, 4=float)
+             sample_coding = tags.get(0x43, b"\x01")[0]
+
+             # 0x44: Data length (plaintext/ciphertext)
+             data_length = int.from_bytes(tags.get(0x44, b"\x00\x00"), byteorder="little")
+
+             if n_traces == 0 or n_samples == 0:
+                 raise FormatError(
+                     "Invalid TRS file: zero traces or samples",
+                     file_path=str(path),
+                 )
+
+             # Determine numpy dtype from sample coding
+             dtype: type[np.int8] | type[np.int16] | type[np.float32]
+             if sample_coding == 1:
+                 dtype = np.int8
+             elif sample_coding == 2:
+                 dtype = np.int16
+             elif sample_coding == 4:
+                 dtype = np.float32
+             else:
+                 raise FormatError(
+                     f"Unsupported sample coding: {sample_coding}",
+                     file_path=str(path),
+                 )
+
+             # Read traces
+             traces = np.zeros((n_traces, n_samples), dtype=np.float64)
+             plaintexts = (
+                 np.zeros((n_traces, data_length), dtype=np.uint8) if data_length > 0 else None
+             )
+             ciphertexts = None  # Not typically in TRS files
+
+             for trace_idx in range(n_traces):
+                 # Read trace-specific data (plaintext/key)
+                 if data_length > 0:
+                     trace_data = np.frombuffer(f.read(data_length), dtype=np.uint8)
+                     if plaintexts is not None:
+                         plaintexts[trace_idx] = trace_data
+
+                 # Read trace samples
+                 trace_samples = np.frombuffer(f.read(n_samples * dtype(0).itemsize), dtype=dtype)
+                 traces[trace_idx] = trace_samples.astype(np.float64)
+
+     except OSError as e:
+         raise LoaderError(
+             "Failed to read TRS file",
+             file_path=str(path),
+             details=str(e),
+         ) from e
+     except Exception as e:
+         if isinstance(e, (LoaderError, FormatError)):
+             raise
+         raise LoaderError(
+             "Failed to parse TRS file",
+             file_path=str(path),
+             details=str(e),
+         ) from e
+
+     # Use default sample rate if not specified
+     if sample_rate is None:
+         sample_rate = 1e6  # Default 1 MS/s
+
+     return ChipWhispererTraceSet(
+         traces=traces,
+         plaintexts=plaintexts,
+         ciphertexts=ciphertexts,
+         keys=None,
+         sample_rate=sample_rate,
+         metadata={
+             "source_file": str(path),
+             "format": "chipwhisperer_trs",
+             "n_traces": n_traces,
+             "n_samples": n_samples,
+             "sample_coding": sample_coding,
+         },
+     )
+
+
+ def to_waveform_trace(
+     traceset: ChipWhispererTraceSet,
+     trace_index: int = 0,
+ ) -> WaveformTrace:
+     """Convert ChipWhisperer trace to WaveformTrace.
+
+     Args:
+         traceset: ChipWhisperer trace set.
+         trace_index: Index of trace to convert.
+
+     Returns:
+         WaveformTrace for a single trace.
+
+     Raises:
+         IndexError: If trace_index is out of range.
+
+     Example:
+         >>> traceset = load_chipwhisperer("traces.npy")
+         >>> trace = to_waveform_trace(traceset, trace_index=0)
+         >>> print(f"Sample rate: {trace.metadata.sample_rate} Hz")
+     """
+     if not 0 <= trace_index < traceset.n_traces:
+         raise IndexError(f"trace_index {trace_index} out of range [0, {traceset.n_traces})")
+
+     metadata = TraceMetadata(
+         sample_rate=traceset.sample_rate,
+         source_file=str(traceset.metadata.get("source_file", "")) if traceset.metadata else "",
+         channel_name=f"trace_{trace_index}",
+     )
+
+     return WaveformTrace(
+         data=traceset.traces[trace_index],
+         metadata=metadata,
+     )
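A short end-to-end sketch of the new loader: load a ChipWhisperer capture, inspect it, and hand a single trace to the rest of oscura as a WaveformTrace. The file name and sample-rate override are placeholders; the functions are the ones added in this file.

# Sketch: load a ChipWhisperer .npy capture and convert trace 0 for downstream analysis.
from oscura.loaders.chipwhisperer import load_chipwhisperer, to_waveform_trace

traceset = load_chipwhisperer("capture_data.npy", sample_rate=1e6)  # rate override is optional
print(f"{traceset.n_traces} traces x {traceset.n_samples} samples")
if traceset.plaintexts is not None:
    print("plaintexts recovered from textin.npy")

trace = to_waveform_trace(traceset, trace_index=0)
print(f"sample rate: {trace.metadata.sample_rate} Hz")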
oscura/loaders/touchstone.py
@@ -169,7 +169,7 @@ def _parse_touchstone(
                  if len(parts) % 2 == 1:
                      break  # New frequency line
              except (ValueError, IndexError):
-                 pass
+                 pass  # Skip lines that can't be parsed as numeric data

              for j in range(0, len(parts), 2):
                  if j + 1 < len(parts):
oscura/reporting/__init__.py
@@ -138,6 +138,10 @@ from oscura.reporting.template_system import (
      list_templates,
      load_template,
  )
+ from oscura.reporting.vintage_logic_report import (
+     VintageLogicReport,
+     generate_vintage_logic_report,
+ )

  __all__ = [
      # Comprehensive Analysis Report API (CAR-001 through CAR-007)
@@ -187,6 +191,8 @@ __all__ = [
      "TemplateEngine",
      "TemplateSection",
      "UnsupportedFormatError",
+     # Vintage Logic Reporting
+     "VintageLogicReport",
      "VisualEmphasis",
      "aggregate_batch_measurements",
      "analyze",
@@ -235,6 +241,7 @@ __all__ = [
      "generate_presentation_from_report",
      "generate_report",
      "generate_summary",
+     "generate_vintage_logic_report",
      "get_available_analyses",
      "get_axis_scaling",
      "list_templates",