oscura 0.8.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (161)
  1. oscura/__init__.py +19 -19
  2. oscura/__main__.py +4 -0
  3. oscura/analyzers/__init__.py +2 -0
  4. oscura/analyzers/digital/extraction.py +2 -3
  5. oscura/analyzers/digital/quality.py +1 -1
  6. oscura/analyzers/digital/timing.py +1 -1
  7. oscura/analyzers/ml/signal_classifier.py +6 -0
  8. oscura/analyzers/patterns/__init__.py +66 -0
  9. oscura/analyzers/power/basic.py +3 -3
  10. oscura/analyzers/power/soa.py +1 -1
  11. oscura/analyzers/power/switching.py +3 -3
  12. oscura/analyzers/signal_classification.py +529 -0
  13. oscura/analyzers/signal_integrity/sparams.py +3 -3
  14. oscura/analyzers/statistics/basic.py +10 -7
  15. oscura/analyzers/validation.py +1 -1
  16. oscura/analyzers/waveform/measurements.py +200 -156
  17. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  18. oscura/analyzers/waveform/spectral.py +182 -84
  19. oscura/api/dsl/commands.py +15 -6
  20. oscura/api/server/templates/base.html +137 -146
  21. oscura/api/server/templates/export.html +84 -110
  22. oscura/api/server/templates/home.html +248 -267
  23. oscura/api/server/templates/protocols.html +44 -48
  24. oscura/api/server/templates/reports.html +27 -35
  25. oscura/api/server/templates/session_detail.html +68 -78
  26. oscura/api/server/templates/sessions.html +62 -72
  27. oscura/api/server/templates/waveforms.html +54 -64
  28. oscura/automotive/__init__.py +1 -1
  29. oscura/automotive/can/session.py +1 -1
  30. oscura/automotive/dbc/generator.py +638 -23
  31. oscura/automotive/dtc/data.json +17 -102
  32. oscura/automotive/flexray/fibex.py +9 -1
  33. oscura/automotive/uds/decoder.py +99 -6
  34. oscura/cli/analyze.py +8 -2
  35. oscura/cli/batch.py +36 -5
  36. oscura/cli/characterize.py +18 -4
  37. oscura/cli/export.py +47 -5
  38. oscura/cli/main.py +2 -0
  39. oscura/cli/onboarding/wizard.py +10 -6
  40. oscura/cli/pipeline.py +585 -0
  41. oscura/cli/visualize.py +6 -4
  42. oscura/convenience.py +400 -32
  43. oscura/core/measurement_result.py +286 -0
  44. oscura/core/progress.py +1 -1
  45. oscura/core/schemas/device_mapping.json +2 -8
  46. oscura/core/schemas/packet_format.json +4 -24
  47. oscura/core/schemas/protocol_definition.json +2 -12
  48. oscura/core/types.py +232 -239
  49. oscura/correlation/multi_protocol.py +1 -1
  50. oscura/export/legacy/__init__.py +11 -0
  51. oscura/export/legacy/wav.py +75 -0
  52. oscura/exporters/__init__.py +19 -0
  53. oscura/exporters/wireshark.py +809 -0
  54. oscura/hardware/acquisition/file.py +5 -19
  55. oscura/hardware/acquisition/saleae.py +10 -10
  56. oscura/hardware/acquisition/socketcan.py +4 -6
  57. oscura/hardware/acquisition/synthetic.py +1 -5
  58. oscura/hardware/acquisition/visa.py +6 -6
  59. oscura/hardware/security/side_channel_detector.py +5 -508
  60. oscura/inference/message_format.py +686 -1
  61. oscura/jupyter/display.py +2 -2
  62. oscura/jupyter/magic.py +3 -3
  63. oscura/loaders/__init__.py +17 -12
  64. oscura/loaders/binary.py +1 -1
  65. oscura/loaders/chipwhisperer.py +1 -2
  66. oscura/loaders/configurable.py +1 -1
  67. oscura/loaders/csv_loader.py +2 -2
  68. oscura/loaders/hdf5_loader.py +1 -1
  69. oscura/loaders/lazy.py +6 -1
  70. oscura/loaders/mmap_loader.py +0 -1
  71. oscura/loaders/numpy_loader.py +8 -7
  72. oscura/loaders/preprocessing.py +3 -5
  73. oscura/loaders/rigol.py +21 -7
  74. oscura/loaders/sigrok.py +2 -5
  75. oscura/loaders/tdms.py +3 -2
  76. oscura/loaders/tektronix.py +38 -32
  77. oscura/loaders/tss.py +20 -27
  78. oscura/loaders/validation.py +17 -10
  79. oscura/loaders/vcd.py +13 -8
  80. oscura/loaders/wav.py +1 -6
  81. oscura/pipeline/__init__.py +76 -0
  82. oscura/pipeline/handlers/__init__.py +165 -0
  83. oscura/pipeline/handlers/analyzers.py +1045 -0
  84. oscura/pipeline/handlers/decoders.py +899 -0
  85. oscura/pipeline/handlers/exporters.py +1103 -0
  86. oscura/pipeline/handlers/filters.py +891 -0
  87. oscura/pipeline/handlers/loaders.py +640 -0
  88. oscura/pipeline/handlers/transforms.py +768 -0
  89. oscura/reporting/formatting/measurements.py +55 -14
  90. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  91. oscura/sessions/legacy.py +49 -1
  92. oscura/side_channel/__init__.py +38 -57
  93. oscura/utils/builders/signal_builder.py +5 -5
  94. oscura/utils/comparison/compare.py +7 -9
  95. oscura/utils/comparison/golden.py +1 -1
  96. oscura/utils/filtering/convenience.py +2 -2
  97. oscura/utils/math/arithmetic.py +38 -62
  98. oscura/utils/math/interpolation.py +20 -20
  99. oscura/utils/pipeline/__init__.py +4 -17
  100. oscura/utils/progressive.py +1 -4
  101. oscura/utils/triggering/edge.py +1 -1
  102. oscura/utils/triggering/pattern.py +2 -2
  103. oscura/utils/triggering/pulse.py +2 -2
  104. oscura/utils/triggering/window.py +3 -3
  105. oscura/validation/hil_testing.py +11 -11
  106. oscura/visualization/__init__.py +46 -284
  107. oscura/visualization/batch.py +72 -433
  108. oscura/visualization/plot.py +542 -53
  109. oscura/visualization/styles.py +184 -318
  110. oscura/workflows/batch/advanced.py +1 -1
  111. oscura/workflows/batch/aggregate.py +12 -9
  112. oscura/workflows/complete_re.py +251 -23
  113. oscura/workflows/digital.py +27 -4
  114. oscura/workflows/multi_trace.py +136 -17
  115. oscura/workflows/waveform.py +11 -6
  116. oscura-0.11.0.dist-info/METADATA +460 -0
  117. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/RECORD +120 -145
  118. oscura/side_channel/dpa.py +0 -1025
  119. oscura/utils/optimization/__init__.py +0 -19
  120. oscura/utils/optimization/parallel.py +0 -443
  121. oscura/utils/optimization/search.py +0 -532
  122. oscura/utils/pipeline/base.py +0 -338
  123. oscura/utils/pipeline/composition.py +0 -248
  124. oscura/utils/pipeline/parallel.py +0 -449
  125. oscura/utils/pipeline/pipeline.py +0 -375
  126. oscura/utils/search/__init__.py +0 -16
  127. oscura/utils/search/anomaly.py +0 -424
  128. oscura/utils/search/context.py +0 -294
  129. oscura/utils/search/pattern.py +0 -288
  130. oscura/utils/storage/__init__.py +0 -61
  131. oscura/utils/storage/database.py +0 -1166
  132. oscura/visualization/accessibility.py +0 -526
  133. oscura/visualization/annotations.py +0 -371
  134. oscura/visualization/axis_scaling.py +0 -305
  135. oscura/visualization/colors.py +0 -451
  136. oscura/visualization/digital.py +0 -436
  137. oscura/visualization/eye.py +0 -571
  138. oscura/visualization/histogram.py +0 -281
  139. oscura/visualization/interactive.py +0 -1035
  140. oscura/visualization/jitter.py +0 -1042
  141. oscura/visualization/keyboard.py +0 -394
  142. oscura/visualization/layout.py +0 -400
  143. oscura/visualization/optimization.py +0 -1079
  144. oscura/visualization/palettes.py +0 -446
  145. oscura/visualization/power.py +0 -508
  146. oscura/visualization/power_extended.py +0 -955
  147. oscura/visualization/presets.py +0 -469
  148. oscura/visualization/protocols.py +0 -1246
  149. oscura/visualization/render.py +0 -223
  150. oscura/visualization/rendering.py +0 -444
  151. oscura/visualization/reverse_engineering.py +0 -838
  152. oscura/visualization/signal_integrity.py +0 -989
  153. oscura/visualization/specialized.py +0 -643
  154. oscura/visualization/spectral.py +0 -1226
  155. oscura/visualization/thumbnails.py +0 -340
  156. oscura/visualization/time_axis.py +0 -351
  157. oscura/visualization/waveform.py +0 -454
  158. oscura-0.8.0.dist-info/METADATA +0 -661
  159. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/WHEEL +0 -0
  160. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/entry_points.txt +0 -0
  161. {oscura-0.8.0.dist-info → oscura-0.11.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1103 @@
1
+ """Output exporter handlers for pipeline system.
2
+
3
+ This module provides handlers for exporting data to various file formats.
4
+ All handlers follow the standard signature: (inputs, params, step_name) -> outputs.
5
+
6
+ Available Handlers:
7
+ - output.json: Export to JSON file
8
+ - output.csv: Export to CSV file
9
+ - output.numpy: Save as NumPy .npz archive
10
+ - output.hdf5: Save to HDF5 file
11
+ - output.vcd: Export as VCD (Value Change Dump)
12
+ - output.pcap: Export as PCAP network capture
13
+ - output.wireshark: Export in Wireshark-compatible format
14
+ - output.report: Generate PDF/HTML report
15
+ - output.plot: Save matplotlib plot to file
16
+ - output.binary: Save raw binary data
17
+ - output.wav: Export as WAV audio file
18
+ - output.yaml: Export to YAML file
19
+
20
+ Example:
21
+ >>> # Pipeline YAML configuration
22
+ >>> steps:
23
+ >>> - name: save_results
24
+ >>> type: output.json
25
+ >>> params:
26
+ >>> path: results.json
27
+ >>> pretty: true
28
+ >>> inputs:
29
+ >>> data: analysis.measurements
30
+ """
31
+
32
+ from __future__ import annotations
33
+
34
+ import json
35
+ from pathlib import Path
36
+ from typing import Any
37
+
38
+ from oscura.core.config.pipeline import PipelineExecutionError
39
+ from oscura.pipeline.handlers import register_handler
40
+
41
+ # Lazy imports to avoid circular dependencies and reduce startup time
42
+ _numpy = None
43
+ _h5py = None
44
+ _pandas = None
45
+ _matplotlib = None
46
+
47
+
48
def _get_numpy() -> Any:
    """Return the numpy module, importing and caching it on first use."""
    global _numpy
    if _numpy is None:
        import numpy

        _numpy = numpy
    return _numpy
56
+
57
+
58
def _get_h5py() -> Any:
    """Return the h5py module, importing and caching it on first use."""
    global _h5py
    if _h5py is None:
        import h5py

        _h5py = h5py
    return _h5py
66
+
67
+
68
def _get_pandas() -> Any:
    """Return the pandas module, importing and caching it on first use."""
    global _pandas
    if _pandas is None:
        import pandas

        _pandas = pandas
    return _pandas
76
+
77
+
78
def _get_matplotlib() -> Any:
    """Return matplotlib.pyplot, importing and caching it on first use."""
    global _matplotlib
    if _matplotlib is None:
        import matplotlib.pyplot as plt

        _matplotlib = plt
    return _matplotlib
86
+
87
+
88
@register_handler("output.json")
def handle_output_json(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export data to JSON file.

    Parameters:
        path (str): Output file path
        pretty (bool, optional): Pretty-print with indentation (default: True)
        indent (int, optional): Indentation spaces (default: 2)

    Inputs:
        data: Data to export (dict, list, or JSON-serializable object)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'json'

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, or the write fails.

    Example:
        >>> # Export analysis results
        >>> steps:
        >>>   - name: save_results
        >>>     type: output.json
        >>>     params:
        >>>       path: results.json
        >>>       pretty: true
        >>>     inputs:
        >>>       data: analysis.measurements
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError(
            "Missing required input 'data'. Connect 'data' input to previous step output.",
            step_name=step_name,
        )

    path = params.get("path")
    if not path:
        raise PipelineExecutionError(
            "Missing required parameter 'path'. Add 'path: output.json' to params.",
            step_name=step_name,
        )

    pretty = params.get("pretty", True)
    indent = params.get("indent", 2)

    # Resolve numpy once instead of on every recursion level.
    np = _get_numpy()

    def convert_to_serializable(obj: Any) -> Any:
        """Recursively convert numpy scalars/arrays and containers to JSON types."""
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.bool_ is not a subclass of np.integer/np.floating; without this
        # branch json.dump raises TypeError on numpy boolean scalars.
        if isinstance(obj, (np.integer, np.floating, np.bool_)):
            return obj.item()
        if isinstance(obj, dict):
            return {k: convert_to_serializable(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [convert_to_serializable(v) for v in obj]
        return obj

    try:
        data_serializable = convert_to_serializable(data)
        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Explicit encoding keeps output byte-stable across platforms.
        with open(output_path, "w", encoding="utf-8") as f:
            if pretty:
                json.dump(data_serializable, f, indent=indent)
            else:
                json.dump(data_serializable, f)

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write JSON file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "json",
    }
169
+
170
+
171
@register_handler("output.csv")
def handle_output_csv(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export tabular data to a CSV file.

    Parameters:
        path (str): Output file path
        delimiter (str, optional): CSV delimiter (default: ',')
        header (bool, optional): Include header row (default: True)
        index (bool, optional): Include row index (default: False)

    Inputs:
        data: Data to export (DataFrame, dict, list of dicts, or 2D array)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'csv'
        num_rows: Number of data rows written
        num_columns: Number of columns

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, the data type
            is unsupported, or the write fails.
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    sep = params.get("delimiter", ",")
    with_header = params.get("header", True)
    with_index = params.get("index", False)

    try:
        pd = _get_pandas()
        np = _get_numpy()

        # Normalize the input into a DataFrame; reject anything pandas
        # cannot meaningfully tabulate.
        if isinstance(data, pd.DataFrame):
            frame = data
        elif isinstance(data, (dict, list, np.ndarray)):
            frame = pd.DataFrame(data)
        else:
            raise ValueError(f"Unsupported data type: {type(data)}")

        target = Path(path).resolve()
        target.parent.mkdir(parents=True, exist_ok=True)

        frame.to_csv(target, sep=sep, header=with_header, index=with_index)

        size_on_disk = target.stat().st_size
        row_count, col_count = frame.shape
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write CSV file: {e}", step_name=step_name) from e

    return {
        "path": str(target),
        "bytes_written": size_on_disk,
        "format": "csv",
        "num_rows": row_count,
        "num_columns": col_count,
    }
245
+
246
+
247
@register_handler("output.numpy")
def handle_output_numpy(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Save data as NumPy .npz archive.

    Parameters:
        path (str): Output file path (.npz extension; appended if missing)
        compressed (bool, optional): Use compression (default: True)

    Inputs:
        data: Data to save (dict of arrays or single array named 'data')

    Outputs:
        path: Absolute file path of the file actually written
        bytes_written: Number of bytes written
        format: 'npz'
        arrays: List of array names saved

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, or the write fails.

    Example:
        >>> # Save trace and analysis results
        >>> steps:
        >>>   - name: save_numpy
        >>>     type: output.numpy
        >>>     params:
        >>>       path: output.npz
        >>>       compressed: true
        >>>     inputs:
        >>>       data: {signal: trace.data, spectrum: fft.power}
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    compressed = params.get("compressed", True)

    try:
        np = _get_numpy()

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Normalize input to a mapping of name -> ndarray.
        if isinstance(data, dict):
            arrays_dict = {k: np.asarray(v) for k, v in data.items()}
        elif isinstance(data, np.ndarray):
            arrays_dict = {"data": data}
        else:
            arrays_dict = {"data": np.asarray(data)}

        if compressed:
            np.savez_compressed(output_path, **arrays_dict)
        else:
            np.savez(output_path, **arrays_dict)

        # np.savez/np.savez_compressed append '.npz' when the path lacks it;
        # track the file actually written so stat() below does not raise
        # FileNotFoundError and the reported path is correct.
        if output_path.suffix != ".npz":
            output_path = output_path.with_name(output_path.name + ".npz")

        bytes_written = output_path.stat().st_size
        array_names = list(arrays_dict.keys())
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write NumPy file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "npz",
        "arrays": array_names,
    }
318
+
319
+
320
@register_handler("output.hdf5")
def handle_output_hdf5(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Save data to HDF5 file.

    Parameters:
        path (str): Output file path (.h5 or .hdf5 extension)
        dataset (str, optional): Dataset path within HDF5 (default: '/data')
        compression (str, optional): Compression algorithm ('gzip', 'lzf', default: 'gzip')
        mode (str, optional): File mode ('w' for overwrite, 'a' for append, default: 'w')

    Inputs:
        data: Data to save (array, dict of arrays, or trace object)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'hdf5'
        datasets: List of dataset paths created

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, or the write fails.
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    dataset_path = params.get("dataset", "/data")
    compression = params.get("compression", "gzip")
    mode = params.get("mode", "w")

    try:
        h5py = _get_h5py()
        np = _get_numpy()

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        created: list[str] = []

        with h5py.File(output_path, mode) as h5file:
            if isinstance(data, dict):
                # One dataset per key. With the default '/data' target the
                # keys live at the root; otherwise they nest under it.
                for key, value in data.items():
                    if dataset_path == "/data":
                        ds_path = f"/{key}"
                    else:
                        ds_path = f"{dataset_path}/{key}"
                    h5file.create_dataset(
                        ds_path, data=np.asarray(value), compression=compression
                    )
                    created.append(ds_path)
            else:
                # Single dataset at the requested location.
                h5file.create_dataset(
                    dataset_path, data=np.asarray(data), compression=compression
                )
                created.append(dataset_path)

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write HDF5 file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "hdf5",
        "datasets": created,
    }
398
+
399
+
400
@register_handler("output.vcd")
def handle_output_vcd(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export digital trace as VCD (Value Change Dump) file.

    Parameters:
        path (str): Output VCD file path
        signal_name (str, optional): Signal name in VCD (default: 'signal')
        timescale (str, optional): VCD timescale such as '1ns' or '10us'
            (default: '1ns')

    Inputs:
        data: Digital trace or dict with 'times' (seconds) and 'values'

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'vcd'
        num_transitions: Number of value changes written (the initial
            value dump counts as one)

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, the data shape
            or timescale is invalid, or the write fails.
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    signal_name = params.get("signal_name", "signal")
    timescale = params.get("timescale", "1ns")

    try:
        import re

        np = _get_numpy()

        # Extract times and values from a Trace-like object or a plain dict.
        if hasattr(data, "times") and hasattr(data, "data"):
            times = data.times
            values = data.data
        elif isinstance(data, dict) and "times" in data and "values" in data:
            times = data["times"]
            values = data["values"]
        else:
            raise ValueError("Data must have 'times' and 'values' or be a Trace object")

        # Body timestamps must be in units of the declared timescale.
        # (Previously they were always written in nanoseconds, which produced
        # wrong timing for any timescale other than '1ns'.)
        match = re.fullmatch(r"\s*(\d+)\s*(s|ms|us|ns|ps|fs)\s*", timescale)
        if match is None:
            raise ValueError(f"Invalid timescale: {timescale!r}")
        unit_seconds = {
            "s": 1.0,
            "ms": 1e-3,
            "us": 1e-6,
            "ns": 1e-9,
            "ps": 1e-12,
            "fs": 1e-15,
        }[match.group(2)]
        tick_seconds = int(match.group(1)) * unit_seconds

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, "w") as f:
            # Header section.
            f.write("$version\n")
            f.write(" Oscura VCD Exporter\n")
            f.write("$end\n")
            f.write(f"$timescale {timescale} $end\n")
            f.write("$scope module top $end\n")
            f.write(f"$var wire 1 ! {signal_name} $end\n")
            f.write("$upscope $end\n")
            f.write("$enddefinitions $end\n")
            f.write("$dumpvars\n")

            # VCD requires integer timestamps, expressed in timescale units.
            times_int = np.round(np.asarray(times) / tick_seconds).astype(np.int64)

            # Emit a change only when the bit actually toggles.
            prev_value = None
            num_transitions = 0
            for time, value in zip(times_int, values, strict=True):
                val_bit = "1" if value else "0"
                if val_bit != prev_value:
                    f.write(f"#{time}\n")
                    f.write(f"{val_bit}!\n")
                    prev_value = val_bit
                    num_transitions += 1

            # NOTE(review): strictly, '$dumpvars' should be closed with '$end'
            # right after the initial value dump; kept at end-of-file to
            # preserve the existing output layout, which common viewers accept.
            f.write("$end\n")

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write VCD file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "vcd",
        "num_transitions": num_transitions,
    }
498
+
499
+
500
@register_handler("output.pcap")
def handle_output_pcap(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export network packets as PCAP file.

    Parameters:
        path (str): Output PCAP file path
        link_type (int, optional): PCAP link layer type (default: 1 for Ethernet)

    Inputs:
        data: List of packet bytes, dicts with a 'data' key, or objects with
            a 'data' attribute. An optional per-packet 'timestamp' (float
            seconds since the epoch, as dict key or attribute) is honored;
            otherwise the current wall-clock time is used.

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'pcap'
        packet_count: Number of packets written

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, or the write fails.
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    link_type = params.get("link_type", 1)  # 1 = LINKTYPE_ETHERNET

    try:
        import struct
        import time as time_module

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, "wb") as f:
            # PCAP global header: magic, version 2.4, thiszone=0, sigfigs=0,
            # snaplen=65535, link layer type.
            f.write(struct.pack("<IHHIIII", 0xA1B2C3D4, 2, 4, 0, 0, 65535, link_type))

            packet_count = 0
            for packet in data:
                # Extract raw packet bytes.
                if isinstance(packet, bytes):
                    pkt_data = packet
                elif hasattr(packet, "data"):
                    pkt_data = packet.data
                elif isinstance(packet, dict) and "data" in packet:
                    pkt_data = packet["data"]
                else:
                    pkt_data = bytes(packet)

                # Prefer a timestamp carried by the packet itself; previously
                # every record was stamped with the export time, which loses
                # the original capture timing. Fall back to wall-clock time.
                if isinstance(packet, dict):
                    timestamp = packet.get("timestamp")
                else:
                    timestamp = getattr(packet, "timestamp", None)
                if not isinstance(timestamp, (int, float)):
                    timestamp = time_module.time()

                ts_sec = int(timestamp)
                ts_usec = int((timestamp - ts_sec) * 1e6)
                pkt_len = len(pkt_data)

                # Record header: ts_sec, ts_usec, captured len, original len.
                f.write(struct.pack("<IIII", ts_sec, ts_usec, pkt_len, pkt_len))
                f.write(pkt_data)
                packet_count += 1

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write PCAP file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "pcap",
        "packet_count": packet_count,
    }
583
+
584
+
585
@register_handler("output.wireshark")
def handle_output_wireshark(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export data in Wireshark-compatible format.

    Parameters:
        path (str): Output file path
        link_type (int, optional): PCAP link layer type (default: 1 for Ethernet)
        interface_name (str, optional): Interface name (default: 'oscura0')
        comment (str, optional): File comment

    Inputs:
        data: List of packet bytes or packet objects

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'pcapng'
        packet_count: Number of packets written

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, or the write fails.
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    # Note: interface_name and comment are documented but not yet used.
    # A full PCAPNG implementation would include them in interface
    # description blocks.

    try:
        # Delegate to the PCAP writer (Wireshark reads both PCAP and PCAPNG).
        # Honor a caller-supplied link_type instead of hard-coding Ethernet;
        # the default (1) matches the previous behavior.
        link_type = params.get("link_type", 1)
        pcap_result = handle_output_pcap(
            inputs, {"path": path, "link_type": link_type}, step_name
        )
        result: dict[str, Any] = dict(pcap_result)
        # Report the format Wireshark users expect, even though the bytes on
        # disk are classic PCAP (simplified implementation).
        result["format"] = "pcapng"
    except Exception as e:
        raise PipelineExecutionError(
            f"Failed to write Wireshark file: {e}", step_name=step_name
        ) from e

    return result
644
+
645
+
646
@register_handler("output.report")
def handle_output_report(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Generate PDF or HTML report from analysis results.

    Parameters:
        path (str): Output file path (.html or .pdf)
        title (str, optional): Report title (default: 'Analysis Report')
        format (str, optional): Output format ('html' or 'pdf', auto-detected from path)
        include_plots (bool, optional): Include matplotlib figures (default: True)

    Inputs:
        data: Report data (dict with sections, results, plots)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'html' or 'pdf'
        sections: Number of sections in report

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, PDF output is
            requested (not implemented), or the write fails.
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    title = params.get("title", "Analysis Report")
    fmt = params.get("format")
    # Note: include_plots is documented but not yet used in HTML report
    # generation. Future enhancement: embed matplotlib figures as base64 images.

    try:
        import html as html_module

        # Escape every interpolated value so arbitrary analysis output cannot
        # inject markup or scripts into the generated report (XSS).
        esc = html_module.escape

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Auto-detect format from extension.
        if fmt is None:
            fmt = "pdf" if output_path.suffix.lower() == ".pdf" else "html"

        if fmt == "html":
            html_content = f"""<!DOCTYPE html>
<html>
<head>
<title>{esc(str(title))}</title>
<style>
body {{ font-family: Arial, sans-serif; margin: 40px; }}
h1 {{ color: #333; }}
h2 {{ color: #666; margin-top: 30px; }}
table {{ border-collapse: collapse; width: 100%; margin: 20px 0; }}
th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
th {{ background-color: #f2f2f2; }}
.section {{ margin-bottom: 30px; }}
</style>
</head>
<body>
<h1>{esc(str(title))}</h1>
"""

            sections = 0
            if isinstance(data, dict):
                # One <div class="section"> per top-level key; dict values
                # render as key/value tables, everything else as a paragraph.
                for section_name, section_data in data.items():
                    html_content += (
                        f'<div class="section"><h2>{esc(str(section_name))}</h2>\n'
                    )
                    if isinstance(section_data, dict):
                        html_content += "<table><tr><th>Key</th><th>Value</th></tr>\n"
                        for key, value in section_data.items():
                            html_content += (
                                f"<tr><td>{esc(str(key))}</td>"
                                f"<td>{esc(str(value))}</td></tr>\n"
                            )
                        html_content += "</table>\n"
                    else:
                        html_content += f"<p>{esc(str(section_data))}</p>\n"
                    html_content += "</div>\n"
                    sections += 1
            else:
                html_content += f"<p>{esc(str(data))}</p>\n"
                sections = 1

            html_content += "</body></html>"

            with open(output_path, "w", encoding="utf-8") as f:
                f.write(html_content)

            bytes_written = output_path.stat().st_size
        else:
            # PDF generation would require reportlab or weasyprint.
            raise NotImplementedError("PDF report generation requires additional dependencies")

    except Exception as e:
        raise PipelineExecutionError(f"Failed to generate report: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": fmt,
        "sections": sections,
    }
756
+
757
+
758
@register_handler("output.plot")
def handle_output_plot(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Save matplotlib plot to file.

    Parameters:
        path (str): Output file path (.png, .pdf, .svg, etc.)
        dpi (int, optional): Resolution in dots per inch (default: 300)
        format (str, optional): Output format (auto-detected from path
            extension; falls back to 'png' when the path has no extension)
        transparent (bool, optional): Transparent background (default: False)

    Inputs:
        data: Matplotlib figure object or data to plot

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: Image format
        dpi: Resolution used

    Raises:
        PipelineExecutionError: If 'data' or 'path' is missing, or saving fails.

    Example:
        >>> # Save analysis plot
        >>> steps:
        >>>   - name: save_plot
        >>>     type: output.plot
        >>>     params:
        >>>       path: spectrum.png
        >>>       dpi: 300
        >>>       transparent: false
        >>>     inputs:
        >>>       data: plotter.figure
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    dpi = params.get("dpi", 300)
    fmt = params.get("format")
    transparent = params.get("transparent", False)

    try:
        plt = _get_matplotlib()

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Auto-detect format from the file extension. An extension-less path
        # would yield fmt == "" and make savefig raise, so default to 'png'.
        if fmt is None:
            fmt = output_path.suffix.lstrip(".") or "png"

        if hasattr(data, "savefig"):
            # Already a matplotlib figure: save it as-is. The caller owns the
            # figure, so it is deliberately not closed here.
            data.savefig(
                output_path, dpi=dpi, format=fmt, transparent=transparent, bbox_inches="tight"
            )
        else:
            # Create a simple plot from raw data: line plot for iterables,
            # centered text for scalars/strings/dicts.
            fig = plt.figure(figsize=(10, 6))
            if hasattr(data, "__iter__") and not isinstance(data, (str, dict)):
                plt.plot(data)
            else:
                plt.text(0.5, 0.5, str(data), ha="center", va="center")
            fig.savefig(
                output_path, dpi=dpi, format=fmt, transparent=transparent, bbox_inches="tight"
            )
            # Close only the figure we created to avoid leaking pyplot state.
            plt.close(fig)

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(f"Failed to save plot: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": fmt,
        "dpi": dpi,
    }
841
+
842
+
843
@register_handler("output.binary")
def handle_output_binary(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Save raw binary data to file.

    Parameters:
        path (str): Output file path
        dtype (str, optional): NumPy dtype for conversion (default: preserve input type)
        byte_order (str, optional): Byte order ('little', 'big', 'native', default: 'native')

    Inputs:
        data: Raw data (bytes, array, or list of numbers)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'binary'
        dtype: Data type used

    Raises:
        PipelineExecutionError: If 'data'/'path' is missing, 'byte_order' is
            invalid, or the file cannot be written.

    Example:
        >>> # Save raw ADC samples
        >>> steps:
        >>>   - name: save_raw
        >>>     type: output.binary
        >>>     params:
        >>>       path: samples.bin
        >>>       dtype: int16
        >>>       byte_order: little
        >>>     inputs:
        >>>       data: adc.samples
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    dtype = params.get("dtype")
    byte_order = params.get("byte_order", "native")
    # Reject unknown byte orders up front instead of silently writing native
    # order, which would corrupt downstream consumers expecting a swap.
    if byte_order not in ("little", "big", "native"):
        raise PipelineExecutionError(
            f"Invalid byte_order '{byte_order}'; expected 'little', 'big', or 'native'",
            step_name=step_name,
        )

    try:
        np = _get_numpy()

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        if isinstance(data, bytes):
            # Already raw bytes: write verbatim; dtype/byte_order do not apply.
            binary_data = data
            actual_dtype = "bytes"
        else:
            arr = np.asarray(data, dtype=dtype)
            # Report the logical dtype before any byte-order view change.
            actual_dtype = str(arr.dtype)

            # Apply explicit byte order; 'native' keeps the array as-is.
            if byte_order == "little":
                arr = arr.astype(arr.dtype.newbyteorder("<"))
            elif byte_order == "big":
                arr = arr.astype(arr.dtype.newbyteorder(">"))

            binary_data = arr.tobytes()

        with open(output_path, "wb") as f:
            f.write(binary_data)

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(
            f"Failed to write binary file: {e}", step_name=step_name
        ) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "binary",
        "dtype": actual_dtype,
    }
924
+
925
+
926
@register_handler("output.wav")
def handle_output_wav(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export data as WAV audio file.

    Parameters:
        path (str): Output WAV file path
        sample_rate (int): Sample rate in Hz (required)
        bit_depth (int, optional): Bit depth (8, 16, 24, 32, default: 16)
        normalize (bool, optional): Normalize to full scale (default: True)

    Inputs:
        data: Audio samples (array or trace object)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'wav'
        sample_rate: Sample rate used
        num_samples: Number of samples written

    Raises:
        PipelineExecutionError: If required inputs/params are missing, the bit
            depth is unsupported, or the file cannot be written.

    Example:
        >>> # Export decoded audio
        >>> steps:
        >>>   - name: save_wav
        >>>     type: output.wav
        >>>     params:
        >>>       path: decoded.wav
        >>>       sample_rate: 48000
        >>>       bit_depth: 16
        >>>       normalize: true
        >>>     inputs:
        >>>       data: decoder.audio
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    sample_rate = params.get("sample_rate")
    if not sample_rate:
        raise PipelineExecutionError(
            "Missing required parameter 'sample_rate'. Add 'sample_rate: 48000' to params.",
            step_name=step_name,
        )

    bit_depth = params.get("bit_depth", 16)
    normalize = params.get("normalize", True)

    try:
        import wave

        np = _get_numpy()

        # Extract audio samples (trace objects expose them via .data).
        if hasattr(data, "data"):
            audio_data = np.asarray(data.data)
        else:
            audio_data = np.asarray(data)

        # Normalize to full scale if requested.
        if normalize:
            max_val = np.abs(audio_data).max()
            if max_val > 0:
                audio_data = audio_data / max_val

        # Clip to [-1, 1] so out-of-range samples clip audibly instead of
        # wrapping around during the integer conversion below. This is a
        # no-op for normalized (or otherwise in-range) input.
        audio_data = np.clip(audio_data, -1.0, 1.0)

        # Convert float samples to the requested integer frame format.
        if bit_depth == 8:
            # 8-bit WAV is unsigned, centered at 128.
            frames = (audio_data * 127 + 128).astype(np.uint8).tobytes()
        elif bit_depth == 16:
            frames = (audio_data * 32767).astype(np.int16).tobytes()
        elif bit_depth == 24:
            # The wave module supports 3-byte samples (sampwidth=3). Build
            # them by scaling to 24-bit range, forcing little-endian int32,
            # and dropping the most significant byte of each sample.
            scaled = (audio_data * 8388607).astype("<i4")
            frames = scaled.view(np.uint8).reshape(-1, 4)[:, :3].tobytes()
        elif bit_depth == 32:
            frames = (audio_data * 2147483647).astype(np.int32).tobytes()
        else:
            raise ValueError(f"Unsupported bit depth: {bit_depth} (use 8, 16, 24, or 32)")

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Write mono WAV file.
        with wave.open(str(output_path), "wb") as wav_file:
            wav_file.setnchannels(1)  # Mono
            wav_file.setsampwidth(bit_depth // 8)
            wav_file.setframerate(sample_rate)
            wav_file.writeframes(frames)

        bytes_written = output_path.stat().st_size
        num_samples = len(audio_data)
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write WAV file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "wav",
        "sample_rate": sample_rate,
        "num_samples": num_samples,
    }
1028
+
1029
+
1030
@register_handler("output.yaml")
def handle_output_yaml(
    inputs: dict[str, Any], params: dict[str, Any], step_name: str
) -> dict[str, Any]:
    """Export data to YAML file.

    Parameters:
        path (str): Output YAML file path
        default_flow_style (bool, optional): Use flow style (default: False)

    Inputs:
        data: Data to export (dict, list, or YAML-serializable object)

    Outputs:
        path: Absolute file path
        bytes_written: Number of bytes written
        format: 'yaml'

    Raises:
        PipelineExecutionError: If 'data'/'path' is missing or the file
            cannot be serialized/written.

    Example:
        >>> # Export configuration
        >>> steps:
        >>>   - name: save_config
        >>>     type: output.yaml
        >>>     params:
        >>>       path: config.yaml
        >>>       default_flow_style: false
        >>>     inputs:
        >>>       data: analysis.config
    """
    data = inputs.get("data")
    if data is None:
        raise PipelineExecutionError("Missing required input 'data'", step_name=step_name)

    path = params.get("path")
    if not path:
        raise PipelineExecutionError("Missing required parameter 'path'", step_name=step_name)

    default_flow_style = params.get("default_flow_style", False)

    try:
        import yaml

        np = _get_numpy()

        # Recursively convert NumPy containers/scalars to native Python types;
        # yaml.dump has no representer for them and would raise otherwise.
        def convert_to_native(obj: Any) -> Any:
            """Convert objects to YAML-serializable types."""
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            # np.bool_ is neither np.integer nor np.floating, so it must be
            # listed explicitly or yaml.dump raises RepresenterError.
            if isinstance(obj, (np.integer, np.floating, np.bool_)):
                return obj.item()
            if isinstance(obj, dict):
                return {k: convert_to_native(v) for k, v in obj.items()}
            if isinstance(obj, (list, tuple)):
                return [convert_to_native(v) for v in obj]
            return obj

        data_native = convert_to_native(data)

        output_path = Path(path).resolve()
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Explicit encoding keeps output stable across platforms (PEP 597).
        with open(output_path, "w", encoding="utf-8") as f:
            yaml.dump(data_native, f, default_flow_style=default_flow_style)

        bytes_written = output_path.stat().st_size
    except Exception as e:
        raise PipelineExecutionError(f"Failed to write YAML file: {e}", step_name=step_name) from e

    return {
        "path": str(output_path),
        "bytes_written": bytes_written,
        "format": "yaml",
    }