oscura 0.8.0__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. oscura/__init__.py +19 -19
  2. oscura/analyzers/__init__.py +2 -0
  3. oscura/analyzers/digital/extraction.py +2 -3
  4. oscura/analyzers/digital/quality.py +1 -1
  5. oscura/analyzers/digital/timing.py +1 -1
  6. oscura/analyzers/patterns/__init__.py +66 -0
  7. oscura/analyzers/power/basic.py +3 -3
  8. oscura/analyzers/power/soa.py +1 -1
  9. oscura/analyzers/power/switching.py +3 -3
  10. oscura/analyzers/signal_classification.py +529 -0
  11. oscura/analyzers/signal_integrity/sparams.py +3 -3
  12. oscura/analyzers/statistics/basic.py +10 -7
  13. oscura/analyzers/validation.py +1 -1
  14. oscura/analyzers/waveform/measurements.py +200 -156
  15. oscura/analyzers/waveform/measurements_with_uncertainty.py +91 -35
  16. oscura/analyzers/waveform/spectral.py +164 -73
  17. oscura/api/dsl/commands.py +15 -6
  18. oscura/api/server/templates/base.html +137 -146
  19. oscura/api/server/templates/export.html +84 -110
  20. oscura/api/server/templates/home.html +248 -267
  21. oscura/api/server/templates/protocols.html +44 -48
  22. oscura/api/server/templates/reports.html +27 -35
  23. oscura/api/server/templates/session_detail.html +68 -78
  24. oscura/api/server/templates/sessions.html +62 -72
  25. oscura/api/server/templates/waveforms.html +54 -64
  26. oscura/automotive/__init__.py +1 -1
  27. oscura/automotive/can/session.py +1 -1
  28. oscura/automotive/dbc/generator.py +638 -23
  29. oscura/automotive/uds/decoder.py +99 -6
  30. oscura/cli/analyze.py +8 -2
  31. oscura/cli/batch.py +36 -5
  32. oscura/cli/characterize.py +18 -4
  33. oscura/cli/export.py +47 -5
  34. oscura/cli/main.py +2 -0
  35. oscura/cli/onboarding/wizard.py +10 -6
  36. oscura/cli/pipeline.py +585 -0
  37. oscura/cli/visualize.py +6 -4
  38. oscura/convenience.py +400 -32
  39. oscura/core/measurement_result.py +286 -0
  40. oscura/core/progress.py +1 -1
  41. oscura/core/types.py +232 -239
  42. oscura/correlation/multi_protocol.py +1 -1
  43. oscura/export/legacy/__init__.py +11 -0
  44. oscura/export/legacy/wav.py +75 -0
  45. oscura/exporters/__init__.py +19 -0
  46. oscura/exporters/wireshark.py +809 -0
  47. oscura/hardware/acquisition/file.py +5 -19
  48. oscura/hardware/acquisition/saleae.py +10 -10
  49. oscura/hardware/acquisition/socketcan.py +4 -6
  50. oscura/hardware/acquisition/synthetic.py +1 -5
  51. oscura/hardware/acquisition/visa.py +6 -6
  52. oscura/hardware/security/side_channel_detector.py +5 -508
  53. oscura/inference/message_format.py +686 -1
  54. oscura/jupyter/display.py +2 -2
  55. oscura/jupyter/magic.py +3 -3
  56. oscura/loaders/__init__.py +17 -12
  57. oscura/loaders/binary.py +1 -1
  58. oscura/loaders/chipwhisperer.py +1 -2
  59. oscura/loaders/configurable.py +1 -1
  60. oscura/loaders/csv_loader.py +2 -2
  61. oscura/loaders/hdf5_loader.py +1 -1
  62. oscura/loaders/lazy.py +6 -1
  63. oscura/loaders/mmap_loader.py +0 -1
  64. oscura/loaders/numpy_loader.py +8 -7
  65. oscura/loaders/preprocessing.py +3 -5
  66. oscura/loaders/rigol.py +21 -7
  67. oscura/loaders/sigrok.py +2 -5
  68. oscura/loaders/tdms.py +3 -2
  69. oscura/loaders/tektronix.py +38 -32
  70. oscura/loaders/tss.py +20 -27
  71. oscura/loaders/vcd.py +13 -8
  72. oscura/loaders/wav.py +1 -6
  73. oscura/pipeline/__init__.py +76 -0
  74. oscura/pipeline/handlers/__init__.py +165 -0
  75. oscura/pipeline/handlers/analyzers.py +1045 -0
  76. oscura/pipeline/handlers/decoders.py +899 -0
  77. oscura/pipeline/handlers/exporters.py +1103 -0
  78. oscura/pipeline/handlers/filters.py +891 -0
  79. oscura/pipeline/handlers/loaders.py +640 -0
  80. oscura/pipeline/handlers/transforms.py +768 -0
  81. oscura/reporting/formatting/measurements.py +55 -14
  82. oscura/reporting/templates/enhanced/protocol_re.html +504 -503
  83. oscura/side_channel/__init__.py +38 -57
  84. oscura/utils/builders/signal_builder.py +5 -5
  85. oscura/utils/comparison/compare.py +7 -9
  86. oscura/utils/comparison/golden.py +1 -1
  87. oscura/utils/filtering/convenience.py +2 -2
  88. oscura/utils/math/arithmetic.py +38 -62
  89. oscura/utils/math/interpolation.py +20 -20
  90. oscura/utils/pipeline/__init__.py +4 -17
  91. oscura/utils/progressive.py +1 -4
  92. oscura/utils/triggering/edge.py +1 -1
  93. oscura/utils/triggering/pattern.py +2 -2
  94. oscura/utils/triggering/pulse.py +2 -2
  95. oscura/utils/triggering/window.py +3 -3
  96. oscura/validation/hil_testing.py +11 -11
  97. oscura/visualization/__init__.py +46 -284
  98. oscura/visualization/batch.py +72 -433
  99. oscura/visualization/plot.py +542 -53
  100. oscura/visualization/styles.py +184 -318
  101. oscura/workflows/batch/advanced.py +1 -1
  102. oscura/workflows/batch/aggregate.py +7 -8
  103. oscura/workflows/complete_re.py +251 -23
  104. oscura/workflows/digital.py +27 -4
  105. oscura/workflows/multi_trace.py +136 -17
  106. oscura/workflows/waveform.py +11 -6
  107. {oscura-0.8.0.dist-info → oscura-0.10.0.dist-info}/METADATA +59 -79
  108. {oscura-0.8.0.dist-info → oscura-0.10.0.dist-info}/RECORD +111 -136
  109. oscura/side_channel/dpa.py +0 -1025
  110. oscura/utils/optimization/__init__.py +0 -19
  111. oscura/utils/optimization/parallel.py +0 -443
  112. oscura/utils/optimization/search.py +0 -532
  113. oscura/utils/pipeline/base.py +0 -338
  114. oscura/utils/pipeline/composition.py +0 -248
  115. oscura/utils/pipeline/parallel.py +0 -449
  116. oscura/utils/pipeline/pipeline.py +0 -375
  117. oscura/utils/search/__init__.py +0 -16
  118. oscura/utils/search/anomaly.py +0 -424
  119. oscura/utils/search/context.py +0 -294
  120. oscura/utils/search/pattern.py +0 -288
  121. oscura/utils/storage/__init__.py +0 -61
  122. oscura/utils/storage/database.py +0 -1166
  123. oscura/visualization/accessibility.py +0 -526
  124. oscura/visualization/annotations.py +0 -371
  125. oscura/visualization/axis_scaling.py +0 -305
  126. oscura/visualization/colors.py +0 -451
  127. oscura/visualization/digital.py +0 -436
  128. oscura/visualization/eye.py +0 -571
  129. oscura/visualization/histogram.py +0 -281
  130. oscura/visualization/interactive.py +0 -1035
  131. oscura/visualization/jitter.py +0 -1042
  132. oscura/visualization/keyboard.py +0 -394
  133. oscura/visualization/layout.py +0 -400
  134. oscura/visualization/optimization.py +0 -1079
  135. oscura/visualization/palettes.py +0 -446
  136. oscura/visualization/power.py +0 -508
  137. oscura/visualization/power_extended.py +0 -955
  138. oscura/visualization/presets.py +0 -469
  139. oscura/visualization/protocols.py +0 -1246
  140. oscura/visualization/render.py +0 -223
  141. oscura/visualization/rendering.py +0 -444
  142. oscura/visualization/reverse_engineering.py +0 -838
  143. oscura/visualization/signal_integrity.py +0 -989
  144. oscura/visualization/specialized.py +0 -643
  145. oscura/visualization/spectral.py +0 -1226
  146. oscura/visualization/thumbnails.py +0 -340
  147. oscura/visualization/time_axis.py +0 -351
  148. oscura/visualization/waveform.py +0 -454
  149. {oscura-0.8.0.dist-info → oscura-0.10.0.dist-info}/WHEEL +0 -0
  150. {oscura-0.8.0.dist-info → oscura-0.10.0.dist-info}/entry_points.txt +0 -0
  151. {oscura-0.8.0.dist-info → oscura-0.10.0.dist-info}/licenses/LICENSE +0 -0
@@ -386,8 +386,21 @@ def _step_5_infer_structure(
386
386
  if hasattr(re_result, "frames") and re_result.frames:
387
387
  structure = _infer_message_structure(re_result.frames)
388
388
  results.partial_results["structure"] = structure
389
- if structure.get("fields"):
390
- protocol_spec.fields = structure["fields"]
389
+ # Only update fields if protocol_spec has no fields yet
390
+ # (don't overwrite existing FieldSpec objects with dicts)
391
+ if structure.get("fields") and not protocol_spec.fields:
392
+ # Convert dict fields to FieldSpec objects
393
+ from oscura.workflows.reverse_engineering import FieldSpec
394
+
395
+ protocol_spec.fields = [
396
+ FieldSpec(
397
+ name=f"field_{field['offset']}",
398
+ offset=field["offset"],
399
+ size=field["length"],
400
+ field_type=field.get("field_type", "bytes"),
401
+ )
402
+ for field in structure["fields"]
403
+ ]
391
404
  except Exception as e:
392
405
  msg = f"Structure inference failed: {e}"
393
406
  results.warnings.append(msg)
@@ -732,7 +745,7 @@ def _load_captures(capture_dict: dict[str, str]) -> dict[str, WaveformTrace]:
732
745
  # Auto-detect format based on extension
733
746
  if suffix in (".bin", ".dat"):
734
747
  # Binary files - try to infer structure
735
- trace = loaders.load_binary(str(path)) # type: ignore[attr-defined]
748
+ trace = loaders.load_binary(str(path))
736
749
  elif suffix == ".wfm":
737
750
  trace = loaders.load_tektronix(str(path)) # type: ignore[attr-defined]
738
751
  elif suffix == ".vcd":
@@ -779,21 +792,79 @@ def _detect_protocol(
779
792
  def _differential_analysis(traces: dict[str, WaveformTrace]) -> dict[str, Any]:
780
793
  """Perform differential analysis between multiple captures.
781
794
 
795
+ Compares byte streams across captures to identify constant vs variable fields
796
+ using variance analysis and correlation.
797
+
782
798
  Args:
783
- traces: Multiple labeled captures.
799
+ traces: Multiple labeled captures (key: label, value: WaveformTrace).
784
800
 
785
801
  Returns:
786
- Dict with differential analysis results.
802
+ Dict with differential analysis results including constant_fields and variable_fields.
787
803
  """
788
- # Placeholder for differential analysis
789
- # Would compare traces to identify state-dependent fields
790
- results = {
804
+ import numpy as np
805
+
806
+ if not traces or len(traces) < 2:
807
+ return {
808
+ "trace_count": len(traces),
809
+ "differences": [],
810
+ "constant_fields": [],
811
+ "variable_fields": [],
812
+ }
813
+
814
+ # Convert traces to byte arrays for analysis
815
+ byte_arrays = []
816
+ for trace in traces.values():
817
+ if hasattr(trace, "data"):
818
+ # Convert analog data to bytes (threshold at 0)
819
+ if hasattr(trace.data, "dtype") and np.issubdtype(trace.data.dtype, np.floating):
820
+ digital = (trace.data > 0).astype(np.uint8)
821
+ byte_arrays.append(digital)
822
+ else:
823
+ byte_arrays.append(np.asarray(trace.data, dtype=np.uint8))
824
+
825
+ if not byte_arrays:
826
+ return {
827
+ "trace_count": len(traces),
828
+ "differences": [],
829
+ "constant_fields": [],
830
+ "variable_fields": [],
831
+ }
832
+
833
+ # Find minimum length to compare
834
+ min_len = min(len(arr) for arr in byte_arrays)
835
+ if min_len == 0:
836
+ return {
837
+ "trace_count": len(traces),
838
+ "differences": [],
839
+ "constant_fields": [],
840
+ "variable_fields": [],
841
+ }
842
+
843
+ # Truncate all arrays to same length
844
+ aligned_arrays = [arr[:min_len] for arr in byte_arrays]
845
+ stacked = np.stack(aligned_arrays, axis=0)
846
+
847
+ # Calculate variance across captures for each byte position
848
+ variances = np.var(stacked, axis=0)
849
+
850
+ # Identify constant fields (low variance) vs variable fields (high variance)
851
+ constant_threshold = 0.1
852
+ constant_fields = []
853
+ variable_fields = []
854
+
855
+ for i, variance in enumerate(variances):
856
+ if variance < constant_threshold:
857
+ constant_fields.append({"position": int(i), "variance": float(variance)})
858
+ else:
859
+ variable_fields.append({"position": int(i), "variance": float(variance)})
860
+
861
+ return {
791
862
  "trace_count": len(traces),
792
863
  "differences": [],
793
- "constant_fields": [],
794
- "variable_fields": [],
864
+ "constant_fields": constant_fields,
865
+ "variable_fields": variable_fields,
866
+ "analyzed_length": int(min_len),
795
867
  }
796
- return results
797
868
 
798
869
 
799
870
  def _enhance_spec_with_differential(spec: ProtocolSpec, diff_results: dict[str, Any]) -> None:
@@ -813,10 +884,53 @@ def _infer_message_structure(frames: list[Any]) -> dict[str, Any]:
813
884
  frames: List of decoded frames.
814
885
 
815
886
  Returns:
816
- Dict with inferred structure details.
887
+ Dict with inferred structure including fields and patterns.
817
888
  """
818
- # Placeholder for structure inference
819
- return {"fields": [], "patterns": []}
889
+ from oscura.analyzers.patterns.reverse_engineering import ReverseEngineer
890
+
891
+ if not frames:
892
+ return {"fields": [], "patterns": []}
893
+
894
+ # Convert frames to bytes messages for structure inference
895
+ messages = []
896
+ for frame in frames:
897
+ if hasattr(frame, "raw_bytes") and frame.raw_bytes:
898
+ # Ensure bytes format
899
+ if isinstance(frame.raw_bytes, bytes):
900
+ messages.append(frame.raw_bytes)
901
+ elif isinstance(frame.raw_bytes, (list, tuple)):
902
+ # Convert to bytes if it's a list/tuple
903
+ messages.append(bytes(frame.raw_bytes))
904
+
905
+ if not messages:
906
+ return {"fields": [], "patterns": []}
907
+
908
+ # Use ReverseEngineer to infer protocol structure
909
+ try:
910
+ re_tool = ReverseEngineer()
911
+ structure = re_tool.infer_protocol_structure(messages, min_field_size=1)
912
+
913
+ # Convert ProtocolStructure to dict format
914
+ fields_list = [
915
+ {
916
+ "offset": field.offset,
917
+ "length": field.length,
918
+ "field_type": field.field_type,
919
+ "entropy": field.entropy,
920
+ "is_constant": field.is_constant,
921
+ }
922
+ for field in structure.fields
923
+ ]
924
+
925
+ return {
926
+ "fields": fields_list,
927
+ "patterns": [],
928
+ "is_fixed_length": structure.message_length > 0,
929
+ "message_length": structure.message_length,
930
+ }
931
+ except Exception:
932
+ # If inference fails, return empty structure
933
+ return {"fields": [], "patterns": []}
820
934
 
821
935
 
822
936
  def _detect_crypto_regions(frames: list[Any]) -> list[dict[str, Any]]:
@@ -879,10 +993,22 @@ def _recover_crc(frames: list[Any], checksum_types: list[str] | None = None) ->
879
993
  checksum_types: Optional list of checksum types to try.
880
994
 
881
995
  Returns:
882
- Dict with CRC recovery results.
996
+ Dict with CRC recovery results including checksum_type, position, and confidence.
883
997
  """
884
- # Placeholder - would use existing checksum detection from reverse_engineer_signal
885
- return {"checksum_type": None, "position": None, "confidence": 0.0}
998
+ from oscura.workflows.reverse_engineering import _detect_checksum
999
+
1000
+ # Default checksum types to try
1001
+ if checksum_types is None:
1002
+ checksum_types = ["xor", "sum8", "crc8", "crc16", "crc32"]
1003
+
1004
+ # Detect checksum using existing function
1005
+ checksum_type, position, confidence = _detect_checksum(frames, checksum_types)
1006
+
1007
+ return {
1008
+ "checksum_type": checksum_type,
1009
+ "position": position,
1010
+ "confidence": float(confidence),
1011
+ }
886
1012
 
887
1013
 
888
1014
  def _extract_state_machine(frames: list[Any]) -> dict[str, Any]:
@@ -892,10 +1018,64 @@ def _extract_state_machine(frames: list[Any]) -> dict[str, Any]:
892
1018
  frames: List of decoded frames.
893
1019
 
894
1020
  Returns:
895
- Dict with state machine representation.
1021
+ Dict with state machine representation including states, transitions, and initial_state.
896
1022
  """
897
- # Placeholder for state machine extraction using RPNI or similar
898
- return {"states": [], "transitions": [], "initial_state": None}
1023
+ from oscura.inference.state_machine import infer_rpni
1024
+
1025
+ if not frames:
1026
+ return {"states": [], "transitions": [], "initial_state": None}
1027
+
1028
+ # Convert frames to sequences of message types/IDs for RPNI
1029
+ # Extract first byte as message type identifier
1030
+ sequences = []
1031
+ for frame in frames:
1032
+ if hasattr(frame, "raw_bytes") and frame.raw_bytes:
1033
+ # Use first byte as message type
1034
+ msg_type = (
1035
+ frame.raw_bytes[0]
1036
+ if isinstance(frame.raw_bytes[0], int)
1037
+ else ord(frame.raw_bytes[0])
1038
+ )
1039
+ sequences.append([msg_type])
1040
+
1041
+ if not sequences:
1042
+ return {"states": [], "transitions": [], "initial_state": None}
1043
+
1044
+ # Infer state machine using RPNI algorithm
1045
+ try:
1046
+ # Cast sequences to the expected type
1047
+ sequences_typed: list[list[str | int]] = [[int(x) for x in seq] for seq in sequences]
1048
+ automaton = infer_rpni(sequences_typed)
1049
+
1050
+ # Convert to dict format
1051
+ states_list = [
1052
+ {
1053
+ "id": state.id,
1054
+ "name": state.name,
1055
+ "is_initial": state.is_initial,
1056
+ "is_accepting": state.is_accepting,
1057
+ }
1058
+ for state in automaton.states
1059
+ ]
1060
+
1061
+ transitions_list = [
1062
+ {
1063
+ "source": trans.source,
1064
+ "target": trans.target,
1065
+ "symbol": trans.symbol,
1066
+ }
1067
+ for trans in automaton.transitions
1068
+ ]
1069
+
1070
+ return {
1071
+ "states": states_list,
1072
+ "transitions": transitions_list,
1073
+ "initial_state": automaton.initial_state,
1074
+ "accepting_states": list(automaton.accepting_states),
1075
+ }
1076
+ except Exception:
1077
+ # If RPNI fails, return empty state machine
1078
+ return {"states": [], "transitions": [], "initial_state": None}
899
1079
 
900
1080
 
901
1081
  def _generate_wireshark_dissector(spec: ProtocolSpec, output_path: Path) -> None:
@@ -1097,20 +1277,68 @@ def _generate_report(
1097
1277
  def _replay_validation(spec: ProtocolSpec, target_device: str, frames: list[Any]) -> dict[str, Any]:
1098
1278
  """Perform replay validation on target hardware.
1099
1279
 
1280
+ Implements dry-run mode with structural validation. Hardware replay is optional
1281
+ and documented for future implementation.
1282
+
1100
1283
  Args:
1101
1284
  spec: Protocol specification.
1102
- target_device: Device path for validation.
1285
+ target_device: Device path for validation (use "dry-run" for structural validation only).
1103
1286
  frames: Frames to replay.
1104
1287
 
1105
1288
  Returns:
1106
- Dict with validation results.
1289
+ Dict with validation results including replayed count and success rate.
1107
1290
  """
1108
- # Placeholder for replay validation
1291
+ if not frames:
1292
+ return {
1293
+ "replayed": 0,
1294
+ "successful": 0,
1295
+ "failed": 0,
1296
+ "success_rate": 0.0,
1297
+ "mode": "none",
1298
+ }
1299
+
1300
+ # Dry-run mode: structural validation only
1301
+ if target_device == "dry-run" or target_device is None:
1302
+ replayed = 0
1303
+ successful = 0
1304
+ failed = 0
1305
+
1306
+ for frame in frames:
1307
+ replayed += 1
1308
+
1309
+ # Structural validation: check if frame matches spec
1310
+ if hasattr(frame, "raw_bytes") and frame.raw_bytes:
1311
+ # Check length matches spec if fixed-length
1312
+ if spec.frame_length and spec.frame_length > 0:
1313
+ if len(frame.raw_bytes) == spec.frame_length:
1314
+ successful += 1
1315
+ else:
1316
+ failed += 1
1317
+ else:
1318
+ # Variable length - just count as successful if has data
1319
+ successful += 1
1320
+ else:
1321
+ failed += 1
1322
+
1323
+ success_rate = successful / replayed if replayed > 0 else 0.0
1324
+
1325
+ return {
1326
+ "replayed": replayed,
1327
+ "successful": successful,
1328
+ "failed": failed,
1329
+ "success_rate": success_rate,
1330
+ "mode": "dry-run",
1331
+ }
1332
+
1333
+ # Hardware mode (future implementation)
1334
+ # Would connect to target_device and replay frames
1109
1335
  return {
1110
1336
  "replayed": 0,
1111
1337
  "successful": 0,
1112
1338
  "failed": 0,
1113
1339
  "success_rate": 0.0,
1340
+ "mode": "hardware",
1341
+ "error": "Hardware replay not yet implemented. Use 'dry-run' for structural validation.",
1114
1342
  }
1115
1343
 
1116
1344
 
@@ -131,6 +131,27 @@ def characterize_buffer(
131
131
  return result
132
132
 
133
133
 
134
+ def _extract_measurement_value(result: Any) -> float:
135
+ """Extract numeric value from measurement result.
136
+
137
+ Handles both MeasurementResult dicts and simple numeric values (for tests).
138
+
139
+ Args:
140
+ result: MeasurementResult dict or numeric value.
141
+
142
+ Returns:
143
+ Extracted float value, or 0.0 if not applicable.
144
+ """
145
+ if isinstance(result, dict):
146
+ # Handle MeasurementResult dict
147
+ if not result.get("applicable", True):
148
+ return 0.0
149
+ value = result.get("value", 0.0)
150
+ return float(value) if value is not None else 0.0
151
+ # Handle simple numeric value (from mocks)
152
+ return float(result) if isinstance(result, (int, float)) else 0.0
153
+
154
+
134
155
  def _determine_logic_family(
135
156
  trace: WaveformTrace, logic_family: str | None
136
157
  ) -> tuple[str, float, float, float]:
@@ -176,8 +197,9 @@ def _measure_timing_params(trace: WaveformTrace) -> tuple[float, float]:
176
197
  try:
177
198
  t_rise_raw = rise_time(trace)
178
199
  t_fall_raw = fall_time(trace)
179
- t_rise: float = float(t_rise_raw)
180
- t_fall: float = float(t_fall_raw)
200
+ # Handle both MeasurementResult dict and simple float (for mocks)
201
+ t_rise = _extract_measurement_value(t_rise_raw)
202
+ t_fall = _extract_measurement_value(t_fall_raw)
181
203
  except Exception as e:
182
204
  raise AnalysisError(f"Failed to measure rise/fall time: {e}") from e
183
205
 
@@ -201,8 +223,9 @@ def _measure_overshoots(
201
223
 
202
224
  v_overshoot_raw = overshoot(trace)
203
225
  v_undershoot_raw = undershoot(trace)
204
- v_overshoot: float = float(v_overshoot_raw)
205
- v_undershoot: float = float(v_undershoot_raw)
226
+ # Handle both MeasurementResult dict and simple float (for mocks)
227
+ v_overshoot = _extract_measurement_value(v_overshoot_raw)
228
+ v_undershoot = _extract_measurement_value(v_undershoot_raw)
206
229
 
207
230
  swing = voh - vol
208
231
  if swing > 0:
@@ -310,9 +310,8 @@ class MultiTraceWorkflow:
310
310
  def _measure_sequential(self, measurements: tuple[str, ...]) -> None:
311
311
  """Measure sequentially."""
312
312
  # Progress tracking
313
- progress = create_progress_tracker( # type: ignore[call-arg]
313
+ progress = create_progress_tracker(
314
314
  total=len(self.results.trace_ids),
315
- description="Measuring traces",
316
315
  )
317
316
 
318
317
  for trace_id, trace in self._iter_traces(lazy=True):
@@ -367,18 +366,57 @@ class MultiTraceWorkflow:
367
366
  def _perform_measurement(self, trace: Any, measurement: str) -> Any:
368
367
  """Perform a single measurement.
369
368
 
369
+ Dispatches measurement name to appropriate function from oscura.analyzers.waveform.measurements.
370
+
370
371
  Args:
371
- trace: Trace object
372
- measurement: Measurement name
372
+ trace: Trace object (WaveformTrace or MockTrace)
373
+ measurement: Measurement name (rise_time, fall_time, frequency, amplitude, rms, duty_cycle, period, pulse_width)
374
+
375
+ Returns:
376
+ MeasurementResult dict with value, unit, applicable, reason, and display fields.
373
377
 
374
378
  Raises:
375
379
  OscuraError: If measurement not available
376
380
  """
377
- # Placeholder - would call actual measurement functions
378
- # from oscura.analyzers.measurements
379
- raise OscuraError(
380
- f"Measurement '{measurement}' not yet implemented in multi-trace workflow"
381
- )
381
+ from collections.abc import Callable
382
+ from typing import TYPE_CHECKING
383
+
384
+ from oscura.analyzers.waveform import measurements
385
+ from oscura.core.types import TraceMetadata, WaveformTrace
386
+
387
+ if TYPE_CHECKING:
388
+ from oscura.core.types import MeasurementResult
389
+
390
+ # Convert MockTrace or other formats to WaveformTrace if needed
391
+ if not isinstance(trace, WaveformTrace):
392
+ # Handle MockTrace or similar objects with data and sample_rate attributes
393
+ if hasattr(trace, "data") and hasattr(trace, "sample_rate"):
394
+ metadata = TraceMetadata(sample_rate=trace.sample_rate)
395
+ trace = WaveformTrace(data=trace.data, metadata=metadata)
396
+ else:
397
+ raise OscuraError(f"Invalid trace object: {type(trace)}")
398
+
399
+ # Map measurement name to function
400
+ measurement_functions: dict[str, Callable[[Any], MeasurementResult]] = {
401
+ "rise_time": measurements.rise_time,
402
+ "fall_time": measurements.fall_time,
403
+ "frequency": measurements.frequency,
404
+ "amplitude": measurements.amplitude,
405
+ "rms": measurements.rms,
406
+ "duty_cycle": measurements.duty_cycle,
407
+ "period": measurements.period,
408
+ "pulse_width": measurements.pulse_width,
409
+ }
410
+
411
+ func = measurement_functions.get(measurement)
412
+ if func is None:
413
+ raise OscuraError(
414
+ f"Measurement '{measurement}' not available. "
415
+ f"Supported: {', '.join(measurement_functions.keys())}"
416
+ )
417
+
418
+ # Perform measurement and return result
419
+ return func(trace)
382
420
 
383
421
  def aggregate(self) -> MultiTraceResults:
384
422
  """Compute aggregate statistics across traces.
@@ -462,24 +500,105 @@ class MultiTraceWorkflow:
462
500
  def _export_pdf(self, filename: str) -> None:
463
501
  """Export results to PDF.
464
502
 
503
+ Creates a comprehensive PDF report with measurement tables and statistics.
504
+
465
505
  Args:
466
- filename: Output filename
506
+ filename: Output filename (with .pdf extension)
467
507
 
468
508
  Raises:
469
- OscuraError: PDF export not yet implemented
509
+ ImportError: If reportlab is not installed
470
510
  """
471
- raise OscuraError("PDF export not yet implemented")
511
+ from pathlib import Path
512
+
513
+ from oscura.reporting.core import Report, ReportConfig
514
+ from oscura.reporting.pdf import generate_pdf_report
515
+
516
+ # Create report with results
517
+ config = ReportConfig(title="Multi-Trace Analysis Results")
518
+ report = Report(config=config)
519
+
520
+ # Add measurement results - aggregate across all traces
521
+ if self.results.measurements:
522
+ report.add_section("Measurement Results", level=2)
523
+ # Collect measurement values by name across all traces
524
+ measurement_data: dict[str, list[float]] = {}
525
+ for trace_results in self.results.measurements.values():
526
+ for meas_name, meas_result in trace_results.items():
527
+ # Only aggregate applicable numeric measurements
528
+ if isinstance(meas_result, dict) and meas_result.get("applicable", False):
529
+ try:
530
+ value = float(meas_result["value"])
531
+ if meas_name not in measurement_data:
532
+ measurement_data[meas_name] = []
533
+ measurement_data[meas_name].append(value)
534
+ except (ValueError, TypeError, KeyError):
535
+ # Skip non-numeric or missing values
536
+ pass
537
+
538
+ # Add aggregated measurements to report
539
+ for measurement_name, values in measurement_data.items():
540
+ if values:
541
+ report.add_measurements(
542
+ f"{measurement_name.replace('_', ' ').title()}",
543
+ {"mean": sum(values) / len(values), "count": len(values)},
544
+ level=3,
545
+ )
546
+
547
+ # Generate PDF
548
+ pdf_bytes = generate_pdf_report(report)
549
+
550
+ # Write to file
551
+ Path(filename).write_bytes(pdf_bytes)
472
552
 
473
553
  def _export_html(self, filename: str) -> None:
474
554
  """Export results to HTML.
475
555
 
476
- Args:
477
- filename: Output filename
556
+ Creates a modern HTML report with interactive features.
478
557
 
479
- Raises:
480
- OscuraError: HTML export not yet implemented
558
+ Args:
559
+ filename: Output filename (with .html extension)
481
560
  """
482
- raise OscuraError("HTML export not yet implemented")
561
+ from pathlib import Path
562
+
563
+ from oscura.reporting.core import Report, ReportConfig
564
+ from oscura.reporting.html import generate_html_report
565
+
566
+ # Create report with results
567
+ config = ReportConfig(title="Multi-Trace Analysis Results")
568
+ report = Report(config=config)
569
+
570
+ # Add measurement results - aggregate across all traces
571
+ if self.results.measurements:
572
+ report.add_section("Measurement Results", level=2)
573
+ # Collect measurement values by name across all traces
574
+ measurement_data: dict[str, list[float]] = {}
575
+ for trace_results in self.results.measurements.values():
576
+ for meas_name, meas_result in trace_results.items():
577
+ # Only aggregate applicable numeric measurements
578
+ if isinstance(meas_result, dict) and meas_result.get("applicable", False):
579
+ try:
580
+ value = float(meas_result["value"])
581
+ if meas_name not in measurement_data:
582
+ measurement_data[meas_name] = []
583
+ measurement_data[meas_name].append(value)
584
+ except (ValueError, TypeError, KeyError):
585
+ # Skip non-numeric or missing values
586
+ pass
587
+
588
+ # Add aggregated measurements to report
589
+ for measurement_name, values in measurement_data.items():
590
+ if values:
591
+ report.add_measurements(
592
+ f"{measurement_name.replace('_', ' ').title()}",
593
+ {"mean": sum(values) / len(values), "count": len(values)},
594
+ level=3,
595
+ )
596
+
597
+ # Generate HTML
598
+ html_content = generate_html_report(report, interactive=True)
599
+
600
+ # Write to file
601
+ Path(filename).write_text(html_content, encoding="utf-8")
483
602
 
484
603
 
485
604
  def load_all(pattern: str, lazy: bool = True) -> list[Any]:
@@ -545,16 +545,21 @@ def analyze_complete(
545
545
  },
546
546
  )
547
547
 
548
- # Add basic measurement sections - handle BOTH formats
548
+ # Add basic measurement sections - handle MeasurementResult format
549
549
  for analysis_name, analysis_results in results.items():
550
- # Extract measurements in both formats:
551
- # 1. Unified format: {"value": float, "unit": str}
552
- # 2. Legacy format: flat float/int values
550
+ # Extract measurements from MeasurementResult format:
551
+ # MeasurementResult: {"value": float|None, "unit": str, "applicable": bool, ...}
552
+ # Only include applicable measurements in the report
553
553
  measurements = {}
554
554
 
555
555
  for k, v in analysis_results.items():
556
- if isinstance(v, dict) and "value" in v:
557
- # Unified format - extract value for reporting
556
+ if isinstance(v, dict) and "value" in v and "applicable" in v:
557
+ # MeasurementResult format - only include if applicable
558
+ if v["applicable"] and v["value"] is not None:
559
+ measurements[k] = v["value"]
560
+ # Skip inapplicable measurements (they'll show as N/A in detailed views)
561
+ elif isinstance(v, dict) and "value" in v:
562
+ # Legacy unified format (for compatibility)
558
563
  measurements[k] = v["value"]
559
564
  elif isinstance(v, (int, float)) and not isinstance(v, bool):
560
565
  # Legacy flat format