oscura-0.5.0-py3-none-any.whl → oscura-0.6.0-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- oscura/__init__.py +169 -167
- oscura/analyzers/__init__.py +3 -0
- oscura/analyzers/classification.py +659 -0
- oscura/analyzers/digital/__init__.py +0 -48
- oscura/analyzers/digital/edges.py +325 -65
- oscura/analyzers/digital/extraction.py +0 -195
- oscura/analyzers/digital/quality.py +293 -166
- oscura/analyzers/digital/timing.py +260 -115
- oscura/analyzers/digital/timing_numba.py +334 -0
- oscura/analyzers/entropy.py +605 -0
- oscura/analyzers/eye/diagram.py +176 -109
- oscura/analyzers/eye/metrics.py +5 -5
- oscura/analyzers/jitter/__init__.py +6 -4
- oscura/analyzers/jitter/ber.py +52 -52
- oscura/analyzers/jitter/classification.py +156 -0
- oscura/analyzers/jitter/decomposition.py +163 -113
- oscura/analyzers/jitter/spectrum.py +80 -64
- oscura/analyzers/ml/__init__.py +39 -0
- oscura/analyzers/ml/features.py +600 -0
- oscura/analyzers/ml/signal_classifier.py +604 -0
- oscura/analyzers/packet/daq.py +246 -158
- oscura/analyzers/packet/parser.py +12 -1
- oscura/analyzers/packet/payload.py +50 -2110
- oscura/analyzers/packet/payload_analysis.py +361 -181
- oscura/analyzers/packet/payload_patterns.py +133 -70
- oscura/analyzers/packet/stream.py +84 -23
- oscura/analyzers/patterns/__init__.py +26 -5
- oscura/analyzers/patterns/anomaly_detection.py +908 -0
- oscura/analyzers/patterns/clustering.py +169 -108
- oscura/analyzers/patterns/clustering_optimized.py +227 -0
- oscura/analyzers/patterns/discovery.py +1 -1
- oscura/analyzers/patterns/matching.py +581 -197
- oscura/analyzers/patterns/pattern_mining.py +778 -0
- oscura/analyzers/patterns/periodic.py +121 -38
- oscura/analyzers/patterns/sequences.py +175 -78
- oscura/analyzers/power/conduction.py +1 -1
- oscura/analyzers/power/soa.py +6 -6
- oscura/analyzers/power/switching.py +250 -110
- oscura/analyzers/protocol/__init__.py +17 -1
- oscura/analyzers/protocols/__init__.py +1 -22
- oscura/analyzers/protocols/base.py +6 -6
- oscura/analyzers/protocols/ble/__init__.py +38 -0
- oscura/analyzers/protocols/ble/analyzer.py +809 -0
- oscura/analyzers/protocols/ble/uuids.py +288 -0
- oscura/analyzers/protocols/can.py +257 -127
- oscura/analyzers/protocols/can_fd.py +107 -80
- oscura/analyzers/protocols/flexray.py +139 -80
- oscura/analyzers/protocols/hdlc.py +93 -58
- oscura/analyzers/protocols/i2c.py +247 -106
- oscura/analyzers/protocols/i2s.py +138 -86
- oscura/analyzers/protocols/industrial/__init__.py +40 -0
- oscura/analyzers/protocols/industrial/bacnet/__init__.py +33 -0
- oscura/analyzers/protocols/industrial/bacnet/analyzer.py +708 -0
- oscura/analyzers/protocols/industrial/bacnet/encoding.py +412 -0
- oscura/analyzers/protocols/industrial/bacnet/services.py +622 -0
- oscura/analyzers/protocols/industrial/ethercat/__init__.py +30 -0
- oscura/analyzers/protocols/industrial/ethercat/analyzer.py +474 -0
- oscura/analyzers/protocols/industrial/ethercat/mailbox.py +339 -0
- oscura/analyzers/protocols/industrial/ethercat/topology.py +166 -0
- oscura/analyzers/protocols/industrial/modbus/__init__.py +31 -0
- oscura/analyzers/protocols/industrial/modbus/analyzer.py +525 -0
- oscura/analyzers/protocols/industrial/modbus/crc.py +79 -0
- oscura/analyzers/protocols/industrial/modbus/functions.py +436 -0
- oscura/analyzers/protocols/industrial/opcua/__init__.py +21 -0
- oscura/analyzers/protocols/industrial/opcua/analyzer.py +552 -0
- oscura/analyzers/protocols/industrial/opcua/datatypes.py +446 -0
- oscura/analyzers/protocols/industrial/opcua/services.py +264 -0
- oscura/analyzers/protocols/industrial/profinet/__init__.py +23 -0
- oscura/analyzers/protocols/industrial/profinet/analyzer.py +441 -0
- oscura/analyzers/protocols/industrial/profinet/dcp.py +263 -0
- oscura/analyzers/protocols/industrial/profinet/ptcp.py +200 -0
- oscura/analyzers/protocols/jtag.py +180 -98
- oscura/analyzers/protocols/lin.py +219 -114
- oscura/analyzers/protocols/manchester.py +4 -4
- oscura/analyzers/protocols/onewire.py +253 -149
- oscura/analyzers/protocols/parallel_bus/__init__.py +20 -0
- oscura/analyzers/protocols/parallel_bus/centronics.py +92 -0
- oscura/analyzers/protocols/parallel_bus/gpib.py +137 -0
- oscura/analyzers/protocols/spi.py +192 -95
- oscura/analyzers/protocols/swd.py +321 -167
- oscura/analyzers/protocols/uart.py +267 -125
- oscura/analyzers/protocols/usb.py +235 -131
- oscura/analyzers/side_channel/power.py +17 -12
- oscura/analyzers/signal/__init__.py +15 -0
- oscura/analyzers/signal/timing_analysis.py +1086 -0
- oscura/analyzers/signal_integrity/__init__.py +4 -1
- oscura/analyzers/signal_integrity/sparams.py +2 -19
- oscura/analyzers/spectral/chunked.py +129 -60
- oscura/analyzers/spectral/chunked_fft.py +300 -94
- oscura/analyzers/spectral/chunked_wavelet.py +100 -80
- oscura/analyzers/statistical/checksum.py +376 -217
- oscura/analyzers/statistical/classification.py +229 -107
- oscura/analyzers/statistical/entropy.py +78 -53
- oscura/analyzers/statistics/correlation.py +407 -211
- oscura/analyzers/statistics/outliers.py +2 -2
- oscura/analyzers/statistics/streaming.py +30 -5
- oscura/analyzers/validation.py +216 -101
- oscura/analyzers/waveform/measurements.py +9 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +31 -15
- oscura/analyzers/waveform/spectral.py +500 -228
- oscura/api/__init__.py +31 -5
- oscura/api/dsl/__init__.py +582 -0
- oscura/{dsl → api/dsl}/commands.py +43 -76
- oscura/{dsl → api/dsl}/interpreter.py +26 -51
- oscura/{dsl → api/dsl}/parser.py +107 -77
- oscura/{dsl → api/dsl}/repl.py +2 -2
- oscura/api/dsl.py +1 -1
- oscura/{integrations → api/integrations}/__init__.py +1 -1
- oscura/{integrations → api/integrations}/llm.py +201 -102
- oscura/api/operators.py +3 -3
- oscura/api/optimization.py +144 -30
- oscura/api/rest_server.py +921 -0
- oscura/api/server/__init__.py +17 -0
- oscura/api/server/dashboard.py +850 -0
- oscura/api/server/static/README.md +34 -0
- oscura/api/server/templates/base.html +181 -0
- oscura/api/server/templates/export.html +120 -0
- oscura/api/server/templates/home.html +284 -0
- oscura/api/server/templates/protocols.html +58 -0
- oscura/api/server/templates/reports.html +43 -0
- oscura/api/server/templates/session_detail.html +89 -0
- oscura/api/server/templates/sessions.html +83 -0
- oscura/api/server/templates/waveforms.html +73 -0
- oscura/automotive/__init__.py +8 -1
- oscura/automotive/can/__init__.py +10 -0
- oscura/automotive/can/checksum.py +3 -1
- oscura/automotive/can/dbc_generator.py +590 -0
- oscura/automotive/can/message_wrapper.py +121 -74
- oscura/automotive/can/patterns.py +98 -21
- oscura/automotive/can/session.py +292 -56
- oscura/automotive/can/state_machine.py +6 -3
- oscura/automotive/can/stimulus_response.py +97 -75
- oscura/automotive/dbc/__init__.py +10 -2
- oscura/automotive/dbc/generator.py +84 -56
- oscura/automotive/dbc/parser.py +6 -6
- oscura/automotive/dtc/data.json +2763 -0
- oscura/automotive/dtc/database.py +2 -2
- oscura/automotive/flexray/__init__.py +31 -0
- oscura/automotive/flexray/analyzer.py +504 -0
- oscura/automotive/flexray/crc.py +185 -0
- oscura/automotive/flexray/fibex.py +449 -0
- oscura/automotive/j1939/__init__.py +45 -8
- oscura/automotive/j1939/analyzer.py +605 -0
- oscura/automotive/j1939/spns.py +326 -0
- oscura/automotive/j1939/transport.py +306 -0
- oscura/automotive/lin/__init__.py +47 -0
- oscura/automotive/lin/analyzer.py +612 -0
- oscura/automotive/loaders/blf.py +13 -2
- oscura/automotive/loaders/csv_can.py +143 -72
- oscura/automotive/loaders/dispatcher.py +50 -2
- oscura/automotive/loaders/mdf.py +86 -45
- oscura/automotive/loaders/pcap.py +111 -61
- oscura/automotive/uds/__init__.py +4 -0
- oscura/automotive/uds/analyzer.py +725 -0
- oscura/automotive/uds/decoder.py +140 -58
- oscura/automotive/uds/models.py +7 -1
- oscura/automotive/visualization.py +1 -1
- oscura/cli/analyze.py +348 -0
- oscura/cli/batch.py +142 -122
- oscura/cli/benchmark.py +275 -0
- oscura/cli/characterize.py +137 -82
- oscura/cli/compare.py +224 -131
- oscura/cli/completion.py +250 -0
- oscura/cli/config_cmd.py +361 -0
- oscura/cli/decode.py +164 -87
- oscura/cli/export.py +286 -0
- oscura/cli/main.py +115 -31
- oscura/{onboarding → cli/onboarding}/__init__.py +3 -3
- oscura/{onboarding → cli/onboarding}/help.py +80 -58
- oscura/{onboarding → cli/onboarding}/tutorials.py +97 -72
- oscura/{onboarding → cli/onboarding}/wizard.py +55 -36
- oscura/cli/progress.py +147 -0
- oscura/cli/shell.py +157 -135
- oscura/cli/validate_cmd.py +204 -0
- oscura/cli/visualize.py +158 -0
- oscura/convenience.py +125 -79
- oscura/core/__init__.py +4 -2
- oscura/core/backend_selector.py +3 -3
- oscura/core/cache.py +126 -15
- oscura/core/cancellation.py +1 -1
- oscura/{config → core/config}/__init__.py +20 -11
- oscura/{config → core/config}/defaults.py +1 -1
- oscura/{config → core/config}/loader.py +7 -5
- oscura/{config → core/config}/memory.py +5 -5
- oscura/{config → core/config}/migration.py +1 -1
- oscura/{config → core/config}/pipeline.py +99 -23
- oscura/{config → core/config}/preferences.py +1 -1
- oscura/{config → core/config}/protocol.py +3 -3
- oscura/{config → core/config}/schema.py +426 -272
- oscura/{config → core/config}/settings.py +1 -1
- oscura/{config → core/config}/thresholds.py +195 -153
- oscura/core/correlation.py +5 -6
- oscura/core/cross_domain.py +0 -2
- oscura/core/debug.py +9 -5
- oscura/{extensibility → core/extensibility}/docs.py +158 -70
- oscura/{extensibility → core/extensibility}/extensions.py +160 -76
- oscura/{extensibility → core/extensibility}/logging.py +1 -1
- oscura/{extensibility → core/extensibility}/measurements.py +1 -1
- oscura/{extensibility → core/extensibility}/plugins.py +1 -1
- oscura/{extensibility → core/extensibility}/templates.py +73 -3
- oscura/{extensibility → core/extensibility}/validation.py +1 -1
- oscura/core/gpu_backend.py +11 -7
- oscura/core/log_query.py +101 -11
- oscura/core/logging.py +126 -54
- oscura/core/logging_advanced.py +5 -5
- oscura/core/memory_limits.py +108 -70
- oscura/core/memory_monitor.py +2 -2
- oscura/core/memory_progress.py +7 -7
- oscura/core/memory_warnings.py +1 -1
- oscura/core/numba_backend.py +13 -13
- oscura/{plugins → core/plugins}/__init__.py +9 -9
- oscura/{plugins → core/plugins}/base.py +7 -7
- oscura/{plugins → core/plugins}/cli.py +3 -3
- oscura/{plugins → core/plugins}/discovery.py +186 -106
- oscura/{plugins → core/plugins}/lifecycle.py +1 -1
- oscura/{plugins → core/plugins}/manager.py +7 -7
- oscura/{plugins → core/plugins}/registry.py +3 -3
- oscura/{plugins → core/plugins}/versioning.py +1 -1
- oscura/core/progress.py +16 -1
- oscura/core/provenance.py +8 -2
- oscura/{schemas → core/schemas}/__init__.py +2 -2
- oscura/core/schemas/bus_configuration.json +322 -0
- oscura/core/schemas/device_mapping.json +182 -0
- oscura/core/schemas/packet_format.json +418 -0
- oscura/core/schemas/protocol_definition.json +363 -0
- oscura/core/types.py +4 -0
- oscura/core/uncertainty.py +3 -3
- oscura/correlation/__init__.py +52 -0
- oscura/correlation/multi_protocol.py +811 -0
- oscura/discovery/auto_decoder.py +117 -35
- oscura/discovery/comparison.py +191 -86
- oscura/discovery/quality_validator.py +155 -68
- oscura/discovery/signal_detector.py +196 -79
- oscura/export/__init__.py +18 -20
- oscura/export/kaitai_struct.py +513 -0
- oscura/export/scapy_layer.py +801 -0
- oscura/export/wireshark/README.md +15 -15
- oscura/export/wireshark/generator.py +1 -1
- oscura/export/wireshark/templates/dissector.lua.j2 +2 -2
- oscura/export/wireshark_dissector.py +746 -0
- oscura/guidance/wizard.py +207 -111
- oscura/hardware/__init__.py +19 -0
- oscura/{acquisition → hardware/acquisition}/__init__.py +4 -4
- oscura/{acquisition → hardware/acquisition}/file.py +2 -2
- oscura/{acquisition → hardware/acquisition}/hardware.py +7 -7
- oscura/{acquisition → hardware/acquisition}/saleae.py +15 -12
- oscura/{acquisition → hardware/acquisition}/socketcan.py +1 -1
- oscura/{acquisition → hardware/acquisition}/streaming.py +2 -2
- oscura/{acquisition → hardware/acquisition}/synthetic.py +3 -3
- oscura/{acquisition → hardware/acquisition}/visa.py +33 -11
- oscura/hardware/firmware/__init__.py +29 -0
- oscura/hardware/firmware/pattern_recognition.py +874 -0
- oscura/hardware/hal_detector.py +736 -0
- oscura/hardware/security/__init__.py +37 -0
- oscura/hardware/security/side_channel_detector.py +1126 -0
- oscura/inference/__init__.py +4 -0
- oscura/inference/active_learning/README.md +7 -7
- oscura/inference/active_learning/observation_table.py +4 -1
- oscura/inference/alignment.py +216 -123
- oscura/inference/bayesian.py +113 -33
- oscura/inference/crc_reverse.py +101 -55
- oscura/inference/logic.py +6 -2
- oscura/inference/message_format.py +342 -183
- oscura/inference/protocol.py +95 -44
- oscura/inference/protocol_dsl.py +180 -82
- oscura/inference/signal_intelligence.py +1439 -706
- oscura/inference/spectral.py +99 -57
- oscura/inference/state_machine.py +810 -158
- oscura/inference/stream.py +270 -110
- oscura/iot/__init__.py +34 -0
- oscura/iot/coap/__init__.py +32 -0
- oscura/iot/coap/analyzer.py +668 -0
- oscura/iot/coap/options.py +212 -0
- oscura/iot/lorawan/__init__.py +21 -0
- oscura/iot/lorawan/crypto.py +206 -0
- oscura/iot/lorawan/decoder.py +801 -0
- oscura/iot/lorawan/mac_commands.py +341 -0
- oscura/iot/mqtt/__init__.py +27 -0
- oscura/iot/mqtt/analyzer.py +999 -0
- oscura/iot/mqtt/properties.py +315 -0
- oscura/iot/zigbee/__init__.py +31 -0
- oscura/iot/zigbee/analyzer.py +615 -0
- oscura/iot/zigbee/security.py +153 -0
- oscura/iot/zigbee/zcl.py +349 -0
- oscura/jupyter/display.py +125 -45
- oscura/{exploratory → jupyter/exploratory}/__init__.py +8 -8
- oscura/{exploratory → jupyter/exploratory}/error_recovery.py +298 -141
- oscura/jupyter/exploratory/fuzzy.py +746 -0
- oscura/{exploratory → jupyter/exploratory}/fuzzy_advanced.py +258 -100
- oscura/{exploratory → jupyter/exploratory}/legacy.py +464 -242
- oscura/{exploratory → jupyter/exploratory}/parse.py +167 -145
- oscura/{exploratory → jupyter/exploratory}/recovery.py +119 -87
- oscura/jupyter/exploratory/sync.py +612 -0
- oscura/{exploratory → jupyter/exploratory}/unknown.py +299 -176
- oscura/jupyter/magic.py +4 -4
- oscura/{ui → jupyter/ui}/__init__.py +2 -2
- oscura/{ui → jupyter/ui}/formatters.py +3 -3
- oscura/{ui → jupyter/ui}/progressive_display.py +153 -82
- oscura/loaders/__init__.py +171 -63
- oscura/loaders/binary.py +88 -1
- oscura/loaders/chipwhisperer.py +153 -137
- oscura/loaders/configurable.py +208 -86
- oscura/loaders/csv_loader.py +458 -215
- oscura/loaders/hdf5_loader.py +278 -119
- oscura/loaders/lazy.py +87 -54
- oscura/loaders/mmap_loader.py +1 -1
- oscura/loaders/numpy_loader.py +253 -116
- oscura/loaders/pcap.py +226 -151
- oscura/loaders/rigol.py +110 -49
- oscura/loaders/sigrok.py +201 -78
- oscura/loaders/tdms.py +81 -58
- oscura/loaders/tektronix.py +291 -174
- oscura/loaders/touchstone.py +182 -87
- oscura/loaders/vcd.py +215 -117
- oscura/loaders/wav.py +155 -68
- oscura/reporting/__init__.py +9 -7
- oscura/reporting/analyze.py +352 -146
- oscura/reporting/argument_preparer.py +69 -14
- oscura/reporting/auto_report.py +97 -61
- oscura/reporting/batch.py +131 -58
- oscura/reporting/chart_selection.py +57 -45
- oscura/reporting/comparison.py +63 -17
- oscura/reporting/content/executive.py +76 -24
- oscura/reporting/core_formats/multi_format.py +11 -8
- oscura/reporting/engine.py +312 -158
- oscura/reporting/enhanced_reports.py +949 -0
- oscura/reporting/export.py +86 -43
- oscura/reporting/formatting/numbers.py +69 -42
- oscura/reporting/html.py +139 -58
- oscura/reporting/index.py +137 -65
- oscura/reporting/output.py +158 -67
- oscura/reporting/pdf.py +67 -102
- oscura/reporting/plots.py +191 -112
- oscura/reporting/sections.py +88 -47
- oscura/reporting/standards.py +104 -61
- oscura/reporting/summary_generator.py +75 -55
- oscura/reporting/tables.py +138 -54
- oscura/reporting/templates/enhanced/protocol_re.html +525 -0
- oscura/reporting/templates/index.md +13 -13
- oscura/sessions/__init__.py +14 -23
- oscura/sessions/base.py +3 -3
- oscura/sessions/blackbox.py +106 -10
- oscura/sessions/generic.py +2 -2
- oscura/sessions/legacy.py +783 -0
- oscura/side_channel/__init__.py +63 -0
- oscura/side_channel/dpa.py +1025 -0
- oscura/utils/__init__.py +15 -1
- oscura/utils/autodetect.py +1 -5
- oscura/utils/bitwise.py +118 -0
- oscura/{builders → utils/builders}/__init__.py +1 -1
- oscura/{comparison → utils/comparison}/__init__.py +6 -6
- oscura/{comparison → utils/comparison}/compare.py +202 -101
- oscura/{comparison → utils/comparison}/golden.py +83 -63
- oscura/{comparison → utils/comparison}/limits.py +313 -89
- oscura/{comparison → utils/comparison}/mask.py +151 -45
- oscura/{comparison → utils/comparison}/trace_diff.py +1 -1
- oscura/{comparison → utils/comparison}/visualization.py +147 -89
- oscura/{component → utils/component}/__init__.py +3 -3
- oscura/{component → utils/component}/impedance.py +122 -58
- oscura/{component → utils/component}/reactive.py +165 -168
- oscura/{component → utils/component}/transmission_line.py +3 -3
- oscura/{filtering → utils/filtering}/__init__.py +6 -6
- oscura/{filtering → utils/filtering}/base.py +1 -1
- oscura/{filtering → utils/filtering}/convenience.py +2 -2
- oscura/{filtering → utils/filtering}/design.py +169 -93
- oscura/{filtering → utils/filtering}/filters.py +2 -2
- oscura/{filtering → utils/filtering}/introspection.py +2 -2
- oscura/utils/geometry.py +31 -0
- oscura/utils/imports.py +184 -0
- oscura/utils/lazy.py +1 -1
- oscura/{math → utils/math}/__init__.py +2 -2
- oscura/{math → utils/math}/arithmetic.py +114 -48
- oscura/{math → utils/math}/interpolation.py +139 -106
- oscura/utils/memory.py +129 -66
- oscura/utils/memory_advanced.py +92 -9
- oscura/utils/memory_extensions.py +10 -8
- oscura/{optimization → utils/optimization}/__init__.py +1 -1
- oscura/{optimization → utils/optimization}/search.py +2 -2
- oscura/utils/performance/__init__.py +58 -0
- oscura/utils/performance/caching.py +889 -0
- oscura/utils/performance/lsh_clustering.py +333 -0
- oscura/utils/performance/memory_optimizer.py +699 -0
- oscura/utils/performance/optimizations.py +675 -0
- oscura/utils/performance/parallel.py +654 -0
- oscura/utils/performance/profiling.py +661 -0
- oscura/{pipeline → utils/pipeline}/base.py +1 -1
- oscura/{pipeline → utils/pipeline}/composition.py +11 -3
- oscura/{pipeline → utils/pipeline}/parallel.py +3 -2
- oscura/{pipeline → utils/pipeline}/pipeline.py +1 -1
- oscura/{pipeline → utils/pipeline}/reverse_engineering.py +412 -221
- oscura/{search → utils/search}/__init__.py +3 -3
- oscura/{search → utils/search}/anomaly.py +188 -58
- oscura/utils/search/context.py +294 -0
- oscura/{search → utils/search}/pattern.py +138 -10
- oscura/utils/serial.py +51 -0
- oscura/utils/storage/__init__.py +61 -0
- oscura/utils/storage/database.py +1166 -0
- oscura/{streaming → utils/streaming}/chunked.py +302 -143
- oscura/{streaming → utils/streaming}/progressive.py +1 -1
- oscura/{streaming → utils/streaming}/realtime.py +3 -2
- oscura/{triggering → utils/triggering}/__init__.py +6 -6
- oscura/{triggering → utils/triggering}/base.py +6 -6
- oscura/{triggering → utils/triggering}/edge.py +2 -2
- oscura/{triggering → utils/triggering}/pattern.py +2 -2
- oscura/{triggering → utils/triggering}/pulse.py +115 -74
- oscura/{triggering → utils/triggering}/window.py +2 -2
- oscura/utils/validation.py +32 -0
- oscura/validation/__init__.py +121 -0
- oscura/{compliance → validation/compliance}/__init__.py +5 -5
- oscura/{compliance → validation/compliance}/advanced.py +5 -5
- oscura/{compliance → validation/compliance}/masks.py +1 -1
- oscura/{compliance → validation/compliance}/reporting.py +127 -53
- oscura/{compliance → validation/compliance}/testing.py +114 -52
- oscura/validation/compliance_tests.py +915 -0
- oscura/validation/fuzzer.py +990 -0
- oscura/validation/grammar_tests.py +596 -0
- oscura/validation/grammar_validator.py +904 -0
- oscura/validation/hil_testing.py +977 -0
- oscura/{quality → validation/quality}/__init__.py +4 -4
- oscura/{quality → validation/quality}/ensemble.py +251 -171
- oscura/{quality → validation/quality}/explainer.py +3 -3
- oscura/{quality → validation/quality}/scoring.py +1 -1
- oscura/{quality → validation/quality}/warnings.py +4 -4
- oscura/validation/regression_suite.py +808 -0
- oscura/validation/replay.py +788 -0
- oscura/{testing → validation/testing}/__init__.py +2 -2
- oscura/{testing → validation/testing}/synthetic.py +5 -5
- oscura/visualization/__init__.py +9 -0
- oscura/visualization/accessibility.py +1 -1
- oscura/visualization/annotations.py +64 -67
- oscura/visualization/colors.py +7 -7
- oscura/visualization/digital.py +180 -81
- oscura/visualization/eye.py +236 -85
- oscura/visualization/interactive.py +320 -143
- oscura/visualization/jitter.py +587 -247
- oscura/visualization/layout.py +169 -134
- oscura/visualization/optimization.py +103 -52
- oscura/visualization/palettes.py +1 -1
- oscura/visualization/power.py +427 -211
- oscura/visualization/power_extended.py +626 -297
- oscura/visualization/presets.py +2 -0
- oscura/visualization/protocols.py +495 -181
- oscura/visualization/render.py +79 -63
- oscura/visualization/reverse_engineering.py +171 -124
- oscura/visualization/signal_integrity.py +460 -279
- oscura/visualization/specialized.py +190 -100
- oscura/visualization/spectral.py +670 -255
- oscura/visualization/thumbnails.py +166 -137
- oscura/visualization/waveform.py +150 -63
- oscura/workflows/__init__.py +3 -0
- oscura/{batch → workflows/batch}/__init__.py +5 -5
- oscura/{batch → workflows/batch}/advanced.py +150 -75
- oscura/workflows/batch/aggregate.py +531 -0
- oscura/workflows/batch/analyze.py +236 -0
- oscura/{batch → workflows/batch}/logging.py +2 -2
- oscura/{batch → workflows/batch}/metrics.py +1 -1
- oscura/workflows/complete_re.py +1144 -0
- oscura/workflows/compliance.py +44 -54
- oscura/workflows/digital.py +197 -51
- oscura/workflows/legacy/__init__.py +12 -0
- oscura/{workflow → workflows/legacy}/dag.py +4 -1
- oscura/workflows/multi_trace.py +9 -9
- oscura/workflows/power.py +42 -62
- oscura/workflows/protocol.py +82 -49
- oscura/workflows/reverse_engineering.py +351 -150
- oscura/workflows/signal_integrity.py +157 -82
- oscura-0.6.0.dist-info/METADATA +643 -0
- oscura-0.6.0.dist-info/RECORD +590 -0
- oscura/analyzers/digital/ic_database.py +0 -498
- oscura/analyzers/digital/timing_paths.py +0 -339
- oscura/analyzers/digital/vintage.py +0 -377
- oscura/analyzers/digital/vintage_result.py +0 -148
- oscura/analyzers/protocols/parallel_bus.py +0 -449
- oscura/batch/aggregate.py +0 -300
- oscura/batch/analyze.py +0 -139
- oscura/dsl/__init__.py +0 -73
- oscura/exceptions.py +0 -59
- oscura/exploratory/fuzzy.py +0 -513
- oscura/exploratory/sync.py +0 -384
- oscura/export/wavedrom.py +0 -430
- oscura/exporters/__init__.py +0 -94
- oscura/exporters/csv.py +0 -303
- oscura/exporters/exporters.py +0 -44
- oscura/exporters/hdf5.py +0 -217
- oscura/exporters/html_export.py +0 -701
- oscura/exporters/json_export.py +0 -338
- oscura/exporters/markdown_export.py +0 -367
- oscura/exporters/matlab_export.py +0 -354
- oscura/exporters/npz_export.py +0 -219
- oscura/exporters/spice_export.py +0 -210
- oscura/exporters/vintage_logic_csv.py +0 -247
- oscura/reporting/vintage_logic_report.py +0 -523
- oscura/search/context.py +0 -149
- oscura/session/__init__.py +0 -34
- oscura/session/annotations.py +0 -289
- oscura/session/history.py +0 -313
- oscura/session/session.py +0 -520
- oscura/visualization/digital_advanced.py +0 -718
- oscura/visualization/figure_manager.py +0 -156
- oscura/workflow/__init__.py +0 -13
- oscura-0.5.0.dist-info/METADATA +0 -407
- oscura-0.5.0.dist-info/RECORD +0 -486
- /oscura/core/{config.py → config/legacy.py} +0 -0
- /oscura/{extensibility → core/extensibility}/__init__.py +0 -0
- /oscura/{extensibility → core/extensibility}/registry.py +0 -0
- /oscura/{plugins → core/plugins}/isolation.py +0 -0
- /oscura/{builders → utils/builders}/signal_builder.py +0 -0
- /oscura/{optimization → utils/optimization}/parallel.py +0 -0
- /oscura/{pipeline → utils/pipeline}/__init__.py +0 -0
- /oscura/{streaming → utils/streaming}/__init__.py +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/WHEEL +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/entry_points.txt +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1144 @@
|
|
|
1
|
+
"""Complete one-function protocol reverse engineering workflow.
|
|
2
|
+
|
|
3
|
+
This module provides a single function that automates the ENTIRE reverse
|
|
4
|
+
engineering workflow from raw captures to working dissectors and documentation.
|
|
5
|
+
|
|
6
|
+
Example:
|
|
7
|
+
>>> from oscura.workflows import full_protocol_re
|
|
8
|
+
>>> result = full_protocol_re(
|
|
9
|
+
... captures={"idle": "idle.bin", "button": "button.bin"},
|
|
10
|
+
... export_dir="output/"
|
|
11
|
+
... )
|
|
12
|
+
>>> print(f"Dissector: {result.dissector_path}")
|
|
13
|
+
>>> print(f"Confidence: {result.confidence_score:.2f}")
|
|
14
|
+
>>> print(f"Generated in {result.execution_time:.1f}s")
|
|
15
|
+
|
|
16
|
+
The workflow automates 14 steps:
|
|
17
|
+
1. Load captures (auto-detect format)
|
|
18
|
+
2. Detect protocol (timing, voltage levels)
|
|
19
|
+
3. Decode messages
|
|
20
|
+
4. Differential analysis (if multiple captures)
|
|
21
|
+
5. Infer message structure (fields, boundaries)
|
|
22
|
+
6. Detect entropy/crypto regions
|
|
23
|
+
7. Recover CRC/checksums
|
|
24
|
+
8. Extract state machine
|
|
25
|
+
9. Generate Wireshark dissector (.lua)
|
|
26
|
+
10. Generate Scapy layer (.py)
|
|
27
|
+
11. Generate Kaitai struct (.ksy)
|
|
28
|
+
12. Create test vectors (.json)
|
|
29
|
+
13. Generate HTML/PDF report
|
|
30
|
+
14. Replay validation (if target specified)
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
from __future__ import annotations
|
|
34
|
+
|
|
35
|
+
import json
|
|
36
|
+
import logging
|
|
37
|
+
import time
|
|
38
|
+
from dataclasses import dataclass, field
|
|
39
|
+
from pathlib import Path
|
|
40
|
+
from typing import TYPE_CHECKING, Any
|
|
41
|
+
|
|
42
|
+
import numpy as np
|
|
43
|
+
from tqdm import tqdm
|
|
44
|
+
|
|
45
|
+
from oscura.workflows.reverse_engineering import (
|
|
46
|
+
ProtocolSpec,
|
|
47
|
+
reverse_engineer_signal,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
if TYPE_CHECKING:
|
|
51
|
+
from oscura.core.types import WaveformTrace
|
|
52
|
+
|
|
53
|
+
logger = logging.getLogger(__name__)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclass
|
|
57
|
+
class _WorkflowContext:
|
|
58
|
+
"""Internal context for workflow execution."""
|
|
59
|
+
|
|
60
|
+
capture_dict: dict[str, str]
|
|
61
|
+
export_path: Path
|
|
62
|
+
verbose: bool
|
|
63
|
+
protocol_hint: str | None
|
|
64
|
+
auto_crc: bool
|
|
65
|
+
detect_crypto: bool
|
|
66
|
+
generate_tests: bool
|
|
67
|
+
validate: bool
|
|
68
|
+
kwargs: dict[str, Any]
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
@dataclass
|
|
72
|
+
class _WorkflowResults:
|
|
73
|
+
"""Internal results accumulator for workflow."""
|
|
74
|
+
|
|
75
|
+
warnings: list[str] = field(default_factory=list)
|
|
76
|
+
partial_results: dict[str, Any] = field(default_factory=dict)
|
|
77
|
+
dissector_path: Path | None = None
|
|
78
|
+
scapy_layer_path: Path | None = None
|
|
79
|
+
kaitai_path: Path | None = None
|
|
80
|
+
test_vectors_path: Path | None = None
|
|
81
|
+
report_path: Path | None = None
|
|
82
|
+
validation_result: Any | None = None
|
|
83
|
+
protocol_spec: ProtocolSpec | None = None
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
|
|
87
|
+
class CompleteREResult:
|
|
88
|
+
"""Results from complete reverse engineering workflow.
|
|
89
|
+
|
|
90
|
+
Attributes:
|
|
91
|
+
protocol_spec: Inferred protocol specification.
|
|
92
|
+
dissector_path: Path to generated Wireshark dissector (.lua).
|
|
93
|
+
scapy_layer_path: Path to generated Scapy layer (.py).
|
|
94
|
+
kaitai_path: Path to generated Kaitai struct (.ksy).
|
|
95
|
+
test_vectors_path: Path to generated test vectors (.json).
|
|
96
|
+
report_path: Path to generated HTML/PDF report.
|
|
97
|
+
validation_result: Replay validation result (None if not performed).
|
|
98
|
+
confidence_score: Overall confidence score (0-1).
|
|
99
|
+
warnings: List of warnings from workflow execution.
|
|
100
|
+
execution_time: Total execution time in seconds.
|
|
101
|
+
partial_results: Dict of partial results if workflow incomplete.
|
|
102
|
+
"""
|
|
103
|
+
|
|
104
|
+
protocol_spec: ProtocolSpec
|
|
105
|
+
dissector_path: Path | None
|
|
106
|
+
scapy_layer_path: Path | None
|
|
107
|
+
kaitai_path: Path | None
|
|
108
|
+
test_vectors_path: Path | None
|
|
109
|
+
report_path: Path | None
|
|
110
|
+
validation_result: Any | None
|
|
111
|
+
confidence_score: float
|
|
112
|
+
warnings: list[str]
|
|
113
|
+
execution_time: float
|
|
114
|
+
partial_results: dict[str, Any]
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def full_protocol_re(
|
|
118
|
+
captures: dict[str, str] | str,
|
|
119
|
+
protocol_hint: str | None = None,
|
|
120
|
+
export_dir: str = "output/",
|
|
121
|
+
validate: bool = True,
|
|
122
|
+
auto_crc: bool = True,
|
|
123
|
+
detect_crypto: bool = True,
|
|
124
|
+
generate_tests: bool = True,
|
|
125
|
+
**kwargs: Any,
|
|
126
|
+
) -> CompleteREResult:
|
|
127
|
+
"""Complete protocol reverse engineering in ONE function call.
|
|
128
|
+
|
|
129
|
+
Automates the entire workflow from raw capture to working dissector.
|
|
130
|
+
This is the cornerstone function of Oscura v0.6.0, providing a unified
|
|
131
|
+
interface to all reverse engineering capabilities.
|
|
132
|
+
|
|
133
|
+
Args:
|
|
134
|
+
captures: Path to capture file OR dict mapping labels to paths.
|
|
135
|
+
protocol_hint: Optional protocol name to skip auto-detection.
|
|
136
|
+
export_dir: Directory for all output files. Created if doesn't exist.
|
|
137
|
+
validate: Perform replay validation if True (requires hardware target).
|
|
138
|
+
auto_crc: Automatically detect and recover CRCs/checksums.
|
|
139
|
+
detect_crypto: Detect encrypted/compressed regions via entropy analysis.
|
|
140
|
+
generate_tests: Generate test vectors for validation.
|
|
141
|
+
**kwargs: Additional workflow options.
|
|
142
|
+
|
|
143
|
+
Returns:
|
|
144
|
+
CompleteREResult with all generated artifacts and metadata.
|
|
145
|
+
|
|
146
|
+
Raises:
|
|
147
|
+
ValueError: If captures is empty or invalid format.
|
|
148
|
+
FileNotFoundError: If capture files don't exist.
|
|
149
|
+
RuntimeError: If critical workflow steps fail (with partial results).
|
|
150
|
+
|
|
151
|
+
Example:
|
|
152
|
+
>>> result = full_protocol_re("unknown_protocol.bin")
|
|
153
|
+
>>> print(f"Protocol: {result.protocol_spec.name}")
|
|
154
|
+
>>> print(f"Confidence: {result.confidence_score:.2%}")
|
|
155
|
+
"""
|
|
156
|
+
start_time = time.time()
|
|
157
|
+
context = _initialize_workflow_context(
|
|
158
|
+
captures,
|
|
159
|
+
export_dir,
|
|
160
|
+
protocol_hint,
|
|
161
|
+
validate,
|
|
162
|
+
auto_crc,
|
|
163
|
+
detect_crypto,
|
|
164
|
+
generate_tests,
|
|
165
|
+
kwargs,
|
|
166
|
+
)
|
|
167
|
+
results = _WorkflowResults()
|
|
168
|
+
|
|
169
|
+
# Execute 14-step workflow with progress tracking
|
|
170
|
+
total_steps = 14
|
|
171
|
+
with tqdm(total=total_steps, desc="Complete RE workflow", disable=not context.verbose) as pbar:
|
|
172
|
+
traces = _step_1_load_captures(pbar, context, results)
|
|
173
|
+
detected_protocol = _step_2_detect_protocol(pbar, context, results, traces)
|
|
174
|
+
protocol_spec, re_result = _step_3_decode_messages(
|
|
175
|
+
pbar, context, results, traces, detected_protocol
|
|
176
|
+
)
|
|
177
|
+
_step_4_differential_analysis(pbar, results, traces, protocol_spec)
|
|
178
|
+
_step_5_infer_structure(pbar, results, re_result, protocol_spec)
|
|
179
|
+
_step_6_detect_crypto(pbar, context, results, re_result)
|
|
180
|
+
_step_7_recover_crc(pbar, context, results, re_result, protocol_spec)
|
|
181
|
+
_step_8_extract_state_machine(pbar, results, re_result)
|
|
182
|
+
_step_9_generate_wireshark(pbar, context, results, protocol_spec)
|
|
183
|
+
_step_10_generate_scapy(pbar, context, results, protocol_spec)
|
|
184
|
+
_step_11_generate_kaitai(pbar, context, results, protocol_spec)
|
|
185
|
+
_step_12_create_test_vectors(pbar, context, results, re_result, protocol_spec)
|
|
186
|
+
_step_13_generate_report(pbar, context, results, protocol_spec)
|
|
187
|
+
_step_14_replay_validation(pbar, context, results, protocol_spec, re_result)
|
|
188
|
+
|
|
189
|
+
# Finalize results
|
|
190
|
+
results.protocol_spec = protocol_spec
|
|
191
|
+
confidence_score = _calculate_overall_confidence(
|
|
192
|
+
protocol_spec, results.partial_results, results.warnings
|
|
193
|
+
)
|
|
194
|
+
execution_time = time.time() - start_time
|
|
195
|
+
|
|
196
|
+
return CompleteREResult(
|
|
197
|
+
protocol_spec=protocol_spec,
|
|
198
|
+
dissector_path=results.dissector_path,
|
|
199
|
+
scapy_layer_path=results.scapy_layer_path,
|
|
200
|
+
kaitai_path=results.kaitai_path,
|
|
201
|
+
test_vectors_path=results.test_vectors_path,
|
|
202
|
+
report_path=results.report_path,
|
|
203
|
+
validation_result=results.validation_result,
|
|
204
|
+
confidence_score=confidence_score,
|
|
205
|
+
warnings=results.warnings,
|
|
206
|
+
execution_time=execution_time,
|
|
207
|
+
partial_results=results.partial_results,
|
|
208
|
+
)
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
# =============================================================================
|
|
212
|
+
# Workflow Step Functions
|
|
213
|
+
# =============================================================================
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def _step_1_load_captures(
|
|
217
|
+
pbar: Any, context: _WorkflowContext, results: _WorkflowResults
|
|
218
|
+
) -> dict[str, WaveformTrace]:
|
|
219
|
+
"""Execute workflow step 1: Load captures.
|
|
220
|
+
|
|
221
|
+
Args:
|
|
222
|
+
pbar: Progress bar for updates.
|
|
223
|
+
context: Workflow context.
|
|
224
|
+
results: Results accumulator.
|
|
225
|
+
|
|
226
|
+
Returns:
|
|
227
|
+
Loaded traces.
|
|
228
|
+
|
|
229
|
+
Raises:
|
|
230
|
+
RuntimeError: If loading fails (critical).
|
|
231
|
+
"""
|
|
232
|
+
pbar.set_description("Loading captures")
|
|
233
|
+
try:
|
|
234
|
+
traces = _load_captures(context.capture_dict)
|
|
235
|
+
results.partial_results["traces"] = traces
|
|
236
|
+
pbar.update(1)
|
|
237
|
+
return traces
|
|
238
|
+
except Exception as e:
|
|
239
|
+
msg = f"Failed to load captures: {e}"
|
|
240
|
+
results.warnings.append(msg)
|
|
241
|
+
logger.exception(msg)
|
|
242
|
+
raise RuntimeError(msg) from e
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
def _step_2_detect_protocol(
|
|
246
|
+
pbar: Any,
|
|
247
|
+
context: _WorkflowContext,
|
|
248
|
+
results: _WorkflowResults,
|
|
249
|
+
traces: dict[str, WaveformTrace],
|
|
250
|
+
) -> str:
|
|
251
|
+
"""Execute workflow step 2: Detect protocol.
|
|
252
|
+
|
|
253
|
+
Args:
|
|
254
|
+
pbar: Progress bar for updates.
|
|
255
|
+
context: Workflow context.
|
|
256
|
+
results: Results accumulator.
|
|
257
|
+
traces: Loaded traces.
|
|
258
|
+
|
|
259
|
+
Returns:
|
|
260
|
+
Detected protocol name.
|
|
261
|
+
"""
|
|
262
|
+
pbar.set_description("Detecting protocol")
|
|
263
|
+
try:
|
|
264
|
+
if context.protocol_hint:
|
|
265
|
+
detected_protocol = context.protocol_hint
|
|
266
|
+
results.partial_results["protocol_detection"] = {
|
|
267
|
+
"hint": context.protocol_hint,
|
|
268
|
+
"confidence": 1.0,
|
|
269
|
+
}
|
|
270
|
+
else:
|
|
271
|
+
detected_protocol, detection_confidence = _detect_protocol(
|
|
272
|
+
traces, context.kwargs.get("expected_baud_rates")
|
|
273
|
+
)
|
|
274
|
+
results.partial_results["protocol_detection"] = {
|
|
275
|
+
"protocol": detected_protocol,
|
|
276
|
+
"confidence": detection_confidence,
|
|
277
|
+
}
|
|
278
|
+
if detection_confidence < 0.6:
|
|
279
|
+
results.warnings.append(
|
|
280
|
+
f"Low protocol detection confidence: {detection_confidence:.2f}"
|
|
281
|
+
)
|
|
282
|
+
pbar.update(1)
|
|
283
|
+
return detected_protocol
|
|
284
|
+
except Exception as e:
|
|
285
|
+
msg = f"Protocol detection failed: {e}"
|
|
286
|
+
results.warnings.append(msg)
|
|
287
|
+
logger.warning(msg)
|
|
288
|
+
pbar.update(1)
|
|
289
|
+
return "unknown"
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
def _step_3_decode_messages(
|
|
293
|
+
pbar: Any,
|
|
294
|
+
context: _WorkflowContext,
|
|
295
|
+
results: _WorkflowResults,
|
|
296
|
+
traces: dict[str, WaveformTrace],
|
|
297
|
+
detected_protocol: str,
|
|
298
|
+
) -> tuple[ProtocolSpec, Any]:
|
|
299
|
+
"""Execute workflow step 3: Decode messages.
|
|
300
|
+
|
|
301
|
+
Args:
|
|
302
|
+
pbar: Progress bar for updates.
|
|
303
|
+
context: Workflow context.
|
|
304
|
+
results: Results accumulator.
|
|
305
|
+
traces: Loaded traces.
|
|
306
|
+
detected_protocol: Protocol from detection step.
|
|
307
|
+
|
|
308
|
+
Returns:
|
|
309
|
+
Tuple of (protocol_spec, re_result).
|
|
310
|
+
"""
|
|
311
|
+
pbar.set_description("Decoding messages")
|
|
312
|
+
try:
|
|
313
|
+
primary_trace = next(iter(traces.values()))
|
|
314
|
+
re_result = reverse_engineer_signal(
|
|
315
|
+
primary_trace,
|
|
316
|
+
expected_baud_rates=context.kwargs.get("expected_baud_rates"),
|
|
317
|
+
min_frames=context.kwargs.get("min_frames", 3),
|
|
318
|
+
max_frame_length=context.kwargs.get("max_frame_length", 256),
|
|
319
|
+
checksum_types=context.kwargs.get("checksum_types"),
|
|
320
|
+
)
|
|
321
|
+
protocol_spec = re_result.protocol_spec
|
|
322
|
+
results.partial_results["reverse_engineering"] = re_result
|
|
323
|
+
pbar.update(1)
|
|
324
|
+
return protocol_spec, re_result
|
|
325
|
+
except Exception as e:
|
|
326
|
+
msg = f"Message decoding failed: {e}"
|
|
327
|
+
results.warnings.append(msg)
|
|
328
|
+
logger.exception(msg)
|
|
329
|
+
# Create minimal protocol spec for graceful degradation
|
|
330
|
+
# Use "Unknown" since we couldn't decode despite detection
|
|
331
|
+
protocol_spec = ProtocolSpec(
|
|
332
|
+
name="Unknown",
|
|
333
|
+
baud_rate=0.0,
|
|
334
|
+
frame_format="unknown",
|
|
335
|
+
sync_pattern="",
|
|
336
|
+
frame_length=None,
|
|
337
|
+
fields=[],
|
|
338
|
+
checksum_type=None,
|
|
339
|
+
checksum_position=None,
|
|
340
|
+
confidence=0.0,
|
|
341
|
+
)
|
|
342
|
+
pbar.update(1)
|
|
343
|
+
return protocol_spec, None
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
def _step_4_differential_analysis(
|
|
347
|
+
pbar: Any,
|
|
348
|
+
results: _WorkflowResults,
|
|
349
|
+
traces: dict[str, WaveformTrace],
|
|
350
|
+
protocol_spec: ProtocolSpec,
|
|
351
|
+
) -> None:
|
|
352
|
+
"""Execute workflow step 4: Differential analysis.
|
|
353
|
+
|
|
354
|
+
Args:
|
|
355
|
+
pbar: Progress bar for updates.
|
|
356
|
+
results: Results accumulator.
|
|
357
|
+
traces: Loaded traces.
|
|
358
|
+
protocol_spec: Protocol specification to enhance.
|
|
359
|
+
"""
|
|
360
|
+
pbar.set_description("Differential analysis")
|
|
361
|
+
if len(traces) > 1:
|
|
362
|
+
try:
|
|
363
|
+
diff_results = _differential_analysis(traces)
|
|
364
|
+
results.partial_results["differential"] = diff_results
|
|
365
|
+
_enhance_spec_with_differential(protocol_spec, diff_results)
|
|
366
|
+
except Exception as e:
|
|
367
|
+
msg = f"Differential analysis failed: {e}"
|
|
368
|
+
results.warnings.append(msg)
|
|
369
|
+
logger.warning(msg)
|
|
370
|
+
pbar.update(1)
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
def _step_5_infer_structure(
|
|
374
|
+
pbar: Any, results: _WorkflowResults, re_result: Any, protocol_spec: ProtocolSpec
|
|
375
|
+
) -> None:
|
|
376
|
+
"""Execute workflow step 5: Infer message structure.
|
|
377
|
+
|
|
378
|
+
Args:
|
|
379
|
+
pbar: Progress bar for updates.
|
|
380
|
+
results: Results accumulator.
|
|
381
|
+
re_result: Reverse engineering result.
|
|
382
|
+
protocol_spec: Protocol specification to enhance.
|
|
383
|
+
"""
|
|
384
|
+
pbar.set_description("Inferring structure")
|
|
385
|
+
try:
|
|
386
|
+
if hasattr(re_result, "frames") and re_result.frames:
|
|
387
|
+
structure = _infer_message_structure(re_result.frames)
|
|
388
|
+
results.partial_results["structure"] = structure
|
|
389
|
+
if structure.get("fields"):
|
|
390
|
+
protocol_spec.fields = structure["fields"]
|
|
391
|
+
except Exception as e:
|
|
392
|
+
msg = f"Structure inference failed: {e}"
|
|
393
|
+
results.warnings.append(msg)
|
|
394
|
+
logger.warning(msg)
|
|
395
|
+
pbar.update(1)
|
|
396
|
+
|
|
397
|
+
|
|
398
|
+
def _step_6_detect_crypto(
|
|
399
|
+
pbar: Any, context: _WorkflowContext, results: _WorkflowResults, re_result: Any
|
|
400
|
+
) -> None:
|
|
401
|
+
"""Execute workflow step 6: Detect crypto/entropy regions.
|
|
402
|
+
|
|
403
|
+
Args:
|
|
404
|
+
pbar: Progress bar for updates.
|
|
405
|
+
context: Workflow context.
|
|
406
|
+
results: Results accumulator.
|
|
407
|
+
re_result: Reverse engineering result.
|
|
408
|
+
"""
|
|
409
|
+
pbar.set_description("Detecting crypto")
|
|
410
|
+
if context.detect_crypto:
|
|
411
|
+
try:
|
|
412
|
+
if hasattr(re_result, "frames") and re_result.frames:
|
|
413
|
+
crypto_regions = _detect_crypto_regions(re_result.frames)
|
|
414
|
+
results.partial_results["crypto"] = crypto_regions
|
|
415
|
+
if crypto_regions:
|
|
416
|
+
results.warnings.append(
|
|
417
|
+
f"Found {len(crypto_regions)} high-entropy regions (possible encryption)"
|
|
418
|
+
)
|
|
419
|
+
except Exception as e:
|
|
420
|
+
msg = f"Crypto detection failed: {e}"
|
|
421
|
+
results.warnings.append(msg)
|
|
422
|
+
logger.warning(msg)
|
|
423
|
+
pbar.update(1)
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def _step_7_recover_crc(
|
|
427
|
+
pbar: Any,
|
|
428
|
+
context: _WorkflowContext,
|
|
429
|
+
results: _WorkflowResults,
|
|
430
|
+
re_result: Any,
|
|
431
|
+
protocol_spec: ProtocolSpec,
|
|
432
|
+
) -> None:
|
|
433
|
+
"""Execute workflow step 7: Recover CRC/checksums.
|
|
434
|
+
|
|
435
|
+
Args:
|
|
436
|
+
pbar: Progress bar for updates.
|
|
437
|
+
context: Workflow context.
|
|
438
|
+
results: Results accumulator.
|
|
439
|
+
re_result: Reverse engineering result.
|
|
440
|
+
protocol_spec: Protocol specification to enhance.
|
|
441
|
+
"""
|
|
442
|
+
pbar.set_description("Recovering CRCs")
|
|
443
|
+
if context.auto_crc:
|
|
444
|
+
try:
|
|
445
|
+
if hasattr(re_result, "frames") and re_result.frames:
|
|
446
|
+
crc_results = _recover_crc(re_result.frames, context.kwargs.get("checksum_types"))
|
|
447
|
+
results.partial_results["crc"] = crc_results
|
|
448
|
+
if crc_results and crc_results.get("checksum_type"):
|
|
449
|
+
protocol_spec.checksum_type = crc_results["checksum_type"]
|
|
450
|
+
protocol_spec.checksum_position = crc_results.get("position", -1)
|
|
451
|
+
except Exception as e:
|
|
452
|
+
msg = f"CRC recovery failed: {e}"
|
|
453
|
+
results.warnings.append(msg)
|
|
454
|
+
logger.warning(msg)
|
|
455
|
+
pbar.update(1)
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
def _step_8_extract_state_machine(pbar: Any, results: _WorkflowResults, re_result: Any) -> None:
|
|
459
|
+
"""Execute workflow step 8: Extract state machine.
|
|
460
|
+
|
|
461
|
+
Args:
|
|
462
|
+
pbar: Progress bar for updates.
|
|
463
|
+
results: Results accumulator.
|
|
464
|
+
re_result: Reverse engineering result.
|
|
465
|
+
"""
|
|
466
|
+
pbar.set_description("Extracting state machine")
|
|
467
|
+
try:
|
|
468
|
+
if hasattr(re_result, "frames") and re_result.frames:
|
|
469
|
+
state_machine = _extract_state_machine(re_result.frames)
|
|
470
|
+
results.partial_results["state_machine"] = state_machine
|
|
471
|
+
except Exception as e:
|
|
472
|
+
msg = f"State machine extraction failed: {e}"
|
|
473
|
+
results.warnings.append(msg)
|
|
474
|
+
logger.warning(msg)
|
|
475
|
+
pbar.update(1)
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
def _step_9_generate_wireshark(
|
|
479
|
+
pbar: Any, context: _WorkflowContext, results: _WorkflowResults, protocol_spec: ProtocolSpec
|
|
480
|
+
) -> None:
|
|
481
|
+
"""Execute workflow step 9: Generate Wireshark dissector.
|
|
482
|
+
|
|
483
|
+
Args:
|
|
484
|
+
pbar: Progress bar for updates.
|
|
485
|
+
context: Workflow context.
|
|
486
|
+
results: Results accumulator.
|
|
487
|
+
protocol_spec: Protocol specification.
|
|
488
|
+
"""
|
|
489
|
+
pbar.set_description("Generating Wireshark dissector")
|
|
490
|
+
try:
|
|
491
|
+
results.dissector_path = (
|
|
492
|
+
context.export_path / f"{protocol_spec.name.replace(' ', '_').lower()}.lua"
|
|
493
|
+
)
|
|
494
|
+
_generate_wireshark_dissector(protocol_spec, results.dissector_path)
|
|
495
|
+
except Exception as e:
|
|
496
|
+
msg = f"Wireshark dissector generation failed: {e}"
|
|
497
|
+
results.warnings.append(msg)
|
|
498
|
+
logger.warning(msg)
|
|
499
|
+
results.dissector_path = None
|
|
500
|
+
pbar.update(1)
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
def _step_10_generate_scapy(
|
|
504
|
+
pbar: Any, context: _WorkflowContext, results: _WorkflowResults, protocol_spec: ProtocolSpec
|
|
505
|
+
) -> None:
|
|
506
|
+
"""Execute workflow step 10: Generate Scapy layer.
|
|
507
|
+
|
|
508
|
+
Args:
|
|
509
|
+
pbar: Progress bar for updates.
|
|
510
|
+
context: Workflow context.
|
|
511
|
+
results: Results accumulator.
|
|
512
|
+
protocol_spec: Protocol specification.
|
|
513
|
+
"""
|
|
514
|
+
pbar.set_description("Generating Scapy layer")
|
|
515
|
+
try:
|
|
516
|
+
results.scapy_layer_path = (
|
|
517
|
+
context.export_path / f"{protocol_spec.name.replace(' ', '_').lower()}.py"
|
|
518
|
+
)
|
|
519
|
+
_generate_scapy_layer(protocol_spec, results.scapy_layer_path)
|
|
520
|
+
except Exception as e:
|
|
521
|
+
msg = f"Scapy layer generation failed: {e}"
|
|
522
|
+
results.warnings.append(msg)
|
|
523
|
+
logger.warning(msg)
|
|
524
|
+
results.scapy_layer_path = None
|
|
525
|
+
pbar.update(1)
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
def _step_11_generate_kaitai(
|
|
529
|
+
pbar: Any, context: _WorkflowContext, results: _WorkflowResults, protocol_spec: ProtocolSpec
|
|
530
|
+
) -> None:
|
|
531
|
+
"""Execute workflow step 11: Generate Kaitai struct.
|
|
532
|
+
|
|
533
|
+
Args:
|
|
534
|
+
pbar: Progress bar for updates.
|
|
535
|
+
context: Workflow context.
|
|
536
|
+
results: Results accumulator.
|
|
537
|
+
protocol_spec: Protocol specification.
|
|
538
|
+
"""
|
|
539
|
+
pbar.set_description("Generating Kaitai struct")
|
|
540
|
+
try:
|
|
541
|
+
results.kaitai_path = (
|
|
542
|
+
context.export_path / f"{protocol_spec.name.replace(' ', '_').lower()}.ksy"
|
|
543
|
+
)
|
|
544
|
+
_generate_kaitai_struct(protocol_spec, results.kaitai_path)
|
|
545
|
+
except Exception as e:
|
|
546
|
+
msg = f"Kaitai struct generation failed: {e}"
|
|
547
|
+
results.warnings.append(msg)
|
|
548
|
+
logger.warning(msg)
|
|
549
|
+
results.kaitai_path = None
|
|
550
|
+
pbar.update(1)
|
|
551
|
+
|
|
552
|
+
|
|
553
|
+
def _step_12_create_test_vectors(
|
|
554
|
+
pbar: Any,
|
|
555
|
+
context: _WorkflowContext,
|
|
556
|
+
results: _WorkflowResults,
|
|
557
|
+
re_result: Any,
|
|
558
|
+
protocol_spec: ProtocolSpec,
|
|
559
|
+
) -> None:
|
|
560
|
+
"""Execute workflow step 12: Create test vectors.
|
|
561
|
+
|
|
562
|
+
Args:
|
|
563
|
+
pbar: Progress bar for updates.
|
|
564
|
+
context: Workflow context.
|
|
565
|
+
results: Results accumulator.
|
|
566
|
+
re_result: Reverse engineering result.
|
|
567
|
+
protocol_spec: Protocol specification.
|
|
568
|
+
"""
|
|
569
|
+
pbar.set_description("Creating test vectors")
|
|
570
|
+
if context.generate_tests:
|
|
571
|
+
try:
|
|
572
|
+
if hasattr(re_result, "frames") and re_result.frames:
|
|
573
|
+
results.test_vectors_path = context.export_path / "test_vectors.json"
|
|
574
|
+
_create_test_vectors(re_result.frames, protocol_spec, results.test_vectors_path)
|
|
575
|
+
except Exception as e:
|
|
576
|
+
msg = f"Test vector generation failed: {e}"
|
|
577
|
+
results.warnings.append(msg)
|
|
578
|
+
logger.warning(msg)
|
|
579
|
+
results.test_vectors_path = None
|
|
580
|
+
pbar.update(1)
|
|
581
|
+
|
|
582
|
+
|
|
583
|
+
def _step_13_generate_report(
|
|
584
|
+
pbar: Any, context: _WorkflowContext, results: _WorkflowResults, protocol_spec: ProtocolSpec
|
|
585
|
+
) -> None:
|
|
586
|
+
"""Execute workflow step 13: Generate HTML report.
|
|
587
|
+
|
|
588
|
+
Args:
|
|
589
|
+
pbar: Progress bar for updates.
|
|
590
|
+
context: Workflow context.
|
|
591
|
+
results: Results accumulator.
|
|
592
|
+
protocol_spec: Protocol specification.
|
|
593
|
+
"""
|
|
594
|
+
pbar.set_description("Generating report")
|
|
595
|
+
try:
|
|
596
|
+
results.report_path = context.export_path / "report.html"
|
|
597
|
+
_generate_report(
|
|
598
|
+
protocol_spec, results.partial_results, results.report_path, results.warnings
|
|
599
|
+
)
|
|
600
|
+
except Exception as e:
|
|
601
|
+
msg = f"Report generation failed: {e}"
|
|
602
|
+
results.warnings.append(msg)
|
|
603
|
+
logger.warning(msg)
|
|
604
|
+
results.report_path = None
|
|
605
|
+
pbar.update(1)
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def _step_14_replay_validation(
|
|
609
|
+
pbar: Any,
|
|
610
|
+
context: _WorkflowContext,
|
|
611
|
+
results: _WorkflowResults,
|
|
612
|
+
protocol_spec: ProtocolSpec,
|
|
613
|
+
re_result: Any,
|
|
614
|
+
) -> None:
|
|
615
|
+
"""Execute workflow step 14: Replay validation.
|
|
616
|
+
|
|
617
|
+
Args:
|
|
618
|
+
pbar: Progress bar for updates.
|
|
619
|
+
context: Workflow context.
|
|
620
|
+
results: Results accumulator.
|
|
621
|
+
protocol_spec: Protocol specification.
|
|
622
|
+
re_result: Reverse engineering result.
|
|
623
|
+
"""
|
|
624
|
+
pbar.set_description("Validating (replay)")
|
|
625
|
+
if context.validate and context.kwargs.get("target_device"):
|
|
626
|
+
try:
|
|
627
|
+
results.validation_result = _replay_validation(
|
|
628
|
+
protocol_spec,
|
|
629
|
+
context.kwargs["target_device"],
|
|
630
|
+
re_result.frames if hasattr(re_result, "frames") else [],
|
|
631
|
+
)
|
|
632
|
+
results.partial_results["validation"] = results.validation_result
|
|
633
|
+
except Exception as e:
|
|
634
|
+
msg = f"Replay validation failed: {e}"
|
|
635
|
+
results.warnings.append(msg)
|
|
636
|
+
logger.warning(msg)
|
|
637
|
+
results.validation_result = None
|
|
638
|
+
pbar.update(1)
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
# =============================================================================
|
|
642
|
+
# Internal Helper Functions
|
|
643
|
+
# =============================================================================
|
|
644
|
+
|
|
645
|
+
|
|
646
|
+
def _initialize_workflow_context(
|
|
647
|
+
captures: dict[str, str] | str,
|
|
648
|
+
export_dir: str,
|
|
649
|
+
protocol_hint: str | None,
|
|
650
|
+
validate: bool,
|
|
651
|
+
auto_crc: bool,
|
|
652
|
+
detect_crypto: bool,
|
|
653
|
+
generate_tests: bool,
|
|
654
|
+
kwargs: dict[str, Any],
|
|
655
|
+
) -> _WorkflowContext:
|
|
656
|
+
"""Initialize workflow context from input parameters.
|
|
657
|
+
|
|
658
|
+
Args:
|
|
659
|
+
captures: Path to capture file OR dict mapping labels to paths.
|
|
660
|
+
export_dir: Directory for all output files.
|
|
661
|
+
protocol_hint: Optional protocol name hint.
|
|
662
|
+
validate: Whether to perform replay validation.
|
|
663
|
+
auto_crc: Whether to automatically detect CRCs.
|
|
664
|
+
detect_crypto: Whether to detect crypto regions.
|
|
665
|
+
generate_tests: Whether to generate test vectors.
|
|
666
|
+
kwargs: Additional workflow options.
|
|
667
|
+
|
|
668
|
+
Returns:
|
|
669
|
+
Initialized _WorkflowContext.
|
|
670
|
+
|
|
671
|
+
Raises:
|
|
672
|
+
ValueError: If captures is invalid.
|
|
673
|
+
FileNotFoundError: If capture files don't exist.
|
|
674
|
+
"""
|
|
675
|
+
# Convert single capture to dict
|
|
676
|
+
if isinstance(captures, str):
|
|
677
|
+
capture_dict = {"primary": captures}
|
|
678
|
+
else:
|
|
679
|
+
capture_dict = captures
|
|
680
|
+
|
|
681
|
+
if not capture_dict:
|
|
682
|
+
msg = "No captures provided"
|
|
683
|
+
raise ValueError(msg)
|
|
684
|
+
|
|
685
|
+
# Validate all capture files exist
|
|
686
|
+
for path_str in capture_dict.values():
|
|
687
|
+
path = Path(path_str)
|
|
688
|
+
if not path.exists():
|
|
689
|
+
msg = f"Capture file not found: {path_str}"
|
|
690
|
+
raise FileNotFoundError(msg)
|
|
691
|
+
|
|
692
|
+
# Create export directory
|
|
693
|
+
export_path = Path(export_dir)
|
|
694
|
+
export_path.mkdir(parents=True, exist_ok=True)
|
|
695
|
+
|
|
696
|
+
# Extract verbose flag from kwargs
|
|
697
|
+
verbose = kwargs.get("verbose", True)
|
|
698
|
+
|
|
699
|
+
return _WorkflowContext(
|
|
700
|
+
capture_dict=capture_dict,
|
|
701
|
+
export_path=export_path,
|
|
702
|
+
verbose=verbose,
|
|
703
|
+
protocol_hint=protocol_hint,
|
|
704
|
+
auto_crc=auto_crc,
|
|
705
|
+
detect_crypto=detect_crypto,
|
|
706
|
+
generate_tests=generate_tests,
|
|
707
|
+
validate=validate,
|
|
708
|
+
kwargs=kwargs,
|
|
709
|
+
)
|
|
710
|
+
|
|
711
|
+
|
|
712
|
+
def _load_captures(capture_dict: dict[str, str]) -> dict[str, WaveformTrace]:
|
|
713
|
+
"""Load all capture files with auto-format detection.
|
|
714
|
+
|
|
715
|
+
Args:
|
|
716
|
+
capture_dict: Mapping of labels to file paths.
|
|
717
|
+
|
|
718
|
+
Returns:
|
|
719
|
+
Dict mapping labels to loaded WaveformTrace objects.
|
|
720
|
+
|
|
721
|
+
Raises:
|
|
722
|
+
ValueError: If file format not supported.
|
|
723
|
+
"""
|
|
724
|
+
import oscura.loaders as loaders
|
|
725
|
+
|
|
726
|
+
traces: dict[str, WaveformTrace] = {}
|
|
727
|
+
|
|
728
|
+
for label, path_str in capture_dict.items():
|
|
729
|
+
path = Path(path_str)
|
|
730
|
+
suffix = path.suffix.lower()
|
|
731
|
+
|
|
732
|
+
# Auto-detect format based on extension
|
|
733
|
+
if suffix in (".bin", ".dat"):
|
|
734
|
+
# Binary files - try to infer structure
|
|
735
|
+
trace = loaders.load_binary(str(path)) # type: ignore[attr-defined]
|
|
736
|
+
elif suffix == ".wfm":
|
|
737
|
+
trace = loaders.load_tektronix(str(path)) # type: ignore[attr-defined]
|
|
738
|
+
elif suffix == ".vcd":
|
|
739
|
+
trace = loaders.load_vcd(str(path)) # type: ignore[attr-defined]
|
|
740
|
+
elif suffix == ".wav":
|
|
741
|
+
trace = loaders.load_wav(str(path)) # type: ignore[attr-defined]
|
|
742
|
+
elif suffix == ".csv":
|
|
743
|
+
trace = loaders.load_csv(str(path)) # type: ignore[attr-defined]
|
|
744
|
+
elif suffix in (".pcap", ".pcapng"):
|
|
745
|
+
trace = loaders.load_pcap(str(path)) # type: ignore[attr-defined]
|
|
746
|
+
elif suffix == ".sr":
|
|
747
|
+
trace = loaders.load_sigrok(str(path)) # type: ignore[attr-defined]
|
|
748
|
+
else:
|
|
749
|
+
msg = f"Unsupported file format: {suffix}"
|
|
750
|
+
raise ValueError(msg)
|
|
751
|
+
|
|
752
|
+
traces[label] = trace
|
|
753
|
+
|
|
754
|
+
return traces
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
def _detect_protocol(
|
|
758
|
+
traces: dict[str, WaveformTrace], expected_baud_rates: list[int] | None = None
|
|
759
|
+
) -> tuple[str, float]:
|
|
760
|
+
"""Detect protocol from signal characteristics.
|
|
761
|
+
|
|
762
|
+
Args:
|
|
763
|
+
traces: Loaded waveform traces.
|
|
764
|
+
expected_baud_rates: Optional list of expected baud rates.
|
|
765
|
+
|
|
766
|
+
Returns:
|
|
767
|
+
Tuple of (protocol_name, confidence_score).
|
|
768
|
+
"""
|
|
769
|
+
# Use first trace for detection
|
|
770
|
+
_ = next(iter(traces.values()))
|
|
771
|
+
|
|
772
|
+
# Simple protocol detection based on signal characteristics
|
|
773
|
+
# In future, this would use more sophisticated detection algorithms
|
|
774
|
+
|
|
775
|
+
# For now, default to UART as most common
|
|
776
|
+
return "uart", 0.8
|
|
777
|
+
|
|
778
|
+
|
|
779
|
+
def _differential_analysis(traces: dict[str, WaveformTrace]) -> dict[str, Any]:
|
|
780
|
+
"""Perform differential analysis between multiple captures.
|
|
781
|
+
|
|
782
|
+
Args:
|
|
783
|
+
traces: Multiple labeled captures.
|
|
784
|
+
|
|
785
|
+
Returns:
|
|
786
|
+
Dict with differential analysis results.
|
|
787
|
+
"""
|
|
788
|
+
# Placeholder for differential analysis
|
|
789
|
+
# Would compare traces to identify state-dependent fields
|
|
790
|
+
results = {
|
|
791
|
+
"trace_count": len(traces),
|
|
792
|
+
"differences": [],
|
|
793
|
+
"constant_fields": [],
|
|
794
|
+
"variable_fields": [],
|
|
795
|
+
}
|
|
796
|
+
return results
|
|
797
|
+
|
|
798
|
+
|
|
799
|
+def _enhance_spec_with_differential(spec: ProtocolSpec, diff_results: dict[str, Any]) -> None:
+    """Enhance protocol spec with differential analysis insights.
+
+    Args:
+        spec: Protocol specification to enhance (modified in-place).
+        diff_results: Results from differential analysis.
+    """
+    # Placeholder - would add field annotations based on differential results
+
+
+def _infer_message_structure(frames: list[Any]) -> dict[str, Any]:
+    """Infer detailed message structure from decoded frames.
+
+    Args:
+        frames: List of decoded frames.
+
+    Returns:
+        Dict with inferred structure details.
+    """
+    # Placeholder for structure inference
+    return {"fields": [], "patterns": []}
+
+
+def _detect_crypto_regions(frames: list[Any]) -> list[dict[str, Any]]:
+    """Detect encrypted/compressed regions via entropy analysis.
+
+    Args:
+        frames: List of decoded frames.
+
+    Returns:
+        List of detected high-entropy regions.
+    """
+    regions = []
+
+    # Analyze entropy of each frame
+    for i, frame in enumerate(frames):
+        if hasattr(frame, "raw_bytes"):
+            data = frame.raw_bytes
+            if len(data) > 0:
+                # Calculate byte entropy
+                entropy = _calculate_entropy(data)
+                if entropy > 7.0:  # High entropy threshold
+                    regions.append(
+                        {
+                            "frame_index": i,
+                            "offset": 0,
+                            "length": len(data),
+                            "entropy": entropy,
+                        }
+                    )
+
+    return regions
+
+
+def _calculate_entropy(data: bytes) -> float:
+    """Calculate Shannon entropy of byte sequence.
+
+    Args:
+        data: Byte sequence.
+
+    Returns:
+        Entropy in bits (0-8 for bytes).
+    """
+    if not data:
+        return 0.0
+
+    # Count byte frequencies
+    counts = np.bincount(np.frombuffer(data, dtype=np.uint8), minlength=256)
+    probabilities = counts[counts > 0] / len(data)
+
+    # Shannon entropy
+    entropy = -np.sum(probabilities * np.log2(probabilities))
+    return float(entropy)
+
+
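The 7.0-bit threshold used by _detect_crypto_regions sits well above typical plaintext: a constant byte scores 0, English text lands near 4 bits/byte, and well-mixed ciphertext or compressed data approaches 8. A small standalone check (same formula, independent of the module) shows the scale:

    import numpy as np

    def shannon_entropy(data: bytes) -> float:
        counts = np.bincount(np.frombuffer(data, dtype=np.uint8), minlength=256)
        p = counts[counts > 0] / len(data)
        return float(-np.sum(p * np.log2(p)))

    print(shannon_entropy(b"\x00" * 64))                 # 0.0  (constant filler)
    print(shannon_entropy(b"The quick brown fox " * 8))  # ~3.8 (ASCII text)
    print(shannon_entropy(bytes(range(256))))            # 8.0  (uniform bytes)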
+def _recover_crc(frames: list[Any], checksum_types: list[str] | None = None) -> dict[str, Any]:
+    """Recover CRC/checksum algorithms.
+
+    Args:
+        frames: List of decoded frames.
+        checksum_types: Optional list of checksum types to try.
+
+    Returns:
+        Dict with CRC recovery results.
+    """
+    # Placeholder - would use existing checksum detection from reverse_engineer_signal
+    return {"checksum_type": None, "position": None, "confidence": 0.0}
+
+
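The stub always reports that no checksum was found. A common recovery tactic, shown purely as an illustration (the frame layout with a single trailing CRC-8 byte is an assumption), is to brute-force a few candidate polynomials and keep any that reproduce the observed check byte on every frame:

    def crc8(data: bytes, poly: int, init: int = 0x00) -> int:
        # Bitwise CRC-8, MSB first, no reflection
        crc = init
        for byte in data:
            crc ^= byte
            for _ in range(8):
                crc = ((crc << 1) ^ poly) & 0xFF if crc & 0x80 else (crc << 1) & 0xFF
        return crc

    def find_crc8_poly(frames: list[bytes]) -> int | None:
        # Return the first polynomial that matches every frame's last byte
        for poly in (0x07, 0x31, 0x1D, 0x9B):
            if all(crc8(frame[:-1], poly) == frame[-1] for frame in frames):
                return poly
        return None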
+def _extract_state_machine(frames: list[Any]) -> dict[str, Any]:
+    """Extract state machine from message sequences.
+
+    Args:
+        frames: List of decoded frames.
+
+    Returns:
+        Dict with state machine representation.
+    """
+    # Placeholder for state machine extraction using RPNI or similar
+    return {"states": [], "transitions": [], "initial_state": None}
+
+
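RPNI-style inference is the stated goal; a much simpler first pass, assuming each decoded frame can be reduced to a message-type label (an assumption about the frame objects), is to count observed transitions between consecutive types:

    from collections import Counter
    from typing import Any

    def count_transitions(message_types: list[str]) -> dict[str, Any]:
        # One state per message type; edge weights are observation counts
        transitions = Counter(zip(message_types, message_types[1:]))
        return {
            "states": sorted(set(message_types)),
            "transitions": [
                {"from": src, "to": dst, "count": n}
                for (src, dst), n in transitions.items()
            ],
            "initial_state": message_types[0] if message_types else None,
        }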
+def _generate_wireshark_dissector(spec: ProtocolSpec, output_path: Path) -> None:
+    """Generate Wireshark Lua dissector.
+
+    Args:
+        spec: Protocol specification.
+        output_path: Path to write .lua file.
+    """
+    # Generate basic Lua dissector
+    lua_code = f'''-- Wireshark dissector for {spec.name}
+-- Auto-generated by Oscura
+
+local proto = Proto("{spec.name.lower().replace(" ", "_")}", "{spec.name}")
+
+-- Fields
+local fields = proto.fields
+'''
+
+    for spec_field in spec.fields:
+        lua_code += f'fields.{spec_field.name} = ProtoField.bytes("{spec.name.lower()}.{spec_field.name}", "{spec_field.name}")\n'
+
+    lua_code += """
+function proto.dissector(buffer, pinfo, tree)
+    pinfo.cols.protocol = proto.name
+    local subtree = tree:add(proto, buffer(), proto.name)
+    -- Field parsing would go here
+end
+
+DissectorTable.get("udp.port"):add(0, proto)
+"""
+
+    output_path.write_text(lua_code)
+    logger.info(f"Generated Wireshark dissector: {output_path}")
+
+
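Note that the emitted template registers the dissector on UDP port 0 as a placeholder, so the port line normally needs editing before the script is useful. For a quick check, the generated file can be loaded via Wireshark's Lua hook, e.g. tshark -X lua_script:myproto.lua -r capture.pcap (file names here are illustrative).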
+def _generate_scapy_layer(spec: ProtocolSpec, output_path: Path) -> None:
+    """Generate Scapy protocol layer.
+
+    Args:
+        spec: Protocol specification.
+        output_path: Path to write .py file.
+    """
+    class_name = spec.name.replace(" ", "")
+
+    scapy_code = f'''"""Scapy layer for {spec.name}"""
+
+from scapy.packet import Packet
+from scapy.fields import ByteField, XByteField
+
+class {class_name}(Packet):
+    name = "{spec.name}"
+    fields_desc = [
+'''
+
+    for spec_field in spec.fields:
+        field_type = "ByteField" if spec_field.field_type == "uint8" else "XByteField"
+        scapy_code += f'        {field_type}("{spec_field.name}", 0),\n'
+
+    scapy_code += "    ]\n"
+
+    output_path.write_text(scapy_code)
+    logger.info(f"Generated Scapy layer: {output_path}")
+
+
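For a hypothetical spec named "My Protocol" with two uint8 fields, header and command, the generator above would emit roughly the following module, which can be exercised directly with Scapy:

    """Scapy layer for My Protocol"""

    from scapy.packet import Packet
    from scapy.fields import ByteField, XByteField

    class MyProtocol(Packet):
        name = "My Protocol"
        fields_desc = [
            ByteField("header", 0),
            ByteField("command", 0),
        ]

    # Quick check: MyProtocol(b"\xaa\x01").show() parses header=0xaa, command=0x01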
+def _generate_kaitai_struct(spec: ProtocolSpec, output_path: Path) -> None:
+    """Generate Kaitai Struct definition.
+
+    Args:
+        spec: Protocol specification.
+        output_path: Path to write .ksy file.
+    """
+    kaitai_yaml = f"""meta:
+  id: {spec.name.lower().replace(" ", "_")}
+  title: {spec.name}
+  endian: le
+seq:
+"""
+
+    for spec_field in spec.fields:
+        kaitai_yaml += f"""  - id: {spec_field.name}
+    type: u1
+"""
+
+    output_path.write_text(kaitai_yaml)
+    logger.info(f"Generated Kaitai struct: {output_path}")
+
+
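For the same hypothetical two-field spec, the emitted .ksy file would read:

    meta:
      id: my_protocol
      title: My Protocol
      endian: le
    seq:
      - id: header
        type: u1
      - id: command
        type: u1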
+def _create_test_vectors(frames: list[Any], spec: ProtocolSpec, output_path: Path) -> None:
+    """Create test vectors for validation.
+
+    Args:
+        frames: Decoded frames.
+        spec: Protocol specification.
+        output_path: Path to write .json file.
+    """
+    test_vectors = []
+
+    for i, frame in enumerate(frames[:10]):  # First 10 frames
+        if hasattr(frame, "raw_bytes"):
+            test_vectors.append(
+                {
+                    "index": i,
+                    "raw_hex": frame.raw_bytes.hex(),
+                    "expected_fields": {},
+                }
+            )
+
+    vectors_data = {
+        "protocol": spec.name,
+        "version": "1.0",
+        "test_vectors": test_vectors,
+    }
+
+    with output_path.open("w") as f:
+        json.dump(vectors_data, f, indent=2)
+
+    logger.info(f"Generated {len(test_vectors)} test vectors: {output_path}")
+
+
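Only the first ten frames are kept. The resulting file has this shape (formatting condensed, hex payloads illustrative):

    {
      "protocol": "My Protocol",
      "version": "1.0",
      "test_vectors": [
        {"index": 0, "raw_hex": "aa0110", "expected_fields": {}},
        {"index": 1, "raw_hex": "aa0210", "expected_fields": {}}
      ]
    }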
+def _generate_report(
+    spec: ProtocolSpec,
+    partial_results: dict[str, Any],
+    output_path: Path,
+    warnings: list[str],
+) -> None:
+    """Generate HTML report with analysis results.
+
+    Args:
+        spec: Protocol specification.
+        partial_results: All partial results from workflow.
+        output_path: Path to write .html file.
+        warnings: List of warnings.
+    """
+    html = f"""<!DOCTYPE html>
+<html>
+<head>
+    <title>{spec.name} - Reverse Engineering Report</title>
+    <style>
+        body {{ font-family: Arial, sans-serif; margin: 20px; }}
+        .section {{ margin: 20px 0; padding: 15px; border: 1px solid #ddd; }}
+        .warning {{ background-color: #fff3cd; padding: 10px; margin: 5px 0; }}
+        table {{ border-collapse: collapse; width: 100%; }}
+        th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
+        th {{ background-color: #f2f2f2; }}
+    </style>
+</head>
+<body>
+    <h1>{spec.name} - Reverse Engineering Report</h1>
+
+    <div class="section">
+        <h2>Protocol Summary</h2>
+        <table>
+            <tr><th>Property</th><th>Value</th></tr>
+            <tr><td>Name</td><td>{spec.name}</td></tr>
+            <tr><td>Baud Rate</td><td>{spec.baud_rate} bps</td></tr>
+            <tr><td>Frame Format</td><td>{spec.frame_format}</td></tr>
+            <tr><td>Sync Pattern</td><td>{spec.sync_pattern}</td></tr>
+            <tr><td>Frame Length</td><td>{spec.frame_length or "Variable"}</td></tr>
+            <tr><td>Checksum</td><td>{spec.checksum_type or "None detected"}</td></tr>
+            <tr><td>Confidence</td><td>{spec.confidence:.2%}</td></tr>
+        </table>
+    </div>
+
+    <div class="section">
+        <h2>Fields ({len(spec.fields)})</h2>
+        <table>
+            <tr><th>Name</th><th>Offset</th><th>Size</th><th>Type</th></tr>
+"""
+
+    for spec_field in spec.fields:
+        html += f"""            <tr>
+                <td>{spec_field.name}</td>
+                <td>{spec_field.offset}</td>
+                <td>{spec_field.size}</td>
+                <td>{spec_field.field_type}</td>
+            </tr>
+"""
+
+    html += """        </table>
+    </div>
+"""
+
+    if warnings:
+        html += """    <div class="section">
+        <h2>Warnings</h2>
+"""
+        for warning in warnings:
+            html += f'        <div class="warning">{warning}</div>\n'
+        html += "    </div>\n"
+
+    html += """</body>
+</html>"""
+
+    output_path.write_text(html)
+    logger.info(f"Generated report: {output_path}")
+
+
+def _replay_validation(spec: ProtocolSpec, target_device: str, frames: list[Any]) -> dict[str, Any]:
+    """Perform replay validation on target hardware.
+
+    Args:
+        spec: Protocol specification.
+        target_device: Device path for validation.
+        frames: Frames to replay.
+
+    Returns:
+        Dict with validation results.
+    """
+    # Placeholder for replay validation
+    return {
+        "replayed": 0,
+        "successful": 0,
+        "failed": 0,
+        "success_rate": 0.0,
+    }
+
+
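The placeholder never touches hardware. A minimal replay loop, assuming a UART target and frames already reduced to raw bytes, and using pyserial (which this module does not actually depend on), might look like:

    import serial  # pyserial

    def replay_frames(device: str, baud_rate: int, frames: list[bytes]) -> dict[str, float]:
        successful = 0
        with serial.Serial(device, baud_rate, timeout=0.5) as port:
            for payload in frames:
                port.write(payload)
                # Treat any response within the timeout as a success
                if port.read(64):
                    successful += 1
        total = len(frames)
        return {
            "replayed": total,
            "successful": successful,
            "failed": total - successful,
            "success_rate": successful / total if total else 0.0,
        }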
+def _calculate_overall_confidence(
+    spec: ProtocolSpec, partial_results: dict[str, Any], warnings: list[str]
+) -> float:
+    """Calculate overall workflow confidence score.
+
+    Args:
+        spec: Protocol specification.
+        partial_results: All partial results.
+        warnings: List of warnings.
+
+    Returns:
+        Overall confidence score (0-1).
+    """
+    # Start with protocol spec confidence
+    confidence = spec.confidence
+
+    # Penalize for warnings
+    warning_penalty = len(warnings) * 0.05
+    confidence = max(0.0, confidence - warning_penalty)
+
+    # Bonus for successful steps
+    successful_steps = sum(
+        1 for key in ["traces", "reverse_engineering", "state_machine"] if key in partial_results
+    )
+    step_bonus = successful_steps * 0.02
+    confidence = min(1.0, confidence + step_bonus)
+
+    return confidence
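Worked example of the scoring: with spec.confidence = 0.80, two warnings (penalty 2 × 0.05 = 0.10) and all three of "traces", "reverse_engineering" and "state_machine" present in partial_results (bonus 3 × 0.02 = 0.06), the overall score is min(1.0, max(0.0, 0.80 - 0.10) + 0.06) = 0.76.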