oscura 0.5.0__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +169 -167
- oscura/analyzers/__init__.py +3 -0
- oscura/analyzers/classification.py +659 -0
- oscura/analyzers/digital/__init__.py +0 -48
- oscura/analyzers/digital/edges.py +325 -65
- oscura/analyzers/digital/extraction.py +0 -195
- oscura/analyzers/digital/quality.py +293 -166
- oscura/analyzers/digital/timing.py +260 -115
- oscura/analyzers/digital/timing_numba.py +334 -0
- oscura/analyzers/entropy.py +605 -0
- oscura/analyzers/eye/diagram.py +176 -109
- oscura/analyzers/eye/metrics.py +5 -5
- oscura/analyzers/jitter/__init__.py +6 -4
- oscura/analyzers/jitter/ber.py +52 -52
- oscura/analyzers/jitter/classification.py +156 -0
- oscura/analyzers/jitter/decomposition.py +163 -113
- oscura/analyzers/jitter/spectrum.py +80 -64
- oscura/analyzers/ml/__init__.py +39 -0
- oscura/analyzers/ml/features.py +600 -0
- oscura/analyzers/ml/signal_classifier.py +604 -0
- oscura/analyzers/packet/daq.py +246 -158
- oscura/analyzers/packet/parser.py +12 -1
- oscura/analyzers/packet/payload.py +50 -2110
- oscura/analyzers/packet/payload_analysis.py +361 -181
- oscura/analyzers/packet/payload_patterns.py +133 -70
- oscura/analyzers/packet/stream.py +84 -23
- oscura/analyzers/patterns/__init__.py +26 -5
- oscura/analyzers/patterns/anomaly_detection.py +908 -0
- oscura/analyzers/patterns/clustering.py +169 -108
- oscura/analyzers/patterns/clustering_optimized.py +227 -0
- oscura/analyzers/patterns/discovery.py +1 -1
- oscura/analyzers/patterns/matching.py +581 -197
- oscura/analyzers/patterns/pattern_mining.py +778 -0
- oscura/analyzers/patterns/periodic.py +121 -38
- oscura/analyzers/patterns/sequences.py +175 -78
- oscura/analyzers/power/conduction.py +1 -1
- oscura/analyzers/power/soa.py +6 -6
- oscura/analyzers/power/switching.py +250 -110
- oscura/analyzers/protocol/__init__.py +17 -1
- oscura/analyzers/protocols/__init__.py +1 -22
- oscura/analyzers/protocols/base.py +6 -6
- oscura/analyzers/protocols/ble/__init__.py +38 -0
- oscura/analyzers/protocols/ble/analyzer.py +809 -0
- oscura/analyzers/protocols/ble/uuids.py +288 -0
- oscura/analyzers/protocols/can.py +257 -127
- oscura/analyzers/protocols/can_fd.py +107 -80
- oscura/analyzers/protocols/flexray.py +139 -80
- oscura/analyzers/protocols/hdlc.py +93 -58
- oscura/analyzers/protocols/i2c.py +247 -106
- oscura/analyzers/protocols/i2s.py +138 -86
- oscura/analyzers/protocols/industrial/__init__.py +40 -0
- oscura/analyzers/protocols/industrial/bacnet/__init__.py +33 -0
- oscura/analyzers/protocols/industrial/bacnet/analyzer.py +708 -0
- oscura/analyzers/protocols/industrial/bacnet/encoding.py +412 -0
- oscura/analyzers/protocols/industrial/bacnet/services.py +622 -0
- oscura/analyzers/protocols/industrial/ethercat/__init__.py +30 -0
- oscura/analyzers/protocols/industrial/ethercat/analyzer.py +474 -0
- oscura/analyzers/protocols/industrial/ethercat/mailbox.py +339 -0
- oscura/analyzers/protocols/industrial/ethercat/topology.py +166 -0
- oscura/analyzers/protocols/industrial/modbus/__init__.py +31 -0
- oscura/analyzers/protocols/industrial/modbus/analyzer.py +525 -0
- oscura/analyzers/protocols/industrial/modbus/crc.py +79 -0
- oscura/analyzers/protocols/industrial/modbus/functions.py +436 -0
- oscura/analyzers/protocols/industrial/opcua/__init__.py +21 -0
- oscura/analyzers/protocols/industrial/opcua/analyzer.py +552 -0
- oscura/analyzers/protocols/industrial/opcua/datatypes.py +446 -0
- oscura/analyzers/protocols/industrial/opcua/services.py +264 -0
- oscura/analyzers/protocols/industrial/profinet/__init__.py +23 -0
- oscura/analyzers/protocols/industrial/profinet/analyzer.py +441 -0
- oscura/analyzers/protocols/industrial/profinet/dcp.py +263 -0
- oscura/analyzers/protocols/industrial/profinet/ptcp.py +200 -0
- oscura/analyzers/protocols/jtag.py +180 -98
- oscura/analyzers/protocols/lin.py +219 -114
- oscura/analyzers/protocols/manchester.py +4 -4
- oscura/analyzers/protocols/onewire.py +253 -149
- oscura/analyzers/protocols/parallel_bus/__init__.py +20 -0
- oscura/analyzers/protocols/parallel_bus/centronics.py +92 -0
- oscura/analyzers/protocols/parallel_bus/gpib.py +137 -0
- oscura/analyzers/protocols/spi.py +192 -95
- oscura/analyzers/protocols/swd.py +321 -167
- oscura/analyzers/protocols/uart.py +267 -125
- oscura/analyzers/protocols/usb.py +235 -131
- oscura/analyzers/side_channel/power.py +17 -12
- oscura/analyzers/signal/__init__.py +15 -0
- oscura/analyzers/signal/timing_analysis.py +1086 -0
- oscura/analyzers/signal_integrity/__init__.py +4 -1
- oscura/analyzers/signal_integrity/sparams.py +2 -19
- oscura/analyzers/spectral/chunked.py +129 -60
- oscura/analyzers/spectral/chunked_fft.py +300 -94
- oscura/analyzers/spectral/chunked_wavelet.py +100 -80
- oscura/analyzers/statistical/checksum.py +376 -217
- oscura/analyzers/statistical/classification.py +229 -107
- oscura/analyzers/statistical/entropy.py +78 -53
- oscura/analyzers/statistics/correlation.py +407 -211
- oscura/analyzers/statistics/outliers.py +2 -2
- oscura/analyzers/statistics/streaming.py +30 -5
- oscura/analyzers/validation.py +216 -101
- oscura/analyzers/waveform/measurements.py +9 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +31 -15
- oscura/analyzers/waveform/spectral.py +500 -228
- oscura/api/__init__.py +31 -5
- oscura/api/dsl/__init__.py +582 -0
- oscura/{dsl → api/dsl}/commands.py +43 -76
- oscura/{dsl → api/dsl}/interpreter.py +26 -51
- oscura/{dsl → api/dsl}/parser.py +107 -77
- oscura/{dsl → api/dsl}/repl.py +2 -2
- oscura/api/dsl.py +1 -1
- oscura/{integrations → api/integrations}/__init__.py +1 -1
- oscura/{integrations → api/integrations}/llm.py +201 -102
- oscura/api/operators.py +3 -3
- oscura/api/optimization.py +144 -30
- oscura/api/rest_server.py +921 -0
- oscura/api/server/__init__.py +17 -0
- oscura/api/server/dashboard.py +850 -0
- oscura/api/server/static/README.md +34 -0
- oscura/api/server/templates/base.html +181 -0
- oscura/api/server/templates/export.html +120 -0
- oscura/api/server/templates/home.html +284 -0
- oscura/api/server/templates/protocols.html +58 -0
- oscura/api/server/templates/reports.html +43 -0
- oscura/api/server/templates/session_detail.html +89 -0
- oscura/api/server/templates/sessions.html +83 -0
- oscura/api/server/templates/waveforms.html +73 -0
- oscura/automotive/__init__.py +8 -1
- oscura/automotive/can/__init__.py +10 -0
- oscura/automotive/can/checksum.py +3 -1
- oscura/automotive/can/dbc_generator.py +590 -0
- oscura/automotive/can/message_wrapper.py +121 -74
- oscura/automotive/can/patterns.py +98 -21
- oscura/automotive/can/session.py +292 -56
- oscura/automotive/can/state_machine.py +6 -3
- oscura/automotive/can/stimulus_response.py +97 -75
- oscura/automotive/dbc/__init__.py +10 -2
- oscura/automotive/dbc/generator.py +84 -56
- oscura/automotive/dbc/parser.py +6 -6
- oscura/automotive/dtc/data.json +2763 -0
- oscura/automotive/dtc/database.py +2 -2
- oscura/automotive/flexray/__init__.py +31 -0
- oscura/automotive/flexray/analyzer.py +504 -0
- oscura/automotive/flexray/crc.py +185 -0
- oscura/automotive/flexray/fibex.py +449 -0
- oscura/automotive/j1939/__init__.py +45 -8
- oscura/automotive/j1939/analyzer.py +605 -0
- oscura/automotive/j1939/spns.py +326 -0
- oscura/automotive/j1939/transport.py +306 -0
- oscura/automotive/lin/__init__.py +47 -0
- oscura/automotive/lin/analyzer.py +612 -0
- oscura/automotive/loaders/blf.py +13 -2
- oscura/automotive/loaders/csv_can.py +143 -72
- oscura/automotive/loaders/dispatcher.py +50 -2
- oscura/automotive/loaders/mdf.py +86 -45
- oscura/automotive/loaders/pcap.py +111 -61
- oscura/automotive/uds/__init__.py +4 -0
- oscura/automotive/uds/analyzer.py +725 -0
- oscura/automotive/uds/decoder.py +140 -58
- oscura/automotive/uds/models.py +7 -1
- oscura/automotive/visualization.py +1 -1
- oscura/cli/analyze.py +348 -0
- oscura/cli/batch.py +142 -122
- oscura/cli/benchmark.py +275 -0
- oscura/cli/characterize.py +137 -82
- oscura/cli/compare.py +224 -131
- oscura/cli/completion.py +250 -0
- oscura/cli/config_cmd.py +361 -0
- oscura/cli/decode.py +164 -87
- oscura/cli/export.py +286 -0
- oscura/cli/main.py +115 -31
- oscura/{onboarding → cli/onboarding}/__init__.py +3 -3
- oscura/{onboarding → cli/onboarding}/help.py +80 -58
- oscura/{onboarding → cli/onboarding}/tutorials.py +97 -72
- oscura/{onboarding → cli/onboarding}/wizard.py +55 -36
- oscura/cli/progress.py +147 -0
- oscura/cli/shell.py +157 -135
- oscura/cli/validate_cmd.py +204 -0
- oscura/cli/visualize.py +158 -0
- oscura/convenience.py +125 -79
- oscura/core/__init__.py +4 -2
- oscura/core/backend_selector.py +3 -3
- oscura/core/cache.py +126 -15
- oscura/core/cancellation.py +1 -1
- oscura/{config → core/config}/__init__.py +20 -11
- oscura/{config → core/config}/defaults.py +1 -1
- oscura/{config → core/config}/loader.py +7 -5
- oscura/{config → core/config}/memory.py +5 -5
- oscura/{config → core/config}/migration.py +1 -1
- oscura/{config → core/config}/pipeline.py +99 -23
- oscura/{config → core/config}/preferences.py +1 -1
- oscura/{config → core/config}/protocol.py +3 -3
- oscura/{config → core/config}/schema.py +426 -272
- oscura/{config → core/config}/settings.py +1 -1
- oscura/{config → core/config}/thresholds.py +195 -153
- oscura/core/correlation.py +5 -6
- oscura/core/cross_domain.py +0 -2
- oscura/core/debug.py +9 -5
- oscura/{extensibility → core/extensibility}/docs.py +158 -70
- oscura/{extensibility → core/extensibility}/extensions.py +160 -76
- oscura/{extensibility → core/extensibility}/logging.py +1 -1
- oscura/{extensibility → core/extensibility}/measurements.py +1 -1
- oscura/{extensibility → core/extensibility}/plugins.py +1 -1
- oscura/{extensibility → core/extensibility}/templates.py +73 -3
- oscura/{extensibility → core/extensibility}/validation.py +1 -1
- oscura/core/gpu_backend.py +11 -7
- oscura/core/log_query.py +101 -11
- oscura/core/logging.py +126 -54
- oscura/core/logging_advanced.py +5 -5
- oscura/core/memory_limits.py +108 -70
- oscura/core/memory_monitor.py +2 -2
- oscura/core/memory_progress.py +7 -7
- oscura/core/memory_warnings.py +1 -1
- oscura/core/numba_backend.py +13 -13
- oscura/{plugins → core/plugins}/__init__.py +9 -9
- oscura/{plugins → core/plugins}/base.py +7 -7
- oscura/{plugins → core/plugins}/cli.py +3 -3
- oscura/{plugins → core/plugins}/discovery.py +186 -106
- oscura/{plugins → core/plugins}/lifecycle.py +1 -1
- oscura/{plugins → core/plugins}/manager.py +7 -7
- oscura/{plugins → core/plugins}/registry.py +3 -3
- oscura/{plugins → core/plugins}/versioning.py +1 -1
- oscura/core/progress.py +16 -1
- oscura/core/provenance.py +8 -2
- oscura/{schemas → core/schemas}/__init__.py +2 -2
- oscura/core/schemas/bus_configuration.json +322 -0
- oscura/core/schemas/device_mapping.json +182 -0
- oscura/core/schemas/packet_format.json +418 -0
- oscura/core/schemas/protocol_definition.json +363 -0
- oscura/core/types.py +4 -0
- oscura/core/uncertainty.py +3 -3
- oscura/correlation/__init__.py +52 -0
- oscura/correlation/multi_protocol.py +811 -0
- oscura/discovery/auto_decoder.py +117 -35
- oscura/discovery/comparison.py +191 -86
- oscura/discovery/quality_validator.py +155 -68
- oscura/discovery/signal_detector.py +196 -79
- oscura/export/__init__.py +18 -20
- oscura/export/kaitai_struct.py +513 -0
- oscura/export/scapy_layer.py +801 -0
- oscura/export/wireshark/README.md +15 -15
- oscura/export/wireshark/generator.py +1 -1
- oscura/export/wireshark/templates/dissector.lua.j2 +2 -2
- oscura/export/wireshark_dissector.py +746 -0
- oscura/guidance/wizard.py +207 -111
- oscura/hardware/__init__.py +19 -0
- oscura/{acquisition → hardware/acquisition}/__init__.py +4 -4
- oscura/{acquisition → hardware/acquisition}/file.py +2 -2
- oscura/{acquisition → hardware/acquisition}/hardware.py +7 -7
- oscura/{acquisition → hardware/acquisition}/saleae.py +15 -12
- oscura/{acquisition → hardware/acquisition}/socketcan.py +1 -1
- oscura/{acquisition → hardware/acquisition}/streaming.py +2 -2
- oscura/{acquisition → hardware/acquisition}/synthetic.py +3 -3
- oscura/{acquisition → hardware/acquisition}/visa.py +33 -11
- oscura/hardware/firmware/__init__.py +29 -0
- oscura/hardware/firmware/pattern_recognition.py +874 -0
- oscura/hardware/hal_detector.py +736 -0
- oscura/hardware/security/__init__.py +37 -0
- oscura/hardware/security/side_channel_detector.py +1126 -0
- oscura/inference/__init__.py +4 -0
- oscura/inference/active_learning/README.md +7 -7
- oscura/inference/active_learning/observation_table.py +4 -1
- oscura/inference/alignment.py +216 -123
- oscura/inference/bayesian.py +113 -33
- oscura/inference/crc_reverse.py +101 -55
- oscura/inference/logic.py +6 -2
- oscura/inference/message_format.py +342 -183
- oscura/inference/protocol.py +95 -44
- oscura/inference/protocol_dsl.py +180 -82
- oscura/inference/signal_intelligence.py +1439 -706
- oscura/inference/spectral.py +99 -57
- oscura/inference/state_machine.py +810 -158
- oscura/inference/stream.py +270 -110
- oscura/iot/__init__.py +34 -0
- oscura/iot/coap/__init__.py +32 -0
- oscura/iot/coap/analyzer.py +668 -0
- oscura/iot/coap/options.py +212 -0
- oscura/iot/lorawan/__init__.py +21 -0
- oscura/iot/lorawan/crypto.py +206 -0
- oscura/iot/lorawan/decoder.py +801 -0
- oscura/iot/lorawan/mac_commands.py +341 -0
- oscura/iot/mqtt/__init__.py +27 -0
- oscura/iot/mqtt/analyzer.py +999 -0
- oscura/iot/mqtt/properties.py +315 -0
- oscura/iot/zigbee/__init__.py +31 -0
- oscura/iot/zigbee/analyzer.py +615 -0
- oscura/iot/zigbee/security.py +153 -0
- oscura/iot/zigbee/zcl.py +349 -0
- oscura/jupyter/display.py +125 -45
- oscura/{exploratory → jupyter/exploratory}/__init__.py +8 -8
- oscura/{exploratory → jupyter/exploratory}/error_recovery.py +298 -141
- oscura/jupyter/exploratory/fuzzy.py +746 -0
- oscura/{exploratory → jupyter/exploratory}/fuzzy_advanced.py +258 -100
- oscura/{exploratory → jupyter/exploratory}/legacy.py +464 -242
- oscura/{exploratory → jupyter/exploratory}/parse.py +167 -145
- oscura/{exploratory → jupyter/exploratory}/recovery.py +119 -87
- oscura/jupyter/exploratory/sync.py +612 -0
- oscura/{exploratory → jupyter/exploratory}/unknown.py +299 -176
- oscura/jupyter/magic.py +4 -4
- oscura/{ui → jupyter/ui}/__init__.py +2 -2
- oscura/{ui → jupyter/ui}/formatters.py +3 -3
- oscura/{ui → jupyter/ui}/progressive_display.py +153 -82
- oscura/loaders/__init__.py +171 -63
- oscura/loaders/binary.py +88 -1
- oscura/loaders/chipwhisperer.py +153 -137
- oscura/loaders/configurable.py +208 -86
- oscura/loaders/csv_loader.py +458 -215
- oscura/loaders/hdf5_loader.py +278 -119
- oscura/loaders/lazy.py +87 -54
- oscura/loaders/mmap_loader.py +1 -1
- oscura/loaders/numpy_loader.py +253 -116
- oscura/loaders/pcap.py +226 -151
- oscura/loaders/rigol.py +110 -49
- oscura/loaders/sigrok.py +201 -78
- oscura/loaders/tdms.py +81 -58
- oscura/loaders/tektronix.py +291 -174
- oscura/loaders/touchstone.py +182 -87
- oscura/loaders/vcd.py +215 -117
- oscura/loaders/wav.py +155 -68
- oscura/reporting/__init__.py +9 -7
- oscura/reporting/analyze.py +352 -146
- oscura/reporting/argument_preparer.py +69 -14
- oscura/reporting/auto_report.py +97 -61
- oscura/reporting/batch.py +131 -58
- oscura/reporting/chart_selection.py +57 -45
- oscura/reporting/comparison.py +63 -17
- oscura/reporting/content/executive.py +76 -24
- oscura/reporting/core_formats/multi_format.py +11 -8
- oscura/reporting/engine.py +312 -158
- oscura/reporting/enhanced_reports.py +949 -0
- oscura/reporting/export.py +86 -43
- oscura/reporting/formatting/numbers.py +69 -42
- oscura/reporting/html.py +139 -58
- oscura/reporting/index.py +137 -65
- oscura/reporting/output.py +158 -67
- oscura/reporting/pdf.py +67 -102
- oscura/reporting/plots.py +191 -112
- oscura/reporting/sections.py +88 -47
- oscura/reporting/standards.py +104 -61
- oscura/reporting/summary_generator.py +75 -55
- oscura/reporting/tables.py +138 -54
- oscura/reporting/templates/enhanced/protocol_re.html +525 -0
- oscura/reporting/templates/index.md +13 -13
- oscura/sessions/__init__.py +14 -23
- oscura/sessions/base.py +3 -3
- oscura/sessions/blackbox.py +106 -10
- oscura/sessions/generic.py +2 -2
- oscura/sessions/legacy.py +783 -0
- oscura/side_channel/__init__.py +63 -0
- oscura/side_channel/dpa.py +1025 -0
- oscura/utils/__init__.py +15 -1
- oscura/utils/autodetect.py +1 -5
- oscura/utils/bitwise.py +118 -0
- oscura/{builders → utils/builders}/__init__.py +1 -1
- oscura/{comparison → utils/comparison}/__init__.py +6 -6
- oscura/{comparison → utils/comparison}/compare.py +202 -101
- oscura/{comparison → utils/comparison}/golden.py +83 -63
- oscura/{comparison → utils/comparison}/limits.py +313 -89
- oscura/{comparison → utils/comparison}/mask.py +151 -45
- oscura/{comparison → utils/comparison}/trace_diff.py +1 -1
- oscura/{comparison → utils/comparison}/visualization.py +147 -89
- oscura/{component → utils/component}/__init__.py +3 -3
- oscura/{component → utils/component}/impedance.py +122 -58
- oscura/{component → utils/component}/reactive.py +165 -168
- oscura/{component → utils/component}/transmission_line.py +3 -3
- oscura/{filtering → utils/filtering}/__init__.py +6 -6
- oscura/{filtering → utils/filtering}/base.py +1 -1
- oscura/{filtering → utils/filtering}/convenience.py +2 -2
- oscura/{filtering → utils/filtering}/design.py +169 -93
- oscura/{filtering → utils/filtering}/filters.py +2 -2
- oscura/{filtering → utils/filtering}/introspection.py +2 -2
- oscura/utils/geometry.py +31 -0
- oscura/utils/imports.py +184 -0
- oscura/utils/lazy.py +1 -1
- oscura/{math → utils/math}/__init__.py +2 -2
- oscura/{math → utils/math}/arithmetic.py +114 -48
- oscura/{math → utils/math}/interpolation.py +139 -106
- oscura/utils/memory.py +129 -66
- oscura/utils/memory_advanced.py +92 -9
- oscura/utils/memory_extensions.py +10 -8
- oscura/{optimization → utils/optimization}/__init__.py +1 -1
- oscura/{optimization → utils/optimization}/search.py +2 -2
- oscura/utils/performance/__init__.py +58 -0
- oscura/utils/performance/caching.py +889 -0
- oscura/utils/performance/lsh_clustering.py +333 -0
- oscura/utils/performance/memory_optimizer.py +699 -0
- oscura/utils/performance/optimizations.py +675 -0
- oscura/utils/performance/parallel.py +654 -0
- oscura/utils/performance/profiling.py +661 -0
- oscura/{pipeline → utils/pipeline}/base.py +1 -1
- oscura/{pipeline → utils/pipeline}/composition.py +11 -3
- oscura/{pipeline → utils/pipeline}/parallel.py +3 -2
- oscura/{pipeline → utils/pipeline}/pipeline.py +1 -1
- oscura/{pipeline → utils/pipeline}/reverse_engineering.py +412 -221
- oscura/{search → utils/search}/__init__.py +3 -3
- oscura/{search → utils/search}/anomaly.py +188 -58
- oscura/utils/search/context.py +294 -0
- oscura/{search → utils/search}/pattern.py +138 -10
- oscura/utils/serial.py +51 -0
- oscura/utils/storage/__init__.py +61 -0
- oscura/utils/storage/database.py +1166 -0
- oscura/{streaming → utils/streaming}/chunked.py +302 -143
- oscura/{streaming → utils/streaming}/progressive.py +1 -1
- oscura/{streaming → utils/streaming}/realtime.py +3 -2
- oscura/{triggering → utils/triggering}/__init__.py +6 -6
- oscura/{triggering → utils/triggering}/base.py +6 -6
- oscura/{triggering → utils/triggering}/edge.py +2 -2
- oscura/{triggering → utils/triggering}/pattern.py +2 -2
- oscura/{triggering → utils/triggering}/pulse.py +115 -74
- oscura/{triggering → utils/triggering}/window.py +2 -2
- oscura/utils/validation.py +32 -0
- oscura/validation/__init__.py +121 -0
- oscura/{compliance → validation/compliance}/__init__.py +5 -5
- oscura/{compliance → validation/compliance}/advanced.py +5 -5
- oscura/{compliance → validation/compliance}/masks.py +1 -1
- oscura/{compliance → validation/compliance}/reporting.py +127 -53
- oscura/{compliance → validation/compliance}/testing.py +114 -52
- oscura/validation/compliance_tests.py +915 -0
- oscura/validation/fuzzer.py +990 -0
- oscura/validation/grammar_tests.py +596 -0
- oscura/validation/grammar_validator.py +904 -0
- oscura/validation/hil_testing.py +977 -0
- oscura/{quality → validation/quality}/__init__.py +4 -4
- oscura/{quality → validation/quality}/ensemble.py +251 -171
- oscura/{quality → validation/quality}/explainer.py +3 -3
- oscura/{quality → validation/quality}/scoring.py +1 -1
- oscura/{quality → validation/quality}/warnings.py +4 -4
- oscura/validation/regression_suite.py +808 -0
- oscura/validation/replay.py +788 -0
- oscura/{testing → validation/testing}/__init__.py +2 -2
- oscura/{testing → validation/testing}/synthetic.py +5 -5
- oscura/visualization/__init__.py +9 -0
- oscura/visualization/accessibility.py +1 -1
- oscura/visualization/annotations.py +64 -67
- oscura/visualization/colors.py +7 -7
- oscura/visualization/digital.py +180 -81
- oscura/visualization/eye.py +236 -85
- oscura/visualization/interactive.py +320 -143
- oscura/visualization/jitter.py +587 -247
- oscura/visualization/layout.py +169 -134
- oscura/visualization/optimization.py +103 -52
- oscura/visualization/palettes.py +1 -1
- oscura/visualization/power.py +427 -211
- oscura/visualization/power_extended.py +626 -297
- oscura/visualization/presets.py +2 -0
- oscura/visualization/protocols.py +495 -181
- oscura/visualization/render.py +79 -63
- oscura/visualization/reverse_engineering.py +171 -124
- oscura/visualization/signal_integrity.py +460 -279
- oscura/visualization/specialized.py +190 -100
- oscura/visualization/spectral.py +670 -255
- oscura/visualization/thumbnails.py +166 -137
- oscura/visualization/waveform.py +150 -63
- oscura/workflows/__init__.py +3 -0
- oscura/{batch → workflows/batch}/__init__.py +5 -5
- oscura/{batch → workflows/batch}/advanced.py +150 -75
- oscura/workflows/batch/aggregate.py +531 -0
- oscura/workflows/batch/analyze.py +236 -0
- oscura/{batch → workflows/batch}/logging.py +2 -2
- oscura/{batch → workflows/batch}/metrics.py +1 -1
- oscura/workflows/complete_re.py +1144 -0
- oscura/workflows/compliance.py +44 -54
- oscura/workflows/digital.py +197 -51
- oscura/workflows/legacy/__init__.py +12 -0
- oscura/{workflow → workflows/legacy}/dag.py +4 -1
- oscura/workflows/multi_trace.py +9 -9
- oscura/workflows/power.py +42 -62
- oscura/workflows/protocol.py +82 -49
- oscura/workflows/reverse_engineering.py +351 -150
- oscura/workflows/signal_integrity.py +157 -82
- oscura-0.6.0.dist-info/METADATA +643 -0
- oscura-0.6.0.dist-info/RECORD +590 -0
- oscura/analyzers/digital/ic_database.py +0 -498
- oscura/analyzers/digital/timing_paths.py +0 -339
- oscura/analyzers/digital/vintage.py +0 -377
- oscura/analyzers/digital/vintage_result.py +0 -148
- oscura/analyzers/protocols/parallel_bus.py +0 -449
- oscura/batch/aggregate.py +0 -300
- oscura/batch/analyze.py +0 -139
- oscura/dsl/__init__.py +0 -73
- oscura/exceptions.py +0 -59
- oscura/exploratory/fuzzy.py +0 -513
- oscura/exploratory/sync.py +0 -384
- oscura/export/wavedrom.py +0 -430
- oscura/exporters/__init__.py +0 -94
- oscura/exporters/csv.py +0 -303
- oscura/exporters/exporters.py +0 -44
- oscura/exporters/hdf5.py +0 -217
- oscura/exporters/html_export.py +0 -701
- oscura/exporters/json_export.py +0 -338
- oscura/exporters/markdown_export.py +0 -367
- oscura/exporters/matlab_export.py +0 -354
- oscura/exporters/npz_export.py +0 -219
- oscura/exporters/spice_export.py +0 -210
- oscura/exporters/vintage_logic_csv.py +0 -247
- oscura/reporting/vintage_logic_report.py +0 -523
- oscura/search/context.py +0 -149
- oscura/session/__init__.py +0 -34
- oscura/session/annotations.py +0 -289
- oscura/session/history.py +0 -313
- oscura/session/session.py +0 -520
- oscura/visualization/digital_advanced.py +0 -718
- oscura/visualization/figure_manager.py +0 -156
- oscura/workflow/__init__.py +0 -13
- oscura-0.5.0.dist-info/METADATA +0 -407
- oscura-0.5.0.dist-info/RECORD +0 -486
- /oscura/core/{config.py → config/legacy.py} +0 -0
- /oscura/{extensibility → core/extensibility}/__init__.py +0 -0
- /oscura/{extensibility → core/extensibility}/registry.py +0 -0
- /oscura/{plugins → core/plugins}/isolation.py +0 -0
- /oscura/{builders → utils/builders}/signal_builder.py +0 -0
- /oscura/{optimization → utils/optimization}/parallel.py +0 -0
- /oscura/{pipeline → utils/pipeline}/__init__.py +0 -0
- /oscura/{streaming → utils/streaming}/__init__.py +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/WHEEL +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/entry_points.txt +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1126 @@
|
|
|
1
|
+
"""Side-Channel Attack Detection and Vulnerability Assessment.
|
|
2
|
+
|
|
3
|
+
This module implements comprehensive side-channel vulnerability detection for
|
|
4
|
+
cryptographic implementations including timing attacks, power analysis, EM
|
|
5
|
+
emissions, and cache timing vulnerabilities.
|
|
6
|
+
|
|
7
|
+
Key capabilities:
|
|
8
|
+
- Timing-based leakage detection (variable-time operations)
|
|
9
|
+
- Power analysis vulnerability detection (data-dependent consumption)
|
|
10
|
+
- EM emission analysis for information leakage
|
|
11
|
+
- Cache timing attack detection
|
|
12
|
+
- Constant-time operation validation
|
|
13
|
+
- T-test for leakage detection (Welch's t-test)
|
|
14
|
+
- Mutual information calculation
|
|
15
|
+
- Statistical correlation analysis
|
|
16
|
+
|
|
17
|
+
Typical use cases:
|
|
18
|
+
- Evaluate cryptographic implementation security
|
|
19
|
+
- Detect non-constant-time operations
|
|
20
|
+
- Identify data-dependent branching
|
|
21
|
+
- Assess power consumption leakage
|
|
22
|
+
- Generate vulnerability reports for security audits
|
|
23
|
+
|
|
24
|
+
Example:
|
|
25
|
+
>>> from oscura.hardware.security.side_channel_detector import SideChannelDetector
|
|
26
|
+
>>> from oscura.side_channel.dpa import PowerTrace
|
|
27
|
+
>>> import numpy as np
|
|
28
|
+
>>> # Create detector
|
|
29
|
+
>>> detector = SideChannelDetector(
|
|
30
|
+
... timing_threshold=0.01,
|
|
31
|
+
... power_threshold=0.7,
|
|
32
|
+
... ttest_threshold=4.5
|
|
33
|
+
... )
|
|
34
|
+
>>> # Analyze power traces
|
|
35
|
+
>>> traces = [
|
|
36
|
+
... PowerTrace(
|
|
37
|
+
... timestamp=np.arange(1000),
|
|
38
|
+
... power=np.random.randn(1000),
|
|
39
|
+
... plaintext=bytes([i % 256 for i in range(16)])
|
|
40
|
+
... )
|
|
41
|
+
... for _ in range(100)
|
|
42
|
+
... ]
|
|
43
|
+
>>> report = detector.analyze_power_traces(traces, fixed_key=bytes(16))
|
|
44
|
+
>>> print(f"Found {len(report.vulnerabilities)} vulnerabilities")
|
|
45
|
+
>>> # Check for timing vulnerabilities
|
|
46
|
+
>>> timing_data = [(bytes([i]), 0.001 + i*1e-6) for i in range(256)]
|
|
47
|
+
>>> result = detector.detect_timing_leakage(timing_data)
|
|
48
|
+
>>> if result.severity != "low":
|
|
49
|
+
... print(f"Timing vulnerability: {result.evidence}")
|
|
50
|
+
"""
|
|
51
|
+
|
|
52
|
+
from __future__ import annotations
|
|
53
|
+
|
|
54
|
+
import json
|
|
55
|
+
import logging
|
|
56
|
+
from dataclasses import dataclass, field
|
|
57
|
+
from enum import Enum
|
|
58
|
+
from pathlib import Path
|
|
59
|
+
from typing import TYPE_CHECKING, Any, Literal
|
|
60
|
+
|
|
61
|
+
import numpy as np
|
|
62
|
+
from scipy import stats
|
|
63
|
+
|
|
64
|
+
if TYPE_CHECKING:
|
|
65
|
+
from collections.abc import Sequence
|
|
66
|
+
|
|
67
|
+
from numpy.typing import NDArray
|
|
68
|
+
|
|
69
|
+
from oscura.side_channel.dpa import PowerTrace
|
|
70
|
+
|
|
71
|
+
logger = logging.getLogger(__name__)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class VulnerabilityType(str, Enum):
    """Category of side-channel leakage identified by the detector.

    Subclasses ``str`` so members compare equal to their plain string
    values (e.g. ``VulnerabilityType.TIMING == "timing"``) and serialize
    directly into JSON reports.
    """

    TIMING = "timing"  # execution time depends on secret-dependent input
    POWER = "power"  # data-dependent power consumption
    EM = "electromagnetic"  # electromagnetic emission leakage
    CACHE = "cache"  # cache-timing / memory-access-pattern leakage
    CONSTANT_TIME = "constant_time"  # operation is not constant-time
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class Severity(str, Enum):
    """Qualitative severity ranking for a detected vulnerability.

    Members run from hardest to easiest to exploit. Subclasses ``str`` so
    members compare equal to their plain string values, which keeps report
    serialization and threshold comparisons simple.
    """

    LOW = "low"  # minor leakage, difficult to exploit
    MEDIUM = "medium"  # moderate leakage, exploitable with effort
    HIGH = "high"  # significant leakage, easily exploitable
    CRITICAL = "critical"  # severe leakage, trivial to exploit
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@dataclass
class SideChannelVulnerability:
    """A single side-channel vulnerability finding.

    Captures one detection result together with the supporting evidence
    and remediation guidance, ready for inclusion in a
    ``VulnerabilityReport``.

    Attributes:
        vulnerability_type: Which class of leakage was detected.
        severity: Severity level (low/medium/high/critical).
        confidence: Detection confidence in the range 0.0-1.0.
        evidence: Evidence string backing the finding (e.g. a correlation
            or t-statistic value).
        description: Human-readable summary of the vulnerability.
        mitigation_suggestions: Recommended countermeasures.
        affected_operation: Operation or code location affected, if known.
        metadata: Additional context and raw metrics.

    Example:
        >>> vuln = SideChannelVulnerability(
        ...     vulnerability_type=VulnerabilityType.TIMING,
        ...     severity=Severity.HIGH,
        ...     confidence=0.95,
        ...     evidence="Timing variance: 125.3 ns",
        ...     description="Input-dependent execution time detected",
        ...     mitigation_suggestions=["Use constant-time comparison"],
        ... )
    """

    vulnerability_type: VulnerabilityType
    severity: Severity
    confidence: float
    evidence: str
    description: str
    mitigation_suggestions: list[str] = field(default_factory=list)
    affected_operation: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
@dataclass
class VulnerabilityReport:
    """Aggregate result of a side-channel vulnerability assessment.

    Bundles every individual finding with run-level context so a single
    object can be rendered into a security-audit report.

    Attributes:
        vulnerabilities: All detected vulnerabilities.
        summary_statistics: Summary metrics across all detections.
        analysis_config: Detector configuration used for the analysis.
        recommendations: Overall security recommendations.
        timestamp: When the analysis was performed (empty if unset).

    Example:
        >>> report = detector.analyze_power_traces(traces)
        >>> print(f"Critical: {report.summary_statistics['critical_count']}")
        >>> for vuln in report.vulnerabilities:
        ...     if vuln.severity == Severity.CRITICAL:
        ...         print(f"  {vuln.description}")
    """

    vulnerabilities: list[SideChannelVulnerability]
    summary_statistics: dict[str, Any] = field(default_factory=dict)
    analysis_config: dict[str, Any] = field(default_factory=dict)
    recommendations: list[str] = field(default_factory=list)
    timestamp: str = ""
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
class SideChannelDetector:
|
|
155
|
+
"""Side-channel vulnerability detection and assessment framework.
|
|
156
|
+
|
|
157
|
+
This class implements multiple statistical tests and analysis methods for
|
|
158
|
+
detecting side-channel vulnerabilities in cryptographic implementations.
|
|
159
|
+
|
|
160
|
+
Detection Methods:
|
|
161
|
+
- Welch's t-test for leakage detection (TVLA methodology)
|
|
162
|
+
- Pearson correlation analysis for data dependencies
|
|
163
|
+
- Mutual information calculation
|
|
164
|
+
- Statistical timing analysis (variance, distribution)
|
|
165
|
+
- Frequency-domain analysis for EM leakage
|
|
166
|
+
|
|
167
|
+
Analysis Types:
|
|
168
|
+
- Timing analysis: Variable-time operations, input correlations
|
|
169
|
+
- Power analysis: Data-dependent consumption, DPA susceptibility
|
|
170
|
+
- EM analysis: Emission patterns, frequency leakage
|
|
171
|
+
- Cache timing: Data-dependent memory access patterns
|
|
172
|
+
- Constant-time: Operation time validation
|
|
173
|
+
|
|
174
|
+
Example:
|
|
175
|
+
>>> # Basic timing analysis
|
|
176
|
+
>>> detector = SideChannelDetector(timing_threshold=0.01)
|
|
177
|
+
>>> timing_data = [(input_bytes, execution_time), ...]
|
|
178
|
+
>>> vuln = detector.detect_timing_leakage(timing_data)
|
|
179
|
+
>>> # Power trace analysis with t-test
|
|
180
|
+
>>> report = detector.analyze_power_traces(
|
|
181
|
+
... traces,
|
|
182
|
+
... fixed_key=key,
|
|
183
|
+
... use_ttest=True
|
|
184
|
+
... )
|
|
185
|
+
"""
|
|
186
|
+
|
|
187
|
+
def __init__(
|
|
188
|
+
self,
|
|
189
|
+
timing_threshold: float = 0.01,
|
|
190
|
+
power_threshold: float = 0.7,
|
|
191
|
+
em_threshold: float = 0.6,
|
|
192
|
+
cache_threshold: float = 0.05,
|
|
193
|
+
ttest_threshold: float = 4.5,
|
|
194
|
+
mutual_info_threshold: float = 0.1,
|
|
195
|
+
) -> None:
|
|
196
|
+
"""Initialize side-channel detector.
|
|
197
|
+
|
|
198
|
+
Args:
|
|
199
|
+
timing_threshold: Timing correlation threshold for vulnerability (0.0-1.0).
|
|
200
|
+
power_threshold: Power correlation threshold for vulnerability (0.0-1.0).
|
|
201
|
+
em_threshold: EM emission correlation threshold (0.0-1.0).
|
|
202
|
+
cache_threshold: Cache timing threshold for vulnerability (0.0-1.0).
|
|
203
|
+
ttest_threshold: T-test statistic threshold (typically 4.5 for p<0.00001).
|
|
204
|
+
mutual_info_threshold: Mutual information threshold in bits (0.0-8.0).
|
|
205
|
+
|
|
206
|
+
Example:
|
|
207
|
+
>>> # Strict detection thresholds
|
|
208
|
+
>>> detector = SideChannelDetector(
|
|
209
|
+
... timing_threshold=0.005,
|
|
210
|
+
... ttest_threshold=3.0
|
|
211
|
+
... )
|
|
212
|
+
"""
|
|
213
|
+
self.timing_threshold = timing_threshold
|
|
214
|
+
self.power_threshold = power_threshold
|
|
215
|
+
self.em_threshold = em_threshold
|
|
216
|
+
self.cache_threshold = cache_threshold
|
|
217
|
+
self.ttest_threshold = ttest_threshold
|
|
218
|
+
self.mutual_info_threshold = mutual_info_threshold
|
|
219
|
+
|
|
220
|
+
def detect_timing_leakage(
    self,
    timing_data: Sequence[tuple[bytes, float]],
    operation_name: str = "operation",
) -> SideChannelVulnerability:
    """Detect timing-based side-channel leakage.

    Correlates execution-time measurements with the first input byte and
    runs a Welch's t-test between low/high input groups to flag
    non-constant-time behavior.

    Args:
        timing_data: List of (input_bytes, execution_time) tuples.
        operation_name: Name of the operation being analyzed.

    Returns:
        SideChannelVulnerability with timing analysis results.

    Example:
        >>> timing_data = [
        ...     (bytes([i]), measure_encryption_time(bytes([i])))
        ...     for i in range(256)
        ... ]
        >>> vuln = detector.detect_timing_leakage(timing_data, "AES_encrypt")
        >>> if vuln.severity in [Severity.HIGH, Severity.CRITICAL]:
        ...     print(f"Timing vulnerability: {vuln.evidence}")
    """
    if not timing_data:
        return self._create_empty_timing_result()

    timings, first_bytes = self._extract_timing_data(timing_data)
    stats_data = self._calculate_timing_statistics(timings)
    correlation = self._calculate_timing_correlation(first_bytes, timings)
    t_stat, p_value = self._perform_timing_ttest(timings, first_bytes)

    # Collect everything downstream consumers may want to inspect.
    metadata = dict(stats_data)
    metadata["correlation"] = float(correlation)
    metadata["t_statistic"] = float(t_stat)
    metadata["p_value"] = float(p_value)
    metadata["sample_count"] = len(timings)

    severity = self._assess_timing_severity(correlation, t_stat)
    confidence = self._calculate_timing_confidence(timings, stats_data)

    return SideChannelVulnerability(
        vulnerability_type=VulnerabilityType.TIMING,
        severity=severity,
        confidence=float(confidence),
        evidence=self._format_timing_evidence(correlation, t_stat, stats_data),
        description=self._format_timing_description(operation_name, severity),
        mitigation_suggestions=self._generate_timing_mitigations(correlation, t_stat),
        affected_operation=operation_name,
        metadata=metadata,
    )
|
|
280
|
+
|
|
281
|
+
def _create_empty_timing_result(self) -> SideChannelVulnerability:
    """Return the low-severity placeholder used when no timing data exists.

    Keeps ``detect_timing_leakage`` total: callers always receive a
    ``SideChannelVulnerability`` even for empty input.
    """
    placeholder = SideChannelVulnerability(
        vulnerability_type=VulnerabilityType.TIMING,
        severity=Severity.LOW,
        confidence=0.0,
        evidence="No timing data provided",
        description="Insufficient data for timing analysis",
    )
    return placeholder
|
|
290
|
+
|
|
291
|
+
def _extract_timing_data(
|
|
292
|
+
self, timing_data: Sequence[tuple[bytes, float]]
|
|
293
|
+
) -> tuple[NDArray[np.float64], NDArray[np.int_]]:
|
|
294
|
+
"""Extract timing arrays from timing data."""
|
|
295
|
+
timings = np.array([t for _, t in timing_data])
|
|
296
|
+
first_bytes = np.array([data[0] if data else 0 for data, _ in timing_data])
|
|
297
|
+
return timings, first_bytes
|
|
298
|
+
|
|
299
|
+
def _calculate_timing_statistics(self, timings: NDArray[np.float64]) -> dict[str, float]:
|
|
300
|
+
"""Calculate timing statistics (mean, std, range)."""
|
|
301
|
+
mean_time = float(np.mean(timings))
|
|
302
|
+
std_time = float(np.std(timings))
|
|
303
|
+
min_time = float(np.min(timings))
|
|
304
|
+
max_time = float(np.max(timings))
|
|
305
|
+
return {
|
|
306
|
+
"mean_time": mean_time,
|
|
307
|
+
"std_time": std_time,
|
|
308
|
+
"time_range": max_time - min_time,
|
|
309
|
+
}
|
|
310
|
+
|
|
311
|
+
def _calculate_timing_correlation(
|
|
312
|
+
self, first_bytes: NDArray[np.int_], timings: NDArray[np.float64]
|
|
313
|
+
) -> float:
|
|
314
|
+
"""Calculate correlation between input bytes and timing."""
|
|
315
|
+
if len(set(first_bytes)) <= 1:
|
|
316
|
+
return 0.0
|
|
317
|
+
correlation = abs(np.corrcoef(first_bytes, timings)[0, 1])
|
|
318
|
+
return 0.0 if np.isnan(correlation) else correlation
|
|
319
|
+
|
|
320
|
+
def _perform_timing_ttest(
|
|
321
|
+
self, timings: NDArray[np.float64], first_bytes: NDArray[np.int_]
|
|
322
|
+
) -> tuple[float, float]:
|
|
323
|
+
"""Perform t-test between low and high input groups."""
|
|
324
|
+
if len(timings) < 10:
|
|
325
|
+
return 0.0, 1.0
|
|
326
|
+
|
|
327
|
+
median_byte = np.median(first_bytes)
|
|
328
|
+
low_group = timings[first_bytes <= median_byte]
|
|
329
|
+
high_group = timings[first_bytes > median_byte]
|
|
330
|
+
|
|
331
|
+
if len(low_group) == 0 or len(high_group) == 0:
|
|
332
|
+
return 0.0, 1.0
|
|
333
|
+
|
|
334
|
+
t_stat, p_value = stats.ttest_ind(low_group, high_group, equal_var=False)
|
|
335
|
+
return abs(float(t_stat)), float(p_value)
|
|
336
|
+
|
|
337
|
+
def _assess_timing_severity(self, correlation: float, t_stat: float) -> Severity:
    """Map correlation / t-statistic magnitudes onto a severity level."""
    if correlation <= self.timing_threshold and t_stat <= self.ttest_threshold:
        return Severity.LOW
    # Rules are checked from most to least severe; first match wins.
    rules = (
        (0.5, 10.0, Severity.CRITICAL),
        (0.2, 7.0, Severity.HIGH),
        (0.1, self.ttest_threshold, Severity.MEDIUM),
    )
    for corr_limit, t_limit, level in rules:
        if correlation > corr_limit or t_stat > t_limit:
            return level
    return Severity.LOW
|
|
348
|
+
|
|
349
|
+
def _calculate_timing_confidence(
|
|
350
|
+
self, timings: NDArray[np.float64], stats_data: dict[str, float]
|
|
351
|
+
) -> float:
|
|
352
|
+
"""Calculate confidence based on sample size and variance."""
|
|
353
|
+
confidence = min(1.0, len(timings) / 100.0)
|
|
354
|
+
if stats_data["std_time"] / (stats_data["mean_time"] + 1e-10) < 0.01:
|
|
355
|
+
confidence *= 0.5
|
|
356
|
+
return confidence
|
|
357
|
+
|
|
358
|
+
def _generate_timing_mitigations(self, correlation: float, t_stat: float) -> list[str]:
|
|
359
|
+
"""Generate mitigation suggestions based on findings."""
|
|
360
|
+
mitigations = []
|
|
361
|
+
if correlation > self.timing_threshold:
|
|
362
|
+
mitigations.extend(
|
|
363
|
+
[
|
|
364
|
+
"Implement constant-time operations",
|
|
365
|
+
"Use constant-time comparison functions",
|
|
366
|
+
"Avoid data-dependent branching",
|
|
367
|
+
]
|
|
368
|
+
)
|
|
369
|
+
if t_stat > self.ttest_threshold:
|
|
370
|
+
mitigations.extend(
|
|
371
|
+
[
|
|
372
|
+
"Add random delays to mask timing variations",
|
|
373
|
+
"Use blinding or masking techniques",
|
|
374
|
+
]
|
|
375
|
+
)
|
|
376
|
+
return mitigations
|
|
377
|
+
|
|
378
|
+
def _format_timing_evidence(
|
|
379
|
+
self, correlation: float, t_stat: float, stats_data: dict[str, float]
|
|
380
|
+
) -> str:
|
|
381
|
+
"""Format evidence string for timing vulnerability."""
|
|
382
|
+
return (
|
|
383
|
+
f"Correlation: {correlation:.4f}, T-statistic: {t_stat:.2f}, "
|
|
384
|
+
f"Range: {stats_data['time_range'] * 1e9:.1f} ns, "
|
|
385
|
+
f"Std: {stats_data['std_time'] * 1e9:.1f} ns"
|
|
386
|
+
)
|
|
387
|
+
|
|
388
|
+
def _format_timing_description(self, operation_name: str, severity: Severity) -> str:
    """Build the one-line description for a timing finding."""
    if severity in (Severity.HIGH, Severity.CRITICAL):
        significance = "significant"
    else:
        significance = "potential"
    return f"Timing analysis of '{operation_name}' shows {significance} input-dependent execution time"
|
|
394
|
+
|
|
395
|
+
def analyze_power_traces(
    self,
    traces: Sequence[PowerTrace],
    fixed_key: bytes | None = None,
    use_ttest: bool = True,
) -> VulnerabilityReport:
    """Analyze power traces for DPA/CPA vulnerabilities.

    Runs the t-test (TVLA), correlation (CPA), EM, and variance analyses
    over the trace set and aggregates all findings into a single report.

    Args:
        traces: List of power consumption traces with plaintexts.
        fixed_key: Known key for hypothesis testing (optional).
        use_ttest: Whether to perform t-test analysis.

    Returns:
        VulnerabilityReport with all detected power-related vulnerabilities.

    Example:
        >>> traces = collect_power_traces(plaintexts, key)
        >>> report = detector.analyze_power_traces(traces, fixed_key=key)
        >>> critical = [v for v in report.vulnerabilities
        ...             if v.severity == Severity.CRITICAL]
        >>> print(f"Critical vulnerabilities: {len(critical)}")
    """
    if not traces:
        return VulnerabilityReport(
            vulnerabilities=[],
            summary_statistics={"error": "No traces provided"},
        )

    power_matrix = np.array([trace.power for trace in traces])
    trace_count, sample_count = power_matrix.shape

    findings = self._collect_power_vulnerabilities(
        traces, power_matrix, use_ttest, fixed_key
    )
    summary = self._build_summary_statistics(findings, trace_count, sample_count)

    return VulnerabilityReport(
        vulnerabilities=findings,
        summary_statistics=summary,
        analysis_config={
            "power_threshold": self.power_threshold,
            "ttest_threshold": self.ttest_threshold,
            "use_ttest": use_ttest,
        },
        recommendations=self._generate_recommendations(findings),
    )
|
|
453
|
+
|
|
454
|
+
def _collect_power_vulnerabilities(
    self,
    traces: Sequence[PowerTrace],
    power_matrix: np.ndarray[Any, Any],
    use_ttest: bool,
    fixed_key: bytes | None,
) -> list[SideChannelVulnerability]:
    """Run every power-analysis pass and gather the notable findings.

    Args:
        traces: Power consumption traces.
        power_matrix: 2D array of power measurements (traces x samples).
        use_ttest: Whether to run the TVLA t-test pass.
        fixed_key: Known key enabling the CPA pass.

    Returns:
        List of detected vulnerabilities.
    """
    findings: list[SideChannelVulnerability] = []
    enough_traces = len(traces) >= 10

    # TVLA-style t-test needs a minimum trace count to be meaningful.
    if use_ttest and enough_traces:
        ttest_finding = self._analyze_ttest_leakage(traces, power_matrix)
        if ttest_finding is not None:
            findings.append(ttest_finding)

    # CPA requires a known key to build leakage hypotheses against.
    if fixed_key is not None and enough_traces:
        cpa_finding = self._analyze_cpa_vulnerability(traces, fixed_key)
        if cpa_finding is not None:
            findings.append(cpa_finding)

    # EM and variance passes always run; only non-LOW results are kept.
    em_finding = self._analyze_em_leakage(power_matrix)
    if em_finding.severity != Severity.LOW:
        findings.append(em_finding)

    variance_finding = self._analyze_power_variance(traces)
    if variance_finding.severity != Severity.LOW:
        findings.append(variance_finding)

    return findings
|
|
497
|
+
|
|
498
|
+
def _analyze_ttest_leakage(
    self, traces: Sequence[PowerTrace], power_matrix: np.ndarray[Any, Any]
) -> SideChannelVulnerability | None:
    """Run Welch's t-test leakage detection over a trace set.

    Args:
        traces: Power consumption traces.
        power_matrix: 2D array of power measurements.

    Returns:
        A POWER vulnerability when the peak |t| exceeds the configured
        threshold, otherwise None (including when no t-stats are available).
    """
    t_stats = self._perform_ttest_leakage(traces)
    if t_stats is None:
        return None

    abs_t = np.abs(t_stats)
    peak_t = float(np.max(abs_t))
    if peak_t <= self.ttest_threshold:
        return None

    leaky_samples = int(np.sum(abs_t > self.ttest_threshold))
    sample_count = power_matrix.shape[1]

    return SideChannelVulnerability(
        vulnerability_type=VulnerabilityType.POWER,
        severity=self._assess_ttest_severity(peak_t),
        confidence=min(1.0, len(traces) / 100.0),
        evidence=(
            f"Max T-statistic: {peak_t:.2f}, Leakage points: {leaky_samples}/{sample_count}"
        ),
        description="Welch's t-test reveals significant first-order power leakage",
        mitigation_suggestions=[
            "Implement power-balanced logic gates",
            "Add random noise to power consumption",
            "Use masking or hiding countermeasures",
            "Employ dual-rail precharge logic (DPL)",
        ],
        metadata={
            "max_t_statistic": peak_t,
            "leakage_points": leaky_samples,
            "threshold": self.ttest_threshold,
        },
    )
|
|
544
|
+
|
|
545
|
+
def _assess_ttest_severity(self, max_t_stat: float) -> Severity:
    """Translate the peak |t|-statistic into a severity level.

    Args:
        max_t_stat: Maximum absolute t-statistic observed.

    Returns:
        CRITICAL above 20, HIGH above 10, MEDIUM above the configured
        threshold, otherwise LOW.
    """
    for limit, level in ((20.0, Severity.CRITICAL), (10.0, Severity.HIGH)):
        if max_t_stat > limit:
            return level
    return Severity.MEDIUM if max_t_stat > self.ttest_threshold else Severity.LOW
|
|
561
|
+
|
|
562
|
+
def _analyze_cpa_vulnerability(
    self, traces: Sequence[PowerTrace], fixed_key: bytes
) -> SideChannelVulnerability | None:
    """Check whether a CPA attack succeeds against the first key byte.

    Args:
        traces: Power consumption traces.
        fixed_key: Known encryption key.

    Returns:
        A POWER vulnerability when the peak correlation exceeds the power
        threshold; None when correlation is low or the attack errors out.
    """
    from oscura.side_channel.dpa import DPAAnalyzer

    analyzer = DPAAnalyzer(attack_type="cpa", leakage_model="hamming_weight")

    try:
        result = analyzer.cpa_attack(list(traces), target_byte=0)
        if result.correlation_traces is None:
            return None

        peak_corr = float(np.max(result.correlation_traces))
        if peak_corr <= self.power_threshold:
            return None

        return SideChannelVulnerability(
            vulnerability_type=VulnerabilityType.POWER,
            severity=self._assess_correlation_severity(peak_corr),
            confidence=result.confidence,
            evidence=(
                f"Max correlation: {peak_corr:.4f}, "
                f"Attack confidence: {result.confidence:.2%}"
            ),
            description=(
                "CPA attack successful - power consumption correlates with Hamming weight"
            ),
            mitigation_suggestions=[
                "Implement algorithmic masking (boolean/arithmetic)",
                "Use shuffling to randomize operation order",
                "Add random delays between operations",
                "Employ constant-power hardware primitives",
            ],
            metadata={
                "max_correlation": peak_corr,
                "recovered_key_byte": int(result.recovered_key[0]),
                "attack_successful": result.successful,
            },
        )
    except Exception as e:
        # Best-effort pass: a failed attack attempt is not a fatal error.
        logger.warning(f"CPA analysis failed: {e}")
        return None
|
|
615
|
+
|
|
616
|
+
def _assess_correlation_severity(self, max_correlation: float) -> Severity:
    """Translate a peak CPA correlation into a severity level.

    Args:
        max_correlation: Maximum correlation value observed.

    Returns:
        CRITICAL above 0.95, HIGH above 0.85, MEDIUM above the configured
        power threshold, otherwise LOW.
    """
    if max_correlation > 0.95:
        return Severity.CRITICAL
    elif max_correlation > 0.85:
        return Severity.HIGH
    elif max_correlation > self.power_threshold:
        return Severity.MEDIUM
    else:
        return Severity.LOW
|
|
632
|
+
|
|
633
|
+
def _build_summary_statistics(
|
|
634
|
+
self,
|
|
635
|
+
vulnerabilities: list[SideChannelVulnerability],
|
|
636
|
+
num_traces: int,
|
|
637
|
+
num_samples: int,
|
|
638
|
+
) -> dict[str, Any]:
|
|
639
|
+
"""Build summary statistics from detected vulnerabilities.
|
|
640
|
+
|
|
641
|
+
Args:
|
|
642
|
+
vulnerabilities: List of detected vulnerabilities.
|
|
643
|
+
num_traces: Number of power traces analyzed.
|
|
644
|
+
num_samples: Number of samples per trace.
|
|
645
|
+
|
|
646
|
+
Returns:
|
|
647
|
+
Dictionary of summary statistics.
|
|
648
|
+
"""
|
|
649
|
+
return {
|
|
650
|
+
"total_vulnerabilities": len(vulnerabilities),
|
|
651
|
+
"critical_count": sum(1 for v in vulnerabilities if v.severity == Severity.CRITICAL),
|
|
652
|
+
"high_count": sum(1 for v in vulnerabilities if v.severity == Severity.HIGH),
|
|
653
|
+
"medium_count": sum(1 for v in vulnerabilities if v.severity == Severity.MEDIUM),
|
|
654
|
+
"low_count": sum(1 for v in vulnerabilities if v.severity == Severity.LOW),
|
|
655
|
+
"num_traces": num_traces,
|
|
656
|
+
"num_samples": num_samples,
|
|
657
|
+
}
|
|
658
|
+
|
|
659
|
+
def _generate_recommendations(
|
|
660
|
+
self, vulnerabilities: list[SideChannelVulnerability]
|
|
661
|
+
) -> list[str]:
|
|
662
|
+
"""Generate security recommendations based on vulnerabilities.
|
|
663
|
+
|
|
664
|
+
Args:
|
|
665
|
+
vulnerabilities: List of detected vulnerabilities.
|
|
666
|
+
|
|
667
|
+
Returns:
|
|
668
|
+
List of recommendation strings.
|
|
669
|
+
"""
|
|
670
|
+
recommendations = []
|
|
671
|
+
|
|
672
|
+
if any(v.severity == Severity.CRITICAL for v in vulnerabilities):
|
|
673
|
+
recommendations.append(
|
|
674
|
+
"CRITICAL: Immediate countermeasures required - implementation is highly "
|
|
675
|
+
"vulnerable to power analysis attacks"
|
|
676
|
+
)
|
|
677
|
+
if any(v.vulnerability_type == VulnerabilityType.POWER for v in vulnerabilities):
|
|
678
|
+
recommendations.append(
|
|
679
|
+
"Consider hardware countermeasures (noise generation, power filtering)"
|
|
680
|
+
)
|
|
681
|
+
if any(v.vulnerability_type == VulnerabilityType.EM for v in vulnerabilities):
|
|
682
|
+
recommendations.append("Add EM shielding and filtering to reduce emissions")
|
|
683
|
+
|
|
684
|
+
if not vulnerabilities:
|
|
685
|
+
recommendations.append(
|
|
686
|
+
"No significant vulnerabilities detected with current thresholds"
|
|
687
|
+
)
|
|
688
|
+
|
|
689
|
+
return recommendations
|
|
690
|
+
|
|
691
|
+
def detect_constant_time_violation(
    self,
    timing_measurements: Sequence[tuple[Any, float]],
    input_extractor: Any = None,
) -> SideChannelVulnerability:
    """Detect non-constant-time operations from timing spread.

    Classifies the coefficient of variation (std/mean) of the measured
    execution times into constant / minimal / moderate / significant
    variation bands.

    Args:
        timing_measurements: List of (input_data, execution_time) tuples.
        input_extractor: Function to extract relevant bits from input_data.
            NOTE(review): not consulted by the current variance-only
            analysis — confirm intended use.

    Returns:
        SideChannelVulnerability for constant-time analysis.

    Example:
        >>> measurements = [
        ...     (input_byte, time_sbox_lookup(input_byte))
        ...     for input_byte in range(256)
        ... ]
        >>> vuln = detector.detect_constant_time_violation(measurements)
    """
    if not timing_measurements:
        return SideChannelVulnerability(
            vulnerability_type=VulnerabilityType.CONSTANT_TIME,
            severity=Severity.LOW,
            confidence=0.0,
            evidence="No timing measurements provided",
            description="Insufficient data for constant-time analysis",
        )

    durations = np.array([elapsed for _, elapsed in timing_measurements])
    mean_time = float(np.mean(durations))
    std_time = float(np.std(durations))
    # Coefficient of variation; epsilon guards against a zero mean.
    cv = std_time / (mean_time + 1e-10)

    if cv < 0.001:
        # < 0.1% spread: effectively constant-time.
        severity = Severity.LOW
        description = "Operation appears to be constant-time"
        mitigations: list[str] = []
    elif cv < 0.01:
        severity = Severity.LOW
        description = "Operation shows minimal timing variation"
        mitigations = ["Verify constant-time properties with formal methods"]
    elif cv < 0.05:
        severity = Severity.MEDIUM
        description = "Operation shows moderate timing variation"
        mitigations = [
            "Review for data-dependent branches",
            "Check for table lookups without cache protection",
        ]
    else:
        severity = Severity.HIGH
        description = "Operation shows significant timing variation (non-constant-time)"
        mitigations = [
            "Reimplement using constant-time algorithms",
            "Eliminate data-dependent branches",
            "Use constant-time table lookups",
        ]

    return SideChannelVulnerability(
        vulnerability_type=VulnerabilityType.CONSTANT_TIME,
        severity=severity,
        confidence=min(1.0, len(durations) / 50.0),
        evidence=f"Coefficient of variation: {cv:.6f}, Std: {std_time * 1e9:.2f} ns",
        description=description,
        mitigation_suggestions=mitigations,
        metadata={
            "mean_time": mean_time,
            "std_time": std_time,
            "coefficient_of_variation": float(cv),
            "sample_count": len(durations),
        },
    )
|
|
773
|
+
|
|
774
|
+
def calculate_mutual_information(
|
|
775
|
+
self,
|
|
776
|
+
secret_data: NDArray[np.int_],
|
|
777
|
+
observable_data: NDArray[np.float64],
|
|
778
|
+
bins: int = 50,
|
|
779
|
+
) -> float:
|
|
780
|
+
"""Calculate mutual information between secret and observable data.
|
|
781
|
+
|
|
782
|
+
Mutual information quantifies how much knowing the observable reduces
|
|
783
|
+
uncertainty about the secret. Higher values indicate more leakage.
|
|
784
|
+
|
|
785
|
+
Formula:
|
|
786
|
+
I(Secret; Observable) = H(Secret) + H(Observable) - H(Secret, Observable)
|
|
787
|
+
|
|
788
|
+
Args:
|
|
789
|
+
secret_data: Secret data values (e.g., key bytes).
|
|
790
|
+
observable_data: Observable measurements (e.g., timing, power).
|
|
791
|
+
bins: Number of bins for histogram estimation.
|
|
792
|
+
|
|
793
|
+
Returns:
|
|
794
|
+
Mutual information in bits (0.0 to log2(len(secret_data))).
|
|
795
|
+
|
|
796
|
+
Example:
|
|
797
|
+
>>> # Calculate MI between key byte and execution time
|
|
798
|
+
>>> key_bytes = np.array([key[0] for _ in range(1000)])
|
|
799
|
+
>>> timings = np.array([measure_time(key[0]) for _ in range(1000)])
|
|
800
|
+
>>> mi = detector.calculate_mutual_information(key_bytes, timings)
|
|
801
|
+
>>> print(f"Mutual information: {mi:.4f} bits")
|
|
802
|
+
"""
|
|
803
|
+
if len(secret_data) != len(observable_data):
|
|
804
|
+
raise ValueError("Secret and observable data must have same length")
|
|
805
|
+
|
|
806
|
+
# Discretize observable data into bins
|
|
807
|
+
obs_binned = np.digitize(
|
|
808
|
+
observable_data, np.linspace(observable_data.min(), observable_data.max(), bins)
|
|
809
|
+
)
|
|
810
|
+
|
|
811
|
+
# Calculate joint histogram
|
|
812
|
+
hist_joint, _, _ = np.histogram2d(
|
|
813
|
+
secret_data, obs_binned, bins=[len(np.unique(secret_data)), bins]
|
|
814
|
+
)
|
|
815
|
+
|
|
816
|
+
# Normalize to probabilities
|
|
817
|
+
p_joint = hist_joint / hist_joint.sum()
|
|
818
|
+
p_secret = p_joint.sum(axis=1)
|
|
819
|
+
p_obs = p_joint.sum(axis=0)
|
|
820
|
+
|
|
821
|
+
# Calculate entropies
|
|
822
|
+
def entropy(p: NDArray[np.float64]) -> float:
|
|
823
|
+
"""Calculate Shannon entropy."""
|
|
824
|
+
p_nonzero = p[p > 0]
|
|
825
|
+
return float(-np.sum(p_nonzero * np.log2(p_nonzero)))
|
|
826
|
+
|
|
827
|
+
h_secret = entropy(p_secret)
|
|
828
|
+
h_obs = entropy(p_obs)
|
|
829
|
+
h_joint = entropy(p_joint.flatten())
|
|
830
|
+
|
|
831
|
+
# Mutual information
|
|
832
|
+
mi = h_secret + h_obs - h_joint
|
|
833
|
+
|
|
834
|
+
return float(max(0.0, mi)) # Ensure non-negative
|
|
835
|
+
|
|
836
|
+
def export_report(
|
|
837
|
+
self,
|
|
838
|
+
report: VulnerabilityReport,
|
|
839
|
+
output_path: Path,
|
|
840
|
+
format: Literal["json", "html"] = "json",
|
|
841
|
+
) -> None:
|
|
842
|
+
"""Export vulnerability report to file.
|
|
843
|
+
|
|
844
|
+
Args:
|
|
845
|
+
report: VulnerabilityReport to export.
|
|
846
|
+
output_path: Path to save report file.
|
|
847
|
+
format: Export format ("json" or "html").
|
|
848
|
+
|
|
849
|
+
Example:
|
|
850
|
+
>>> report = detector.analyze_power_traces(traces)
|
|
851
|
+
>>> detector.export_report(
|
|
852
|
+
... report,
|
|
853
|
+
... Path("security_audit.json"),
|
|
854
|
+
... format="json"
|
|
855
|
+
... )
|
|
856
|
+
"""
|
|
857
|
+
if format == "json":
|
|
858
|
+
self._export_json(report, output_path)
|
|
859
|
+
elif format == "html":
|
|
860
|
+
self._export_html(report, output_path)
|
|
861
|
+
else:
|
|
862
|
+
raise ValueError(f"Unsupported format: {format}")
|
|
863
|
+
|
|
864
|
+
def _export_json(self, report: VulnerabilityReport, output_path: Path) -> None:
    """Serialize a report to indented JSON and write it to ``output_path``."""
    serialized_vulns = []
    for v in report.vulnerabilities:
        serialized_vulns.append(
            {
                "type": v.vulnerability_type.value,
                "severity": v.severity.value,
                "confidence": v.confidence,
                "evidence": v.evidence,
                "description": v.description,
                "mitigations": v.mitigation_suggestions,
                "affected_operation": v.affected_operation,
                "metadata": v.metadata,
            }
        )

    payload = {
        "summary": report.summary_statistics,
        "vulnerabilities": serialized_vulns,
        "recommendations": report.recommendations,
        "config": report.analysis_config,
    }

    with open(output_path, "w") as f:
        json.dump(payload, f, indent=2)

    logger.info(f"Vulnerability report exported to {output_path}")
|
|
889
|
+
|
|
890
|
+
def _export_html(self, report: VulnerabilityReport, output_path: Path) -> None:
    """Export report as HTML."""
    total = report.summary_statistics.get("total_vulnerabilities", 0)

    # Static document header, including the per-severity border styles.
    parts: list[str] = [
        "<!DOCTYPE html>",
        "<html><head><title>Side-Channel Vulnerability Report</title>",
        "<style>",
        "body { font-family: Arial, sans-serif; margin: 20px; }",
        ".vulnerability { border: 1px solid #ccc; padding: 10px; margin: 10px 0; }",
        ".critical { border-left: 5px solid #d9534f; }",
        ".high { border-left: 5px solid #f0ad4e; }",
        ".medium { border-left: 5px solid #5bc0de; }",
        ".low { border-left: 5px solid #5cb85c; }",
        "</style></head><body>",
        "<h1>Side-Channel Vulnerability Report</h1>",
        f"<h2>Summary: {total} vulnerabilities found</h2>",
    ]

    # Summary statistics as a bullet list.
    parts.append("<h3>Summary</h3><ul>")
    parts.extend(
        f"<li><strong>{key}:</strong> {value}</li>"
        for key, value in report.summary_statistics.items()
    )
    parts.append("</ul>")

    # One div per vulnerability, color-coded via the severity CSS classes above.
    parts.append("<h3>Vulnerabilities</h3>")
    for vuln in report.vulnerabilities:
        parts.append(
            f'<div class="vulnerability {vuln.severity.value}">'
            f"<h4>{vuln.vulnerability_type.value.upper()} - {vuln.severity.value.upper()}</h4>"
            f"<p><strong>Confidence:</strong> {vuln.confidence:.2%}</p>"
            f"<p><strong>Description:</strong> {vuln.description}</p>"
            f"<p><strong>Evidence:</strong> {vuln.evidence}</p>"
        )
        if vuln.mitigation_suggestions:
            parts.append("<p><strong>Mitigations:</strong></p><ul>")
            parts.extend(f"<li>{m}</li>" for m in vuln.mitigation_suggestions)
            parts.append("</ul>")
        parts.append("</div>")

    # Global recommendations, when present.
    if report.recommendations:
        parts.append("<h3>Recommendations</h3><ul>")
        parts.extend(f"<li>{rec}</li>" for rec in report.recommendations)
        parts.append("</ul>")

    parts.append("</body></html>")

    # NOTE(review): field values are interpolated without HTML escaping; fine
    # for trusted internal reports, but confirm descriptions/evidence are not
    # attacker-controlled before serving this output to a browser.
    with open(output_path, "w") as fh:
        fh.write("\n".join(parts))

    logger.info(f"HTML report exported to {output_path}")
|
|
944
|
+
|
|
945
|
+
def _perform_ttest_leakage(
|
|
946
|
+
self,
|
|
947
|
+
traces: Sequence[PowerTrace],
|
|
948
|
+
) -> NDArray[np.float64] | None:
|
|
949
|
+
"""Perform Welch's t-test for leakage detection (TVLA).
|
|
950
|
+
|
|
951
|
+
Args:
|
|
952
|
+
traces: Power traces with plaintexts.
|
|
953
|
+
|
|
954
|
+
Returns:
|
|
955
|
+
T-statistics for each sample point, or None if insufficient data.
|
|
956
|
+
"""
|
|
957
|
+
if len(traces) < 10:
|
|
958
|
+
return None
|
|
959
|
+
|
|
960
|
+
# Partition traces by first plaintext byte (odd vs even)
|
|
961
|
+
group0_traces = []
|
|
962
|
+
group1_traces = []
|
|
963
|
+
|
|
964
|
+
for trace in traces:
|
|
965
|
+
if trace.plaintext is None or len(trace.plaintext) == 0:
|
|
966
|
+
continue
|
|
967
|
+
|
|
968
|
+
if trace.plaintext[0] % 2 == 0:
|
|
969
|
+
group0_traces.append(trace.power)
|
|
970
|
+
else:
|
|
971
|
+
group1_traces.append(trace.power)
|
|
972
|
+
|
|
973
|
+
if len(group0_traces) < 5 or len(group1_traces) < 5:
|
|
974
|
+
return None
|
|
975
|
+
|
|
976
|
+
power0 = np.array(group0_traces)
|
|
977
|
+
power1 = np.array(group1_traces)
|
|
978
|
+
|
|
979
|
+
# Perform Welch's t-test at each time point
|
|
980
|
+
t_stats = np.zeros(power0.shape[1])
|
|
981
|
+
|
|
982
|
+
for i in range(power0.shape[1]):
|
|
983
|
+
t_stat, _ = stats.ttest_ind(power0[:, i], power1[:, i], equal_var=False)
|
|
984
|
+
t_stats[i] = abs(t_stat) if not np.isnan(t_stat) else 0.0
|
|
985
|
+
|
|
986
|
+
return t_stats
|
|
987
|
+
|
|
988
|
+
def _analyze_em_leakage(
    self,
    power_matrix: NDArray[np.float64],
) -> SideChannelVulnerability:
    """Analyze electromagnetic emission leakage via frequency domain.

    Args:
        power_matrix: Power traces matrix (num_traces x num_samples).

    Returns:
        SideChannelVulnerability for EM analysis.
    """
    # Magnitude spectrum of each trace.
    fft_traces = np.fft.rfft(power_matrix, axis=1)
    magnitude_spectrum = np.abs(fft_traces)

    # Variance across traces at each frequency bin; a sharp peak relative
    # to the mean suggests a data-dependent emission frequency.
    freq_variance = np.var(magnitude_spectrum, axis=0)
    max_variance = float(np.max(freq_variance))
    mean_variance = float(np.mean(freq_variance))

    # Normalize by mean to get a relative peak measure.
    peak_ratio = max_variance / mean_variance if mean_variance > 0 else 0.0

    # Severity ladder. The previous ladder had a redundant ">3.0" branch
    # that also yielded LOW; collapsed here with identical behavior.
    # NOTE(review): confirm a distinct severity was not intended for
    # ratios in (3.0, 5.0].
    if peak_ratio > 10.0:
        severity = Severity.HIGH
    elif peak_ratio > 5.0:
        severity = Severity.MEDIUM
    else:
        severity = Severity.LOW

    # Confidence grows with trace count, saturating at 100 traces.
    confidence = min(1.0, power_matrix.shape[0] / 100.0)

    significant = severity != Severity.LOW
    return SideChannelVulnerability(
        vulnerability_type=VulnerabilityType.EM,
        severity=severity,
        confidence=confidence,
        evidence=f"Peak frequency variance ratio: {peak_ratio:.2f}",
        description=(
            "Frequency-domain analysis shows potential EM emission leakage"
            if significant
            else "No significant EM leakage detected"
        ),
        mitigation_suggestions=(
            [
                "Add EM shielding to device enclosure",
                "Use frequency-domain filtering",
                "Implement spread-spectrum techniques",
            ]
            if significant
            else []
        ),
        metadata={
            "peak_variance_ratio": float(peak_ratio),
            "max_variance": max_variance,
            "mean_variance": mean_variance,
        },
    )
|
|
1050
|
+
|
|
1051
|
+
def _analyze_power_variance(
    self,
    traces: Sequence[PowerTrace],
) -> SideChannelVulnerability:
    """Analyze power consumption variance across different inputs.

    Args:
        traces: Power traces with plaintexts.

    Returns:
        SideChannelVulnerability for variance analysis.
    """
    # Group traces by first plaintext byte.
    power_by_input: dict[int, list[NDArray[np.float64]]] = {}
    for trace in traces:
        if trace.plaintext is None or len(trace.plaintext) == 0:
            continue
        power_by_input.setdefault(trace.plaintext[0], []).append(trace.power)

    if len(power_by_input) < 2:
        return SideChannelVulnerability(
            vulnerability_type=VulnerabilityType.POWER,
            severity=Severity.LOW,
            confidence=0.0,
            evidence="Insufficient input variation",
            description="Cannot assess variance - need multiple input values",
        )

    # Mean trace per input value, then variance of those means across
    # inputs: high inter-input variance means consumption tracks the data.
    mean_powers = {
        byte_val: np.mean(np.array(powers), axis=0)
        for byte_val, powers in power_by_input.items()
    }
    mean_power_matrix = np.array(list(mean_powers.values()))
    inter_input_variance = np.var(mean_power_matrix, axis=0)
    max_variance = float(np.max(inter_input_variance))

    # Severity ladder. The previous ladder had a redundant ">0.01" branch
    # that also yielded LOW; collapsed here with identical behavior.
    # NOTE(review): confirm a distinct severity was not intended for
    # variances in (0.01, 0.1].
    severity = Severity.MEDIUM if max_variance > 0.1 else Severity.LOW

    # Confidence grows with trace count, saturating at 100 traces.
    confidence = min(1.0, len(traces) / 100.0)

    significant = severity != Severity.LOW
    return SideChannelVulnerability(
        vulnerability_type=VulnerabilityType.POWER,
        severity=severity,
        confidence=confidence,
        evidence=f"Max power variance across inputs: {max_variance:.6f}",
        description=(
            "Power variance analysis shows data-dependent consumption"
            if significant
            else "Power variance across inputs is low"
        ),
        mitigation_suggestions=(
            [
                "Implement power balancing techniques",
                "Use dual-rail encoding",
                "Add noise injection",
            ]
            if significant
            else []
        ),
        metadata={
            "max_variance": max_variance,
            "num_unique_inputs": len(power_by_input),
        },
    )
|