oscura 0.5.0__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +169 -167
- oscura/analyzers/__init__.py +3 -0
- oscura/analyzers/classification.py +659 -0
- oscura/analyzers/digital/__init__.py +0 -48
- oscura/analyzers/digital/edges.py +325 -65
- oscura/analyzers/digital/extraction.py +0 -195
- oscura/analyzers/digital/quality.py +293 -166
- oscura/analyzers/digital/timing.py +260 -115
- oscura/analyzers/digital/timing_numba.py +334 -0
- oscura/analyzers/entropy.py +605 -0
- oscura/analyzers/eye/diagram.py +176 -109
- oscura/analyzers/eye/metrics.py +5 -5
- oscura/analyzers/jitter/__init__.py +6 -4
- oscura/analyzers/jitter/ber.py +52 -52
- oscura/analyzers/jitter/classification.py +156 -0
- oscura/analyzers/jitter/decomposition.py +163 -113
- oscura/analyzers/jitter/spectrum.py +80 -64
- oscura/analyzers/ml/__init__.py +39 -0
- oscura/analyzers/ml/features.py +600 -0
- oscura/analyzers/ml/signal_classifier.py +604 -0
- oscura/analyzers/packet/daq.py +246 -158
- oscura/analyzers/packet/parser.py +12 -1
- oscura/analyzers/packet/payload.py +50 -2110
- oscura/analyzers/packet/payload_analysis.py +361 -181
- oscura/analyzers/packet/payload_patterns.py +133 -70
- oscura/analyzers/packet/stream.py +84 -23
- oscura/analyzers/patterns/__init__.py +26 -5
- oscura/analyzers/patterns/anomaly_detection.py +908 -0
- oscura/analyzers/patterns/clustering.py +169 -108
- oscura/analyzers/patterns/clustering_optimized.py +227 -0
- oscura/analyzers/patterns/discovery.py +1 -1
- oscura/analyzers/patterns/matching.py +581 -197
- oscura/analyzers/patterns/pattern_mining.py +778 -0
- oscura/analyzers/patterns/periodic.py +121 -38
- oscura/analyzers/patterns/sequences.py +175 -78
- oscura/analyzers/power/conduction.py +1 -1
- oscura/analyzers/power/soa.py +6 -6
- oscura/analyzers/power/switching.py +250 -110
- oscura/analyzers/protocol/__init__.py +17 -1
- oscura/analyzers/protocols/__init__.py +1 -22
- oscura/analyzers/protocols/base.py +6 -6
- oscura/analyzers/protocols/ble/__init__.py +38 -0
- oscura/analyzers/protocols/ble/analyzer.py +809 -0
- oscura/analyzers/protocols/ble/uuids.py +288 -0
- oscura/analyzers/protocols/can.py +257 -127
- oscura/analyzers/protocols/can_fd.py +107 -80
- oscura/analyzers/protocols/flexray.py +139 -80
- oscura/analyzers/protocols/hdlc.py +93 -58
- oscura/analyzers/protocols/i2c.py +247 -106
- oscura/analyzers/protocols/i2s.py +138 -86
- oscura/analyzers/protocols/industrial/__init__.py +40 -0
- oscura/analyzers/protocols/industrial/bacnet/__init__.py +33 -0
- oscura/analyzers/protocols/industrial/bacnet/analyzer.py +708 -0
- oscura/analyzers/protocols/industrial/bacnet/encoding.py +412 -0
- oscura/analyzers/protocols/industrial/bacnet/services.py +622 -0
- oscura/analyzers/protocols/industrial/ethercat/__init__.py +30 -0
- oscura/analyzers/protocols/industrial/ethercat/analyzer.py +474 -0
- oscura/analyzers/protocols/industrial/ethercat/mailbox.py +339 -0
- oscura/analyzers/protocols/industrial/ethercat/topology.py +166 -0
- oscura/analyzers/protocols/industrial/modbus/__init__.py +31 -0
- oscura/analyzers/protocols/industrial/modbus/analyzer.py +525 -0
- oscura/analyzers/protocols/industrial/modbus/crc.py +79 -0
- oscura/analyzers/protocols/industrial/modbus/functions.py +436 -0
- oscura/analyzers/protocols/industrial/opcua/__init__.py +21 -0
- oscura/analyzers/protocols/industrial/opcua/analyzer.py +552 -0
- oscura/analyzers/protocols/industrial/opcua/datatypes.py +446 -0
- oscura/analyzers/protocols/industrial/opcua/services.py +264 -0
- oscura/analyzers/protocols/industrial/profinet/__init__.py +23 -0
- oscura/analyzers/protocols/industrial/profinet/analyzer.py +441 -0
- oscura/analyzers/protocols/industrial/profinet/dcp.py +263 -0
- oscura/analyzers/protocols/industrial/profinet/ptcp.py +200 -0
- oscura/analyzers/protocols/jtag.py +180 -98
- oscura/analyzers/protocols/lin.py +219 -114
- oscura/analyzers/protocols/manchester.py +4 -4
- oscura/analyzers/protocols/onewire.py +253 -149
- oscura/analyzers/protocols/parallel_bus/__init__.py +20 -0
- oscura/analyzers/protocols/parallel_bus/centronics.py +92 -0
- oscura/analyzers/protocols/parallel_bus/gpib.py +137 -0
- oscura/analyzers/protocols/spi.py +192 -95
- oscura/analyzers/protocols/swd.py +321 -167
- oscura/analyzers/protocols/uart.py +267 -125
- oscura/analyzers/protocols/usb.py +235 -131
- oscura/analyzers/side_channel/power.py +17 -12
- oscura/analyzers/signal/__init__.py +15 -0
- oscura/analyzers/signal/timing_analysis.py +1086 -0
- oscura/analyzers/signal_integrity/__init__.py +4 -1
- oscura/analyzers/signal_integrity/sparams.py +2 -19
- oscura/analyzers/spectral/chunked.py +129 -60
- oscura/analyzers/spectral/chunked_fft.py +300 -94
- oscura/analyzers/spectral/chunked_wavelet.py +100 -80
- oscura/analyzers/statistical/checksum.py +376 -217
- oscura/analyzers/statistical/classification.py +229 -107
- oscura/analyzers/statistical/entropy.py +78 -53
- oscura/analyzers/statistics/correlation.py +407 -211
- oscura/analyzers/statistics/outliers.py +2 -2
- oscura/analyzers/statistics/streaming.py +30 -5
- oscura/analyzers/validation.py +216 -101
- oscura/analyzers/waveform/measurements.py +9 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +31 -15
- oscura/analyzers/waveform/spectral.py +500 -228
- oscura/api/__init__.py +31 -5
- oscura/api/dsl/__init__.py +582 -0
- oscura/{dsl → api/dsl}/commands.py +43 -76
- oscura/{dsl → api/dsl}/interpreter.py +26 -51
- oscura/{dsl → api/dsl}/parser.py +107 -77
- oscura/{dsl → api/dsl}/repl.py +2 -2
- oscura/api/dsl.py +1 -1
- oscura/{integrations → api/integrations}/__init__.py +1 -1
- oscura/{integrations → api/integrations}/llm.py +201 -102
- oscura/api/operators.py +3 -3
- oscura/api/optimization.py +144 -30
- oscura/api/rest_server.py +921 -0
- oscura/api/server/__init__.py +17 -0
- oscura/api/server/dashboard.py +850 -0
- oscura/api/server/static/README.md +34 -0
- oscura/api/server/templates/base.html +181 -0
- oscura/api/server/templates/export.html +120 -0
- oscura/api/server/templates/home.html +284 -0
- oscura/api/server/templates/protocols.html +58 -0
- oscura/api/server/templates/reports.html +43 -0
- oscura/api/server/templates/session_detail.html +89 -0
- oscura/api/server/templates/sessions.html +83 -0
- oscura/api/server/templates/waveforms.html +73 -0
- oscura/automotive/__init__.py +8 -1
- oscura/automotive/can/__init__.py +10 -0
- oscura/automotive/can/checksum.py +3 -1
- oscura/automotive/can/dbc_generator.py +590 -0
- oscura/automotive/can/message_wrapper.py +121 -74
- oscura/automotive/can/patterns.py +98 -21
- oscura/automotive/can/session.py +292 -56
- oscura/automotive/can/state_machine.py +6 -3
- oscura/automotive/can/stimulus_response.py +97 -75
- oscura/automotive/dbc/__init__.py +10 -2
- oscura/automotive/dbc/generator.py +84 -56
- oscura/automotive/dbc/parser.py +6 -6
- oscura/automotive/dtc/data.json +2763 -0
- oscura/automotive/dtc/database.py +2 -2
- oscura/automotive/flexray/__init__.py +31 -0
- oscura/automotive/flexray/analyzer.py +504 -0
- oscura/automotive/flexray/crc.py +185 -0
- oscura/automotive/flexray/fibex.py +449 -0
- oscura/automotive/j1939/__init__.py +45 -8
- oscura/automotive/j1939/analyzer.py +605 -0
- oscura/automotive/j1939/spns.py +326 -0
- oscura/automotive/j1939/transport.py +306 -0
- oscura/automotive/lin/__init__.py +47 -0
- oscura/automotive/lin/analyzer.py +612 -0
- oscura/automotive/loaders/blf.py +13 -2
- oscura/automotive/loaders/csv_can.py +143 -72
- oscura/automotive/loaders/dispatcher.py +50 -2
- oscura/automotive/loaders/mdf.py +86 -45
- oscura/automotive/loaders/pcap.py +111 -61
- oscura/automotive/uds/__init__.py +4 -0
- oscura/automotive/uds/analyzer.py +725 -0
- oscura/automotive/uds/decoder.py +140 -58
- oscura/automotive/uds/models.py +7 -1
- oscura/automotive/visualization.py +1 -1
- oscura/cli/analyze.py +348 -0
- oscura/cli/batch.py +142 -122
- oscura/cli/benchmark.py +275 -0
- oscura/cli/characterize.py +137 -82
- oscura/cli/compare.py +224 -131
- oscura/cli/completion.py +250 -0
- oscura/cli/config_cmd.py +361 -0
- oscura/cli/decode.py +164 -87
- oscura/cli/export.py +286 -0
- oscura/cli/main.py +115 -31
- oscura/{onboarding → cli/onboarding}/__init__.py +3 -3
- oscura/{onboarding → cli/onboarding}/help.py +80 -58
- oscura/{onboarding → cli/onboarding}/tutorials.py +97 -72
- oscura/{onboarding → cli/onboarding}/wizard.py +55 -36
- oscura/cli/progress.py +147 -0
- oscura/cli/shell.py +157 -135
- oscura/cli/validate_cmd.py +204 -0
- oscura/cli/visualize.py +158 -0
- oscura/convenience.py +125 -79
- oscura/core/__init__.py +4 -2
- oscura/core/backend_selector.py +3 -3
- oscura/core/cache.py +126 -15
- oscura/core/cancellation.py +1 -1
- oscura/{config → core/config}/__init__.py +20 -11
- oscura/{config → core/config}/defaults.py +1 -1
- oscura/{config → core/config}/loader.py +7 -5
- oscura/{config → core/config}/memory.py +5 -5
- oscura/{config → core/config}/migration.py +1 -1
- oscura/{config → core/config}/pipeline.py +99 -23
- oscura/{config → core/config}/preferences.py +1 -1
- oscura/{config → core/config}/protocol.py +3 -3
- oscura/{config → core/config}/schema.py +426 -272
- oscura/{config → core/config}/settings.py +1 -1
- oscura/{config → core/config}/thresholds.py +195 -153
- oscura/core/correlation.py +5 -6
- oscura/core/cross_domain.py +0 -2
- oscura/core/debug.py +9 -5
- oscura/{extensibility → core/extensibility}/docs.py +158 -70
- oscura/{extensibility → core/extensibility}/extensions.py +160 -76
- oscura/{extensibility → core/extensibility}/logging.py +1 -1
- oscura/{extensibility → core/extensibility}/measurements.py +1 -1
- oscura/{extensibility → core/extensibility}/plugins.py +1 -1
- oscura/{extensibility → core/extensibility}/templates.py +73 -3
- oscura/{extensibility → core/extensibility}/validation.py +1 -1
- oscura/core/gpu_backend.py +11 -7
- oscura/core/log_query.py +101 -11
- oscura/core/logging.py +126 -54
- oscura/core/logging_advanced.py +5 -5
- oscura/core/memory_limits.py +108 -70
- oscura/core/memory_monitor.py +2 -2
- oscura/core/memory_progress.py +7 -7
- oscura/core/memory_warnings.py +1 -1
- oscura/core/numba_backend.py +13 -13
- oscura/{plugins → core/plugins}/__init__.py +9 -9
- oscura/{plugins → core/plugins}/base.py +7 -7
- oscura/{plugins → core/plugins}/cli.py +3 -3
- oscura/{plugins → core/plugins}/discovery.py +186 -106
- oscura/{plugins → core/plugins}/lifecycle.py +1 -1
- oscura/{plugins → core/plugins}/manager.py +7 -7
- oscura/{plugins → core/plugins}/registry.py +3 -3
- oscura/{plugins → core/plugins}/versioning.py +1 -1
- oscura/core/progress.py +16 -1
- oscura/core/provenance.py +8 -2
- oscura/{schemas → core/schemas}/__init__.py +2 -2
- oscura/core/schemas/bus_configuration.json +322 -0
- oscura/core/schemas/device_mapping.json +182 -0
- oscura/core/schemas/packet_format.json +418 -0
- oscura/core/schemas/protocol_definition.json +363 -0
- oscura/core/types.py +4 -0
- oscura/core/uncertainty.py +3 -3
- oscura/correlation/__init__.py +52 -0
- oscura/correlation/multi_protocol.py +811 -0
- oscura/discovery/auto_decoder.py +117 -35
- oscura/discovery/comparison.py +191 -86
- oscura/discovery/quality_validator.py +155 -68
- oscura/discovery/signal_detector.py +196 -79
- oscura/export/__init__.py +18 -20
- oscura/export/kaitai_struct.py +513 -0
- oscura/export/scapy_layer.py +801 -0
- oscura/export/wireshark/README.md +15 -15
- oscura/export/wireshark/generator.py +1 -1
- oscura/export/wireshark/templates/dissector.lua.j2 +2 -2
- oscura/export/wireshark_dissector.py +746 -0
- oscura/guidance/wizard.py +207 -111
- oscura/hardware/__init__.py +19 -0
- oscura/{acquisition → hardware/acquisition}/__init__.py +4 -4
- oscura/{acquisition → hardware/acquisition}/file.py +2 -2
- oscura/{acquisition → hardware/acquisition}/hardware.py +7 -7
- oscura/{acquisition → hardware/acquisition}/saleae.py +15 -12
- oscura/{acquisition → hardware/acquisition}/socketcan.py +1 -1
- oscura/{acquisition → hardware/acquisition}/streaming.py +2 -2
- oscura/{acquisition → hardware/acquisition}/synthetic.py +3 -3
- oscura/{acquisition → hardware/acquisition}/visa.py +33 -11
- oscura/hardware/firmware/__init__.py +29 -0
- oscura/hardware/firmware/pattern_recognition.py +874 -0
- oscura/hardware/hal_detector.py +736 -0
- oscura/hardware/security/__init__.py +37 -0
- oscura/hardware/security/side_channel_detector.py +1126 -0
- oscura/inference/__init__.py +4 -0
- oscura/inference/active_learning/README.md +7 -7
- oscura/inference/active_learning/observation_table.py +4 -1
- oscura/inference/alignment.py +216 -123
- oscura/inference/bayesian.py +113 -33
- oscura/inference/crc_reverse.py +101 -55
- oscura/inference/logic.py +6 -2
- oscura/inference/message_format.py +342 -183
- oscura/inference/protocol.py +95 -44
- oscura/inference/protocol_dsl.py +180 -82
- oscura/inference/signal_intelligence.py +1439 -706
- oscura/inference/spectral.py +99 -57
- oscura/inference/state_machine.py +810 -158
- oscura/inference/stream.py +270 -110
- oscura/iot/__init__.py +34 -0
- oscura/iot/coap/__init__.py +32 -0
- oscura/iot/coap/analyzer.py +668 -0
- oscura/iot/coap/options.py +212 -0
- oscura/iot/lorawan/__init__.py +21 -0
- oscura/iot/lorawan/crypto.py +206 -0
- oscura/iot/lorawan/decoder.py +801 -0
- oscura/iot/lorawan/mac_commands.py +341 -0
- oscura/iot/mqtt/__init__.py +27 -0
- oscura/iot/mqtt/analyzer.py +999 -0
- oscura/iot/mqtt/properties.py +315 -0
- oscura/iot/zigbee/__init__.py +31 -0
- oscura/iot/zigbee/analyzer.py +615 -0
- oscura/iot/zigbee/security.py +153 -0
- oscura/iot/zigbee/zcl.py +349 -0
- oscura/jupyter/display.py +125 -45
- oscura/{exploratory → jupyter/exploratory}/__init__.py +8 -8
- oscura/{exploratory → jupyter/exploratory}/error_recovery.py +298 -141
- oscura/jupyter/exploratory/fuzzy.py +746 -0
- oscura/{exploratory → jupyter/exploratory}/fuzzy_advanced.py +258 -100
- oscura/{exploratory → jupyter/exploratory}/legacy.py +464 -242
- oscura/{exploratory → jupyter/exploratory}/parse.py +167 -145
- oscura/{exploratory → jupyter/exploratory}/recovery.py +119 -87
- oscura/jupyter/exploratory/sync.py +612 -0
- oscura/{exploratory → jupyter/exploratory}/unknown.py +299 -176
- oscura/jupyter/magic.py +4 -4
- oscura/{ui → jupyter/ui}/__init__.py +2 -2
- oscura/{ui → jupyter/ui}/formatters.py +3 -3
- oscura/{ui → jupyter/ui}/progressive_display.py +153 -82
- oscura/loaders/__init__.py +171 -63
- oscura/loaders/binary.py +88 -1
- oscura/loaders/chipwhisperer.py +153 -137
- oscura/loaders/configurable.py +208 -86
- oscura/loaders/csv_loader.py +458 -215
- oscura/loaders/hdf5_loader.py +278 -119
- oscura/loaders/lazy.py +87 -54
- oscura/loaders/mmap_loader.py +1 -1
- oscura/loaders/numpy_loader.py +253 -116
- oscura/loaders/pcap.py +226 -151
- oscura/loaders/rigol.py +110 -49
- oscura/loaders/sigrok.py +201 -78
- oscura/loaders/tdms.py +81 -58
- oscura/loaders/tektronix.py +291 -174
- oscura/loaders/touchstone.py +182 -87
- oscura/loaders/vcd.py +215 -117
- oscura/loaders/wav.py +155 -68
- oscura/reporting/__init__.py +9 -7
- oscura/reporting/analyze.py +352 -146
- oscura/reporting/argument_preparer.py +69 -14
- oscura/reporting/auto_report.py +97 -61
- oscura/reporting/batch.py +131 -58
- oscura/reporting/chart_selection.py +57 -45
- oscura/reporting/comparison.py +63 -17
- oscura/reporting/content/executive.py +76 -24
- oscura/reporting/core_formats/multi_format.py +11 -8
- oscura/reporting/engine.py +312 -158
- oscura/reporting/enhanced_reports.py +949 -0
- oscura/reporting/export.py +86 -43
- oscura/reporting/formatting/numbers.py +69 -42
- oscura/reporting/html.py +139 -58
- oscura/reporting/index.py +137 -65
- oscura/reporting/output.py +158 -67
- oscura/reporting/pdf.py +67 -102
- oscura/reporting/plots.py +191 -112
- oscura/reporting/sections.py +88 -47
- oscura/reporting/standards.py +104 -61
- oscura/reporting/summary_generator.py +75 -55
- oscura/reporting/tables.py +138 -54
- oscura/reporting/templates/enhanced/protocol_re.html +525 -0
- oscura/reporting/templates/index.md +13 -13
- oscura/sessions/__init__.py +14 -23
- oscura/sessions/base.py +3 -3
- oscura/sessions/blackbox.py +106 -10
- oscura/sessions/generic.py +2 -2
- oscura/sessions/legacy.py +783 -0
- oscura/side_channel/__init__.py +63 -0
- oscura/side_channel/dpa.py +1025 -0
- oscura/utils/__init__.py +15 -1
- oscura/utils/autodetect.py +1 -5
- oscura/utils/bitwise.py +118 -0
- oscura/{builders → utils/builders}/__init__.py +1 -1
- oscura/{comparison → utils/comparison}/__init__.py +6 -6
- oscura/{comparison → utils/comparison}/compare.py +202 -101
- oscura/{comparison → utils/comparison}/golden.py +83 -63
- oscura/{comparison → utils/comparison}/limits.py +313 -89
- oscura/{comparison → utils/comparison}/mask.py +151 -45
- oscura/{comparison → utils/comparison}/trace_diff.py +1 -1
- oscura/{comparison → utils/comparison}/visualization.py +147 -89
- oscura/{component → utils/component}/__init__.py +3 -3
- oscura/{component → utils/component}/impedance.py +122 -58
- oscura/{component → utils/component}/reactive.py +165 -168
- oscura/{component → utils/component}/transmission_line.py +3 -3
- oscura/{filtering → utils/filtering}/__init__.py +6 -6
- oscura/{filtering → utils/filtering}/base.py +1 -1
- oscura/{filtering → utils/filtering}/convenience.py +2 -2
- oscura/{filtering → utils/filtering}/design.py +169 -93
- oscura/{filtering → utils/filtering}/filters.py +2 -2
- oscura/{filtering → utils/filtering}/introspection.py +2 -2
- oscura/utils/geometry.py +31 -0
- oscura/utils/imports.py +184 -0
- oscura/utils/lazy.py +1 -1
- oscura/{math → utils/math}/__init__.py +2 -2
- oscura/{math → utils/math}/arithmetic.py +114 -48
- oscura/{math → utils/math}/interpolation.py +139 -106
- oscura/utils/memory.py +129 -66
- oscura/utils/memory_advanced.py +92 -9
- oscura/utils/memory_extensions.py +10 -8
- oscura/{optimization → utils/optimization}/__init__.py +1 -1
- oscura/{optimization → utils/optimization}/search.py +2 -2
- oscura/utils/performance/__init__.py +58 -0
- oscura/utils/performance/caching.py +889 -0
- oscura/utils/performance/lsh_clustering.py +333 -0
- oscura/utils/performance/memory_optimizer.py +699 -0
- oscura/utils/performance/optimizations.py +675 -0
- oscura/utils/performance/parallel.py +654 -0
- oscura/utils/performance/profiling.py +661 -0
- oscura/{pipeline → utils/pipeline}/base.py +1 -1
- oscura/{pipeline → utils/pipeline}/composition.py +11 -3
- oscura/{pipeline → utils/pipeline}/parallel.py +3 -2
- oscura/{pipeline → utils/pipeline}/pipeline.py +1 -1
- oscura/{pipeline → utils/pipeline}/reverse_engineering.py +412 -221
- oscura/{search → utils/search}/__init__.py +3 -3
- oscura/{search → utils/search}/anomaly.py +188 -58
- oscura/utils/search/context.py +294 -0
- oscura/{search → utils/search}/pattern.py +138 -10
- oscura/utils/serial.py +51 -0
- oscura/utils/storage/__init__.py +61 -0
- oscura/utils/storage/database.py +1166 -0
- oscura/{streaming → utils/streaming}/chunked.py +302 -143
- oscura/{streaming → utils/streaming}/progressive.py +1 -1
- oscura/{streaming → utils/streaming}/realtime.py +3 -2
- oscura/{triggering → utils/triggering}/__init__.py +6 -6
- oscura/{triggering → utils/triggering}/base.py +6 -6
- oscura/{triggering → utils/triggering}/edge.py +2 -2
- oscura/{triggering → utils/triggering}/pattern.py +2 -2
- oscura/{triggering → utils/triggering}/pulse.py +115 -74
- oscura/{triggering → utils/triggering}/window.py +2 -2
- oscura/utils/validation.py +32 -0
- oscura/validation/__init__.py +121 -0
- oscura/{compliance → validation/compliance}/__init__.py +5 -5
- oscura/{compliance → validation/compliance}/advanced.py +5 -5
- oscura/{compliance → validation/compliance}/masks.py +1 -1
- oscura/{compliance → validation/compliance}/reporting.py +127 -53
- oscura/{compliance → validation/compliance}/testing.py +114 -52
- oscura/validation/compliance_tests.py +915 -0
- oscura/validation/fuzzer.py +990 -0
- oscura/validation/grammar_tests.py +596 -0
- oscura/validation/grammar_validator.py +904 -0
- oscura/validation/hil_testing.py +977 -0
- oscura/{quality → validation/quality}/__init__.py +4 -4
- oscura/{quality → validation/quality}/ensemble.py +251 -171
- oscura/{quality → validation/quality}/explainer.py +3 -3
- oscura/{quality → validation/quality}/scoring.py +1 -1
- oscura/{quality → validation/quality}/warnings.py +4 -4
- oscura/validation/regression_suite.py +808 -0
- oscura/validation/replay.py +788 -0
- oscura/{testing → validation/testing}/__init__.py +2 -2
- oscura/{testing → validation/testing}/synthetic.py +5 -5
- oscura/visualization/__init__.py +9 -0
- oscura/visualization/accessibility.py +1 -1
- oscura/visualization/annotations.py +64 -67
- oscura/visualization/colors.py +7 -7
- oscura/visualization/digital.py +180 -81
- oscura/visualization/eye.py +236 -85
- oscura/visualization/interactive.py +320 -143
- oscura/visualization/jitter.py +587 -247
- oscura/visualization/layout.py +169 -134
- oscura/visualization/optimization.py +103 -52
- oscura/visualization/palettes.py +1 -1
- oscura/visualization/power.py +427 -211
- oscura/visualization/power_extended.py +626 -297
- oscura/visualization/presets.py +2 -0
- oscura/visualization/protocols.py +495 -181
- oscura/visualization/render.py +79 -63
- oscura/visualization/reverse_engineering.py +171 -124
- oscura/visualization/signal_integrity.py +460 -279
- oscura/visualization/specialized.py +190 -100
- oscura/visualization/spectral.py +670 -255
- oscura/visualization/thumbnails.py +166 -137
- oscura/visualization/waveform.py +150 -63
- oscura/workflows/__init__.py +3 -0
- oscura/{batch → workflows/batch}/__init__.py +5 -5
- oscura/{batch → workflows/batch}/advanced.py +150 -75
- oscura/workflows/batch/aggregate.py +531 -0
- oscura/workflows/batch/analyze.py +236 -0
- oscura/{batch → workflows/batch}/logging.py +2 -2
- oscura/{batch → workflows/batch}/metrics.py +1 -1
- oscura/workflows/complete_re.py +1144 -0
- oscura/workflows/compliance.py +44 -54
- oscura/workflows/digital.py +197 -51
- oscura/workflows/legacy/__init__.py +12 -0
- oscura/{workflow → workflows/legacy}/dag.py +4 -1
- oscura/workflows/multi_trace.py +9 -9
- oscura/workflows/power.py +42 -62
- oscura/workflows/protocol.py +82 -49
- oscura/workflows/reverse_engineering.py +351 -150
- oscura/workflows/signal_integrity.py +157 -82
- oscura-0.6.0.dist-info/METADATA +643 -0
- oscura-0.6.0.dist-info/RECORD +590 -0
- oscura/analyzers/digital/ic_database.py +0 -498
- oscura/analyzers/digital/timing_paths.py +0 -339
- oscura/analyzers/digital/vintage.py +0 -377
- oscura/analyzers/digital/vintage_result.py +0 -148
- oscura/analyzers/protocols/parallel_bus.py +0 -449
- oscura/batch/aggregate.py +0 -300
- oscura/batch/analyze.py +0 -139
- oscura/dsl/__init__.py +0 -73
- oscura/exceptions.py +0 -59
- oscura/exploratory/fuzzy.py +0 -513
- oscura/exploratory/sync.py +0 -384
- oscura/export/wavedrom.py +0 -430
- oscura/exporters/__init__.py +0 -94
- oscura/exporters/csv.py +0 -303
- oscura/exporters/exporters.py +0 -44
- oscura/exporters/hdf5.py +0 -217
- oscura/exporters/html_export.py +0 -701
- oscura/exporters/json_export.py +0 -338
- oscura/exporters/markdown_export.py +0 -367
- oscura/exporters/matlab_export.py +0 -354
- oscura/exporters/npz_export.py +0 -219
- oscura/exporters/spice_export.py +0 -210
- oscura/exporters/vintage_logic_csv.py +0 -247
- oscura/reporting/vintage_logic_report.py +0 -523
- oscura/search/context.py +0 -149
- oscura/session/__init__.py +0 -34
- oscura/session/annotations.py +0 -289
- oscura/session/history.py +0 -313
- oscura/session/session.py +0 -520
- oscura/visualization/digital_advanced.py +0 -718
- oscura/visualization/figure_manager.py +0 -156
- oscura/workflow/__init__.py +0 -13
- oscura-0.5.0.dist-info/METADATA +0 -407
- oscura-0.5.0.dist-info/RECORD +0 -486
- /oscura/core/{config.py → config/legacy.py} +0 -0
- /oscura/{extensibility → core/extensibility}/__init__.py +0 -0
- /oscura/{extensibility → core/extensibility}/registry.py +0 -0
- /oscura/{plugins → core/plugins}/isolation.py +0 -0
- /oscura/{builders → utils/builders}/signal_builder.py +0 -0
- /oscura/{optimization → utils/optimization}/parallel.py +0 -0
- /oscura/{pipeline → utils/pipeline}/__init__.py +0 -0
- /oscura/{streaming → utils/streaming}/__init__.py +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/WHEEL +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/entry_points.txt +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -27,134 +27,133 @@ from dataclasses import dataclass
|
|
|
27
27
|
from typing import TYPE_CHECKING, Any, cast
|
|
28
28
|
|
|
29
29
|
import numpy as np
|
|
30
|
+
from numpy.typing import NDArray
|
|
30
31
|
|
|
31
32
|
if TYPE_CHECKING:
|
|
32
|
-
from numpy.typing import NDArray
|
|
33
|
-
|
|
34
33
|
from oscura.core.types import WaveformTrace
|
|
35
34
|
from oscura.reporting.config import AnalysisDomain
|
|
36
35
|
|
|
37
36
|
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
*,
|
|
42
|
-
digital_threshold_ratio: float = 0.8,
|
|
43
|
-
dc_threshold_percent: float = 90.0,
|
|
44
|
-
periodicity_threshold: float = 0.7,
|
|
45
|
-
) -> dict[str, Any]:
|
|
46
|
-
"""Classify signal type and characteristics.
|
|
37
|
+
# =============================================================================
|
|
38
|
+
# Helper Functions for classify_signal
|
|
39
|
+
# =============================================================================
|
|
47
40
|
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
41
|
+
|
|
42
|
+
def _extract_signal_data(
|
|
43
|
+
trace: WaveformTrace | NDArray[np.floating[Any]],
|
|
44
|
+
sample_rate: float,
|
|
45
|
+
) -> tuple[NDArray[np.floating[Any]], float]:
|
|
46
|
+
"""Extract signal data and sample rate from trace or ndarray.
|
|
51
47
|
|
|
52
48
|
Args:
|
|
53
|
-
trace: Input waveform trace or numpy array
|
|
49
|
+
trace: Input waveform trace or numpy array.
|
|
54
50
|
sample_rate: Sample rate in Hz (only used if trace is ndarray).
|
|
55
|
-
digital_threshold_ratio: Ratio of samples at two levels to consider digital (0-1).
|
|
56
|
-
dc_threshold_percent: Percentage of DC component to classify as DC signal.
|
|
57
|
-
periodicity_threshold: Correlation threshold for periodic detection (0-1).
|
|
58
51
|
|
|
59
52
|
Returns:
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
- dc_component: True if significant DC offset present
|
|
66
|
-
- frequency_estimate: Estimated fundamental frequency in Hz (or None)
|
|
67
|
-
- dominant_frequency: Same as frequency_estimate (for compatibility)
|
|
68
|
-
- snr_db: Estimated SNR in dB (or None)
|
|
69
|
-
- confidence: Classification confidence (0.0-1.0)
|
|
70
|
-
- noise_level: Estimated noise level in signal units
|
|
71
|
-
- levels: For digital signals, dict with "low" and "high" levels
|
|
53
|
+
Tuple of (data array, sample rate).
|
|
54
|
+
"""
|
|
55
|
+
if isinstance(trace, np.ndarray):
|
|
56
|
+
return trace, sample_rate
|
|
57
|
+
return trace.data, trace.metadata.sample_rate
|
|
72
58
|
|
|
73
|
-
Example:
|
|
74
|
-
>>> trace = osc.load('square_wave.wfm')
|
|
75
|
-
>>> info = osc.classify_signal(trace)
|
|
76
|
-
>>> print(f"Type: {info['signal_type']}")
|
|
77
|
-
Type: digital
|
|
78
|
-
>>> print(f"Characteristics: {info['characteristics']}")
|
|
79
|
-
Characteristics: ['periodic', 'clean']
|
|
80
|
-
>>> print(f"Frequency: {info['frequency_estimate']:.3e} Hz")
|
|
81
|
-
Frequency: 1.000e+06 Hz
|
|
82
59
|
|
|
83
|
-
|
|
84
|
-
|
|
60
|
+
def _create_insufficient_data_result() -> dict[str, Any]:
|
|
61
|
+
"""Create result dict for insufficient data case.
|
|
62
|
+
|
|
63
|
+
Returns:
|
|
64
|
+
Classification result dict with unknown type.
|
|
85
65
|
"""
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
66
|
+
return {
|
|
67
|
+
"type": "unknown",
|
|
68
|
+
"signal_type": "unknown",
|
|
69
|
+
"is_digital": False,
|
|
70
|
+
"is_periodic": False,
|
|
71
|
+
"characteristics": ["insufficient_data"],
|
|
72
|
+
"dc_component": False,
|
|
73
|
+
"frequency_estimate": None,
|
|
74
|
+
"dominant_frequency": None,
|
|
75
|
+
"snr_db": None,
|
|
76
|
+
"confidence": 0.0,
|
|
77
|
+
"noise_level": 0.0,
|
|
78
|
+
"levels": None,
|
|
79
|
+
}
|
|
93
80
|
|
|
94
|
-
n = len(data)
|
|
95
81
|
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
"is_periodic": False,
|
|
102
|
-
"characteristics": ["insufficient_data"],
|
|
103
|
-
"dc_component": False,
|
|
104
|
-
"frequency_estimate": None,
|
|
105
|
-
"dominant_frequency": None,
|
|
106
|
-
"snr_db": None,
|
|
107
|
-
"confidence": 0.0,
|
|
108
|
-
"noise_level": 0.0,
|
|
109
|
-
"levels": None,
|
|
110
|
-
}
|
|
82
|
+
def _compute_signal_statistics(data: NDArray[np.floating[Any]]) -> dict[str, float]:
|
|
83
|
+
"""Compute basic signal statistics.
|
|
84
|
+
|
|
85
|
+
Args:
|
|
86
|
+
data: Signal data array.
|
|
111
87
|
|
|
112
|
-
|
|
88
|
+
Returns:
|
|
89
|
+
Dict with mean, std, min, max, amplitude statistics.
|
|
90
|
+
"""
|
|
113
91
|
mean_val = float(np.mean(data))
|
|
114
92
|
std_val = float(np.std(data))
|
|
115
93
|
min_val = float(np.min(data))
|
|
116
94
|
max_val = float(np.max(data))
|
|
117
95
|
amplitude = max_val - min_val
|
|
118
96
|
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
97
|
+
return {
|
|
98
|
+
"mean": mean_val,
|
|
99
|
+
"std": std_val,
|
|
100
|
+
"min": min_val,
|
|
101
|
+
"max": max_val,
|
|
102
|
+
"amplitude": amplitude,
|
|
103
|
+
}
|
|
104
|
+
|
|
123
105
|
|
|
124
|
-
|
|
106
|
+
def _is_dc_signal(stats: dict[str, float]) -> bool:
|
|
107
|
+
"""Check if signal is DC (very low variation).
|
|
108
|
+
|
|
109
|
+
Args:
|
|
110
|
+
stats: Signal statistics from _compute_signal_statistics.
|
|
111
|
+
|
|
112
|
+
Returns:
|
|
113
|
+
True if signal is DC.
|
|
114
|
+
"""
|
|
125
115
|
# Use coefficient of variation (CV) for DC detection
|
|
126
|
-
cv =
|
|
127
|
-
|
|
128
|
-
signal_type = "dc"
|
|
129
|
-
characteristics.append("constant")
|
|
130
|
-
confidence = 0.95
|
|
131
|
-
return {
|
|
132
|
-
"type": signal_type,
|
|
133
|
-
"signal_type": signal_type,
|
|
134
|
-
"is_digital": False,
|
|
135
|
-
"is_periodic": False,
|
|
136
|
-
"characteristics": characteristics,
|
|
137
|
-
"dc_component": True,
|
|
138
|
-
"frequency_estimate": None,
|
|
139
|
-
"dominant_frequency": None,
|
|
140
|
-
"snr_db": None,
|
|
141
|
-
"confidence": confidence,
|
|
142
|
-
"noise_level": std_val,
|
|
143
|
-
"levels": None,
|
|
144
|
-
}
|
|
116
|
+
cv = stats["std"] / (abs(stats["mean"]) + stats["amplitude"] / 2 + 1e-12)
|
|
117
|
+
return stats["amplitude"] < 1e-9 or cv < 0.005
|
|
145
118
|
|
|
146
|
-
# 2. Check for digital signal (bimodal distribution)
|
|
147
|
-
is_digital, digital_levels, digital_confidence = _detect_digital_signal(
|
|
148
|
-
data, digital_threshold_ratio
|
|
149
|
-
)
|
|
150
119
|
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
confidence = digital_confidence
|
|
154
|
-
characteristics.append("digital_levels")
|
|
120
|
+
def _create_dc_result(stats: dict[str, float]) -> dict[str, Any]:
|
|
121
|
+
"""Create result dict for DC signal.
|
|
155
122
|
|
|
156
|
-
|
|
157
|
-
|
|
123
|
+
Args:
|
|
124
|
+
stats: Signal statistics.
|
|
125
|
+
|
|
126
|
+
Returns:
|
|
127
|
+
Classification result dict for DC signal.
|
|
128
|
+
"""
|
|
129
|
+
return {
|
|
130
|
+
"type": "dc",
|
|
131
|
+
"signal_type": "dc",
|
|
132
|
+
"is_digital": False,
|
|
133
|
+
"is_periodic": False,
|
|
134
|
+
"characteristics": ["constant"],
|
|
135
|
+
"dc_component": True,
|
|
136
|
+
"frequency_estimate": None,
|
|
137
|
+
"dominant_frequency": None,
|
|
138
|
+
"snr_db": None,
|
|
139
|
+
"confidence": 0.95,
|
|
140
|
+
"noise_level": stats["std"],
|
|
141
|
+
"levels": None,
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _add_noise_characteristics(
|
|
146
|
+
characteristics: list[str],
|
|
147
|
+
noise_level: float,
|
|
148
|
+
amplitude: float,
|
|
149
|
+
) -> None:
|
|
150
|
+
"""Add noise-related characteristics to list.
|
|
151
|
+
|
|
152
|
+
Args:
|
|
153
|
+
characteristics: List to append characteristics to (modified in-place).
|
|
154
|
+
noise_level: Estimated noise level.
|
|
155
|
+
amplitude: Signal amplitude.
|
|
156
|
+
"""
|
|
158
157
|
noise_ratio = noise_level / (amplitude + 1e-12)
|
|
159
158
|
|
|
160
159
|
if noise_ratio < 0.05:
|
|
@@ -166,91 +165,195 @@ def classify_signal(
|
|
|
166
165
|
else:
|
|
167
166
|
characteristics.append("noisy")
|
|
168
167
|
|
|
169
|
-
|
|
168
|
+
|
|
169
|
+
def _classify_periodicity(
    data: NDArray[np.floating[Any]],
    sample_rate: float,
    threshold: float,
    is_digital: bool,
    digital_levels: dict[str, float] | None,
    n: int,
) -> tuple[bool, float | None, float]:
    """Detect signal periodicity by combining several strategies.

    Autocorrelation runs first; digital signals that it misses fall back
    to edge-timing analysis, and sufficiently long records are cross-checked
    against an FFT-based estimate.

    Args:
        data: Signal data array.
        sample_rate: Sample rate in Hz.
        threshold: Periodicity threshold for autocorrelation.
        is_digital: Whether signal is digital.
        digital_levels: Digital levels dict (if digital).
        n: Data length.

    Returns:
        Tuple of (is_periodic, period_estimate, periodicity_score).
    """
    periodic, period, score = _detect_periodicity(data, sample_rate, threshold)

    # Edge-based detection copes better with digital signals spanning few periods.
    if is_digital and not periodic:
        found, edge_period, edge_score = _detect_edge_periodicity(
            data, sample_rate, digital_levels
        )
        if found:
            return found, edge_period, edge_score

    # FFT cross-check guards against autocorrelation locking onto harmonics
    # of undersampled signals; only meaningful with enough samples.
    if n >= 64:
        fft_found, fft_period, fft_score = _detect_periodicity_fft(data, sample_rate)
        if fft_found:
            if periodic and period is not None:
                # Both methods fired: reconcile conflicting estimates.
                period, score = _reconcile_period_estimates(period, fft_period, fft_score)
            else:
                # Only the FFT detected periodicity.
                periodic, period, score = fft_found, fft_period, fft_score

    return periodic, period, score
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def _reconcile_period_estimates(
|
|
223
|
+
auto_period: float,
|
|
224
|
+
fft_period: float | None,
|
|
225
|
+
fft_confidence: float,
|
|
226
|
+
) -> tuple[float | None, float]:
|
|
227
|
+
"""Reconcile autocorrelation and FFT period estimates.
|
|
228
|
+
|
|
229
|
+
Args:
|
|
230
|
+
auto_period: Period from autocorrelation.
|
|
231
|
+
fft_period: Period from FFT.
|
|
232
|
+
fft_confidence: Confidence from FFT.
|
|
233
|
+
|
|
234
|
+
Returns:
|
|
235
|
+
Tuple of (reconciled period, confidence).
|
|
236
|
+
"""
|
|
237
|
+
if fft_period is None or fft_period <= 0:
|
|
238
|
+
return auto_period, fft_confidence
|
|
239
|
+
|
|
240
|
+
# Calculate frequency ratio
|
|
241
|
+
auto_freq = 1.0 / auto_period if auto_period > 0 else 0
|
|
242
|
+
fft_freq = 1.0 / fft_period if fft_period > 0 else 0
|
|
243
|
+
freq_ratio = max(auto_freq, fft_freq) / (min(auto_freq, fft_freq) + 1e-12)
|
|
244
|
+
|
|
245
|
+
# If frequencies differ >20%, prefer higher frequency
|
|
246
|
+
if freq_ratio > 1.2 and fft_freq > auto_freq:
|
|
247
|
+
return fft_period, fft_confidence
|
|
248
|
+
|
|
249
|
+
return auto_period, fft_confidence
|
|
250
|
+
|
|
218
251
|
|
|
219
|
-
|
|
220
|
-
|
|
252
|
+
def _add_transient_characteristics(
    characteristics: list[str],
    data: NDArray[np.floating[Any]],
    stats: dict[str, float],
    digital_levels: dict[str, float] | None,
) -> None:
    """Append pulsed/transient tags to the characteristics list.

    Args:
        characteristics: List to append characteristics to (modified in-place).
        data: Signal data array.
        stats: Signal statistics.
        digital_levels: Digital levels dict (if digital).
    """
    transitions = _count_edges(data, digital_levels)
    # Average spacing between edges; max() avoids division by zero.
    samples_per_transition = len(data) / max(transitions, 1)

    if transitions > 2 and samples_per_transition > 100:
        # Several widely-spaced edges: pulsed waveform.
        characteristics.append("pulsed")
    elif transitions < 3 and stats["amplitude"] > stats["std"] * 2:
        # Few edges with real amplitude swing: single transient event.
        characteristics.append("transient")
|
|
230
273
|
|
|
231
|
-
# 7. Check for mixed signal (both digital transitions and analog variation)
|
|
232
|
-
if is_digital and digital_levels is not None:
|
|
233
|
-
# Check if there's significant variation within digital levels
|
|
234
|
-
low_region = data[data < (digital_levels["low"] + digital_levels["high"]) / 2]
|
|
235
|
-
high_region = data[data >= (digital_levels["low"] + digital_levels["high"]) / 2]
|
|
236
|
-
|
|
237
|
-
if len(low_region) > 0 and len(high_region) > 0:
|
|
238
|
-
low_std = np.std(low_region)
|
|
239
|
-
high_std = np.std(high_region)
|
|
240
|
-
level_separation = digital_levels["high"] - digital_levels["low"]
|
|
241
|
-
|
|
242
|
-
if low_std > level_separation * 0.1 or high_std > level_separation * 0.1:
|
|
243
|
-
signal_type = "mixed"
|
|
244
|
-
characteristics.append("analog_variation")
|
|
245
|
-
|
|
246
|
-
# Calculate SNR estimate
|
|
247
|
-
snr_db = None
|
|
248
|
-
if amplitude > noise_level * 10:
|
|
249
|
-
signal_power = amplitude**2 / 8 # Approximate for most waveforms
|
|
250
|
-
noise_power = noise_level**2
|
|
251
|
-
if noise_power > 1e-20:
|
|
252
|
-
snr_db = 10 * np.log10(signal_power / noise_power)
|
|
253
274
|
|
|
275
|
+
def _detect_mixed_signal(
|
|
276
|
+
data: NDArray[np.floating[Any]],
|
|
277
|
+
digital_levels: dict[str, float],
|
|
278
|
+
) -> bool:
|
|
279
|
+
"""Check if signal is mixed (digital transitions + analog variation).
|
|
280
|
+
|
|
281
|
+
Args:
|
|
282
|
+
data: Signal data array.
|
|
283
|
+
digital_levels: Digital levels dict with "low" and "high" keys.
|
|
284
|
+
|
|
285
|
+
Returns:
|
|
286
|
+
True if signal appears mixed.
|
|
287
|
+
"""
|
|
288
|
+
threshold = (digital_levels["low"] + digital_levels["high"]) / 2
|
|
289
|
+
low_region = data[data < threshold]
|
|
290
|
+
high_region = data[data >= threshold]
|
|
291
|
+
|
|
292
|
+
if len(low_region) == 0 or len(high_region) == 0:
|
|
293
|
+
return False
|
|
294
|
+
|
|
295
|
+
low_std = np.std(low_region)
|
|
296
|
+
high_std = np.std(high_region)
|
|
297
|
+
level_separation = digital_levels["high"] - digital_levels["low"]
|
|
298
|
+
|
|
299
|
+
# Type narrowing: numpy comparison returns np.bool_
|
|
300
|
+
result: bool = bool(low_std > level_separation * 0.1 or high_std > level_separation * 0.1)
|
|
301
|
+
return result
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
def _compute_snr(amplitude: float, noise_level: float) -> float | None:
|
|
305
|
+
"""Compute signal-to-noise ratio.
|
|
306
|
+
|
|
307
|
+
Args:
|
|
308
|
+
amplitude: Signal amplitude.
|
|
309
|
+
noise_level: Noise level.
|
|
310
|
+
|
|
311
|
+
Returns:
|
|
312
|
+
SNR in dB or None if not calculable.
|
|
313
|
+
"""
|
|
314
|
+
if amplitude <= noise_level * 10:
|
|
315
|
+
return None
|
|
316
|
+
|
|
317
|
+
signal_power = amplitude**2 / 8 # Approximate for most waveforms
|
|
318
|
+
noise_power = noise_level**2
|
|
319
|
+
|
|
320
|
+
if noise_power <= 1e-20:
|
|
321
|
+
return None
|
|
322
|
+
|
|
323
|
+
# Type narrowing: numpy operations return numpy types
|
|
324
|
+
snr_db: float = float(10 * np.log10(signal_power / noise_power))
|
|
325
|
+
return snr_db
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
def _create_classification_result(
|
|
329
|
+
signal_type: str,
|
|
330
|
+
is_digital: bool,
|
|
331
|
+
is_periodic: bool,
|
|
332
|
+
characteristics: list[str],
|
|
333
|
+
dc_component: bool,
|
|
334
|
+
frequency_estimate: float | None,
|
|
335
|
+
snr_db: float | None,
|
|
336
|
+
confidence: float,
|
|
337
|
+
noise_level: float,
|
|
338
|
+
digital_levels: dict[str, float] | None,
|
|
339
|
+
) -> dict[str, Any]:
|
|
340
|
+
"""Create classification result dictionary.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
signal_type: Signal type string.
|
|
344
|
+
is_digital: Whether signal is digital.
|
|
345
|
+
is_periodic: Whether signal is periodic.
|
|
346
|
+
characteristics: List of characteristic strings.
|
|
347
|
+
dc_component: Whether DC component is present.
|
|
348
|
+
frequency_estimate: Estimated frequency in Hz.
|
|
349
|
+
snr_db: SNR in dB.
|
|
350
|
+
confidence: Classification confidence.
|
|
351
|
+
noise_level: Noise level.
|
|
352
|
+
digital_levels: Digital levels dict (if digital).
|
|
353
|
+
|
|
354
|
+
Returns:
|
|
355
|
+
Classification result dictionary.
|
|
356
|
+
"""
|
|
254
357
|
return {
|
|
255
358
|
"type": signal_type,
|
|
256
359
|
"signal_type": signal_type,
|
|
@@ -267,6 +370,160 @@ def classify_signal(
|
|
|
267
370
|
}
|
|
268
371
|
|
|
269
372
|
|
|
373
|
+
# =============================================================================
|
|
374
|
+
# Public API Functions
|
|
375
|
+
# =============================================================================
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def classify_signal(
    trace: WaveformTrace | NDArray[np.floating[Any]],
    sample_rate: float = 1.0,
    *,
    digital_threshold_ratio: float = 0.8,
    dc_threshold_percent: float = 90.0,
    periodicity_threshold: float = 0.7,
) -> dict[str, Any]:
    """Classify signal type and characteristics.

    Automatically detects whether a signal is digital, analog, or mixed,
    identifies key characteristics like periodicity and noise.

    Args:
        trace: Input waveform trace or numpy array to classify.
        sample_rate: Sample rate in Hz (only used if trace is ndarray).
        digital_threshold_ratio: Ratio for digital detection (0-1).
        dc_threshold_percent: Percentage of DC for DC classification.
        periodicity_threshold: Correlation threshold for periodic (0-1).

    Returns:
        Dictionary with signal_type, is_digital, is_periodic, characteristics,
        frequency_estimate, snr_db, confidence, noise_level, levels.

    Example:
        >>> info = osc.classify_signal(trace)
        >>> print(f"Type: {info['signal_type']}")

    References:
        IEEE 181-2011: Digital waveform characterization
    """
    data, trace_sample_rate = _extract_signal_data(trace, sample_rate)
    n = len(data)

    # Too few samples to classify anything reliably.
    if n < 10:
        return _create_insufficient_data_result()

    stats = _compute_signal_statistics(data)

    # DC signals short-circuit the rest of the pipeline.
    if _is_dc_signal(stats):
        return _create_dc_result(stats)

    is_digital, digital_levels, confidence = _detect_digital_signal(data, digital_threshold_ratio)
    signal_type = "digital" if is_digital else "analog"

    characteristics = _build_characteristics(data, stats, is_digital, digital_levels)

    # Appends "periodic"/"aperiodic" to characteristics as a side effect.
    is_periodic, frequency_estimate, periodicity_score = _analyze_periodicity(
        data,
        trace_sample_rate,
        periodicity_threshold,
        is_digital,
        digital_levels,
        n,
        characteristics,
    )
    # A confident periodicity detection can only raise overall confidence.
    confidence = max(confidence, periodicity_score) if is_periodic else confidence

    # Large mean relative to peak-to-peak swing indicates a DC offset.
    dc_component = abs(stats["mean"]) > (stats["amplitude"] * dc_threshold_percent / 100.0)
    _add_transient_characteristics(
        characteristics, data, stats, digital_levels if is_digital else None
    )

    # A digital signal with analog variation inside its levels is "mixed".
    if is_digital and digital_levels and _detect_mixed_signal(data, digital_levels):
        signal_type = "mixed"
        characteristics.append("analog_variation")

    noise_level = _estimate_noise_level(data)
    snr_db = _compute_snr(stats["amplitude"], noise_level)

    return _create_classification_result(
        signal_type,
        is_digital,
        is_periodic,
        characteristics,
        dc_component,
        frequency_estimate,
        snr_db,
        confidence,
        noise_level,
        digital_levels,
    )
|
|
460
|
+
|
|
461
|
+
|
|
462
|
+
def _build_characteristics(
    data: NDArray[np.floating[Any]],
    stats: dict[str, float],
    is_digital: bool,
    digital_levels: dict[str, float] | None,
) -> list[str]:
    """Assemble the initial characteristics list for a signal.

    Args:
        data: Signal data.
        stats: Signal statistics.
        is_digital: Whether signal is digital.
        digital_levels: Digital levels if applicable (unused here; accepted
            for signature symmetry with the other helpers).

    Returns:
        List of characteristic strings.
    """
    tags: list[str] = ["digital_levels"] if is_digital else []
    _add_noise_characteristics(tags, _estimate_noise_level(data), stats["amplitude"])
    return tags
|
|
486
|
+
|
|
487
|
+
|
|
488
|
+
def _analyze_periodicity(
    data: NDArray[np.floating[Any]],
    trace_sample_rate: float,
    periodicity_threshold: float,
    is_digital: bool,
    digital_levels: dict[str, float] | None,
    n: int,
    characteristics: list[str],
) -> tuple[bool, float | None, float]:
    """Run periodicity detection and tag the characteristics list.

    Args:
        data: Signal data.
        trace_sample_rate: Sample rate.
        periodicity_threshold: Detection threshold.
        is_digital: Whether signal is digital.
        digital_levels: Digital levels if applicable.
        n: Data length.
        characteristics: Characteristics list to update (modified in-place).

    Returns:
        Tuple of (is_periodic, frequency_estimate, periodicity_score).
    """
    periodic, period, score = _classify_periodicity(
        data, trace_sample_rate, periodicity_threshold, is_digital, digital_levels, n
    )

    if not periodic:
        characteristics.append("aperiodic")
        return periodic, None, score

    characteristics.append("periodic")
    # Convert period to frequency, guarding against zero/None periods.
    frequency = 1.0 / period if period and period > 0 else None
    return periodic, frequency, score
|
|
525
|
+
|
|
526
|
+
|
|
270
527
|
def assess_signal_quality(
|
|
271
528
|
trace: WaveformTrace,
|
|
272
529
|
) -> dict[str, Any]:
|
|
@@ -301,170 +558,557 @@ def assess_signal_quality(
|
|
|
301
558
|
"""
|
|
302
559
|
data = trace.data
|
|
303
560
|
n = len(data)
|
|
304
|
-
warnings = []
|
|
561
|
+
warnings: list[str] = []
|
|
305
562
|
|
|
306
563
|
if n < 10:
|
|
307
564
|
warnings.append("Insufficient data for quality assessment")
|
|
308
|
-
return
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
565
|
+
return _create_empty_quality_result(warnings)
|
|
566
|
+
|
|
567
|
+
stats = _calculate_basic_stats(data)
|
|
568
|
+
clipping, clipping_warnings = _detect_clipping(data, n, stats)
|
|
569
|
+
warnings.extend(clipping_warnings)
|
|
570
|
+
|
|
571
|
+
saturation, saturation_warning = _detect_saturation(data, trace, n)
|
|
572
|
+
if saturation_warning:
|
|
573
|
+
warnings.append(saturation_warning)
|
|
574
|
+
|
|
575
|
+
noise_level = _estimate_noise_level(data)
|
|
576
|
+
snr = _calculate_snr(stats, noise_level)
|
|
577
|
+
dynamic_range = _calculate_dynamic_range(stats)
|
|
578
|
+
crest_factor = _calculate_crest_factor(stats)
|
|
579
|
+
|
|
580
|
+
quantization_warning = _check_quantization(data, n, stats)
|
|
581
|
+
if quantization_warning:
|
|
582
|
+
warnings.append(quantization_warning)
|
|
583
|
+
|
|
584
|
+
sample_rate_warnings = _check_quality_sample_rate(trace)
|
|
585
|
+
warnings.extend(sample_rate_warnings)
|
|
586
|
+
|
|
587
|
+
return {
|
|
588
|
+
"snr": float(snr) if snr is not None else None,
|
|
589
|
+
"noise_level": float(noise_level),
|
|
590
|
+
"clipping": bool(clipping),
|
|
591
|
+
"saturation": bool(saturation),
|
|
592
|
+
"warnings": warnings,
|
|
593
|
+
"dynamic_range": float(dynamic_range) if dynamic_range is not None else None,
|
|
594
|
+
"crest_factor": float(crest_factor) if crest_factor is not None else None,
|
|
595
|
+
}
|
|
317
596
|
|
|
318
|
-
# Calculate statistics
|
|
319
|
-
min_val = float(np.min(data))
|
|
320
|
-
max_val = float(np.max(data))
|
|
321
|
-
mean_val = float(np.mean(data))
|
|
322
|
-
rms_val = float(np.sqrt(np.mean(data**2)))
|
|
323
|
-
amplitude = max_val - min_val
|
|
324
597
|
|
|
325
|
-
|
|
326
|
-
|
|
598
|
+
def _create_empty_quality_result(warnings: list[str]) -> dict[str, Any]:
|
|
599
|
+
"""Create quality result dict for insufficient data case.
|
|
600
|
+
|
|
601
|
+
Args:
|
|
602
|
+
warnings: List of warning messages.
|
|
603
|
+
|
|
604
|
+
Returns:
|
|
605
|
+
Quality result dictionary with null values.
|
|
606
|
+
"""
|
|
607
|
+
return {
|
|
608
|
+
"snr": None,
|
|
609
|
+
"noise_level": 0.0,
|
|
610
|
+
"clipping": False,
|
|
611
|
+
"saturation": False,
|
|
612
|
+
"warnings": warnings,
|
|
613
|
+
"dynamic_range": None,
|
|
614
|
+
"crest_factor": None,
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
|
|
618
|
+
def _calculate_basic_stats(data: NDArray[np.floating[Any]]) -> dict[str, float]:
|
|
619
|
+
"""Calculate basic signal statistics.
|
|
620
|
+
|
|
621
|
+
Args:
|
|
622
|
+
data: Signal data array.
|
|
623
|
+
|
|
624
|
+
Returns:
|
|
625
|
+
Dict with min, max, mean, rms, amplitude values.
|
|
626
|
+
"""
|
|
627
|
+
return {
|
|
628
|
+
"min": float(np.min(data)),
|
|
629
|
+
"max": float(np.max(data)),
|
|
630
|
+
"mean": float(np.mean(data)),
|
|
631
|
+
"rms": float(np.sqrt(np.mean(data**2))),
|
|
632
|
+
"amplitude": float(np.max(data) - np.min(data)),
|
|
633
|
+
}
|
|
634
|
+
|
|
635
|
+
|
|
636
|
+
def _detect_clipping(
    data: NDArray[np.floating[Any]], n: int, stats: dict[str, float]
) -> tuple[bool, list[str]]:
    """Detect clipping as long consecutive runs at the signal extremes.

    Brief excursions to the extremes are normal for analog waveforms;
    genuine clipping shows sustained runs (>=15% of the record).

    Args:
        data: Signal data array.
        n: Number of samples.
        stats: Basic statistics dict.

    Returns:
        Tuple of (clipping_detected, warning_messages).
    """
    warnings: list[str] = []
    amplitude = stats["amplitude"]

    # A flat signal has no meaningful extremes to clip at.
    if amplitude <= 1e-9:
        return False, warnings

    tolerance = amplitude * 0.01
    at_min = data <= (stats["min"] + tolerance)
    at_max = data >= (stats["max"] - tolerance)
    # 15% of the record or 100 samples, whichever is larger.
    min_run_length = max(int(n * 0.15), 100)

    max_min_run, max_max_run = _find_max_consecutive_runs(at_min, at_max, n)

    clipping = False
    for run, label in ((max_min_run, "minimum"), (max_max_run, "maximum")):
        if run >= min_run_length:
            clipping = True
            warnings.append(f"Signal clipping detected at {label} ({run} consecutive samples)")

    return clipping, warnings
|
|
360
671
|
|
|
361
|
-
# Clipping detected if we have long consecutive runs at extremes
|
|
362
|
-
if max_min_run >= min_run_length:
|
|
363
|
-
clipping = True
|
|
364
|
-
warnings.append(
|
|
365
|
-
f"Signal clipping detected at minimum ({max_min_run} consecutive samples)"
|
|
366
|
-
)
|
|
367
|
-
if max_max_run >= min_run_length:
|
|
368
|
-
clipping = True
|
|
369
|
-
warnings.append(
|
|
370
|
-
f"Signal clipping detected at maximum ({max_max_run} consecutive samples)"
|
|
371
|
-
)
|
|
372
672
|
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
673
|
+
def _find_max_consecutive_runs(
|
|
674
|
+
at_min: NDArray[np.bool_], at_max: NDArray[np.bool_], n: int
|
|
675
|
+
) -> tuple[int, int]:
|
|
676
|
+
"""Find maximum consecutive run lengths at min and max extremes.
|
|
677
|
+
|
|
678
|
+
Args:
|
|
679
|
+
at_min: Boolean array indicating samples at minimum.
|
|
680
|
+
at_max: Boolean array indicating samples at maximum.
|
|
681
|
+
n: Number of samples.
|
|
682
|
+
|
|
683
|
+
Returns:
|
|
684
|
+
Tuple of (max_min_run, max_max_run).
|
|
685
|
+
"""
|
|
686
|
+
max_min_run = 0
|
|
687
|
+
max_max_run = 0
|
|
688
|
+
current_min_run = 0
|
|
689
|
+
current_max_run = 0
|
|
690
|
+
|
|
691
|
+
for i in range(n):
|
|
692
|
+
if at_min[i]:
|
|
693
|
+
current_min_run += 1
|
|
694
|
+
max_min_run = max(max_min_run, current_min_run)
|
|
695
|
+
else:
|
|
696
|
+
current_min_run = 0
|
|
697
|
+
|
|
698
|
+
if at_max[i]:
|
|
699
|
+
current_max_run += 1
|
|
700
|
+
max_max_run = max(max_max_run, current_max_run)
|
|
701
|
+
else:
|
|
702
|
+
current_max_run = 0
|
|
703
|
+
|
|
704
|
+
return max_min_run, max_max_run
|
|
705
|
+
|
|
706
|
+
|
|
707
|
+
def _detect_saturation(
    data: NDArray[np.floating[Any]], trace: WaveformTrace, n: int
) -> tuple[bool, str | None]:
    """Detect saturation (signal stuck at one or very few levels).

    Digital signals are saturated when fewer than two distinct levels
    appear; analog signals when the distinct-value count is implausibly
    small for the record length.

    Args:
        data: Signal data array.
        trace: Waveform trace for classification.
        n: Number of samples.

    Returns:
        Tuple of (saturation_detected, warning_message).
    """
    unique_values = len(np.unique(data))
    classification = classify_signal(trace)

    if classification["type"] == "digital":
        if unique_values < 2:
            return True, f"Signal saturation detected (only {unique_values} unique value)"
    elif unique_values < max(10, n // 1000):
        return True, f"Signal saturation detected (only {unique_values} unique values)"

    return False, None
|
|
393
731
|
|
|
394
|
-
# 4. Calculate SNR
|
|
395
|
-
snr = None
|
|
396
|
-
if amplitude > noise_level * 10: # Only calculate if signal > noise
|
|
397
|
-
# Remove DC and calculate signal power
|
|
398
|
-
data_ac = data - mean_val
|
|
399
|
-
signal_power = np.mean(data_ac**2)
|
|
400
|
-
noise_power = noise_level**2
|
|
401
732
|
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
733
|
+
def _calculate_snr(stats: dict[str, float], noise_level: float) -> float | None:
|
|
734
|
+
"""Calculate signal-to-noise ratio.
|
|
735
|
+
|
|
736
|
+
Args:
|
|
737
|
+
stats: Basic statistics dict.
|
|
738
|
+
noise_level: Estimated noise level.
|
|
739
|
+
|
|
740
|
+
Returns:
|
|
741
|
+
SNR in dB or None if not calculable.
|
|
742
|
+
"""
|
|
743
|
+
amplitude = stats["amplitude"]
|
|
744
|
+
|
|
745
|
+
if amplitude <= noise_level * 10:
|
|
746
|
+
return None
|
|
747
|
+
|
|
748
|
+
signal_power = amplitude**2 / 8
|
|
749
|
+
noise_power = noise_level**2
|
|
750
|
+
|
|
751
|
+
if noise_power > 1e-20:
|
|
752
|
+
return float(10 * np.log10(signal_power / noise_power))
|
|
753
|
+
|
|
754
|
+
return float("inf")
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
def _calculate_dynamic_range(stats: dict[str, float]) -> float | None:
|
|
758
|
+
"""Calculate signal dynamic range.
|
|
759
|
+
|
|
760
|
+
Args:
|
|
761
|
+
stats: Basic statistics dict.
|
|
762
|
+
|
|
763
|
+
Returns:
|
|
764
|
+
Dynamic range in dB or None.
|
|
765
|
+
"""
|
|
766
|
+
min_val = stats["min"]
|
|
767
|
+
max_val = stats["max"]
|
|
406
768
|
|
|
407
|
-
# 5. Calculate dynamic range
|
|
408
|
-
dynamic_range = None
|
|
409
769
|
if min_val != 0 and max_val != 0 and max_val > 1e-20:
|
|
410
770
|
with np.errstate(invalid="ignore", divide="ignore"):
|
|
411
771
|
ratio = max_val / (abs(min_val) + 1e-20)
|
|
412
772
|
if ratio > 0 and np.isfinite(ratio):
|
|
413
|
-
|
|
773
|
+
return float(20 * np.log10(ratio))
|
|
774
|
+
|
|
775
|
+
return None
|
|
776
|
+
|
|
777
|
+
|
|
778
|
+
def _calculate_crest_factor(stats: dict[str, float]) -> float | None:
|
|
779
|
+
"""Calculate crest factor (peak-to-RMS ratio).
|
|
780
|
+
|
|
781
|
+
Args:
|
|
782
|
+
stats: Basic statistics dict.
|
|
414
783
|
|
|
415
|
-
|
|
416
|
-
|
|
784
|
+
Returns:
|
|
785
|
+
Crest factor or None.
|
|
786
|
+
"""
|
|
787
|
+
rms_val = stats["rms"]
|
|
417
788
|
if rms_val > 1e-12:
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
789
|
+
return float(max(abs(stats["max"]), abs(stats["min"])) / rms_val)
|
|
790
|
+
return None
|
|
791
|
+
|
|
792
|
+
|
|
793
|
+
def _check_quantization(
|
|
794
|
+
data: NDArray[np.floating[Any]], n: int, stats: dict[str, float]
|
|
795
|
+
) -> str | None:
|
|
796
|
+
"""Check for quantization issues.
|
|
797
|
+
|
|
798
|
+
Args:
|
|
799
|
+
data: Signal data array.
|
|
800
|
+
n: Number of samples.
|
|
801
|
+
stats: Basic statistics dict.
|
|
802
|
+
|
|
803
|
+
Returns:
|
|
804
|
+
Warning message or None.
|
|
805
|
+
"""
|
|
806
|
+
if n <= 100:
|
|
807
|
+
return None
|
|
808
|
+
|
|
809
|
+
sorted_data = np.sort(data)
|
|
810
|
+
diffs = np.diff(sorted_data)
|
|
811
|
+
diffs = diffs[diffs > 1e-15]
|
|
812
|
+
|
|
813
|
+
if len(diffs) > 10:
|
|
814
|
+
min_step = float(np.min(diffs))
|
|
815
|
+
amplitude = stats["amplitude"]
|
|
816
|
+
if amplitude / min_step < 256:
|
|
817
|
+
return f"Low resolution detected ({int(amplitude / min_step)} levels), may affect measurement accuracy"
|
|
818
|
+
|
|
819
|
+
return None
|
|
820
|
+
|
|
434
821
|
|
|
435
|
-
|
|
822
|
+
def _check_quality_sample_rate(trace: WaveformTrace) -> list[str]:
    """Check if sample rate is adequate for signal frequency in quality assessment.

    Args:
        trace: Waveform trace with metadata.

    Returns:
        List of warning messages.
    """
    warnings: list[str] = []
    classification = classify_signal(trace)

    # No frequency estimate means nothing to compare the sample rate against.
    if classification["frequency_estimate"] is None:
        return warnings

    freq = classification["frequency_estimate"]
    sample_rate = trace.metadata.sample_rate
    nyquist_rate = 2 * freq

    # Warn below 5x Nyquist, i.e. below 10x the signal frequency.
    if sample_rate < nyquist_rate * 5:
        warnings.append(
            f"Sample rate ({sample_rate:.3e} Hz) may be insufficient for "
            f"signal frequency ({freq:.3e} Hz). Recommend at least 10x oversampling"
        )

    # NOTE(review): samples_per_period < 10 is mathematically the same
    # condition as sample_rate < nyquist_rate * 5 above, and the first
    # warning always contains "sample rate", so this second warning appears
    # unreachable except for floating-point edge cases — confirm whether a
    # different threshold was intended here.
    samples_per_period = sample_rate / freq
    if samples_per_period < 10 and "sample rate" not in "".join(warnings).lower():
        warnings.append(
            f"Very low oversampling detected ({samples_per_period:.1f} samples per period). "
            "Signal may be undersampled or frequency detection may be inaccurate. "
            "Recommend at least 10 samples per period"
        )

    return warnings
|
|
856
|
+
|
|
857
|
+
|
|
858
|
+
def _get_measurement_categories() -> dict[str, list[str]]:
|
|
859
|
+
"""Get categorized list of measurement types.
|
|
860
|
+
|
|
861
|
+
Returns:
|
|
862
|
+
Dictionary mapping category names to measurement lists.
|
|
863
|
+
"""
|
|
457
864
|
return {
|
|
458
|
-
"
|
|
459
|
-
"
|
|
460
|
-
"
|
|
461
|
-
"
|
|
462
|
-
"
|
|
463
|
-
"
|
|
464
|
-
"crest_factor": float(crest_factor) if crest_factor is not None else None,
|
|
865
|
+
"frequency": ["frequency", "period"],
|
|
866
|
+
"edge": ["rise_time", "fall_time"],
|
|
867
|
+
"amplitude": ["amplitude", "overshoot", "undershoot", "preshoot"],
|
|
868
|
+
"duty": ["duty_cycle", "pulse_width"],
|
|
869
|
+
"statistical": ["mean", "rms"],
|
|
870
|
+
"spectral": ["thd", "snr", "sinad", "enob", "sfdr", "fft", "psd"],
|
|
465
871
|
}
|
|
466
872
|
|
|
467
873
|
|
|
874
|
+
def _check_dc_signal_compatibility(
|
|
875
|
+
signal_type: str,
|
|
876
|
+
measurement_name: str,
|
|
877
|
+
categories: dict[str, list[str]],
|
|
878
|
+
state: dict[str, Any],
|
|
879
|
+
) -> None:
|
|
880
|
+
"""Check if measurement is compatible with DC signals.
|
|
881
|
+
|
|
882
|
+
Args:
|
|
883
|
+
signal_type: Type of signal (e.g., "dc", "digital").
|
|
884
|
+
measurement_name: Name of the measurement to check.
|
|
885
|
+
categories: Dict mapping category names to measurement lists.
|
|
886
|
+
state: Mutable dict with suitable, warnings, suggestions, expected_result.
|
|
887
|
+
"""
|
|
888
|
+
if signal_type != "dc":
|
|
889
|
+
return
|
|
890
|
+
|
|
891
|
+
if measurement_name in categories["frequency"]:
|
|
892
|
+
state["suitable"] = False
|
|
893
|
+
state["warnings"].append(f"{measurement_name} measurement not suitable for DC signal")
|
|
894
|
+
state["suggestions"].append("Use 'mean' or 'rms' measurements for DC signals")
|
|
895
|
+
state["expected_result"] = "nan"
|
|
896
|
+
elif measurement_name in categories["edge"]:
|
|
897
|
+
state["suitable"] = False
|
|
898
|
+
state["warnings"].append(f"{measurement_name} requires signal transitions")
|
|
899
|
+
state["suggestions"].append("Signal appears to be DC with no edges")
|
|
900
|
+
state["expected_result"] = "nan"
|
|
901
|
+
elif measurement_name in categories["duty"]:
|
|
902
|
+
state["suitable"] = False
|
|
903
|
+
state["warnings"].append(f"{measurement_name} requires periodic signal")
|
|
904
|
+
state["expected_result"] = "nan"
|
|
905
|
+
|
|
906
|
+
|
|
907
|
+
def _check_aperiodic_signal_compatibility(
|
|
908
|
+
characteristics: list[str],
|
|
909
|
+
measurement_name: str,
|
|
910
|
+
categories: dict[str, list[str]],
|
|
911
|
+
state: dict[str, Any],
|
|
912
|
+
) -> None:
|
|
913
|
+
"""Check if measurement is compatible with aperiodic signals.
|
|
914
|
+
|
|
915
|
+
Args:
|
|
916
|
+
characteristics: List of signal characteristics.
|
|
917
|
+
measurement_name: Name of the measurement to check.
|
|
918
|
+
categories: Dict mapping category names to measurement lists.
|
|
919
|
+
state: Mutable dict with suitable, warnings, suggestions, expected_result, confidence.
|
|
920
|
+
"""
|
|
921
|
+
if "aperiodic" not in characteristics:
|
|
922
|
+
return
|
|
923
|
+
|
|
924
|
+
periodic_measurements = categories["frequency"] + categories["duty"]
|
|
925
|
+
if measurement_name in periodic_measurements:
|
|
926
|
+
state["suitable"] = False
|
|
927
|
+
state["confidence"] = 0.6
|
|
928
|
+
state["warnings"].append(f"{measurement_name} requires periodic signal")
|
|
929
|
+
state["suggestions"].append("Signal does not appear periodic")
|
|
930
|
+
state["expected_result"] = "nan"
|
|
931
|
+
elif measurement_name in categories["spectral"]:
|
|
932
|
+
state["warnings"].append(
|
|
933
|
+
"Spectral measurements on aperiodic signals may not show clear peaks"
|
|
934
|
+
)
|
|
935
|
+
state["suggestions"].append("Consider time-domain or statistical analysis")
|
|
936
|
+
state["expected_result"] = "unreliable"
|
|
937
|
+
|
|
938
|
+
|
|
939
|
+
def _check_digital_signal_compatibility(
|
|
940
|
+
signal_type: str,
|
|
941
|
+
measurement_name: str,
|
|
942
|
+
categories: dict[str, list[str]],
|
|
943
|
+
state: dict[str, Any],
|
|
944
|
+
) -> None:
|
|
945
|
+
"""Check if measurement is compatible with digital signals.
|
|
946
|
+
|
|
947
|
+
Args:
|
|
948
|
+
signal_type: Type of signal.
|
|
949
|
+
measurement_name: Name of the measurement to check.
|
|
950
|
+
categories: Dict mapping category names to measurement lists.
|
|
951
|
+
state: Mutable dict with warnings, suggestions, expected_result, confidence.
|
|
952
|
+
"""
|
|
953
|
+
if signal_type != "digital":
|
|
954
|
+
return
|
|
955
|
+
|
|
956
|
+
if measurement_name in categories["amplitude"] and measurement_name != "amplitude":
|
|
957
|
+
state["warnings"].append(
|
|
958
|
+
f"{measurement_name} designed for analog signals with overshoot/ringing"
|
|
959
|
+
)
|
|
960
|
+
state["suggestions"].append("Digital signals may show zero overshoot/undershoot")
|
|
961
|
+
state["expected_result"] = "unreliable"
|
|
962
|
+
state["confidence"] = 0.5
|
|
963
|
+
|
|
964
|
+
|
|
965
|
+
def _check_edge_count_requirements(
|
|
966
|
+
trace: WaveformTrace,
|
|
967
|
+
measurement_name: str,
|
|
968
|
+
categories: dict[str, list[str]],
|
|
969
|
+
classification: dict[str, Any],
|
|
970
|
+
state: dict[str, Any],
|
|
971
|
+
) -> None:
|
|
972
|
+
"""Check if signal has sufficient edges for edge-based measurements.
|
|
973
|
+
|
|
974
|
+
Args:
|
|
975
|
+
trace: Input waveform trace.
|
|
976
|
+
measurement_name: Name of the measurement to check.
|
|
977
|
+
categories: Dict mapping category names to measurement lists.
|
|
978
|
+
classification: Signal classification info.
|
|
979
|
+
state: Mutable dict with suitable, warnings, suggestions, expected_result.
|
|
980
|
+
"""
|
|
981
|
+
edge_based = categories["edge"] + categories["duty"]
|
|
982
|
+
if measurement_name not in edge_based:
|
|
983
|
+
return
|
|
984
|
+
|
|
985
|
+
edge_count = _count_edges(trace.data, classification.get("levels"))
|
|
986
|
+
if edge_count < 2:
|
|
987
|
+
state["suitable"] = False
|
|
988
|
+
state["warnings"].append(f"{measurement_name} requires at least 2 signal edges")
|
|
989
|
+
state["suggestions"].append(f"Signal has only {edge_count} detected edge(s)")
|
|
990
|
+
state["expected_result"] = "nan"
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
def _check_quality_impacts(
|
|
994
|
+
quality: dict[str, Any],
|
|
995
|
+
measurement_name: str,
|
|
996
|
+
categories: dict[str, list[str]],
|
|
997
|
+
state: dict[str, Any],
|
|
998
|
+
) -> None:
|
|
999
|
+
"""Check how signal quality issues affect measurement suitability.
|
|
1000
|
+
|
|
1001
|
+
Args:
|
|
1002
|
+
quality: Signal quality assessment.
|
|
1003
|
+
measurement_name: Name of the measurement to check.
|
|
1004
|
+
categories: Dict mapping category names to measurement lists.
|
|
1005
|
+
state: Mutable dict with warnings, expected_result, confidence.
|
|
1006
|
+
"""
|
|
1007
|
+
affected_by_clipping = categories["edge"] + categories["amplitude"]
|
|
1008
|
+
|
|
1009
|
+
if quality["clipping"] and measurement_name in affected_by_clipping:
|
|
1010
|
+
state["warnings"].append("Signal clipping detected, may affect measurement accuracy")
|
|
1011
|
+
if state["expected_result"] != "nan":
|
|
1012
|
+
state["expected_result"] = "unreliable"
|
|
1013
|
+
state["confidence"] = min(state["confidence"], 0.6)
|
|
1014
|
+
|
|
1015
|
+
if quality["saturation"]:
|
|
1016
|
+
state["warnings"].append("Signal saturation detected, measurements may be unreliable")
|
|
1017
|
+
if state["expected_result"] != "nan":
|
|
1018
|
+
state["expected_result"] = "unreliable"
|
|
1019
|
+
state["confidence"] = min(state["confidence"], 0.5)
|
|
1020
|
+
|
|
1021
|
+
if quality["snr"] is not None and quality["snr"] < 20:
|
|
1022
|
+
if measurement_name in categories["edge"]:
|
|
1023
|
+
state["warnings"].append(
|
|
1024
|
+
f"Low SNR ({quality['snr']:.1f} dB) may affect edge timing measurements"
|
|
1025
|
+
)
|
|
1026
|
+
state["suggestions"].append("Consider filtering signal to improve SNR")
|
|
1027
|
+
state["confidence"] = min(state["confidence"], 0.7)
|
|
1028
|
+
|
|
1029
|
+
|
|
1030
|
+
def _check_sample_rate_adequacy(
|
|
1031
|
+
trace: WaveformTrace,
|
|
1032
|
+
measurement_name: str,
|
|
1033
|
+
categories: dict[str, list[str]],
|
|
1034
|
+
classification: dict[str, Any],
|
|
1035
|
+
state: dict[str, Any],
|
|
1036
|
+
) -> None:
|
|
1037
|
+
"""Check if sample rate is adequate for timing measurements.
|
|
1038
|
+
|
|
1039
|
+
Args:
|
|
1040
|
+
trace: Input waveform trace.
|
|
1041
|
+
measurement_name: Name of the measurement to check.
|
|
1042
|
+
categories: Dict mapping category names to measurement lists.
|
|
1043
|
+
classification: Signal classification info.
|
|
1044
|
+
state: Mutable dict with warnings, suggestions, expected_result, confidence.
|
|
1045
|
+
"""
|
|
1046
|
+
timing_measurements = categories["edge"] + categories["frequency"]
|
|
1047
|
+
if measurement_name not in timing_measurements:
|
|
1048
|
+
return
|
|
1049
|
+
|
|
1050
|
+
if classification["frequency_estimate"] is None:
|
|
1051
|
+
return
|
|
1052
|
+
|
|
1053
|
+
nyquist_rate = 2 * classification["frequency_estimate"]
|
|
1054
|
+
if trace.metadata.sample_rate < nyquist_rate * 5:
|
|
1055
|
+
state["warnings"].append("Sample rate may be too low for accurate timing measurements")
|
|
1056
|
+
state["suggestions"].append(
|
|
1057
|
+
f"Recommend sample rate > {nyquist_rate * 10:.3e} Hz (10x signal frequency)"
|
|
1058
|
+
)
|
|
1059
|
+
state["expected_result"] = "unreliable"
|
|
1060
|
+
state["confidence"] = min(state["confidence"], 0.6)
|
|
1061
|
+
|
|
1062
|
+
|
|
1063
|
+
def _check_data_length_adequacy(
|
|
1064
|
+
trace: WaveformTrace,
|
|
1065
|
+
measurement_name: str,
|
|
1066
|
+
categories: dict[str, list[str]],
|
|
1067
|
+
classification: dict[str, Any],
|
|
1068
|
+
state: dict[str, Any],
|
|
1069
|
+
) -> None:
|
|
1070
|
+
"""Check if signal length is adequate for the measurement.
|
|
1071
|
+
|
|
1072
|
+
Args:
|
|
1073
|
+
trace: Input waveform trace.
|
|
1074
|
+
measurement_name: Name of the measurement to check.
|
|
1075
|
+
categories: Dict mapping category names to measurement lists.
|
|
1076
|
+
classification: Signal classification info.
|
|
1077
|
+
state: Mutable dict with warnings, suggestions, expected_result, confidence.
|
|
1078
|
+
"""
|
|
1079
|
+
n = len(trace.data)
|
|
1080
|
+
|
|
1081
|
+
# Check spectral measurements
|
|
1082
|
+
if measurement_name in categories["spectral"]:
|
|
1083
|
+
if n < 256:
|
|
1084
|
+
state["warnings"].append(
|
|
1085
|
+
f"Signal length ({n} samples) may be too short for spectral analysis"
|
|
1086
|
+
)
|
|
1087
|
+
state["suggestions"].append(
|
|
1088
|
+
"Recommend at least 1024 samples for FFT-based measurements"
|
|
1089
|
+
)
|
|
1090
|
+
state["expected_result"] = "unreliable"
|
|
1091
|
+
state["confidence"] = min(state["confidence"], 0.5)
|
|
1092
|
+
|
|
1093
|
+
# Check frequency measurements
|
|
1094
|
+
if measurement_name in categories["frequency"]:
|
|
1095
|
+
if classification["frequency_estimate"] is not None:
|
|
1096
|
+
min_samples = trace.metadata.sample_rate / classification["frequency_estimate"]
|
|
1097
|
+
if n < min_samples * 0.5:
|
|
1098
|
+
state["warnings"].append(
|
|
1099
|
+
f"Signal length ({n} samples) captures < 0.5 periods, "
|
|
1100
|
+
"frequency measurement may fail"
|
|
1101
|
+
)
|
|
1102
|
+
state["suggestions"].append(
|
|
1103
|
+
"Capture at least 2 periods for reliable frequency measurement"
|
|
1104
|
+
)
|
|
1105
|
+
state["expected_result"] = "unreliable"
|
|
1106
|
+
state["confidence"] = min(state["confidence"], 0.5)
|
|
1107
|
+
elif n < min_samples * 2:
|
|
1108
|
+
state["suggestions"].append("Capture at least 10 periods for best accuracy")
|
|
1109
|
+
state["confidence"] = min(state["confidence"], 0.75)
|
|
1110
|
+
|
|
1111
|
+
|
|
468
1112
|
def check_measurement_suitability(
|
|
469
1113
|
trace: WaveformTrace,
|
|
470
1114
|
measurement_name: str,
|
|
@@ -499,142 +1143,35 @@ def check_measurement_suitability(
|
|
|
499
1143
|
classification = classify_signal(trace)
|
|
500
1144
|
quality = assess_signal_quality(trace)
|
|
501
1145
|
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
1146
|
+
state: dict[str, Any] = {
|
|
1147
|
+
"suitable": True,
|
|
1148
|
+
"confidence": 0.8,
|
|
1149
|
+
"expected_result": "valid",
|
|
1150
|
+
"warnings": [],
|
|
1151
|
+
"suggestions": [],
|
|
1152
|
+
}
|
|
507
1153
|
|
|
1154
|
+
categories = _get_measurement_categories()
|
|
508
1155
|
signal_type = classification["type"]
|
|
509
1156
|
characteristics = classification["characteristics"]
|
|
510
1157
|
|
|
511
|
-
#
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
suitable = False
|
|
523
|
-
warnings.append(f"{measurement_name} measurement not suitable for DC signal")
|
|
524
|
-
suggestions.append("Use 'mean' or 'rms' measurements for DC signals")
|
|
525
|
-
expected_result = "nan"
|
|
526
|
-
elif measurement_name in edge_measurements:
|
|
527
|
-
suitable = False
|
|
528
|
-
warnings.append(f"{measurement_name} requires signal transitions")
|
|
529
|
-
suggestions.append("Signal appears to be DC with no edges")
|
|
530
|
-
expected_result = "nan"
|
|
531
|
-
elif measurement_name in duty_measurements:
|
|
532
|
-
suitable = False
|
|
533
|
-
warnings.append(f"{measurement_name} requires periodic signal")
|
|
534
|
-
expected_result = "nan"
|
|
535
|
-
|
|
536
|
-
# Check aperiodic signals
|
|
537
|
-
if "aperiodic" in characteristics:
|
|
538
|
-
if measurement_name in frequency_measurements + duty_measurements:
|
|
539
|
-
suitable = False
|
|
540
|
-
confidence = 0.6
|
|
541
|
-
warnings.append(f"{measurement_name} requires periodic signal")
|
|
542
|
-
suggestions.append("Signal does not appear periodic")
|
|
543
|
-
expected_result = "nan"
|
|
544
|
-
elif measurement_name in spectral_measurements:
|
|
545
|
-
warnings.append("Spectral measurements on aperiodic signals may not show clear peaks")
|
|
546
|
-
suggestions.append("Consider time-domain or statistical analysis")
|
|
547
|
-
expected_result = "unreliable"
|
|
548
|
-
|
|
549
|
-
# Check digital vs analog
|
|
550
|
-
if signal_type == "digital":
|
|
551
|
-
if measurement_name in amplitude_measurements and measurement_name != "amplitude":
|
|
552
|
-
warnings.append(
|
|
553
|
-
f"{measurement_name} designed for analog signals with overshoot/ringing"
|
|
554
|
-
)
|
|
555
|
-
suggestions.append("Digital signals may show zero overshoot/undershoot")
|
|
556
|
-
expected_result = "unreliable"
|
|
557
|
-
confidence = 0.5
|
|
558
|
-
|
|
559
|
-
# Check for sufficient transitions
|
|
560
|
-
if measurement_name in edge_measurements + duty_measurements:
|
|
561
|
-
data = trace.data
|
|
562
|
-
edge_count = _count_edges(data, classification.get("levels"))
|
|
563
|
-
if edge_count < 2:
|
|
564
|
-
suitable = False
|
|
565
|
-
warnings.append(f"{measurement_name} requires at least 2 signal edges")
|
|
566
|
-
suggestions.append(f"Signal has only {edge_count} detected edge(s)")
|
|
567
|
-
expected_result = "nan"
|
|
568
|
-
|
|
569
|
-
# Check signal quality impacts
|
|
570
|
-
if quality["clipping"]:
|
|
571
|
-
if measurement_name in edge_measurements + amplitude_measurements:
|
|
572
|
-
warnings.append("Signal clipping detected, may affect measurement accuracy")
|
|
573
|
-
# Don't override "nan" - if measurement is fundamentally unsuitable, keep it as "nan"
|
|
574
|
-
if expected_result != "nan":
|
|
575
|
-
expected_result = "unreliable"
|
|
576
|
-
confidence = min(confidence, 0.6)
|
|
577
|
-
|
|
578
|
-
if quality["saturation"]:
|
|
579
|
-
warnings.append("Signal saturation detected, measurements may be unreliable")
|
|
580
|
-
# Don't override "nan" - if measurement is fundamentally unsuitable, keep it as "nan"
|
|
581
|
-
if expected_result != "nan":
|
|
582
|
-
expected_result = "unreliable"
|
|
583
|
-
confidence = min(confidence, 0.5)
|
|
584
|
-
|
|
585
|
-
if quality["snr"] is not None and quality["snr"] < 20:
|
|
586
|
-
if measurement_name in edge_measurements:
|
|
587
|
-
warnings.append(
|
|
588
|
-
f"Low SNR ({quality['snr']:.1f} dB) may affect edge timing measurements"
|
|
589
|
-
)
|
|
590
|
-
suggestions.append("Consider filtering signal to improve SNR")
|
|
591
|
-
confidence = min(confidence, 0.7)
|
|
592
|
-
|
|
593
|
-
# Check sample rate for timing measurements
|
|
594
|
-
if measurement_name in edge_measurements + frequency_measurements:
|
|
595
|
-
if classification["frequency_estimate"] is not None:
|
|
596
|
-
nyquist_rate = 2 * classification["frequency_estimate"]
|
|
597
|
-
if trace.metadata.sample_rate < nyquist_rate * 5:
|
|
598
|
-
warnings.append("Sample rate may be too low for accurate timing measurements")
|
|
599
|
-
suggestions.append(
|
|
600
|
-
f"Recommend sample rate > {nyquist_rate * 10:.3e} Hz (10x signal frequency)"
|
|
601
|
-
)
|
|
602
|
-
expected_result = "unreliable"
|
|
603
|
-
confidence = min(confidence, 0.6)
|
|
604
|
-
|
|
605
|
-
# Check data length
|
|
606
|
-
n = len(trace.data)
|
|
607
|
-
if measurement_name in spectral_measurements:
|
|
608
|
-
if n < 256:
|
|
609
|
-
warnings.append(f"Signal length ({n} samples) may be too short for spectral analysis")
|
|
610
|
-
suggestions.append("Recommend at least 1024 samples for FFT-based measurements")
|
|
611
|
-
expected_result = "unreliable"
|
|
612
|
-
confidence = min(confidence, 0.5)
|
|
613
|
-
|
|
614
|
-
if measurement_name in frequency_measurements:
|
|
615
|
-
if classification["frequency_estimate"] is not None:
|
|
616
|
-
min_samples = trace.metadata.sample_rate / classification["frequency_estimate"]
|
|
617
|
-
# Require at least 0.5 periods for basic detection
|
|
618
|
-
# Having 1+ complete periods is ideal, but FFT can work with less
|
|
619
|
-
if n < min_samples * 0.5:
|
|
620
|
-
warnings.append(
|
|
621
|
-
f"Signal length ({n} samples) captures < 0.5 periods, "
|
|
622
|
-
"frequency measurement may fail"
|
|
623
|
-
)
|
|
624
|
-
suggestions.append("Capture at least 2 periods for reliable frequency measurement")
|
|
625
|
-
expected_result = "unreliable"
|
|
626
|
-
confidence = min(confidence, 0.5)
|
|
627
|
-
elif n < min_samples * 2:
|
|
628
|
-
# Between 0.5 and 2 periods: usable but not ideal
|
|
629
|
-
suggestions.append("Capture at least 10 periods for best accuracy")
|
|
630
|
-
confidence = min(confidence, 0.75)
|
|
631
|
-
|
|
1158
|
+
# Run all compatibility checks
|
|
1159
|
+
_check_dc_signal_compatibility(signal_type, measurement_name, categories, state)
|
|
1160
|
+
_check_aperiodic_signal_compatibility(characteristics, measurement_name, categories, state)
|
|
1161
|
+
_check_digital_signal_compatibility(signal_type, measurement_name, categories, state)
|
|
1162
|
+
_check_edge_count_requirements(trace, measurement_name, categories, classification, state)
|
|
1163
|
+
_check_quality_impacts(quality, measurement_name, categories, state)
|
|
1164
|
+
_check_sample_rate_adequacy(trace, measurement_name, categories, classification, state)
|
|
1165
|
+
_check_data_length_adequacy(trace, measurement_name, categories, classification, state)
|
|
1166
|
+
|
|
1167
|
+
# Extract confidence (guaranteed to be float from initialization)
|
|
1168
|
+
confidence_value = float(state["confidence"])
|
|
632
1169
|
return {
|
|
633
|
-
"suitable": suitable,
|
|
634
|
-
"confidence":
|
|
635
|
-
"warnings": warnings,
|
|
636
|
-
"suggestions": suggestions,
|
|
637
|
-
"expected_result": expected_result,
|
|
1170
|
+
"suitable": state["suitable"],
|
|
1171
|
+
"confidence": confidence_value,
|
|
1172
|
+
"warnings": state["warnings"],
|
|
1173
|
+
"suggestions": state["suggestions"],
|
|
1174
|
+
"expected_result": state["expected_result"],
|
|
638
1175
|
}
|
|
639
1176
|
|
|
640
1177
|
|
|
@@ -678,9 +1215,40 @@ def suggest_measurements(
|
|
|
678
1215
|
signal_type = classification["type"]
|
|
679
1216
|
characteristics = classification["characteristics"]
|
|
680
1217
|
|
|
681
|
-
suggestions = []
|
|
1218
|
+
suggestions: list[dict[str, Any]] = []
|
|
1219
|
+
|
|
1220
|
+
# Core statistical measurements (always applicable)
|
|
1221
|
+
_add_statistical_suggestions(suggestions)
|
|
1222
|
+
|
|
1223
|
+
# Early return for DC signals
|
|
1224
|
+
if signal_type == "dc":
|
|
1225
|
+
_add_dc_signal_suggestion(suggestions)
|
|
1226
|
+
return sorted(suggestions, key=lambda x: cast("int", x["priority"]))[:max_suggestions]
|
|
1227
|
+
|
|
1228
|
+
# Add suggestions based on signal characteristics
|
|
1229
|
+
_add_amplitude_suggestion(suggestions, signal_type)
|
|
682
1230
|
|
|
683
|
-
|
|
1231
|
+
if "periodic" in characteristics:
|
|
1232
|
+
_add_periodic_suggestions(suggestions, classification)
|
|
1233
|
+
|
|
1234
|
+
if signal_type in ("digital", "mixed"):
|
|
1235
|
+
_add_digital_signal_suggestions(
|
|
1236
|
+
suggestions, trace, classification, quality, characteristics
|
|
1237
|
+
)
|
|
1238
|
+
|
|
1239
|
+
if signal_type in ("analog", "mixed"):
|
|
1240
|
+
_add_analog_signal_suggestions(suggestions, quality)
|
|
1241
|
+
|
|
1242
|
+
if "periodic" in characteristics and "clean" in characteristics:
|
|
1243
|
+
_add_spectral_suggestions(suggestions, trace)
|
|
1244
|
+
|
|
1245
|
+
# Sort by priority and limit
|
|
1246
|
+
suggestions = sorted(suggestions, key=lambda x: cast("int", x["priority"]))
|
|
1247
|
+
return suggestions[:max_suggestions]
|
|
1248
|
+
|
|
1249
|
+
|
|
1250
|
+
def _add_statistical_suggestions(suggestions: list[dict[str, Any]]) -> None:
|
|
1251
|
+
"""Add core statistical measurement suggestions."""
|
|
684
1252
|
suggestions.append(
|
|
685
1253
|
{
|
|
686
1254
|
"name": "mean",
|
|
@@ -701,21 +1269,22 @@ def suggest_measurements(
|
|
|
701
1269
|
}
|
|
702
1270
|
)
|
|
703
1271
|
|
|
704
|
-
# DC signals
|
|
705
|
-
if signal_type == "dc":
|
|
706
|
-
suggestions.append(
|
|
707
|
-
{
|
|
708
|
-
"name": "amplitude",
|
|
709
|
-
"category": "amplitude",
|
|
710
|
-
"priority": 3,
|
|
711
|
-
"rationale": "Measure noise/variation level in DC signal",
|
|
712
|
-
"confidence": 0.9,
|
|
713
|
-
}
|
|
714
|
-
)
|
|
715
|
-
# Don't suggest frequency, edges, etc.
|
|
716
|
-
return sorted(suggestions, key=lambda x: cast("int", x["priority"]))[:max_suggestions]
|
|
717
1272
|
|
|
718
|
-
|
|
1273
|
+
def _add_dc_signal_suggestion(suggestions: list[dict[str, Any]]) -> None:
|
|
1274
|
+
"""Add suggestion for DC signal noise measurement."""
|
|
1275
|
+
suggestions.append(
|
|
1276
|
+
{
|
|
1277
|
+
"name": "amplitude",
|
|
1278
|
+
"category": "amplitude",
|
|
1279
|
+
"priority": 3,
|
|
1280
|
+
"rationale": "Measure noise/variation level in DC signal",
|
|
1281
|
+
"confidence": 0.9,
|
|
1282
|
+
}
|
|
1283
|
+
)
|
|
1284
|
+
|
|
1285
|
+
|
|
1286
|
+
def _add_amplitude_suggestion(suggestions: list[dict[str, Any]], signal_type: str) -> None:
|
|
1287
|
+
"""Add general amplitude measurement suggestion."""
|
|
719
1288
|
suggestions.append(
|
|
720
1289
|
{
|
|
721
1290
|
"name": "amplitude",
|
|
@@ -726,124 +1295,137 @@ def suggest_measurements(
|
|
|
726
1295
|
}
|
|
727
1296
|
)
|
|
728
1297
|
|
|
729
|
-
|
|
730
|
-
|
|
1298
|
+
|
|
1299
|
+
def _add_periodic_suggestions(
|
|
1300
|
+
suggestions: list[dict[str, Any]], classification: dict[str, Any]
|
|
1301
|
+
) -> None:
|
|
1302
|
+
"""Add frequency/period suggestions for periodic signals."""
|
|
1303
|
+
suggestions.append(
|
|
1304
|
+
{
|
|
1305
|
+
"name": "frequency",
|
|
1306
|
+
"category": "timing",
|
|
1307
|
+
"priority": 4,
|
|
1308
|
+
"rationale": "Periodic signal detected, frequency measurement applicable",
|
|
1309
|
+
"confidence": classification["confidence"],
|
|
1310
|
+
}
|
|
1311
|
+
)
|
|
1312
|
+
|
|
1313
|
+
suggestions.append(
|
|
1314
|
+
{
|
|
1315
|
+
"name": "period",
|
|
1316
|
+
"category": "timing",
|
|
1317
|
+
"priority": 5,
|
|
1318
|
+
"rationale": "Period measurement for periodic signal",
|
|
1319
|
+
"confidence": classification["confidence"],
|
|
1320
|
+
}
|
|
1321
|
+
)
|
|
1322
|
+
|
|
1323
|
+
|
|
1324
|
+
def _add_digital_signal_suggestions(
|
|
1325
|
+
suggestions: list[dict[str, Any]],
|
|
1326
|
+
trace: WaveformTrace,
|
|
1327
|
+
classification: dict[str, Any],
|
|
1328
|
+
quality: dict[str, Any],
|
|
1329
|
+
characteristics: list[str],
|
|
1330
|
+
) -> None:
|
|
1331
|
+
"""Add edge timing and pulse measurement suggestions for digital signals."""
|
|
1332
|
+
edge_count = _count_edges(trace.data, classification.get("levels"))
|
|
1333
|
+
|
|
1334
|
+
if edge_count >= 2:
|
|
1335
|
+
snr_conf = 0.9 if quality["snr"] and quality["snr"] > 20 else 0.7
|
|
1336
|
+
|
|
1337
|
+
suggestions.append(
|
|
1338
|
+
{
|
|
1339
|
+
"name": "rise_time",
|
|
1340
|
+
"category": "timing",
|
|
1341
|
+
"priority": 6,
|
|
1342
|
+
"rationale": f"Digital edges detected ({edge_count} edges)",
|
|
1343
|
+
"confidence": snr_conf,
|
|
1344
|
+
}
|
|
1345
|
+
)
|
|
1346
|
+
|
|
1347
|
+
suggestions.append(
|
|
1348
|
+
{
|
|
1349
|
+
"name": "fall_time",
|
|
1350
|
+
"category": "timing",
|
|
1351
|
+
"priority": 7,
|
|
1352
|
+
"rationale": f"Digital edges detected ({edge_count} edges)",
|
|
1353
|
+
"confidence": snr_conf,
|
|
1354
|
+
}
|
|
1355
|
+
)
|
|
1356
|
+
|
|
1357
|
+
if "periodic" in characteristics and edge_count >= 2:
|
|
1358
|
+
duty_conf = 0.85 if edge_count >= 4 else 0.75
|
|
1359
|
+
|
|
1360
|
+
suggestions.append(
|
|
1361
|
+
{
|
|
1362
|
+
"name": "duty_cycle",
|
|
1363
|
+
"category": "timing",
|
|
1364
|
+
"priority": 8,
|
|
1365
|
+
"rationale": "Periodic pulse train detected",
|
|
1366
|
+
"confidence": duty_conf,
|
|
1367
|
+
}
|
|
1368
|
+
)
|
|
1369
|
+
|
|
1370
|
+
suggestions.append(
|
|
1371
|
+
{
|
|
1372
|
+
"name": "pulse_width",
|
|
1373
|
+
"category": "timing",
|
|
1374
|
+
"priority": 9,
|
|
1375
|
+
"rationale": "Pulse measurements suitable for periodic digital signal",
|
|
1376
|
+
"confidence": duty_conf,
|
|
1377
|
+
}
|
|
1378
|
+
)
|
|
1379
|
+
|
|
1380
|
+
|
|
1381
|
+
def _add_analog_signal_suggestions(
|
|
1382
|
+
suggestions: list[dict[str, Any]], quality: dict[str, Any]
|
|
1383
|
+
) -> None:
|
|
1384
|
+
"""Add overshoot/undershoot suggestions for analog signals."""
|
|
1385
|
+
if not quality["clipping"]:
|
|
731
1386
|
suggestions.append(
|
|
732
1387
|
{
|
|
733
|
-
"name": "
|
|
734
|
-
"category": "
|
|
735
|
-
"priority":
|
|
736
|
-
"rationale": "
|
|
737
|
-
"confidence":
|
|
1388
|
+
"name": "overshoot",
|
|
1389
|
+
"category": "amplitude",
|
|
1390
|
+
"priority": 10,
|
|
1391
|
+
"rationale": "Analog signal, overshoot measurement applicable",
|
|
1392
|
+
"confidence": 0.8,
|
|
738
1393
|
}
|
|
739
1394
|
)
|
|
740
1395
|
|
|
741
1396
|
suggestions.append(
|
|
742
1397
|
{
|
|
743
|
-
"name": "
|
|
744
|
-
"category": "
|
|
745
|
-
"priority":
|
|
746
|
-
"rationale": "
|
|
747
|
-
"confidence":
|
|
1398
|
+
"name": "undershoot",
|
|
1399
|
+
"category": "amplitude",
|
|
1400
|
+
"priority": 11,
|
|
1401
|
+
"rationale": "Analog signal, undershoot measurement applicable",
|
|
1402
|
+
"confidence": 0.8,
|
|
748
1403
|
}
|
|
749
1404
|
)
|
|
750
1405
|
|
|
751
|
-
# Digital signals with edges
|
|
752
|
-
if signal_type in ("digital", "mixed"):
|
|
753
|
-
edge_count = _count_edges(trace.data, classification.get("levels"))
|
|
754
|
-
|
|
755
|
-
if edge_count >= 2:
|
|
756
|
-
suggestions.append(
|
|
757
|
-
{
|
|
758
|
-
"name": "rise_time",
|
|
759
|
-
"category": "timing",
|
|
760
|
-
"priority": 6,
|
|
761
|
-
"rationale": f"Digital edges detected ({edge_count} edges)",
|
|
762
|
-
"confidence": 0.9 if quality["snr"] and quality["snr"] > 20 else 0.7,
|
|
763
|
-
}
|
|
764
|
-
)
|
|
765
|
-
|
|
766
|
-
suggestions.append(
|
|
767
|
-
{
|
|
768
|
-
"name": "fall_time",
|
|
769
|
-
"category": "timing",
|
|
770
|
-
"priority": 7,
|
|
771
|
-
"rationale": f"Digital edges detected ({edge_count} edges)",
|
|
772
|
-
"confidence": 0.9 if quality["snr"] and quality["snr"] > 20 else 0.7,
|
|
773
|
-
}
|
|
774
|
-
)
|
|
775
|
-
|
|
776
|
-
if "periodic" in characteristics and edge_count >= 2:
|
|
777
|
-
# Need at least 2 edges (1 complete cycle) for duty cycle
|
|
778
|
-
suggestions.append(
|
|
779
|
-
{
|
|
780
|
-
"name": "duty_cycle",
|
|
781
|
-
"category": "timing",
|
|
782
|
-
"priority": 8,
|
|
783
|
-
"rationale": "Periodic pulse train detected",
|
|
784
|
-
"confidence": 0.85 if edge_count >= 4 else 0.75,
|
|
785
|
-
}
|
|
786
|
-
)
|
|
787
|
-
|
|
788
|
-
suggestions.append(
|
|
789
|
-
{
|
|
790
|
-
"name": "pulse_width",
|
|
791
|
-
"category": "timing",
|
|
792
|
-
"priority": 9,
|
|
793
|
-
"rationale": "Pulse measurements suitable for periodic digital signal",
|
|
794
|
-
"confidence": 0.85 if edge_count >= 4 else 0.75,
|
|
795
|
-
}
|
|
796
|
-
)
|
|
797
|
-
|
|
798
|
-
# Analog signals
|
|
799
|
-
if signal_type in ("analog", "mixed"):
|
|
800
|
-
if not quality["clipping"]:
|
|
801
|
-
suggestions.append(
|
|
802
|
-
{
|
|
803
|
-
"name": "overshoot",
|
|
804
|
-
"category": "amplitude",
|
|
805
|
-
"priority": 10,
|
|
806
|
-
"rationale": "Analog signal, overshoot measurement applicable",
|
|
807
|
-
"confidence": 0.8,
|
|
808
|
-
}
|
|
809
|
-
)
|
|
810
|
-
|
|
811
|
-
suggestions.append(
|
|
812
|
-
{
|
|
813
|
-
"name": "undershoot",
|
|
814
|
-
"category": "amplitude",
|
|
815
|
-
"priority": 11,
|
|
816
|
-
"rationale": "Analog signal, undershoot measurement applicable",
|
|
817
|
-
"confidence": 0.8,
|
|
818
|
-
}
|
|
819
|
-
)
|
|
820
|
-
|
|
821
|
-
# Spectral measurements for clean, periodic signals
|
|
822
|
-
if "periodic" in characteristics and "clean" in characteristics:
|
|
823
|
-
if len(trace.data) >= 256:
|
|
824
|
-
suggestions.append(
|
|
825
|
-
{
|
|
826
|
-
"name": "thd",
|
|
827
|
-
"category": "spectral",
|
|
828
|
-
"priority": 12,
|
|
829
|
-
"rationale": "Clean periodic signal suitable for harmonic analysis",
|
|
830
|
-
"confidence": 0.85,
|
|
831
|
-
}
|
|
832
|
-
)
|
|
833
1406
|
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
1407
|
+
def _add_spectral_suggestions(suggestions: list[dict[str, Any]], trace: WaveformTrace) -> None:
|
|
1408
|
+
"""Add spectral analysis suggestions for clean periodic signals."""
|
|
1409
|
+
if len(trace.data) >= 256:
|
|
1410
|
+
suggestions.append(
|
|
1411
|
+
{
|
|
1412
|
+
"name": "thd",
|
|
1413
|
+
"category": "spectral",
|
|
1414
|
+
"priority": 12,
|
|
1415
|
+
"rationale": "Clean periodic signal suitable for harmonic analysis",
|
|
1416
|
+
"confidence": 0.85,
|
|
1417
|
+
}
|
|
1418
|
+
)
|
|
843
1419
|
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
1420
|
+
suggestions.append(
|
|
1421
|
+
{
|
|
1422
|
+
"name": "snr",
|
|
1423
|
+
"category": "spectral",
|
|
1424
|
+
"priority": 13,
|
|
1425
|
+
"rationale": "Spectral SNR measurement for signal quality",
|
|
1426
|
+
"confidence": 0.8,
|
|
1427
|
+
}
|
|
1428
|
+
)
|
|
847
1429
|
|
|
848
1430
|
|
|
849
1431
|
# =============================================================================
|
|
@@ -1138,85 +1720,126 @@ def _detect_edge_periodicity(
|
|
|
1138
1720
|
if len(data) < 10 or levels is None:
|
|
1139
1721
|
return False, None, 0.0
|
|
1140
1722
|
|
|
1141
|
-
|
|
1723
|
+
intervals = _extract_edge_intervals(data, levels)
|
|
1724
|
+
if intervals is None or len(intervals) < 1:
|
|
1725
|
+
return False, None, 0.0
|
|
1726
|
+
|
|
1727
|
+
mean_interval_raw = np.mean(intervals)
|
|
1728
|
+
mean_interval: float = float(mean_interval_raw)
|
|
1729
|
+
if mean_interval < 1:
|
|
1730
|
+
return False, None, 0.0
|
|
1731
|
+
|
|
1732
|
+
return _analyze_interval_pattern(intervals, mean_interval, sample_rate, len(data))
|
|
1733
|
+
|
|
1142
1734
|
|
|
1143
|
-
|
|
1735
|
+
def _extract_edge_intervals(
|
|
1736
|
+
data: NDArray[np.floating[Any]], levels: dict[str, float]
|
|
1737
|
+
) -> NDArray[np.intp] | None:
|
|
1738
|
+
"""Extract intervals between edges.
|
|
1739
|
+
|
|
1740
|
+
Args:
|
|
1741
|
+
data: Signal data array.
|
|
1742
|
+
levels: Digital levels dict.
|
|
1743
|
+
|
|
1744
|
+
Returns:
|
|
1745
|
+
Array of edge intervals or None if insufficient edges.
|
|
1746
|
+
"""
|
|
1747
|
+
threshold = (levels["low"] + levels["high"]) / 2
|
|
1144
1748
|
above = data > threshold
|
|
1145
1749
|
crossings = np.diff(above.astype(int))
|
|
1146
1750
|
edge_positions = np.where(crossings != 0)[0]
|
|
1147
1751
|
|
|
1148
1752
|
if len(edge_positions) < 2:
|
|
1149
|
-
|
|
1150
|
-
return False, None, 0.0
|
|
1753
|
+
return None
|
|
1151
1754
|
|
|
1152
|
-
|
|
1153
|
-
intervals = np.diff(edge_positions)
|
|
1755
|
+
return np.diff(edge_positions)
|
|
1154
1756
|
|
|
1155
|
-
if len(intervals) < 1:
|
|
1156
|
-
return False, None, 0.0
|
|
1157
|
-
|
|
1158
|
-
# For a periodic signal, intervals should form a repeating pattern
|
|
1159
|
-
# For a square wave: intervals alternate between high-time and low-time
|
|
1160
|
-
# Check if intervals show regular pattern
|
|
1161
1757
|
|
|
1162
|
-
|
|
1163
|
-
mean_interval
|
|
1164
|
-
|
|
1758
|
+
def _analyze_interval_pattern(
|
|
1759
|
+
intervals: NDArray[np.intp], mean_interval: float, sample_rate: float, n_samples: int
|
|
1760
|
+
) -> tuple[bool, float | None, float]:
|
|
1761
|
+
"""Analyze interval pattern to detect periodicity.
|
|
1165
1762
|
|
|
1166
|
-
|
|
1167
|
-
|
|
1763
|
+
Args:
|
|
1764
|
+
intervals: Edge intervals array.
|
|
1765
|
+
mean_interval: Mean interval value.
|
|
1766
|
+
sample_rate: Sampling rate in Hz.
|
|
1767
|
+
n_samples: Total number of samples.
|
|
1168
1768
|
|
|
1769
|
+
Returns:
|
|
1770
|
+
Tuple of (is_periodic, period_seconds, confidence).
|
|
1771
|
+
"""
|
|
1772
|
+
std_interval = np.std(intervals)
|
|
1169
1773
|
cv = std_interval / mean_interval
|
|
1170
1774
|
|
|
1171
|
-
# Special case:
|
|
1775
|
+
# Special case: single interval
|
|
1172
1776
|
if len(intervals) == 1:
|
|
1173
|
-
# This represents half a period for a square wave
|
|
1174
1777
|
period_samples = 2 * intervals[0]
|
|
1175
1778
|
period_seconds = period_samples / sample_rate
|
|
1176
|
-
# Lower confidence since we only have half a period
|
|
1177
1779
|
return True, period_seconds, 0.7
|
|
1178
1780
|
|
|
1179
|
-
#
|
|
1781
|
+
# High variation - check for alternating pattern
|
|
1180
1782
|
if cv > 0.3:
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1783
|
+
return _check_alternating_pattern(intervals, sample_rate)
|
|
1784
|
+
|
|
1785
|
+
# Regular intervals - estimate period
|
|
1786
|
+
cv_float: float = float(cv)
|
|
1787
|
+
return _estimate_regular_period(mean_interval, cv_float, sample_rate, n_samples)
|
|
1788
|
+
|
|
1789
|
+
|
|
1790
|
+
def _check_alternating_pattern(
|
|
1791
|
+
intervals: NDArray[np.intp], sample_rate: float
|
|
1792
|
+
) -> tuple[bool, float | None, float]:
|
|
1793
|
+
"""Check if intervals follow alternating pattern (square wave).
|
|
1794
|
+
|
|
1795
|
+
Args:
|
|
1796
|
+
intervals: Edge intervals array.
|
|
1797
|
+
sample_rate: Sampling rate in Hz.
|
|
1798
|
+
|
|
1799
|
+
Returns:
|
|
1800
|
+
Tuple of (is_periodic, period_seconds, confidence).
|
|
1801
|
+
"""
|
|
1802
|
+
if len(intervals) >= 4:
|
|
1803
|
+
odd_intervals = intervals[::2]
|
|
1804
|
+
even_intervals = intervals[1::2]
|
|
1805
|
+
|
|
1806
|
+
odd_cv = np.std(odd_intervals) / (np.mean(odd_intervals) + 1e-12)
|
|
1807
|
+
even_cv = np.std(even_intervals) / (np.mean(even_intervals) + 1e-12)
|
|
1808
|
+
|
|
1809
|
+
if odd_cv < 0.2 and even_cv < 0.2:
|
|
1810
|
+
period_samples = np.mean(odd_intervals) + np.mean(even_intervals)
|
|
1200
1811
|
period_seconds = period_samples / sample_rate
|
|
1201
|
-
|
|
1202
|
-
return True, period_seconds,
|
|
1812
|
+
confidence = 1.0 - max(odd_cv, even_cv)
|
|
1813
|
+
return True, period_seconds, float(confidence)
|
|
1203
1814
|
|
|
1204
|
-
|
|
1815
|
+
elif len(intervals) == 2:
|
|
1816
|
+
period_samples = intervals[0] + intervals[1]
|
|
1817
|
+
period_seconds = period_samples / sample_rate
|
|
1818
|
+
return True, period_seconds, 0.75
|
|
1819
|
+
|
|
1820
|
+
return False, None, 0.0
|
|
1821
|
+
|
|
1822
|
+
|
|
1823
|
+
def _estimate_regular_period(
|
|
1824
|
+
mean_interval: float, cv: float, sample_rate: float, n_samples: int
|
|
1825
|
+
) -> tuple[bool, float | None, float]:
|
|
1826
|
+
"""Estimate period from regular intervals.
|
|
1205
1827
|
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1828
|
+
Args:
|
|
1829
|
+
mean_interval: Mean interval between edges.
|
|
1830
|
+
cv: Coefficient of variation.
|
|
1831
|
+
sample_rate: Sampling rate in Hz.
|
|
1832
|
+
n_samples: Total number of samples.
|
|
1209
1833
|
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
period_samples = 2 * mean_interval
|
|
1214
|
-
num_periods =
|
|
1834
|
+
Returns:
|
|
1835
|
+
Tuple of (is_periodic, period_seconds, confidence).
|
|
1836
|
+
"""
|
|
1837
|
+
period_samples = 2 * mean_interval
|
|
1838
|
+
num_periods = n_samples / period_samples
|
|
1215
1839
|
|
|
1216
|
-
|
|
1217
|
-
if num_periods >= 0.5: # Allow detection with half a period
|
|
1840
|
+
if num_periods >= 0.5:
|
|
1218
1841
|
period_seconds = period_samples / sample_rate
|
|
1219
|
-
confidence = 1.0 - min(cv / 0.3, 0.5)
|
|
1842
|
+
confidence = 1.0 - min(cv / 0.3, 0.5)
|
|
1220
1843
|
return True, period_seconds, float(confidence)
|
|
1221
1844
|
|
|
1222
1845
|
return False, None, 0.0
|
|
@@ -1243,57 +1866,18 @@ class AnalysisRecommendation:
|
|
|
1243
1866
|
prerequisites_met: bool = True
|
|
1244
1867
|
|
|
1245
1868
|
|
|
1246
|
-
def
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
confidence_target: float = 0.7,
|
|
1252
|
-
exclude_domains: list[AnalysisDomain] | None = None,
|
|
1253
|
-
) -> list[AnalysisRecommendation]:
|
|
1254
|
-
"""Recommend which analyses to run based on signal characteristics.
|
|
1255
|
-
|
|
1256
|
-
Uses signal classification, quality metrics, and heuristics to
|
|
1257
|
-
recommend the most valuable analyses for a given signal.
|
|
1869
|
+
def _add_foundational_recommendations(
|
|
1870
|
+
recommendations: list[AnalysisRecommendation],
|
|
1871
|
+
exclude: set[AnalysisDomain],
|
|
1872
|
+
) -> None:
|
|
1873
|
+
"""Add foundational analysis recommendations (waveform, statistics).
|
|
1258
1874
|
|
|
1259
1875
|
Args:
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
time_budget_seconds: Optional time budget (prioritizes faster analyses).
|
|
1263
|
-
confidence_target: Minimum expected confidence threshold.
|
|
1264
|
-
exclude_domains: Domains to exclude from recommendations.
|
|
1265
|
-
|
|
1266
|
-
Returns:
|
|
1267
|
-
List of AnalysisRecommendation sorted by priority.
|
|
1268
|
-
|
|
1269
|
-
Example:
|
|
1270
|
-
>>> import numpy as np
|
|
1271
|
-
>>> import oscura as osc
|
|
1272
|
-
>>> # Generate test signal
|
|
1273
|
-
>>> t = np.linspace(0, 1, 10000)
|
|
1274
|
-
>>> signal = np.sin(2 * np.pi * 100 * t)
|
|
1275
|
-
>>> recommendations = osc.recommend_analyses(signal, sample_rate=10000)
|
|
1276
|
-
>>> for rec in recommendations[:3]:
|
|
1277
|
-
... print(f"{rec.domain.value}: {rec.reasoning}")
|
|
1278
|
-
waveform: Basic waveform measurements are always applicable
|
|
1279
|
-
statistics: Statistical analysis provides foundational metrics
|
|
1280
|
-
spectral: Spectral analysis reveals frequency content - signal appears periodic
|
|
1876
|
+
recommendations: List to append recommendations to
|
|
1877
|
+
exclude: Domains to exclude
|
|
1281
1878
|
"""
|
|
1282
|
-
# Avoid circular import
|
|
1283
1879
|
from oscura.reporting.config import AnalysisDomain
|
|
1284
1880
|
|
|
1285
|
-
recommendations = []
|
|
1286
|
-
exclude = set(exclude_domains or [])
|
|
1287
|
-
|
|
1288
|
-
# Classify signal
|
|
1289
|
-
classification = classify_signal(data, sample_rate)
|
|
1290
|
-
_signal_type = classification.get("signal_type", "unknown") # Reserved for future use
|
|
1291
|
-
is_digital = classification.get("is_digital", False)
|
|
1292
|
-
is_periodic = classification.get("is_periodic", False)
|
|
1293
|
-
_snr_db = classification.get("snr_db", 20) # Reserved for future use
|
|
1294
|
-
dominant_freq = classification.get("dominant_frequency")
|
|
1295
|
-
|
|
1296
|
-
# Always recommend these foundational domains
|
|
1297
1881
|
if AnalysisDomain.WAVEFORM not in exclude:
|
|
1298
1882
|
recommendations.append(
|
|
1299
1883
|
AnalysisRecommendation(
|
|
@@ -1316,7 +1900,21 @@ def recommend_analyses(
|
|
|
1316
1900
|
)
|
|
1317
1901
|
)
|
|
1318
1902
|
|
|
1319
|
-
|
|
1903
|
+
|
|
1904
|
+
def _add_spectral_recommendation(
|
|
1905
|
+
recommendations: list[AnalysisRecommendation],
|
|
1906
|
+
exclude: set[AnalysisDomain],
|
|
1907
|
+
is_periodic: bool,
|
|
1908
|
+
) -> None:
|
|
1909
|
+
"""Add spectral analysis recommendation.
|
|
1910
|
+
|
|
1911
|
+
Args:
|
|
1912
|
+
recommendations: List to append recommendations to
|
|
1913
|
+
exclude: Domains to exclude
|
|
1914
|
+
is_periodic: Whether signal is periodic
|
|
1915
|
+
"""
|
|
1916
|
+
from oscura.reporting.config import AnalysisDomain
|
|
1917
|
+
|
|
1320
1918
|
if AnalysisDomain.SPECTRAL not in exclude:
|
|
1321
1919
|
spectral_conf = 0.85 if is_periodic else 0.70
|
|
1322
1920
|
recommendations.append(
|
|
@@ -1330,70 +1928,113 @@ def recommend_analyses(
|
|
|
1330
1928
|
)
|
|
1331
1929
|
)
|
|
1332
1930
|
|
|
1333
|
-
# Digital-specific analyses
|
|
1334
|
-
if is_digital:
|
|
1335
|
-
if AnalysisDomain.DIGITAL not in exclude:
|
|
1336
|
-
recommendations.append(
|
|
1337
|
-
AnalysisRecommendation(
|
|
1338
|
-
domain=AnalysisDomain.DIGITAL,
|
|
1339
|
-
priority=1,
|
|
1340
|
-
confidence=0.90,
|
|
1341
|
-
reasoning="Digital signal detected - edge and timing analysis recommended",
|
|
1342
|
-
estimated_runtime_ms=80,
|
|
1343
|
-
)
|
|
1344
|
-
)
|
|
1345
1931
|
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1932
|
+
def _add_digital_recommendations(
|
|
1933
|
+
recommendations: list[AnalysisRecommendation],
|
|
1934
|
+
exclude: set[AnalysisDomain],
|
|
1935
|
+
dominant_freq: float | None,
|
|
1936
|
+
) -> None:
|
|
1937
|
+
"""Add digital signal analysis recommendations.
|
|
1938
|
+
|
|
1939
|
+
Args:
|
|
1940
|
+
recommendations: List to append recommendations to
|
|
1941
|
+
exclude: Domains to exclude
|
|
1942
|
+
dominant_freq: Dominant frequency in Hz
|
|
1943
|
+
"""
|
|
1944
|
+
from oscura.reporting.config import AnalysisDomain
|
|
1945
|
+
|
|
1946
|
+
if AnalysisDomain.DIGITAL not in exclude:
|
|
1947
|
+
recommendations.append(
|
|
1948
|
+
AnalysisRecommendation(
|
|
1949
|
+
domain=AnalysisDomain.DIGITAL,
|
|
1950
|
+
priority=1,
|
|
1951
|
+
confidence=0.90,
|
|
1952
|
+
reasoning="Digital signal detected - edge and timing analysis recommended",
|
|
1953
|
+
estimated_runtime_ms=80,
|
|
1355
1954
|
)
|
|
1955
|
+
)
|
|
1356
1956
|
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
estimated_runtime_ms=150,
|
|
1368
|
-
)
|
|
1369
|
-
)
|
|
1957
|
+
if AnalysisDomain.TIMING not in exclude:
|
|
1958
|
+
recommendations.append(
|
|
1959
|
+
AnalysisRecommendation(
|
|
1960
|
+
domain=AnalysisDomain.TIMING,
|
|
1961
|
+
priority=2,
|
|
1962
|
+
confidence=0.85,
|
|
1963
|
+
reasoning="Timing analysis valuable for digital signals",
|
|
1964
|
+
estimated_runtime_ms=60,
|
|
1965
|
+
)
|
|
1966
|
+
)
|
|
1370
1967
|
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1968
|
+
if AnalysisDomain.PROTOCOLS not in exclude and dominant_freq:
|
|
1969
|
+
# Check if frequency matches common baud rates
|
|
1970
|
+
common_bauds = [9600, 19200, 38400, 57600, 115200]
|
|
1971
|
+
if any(abs(dominant_freq * 2 - b) / b < 0.1 for b in common_bauds):
|
|
1374
1972
|
recommendations.append(
|
|
1375
1973
|
AnalysisRecommendation(
|
|
1376
|
-
domain=AnalysisDomain.
|
|
1974
|
+
domain=AnalysisDomain.PROTOCOLS,
|
|
1377
1975
|
priority=3,
|
|
1378
|
-
confidence=0.
|
|
1379
|
-
reasoning="
|
|
1380
|
-
estimated_runtime_ms=
|
|
1976
|
+
confidence=0.70,
|
|
1977
|
+
reasoning=f"Frequency {dominant_freq:.0f} Hz suggests serial protocol",
|
|
1978
|
+
estimated_runtime_ms=150,
|
|
1381
1979
|
)
|
|
1382
1980
|
)
|
|
1383
1981
|
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1982
|
+
|
|
1983
|
+
def _add_periodic_recommendations(
|
|
1984
|
+
recommendations: list[AnalysisRecommendation],
|
|
1985
|
+
exclude: set[AnalysisDomain],
|
|
1986
|
+
is_digital: bool,
|
|
1987
|
+
) -> None:
|
|
1988
|
+
"""Add periodic signal analysis recommendations.
|
|
1989
|
+
|
|
1990
|
+
Args:
|
|
1991
|
+
recommendations: List to append recommendations to
|
|
1992
|
+
exclude: Domains to exclude
|
|
1993
|
+
is_digital: Whether signal is digital
|
|
1994
|
+
"""
|
|
1995
|
+
from oscura.reporting.config import AnalysisDomain
|
|
1996
|
+
|
|
1997
|
+
if AnalysisDomain.JITTER not in exclude and is_digital:
|
|
1998
|
+
recommendations.append(
|
|
1999
|
+
AnalysisRecommendation(
|
|
2000
|
+
domain=AnalysisDomain.JITTER,
|
|
2001
|
+
priority=3,
|
|
2002
|
+
confidence=0.80,
|
|
2003
|
+
reasoning="Periodic digital signal - jitter analysis applicable",
|
|
2004
|
+
estimated_runtime_ms=120,
|
|
2005
|
+
)
|
|
2006
|
+
)
|
|
2007
|
+
|
|
2008
|
+
if AnalysisDomain.EYE not in exclude and is_digital:
|
|
2009
|
+
recommendations.append(
|
|
2010
|
+
AnalysisRecommendation(
|
|
2011
|
+
domain=AnalysisDomain.EYE,
|
|
2012
|
+
priority=3,
|
|
2013
|
+
confidence=0.75,
|
|
2014
|
+
reasoning="Eye diagram analysis for signal integrity assessment",
|
|
2015
|
+
estimated_runtime_ms=200,
|
|
1393
2016
|
)
|
|
2017
|
+
)
|
|
2018
|
+
|
|
2019
|
+
|
|
2020
|
+
def _add_pattern_and_entropy_recommendations(
|
|
2021
|
+
recommendations: list[AnalysisRecommendation],
|
|
2022
|
+
exclude: set[AnalysisDomain],
|
|
2023
|
+
data_length: int,
|
|
2024
|
+
is_periodic: bool,
|
|
2025
|
+
) -> None:
|
|
2026
|
+
"""Add pattern and entropy analysis recommendations.
|
|
2027
|
+
|
|
2028
|
+
Args:
|
|
2029
|
+
recommendations: List to append recommendations to
|
|
2030
|
+
exclude: Domains to exclude
|
|
2031
|
+
data_length: Length of signal data
|
|
2032
|
+
is_periodic: Whether signal is periodic
|
|
2033
|
+
"""
|
|
2034
|
+
from oscura.reporting.config import AnalysisDomain
|
|
1394
2035
|
|
|
1395
2036
|
# Pattern analysis - good for complex signals
|
|
1396
|
-
if AnalysisDomain.PATTERNS not in exclude and
|
|
2037
|
+
if AnalysisDomain.PATTERNS not in exclude and data_length > 1000:
|
|
1397
2038
|
pattern_conf = 0.70 if is_periodic else 0.50
|
|
1398
2039
|
recommendations.append(
|
|
1399
2040
|
AnalysisRecommendation(
|
|
@@ -1417,21 +2058,113 @@ def recommend_analyses(
|
|
|
1417
2058
|
)
|
|
1418
2059
|
)
|
|
1419
2060
|
|
|
1420
|
-
# Apply confidence threshold filter
|
|
1421
|
-
recommendations = [r for r in recommendations if r.confidence >= confidence_target]
|
|
1422
2061
|
|
|
1423
|
-
|
|
2062
|
+
def _filter_by_confidence(
|
|
2063
|
+
recommendations: list[AnalysisRecommendation],
|
|
2064
|
+
confidence_target: float,
|
|
2065
|
+
) -> list[AnalysisRecommendation]:
|
|
2066
|
+
"""Filter recommendations by confidence threshold.
|
|
2067
|
+
|
|
2068
|
+
Args:
|
|
2069
|
+
recommendations: List of recommendations
|
|
2070
|
+
confidence_target: Minimum confidence threshold
|
|
2071
|
+
|
|
2072
|
+
Returns:
|
|
2073
|
+
Filtered recommendations
|
|
2074
|
+
"""
|
|
2075
|
+
return [r for r in recommendations if r.confidence >= confidence_target]
|
|
2076
|
+
|
|
2077
|
+
|
|
2078
|
+
def _filter_by_time_budget(
|
|
2079
|
+
recommendations: list[AnalysisRecommendation],
|
|
2080
|
+
time_budget_seconds: float,
|
|
2081
|
+
) -> list[AnalysisRecommendation]:
|
|
2082
|
+
"""Filter recommendations by time budget.
|
|
2083
|
+
|
|
2084
|
+
Args:
|
|
2085
|
+
recommendations: List of recommendations
|
|
2086
|
+
time_budget_seconds: Time budget in seconds
|
|
2087
|
+
|
|
2088
|
+
Returns:
|
|
2089
|
+
Filtered recommendations within budget
|
|
2090
|
+
"""
|
|
2091
|
+
budget_ms = time_budget_seconds * 1000
|
|
2092
|
+
cumulative = 0
|
|
2093
|
+
filtered = []
|
|
2094
|
+
|
|
2095
|
+
# Sort by priority and confidence for selection
|
|
2096
|
+
for rec in sorted(recommendations, key=lambda x: (x.priority, -x.confidence)):
|
|
2097
|
+
if cumulative + rec.estimated_runtime_ms <= budget_ms:
|
|
2098
|
+
filtered.append(rec)
|
|
2099
|
+
cumulative += rec.estimated_runtime_ms
|
|
2100
|
+
|
|
2101
|
+
return filtered
|
|
2102
|
+
|
|
2103
|
+
|
|
2104
|
+
def recommend_analyses(
|
|
2105
|
+
data: NDArray[np.floating[Any]],
|
|
2106
|
+
sample_rate: float = 1.0,
|
|
2107
|
+
*,
|
|
2108
|
+
time_budget_seconds: float | None = None,
|
|
2109
|
+
confidence_target: float = 0.7,
|
|
2110
|
+
exclude_domains: list[AnalysisDomain] | None = None,
|
|
2111
|
+
) -> list[AnalysisRecommendation]:
|
|
2112
|
+
"""Recommend which analyses to run based on signal characteristics.
|
|
2113
|
+
|
|
2114
|
+
Uses signal classification, quality metrics, and heuristics to
|
|
2115
|
+
recommend the most valuable analyses for a given signal.
|
|
2116
|
+
|
|
2117
|
+
Args:
|
|
2118
|
+
data: Input signal data.
|
|
2119
|
+
sample_rate: Sample rate in Hz.
|
|
2120
|
+
time_budget_seconds: Optional time budget (prioritizes faster analyses).
|
|
2121
|
+
confidence_target: Minimum expected confidence threshold.
|
|
2122
|
+
exclude_domains: Domains to exclude from recommendations.
|
|
2123
|
+
|
|
2124
|
+
Returns:
|
|
2125
|
+
List of AnalysisRecommendation sorted by priority.
|
|
2126
|
+
|
|
2127
|
+
Example:
|
|
2128
|
+
>>> import numpy as np
|
|
2129
|
+
>>> import oscura as osc
|
|
2130
|
+
>>> # Generate test signal
|
|
2131
|
+
>>> t = np.linspace(0, 1, 10000)
|
|
2132
|
+
>>> signal = np.sin(2 * np.pi * 100 * t)
|
|
2133
|
+
>>> recommendations = osc.recommend_analyses(signal, sample_rate=10000)
|
|
2134
|
+
>>> for rec in recommendations[:3]:
|
|
2135
|
+
... print(f"{rec.domain.value}: {rec.reasoning}")
|
|
2136
|
+
waveform: Basic waveform measurements are always applicable
|
|
2137
|
+
statistics: Statistical analysis provides foundational metrics
|
|
2138
|
+
spectral: Spectral analysis reveals frequency content - signal appears periodic
|
|
2139
|
+
"""
|
|
2140
|
+
recommendations: list[AnalysisRecommendation] = []
|
|
2141
|
+
exclude = set(exclude_domains or [])
|
|
2142
|
+
|
|
2143
|
+
# Extract signal features via classification
|
|
2144
|
+
classification = classify_signal(data, sample_rate)
|
|
2145
|
+
is_digital = classification.get("is_digital", False)
|
|
2146
|
+
is_periodic = classification.get("is_periodic", False)
|
|
2147
|
+
dominant_freq = classification.get("dominant_frequency")
|
|
2148
|
+
|
|
2149
|
+
# Build recommendations based on signal characteristics
|
|
2150
|
+
_add_foundational_recommendations(recommendations, exclude)
|
|
2151
|
+
_add_spectral_recommendation(recommendations, exclude, is_periodic)
|
|
2152
|
+
|
|
2153
|
+
if is_digital:
|
|
2154
|
+
_add_digital_recommendations(recommendations, exclude, dominant_freq)
|
|
2155
|
+
|
|
2156
|
+
if is_periodic:
|
|
2157
|
+
_add_periodic_recommendations(recommendations, exclude, is_digital)
|
|
2158
|
+
|
|
2159
|
+
_add_pattern_and_entropy_recommendations(recommendations, exclude, len(data), is_periodic)
|
|
2160
|
+
|
|
2161
|
+
# Apply filtering and ranking
|
|
2162
|
+
recommendations = _filter_by_confidence(recommendations, confidence_target)
|
|
2163
|
+
|
|
1424
2164
|
if time_budget_seconds is not None:
|
|
1425
|
-
|
|
1426
|
-
cumulative = 0
|
|
1427
|
-
filtered = []
|
|
1428
|
-
for rec in sorted(recommendations, key=lambda x: (x.priority, -x.confidence)):
|
|
1429
|
-
if cumulative + rec.estimated_runtime_ms <= budget_ms:
|
|
1430
|
-
filtered.append(rec)
|
|
1431
|
-
cumulative += rec.estimated_runtime_ms
|
|
1432
|
-
recommendations = filtered
|
|
2165
|
+
recommendations = _filter_by_time_budget(recommendations, time_budget_seconds)
|
|
1433
2166
|
|
|
1434
|
-
#
|
|
2167
|
+
# Final ranking by priority, then confidence
|
|
1435
2168
|
recommendations.sort(key=lambda x: (x.priority, -x.confidence))
|
|
1436
2169
|
|
|
1437
2170
|
return recommendations
|