oscura 0.5.0__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +169 -167
- oscura/analyzers/__init__.py +3 -0
- oscura/analyzers/classification.py +659 -0
- oscura/analyzers/digital/__init__.py +0 -48
- oscura/analyzers/digital/edges.py +325 -65
- oscura/analyzers/digital/extraction.py +0 -195
- oscura/analyzers/digital/quality.py +293 -166
- oscura/analyzers/digital/timing.py +260 -115
- oscura/analyzers/digital/timing_numba.py +334 -0
- oscura/analyzers/entropy.py +605 -0
- oscura/analyzers/eye/diagram.py +176 -109
- oscura/analyzers/eye/metrics.py +5 -5
- oscura/analyzers/jitter/__init__.py +6 -4
- oscura/analyzers/jitter/ber.py +52 -52
- oscura/analyzers/jitter/classification.py +156 -0
- oscura/analyzers/jitter/decomposition.py +163 -113
- oscura/analyzers/jitter/spectrum.py +80 -64
- oscura/analyzers/ml/__init__.py +39 -0
- oscura/analyzers/ml/features.py +600 -0
- oscura/analyzers/ml/signal_classifier.py +604 -0
- oscura/analyzers/packet/daq.py +246 -158
- oscura/analyzers/packet/parser.py +12 -1
- oscura/analyzers/packet/payload.py +50 -2110
- oscura/analyzers/packet/payload_analysis.py +361 -181
- oscura/analyzers/packet/payload_patterns.py +133 -70
- oscura/analyzers/packet/stream.py +84 -23
- oscura/analyzers/patterns/__init__.py +26 -5
- oscura/analyzers/patterns/anomaly_detection.py +908 -0
- oscura/analyzers/patterns/clustering.py +169 -108
- oscura/analyzers/patterns/clustering_optimized.py +227 -0
- oscura/analyzers/patterns/discovery.py +1 -1
- oscura/analyzers/patterns/matching.py +581 -197
- oscura/analyzers/patterns/pattern_mining.py +778 -0
- oscura/analyzers/patterns/periodic.py +121 -38
- oscura/analyzers/patterns/sequences.py +175 -78
- oscura/analyzers/power/conduction.py +1 -1
- oscura/analyzers/power/soa.py +6 -6
- oscura/analyzers/power/switching.py +250 -110
- oscura/analyzers/protocol/__init__.py +17 -1
- oscura/analyzers/protocols/__init__.py +1 -22
- oscura/analyzers/protocols/base.py +6 -6
- oscura/analyzers/protocols/ble/__init__.py +38 -0
- oscura/analyzers/protocols/ble/analyzer.py +809 -0
- oscura/analyzers/protocols/ble/uuids.py +288 -0
- oscura/analyzers/protocols/can.py +257 -127
- oscura/analyzers/protocols/can_fd.py +107 -80
- oscura/analyzers/protocols/flexray.py +139 -80
- oscura/analyzers/protocols/hdlc.py +93 -58
- oscura/analyzers/protocols/i2c.py +247 -106
- oscura/analyzers/protocols/i2s.py +138 -86
- oscura/analyzers/protocols/industrial/__init__.py +40 -0
- oscura/analyzers/protocols/industrial/bacnet/__init__.py +33 -0
- oscura/analyzers/protocols/industrial/bacnet/analyzer.py +708 -0
- oscura/analyzers/protocols/industrial/bacnet/encoding.py +412 -0
- oscura/analyzers/protocols/industrial/bacnet/services.py +622 -0
- oscura/analyzers/protocols/industrial/ethercat/__init__.py +30 -0
- oscura/analyzers/protocols/industrial/ethercat/analyzer.py +474 -0
- oscura/analyzers/protocols/industrial/ethercat/mailbox.py +339 -0
- oscura/analyzers/protocols/industrial/ethercat/topology.py +166 -0
- oscura/analyzers/protocols/industrial/modbus/__init__.py +31 -0
- oscura/analyzers/protocols/industrial/modbus/analyzer.py +525 -0
- oscura/analyzers/protocols/industrial/modbus/crc.py +79 -0
- oscura/analyzers/protocols/industrial/modbus/functions.py +436 -0
- oscura/analyzers/protocols/industrial/opcua/__init__.py +21 -0
- oscura/analyzers/protocols/industrial/opcua/analyzer.py +552 -0
- oscura/analyzers/protocols/industrial/opcua/datatypes.py +446 -0
- oscura/analyzers/protocols/industrial/opcua/services.py +264 -0
- oscura/analyzers/protocols/industrial/profinet/__init__.py +23 -0
- oscura/analyzers/protocols/industrial/profinet/analyzer.py +441 -0
- oscura/analyzers/protocols/industrial/profinet/dcp.py +263 -0
- oscura/analyzers/protocols/industrial/profinet/ptcp.py +200 -0
- oscura/analyzers/protocols/jtag.py +180 -98
- oscura/analyzers/protocols/lin.py +219 -114
- oscura/analyzers/protocols/manchester.py +4 -4
- oscura/analyzers/protocols/onewire.py +253 -149
- oscura/analyzers/protocols/parallel_bus/__init__.py +20 -0
- oscura/analyzers/protocols/parallel_bus/centronics.py +92 -0
- oscura/analyzers/protocols/parallel_bus/gpib.py +137 -0
- oscura/analyzers/protocols/spi.py +192 -95
- oscura/analyzers/protocols/swd.py +321 -167
- oscura/analyzers/protocols/uart.py +267 -125
- oscura/analyzers/protocols/usb.py +235 -131
- oscura/analyzers/side_channel/power.py +17 -12
- oscura/analyzers/signal/__init__.py +15 -0
- oscura/analyzers/signal/timing_analysis.py +1086 -0
- oscura/analyzers/signal_integrity/__init__.py +4 -1
- oscura/analyzers/signal_integrity/sparams.py +2 -19
- oscura/analyzers/spectral/chunked.py +129 -60
- oscura/analyzers/spectral/chunked_fft.py +300 -94
- oscura/analyzers/spectral/chunked_wavelet.py +100 -80
- oscura/analyzers/statistical/checksum.py +376 -217
- oscura/analyzers/statistical/classification.py +229 -107
- oscura/analyzers/statistical/entropy.py +78 -53
- oscura/analyzers/statistics/correlation.py +407 -211
- oscura/analyzers/statistics/outliers.py +2 -2
- oscura/analyzers/statistics/streaming.py +30 -5
- oscura/analyzers/validation.py +216 -101
- oscura/analyzers/waveform/measurements.py +9 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +31 -15
- oscura/analyzers/waveform/spectral.py +500 -228
- oscura/api/__init__.py +31 -5
- oscura/api/dsl/__init__.py +582 -0
- oscura/{dsl → api/dsl}/commands.py +43 -76
- oscura/{dsl → api/dsl}/interpreter.py +26 -51
- oscura/{dsl → api/dsl}/parser.py +107 -77
- oscura/{dsl → api/dsl}/repl.py +2 -2
- oscura/api/dsl.py +1 -1
- oscura/{integrations → api/integrations}/__init__.py +1 -1
- oscura/{integrations → api/integrations}/llm.py +201 -102
- oscura/api/operators.py +3 -3
- oscura/api/optimization.py +144 -30
- oscura/api/rest_server.py +921 -0
- oscura/api/server/__init__.py +17 -0
- oscura/api/server/dashboard.py +850 -0
- oscura/api/server/static/README.md +34 -0
- oscura/api/server/templates/base.html +181 -0
- oscura/api/server/templates/export.html +120 -0
- oscura/api/server/templates/home.html +284 -0
- oscura/api/server/templates/protocols.html +58 -0
- oscura/api/server/templates/reports.html +43 -0
- oscura/api/server/templates/session_detail.html +89 -0
- oscura/api/server/templates/sessions.html +83 -0
- oscura/api/server/templates/waveforms.html +73 -0
- oscura/automotive/__init__.py +8 -1
- oscura/automotive/can/__init__.py +10 -0
- oscura/automotive/can/checksum.py +3 -1
- oscura/automotive/can/dbc_generator.py +590 -0
- oscura/automotive/can/message_wrapper.py +121 -74
- oscura/automotive/can/patterns.py +98 -21
- oscura/automotive/can/session.py +292 -56
- oscura/automotive/can/state_machine.py +6 -3
- oscura/automotive/can/stimulus_response.py +97 -75
- oscura/automotive/dbc/__init__.py +10 -2
- oscura/automotive/dbc/generator.py +84 -56
- oscura/automotive/dbc/parser.py +6 -6
- oscura/automotive/dtc/data.json +2763 -0
- oscura/automotive/dtc/database.py +2 -2
- oscura/automotive/flexray/__init__.py +31 -0
- oscura/automotive/flexray/analyzer.py +504 -0
- oscura/automotive/flexray/crc.py +185 -0
- oscura/automotive/flexray/fibex.py +449 -0
- oscura/automotive/j1939/__init__.py +45 -8
- oscura/automotive/j1939/analyzer.py +605 -0
- oscura/automotive/j1939/spns.py +326 -0
- oscura/automotive/j1939/transport.py +306 -0
- oscura/automotive/lin/__init__.py +47 -0
- oscura/automotive/lin/analyzer.py +612 -0
- oscura/automotive/loaders/blf.py +13 -2
- oscura/automotive/loaders/csv_can.py +143 -72
- oscura/automotive/loaders/dispatcher.py +50 -2
- oscura/automotive/loaders/mdf.py +86 -45
- oscura/automotive/loaders/pcap.py +111 -61
- oscura/automotive/uds/__init__.py +4 -0
- oscura/automotive/uds/analyzer.py +725 -0
- oscura/automotive/uds/decoder.py +140 -58
- oscura/automotive/uds/models.py +7 -1
- oscura/automotive/visualization.py +1 -1
- oscura/cli/analyze.py +348 -0
- oscura/cli/batch.py +142 -122
- oscura/cli/benchmark.py +275 -0
- oscura/cli/characterize.py +137 -82
- oscura/cli/compare.py +224 -131
- oscura/cli/completion.py +250 -0
- oscura/cli/config_cmd.py +361 -0
- oscura/cli/decode.py +164 -87
- oscura/cli/export.py +286 -0
- oscura/cli/main.py +115 -31
- oscura/{onboarding → cli/onboarding}/__init__.py +3 -3
- oscura/{onboarding → cli/onboarding}/help.py +80 -58
- oscura/{onboarding → cli/onboarding}/tutorials.py +97 -72
- oscura/{onboarding → cli/onboarding}/wizard.py +55 -36
- oscura/cli/progress.py +147 -0
- oscura/cli/shell.py +157 -135
- oscura/cli/validate_cmd.py +204 -0
- oscura/cli/visualize.py +158 -0
- oscura/convenience.py +125 -79
- oscura/core/__init__.py +4 -2
- oscura/core/backend_selector.py +3 -3
- oscura/core/cache.py +126 -15
- oscura/core/cancellation.py +1 -1
- oscura/{config → core/config}/__init__.py +20 -11
- oscura/{config → core/config}/defaults.py +1 -1
- oscura/{config → core/config}/loader.py +7 -5
- oscura/{config → core/config}/memory.py +5 -5
- oscura/{config → core/config}/migration.py +1 -1
- oscura/{config → core/config}/pipeline.py +99 -23
- oscura/{config → core/config}/preferences.py +1 -1
- oscura/{config → core/config}/protocol.py +3 -3
- oscura/{config → core/config}/schema.py +426 -272
- oscura/{config → core/config}/settings.py +1 -1
- oscura/{config → core/config}/thresholds.py +195 -153
- oscura/core/correlation.py +5 -6
- oscura/core/cross_domain.py +0 -2
- oscura/core/debug.py +9 -5
- oscura/{extensibility → core/extensibility}/docs.py +158 -70
- oscura/{extensibility → core/extensibility}/extensions.py +160 -76
- oscura/{extensibility → core/extensibility}/logging.py +1 -1
- oscura/{extensibility → core/extensibility}/measurements.py +1 -1
- oscura/{extensibility → core/extensibility}/plugins.py +1 -1
- oscura/{extensibility → core/extensibility}/templates.py +73 -3
- oscura/{extensibility → core/extensibility}/validation.py +1 -1
- oscura/core/gpu_backend.py +11 -7
- oscura/core/log_query.py +101 -11
- oscura/core/logging.py +126 -54
- oscura/core/logging_advanced.py +5 -5
- oscura/core/memory_limits.py +108 -70
- oscura/core/memory_monitor.py +2 -2
- oscura/core/memory_progress.py +7 -7
- oscura/core/memory_warnings.py +1 -1
- oscura/core/numba_backend.py +13 -13
- oscura/{plugins → core/plugins}/__init__.py +9 -9
- oscura/{plugins → core/plugins}/base.py +7 -7
- oscura/{plugins → core/plugins}/cli.py +3 -3
- oscura/{plugins → core/plugins}/discovery.py +186 -106
- oscura/{plugins → core/plugins}/lifecycle.py +1 -1
- oscura/{plugins → core/plugins}/manager.py +7 -7
- oscura/{plugins → core/plugins}/registry.py +3 -3
- oscura/{plugins → core/plugins}/versioning.py +1 -1
- oscura/core/progress.py +16 -1
- oscura/core/provenance.py +8 -2
- oscura/{schemas → core/schemas}/__init__.py +2 -2
- oscura/core/schemas/bus_configuration.json +322 -0
- oscura/core/schemas/device_mapping.json +182 -0
- oscura/core/schemas/packet_format.json +418 -0
- oscura/core/schemas/protocol_definition.json +363 -0
- oscura/core/types.py +4 -0
- oscura/core/uncertainty.py +3 -3
- oscura/correlation/__init__.py +52 -0
- oscura/correlation/multi_protocol.py +811 -0
- oscura/discovery/auto_decoder.py +117 -35
- oscura/discovery/comparison.py +191 -86
- oscura/discovery/quality_validator.py +155 -68
- oscura/discovery/signal_detector.py +196 -79
- oscura/export/__init__.py +18 -20
- oscura/export/kaitai_struct.py +513 -0
- oscura/export/scapy_layer.py +801 -0
- oscura/export/wireshark/README.md +15 -15
- oscura/export/wireshark/generator.py +1 -1
- oscura/export/wireshark/templates/dissector.lua.j2 +2 -2
- oscura/export/wireshark_dissector.py +746 -0
- oscura/guidance/wizard.py +207 -111
- oscura/hardware/__init__.py +19 -0
- oscura/{acquisition → hardware/acquisition}/__init__.py +4 -4
- oscura/{acquisition → hardware/acquisition}/file.py +2 -2
- oscura/{acquisition → hardware/acquisition}/hardware.py +7 -7
- oscura/{acquisition → hardware/acquisition}/saleae.py +15 -12
- oscura/{acquisition → hardware/acquisition}/socketcan.py +1 -1
- oscura/{acquisition → hardware/acquisition}/streaming.py +2 -2
- oscura/{acquisition → hardware/acquisition}/synthetic.py +3 -3
- oscura/{acquisition → hardware/acquisition}/visa.py +33 -11
- oscura/hardware/firmware/__init__.py +29 -0
- oscura/hardware/firmware/pattern_recognition.py +874 -0
- oscura/hardware/hal_detector.py +736 -0
- oscura/hardware/security/__init__.py +37 -0
- oscura/hardware/security/side_channel_detector.py +1126 -0
- oscura/inference/__init__.py +4 -0
- oscura/inference/active_learning/README.md +7 -7
- oscura/inference/active_learning/observation_table.py +4 -1
- oscura/inference/alignment.py +216 -123
- oscura/inference/bayesian.py +113 -33
- oscura/inference/crc_reverse.py +101 -55
- oscura/inference/logic.py +6 -2
- oscura/inference/message_format.py +342 -183
- oscura/inference/protocol.py +95 -44
- oscura/inference/protocol_dsl.py +180 -82
- oscura/inference/signal_intelligence.py +1439 -706
- oscura/inference/spectral.py +99 -57
- oscura/inference/state_machine.py +810 -158
- oscura/inference/stream.py +270 -110
- oscura/iot/__init__.py +34 -0
- oscura/iot/coap/__init__.py +32 -0
- oscura/iot/coap/analyzer.py +668 -0
- oscura/iot/coap/options.py +212 -0
- oscura/iot/lorawan/__init__.py +21 -0
- oscura/iot/lorawan/crypto.py +206 -0
- oscura/iot/lorawan/decoder.py +801 -0
- oscura/iot/lorawan/mac_commands.py +341 -0
- oscura/iot/mqtt/__init__.py +27 -0
- oscura/iot/mqtt/analyzer.py +999 -0
- oscura/iot/mqtt/properties.py +315 -0
- oscura/iot/zigbee/__init__.py +31 -0
- oscura/iot/zigbee/analyzer.py +615 -0
- oscura/iot/zigbee/security.py +153 -0
- oscura/iot/zigbee/zcl.py +349 -0
- oscura/jupyter/display.py +125 -45
- oscura/{exploratory → jupyter/exploratory}/__init__.py +8 -8
- oscura/{exploratory → jupyter/exploratory}/error_recovery.py +298 -141
- oscura/jupyter/exploratory/fuzzy.py +746 -0
- oscura/{exploratory → jupyter/exploratory}/fuzzy_advanced.py +258 -100
- oscura/{exploratory → jupyter/exploratory}/legacy.py +464 -242
- oscura/{exploratory → jupyter/exploratory}/parse.py +167 -145
- oscura/{exploratory → jupyter/exploratory}/recovery.py +119 -87
- oscura/jupyter/exploratory/sync.py +612 -0
- oscura/{exploratory → jupyter/exploratory}/unknown.py +299 -176
- oscura/jupyter/magic.py +4 -4
- oscura/{ui → jupyter/ui}/__init__.py +2 -2
- oscura/{ui → jupyter/ui}/formatters.py +3 -3
- oscura/{ui → jupyter/ui}/progressive_display.py +153 -82
- oscura/loaders/__init__.py +171 -63
- oscura/loaders/binary.py +88 -1
- oscura/loaders/chipwhisperer.py +153 -137
- oscura/loaders/configurable.py +208 -86
- oscura/loaders/csv_loader.py +458 -215
- oscura/loaders/hdf5_loader.py +278 -119
- oscura/loaders/lazy.py +87 -54
- oscura/loaders/mmap_loader.py +1 -1
- oscura/loaders/numpy_loader.py +253 -116
- oscura/loaders/pcap.py +226 -151
- oscura/loaders/rigol.py +110 -49
- oscura/loaders/sigrok.py +201 -78
- oscura/loaders/tdms.py +81 -58
- oscura/loaders/tektronix.py +291 -174
- oscura/loaders/touchstone.py +182 -87
- oscura/loaders/vcd.py +215 -117
- oscura/loaders/wav.py +155 -68
- oscura/reporting/__init__.py +9 -7
- oscura/reporting/analyze.py +352 -146
- oscura/reporting/argument_preparer.py +69 -14
- oscura/reporting/auto_report.py +97 -61
- oscura/reporting/batch.py +131 -58
- oscura/reporting/chart_selection.py +57 -45
- oscura/reporting/comparison.py +63 -17
- oscura/reporting/content/executive.py +76 -24
- oscura/reporting/core_formats/multi_format.py +11 -8
- oscura/reporting/engine.py +312 -158
- oscura/reporting/enhanced_reports.py +949 -0
- oscura/reporting/export.py +86 -43
- oscura/reporting/formatting/numbers.py +69 -42
- oscura/reporting/html.py +139 -58
- oscura/reporting/index.py +137 -65
- oscura/reporting/output.py +158 -67
- oscura/reporting/pdf.py +67 -102
- oscura/reporting/plots.py +191 -112
- oscura/reporting/sections.py +88 -47
- oscura/reporting/standards.py +104 -61
- oscura/reporting/summary_generator.py +75 -55
- oscura/reporting/tables.py +138 -54
- oscura/reporting/templates/enhanced/protocol_re.html +525 -0
- oscura/reporting/templates/index.md +13 -13
- oscura/sessions/__init__.py +14 -23
- oscura/sessions/base.py +3 -3
- oscura/sessions/blackbox.py +106 -10
- oscura/sessions/generic.py +2 -2
- oscura/sessions/legacy.py +783 -0
- oscura/side_channel/__init__.py +63 -0
- oscura/side_channel/dpa.py +1025 -0
- oscura/utils/__init__.py +15 -1
- oscura/utils/autodetect.py +1 -5
- oscura/utils/bitwise.py +118 -0
- oscura/{builders → utils/builders}/__init__.py +1 -1
- oscura/{comparison → utils/comparison}/__init__.py +6 -6
- oscura/{comparison → utils/comparison}/compare.py +202 -101
- oscura/{comparison → utils/comparison}/golden.py +83 -63
- oscura/{comparison → utils/comparison}/limits.py +313 -89
- oscura/{comparison → utils/comparison}/mask.py +151 -45
- oscura/{comparison → utils/comparison}/trace_diff.py +1 -1
- oscura/{comparison → utils/comparison}/visualization.py +147 -89
- oscura/{component → utils/component}/__init__.py +3 -3
- oscura/{component → utils/component}/impedance.py +122 -58
- oscura/{component → utils/component}/reactive.py +165 -168
- oscura/{component → utils/component}/transmission_line.py +3 -3
- oscura/{filtering → utils/filtering}/__init__.py +6 -6
- oscura/{filtering → utils/filtering}/base.py +1 -1
- oscura/{filtering → utils/filtering}/convenience.py +2 -2
- oscura/{filtering → utils/filtering}/design.py +169 -93
- oscura/{filtering → utils/filtering}/filters.py +2 -2
- oscura/{filtering → utils/filtering}/introspection.py +2 -2
- oscura/utils/geometry.py +31 -0
- oscura/utils/imports.py +184 -0
- oscura/utils/lazy.py +1 -1
- oscura/{math → utils/math}/__init__.py +2 -2
- oscura/{math → utils/math}/arithmetic.py +114 -48
- oscura/{math → utils/math}/interpolation.py +139 -106
- oscura/utils/memory.py +129 -66
- oscura/utils/memory_advanced.py +92 -9
- oscura/utils/memory_extensions.py +10 -8
- oscura/{optimization → utils/optimization}/__init__.py +1 -1
- oscura/{optimization → utils/optimization}/search.py +2 -2
- oscura/utils/performance/__init__.py +58 -0
- oscura/utils/performance/caching.py +889 -0
- oscura/utils/performance/lsh_clustering.py +333 -0
- oscura/utils/performance/memory_optimizer.py +699 -0
- oscura/utils/performance/optimizations.py +675 -0
- oscura/utils/performance/parallel.py +654 -0
- oscura/utils/performance/profiling.py +661 -0
- oscura/{pipeline → utils/pipeline}/base.py +1 -1
- oscura/{pipeline → utils/pipeline}/composition.py +11 -3
- oscura/{pipeline → utils/pipeline}/parallel.py +3 -2
- oscura/{pipeline → utils/pipeline}/pipeline.py +1 -1
- oscura/{pipeline → utils/pipeline}/reverse_engineering.py +412 -221
- oscura/{search → utils/search}/__init__.py +3 -3
- oscura/{search → utils/search}/anomaly.py +188 -58
- oscura/utils/search/context.py +294 -0
- oscura/{search → utils/search}/pattern.py +138 -10
- oscura/utils/serial.py +51 -0
- oscura/utils/storage/__init__.py +61 -0
- oscura/utils/storage/database.py +1166 -0
- oscura/{streaming → utils/streaming}/chunked.py +302 -143
- oscura/{streaming → utils/streaming}/progressive.py +1 -1
- oscura/{streaming → utils/streaming}/realtime.py +3 -2
- oscura/{triggering → utils/triggering}/__init__.py +6 -6
- oscura/{triggering → utils/triggering}/base.py +6 -6
- oscura/{triggering → utils/triggering}/edge.py +2 -2
- oscura/{triggering → utils/triggering}/pattern.py +2 -2
- oscura/{triggering → utils/triggering}/pulse.py +115 -74
- oscura/{triggering → utils/triggering}/window.py +2 -2
- oscura/utils/validation.py +32 -0
- oscura/validation/__init__.py +121 -0
- oscura/{compliance → validation/compliance}/__init__.py +5 -5
- oscura/{compliance → validation/compliance}/advanced.py +5 -5
- oscura/{compliance → validation/compliance}/masks.py +1 -1
- oscura/{compliance → validation/compliance}/reporting.py +127 -53
- oscura/{compliance → validation/compliance}/testing.py +114 -52
- oscura/validation/compliance_tests.py +915 -0
- oscura/validation/fuzzer.py +990 -0
- oscura/validation/grammar_tests.py +596 -0
- oscura/validation/grammar_validator.py +904 -0
- oscura/validation/hil_testing.py +977 -0
- oscura/{quality → validation/quality}/__init__.py +4 -4
- oscura/{quality → validation/quality}/ensemble.py +251 -171
- oscura/{quality → validation/quality}/explainer.py +3 -3
- oscura/{quality → validation/quality}/scoring.py +1 -1
- oscura/{quality → validation/quality}/warnings.py +4 -4
- oscura/validation/regression_suite.py +808 -0
- oscura/validation/replay.py +788 -0
- oscura/{testing → validation/testing}/__init__.py +2 -2
- oscura/{testing → validation/testing}/synthetic.py +5 -5
- oscura/visualization/__init__.py +9 -0
- oscura/visualization/accessibility.py +1 -1
- oscura/visualization/annotations.py +64 -67
- oscura/visualization/colors.py +7 -7
- oscura/visualization/digital.py +180 -81
- oscura/visualization/eye.py +236 -85
- oscura/visualization/interactive.py +320 -143
- oscura/visualization/jitter.py +587 -247
- oscura/visualization/layout.py +169 -134
- oscura/visualization/optimization.py +103 -52
- oscura/visualization/palettes.py +1 -1
- oscura/visualization/power.py +427 -211
- oscura/visualization/power_extended.py +626 -297
- oscura/visualization/presets.py +2 -0
- oscura/visualization/protocols.py +495 -181
- oscura/visualization/render.py +79 -63
- oscura/visualization/reverse_engineering.py +171 -124
- oscura/visualization/signal_integrity.py +460 -279
- oscura/visualization/specialized.py +190 -100
- oscura/visualization/spectral.py +670 -255
- oscura/visualization/thumbnails.py +166 -137
- oscura/visualization/waveform.py +150 -63
- oscura/workflows/__init__.py +3 -0
- oscura/{batch → workflows/batch}/__init__.py +5 -5
- oscura/{batch → workflows/batch}/advanced.py +150 -75
- oscura/workflows/batch/aggregate.py +531 -0
- oscura/workflows/batch/analyze.py +236 -0
- oscura/{batch → workflows/batch}/logging.py +2 -2
- oscura/{batch → workflows/batch}/metrics.py +1 -1
- oscura/workflows/complete_re.py +1144 -0
- oscura/workflows/compliance.py +44 -54
- oscura/workflows/digital.py +197 -51
- oscura/workflows/legacy/__init__.py +12 -0
- oscura/{workflow → workflows/legacy}/dag.py +4 -1
- oscura/workflows/multi_trace.py +9 -9
- oscura/workflows/power.py +42 -62
- oscura/workflows/protocol.py +82 -49
- oscura/workflows/reverse_engineering.py +351 -150
- oscura/workflows/signal_integrity.py +157 -82
- oscura-0.6.0.dist-info/METADATA +643 -0
- oscura-0.6.0.dist-info/RECORD +590 -0
- oscura/analyzers/digital/ic_database.py +0 -498
- oscura/analyzers/digital/timing_paths.py +0 -339
- oscura/analyzers/digital/vintage.py +0 -377
- oscura/analyzers/digital/vintage_result.py +0 -148
- oscura/analyzers/protocols/parallel_bus.py +0 -449
- oscura/batch/aggregate.py +0 -300
- oscura/batch/analyze.py +0 -139
- oscura/dsl/__init__.py +0 -73
- oscura/exceptions.py +0 -59
- oscura/exploratory/fuzzy.py +0 -513
- oscura/exploratory/sync.py +0 -384
- oscura/export/wavedrom.py +0 -430
- oscura/exporters/__init__.py +0 -94
- oscura/exporters/csv.py +0 -303
- oscura/exporters/exporters.py +0 -44
- oscura/exporters/hdf5.py +0 -217
- oscura/exporters/html_export.py +0 -701
- oscura/exporters/json_export.py +0 -338
- oscura/exporters/markdown_export.py +0 -367
- oscura/exporters/matlab_export.py +0 -354
- oscura/exporters/npz_export.py +0 -219
- oscura/exporters/spice_export.py +0 -210
- oscura/exporters/vintage_logic_csv.py +0 -247
- oscura/reporting/vintage_logic_report.py +0 -523
- oscura/search/context.py +0 -149
- oscura/session/__init__.py +0 -34
- oscura/session/annotations.py +0 -289
- oscura/session/history.py +0 -313
- oscura/session/session.py +0 -520
- oscura/visualization/digital_advanced.py +0 -718
- oscura/visualization/figure_manager.py +0 -156
- oscura/workflow/__init__.py +0 -13
- oscura-0.5.0.dist-info/METADATA +0 -407
- oscura-0.5.0.dist-info/RECORD +0 -486
- /oscura/core/{config.py → config/legacy.py} +0 -0
- /oscura/{extensibility → core/extensibility}/__init__.py +0 -0
- /oscura/{extensibility → core/extensibility}/registry.py +0 -0
- /oscura/{plugins → core/plugins}/isolation.py +0 -0
- /oscura/{builders → utils/builders}/signal_builder.py +0 -0
- /oscura/{optimization → utils/optimization}/parallel.py +0 -0
- /oscura/{pipeline → utils/pipeline}/__init__.py +0 -0
- /oscura/{streaming → utils/streaming}/__init__.py +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/WHEEL +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/entry_points.txt +0 -0
- {oscura-0.5.0.dist-info → oscura-0.6.0.dist-info}/licenses/LICENSE +0 -0
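Most of the churn above is a re-packaging rather than new behavior: `dsl` and `integrations` move under `api`, `onboarding` under `cli`, `config`/`extensibility`/`plugins`/`schemas` under `core`, `acquisition` under `hardware`, `exploratory`/`ui` under `jupyter`, many utility packages under `utils`, and `compliance`/`quality`/`testing` under `validation`. A minimal sketch of the corresponding import updates follows; it assumes downstream code imports the moved modules by their new package paths, and it claims nothing about backward-compatibility shims, which this listing alone cannot confirm.

```python
# Hypothetical import updates for the 0.5.0 -> 0.6.0 module moves listed above.
# Assumption: only the package paths changed; module names are taken verbatim
# from the rename entries (e.g. oscura/{config -> core/config}/loader.py).

# 0.5.0-era module paths (left-hand side of the renames):
#   oscura.config.loader, oscura.plugins.manager, oscura.comparison.compare

# 0.6.0 module paths (right-hand side of the renames):
from oscura.core.config import loader        # was oscura.config.loader
from oscura.core.plugins import manager      # was oscura.plugins.manager
from oscura.utils.comparison import compare  # was oscura.comparison.compare
```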
|
@@ -0,0 +1,1086 @@
|
|
|
1
|
+
"""Comprehensive timing analysis and clock recovery for signal synchronization.
|
|
2
|
+
|
|
3
|
+
This module provides advanced timing analysis capabilities including:
|
|
4
|
+
- Clock recovery using multiple methods (ZCD, histogram, autocorrelation, PLL, FFT)
|
|
5
|
+
- Baud rate detection for serial protocols
|
|
6
|
+
- Timing jitter and drift analysis
|
|
7
|
+
- Phase-locked loop (PLL) simulation
|
|
8
|
+
- Signal-to-noise ratio (SNR) calculation
|
|
9
|
+
- Eye diagram generation
|
|
10
|
+
|
|
11
|
+
Example:
|
|
12
|
+
>>> import numpy as np
|
|
13
|
+
>>> analyzer = TimingAnalyzer(method="autocorrelation")
|
|
14
|
+
>>> signal = np.sin(2 * np.pi * 1e6 * np.linspace(0, 1e-3, 100000))
|
|
15
|
+
>>> result = analyzer.recover_clock(signal, sample_rate=100e6)
|
|
16
|
+
>>> print(f"Clock rate: {result.detected_clock_rate / 1e6:.3f} MHz")
|
|
17
|
+
Clock rate: 1.000 MHz
|
|
18
|
+
>>> print(f"Confidence: {result.confidence:.2f}")
|
|
19
|
+
Confidence: 0.95
|
|
20
|
+
|
|
21
|
+
References:
|
|
22
|
+
- Digital Communications by John G. Proakis
|
|
23
|
+
- Clock Recovery in High-Speed Optical Fiber Systems (IEEE)
|
|
24
|
+
- Phase-Locked Loop Design Handbook by Dan H. Wolaver
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
from __future__ import annotations
|
|
28
|
+
|
|
29
|
+
from dataclasses import dataclass, field
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from typing import TYPE_CHECKING, Any, ClassVar
|
|
32
|
+
|
|
33
|
+
import numpy as np
|
|
34
|
+
|
|
35
|
+
if TYPE_CHECKING:
|
|
36
|
+
from numpy.typing import NDArray
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@dataclass
|
|
40
|
+
class TimingAnalysisResult:
|
|
41
|
+
"""Timing analysis and clock recovery result.
|
|
42
|
+
|
|
43
|
+
Attributes:
|
|
44
|
+
detected_clock_rate: Recovered clock frequency in Hz.
|
|
45
|
+
confidence: Confidence score from 0.0 to 1.0.
|
|
46
|
+
jitter_rms: RMS jitter in seconds.
|
|
47
|
+
drift_rate: Clock drift in parts per million (ppm).
|
|
48
|
+
snr_db: Signal-to-noise ratio in decibels.
|
|
49
|
+
method: Method used ("zcd", "histogram", "autocorrelation", "pll", "fft").
|
|
50
|
+
statistics: Additional method-specific statistics.
|
|
51
|
+
|
|
52
|
+
Example:
|
|
53
|
+
>>> result = TimingAnalysisResult(
|
|
54
|
+
... detected_clock_rate=10e6,
|
|
55
|
+
... confidence=0.95,
|
|
56
|
+
... jitter_rms=1e-12,
|
|
57
|
+
... drift_rate=2.5,
|
|
58
|
+
... snr_db=45.0,
|
|
59
|
+
... method="autocorrelation"
|
|
60
|
+
... )
|
|
61
|
+
>>> print(f"Clock: {result.detected_clock_rate / 1e6:.1f} MHz")
|
|
62
|
+
Clock: 10.0 MHz
|
|
63
|
+
"""
|
|
64
|
+
|
|
65
|
+
detected_clock_rate: float
|
|
66
|
+
confidence: float
|
|
67
|
+
jitter_rms: float
|
|
68
|
+
drift_rate: float
|
|
69
|
+
snr_db: float
|
|
70
|
+
method: str
|
|
71
|
+
statistics: dict[str, float] = field(default_factory=dict)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class TimingAnalyzer:
|
|
75
|
+
"""Timing analysis and clock recovery engine.
|
|
76
|
+
|
|
77
|
+
Provides comprehensive timing analysis including multiple clock recovery
|
|
78
|
+
methods, jitter/drift measurement, and signal quality assessment.
|
|
79
|
+
|
|
80
|
+
Attributes:
|
|
81
|
+
method: Clock recovery method to use.
|
|
82
|
+
METHODS: Available clock recovery methods.
|
|
83
|
+
|
|
84
|
+
Example:
|
|
85
|
+
>>> analyzer = TimingAnalyzer(method="autocorrelation")
|
|
86
|
+
>>> signal = np.random.randn(10000)
|
|
87
|
+
>>> result = analyzer.recover_clock(signal, sample_rate=1e6)
|
|
88
|
+
>>> print(f"Method: {result.method}")
|
|
89
|
+
Method: autocorrelation
|
|
90
|
+
"""
|
|
91
|
+
|
|
92
|
+
METHODS: ClassVar[list[str]] = ["zcd", "histogram", "autocorrelation", "pll", "fft"]
|
|
93
|
+
|
|
94
|
+
def __init__(self, method: str = "autocorrelation") -> None:
|
|
95
|
+
"""Initialize timing analyzer.
|
|
96
|
+
|
|
97
|
+
Args:
|
|
98
|
+
method: Clock recovery method ("zcd", "histogram", "autocorrelation",
|
|
99
|
+
"pll", or "fft"). Default is "autocorrelation" for best general-
|
|
100
|
+
purpose performance.
|
|
101
|
+
|
|
102
|
+
Raises:
|
|
103
|
+
ValueError: If method is not in METHODS.
|
|
104
|
+
|
|
105
|
+
Example:
|
|
106
|
+
>>> analyzer = TimingAnalyzer(method="fft")
|
|
107
|
+
>>> analyzer.method
|
|
108
|
+
'fft'
|
|
109
|
+
"""
|
|
110
|
+
if method not in self.METHODS:
|
|
111
|
+
raise ValueError(f"Method must be one of {self.METHODS}, got '{method}'")
|
|
112
|
+
self.method = method
|
|
113
|
+
|
|
114
|
+
def recover_clock(
|
|
115
|
+
self,
|
|
116
|
+
signal: NDArray[np.floating[Any]],
|
|
117
|
+
sample_rate: float,
|
|
118
|
+
initial_estimate: float | None = None,
|
|
119
|
+
) -> TimingAnalysisResult:
|
|
120
|
+
"""Recover clock frequency from signal.
|
|
121
|
+
|
|
122
|
+
Uses the configured method to recover the clock rate from a digital
|
|
123
|
+
or analog signal. Automatically detects periodicity and estimates
|
|
124
|
+
the dominant frequency.
|
|
125
|
+
|
|
126
|
+
Args:
|
|
127
|
+
signal: Input signal array.
|
|
128
|
+
sample_rate: Sampling rate in Hz.
|
|
129
|
+
initial_estimate: Initial frequency estimate in Hz (optional,
|
|
130
|
+
required for PLL method).
|
|
131
|
+
|
|
132
|
+
Returns:
|
|
133
|
+
TimingAnalysisResult with recovered clock rate and statistics.
|
|
134
|
+
|
|
135
|
+
Example:
|
|
136
|
+
>>> signal = np.tile([0, 0, 1, 1], 1000)
|
|
137
|
+
>>> analyzer = TimingAnalyzer(method="autocorrelation")
|
|
138
|
+
>>> result = analyzer.recover_clock(signal, sample_rate=1e6)
|
|
139
|
+
>>> result.detected_clock_rate > 0
|
|
140
|
+
True
|
|
141
|
+
|
|
142
|
+
References:
|
|
143
|
+
IEEE 1241-2010: Standard for Terminology and Test Methods
|
|
144
|
+
"""
|
|
145
|
+
if self.method == "zcd":
|
|
146
|
+
return self._zero_crossing_detection(signal, sample_rate)
|
|
147
|
+
elif self.method == "histogram":
|
|
148
|
+
return self._histogram_method(signal, sample_rate)
|
|
149
|
+
elif self.method == "autocorrelation":
|
|
150
|
+
return self._autocorrelation_method(signal, sample_rate)
|
|
151
|
+
elif self.method == "pll":
|
|
152
|
+
if initial_estimate is None:
|
|
153
|
+
raise ValueError("PLL method requires initial_estimate parameter")
|
|
154
|
+
return self._pll_simulation(signal, sample_rate, initial_estimate)
|
|
155
|
+
elif self.method == "fft":
|
|
156
|
+
return self._fft_method(signal, sample_rate)
|
|
157
|
+
else:
|
|
158
|
+
raise ValueError(f"Unknown method: {self.method}")
|
|
159
|
+
|
|
160
|
+
def detect_baud_rate(
|
|
161
|
+
self,
|
|
162
|
+
signal: NDArray[np.floating[Any]],
|
|
163
|
+
sample_rate: float,
|
|
164
|
+
min_baud: float = 300,
|
|
165
|
+
max_baud: float = 115200,
|
|
166
|
+
) -> TimingAnalysisResult:
|
|
167
|
+
"""Detect serial baud rate from signal.
|
|
168
|
+
|
|
169
|
+
Analyzes signal to determine the most likely baud rate for
|
|
170
|
+
serial communication protocols (UART, RS-232, etc.).
|
|
171
|
+
|
|
172
|
+
Args:
|
|
173
|
+
signal: Input serial signal array.
|
|
174
|
+
sample_rate: Sampling rate in Hz.
|
|
175
|
+
min_baud: Minimum baud rate to consider (default 300).
|
|
176
|
+
max_baud: Maximum baud rate to consider (default 115200).
|
|
177
|
+
|
|
178
|
+
Returns:
|
|
179
|
+
TimingAnalysisResult with detected baud rate.
|
|
180
|
+
|
|
181
|
+
Example:
|
|
182
|
+
>>> # 9600 baud signal (104.17 µs bit period)
|
|
183
|
+
>>> bit_period = 1 / 9600
|
|
184
|
+
>>> signal = np.tile([0]*10 + [1]*10, 500)
|
|
185
|
+
>>> analyzer = TimingAnalyzer(method="autocorrelation")
|
|
186
|
+
>>> result = analyzer.detect_baud_rate(signal, sample_rate=1e6)
|
|
187
|
+
>>> abs(result.detected_clock_rate - 9600) < 1000
|
|
188
|
+
True
|
|
189
|
+
|
|
190
|
+
References:
|
|
191
|
+
EIA-232: Serial data communication standard
|
|
192
|
+
"""
|
|
193
|
+
# Use autocorrelation to find bit period
|
|
194
|
+
temp_analyzer = TimingAnalyzer(method="autocorrelation")
|
|
195
|
+
result = temp_analyzer.recover_clock(signal, sample_rate)
|
|
196
|
+
|
|
197
|
+
# Filter to common baud rates
|
|
198
|
+
detected_rate = result.detected_clock_rate
|
|
199
|
+
|
|
200
|
+
# Clamp to valid range
|
|
201
|
+
if detected_rate < min_baud:
|
|
202
|
+
detected_rate = min_baud
|
|
203
|
+
result.confidence *= 0.5
|
|
204
|
+
elif detected_rate > max_baud:
|
|
205
|
+
detected_rate = max_baud
|
|
206
|
+
result.confidence *= 0.5
|
|
207
|
+
|
|
208
|
+
# Find nearest standard baud rate
|
|
209
|
+
standard_bauds = [
|
|
210
|
+
300,
|
|
211
|
+
1200,
|
|
212
|
+
2400,
|
|
213
|
+
4800,
|
|
214
|
+
9600,
|
|
215
|
+
14400,
|
|
216
|
+
19200,
|
|
217
|
+
38400,
|
|
218
|
+
57600,
|
|
219
|
+
115200,
|
|
220
|
+
230400,
|
|
221
|
+
460800,
|
|
222
|
+
921600,
|
|
223
|
+
]
|
|
224
|
+
valid_bauds = [b for b in standard_bauds if min_baud <= b <= max_baud]
|
|
225
|
+
|
|
226
|
+
if valid_bauds:
|
|
227
|
+
nearest_baud = min(valid_bauds, key=lambda b: abs(b - detected_rate))
|
|
228
|
+
# Increase confidence if very close to standard baud
|
|
229
|
+
if abs(nearest_baud - detected_rate) / nearest_baud < 0.05:
|
|
230
|
+
result.confidence = min(1.0, result.confidence * 1.2)
|
|
231
|
+
detected_rate = float(nearest_baud)
|
|
232
|
+
|
|
233
|
+
result.detected_clock_rate = detected_rate
|
|
234
|
+
result.statistics["standard_baud_match"] = (
|
|
235
|
+
abs(detected_rate - nearest_baud) / nearest_baud < 0.05 if valid_bauds else False
|
|
236
|
+
)
|
|
237
|
+
|
|
238
|
+
return result
|
|
239
|
+
|
|
240
|
+
def analyze_jitter(
|
|
241
|
+
self,
|
|
242
|
+
transitions: NDArray[np.floating[Any]],
|
|
243
|
+
nominal_period: float,
|
|
244
|
+
) -> dict[str, Any]:
|
|
245
|
+
"""Analyze timing jitter from edge transitions.
|
|
246
|
+
|
|
247
|
+
Computes jitter statistics including RMS, peak-to-peak, and
|
|
248
|
+
histogram distribution.
|
|
249
|
+
|
|
250
|
+
Args:
|
|
251
|
+
transitions: Array of transition timestamps in seconds.
|
|
252
|
+
nominal_period: Expected nominal period in seconds.
|
|
253
|
+
|
|
254
|
+
Returns:
|
|
255
|
+
Dictionary with jitter statistics:
|
|
256
|
+
- rms: RMS jitter in seconds
|
|
257
|
+
- peak_to_peak: Peak-to-peak jitter in seconds
|
|
258
|
+
- mean_period: Mean measured period in seconds
|
|
259
|
+
- std_period: Standard deviation of period in seconds
|
|
260
|
+
- histogram_bins: Histogram bin edges
|
|
261
|
+
- histogram_counts: Histogram counts
|
|
262
|
+
|
|
263
|
+
Example:
|
|
264
|
+
>>> transitions = np.array([0.0, 1e-6, 2e-6, 3.01e-6, 4e-6])
|
|
265
|
+
>>> analyzer = TimingAnalyzer()
|
|
266
|
+
>>> stats = analyzer.analyze_jitter(transitions, nominal_period=1e-6)
|
|
267
|
+
>>> stats['rms'] >= 0
|
|
268
|
+
True
|
|
269
|
+
>>> 'peak_to_peak' in stats
|
|
270
|
+
True
|
|
271
|
+
|
|
272
|
+
References:
|
|
273
|
+
IEEE 2414-2020: Standard for Jitter and Phase Noise
|
|
274
|
+
"""
|
|
275
|
+
if len(transitions) < 2:
|
|
276
|
+
return {
|
|
277
|
+
"rms": np.nan,
|
|
278
|
+
"peak_to_peak": np.nan,
|
|
279
|
+
"mean_period": np.nan,
|
|
280
|
+
"std_period": np.nan,
|
|
281
|
+
"histogram_bins": np.array([]),
|
|
282
|
+
"histogram_counts": np.array([]),
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
# Calculate periods
|
|
286
|
+
periods = np.diff(transitions)
|
|
287
|
+
|
|
288
|
+
if len(periods) == 0:
|
|
289
|
+
return {
|
|
290
|
+
"rms": np.nan,
|
|
291
|
+
"peak_to_peak": np.nan,
|
|
292
|
+
"mean_period": np.nan,
|
|
293
|
+
"std_period": np.nan,
|
|
294
|
+
"histogram_bins": np.array([]),
|
|
295
|
+
"histogram_counts": np.array([]),
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
# Jitter is deviation from nominal period - handle NaN values
|
|
299
|
+
with np.errstate(invalid="ignore"):
|
|
300
|
+
deviations = periods - nominal_period
|
|
301
|
+
|
|
302
|
+
rms_jitter = float(np.std(deviations))
|
|
303
|
+
pp_jitter = float(np.max(periods) - np.min(periods))
|
|
304
|
+
mean_period = float(np.mean(periods))
|
|
305
|
+
std_period = float(np.std(periods))
|
|
306
|
+
|
|
307
|
+
# Generate histogram
|
|
308
|
+
if len(deviations) >= 10:
|
|
309
|
+
counts, bins = np.histogram(deviations, bins=50)
|
|
310
|
+
else:
|
|
311
|
+
counts = np.array([])
|
|
312
|
+
bins = np.array([])
|
|
313
|
+
|
|
314
|
+
return {
|
|
315
|
+
"rms": rms_jitter,
|
|
316
|
+
"peak_to_peak": pp_jitter,
|
|
317
|
+
"mean_period": mean_period,
|
|
318
|
+
"std_period": std_period,
|
|
319
|
+
"histogram_bins": bins,
|
|
320
|
+
"histogram_counts": counts,
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
def analyze_drift(
|
|
324
|
+
self,
|
|
325
|
+
transitions: NDArray[np.floating[Any]],
|
|
326
|
+
window_size: int = 1000,
|
|
327
|
+
) -> float:
|
|
328
|
+
"""Analyze clock drift over time.
|
|
329
|
+
|
|
330
|
+
Measures the rate of change in clock frequency over the observation
|
|
331
|
+
window, expressed in parts per million (ppm).
|
|
332
|
+
|
|
333
|
+
Args:
|
|
334
|
+
transitions: Array of transition timestamps in seconds.
|
|
335
|
+
window_size: Number of transitions to use for drift calculation.
|
|
336
|
+
|
|
337
|
+
Returns:
|
|
338
|
+
Clock drift in ppm (parts per million).
|
|
339
|
+
|
|
340
|
+
Example:
|
|
341
|
+
>>> # Perfect clock (no drift)
|
|
342
|
+
>>> transitions = np.arange(0, 1000) * 1e-6
|
|
343
|
+
>>> analyzer = TimingAnalyzer()
|
|
344
|
+
>>> drift = analyzer.analyze_drift(transitions, window_size=100)
|
|
345
|
+
>>> abs(drift) < 10 # Very low drift
|
|
346
|
+
True
|
|
347
|
+
|
|
348
|
+
References:
|
|
349
|
+
IEEE 1588: Precision Time Protocol (PTP)
|
|
350
|
+
"""
|
|
351
|
+
if len(transitions) < window_size:
|
|
352
|
+
window_size = len(transitions)
|
|
353
|
+
|
|
354
|
+
if window_size < 10:
|
|
355
|
+
return np.nan
|
|
356
|
+
|
|
357
|
+
# Split into windows and calculate average period in each
|
|
358
|
+
n_windows = max(2, window_size // 100)
|
|
359
|
+
window_length = len(transitions) // n_windows
|
|
360
|
+
|
|
361
|
+
window_frequencies: list[float] = []
|
|
362
|
+
|
|
363
|
+
for i in range(n_windows):
|
|
364
|
+
start_idx = i * window_length
|
|
365
|
+
end_idx = min((i + 1) * window_length, len(transitions))
|
|
366
|
+
|
|
367
|
+
if end_idx - start_idx < 2:
|
|
368
|
+
continue
|
|
369
|
+
|
|
370
|
+
window_transitions = transitions[start_idx:end_idx]
|
|
371
|
+
periods = np.diff(window_transitions)
|
|
372
|
+
|
|
373
|
+
if len(periods) > 0:
|
|
374
|
+
mean_period = np.mean(periods)
|
|
375
|
+
if mean_period > 0:
|
|
376
|
+
window_frequencies.append(1.0 / mean_period)
|
|
377
|
+
|
|
378
|
+
if len(window_frequencies) < 2:
|
|
379
|
+
return 0.0
|
|
380
|
+
|
|
381
|
+
# Linear fit to frequency vs time
|
|
382
|
+
time_points = np.linspace(0, len(transitions), len(window_frequencies))
|
|
383
|
+
coeffs = np.polyfit(time_points, window_frequencies, 1)
|
|
384
|
+
slope = coeffs[0] # Hz per sample
|
|
385
|
+
mean_freq = np.mean(window_frequencies)
|
|
386
|
+
|
|
387
|
+
if mean_freq == 0:
|
|
388
|
+
return 0.0
|
|
389
|
+
|
|
390
|
+
# Convert to ppm
|
|
391
|
+
drift_ppm = (slope * len(transitions)) / mean_freq * 1e6
|
|
392
|
+
|
|
393
|
+
return float(drift_ppm)
|
|
394
|
+
|
|
395
|
+
def calculate_snr(
|
|
396
|
+
self,
|
|
397
|
+
signal: NDArray[np.floating[Any]],
|
|
398
|
+
signal_freq: float,
|
|
399
|
+
sample_rate: float,
|
|
400
|
+
) -> float:
|
|
401
|
+
"""Calculate signal-to-noise ratio in dB.
|
|
402
|
+
|
|
403
|
+
Estimates SNR by separating signal power (at fundamental frequency)
|
|
404
|
+
from noise power (all other frequency components).
|
|
405
|
+
|
|
406
|
+
Args:
|
|
407
|
+
signal: Input signal array.
|
|
408
|
+
signal_freq: Expected signal frequency in Hz.
|
|
409
|
+
sample_rate: Sampling rate in Hz.
|
|
410
|
+
|
|
411
|
+
Returns:
|
|
412
|
+
SNR in decibels (dB).
|
|
413
|
+
|
|
414
|
+
Example:
|
|
415
|
+
>>> signal = np.sin(2 * np.pi * 1000 * np.linspace(0, 0.1, 10000))
|
|
416
|
+
>>> analyzer = TimingAnalyzer()
|
|
417
|
+
>>> snr = analyzer.calculate_snr(signal, signal_freq=1000, sample_rate=100e3)
|
|
418
|
+
>>> snr > 40 # Clean signal should have high SNR
|
|
419
|
+
True
|
|
420
|
+
|
|
421
|
+
References:
|
|
422
|
+
IEEE 1057: Standard for Digitizing Waveform Recorders
|
|
423
|
+
"""
|
|
424
|
+
if len(signal) < 64:
|
|
425
|
+
return np.nan
|
|
426
|
+
|
|
427
|
+
# Remove DC component - handle inf/nan
|
|
428
|
+
with np.errstate(invalid="ignore"):
|
|
429
|
+
signal_clean = np.where(np.isfinite(signal), signal, 0.0)
|
|
430
|
+
signal_centered = signal_clean - np.mean(signal_clean)
|
|
431
|
+
|
|
432
|
+
# Compute FFT
|
|
433
|
+
n = len(signal_centered)
|
|
434
|
+
nfft = int(2 ** np.ceil(np.log2(n)))
|
|
435
|
+
spectrum = np.fft.rfft(signal_centered, n=nfft)
|
|
436
|
+
freqs = np.fft.rfftfreq(nfft, d=1.0 / sample_rate)
|
|
437
|
+
magnitude = np.abs(spectrum)
|
|
438
|
+
|
|
439
|
+
# Find bin closest to signal frequency
|
|
440
|
+
signal_bin_idx = np.argmin(np.abs(freqs - signal_freq))
|
|
441
|
+
|
|
442
|
+
# Signal power is in the signal bin and immediate neighbors
|
|
443
|
+
signal_bins = [signal_bin_idx]
|
|
444
|
+
if signal_bin_idx > 0:
|
|
445
|
+
signal_bins.append(signal_bin_idx - 1)
|
|
446
|
+
if signal_bin_idx < len(magnitude) - 1:
|
|
447
|
+
signal_bins.append(signal_bin_idx + 1)
|
|
448
|
+
|
|
449
|
+
signal_power = float(np.sum(magnitude[signal_bins] ** 2))
|
|
450
|
+
|
|
451
|
+
# Noise power is everything else (excluding DC at bin 0)
|
|
452
|
+
noise_mask = np.ones(len(magnitude), dtype=bool)
|
|
453
|
+
noise_mask[0] = False # Exclude DC
|
|
454
|
+
for idx in signal_bins:
|
|
455
|
+
noise_mask[idx] = False
|
|
456
|
+
|
|
457
|
+
noise_power = float(np.sum(magnitude[noise_mask] ** 2))
|
|
458
|
+
|
|
459
|
+
if noise_power == 0 or signal_power == 0:
|
|
460
|
+
return np.nan
|
|
461
|
+
|
|
462
|
+
snr = 10 * np.log10(signal_power / noise_power)
|
|
463
|
+
|
|
464
|
+
return float(snr)
|
|
465
|
+
|
|
466
|
+
def generate_eye_diagram(
|
|
467
|
+
self,
|
|
468
|
+
signal: NDArray[np.floating[Any]],
|
|
469
|
+
symbol_rate: float,
|
|
470
|
+
sample_rate: float,
|
|
471
|
+
output_path: Path,
|
|
472
|
+
) -> None:
|
|
473
|
+
"""Generate eye diagram for signal quality assessment.
|
|
474
|
+
|
|
475
|
+
Creates an eye diagram by overlaying multiple symbol periods.
|
|
476
|
+
A wide, open eye indicates good signal quality; a closed eye
|
|
477
|
+
indicates high jitter or noise.
|
|
478
|
+
|
|
479
|
+
Args:
|
|
480
|
+
signal: Input signal array.
|
|
481
|
+
symbol_rate: Symbol rate in Hz.
|
|
482
|
+
sample_rate: Sampling rate in Hz.
|
|
483
|
+
output_path: Path to save eye diagram image.
|
|
484
|
+
|
|
485
|
+
Example:
|
|
486
|
+
>>> import tempfile
|
|
487
|
+
>>> signal = np.sin(2 * np.pi * 1e6 * np.linspace(0, 1e-3, 100000))
|
|
488
|
+
>>> analyzer = TimingAnalyzer()
|
|
489
|
+
>>> with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
|
|
490
|
+
... analyzer.generate_eye_diagram(signal, 1e6, 100e6, Path(f.name))
|
|
491
|
+
|
|
492
|
+
References:
|
|
493
|
+
- Telecommunications Measurement Analysis (Tektronix)
|
|
494
|
+
- IEEE 802.3: Ethernet eye diagram templates
|
|
495
|
+
"""
|
|
496
|
+
try:
|
|
497
|
+
import matplotlib
|
|
498
|
+
|
|
499
|
+
matplotlib.use("Agg")
|
|
500
|
+
import matplotlib.pyplot as plt
|
|
501
|
+
except ImportError as e:
|
|
502
|
+
raise ImportError(
|
|
503
|
+
"matplotlib required for eye diagram generation. "
|
|
504
|
+
"Install with: pip install matplotlib"
|
|
505
|
+
) from e
|
|
506
|
+
|
|
507
|
+
# Calculate samples per symbol
|
|
508
|
+
samples_per_symbol = int(sample_rate / symbol_rate)
|
|
509
|
+
|
|
510
|
+
if samples_per_symbol < 4:
|
|
511
|
+
raise ValueError(
|
|
512
|
+
f"Insufficient samples per symbol: {samples_per_symbol}. "
|
|
513
|
+
f"Need at least 4 samples per symbol for eye diagram."
|
|
514
|
+
)
|
|
515
|
+
|
|
516
|
+
# Extract symbol periods (2 symbols per trace for eye diagram)
|
|
517
|
+
num_symbols = len(signal) // samples_per_symbol
|
|
518
|
+
eye_traces: list[NDArray[np.floating[Any]]] = []
|
|
519
|
+
|
|
520
|
+
for i in range(num_symbols - 1):
|
|
521
|
+
start = i * samples_per_symbol
|
|
522
|
+
end = start + 2 * samples_per_symbol # 2 symbols for eye
|
|
523
|
+
if end <= len(signal):
|
|
524
|
+
eye_traces.append(signal[start:end])
|
|
525
|
+
|
|
526
|
+
if len(eye_traces) == 0:
|
|
527
|
+
raise ValueError("Insufficient data for eye diagram")
|
|
528
|
+
|
|
529
|
+
# Plot overlaid traces
|
|
530
|
+
plt.figure(figsize=(10, 6))
|
|
531
|
+
time_axis = np.linspace(0, 2, 2 * samples_per_symbol)
|
|
532
|
+
|
|
533
|
+
for trace in eye_traces:
|
|
534
|
+
plt.plot(time_axis, trace, alpha=0.1, color="blue", linewidth=0.5)
|
|
535
|
+
|
|
536
|
+
plt.xlabel("Time (symbol periods)")
|
|
537
|
+
plt.ylabel("Amplitude")
|
|
538
|
+
plt.title(f"Eye Diagram (Symbol Rate: {symbol_rate / 1e3:.1f} kHz)")
|
|
539
|
+
plt.grid(True, alpha=0.3)
|
|
540
|
+
plt.xlim(0, 2)
|
|
541
|
+
plt.tight_layout()
|
|
542
|
+
|
|
543
|
+
# Save to file
|
|
544
|
+
plt.savefig(output_path, dpi=300, bbox_inches="tight")
|
|
545
|
+
plt.close()
|
|
546
|
+
|
|
547
|
+
def export_statistics(
|
|
548
|
+
self,
|
|
549
|
+
result: TimingAnalysisResult,
|
|
550
|
+
output_path: Path,
|
|
551
|
+
) -> None:
|
|
552
|
+
"""Export timing statistics as JSON.
|
|
553
|
+
|
|
554
|
+
Args:
|
|
555
|
+
result: TimingAnalysisResult to export.
|
|
556
|
+
output_path: Path to save JSON file.
|
|
557
|
+
|
|
558
|
+
Example:
|
|
559
|
+
>>> import tempfile
|
|
560
|
+
>>> result = TimingAnalysisResult(
|
|
561
|
+
... detected_clock_rate=10e6,
|
|
562
|
+
... confidence=0.95,
|
|
563
|
+
... jitter_rms=1e-12,
|
|
564
|
+
... drift_rate=2.5,
|
|
565
|
+
... snr_db=45.0,
|
|
566
|
+
... method="autocorrelation"
|
|
567
|
+
... )
|
|
568
|
+
>>> analyzer = TimingAnalyzer()
|
|
569
|
+
>>> with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:
|
|
570
|
+
... analyzer.export_statistics(result, Path(f.name))
|
|
571
|
+
"""
|
|
572
|
+
import json
|
|
573
|
+
|
|
574
|
+
data = {
|
|
575
|
+
"detected_clock_rate_hz": result.detected_clock_rate,
|
|
576
|
+
"detected_clock_rate_mhz": result.detected_clock_rate / 1e6,
|
|
577
|
+
"confidence": result.confidence,
|
|
578
|
+
"jitter_rms_seconds": result.jitter_rms,
|
|
579
|
+
"jitter_rms_picoseconds": result.jitter_rms * 1e12,
|
|
580
|
+
"drift_rate_ppm": result.drift_rate,
|
|
581
|
+
"snr_db": result.snr_db,
|
|
582
|
+
"method": result.method,
|
|
583
|
+
"statistics": result.statistics,
|
|
584
|
+
}
|
|
585
|
+
|
|
586
|
+
with open(output_path, "w") as f:
|
|
587
|
+
json.dump(data, f, indent=2)
|
|
588
|
+
|
|
589
|
+
def _zero_crossing_detection(
|
|
590
|
+
self,
|
|
591
|
+
signal: NDArray[np.floating[Any]],
|
|
592
|
+
sample_rate: float,
|
|
593
|
+
) -> TimingAnalysisResult:
|
|
594
|
+
"""Zero-crossing based clock recovery.
|
|
595
|
+
|
|
596
|
+
Detects zero crossings (rising edges) and calculates the most
|
|
597
|
+
common interval between crossings to determine clock period.
|
|
598
|
+
|
|
599
|
+
Args:
|
|
600
|
+
signal: Input signal array.
|
|
601
|
+
sample_rate: Sampling rate in Hz.
|
|
602
|
+
|
|
603
|
+
Returns:
|
|
604
|
+
TimingAnalysisResult with recovered clock rate.
|
|
605
|
+
"""
|
|
606
|
+
# Normalize signal
|
|
607
|
+
signal_norm = signal - np.mean(signal)
|
|
608
|
+
|
|
609
|
+
# Find zero crossings (rising edges)
|
|
610
|
+
crossings: list[float] = []
|
|
611
|
+
for i in range(len(signal_norm) - 1):
|
|
612
|
+
if signal_norm[i] <= 0 and signal_norm[i + 1] > 0:
|
|
613
|
+
# Linear interpolation to find exact crossing
|
|
614
|
+
if abs(signal_norm[i + 1] - signal_norm[i]) > 1e-12:
|
|
615
|
+
frac = abs(signal_norm[i]) / (abs(signal_norm[i]) + signal_norm[i + 1])
|
|
616
|
+
crossing_idx = i + frac
|
|
617
|
+
crossings.append(crossing_idx / sample_rate)
|
|
618
|
+
|
|
619
|
+
if len(crossings) < 2:
|
|
620
|
+
return TimingAnalysisResult(
|
|
621
|
+
detected_clock_rate=0.0,
|
|
622
|
+
confidence=0.0,
|
|
623
|
+
jitter_rms=0.0,
|
|
624
|
+
drift_rate=0.0,
|
|
625
|
+
snr_db=0.0,
|
|
626
|
+
method="zcd",
|
|
627
|
+
)
|
|
628
|
+
|
|
629
|
+
# Calculate intervals between crossings
|
|
630
|
+
intervals = np.diff(crossings)
|
|
631
|
+
|
|
632
|
+
if len(intervals) == 0:
|
|
633
|
+
return TimingAnalysisResult(
|
|
634
|
+
detected_clock_rate=0.0,
|
|
635
|
+
confidence=0.0,
|
|
636
|
+
jitter_rms=0.0,
|
|
637
|
+
drift_rate=0.0,
|
|
638
|
+
snr_db=0.0,
|
|
639
|
+
method="zcd",
|
|
640
|
+
)
|
|
641
|
+
|
|
642
|
+
# Find most common interval (mode) using histogram
|
|
643
|
+
hist, edges = np.histogram(intervals, bins=min(100, len(intervals)))
|
|
644
|
+
mode_idx = int(np.argmax(hist))
|
|
645
|
+
mode_interval = (edges[mode_idx] + edges[mode_idx + 1]) / 2
|
|
646
|
+
|
|
647
|
+
# Detected clock rate
|
|
648
|
+
clock_rate = 1.0 / mode_interval if mode_interval > 0 else 0.0
|
|
649
|
+
|
|
650
|
+
# Calculate jitter (RMS of deviations from mode interval)
|
|
651
|
+
jitter_rms = float(np.std(intervals - mode_interval))
|
|
652
|
+
|
|
653
|
+
# Confidence based on histogram peak sharpness
|
|
654
|
+
confidence = float(hist[mode_idx] / len(intervals))
|
|
655
|
+
|
|
656
|
+
# Calculate drift
|
|
657
|
+
drift_rate = self.analyze_drift(np.array(crossings))
|
|
658
|
+
|
|
659
|
+
# Calculate SNR
|
|
660
|
+
snr_db = self.calculate_snr(signal, clock_rate, sample_rate)
|
|
661
|
+
|
|
662
|
+
return TimingAnalysisResult(
|
|
663
|
+
detected_clock_rate=float(clock_rate),
|
|
664
|
+
confidence=confidence,
|
|
665
|
+
jitter_rms=jitter_rms,
|
|
666
|
+
drift_rate=drift_rate,
|
|
667
|
+
snr_db=snr_db,
|
|
668
|
+
method="zcd",
|
|
669
|
+
statistics={"num_crossings": len(crossings), "mode_interval": mode_interval},
|
|
670
|
+
)
|
|
671
|
+
|
|
672
|
+
def _histogram_method(
|
|
673
|
+
self,
|
|
674
|
+
signal: NDArray[np.floating[Any]],
|
|
675
|
+
sample_rate: float,
|
|
676
|
+
) -> TimingAnalysisResult:
|
|
677
|
+
"""Histogram-based clock recovery.
|
|
678
|
+
|
|
679
|
+
Uses histogram of signal values to detect logic levels,
|
|
680
|
+
then finds transitions and computes intervals.
|
|
681
|
+
|
|
682
|
+
Args:
|
|
683
|
+
signal: Input signal array.
|
|
684
|
+
sample_rate: Sampling rate in Hz.
|
|
685
|
+
|
|
686
|
+
Returns:
|
|
687
|
+
TimingAnalysisResult with recovered clock rate.
|
|
688
|
+
"""
|
|
689
|
+
# Find logic levels using histogram
|
|
690
|
+
hist, bin_edges = np.histogram(signal, bins=100)
|
|
691
|
+
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
|
|
692
|
+
|
|
693
|
+
# Find peaks in histogram (logic levels)
|
|
694
|
+
mid_idx = len(hist) // 2
|
|
695
|
+
low_peak_idx = int(np.argmax(hist[:mid_idx]))
|
|
696
|
+
high_peak_idx = mid_idx + int(np.argmax(hist[mid_idx:]))
|
|
697
|
+
|
|
698
|
+
low_level = bin_centers[low_peak_idx]
|
|
699
|
+
high_level = bin_centers[high_peak_idx]
|
|
700
|
+
|
|
701
|
+
# Threshold is midpoint
|
|
702
|
+
threshold = (low_level + high_level) / 2
|
|
703
|
+
|
|
704
|
+
# Find transitions (rising or falling edges)
|
|
705
|
+
transitions: list[float] = []
|
|
706
|
+
for i in range(len(signal) - 1):
|
|
707
|
+
if (signal[i] < threshold <= signal[i + 1]) or (signal[i] >= threshold > signal[i + 1]):
|
|
708
|
+
transitions.append(i / sample_rate)
|
|
709
|
+
|
|
710
|
+
if len(transitions) < 2:
|
|
711
|
+
return TimingAnalysisResult(
|
|
712
|
+
detected_clock_rate=0.0,
|
|
713
|
+
confidence=0.0,
|
|
714
|
+
jitter_rms=0.0,
|
|
715
|
+
drift_rate=0.0,
|
|
716
|
+
snr_db=0.0,
|
|
717
|
+
method="histogram",
|
|
718
|
+
)
|
|
719
|
+
|
|
720
|
+
# Calculate intervals
|
|
721
|
+
intervals = np.diff(transitions)
|
|
722
|
+
|
|
723
|
+
if len(intervals) == 0:
|
|
724
|
+
return TimingAnalysisResult(
|
|
725
|
+
detected_clock_rate=0.0,
|
|
726
|
+
confidence=0.0,
|
|
727
|
+
jitter_rms=0.0,
|
|
728
|
+
drift_rate=0.0,
|
|
729
|
+
snr_db=0.0,
|
|
730
|
+
method="histogram",
|
|
731
|
+
)
|
|
732
|
+
|
|
733
|
+
# Mode interval
|
|
734
|
+
mode_interval = float(np.median(intervals))
|
|
735
|
+
clock_rate = 1.0 / mode_interval if mode_interval > 0 else 0.0
|
|
736
|
+
|
|
737
|
+
jitter_rms = float(np.std(intervals))
|
|
738
|
+
confidence = min(1.0, 1.0 / (1.0 + jitter_rms / mode_interval))
|
|
739
|
+
|
|
740
|
+
drift_rate = self.analyze_drift(np.array(transitions))
|
|
741
|
+
snr_db = self.calculate_snr(signal, clock_rate, sample_rate)
|
|
742
|
+
|
|
743
|
+
return TimingAnalysisResult(
|
|
744
|
+
detected_clock_rate=float(clock_rate),
|
|
745
|
+
confidence=confidence,
|
|
746
|
+
jitter_rms=jitter_rms,
|
|
747
|
+
drift_rate=drift_rate,
|
|
748
|
+
snr_db=snr_db,
|
|
749
|
+
method="histogram",
|
|
750
|
+
statistics={
|
|
751
|
+
"num_transitions": len(transitions),
|
|
752
|
+
"low_level": low_level,
|
|
753
|
+
"high_level": high_level,
|
|
754
|
+
"threshold": threshold,
|
|
755
|
+
},
|
|
756
|
+
)
|
|
757
|
+
|
|
758
|
+
+    def _autocorrelation_method(
+        self,
+        signal: NDArray[np.floating[Any]],
+        sample_rate: float,
+    ) -> TimingAnalysisResult:
+        """Autocorrelation-based clock recovery.
+
+        The autocorrelation of the signal exposes its periodicity: a peak at
+        lag τ indicates a period of τ samples.
+
+        Args:
+            signal: Input signal array.
+            sample_rate: Sampling rate in Hz.
+
+        Returns:
+            TimingAnalysisResult with recovered clock rate.
+        """
+        # Handle edge cases
+        if len(signal) == 0 or sample_rate <= 0:
+            return TimingAnalysisResult(
+                detected_clock_rate=0.0,
+                confidence=0.0,
+                jitter_rms=0.0,
+                drift_rate=0.0,
+                snr_db=0.0,
+                method="autocorrelation",
+            )
+
+        # Calculate autocorrelation - handle inf/nan
+        with np.errstate(invalid="ignore"):
+            signal_clean = np.where(np.isfinite(signal), signal, 0.0)
+            signal_norm = signal_clean - np.mean(signal_clean)
+
+        # Use FFT-based autocorrelation for efficiency
+        n = len(signal_norm)
+        fft_signal = np.fft.fft(signal_norm, n=2 * n)
+        autocorr = np.fft.ifft(fft_signal * np.conj(fft_signal)).real
+        autocorr = autocorr[:n]  # Keep positive lags
+
+        # Normalize
+        if autocorr[0] != 0:
+            autocorr = autocorr / autocorr[0]
+
+        # Skip lags shorter than 1 µs (the period of a 1 MHz clock) to avoid noise peaks
+        min_lag_samples = max(1, int(sample_rate / 1000000))
+
+        # Find first significant peak (after lag 0)
+        peaks: list[tuple[int, float]] = []
+        for i in range(min_lag_samples, len(autocorr) - 1):
+            if (
+                autocorr[i] > autocorr[i - 1]
+                and autocorr[i] > autocorr[i + 1]
+                and autocorr[i] > 0.3
+            ):  # Significant peak threshold
+                peaks.append((i, autocorr[i]))
+
+        if not peaks:
+            return TimingAnalysisResult(
+                detected_clock_rate=0.0,
+                confidence=0.0,
+                jitter_rms=0.0,
+                drift_rate=0.0,
+                snr_db=0.0,
+                method="autocorrelation",
+            )
+
+        # Use first peak
+        peak_lag, peak_value = peaks[0]
+        period = peak_lag / sample_rate
+        clock_rate = 1.0 / period if period > 0 else 0.0
+
+        # Find secondary peaks for jitter estimation
+        secondary_peaks = [p for p in peaks[1:4] if p[1] > 0.2]
+        if secondary_peaks:
+            # Calculate jitter from peak spread
+            all_peak_lags = [peak_lag] + [p[0] for p in secondary_peaks]
+            # Expected harmonic positions
+            expected_lags = [peak_lag * (i + 1) for i in range(len(all_peak_lags))]
+            jitter_samples = np.std(
+                [abs(a - e) for a, e in zip(all_peak_lags, expected_lags, strict=True)]
+            )
+            jitter_rms = float(jitter_samples / sample_rate)
+        else:
+            jitter_rms = 0.0
+
+        snr_db = self.calculate_snr(signal, clock_rate, sample_rate)
+
+        return TimingAnalysisResult(
+            detected_clock_rate=float(clock_rate),
+            confidence=float(peak_value),
+            jitter_rms=jitter_rms,
+            drift_rate=0.0,  # Autocorrelation doesn't directly measure drift
+            snr_db=snr_db,
+            method="autocorrelation",
+            statistics={
+                "peak_lag": peak_lag,
+                "peak_value": peak_value,
+                "num_peaks": len(peaks),
+            },
+        )
+
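The FFT-based autocorrelation above (zero-pad, multiply the spectrum by its conjugate, inverse-transform, then take the first significant peak) can be exercised in isolation. The sketch below applies the same recipe to a synthetic square wave; it is illustrative only, and the helper name estimate_period_samples is not part of oscura.

# Minimal sketch of FFT-based autocorrelation period estimation
# (illustrative only; not part of the package diff).
import numpy as np

def estimate_period_samples(signal: np.ndarray, min_lag: int = 1) -> int:
    """Return the lag (in samples) of the first significant autocorrelation peak."""
    x = signal - np.mean(signal)
    n = len(x)
    spec = np.fft.fft(x, n=2 * n)                 # zero-pad -> linear, not circular, ACF
    acf = np.fft.ifft(spec * np.conj(spec)).real[:n]
    acf = acf / acf[0]                            # normalize so acf[0] == 1.0
    for i in range(max(1, min_lag), n - 1):
        if acf[i] > acf[i - 1] and acf[i] > acf[i + 1] and acf[i] > 0.3:
            return i
    return 0

fs = 100_000.0                                    # 100 kHz sample rate (assumed)
t = np.arange(5000) / fs
square = np.sign(np.sin(2 * np.pi * 1_000.0 * t + 0.1))   # 1 kHz -> 100-sample period
lag = estimate_period_samples(square)
print(lag, fs / lag if lag else 0.0)              # 100 samples -> 1000.0 Hz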
+    def _pll_simulation(
+        self,
+        signal: NDArray[np.floating[Any]],
+        sample_rate: float,
+        initial_freq: float,
+    ) -> TimingAnalysisResult:
+        """Phase-locked loop simulation for clock recovery.
+
+        Simulates a PLL to track the signal frequency. The PLL adjusts its
+        oscillator frequency to minimize the phase error relative to the
+        input signal.
+
+        Args:
+            signal: Input signal array.
+            sample_rate: Sampling rate in Hz.
+            initial_freq: Initial PLL frequency estimate in Hz.
+
+        Returns:
+            TimingAnalysisResult with recovered clock rate.
+
+        References:
+            - Phase-Locked Loop Design Handbook by Dan H. Wolaver
+            - Digital Communications by Proakis & Salehi
+        """
+        damping_factor = 0.707
+        natural_freq = initial_freq * 0.1
+        kp = 2 * damping_factor * natural_freq
+        ki = natural_freq**2
+
+        phase_errors, frequencies = _run_pll_loop(signal, sample_rate, initial_freq, kp, ki)
+        recovered_freq, confidence = _analyze_pll_convergence(frequencies, initial_freq)
+        jitter_rms = float(np.std(phase_errors)) / (2 * np.pi * float(recovered_freq))
+        drift_ppm = _compute_pll_drift(frequencies, recovered_freq)
+        snr_db = self.calculate_snr(signal, recovered_freq, sample_rate)
+
+        return TimingAnalysisResult(
+            detected_clock_rate=recovered_freq,
+            confidence=float(confidence),
+            jitter_rms=jitter_rms,
+            drift_rate=float(drift_ppm),
+            snr_db=snr_db,
+            method="pll",
+            statistics={
+                "initial_freq": initial_freq,
+                "final_freq": recovered_freq,
+                "damping_factor": damping_factor,
+                "natural_freq": natural_freq,
+            },
+        )
+
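The loop-filter gains in _pll_simulation follow the standard second-order PLL relations kp = 2*zeta*wn and ki = wn**2, with the damping factor fixed at 0.707 and the natural frequency set to 10% of the initial estimate. A minimal arithmetic sketch of that gain selection, using an assumed 1 MHz initial estimate, is:

# Worked example of the second-order PLL gain relations used above
# (kp = 2*zeta*wn, ki = wn**2); the 1 MHz initial estimate is illustrative.
damping_factor = 0.707               # zeta
initial_freq = 1.0e6                 # assumed initial clock estimate, Hz
natural_freq = 0.1 * initial_freq    # loop natural frequency: 100 kHz

kp = 2 * damping_factor * natural_freq   # 1.414e5 (proportional gain)
ki = natural_freq ** 2                   # 1.0e10  (integral gain)
print(kp, ki)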
+    def _fft_method(
+        self,
+        signal: NDArray[np.floating[Any]],
+        sample_rate: float,
+    ) -> TimingAnalysisResult:
+        """FFT-based clock recovery.
+
+        Uses FFT to find the dominant frequency component in the signal.
+
+        Args:
+            signal: Input signal array.
+            sample_rate: Sampling rate in Hz.
+
+        Returns:
+            TimingAnalysisResult with recovered clock rate.
+        """
+        if len(signal) < 64:
+            return TimingAnalysisResult(
+                detected_clock_rate=0.0,
+                confidence=0.0,
+                jitter_rms=0.0,
+                drift_rate=0.0,
+                snr_db=0.0,
+                method="fft",
+            )
+
+        # Remove DC and compute FFT
+        signal_centered = signal - np.mean(signal)
+        n = len(signal_centered)
+        nfft = int(2 ** np.ceil(np.log2(n)))
+        spectrum = np.fft.rfft(signal_centered, n=nfft)
+        freqs = np.fft.rfftfreq(nfft, d=1.0 / sample_rate)
+        magnitude = np.abs(spectrum)
+
+        # Exclude DC component
+        if len(magnitude) > 1:
+            magnitude = magnitude[1:]
+            freqs = freqs[1:]
+
+        if len(magnitude) == 0:
+            return TimingAnalysisResult(
+                detected_clock_rate=0.0,
+                confidence=0.0,
+                jitter_rms=0.0,
+                drift_rate=0.0,
+                snr_db=0.0,
+                method="fft",
+            )
+
+        # Find peak
+        peak_idx = int(np.argmax(magnitude))
+        peak_freq = freqs[peak_idx]
+        peak_mag = magnitude[peak_idx]
+
+        # Parabolic interpolation for more accurate frequency
+        if 0 < peak_idx < len(magnitude) - 1:
+            alpha = magnitude[peak_idx - 1]
+            beta = magnitude[peak_idx]
+            gamma = magnitude[peak_idx + 1]
+
+            if beta > alpha and beta > gamma and abs(alpha - 2 * beta + gamma) > 1e-12:
+                freq_resolution = sample_rate / nfft
+                delta = 0.5 * (alpha - gamma) / (alpha - 2 * beta + gamma)
+                peak_freq = peak_freq + delta * freq_resolution
+
+        # Calculate confidence (ratio of peak to RMS of spectrum)
+        rms_mag = np.sqrt(np.mean(magnitude**2))
+        if rms_mag > 0:
+            confidence = min(1.0, (peak_mag / rms_mag - 1) / 10)
+        else:
+            confidence = 0.0
+
+        snr_db = self.calculate_snr(signal, peak_freq, sample_rate)
+
+        return TimingAnalysisResult(
+            detected_clock_rate=float(peak_freq),
+            confidence=float(confidence),
+            jitter_rms=0.0,  # FFT doesn't directly measure jitter
+            drift_rate=0.0,  # FFT doesn't measure drift
+            snr_db=snr_db,
+            method="fft",
+            statistics={
+                "peak_magnitude": float(peak_mag),
+                "rms_magnitude": float(rms_mag),
+            },
+        )
+
+
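The parabolic refinement in _fft_method shifts the peak frequency by delta = 0.5*(alpha - gamma) / (alpha - 2*beta + gamma) bins, where alpha, beta and gamma are the spectral magnitudes around the maximum. The self-contained sketch below shows the effect on a tone that falls between FFT bins; the signal parameters are assumptions chosen for illustration.

# Minimal sketch of FFT peak picking with parabolic (three-point) refinement
# (illustrative only; mirrors the interpolation step above).
import numpy as np

fs = 1.0e6                        # assumed 1 MHz sample rate
f_true = 123_456.7                # tone deliberately between FFT bins
t = np.arange(4096) / fs
x = np.sin(2 * np.pi * f_true * t)

nfft = 4096
mag = np.abs(np.fft.rfft(x - np.mean(x), n=nfft))
freqs = np.fft.rfftfreq(nfft, d=1.0 / fs)

k = int(np.argmax(mag[1:])) + 1   # skip the DC bin
alpha, beta, gamma = mag[k - 1], mag[k], mag[k + 1]
delta = 0.5 * (alpha - gamma) / (alpha - 2 * beta + gamma)
f_est = freqs[k] + delta * (fs / nfft)

# Bin centre vs. refined estimate; the refined value sits much closer to 123456.7 Hz.
print(freqs[k], f_est)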
+def _run_pll_loop(
+    signal: NDArray[np.floating[Any]],
+    sample_rate: float,
+    initial_freq: float,
+    kp: float,
+    ki: float,
+) -> tuple[list[float], list[float]]:
+    """Run PLL loop to track signal frequency.
+
+    Args:
+        signal: Input signal array.
+        sample_rate: Sampling rate in Hz.
+        initial_freq: Initial frequency estimate.
+        kp: Proportional gain.
+        ki: Integral gain.
+
+    Returns:
+        Tuple of (phase_errors, frequencies).
+    """
+    phase = 0.0
+    frequency = initial_freq
+    phase_error_integral = 0.0
+    phase_errors: list[float] = []
+    frequencies: list[float] = []
+    dt = 1.0 / sample_rate
+
+    for sample in signal:
+        vco_output = np.sin(2 * np.pi * phase)
+        phase_error = sample * vco_output
+        phase_error_integral += phase_error * dt
+        frequency = initial_freq + kp * phase_error + ki * phase_error_integral
+        phase += frequency * dt
+        phase = phase % 1.0
+
+        phase_errors.append(phase_error)
+        frequencies.append(frequency)
+
+    return phase_errors, frequencies
+
+
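_run_pll_loop mixes each input sample with the NCO output (the phase detector), pushes the error through a proportional-plus-integral filter, and advances the NCO phase, and _pll_simulation then hands the frequency history to _analyze_pll_convergence and _compute_pll_drift. A minimal composition sketch on a synthetic tone follows; the import path is assumed from the changed-file list (oscura/analyzers/digital/timing), the gains are the same relations used above, and the printed values depend on the gains and record length.

# Sketch: composing the PLL helpers on a synthetic tone.
# The import path and all signal parameters below are assumptions,
# not part of the diff.
import numpy as np

from oscura.analyzers.digital.timing import (
    _analyze_pll_convergence,
    _compute_pll_drift,
    _run_pll_loop,
)

fs = 1.0e6                           # assumed 1 MHz sample rate
f_true = 10_200.0                    # actual tone frequency
f_init = 10_000.0                    # deliberately offset initial estimate
t = np.arange(20_000) / fs
x = np.sin(2 * np.pi * f_true * t)

# Same gain relations as _pll_simulation: kp = 2*zeta*wn, ki = wn**2.
wn = 0.1 * f_init
kp, ki = 2 * 0.707 * wn, wn**2

phase_errors, freqs = _run_pll_loop(x, fs, f_init, kp, ki)
f_rec, conf = _analyze_pll_convergence(freqs, f_init)
drift_ppm = _compute_pll_drift(freqs, f_rec)
jitter_rms = float(np.std(phase_errors)) / (2 * np.pi * f_rec)

# Recovered frequency, lock confidence, drift and jitter estimates.
print(f_rec, conf, drift_ppm, jitter_rms)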
+def _analyze_pll_convergence(frequencies: list[float], initial_freq: float) -> tuple[float, float]:
+    """Analyze PLL convergence from frequency history.
+
+    Args:
+        frequencies: List of frequency values over time.
+        initial_freq: Initial frequency estimate.
+
+    Returns:
+        Tuple of (recovered_freq, confidence).
+    """
+    stable_start = int(0.9 * len(frequencies))
+    final_frequencies = frequencies[stable_start:]
+
+    if len(final_frequencies) == 0:
+        return initial_freq, 0.0
+
+    recovered_freq = float(np.mean(final_frequencies))
+    freq_std = float(np.std(final_frequencies))
+    confidence = float(max(0.0, min(1.0, 1.0 - freq_std / recovered_freq)))
+
+    return recovered_freq, confidence
+
+
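_analyze_pll_convergence keeps only the last 10% of the tracked frequencies, takes their mean as the recovered rate, and turns the relative spread into a confidence score of 1 - std/mean clipped to [0, 1]. A small worked example on a fabricated, settling frequency history:

# Worked example of the convergence metric above; the frequency history
# is fabricated for illustration.
import numpy as np

freqs = [1.0e6 + 500.0 * np.exp(-n / 200.0) for n in range(3000)]  # settles toward 1 MHz
tail = freqs[int(0.9 * len(freqs)):]

recovered = float(np.mean(tail))
confidence = max(0.0, min(1.0, 1.0 - float(np.std(tail)) / recovered))
print(round(recovered), round(confidence, 6))   # ~1000000 Hz with confidence close to 1.0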
+def _compute_pll_drift(frequencies: list[float], recovered_freq: float) -> float:
+    """Compute frequency drift from PLL tracking.
+
+    Args:
+        frequencies: List of frequency values.
+        recovered_freq: Final recovered frequency.
+
+    Returns:
+        Drift in ppm.
+    """
+    stable_start = int(0.9 * len(frequencies))
+    final_frequencies = frequencies[stable_start:]
+
+    if len(final_frequencies) <= 10:
+        return 0.0
+
+    time_points = np.arange(len(final_frequencies))
+    coeffs = np.polyfit(time_points, final_frequencies, 1)
+    drift_hz_per_sample = coeffs[0]
+    drift_ppm = (drift_hz_per_sample * len(final_frequencies)) / recovered_freq * 1e6
+
+    return float(drift_ppm)
+
+
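_compute_pll_drift fits a straight line to the stable tail of the frequency history and reports the total change over that window in parts per million, i.e. drift_ppm = slope * N / f * 1e6. A worked example with a fabricated 0.01 Hz-per-sample ramp on a 1 MHz clock:

# Worked example of the drift estimate above; the ramp is fabricated.
import numpy as np

tail = 1.0e6 + 0.01 * np.arange(1000)                   # stable-window frequencies
slope = np.polyfit(np.arange(len(tail)), tail, 1)[0]    # ~0.01 Hz per sample
drift_ppm = slope * len(tail) / 1.0e6 * 1e6             # total change (10 Hz) over 1 MHz
print(round(drift_ppm, 3))                              # ~10.0 ppm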
+__all__ = [
+    "TimingAnalysisResult",
+    "TimingAnalyzer",
+]