oscura-0.5.1-py3-none-any.whl → oscura-0.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +169 -167
- oscura/analyzers/__init__.py +3 -0
- oscura/analyzers/classification.py +659 -0
- oscura/analyzers/digital/edges.py +325 -65
- oscura/analyzers/digital/quality.py +293 -166
- oscura/analyzers/digital/timing.py +260 -115
- oscura/analyzers/digital/timing_numba.py +334 -0
- oscura/analyzers/entropy.py +605 -0
- oscura/analyzers/eye/diagram.py +176 -109
- oscura/analyzers/eye/metrics.py +5 -5
- oscura/analyzers/jitter/__init__.py +6 -4
- oscura/analyzers/jitter/ber.py +52 -52
- oscura/analyzers/jitter/classification.py +156 -0
- oscura/analyzers/jitter/decomposition.py +163 -113
- oscura/analyzers/jitter/spectrum.py +80 -64
- oscura/analyzers/ml/__init__.py +39 -0
- oscura/analyzers/ml/features.py +600 -0
- oscura/analyzers/ml/signal_classifier.py +604 -0
- oscura/analyzers/packet/daq.py +246 -158
- oscura/analyzers/packet/parser.py +12 -1
- oscura/analyzers/packet/payload.py +50 -2110
- oscura/analyzers/packet/payload_analysis.py +361 -181
- oscura/analyzers/packet/payload_patterns.py +133 -70
- oscura/analyzers/packet/stream.py +84 -23
- oscura/analyzers/patterns/__init__.py +26 -5
- oscura/analyzers/patterns/anomaly_detection.py +908 -0
- oscura/analyzers/patterns/clustering.py +169 -108
- oscura/analyzers/patterns/clustering_optimized.py +227 -0
- oscura/analyzers/patterns/discovery.py +1 -1
- oscura/analyzers/patterns/matching.py +581 -197
- oscura/analyzers/patterns/pattern_mining.py +778 -0
- oscura/analyzers/patterns/periodic.py +121 -38
- oscura/analyzers/patterns/sequences.py +175 -78
- oscura/analyzers/power/conduction.py +1 -1
- oscura/analyzers/power/soa.py +6 -6
- oscura/analyzers/power/switching.py +250 -110
- oscura/analyzers/protocol/__init__.py +17 -1
- oscura/analyzers/protocols/base.py +6 -6
- oscura/analyzers/protocols/ble/__init__.py +38 -0
- oscura/analyzers/protocols/ble/analyzer.py +809 -0
- oscura/analyzers/protocols/ble/uuids.py +288 -0
- oscura/analyzers/protocols/can.py +257 -127
- oscura/analyzers/protocols/can_fd.py +107 -80
- oscura/analyzers/protocols/flexray.py +139 -80
- oscura/analyzers/protocols/hdlc.py +93 -58
- oscura/analyzers/protocols/i2c.py +247 -106
- oscura/analyzers/protocols/i2s.py +138 -86
- oscura/analyzers/protocols/industrial/__init__.py +40 -0
- oscura/analyzers/protocols/industrial/bacnet/__init__.py +33 -0
- oscura/analyzers/protocols/industrial/bacnet/analyzer.py +708 -0
- oscura/analyzers/protocols/industrial/bacnet/encoding.py +412 -0
- oscura/analyzers/protocols/industrial/bacnet/services.py +622 -0
- oscura/analyzers/protocols/industrial/ethercat/__init__.py +30 -0
- oscura/analyzers/protocols/industrial/ethercat/analyzer.py +474 -0
- oscura/analyzers/protocols/industrial/ethercat/mailbox.py +339 -0
- oscura/analyzers/protocols/industrial/ethercat/topology.py +166 -0
- oscura/analyzers/protocols/industrial/modbus/__init__.py +31 -0
- oscura/analyzers/protocols/industrial/modbus/analyzer.py +525 -0
- oscura/analyzers/protocols/industrial/modbus/crc.py +79 -0
- oscura/analyzers/protocols/industrial/modbus/functions.py +436 -0
- oscura/analyzers/protocols/industrial/opcua/__init__.py +21 -0
- oscura/analyzers/protocols/industrial/opcua/analyzer.py +552 -0
- oscura/analyzers/protocols/industrial/opcua/datatypes.py +446 -0
- oscura/analyzers/protocols/industrial/opcua/services.py +264 -0
- oscura/analyzers/protocols/industrial/profinet/__init__.py +23 -0
- oscura/analyzers/protocols/industrial/profinet/analyzer.py +441 -0
- oscura/analyzers/protocols/industrial/profinet/dcp.py +263 -0
- oscura/analyzers/protocols/industrial/profinet/ptcp.py +200 -0
- oscura/analyzers/protocols/jtag.py +180 -98
- oscura/analyzers/protocols/lin.py +219 -114
- oscura/analyzers/protocols/manchester.py +4 -4
- oscura/analyzers/protocols/onewire.py +253 -149
- oscura/analyzers/protocols/parallel_bus/__init__.py +20 -0
- oscura/analyzers/protocols/parallel_bus/centronics.py +92 -0
- oscura/analyzers/protocols/parallel_bus/gpib.py +137 -0
- oscura/analyzers/protocols/spi.py +192 -95
- oscura/analyzers/protocols/swd.py +321 -167
- oscura/analyzers/protocols/uart.py +267 -125
- oscura/analyzers/protocols/usb.py +235 -131
- oscura/analyzers/side_channel/power.py +17 -12
- oscura/analyzers/signal/__init__.py +15 -0
- oscura/analyzers/signal/timing_analysis.py +1086 -0
- oscura/analyzers/signal_integrity/__init__.py +4 -1
- oscura/analyzers/signal_integrity/sparams.py +2 -19
- oscura/analyzers/spectral/chunked.py +129 -60
- oscura/analyzers/spectral/chunked_fft.py +300 -94
- oscura/analyzers/spectral/chunked_wavelet.py +100 -80
- oscura/analyzers/statistical/checksum.py +376 -217
- oscura/analyzers/statistical/classification.py +229 -107
- oscura/analyzers/statistical/entropy.py +78 -53
- oscura/analyzers/statistics/correlation.py +407 -211
- oscura/analyzers/statistics/outliers.py +2 -2
- oscura/analyzers/statistics/streaming.py +30 -5
- oscura/analyzers/validation.py +216 -101
- oscura/analyzers/waveform/measurements.py +9 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +31 -15
- oscura/analyzers/waveform/spectral.py +500 -228
- oscura/api/__init__.py +31 -5
- oscura/api/dsl/__init__.py +582 -0
- oscura/{dsl → api/dsl}/commands.py +43 -76
- oscura/{dsl → api/dsl}/interpreter.py +26 -51
- oscura/{dsl → api/dsl}/parser.py +107 -77
- oscura/{dsl → api/dsl}/repl.py +2 -2
- oscura/api/dsl.py +1 -1
- oscura/{integrations → api/integrations}/__init__.py +1 -1
- oscura/{integrations → api/integrations}/llm.py +201 -102
- oscura/api/operators.py +3 -3
- oscura/api/optimization.py +144 -30
- oscura/api/rest_server.py +921 -0
- oscura/api/server/__init__.py +17 -0
- oscura/api/server/dashboard.py +850 -0
- oscura/api/server/static/README.md +34 -0
- oscura/api/server/templates/base.html +181 -0
- oscura/api/server/templates/export.html +120 -0
- oscura/api/server/templates/home.html +284 -0
- oscura/api/server/templates/protocols.html +58 -0
- oscura/api/server/templates/reports.html +43 -0
- oscura/api/server/templates/session_detail.html +89 -0
- oscura/api/server/templates/sessions.html +83 -0
- oscura/api/server/templates/waveforms.html +73 -0
- oscura/automotive/__init__.py +8 -1
- oscura/automotive/can/__init__.py +10 -0
- oscura/automotive/can/checksum.py +3 -1
- oscura/automotive/can/dbc_generator.py +590 -0
- oscura/automotive/can/message_wrapper.py +121 -74
- oscura/automotive/can/patterns.py +98 -21
- oscura/automotive/can/session.py +292 -56
- oscura/automotive/can/state_machine.py +6 -3
- oscura/automotive/can/stimulus_response.py +97 -75
- oscura/automotive/dbc/__init__.py +10 -2
- oscura/automotive/dbc/generator.py +84 -56
- oscura/automotive/dbc/parser.py +6 -6
- oscura/automotive/dtc/data.json +17 -102
- oscura/automotive/dtc/database.py +2 -2
- oscura/automotive/flexray/__init__.py +31 -0
- oscura/automotive/flexray/analyzer.py +504 -0
- oscura/automotive/flexray/crc.py +185 -0
- oscura/automotive/flexray/fibex.py +449 -0
- oscura/automotive/j1939/__init__.py +45 -8
- oscura/automotive/j1939/analyzer.py +605 -0
- oscura/automotive/j1939/spns.py +326 -0
- oscura/automotive/j1939/transport.py +306 -0
- oscura/automotive/lin/__init__.py +47 -0
- oscura/automotive/lin/analyzer.py +612 -0
- oscura/automotive/loaders/blf.py +13 -2
- oscura/automotive/loaders/csv_can.py +143 -72
- oscura/automotive/loaders/dispatcher.py +50 -2
- oscura/automotive/loaders/mdf.py +86 -45
- oscura/automotive/loaders/pcap.py +111 -61
- oscura/automotive/uds/__init__.py +4 -0
- oscura/automotive/uds/analyzer.py +725 -0
- oscura/automotive/uds/decoder.py +140 -58
- oscura/automotive/uds/models.py +7 -1
- oscura/automotive/visualization.py +1 -1
- oscura/cli/analyze.py +348 -0
- oscura/cli/batch.py +142 -122
- oscura/cli/benchmark.py +275 -0
- oscura/cli/characterize.py +137 -82
- oscura/cli/compare.py +224 -131
- oscura/cli/completion.py +250 -0
- oscura/cli/config_cmd.py +361 -0
- oscura/cli/decode.py +164 -87
- oscura/cli/export.py +286 -0
- oscura/cli/main.py +115 -31
- oscura/{onboarding → cli/onboarding}/__init__.py +3 -3
- oscura/{onboarding → cli/onboarding}/help.py +80 -58
- oscura/{onboarding → cli/onboarding}/tutorials.py +97 -72
- oscura/{onboarding → cli/onboarding}/wizard.py +55 -36
- oscura/cli/progress.py +147 -0
- oscura/cli/shell.py +157 -135
- oscura/cli/validate_cmd.py +204 -0
- oscura/cli/visualize.py +158 -0
- oscura/convenience.py +125 -79
- oscura/core/__init__.py +4 -2
- oscura/core/backend_selector.py +3 -3
- oscura/core/cache.py +126 -15
- oscura/core/cancellation.py +1 -1
- oscura/{config → core/config}/__init__.py +20 -11
- oscura/{config → core/config}/defaults.py +1 -1
- oscura/{config → core/config}/loader.py +7 -5
- oscura/{config → core/config}/memory.py +5 -5
- oscura/{config → core/config}/migration.py +1 -1
- oscura/{config → core/config}/pipeline.py +99 -23
- oscura/{config → core/config}/preferences.py +1 -1
- oscura/{config → core/config}/protocol.py +3 -3
- oscura/{config → core/config}/schema.py +426 -272
- oscura/{config → core/config}/settings.py +1 -1
- oscura/{config → core/config}/thresholds.py +195 -153
- oscura/core/correlation.py +5 -6
- oscura/core/cross_domain.py +0 -2
- oscura/core/debug.py +9 -5
- oscura/{extensibility → core/extensibility}/docs.py +158 -70
- oscura/{extensibility → core/extensibility}/extensions.py +160 -76
- oscura/{extensibility → core/extensibility}/logging.py +1 -1
- oscura/{extensibility → core/extensibility}/measurements.py +1 -1
- oscura/{extensibility → core/extensibility}/plugins.py +1 -1
- oscura/{extensibility → core/extensibility}/templates.py +73 -3
- oscura/{extensibility → core/extensibility}/validation.py +1 -1
- oscura/core/gpu_backend.py +11 -7
- oscura/core/log_query.py +101 -11
- oscura/core/logging.py +126 -54
- oscura/core/logging_advanced.py +5 -5
- oscura/core/memory_limits.py +108 -70
- oscura/core/memory_monitor.py +2 -2
- oscura/core/memory_progress.py +7 -7
- oscura/core/memory_warnings.py +1 -1
- oscura/core/numba_backend.py +13 -13
- oscura/{plugins → core/plugins}/__init__.py +9 -9
- oscura/{plugins → core/plugins}/base.py +7 -7
- oscura/{plugins → core/plugins}/cli.py +3 -3
- oscura/{plugins → core/plugins}/discovery.py +186 -106
- oscura/{plugins → core/plugins}/lifecycle.py +1 -1
- oscura/{plugins → core/plugins}/manager.py +7 -7
- oscura/{plugins → core/plugins}/registry.py +3 -3
- oscura/{plugins → core/plugins}/versioning.py +1 -1
- oscura/core/progress.py +16 -1
- oscura/core/provenance.py +8 -2
- oscura/{schemas → core/schemas}/__init__.py +2 -2
- oscura/{schemas → core/schemas}/device_mapping.json +2 -8
- oscura/{schemas → core/schemas}/packet_format.json +4 -24
- oscura/{schemas → core/schemas}/protocol_definition.json +2 -12
- oscura/core/types.py +4 -0
- oscura/core/uncertainty.py +3 -3
- oscura/correlation/__init__.py +52 -0
- oscura/correlation/multi_protocol.py +811 -0
- oscura/discovery/auto_decoder.py +117 -35
- oscura/discovery/comparison.py +191 -86
- oscura/discovery/quality_validator.py +155 -68
- oscura/discovery/signal_detector.py +196 -79
- oscura/export/__init__.py +18 -8
- oscura/export/kaitai_struct.py +513 -0
- oscura/export/scapy_layer.py +801 -0
- oscura/export/wireshark/generator.py +1 -1
- oscura/export/wireshark/templates/dissector.lua.j2 +2 -2
- oscura/export/wireshark_dissector.py +746 -0
- oscura/guidance/wizard.py +207 -111
- oscura/hardware/__init__.py +19 -0
- oscura/{acquisition → hardware/acquisition}/__init__.py +4 -4
- oscura/{acquisition → hardware/acquisition}/file.py +2 -2
- oscura/{acquisition → hardware/acquisition}/hardware.py +7 -7
- oscura/{acquisition → hardware/acquisition}/saleae.py +15 -12
- oscura/{acquisition → hardware/acquisition}/socketcan.py +1 -1
- oscura/{acquisition → hardware/acquisition}/streaming.py +2 -2
- oscura/{acquisition → hardware/acquisition}/synthetic.py +3 -3
- oscura/{acquisition → hardware/acquisition}/visa.py +33 -11
- oscura/hardware/firmware/__init__.py +29 -0
- oscura/hardware/firmware/pattern_recognition.py +874 -0
- oscura/hardware/hal_detector.py +736 -0
- oscura/hardware/security/__init__.py +37 -0
- oscura/hardware/security/side_channel_detector.py +1126 -0
- oscura/inference/__init__.py +4 -0
- oscura/inference/active_learning/observation_table.py +4 -1
- oscura/inference/alignment.py +216 -123
- oscura/inference/bayesian.py +113 -33
- oscura/inference/crc_reverse.py +101 -55
- oscura/inference/logic.py +6 -2
- oscura/inference/message_format.py +342 -183
- oscura/inference/protocol.py +95 -44
- oscura/inference/protocol_dsl.py +180 -82
- oscura/inference/signal_intelligence.py +1439 -706
- oscura/inference/spectral.py +99 -57
- oscura/inference/state_machine.py +810 -158
- oscura/inference/stream.py +270 -110
- oscura/iot/__init__.py +34 -0
- oscura/iot/coap/__init__.py +32 -0
- oscura/iot/coap/analyzer.py +668 -0
- oscura/iot/coap/options.py +212 -0
- oscura/iot/lorawan/__init__.py +21 -0
- oscura/iot/lorawan/crypto.py +206 -0
- oscura/iot/lorawan/decoder.py +801 -0
- oscura/iot/lorawan/mac_commands.py +341 -0
- oscura/iot/mqtt/__init__.py +27 -0
- oscura/iot/mqtt/analyzer.py +999 -0
- oscura/iot/mqtt/properties.py +315 -0
- oscura/iot/zigbee/__init__.py +31 -0
- oscura/iot/zigbee/analyzer.py +615 -0
- oscura/iot/zigbee/security.py +153 -0
- oscura/iot/zigbee/zcl.py +349 -0
- oscura/jupyter/display.py +125 -45
- oscura/{exploratory → jupyter/exploratory}/__init__.py +8 -8
- oscura/{exploratory → jupyter/exploratory}/error_recovery.py +298 -141
- oscura/jupyter/exploratory/fuzzy.py +746 -0
- oscura/{exploratory → jupyter/exploratory}/fuzzy_advanced.py +258 -100
- oscura/{exploratory → jupyter/exploratory}/legacy.py +464 -242
- oscura/{exploratory → jupyter/exploratory}/parse.py +167 -145
- oscura/{exploratory → jupyter/exploratory}/recovery.py +119 -87
- oscura/jupyter/exploratory/sync.py +612 -0
- oscura/{exploratory → jupyter/exploratory}/unknown.py +299 -176
- oscura/jupyter/magic.py +4 -4
- oscura/{ui → jupyter/ui}/__init__.py +2 -2
- oscura/{ui → jupyter/ui}/formatters.py +3 -3
- oscura/{ui → jupyter/ui}/progressive_display.py +153 -82
- oscura/loaders/__init__.py +183 -67
- oscura/loaders/binary.py +88 -1
- oscura/loaders/chipwhisperer.py +153 -137
- oscura/loaders/configurable.py +208 -86
- oscura/loaders/csv_loader.py +458 -215
- oscura/loaders/hdf5_loader.py +278 -119
- oscura/loaders/lazy.py +87 -54
- oscura/loaders/mmap_loader.py +1 -1
- oscura/loaders/numpy_loader.py +253 -116
- oscura/loaders/pcap.py +226 -151
- oscura/loaders/rigol.py +110 -49
- oscura/loaders/sigrok.py +201 -78
- oscura/loaders/tdms.py +81 -58
- oscura/loaders/tektronix.py +291 -174
- oscura/loaders/touchstone.py +182 -87
- oscura/loaders/tss.py +456 -0
- oscura/loaders/vcd.py +215 -117
- oscura/loaders/wav.py +155 -68
- oscura/reporting/__init__.py +9 -0
- oscura/reporting/analyze.py +352 -146
- oscura/reporting/argument_preparer.py +69 -14
- oscura/reporting/auto_report.py +97 -61
- oscura/reporting/batch.py +131 -58
- oscura/reporting/chart_selection.py +57 -45
- oscura/reporting/comparison.py +63 -17
- oscura/reporting/content/executive.py +76 -24
- oscura/reporting/core_formats/multi_format.py +11 -8
- oscura/reporting/engine.py +312 -158
- oscura/reporting/enhanced_reports.py +949 -0
- oscura/reporting/export.py +86 -43
- oscura/reporting/formatting/numbers.py +69 -42
- oscura/reporting/html.py +139 -58
- oscura/reporting/index.py +137 -65
- oscura/reporting/output.py +158 -67
- oscura/reporting/pdf.py +67 -102
- oscura/reporting/plots.py +191 -112
- oscura/reporting/sections.py +88 -47
- oscura/reporting/standards.py +104 -61
- oscura/reporting/summary_generator.py +75 -55
- oscura/reporting/tables.py +138 -54
- oscura/reporting/templates/enhanced/protocol_re.html +525 -0
- oscura/sessions/__init__.py +14 -23
- oscura/sessions/base.py +3 -3
- oscura/sessions/blackbox.py +106 -10
- oscura/sessions/generic.py +2 -2
- oscura/sessions/legacy.py +783 -0
- oscura/side_channel/__init__.py +63 -0
- oscura/side_channel/dpa.py +1025 -0
- oscura/utils/__init__.py +15 -1
- oscura/utils/bitwise.py +118 -0
- oscura/{builders → utils/builders}/__init__.py +1 -1
- oscura/{comparison → utils/comparison}/__init__.py +6 -6
- oscura/{comparison → utils/comparison}/compare.py +202 -101
- oscura/{comparison → utils/comparison}/golden.py +83 -63
- oscura/{comparison → utils/comparison}/limits.py +313 -89
- oscura/{comparison → utils/comparison}/mask.py +151 -45
- oscura/{comparison → utils/comparison}/trace_diff.py +1 -1
- oscura/{comparison → utils/comparison}/visualization.py +147 -89
- oscura/{component → utils/component}/__init__.py +3 -3
- oscura/{component → utils/component}/impedance.py +122 -58
- oscura/{component → utils/component}/reactive.py +165 -168
- oscura/{component → utils/component}/transmission_line.py +3 -3
- oscura/{filtering → utils/filtering}/__init__.py +6 -6
- oscura/{filtering → utils/filtering}/base.py +1 -1
- oscura/{filtering → utils/filtering}/convenience.py +2 -2
- oscura/{filtering → utils/filtering}/design.py +169 -93
- oscura/{filtering → utils/filtering}/filters.py +2 -2
- oscura/{filtering → utils/filtering}/introspection.py +2 -2
- oscura/utils/geometry.py +31 -0
- oscura/utils/imports.py +184 -0
- oscura/utils/lazy.py +1 -1
- oscura/{math → utils/math}/__init__.py +2 -2
- oscura/{math → utils/math}/arithmetic.py +114 -48
- oscura/{math → utils/math}/interpolation.py +139 -106
- oscura/utils/memory.py +129 -66
- oscura/utils/memory_advanced.py +92 -9
- oscura/utils/memory_extensions.py +10 -8
- oscura/{optimization → utils/optimization}/__init__.py +1 -1
- oscura/{optimization → utils/optimization}/search.py +2 -2
- oscura/utils/performance/__init__.py +58 -0
- oscura/utils/performance/caching.py +889 -0
- oscura/utils/performance/lsh_clustering.py +333 -0
- oscura/utils/performance/memory_optimizer.py +699 -0
- oscura/utils/performance/optimizations.py +675 -0
- oscura/utils/performance/parallel.py +654 -0
- oscura/utils/performance/profiling.py +661 -0
- oscura/{pipeline → utils/pipeline}/base.py +1 -1
- oscura/{pipeline → utils/pipeline}/composition.py +1 -1
- oscura/{pipeline → utils/pipeline}/parallel.py +3 -2
- oscura/{pipeline → utils/pipeline}/pipeline.py +1 -1
- oscura/{pipeline → utils/pipeline}/reverse_engineering.py +412 -221
- oscura/{search → utils/search}/__init__.py +3 -3
- oscura/{search → utils/search}/anomaly.py +188 -58
- oscura/utils/search/context.py +294 -0
- oscura/{search → utils/search}/pattern.py +138 -10
- oscura/utils/serial.py +51 -0
- oscura/utils/storage/__init__.py +61 -0
- oscura/utils/storage/database.py +1166 -0
- oscura/{streaming → utils/streaming}/chunked.py +302 -143
- oscura/{streaming → utils/streaming}/progressive.py +1 -1
- oscura/{streaming → utils/streaming}/realtime.py +3 -2
- oscura/{triggering → utils/triggering}/__init__.py +6 -6
- oscura/{triggering → utils/triggering}/base.py +6 -6
- oscura/{triggering → utils/triggering}/edge.py +2 -2
- oscura/{triggering → utils/triggering}/pattern.py +2 -2
- oscura/{triggering → utils/triggering}/pulse.py +115 -74
- oscura/{triggering → utils/triggering}/window.py +2 -2
- oscura/utils/validation.py +32 -0
- oscura/validation/__init__.py +121 -0
- oscura/{compliance → validation/compliance}/__init__.py +5 -5
- oscura/{compliance → validation/compliance}/advanced.py +5 -5
- oscura/{compliance → validation/compliance}/masks.py +1 -1
- oscura/{compliance → validation/compliance}/reporting.py +127 -53
- oscura/{compliance → validation/compliance}/testing.py +114 -52
- oscura/validation/compliance_tests.py +915 -0
- oscura/validation/fuzzer.py +990 -0
- oscura/validation/grammar_tests.py +596 -0
- oscura/validation/grammar_validator.py +904 -0
- oscura/validation/hil_testing.py +977 -0
- oscura/{quality → validation/quality}/__init__.py +4 -4
- oscura/{quality → validation/quality}/ensemble.py +251 -171
- oscura/{quality → validation/quality}/explainer.py +3 -3
- oscura/{quality → validation/quality}/scoring.py +1 -1
- oscura/{quality → validation/quality}/warnings.py +4 -4
- oscura/validation/regression_suite.py +808 -0
- oscura/validation/replay.py +788 -0
- oscura/{testing → validation/testing}/__init__.py +2 -2
- oscura/{testing → validation/testing}/synthetic.py +5 -5
- oscura/visualization/__init__.py +9 -0
- oscura/visualization/accessibility.py +1 -1
- oscura/visualization/annotations.py +64 -67
- oscura/visualization/colors.py +7 -7
- oscura/visualization/digital.py +180 -81
- oscura/visualization/eye.py +236 -85
- oscura/visualization/interactive.py +320 -143
- oscura/visualization/jitter.py +587 -247
- oscura/visualization/layout.py +169 -134
- oscura/visualization/optimization.py +103 -52
- oscura/visualization/palettes.py +1 -1
- oscura/visualization/power.py +427 -211
- oscura/visualization/power_extended.py +626 -297
- oscura/visualization/presets.py +2 -0
- oscura/visualization/protocols.py +495 -181
- oscura/visualization/render.py +79 -63
- oscura/visualization/reverse_engineering.py +171 -124
- oscura/visualization/signal_integrity.py +460 -279
- oscura/visualization/specialized.py +190 -100
- oscura/visualization/spectral.py +670 -255
- oscura/visualization/thumbnails.py +166 -137
- oscura/visualization/waveform.py +150 -63
- oscura/workflows/__init__.py +3 -0
- oscura/{batch → workflows/batch}/__init__.py +5 -5
- oscura/{batch → workflows/batch}/advanced.py +150 -75
- oscura/workflows/batch/aggregate.py +531 -0
- oscura/workflows/batch/analyze.py +236 -0
- oscura/{batch → workflows/batch}/logging.py +2 -2
- oscura/{batch → workflows/batch}/metrics.py +1 -1
- oscura/workflows/complete_re.py +1144 -0
- oscura/workflows/compliance.py +44 -54
- oscura/workflows/digital.py +197 -51
- oscura/workflows/legacy/__init__.py +12 -0
- oscura/{workflow → workflows/legacy}/dag.py +4 -1
- oscura/workflows/multi_trace.py +9 -9
- oscura/workflows/power.py +42 -62
- oscura/workflows/protocol.py +82 -49
- oscura/workflows/reverse_engineering.py +351 -150
- oscura/workflows/signal_integrity.py +157 -82
- oscura-0.7.0.dist-info/METADATA +661 -0
- oscura-0.7.0.dist-info/RECORD +591 -0
- oscura/batch/aggregate.py +0 -300
- oscura/batch/analyze.py +0 -139
- oscura/dsl/__init__.py +0 -73
- oscura/exceptions.py +0 -59
- oscura/exploratory/fuzzy.py +0 -513
- oscura/exploratory/sync.py +0 -384
- oscura/exporters/__init__.py +0 -94
- oscura/exporters/csv.py +0 -303
- oscura/exporters/exporters.py +0 -44
- oscura/exporters/hdf5.py +0 -217
- oscura/exporters/html_export.py +0 -701
- oscura/exporters/json_export.py +0 -291
- oscura/exporters/markdown_export.py +0 -367
- oscura/exporters/matlab_export.py +0 -354
- oscura/exporters/npz_export.py +0 -219
- oscura/exporters/spice_export.py +0 -210
- oscura/search/context.py +0 -149
- oscura/session/__init__.py +0 -34
- oscura/session/annotations.py +0 -289
- oscura/session/history.py +0 -313
- oscura/session/session.py +0 -520
- oscura/workflow/__init__.py +0 -13
- oscura-0.5.1.dist-info/METADATA +0 -583
- oscura-0.5.1.dist-info/RECORD +0 -481
- /oscura/core/{config.py → config/legacy.py} +0 -0
- /oscura/{extensibility → core/extensibility}/__init__.py +0 -0
- /oscura/{extensibility → core/extensibility}/registry.py +0 -0
- /oscura/{plugins → core/plugins}/isolation.py +0 -0
- /oscura/{schemas → core/schemas}/bus_configuration.json +0 -0
- /oscura/{builders → utils/builders}/signal_builder.py +0 -0
- /oscura/{optimization → utils/optimization}/parallel.py +0 -0
- /oscura/{pipeline → utils/pipeline}/__init__.py +0 -0
- /oscura/{streaming → utils/streaming}/__init__.py +0 -0
- {oscura-0.5.1.dist-info → oscura-0.7.0.dist-info}/WHEEL +0 -0
- {oscura-0.5.1.dist-info → oscura-0.7.0.dist-info}/entry_points.txt +0 -0
- {oscura-0.5.1.dist-info → oscura-0.7.0.dist-info}/licenses/LICENSE +0 -0
--- a/oscura/analyzers/waveform/spectral.py
+++ b/oscura/analyzers/waveform/spectral.py
@@ -18,7 +18,7 @@ References:
 from __future__ import annotations

 from functools import lru_cache
-from typing import TYPE_CHECKING, Literal
+from typing import TYPE_CHECKING, Any, Literal

 import numpy as np
 from scipy import signal as sp_signal
@@ -190,57 +190,127 @@ def fft(
         analysis_type="fft",
     )

-
+    data_processed = _apply_detrend(data, detrend)
+    nfft_computed = _compute_nfft(n, nfft)
+    sample_rate = trace.metadata.sample_rate
+
+    if use_cache:
+        return _fft_cached_path(
+            data_processed, n, window, nfft_computed, detrend, sample_rate, return_phase
+        )
+    else:
+        return _fft_direct_path(data_processed, n, window, nfft_computed, sample_rate, return_phase)
+
+
+def _apply_detrend(
+    data: NDArray[np.float64], detrend: Literal["none", "mean", "linear"]
+) -> NDArray[np.float64]:
+    """Apply detrending to data.
+
+    Args:
+        data: Input data.
+        detrend: Detrend method.
+
+    Returns:
+        Detrended data.
+    """
     if detrend == "mean":
-
+        detrended: NDArray[np.float64] = data - np.mean(data)
+        return detrended
     elif detrend == "linear":
-
+        linear_detrend: NDArray[np.float64] = np.asarray(
+            sp_signal.detrend(data, type="linear"), dtype=np.float64
+        )
+        return linear_detrend
     else:
-
+        return data

-    # Determine FFT length
-    nfft_computed = int(2 ** np.ceil(np.log2(n))) if nfft is None else max(nfft, n)

-
+def _compute_nfft(n: int, nfft: int | None) -> int:
+    """Compute FFT length.

-
-
-
-        data_bytes = data_processed.tobytes()
-
-        # Call cached implementation
-        freq, magnitude_db, phase = _compute_fft_cached(
-            data_bytes,
-            n,
-            window,
-            nfft_computed,
-            detrend,
-            sample_rate,
-        )
-        _fft_cache_stats["hits"] += 1
+    Args:
+        n: Data length.
+        nfft: Requested FFT length or None.

-
-
-
-
+    Returns:
+        Computed FFT length (power of 2 or max of nfft and n).
+    """
+    return int(2 ** np.ceil(np.log2(n))) if nfft is None else max(nfft, n)
+
+
+def _fft_cached_path(
+    data_processed: NDArray[np.float64],
+    n: int,
+    window: str,
+    nfft_computed: int,
+    detrend: str,
+    sample_rate: float,
+    return_phase: bool,
+) -> (
+    tuple[NDArray[np.float64], NDArray[np.float64]]
+    | tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]
+):
+    """Execute cached FFT computation path.
+
+    Args:
+        data_processed: Preprocessed data.
+        n: Data length.
+        window: Window function name.
+        nfft_computed: FFT length.
+        detrend: Detrend method string.
+        sample_rate: Sample rate.
+        return_phase: Whether to return phase.
+
+    Returns:
+        FFT results (with or without phase).
+    """
+    data_bytes = data_processed.tobytes()
+    freq, magnitude_db, phase = _compute_fft_cached(
+        data_bytes, n, window, nfft_computed, detrend, sample_rate
+    )
+    _fft_cache_stats["hits"] += 1

-
+    if return_phase:
+        return freq, magnitude_db, phase
+    else:
+        return freq, magnitude_db
+
+
+def _fft_direct_path(
+    data_processed: NDArray[np.float64],
+    n: int,
+    window: str,
+    nfft_computed: int,
+    sample_rate: float,
+    return_phase: bool,
+) -> (
+    tuple[NDArray[np.float64], NDArray[np.float64]]
+    | tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]
+):
+    """Execute non-cached FFT computation path.
+
+    Args:
+        data_processed: Preprocessed data.
+        n: Data length.
+        window: Window function name.
+        nfft_computed: FFT length.
+        sample_rate: Sample rate.
+        return_phase: Whether to return phase.
+
+    Returns:
+        FFT results (with or without phase).
+    """
     _fft_cache_stats["misses"] += 1

-    # Apply window
     w = get_window(window, n)
     data_windowed = data_processed * w
-
-    # Compute FFT
     spectrum = np.fft.rfft(data_windowed, n=nfft_computed)
-
-    # Frequency axis
     freq = np.fft.rfftfreq(nfft_computed, d=1.0 / sample_rate)

-    # Magnitude in dB
+    # Magnitude in dB
     window_gain = np.sum(w) / n
     magnitude = np.abs(spectrum) / (n * window_gain)
-    # Avoid log(0)
     magnitude = np.maximum(magnitude, 1e-20)
     magnitude_db = 20 * np.log10(magnitude)

@@ -613,7 +683,7 @@ def thd(
     _fund_idx, fund_freq, fund_mag = _find_fundamental(freq, magnitude)

     if fund_mag == 0 or fund_freq == 0:
-        return np.nan
+        return np.nan

     # Find harmonics
     harmonic_indices = _find_harmonic_indices(freq, fund_freq, n_harmonics)
@@ -629,7 +699,7 @@ def thd(

     if return_db:
         if thd_ratio <= 0:
-            return -np.inf
+            return -np.inf
         return float(20 * np.log10(thd_ratio))
     else:
         return float(thd_ratio * 100)
@@ -663,59 +733,108 @@ def snr(
     References:
         IEEE 1241-2010 Section 4.1.4.1
     """
-    # Use data length as NFFT to avoid zero-padding that breaks coherence
     if nfft is None:
         nfft = len(trace.data)

+    freq, magnitude = _compute_magnitude_spectrum(trace, window, nfft)
+    fund_idx, fund_freq, fund_mag = _find_fundamental(freq, magnitude)
+
+    if fund_mag == 0 or fund_freq == 0:
+        return np.nan
+
+    harmonic_indices = _find_harmonic_indices(freq, fund_freq, n_harmonics)
+    exclude_indices = _build_exclusion_set(fund_idx, harmonic_indices, len(magnitude))
+
+    signal_power = _compute_signal_power(magnitude, fund_idx)
+    noise_power = _compute_noise_power(magnitude, exclude_indices)
+
+    if noise_power <= 0:
+        return np.inf
+
+    return float(10 * np.log10(signal_power / noise_power))
+
+
+def _compute_magnitude_spectrum(
+    trace: WaveformTrace, window: str, nfft: int
+) -> tuple[NDArray[np.floating[Any]], NDArray[np.floating[Any]]]:
+    """Compute magnitude spectrum from FFT.
+
+    Args:
+        trace: Input waveform.
+        window: Window function name.
+        nfft: FFT length.
+
+    Returns:
+        Tuple of (frequency array, magnitude array).
+    """
     result = fft(trace, window=window, nfft=nfft, detrend="mean")
     freq, mag_db = result[0], result[1]
     magnitude = 10 ** (mag_db / 20)
+    return freq, magnitude

-    # Find fundamental
-    fund_idx, fund_freq, fund_mag = _find_fundamental(freq, magnitude)

-
-
+def _build_exclusion_set(fund_idx: int, harmonic_indices: list[int], n_bins: int) -> set[int]:
+    """Build set of frequency bins to exclude from noise.

-
-
+    Args:
+        fund_idx: Fundamental frequency bin index.
+        harmonic_indices: Harmonic bin indices.
+        n_bins: Total number of bins.

-
-
+    Returns:
+        Set of bin indices to exclude.
+    """
     exclude_indices = {0}  # DC

-    # Exclude fundamental
-    for offset in range(-3, 4):
+    # Exclude fundamental +/- 3 bins
+    for offset in range(-3, 4):
         idx = fund_idx + offset
-        if 0 <= idx <
+        if 0 <= idx < n_bins:
             exclude_indices.add(idx)

-    # Exclude harmonics
+    # Exclude harmonics +/- 3 bins
     for h_idx in harmonic_indices:
-        for offset in range(-3, 4):
+        for offset in range(-3, 4):
             idx = h_idx + offset
-            if 0 <= idx <
+            if 0 <= idx < n_bins:
                 exclude_indices.add(idx)

-
-
+    return exclude_indices
+
+
+def _compute_signal_power(magnitude: NDArray[np.floating[Any]], fund_idx: int) -> float:
+    """Compute signal power from fundamental.
+
+    Args:
+        magnitude: Magnitude spectrum.
+        fund_idx: Fundamental bin index.
+
+    Returns:
+        Signal power (3-bin sum around fundamental).
+    """
     signal_power = 0.0
     for offset in range(-1, 2):
         idx = fund_idx + offset
         if 0 <= idx < len(magnitude):
             signal_power += magnitude[idx] ** 2
+    return signal_power
+

-
+def _compute_noise_power(magnitude: NDArray[np.floating[Any]], exclude_indices: set[int]) -> float:
+    """Compute noise power from non-excluded bins.
+
+    Args:
+        magnitude: Magnitude spectrum.
+        exclude_indices: Bins to exclude.
+
+    Returns:
+        Noise power.
+    """
     noise_power = 0.0
     for i in range(len(magnitude)):
         if i not in exclude_indices:
             noise_power += magnitude[i] ** 2
-
-    if noise_power <= 0:
-        return np.inf  # type: ignore[no-any-return]
-
-    snr_ratio = signal_power / noise_power
-    return float(10 * np.log10(snr_ratio))
+    return noise_power


 def sinad(
@@ -756,7 +875,7 @@ def sinad(
     fund_idx, _fund_freq, fund_mag = _find_fundamental(freq, magnitude)

     if fund_mag == 0:
-        return np.nan
+        return np.nan

     # Signal power: use 3-bin window around fundamental to capture spectral leakage
     signal_power = 0.0
@@ -772,7 +891,7 @@ def sinad(
     nad_power = total_power - signal_power

     if nad_power <= 0:
-        return np.inf
+        return np.inf

     sinad_ratio = signal_power / nad_power
     return float(10 * np.log10(sinad_ratio))
@@ -806,7 +925,7 @@ def enob(
     sinad_db = sinad(trace, window=window, nfft=nfft)

     if np.isnan(sinad_db) or sinad_db <= 0:
-        return np.nan
+        return np.nan

     return float((sinad_db - 1.76) / 6.02)

@@ -849,7 +968,7 @@ def sfdr(
     fund_idx, _fund_freq, fund_mag = _find_fundamental(freq, magnitude)

     if fund_mag == 0:
-        return np.nan
+        return np.nan

     # Create mask for spurs (exclude fundamental and DC)
     spur_mask = np.ones(len(magnitude), dtype=bool)
@@ -868,12 +987,12 @@ def sfdr(
     # Find largest spur
     spur_magnitudes = magnitude[spur_mask]
     if len(spur_magnitudes) == 0:
-        return np.inf
+        return np.inf

     max_spur = np.max(spur_magnitudes)

     if max_spur <= 0:
-        return np.inf
+        return np.inf

     sfdr_ratio = fund_mag / max_spur
     return float(20 * np.log10(sfdr_ratio))
@@ -1041,9 +1160,7 @@ def dwt(
     try:
         import pywt
     except ImportError:
-        raise ImportError(
-            "DWT requires PyWavelets library. Install with: pip install PyWavelets"
-        )
+        raise ImportError("DWT requires PyWavelets library. Install with: pip install PyWavelets")

     data = trace.data

@@ -1065,7 +1182,7 @@ def dwt(
         # Perform multi-level DWT
         coeffs = pywt.wavedec(data, wavelet, mode=mode, level=level)
     except ValueError as e:
-        raise AnalysisError(f"DWT decomposition failed: {e}", analysis_type="dwt")
+        raise AnalysisError(f"DWT decomposition failed: {e}", analysis_type="dwt")

     # Package into dictionary
     result = {"cA": coeffs[0]}  # Approximation coefficients
@@ -1108,9 +1225,7 @@ def idwt(
     try:
         import pywt
     except ImportError:
-        raise ImportError(
-            "IDWT requires PyWavelets library. Install with: pip install PyWavelets"
-        )
+        raise ImportError("IDWT requires PyWavelets library. Install with: pip install PyWavelets")

     # Reconstruct coefficient list
     cA = coeffs["cA"]
@@ -1128,7 +1243,7 @@ def idwt(
     try:
         reconstructed = pywt.waverec(coeff_list, wavelet, mode=mode)
     except ValueError as e:
-        raise AnalysisError(f"IDWT reconstruction failed: {e}", analysis_type="idwt")
+        raise AnalysisError(f"IDWT reconstruction failed: {e}", analysis_type="idwt")

     return np.asarray(reconstructed, dtype=np.float64)

@@ -1304,7 +1419,6 @@ def spectrogram_chunked(
 ) -> tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]:
     """Compute spectrogram for very large signals using chunked processing.

-
     Processes signal in chunks with overlap to handle files larger than RAM.
     Stitches STFT results from overlapping chunks to create continuous spectrogram.

@@ -1313,59 +1427,95 @@ def spectrogram_chunked(
         chunk_size: Maximum samples per chunk (default 100M).
         window: Window function name.
         nperseg: Segment length for STFT. If None, auto-selected.
-        noverlap: Overlap between STFT segments.
+        noverlap: Overlap between STFT segments.
         nfft: FFT length per segment.
-        overlap_factor: Overlap factor between chunks (default 2.0
+        overlap_factor: Overlap factor between chunks (default 2.0).

     Returns:
-        (times, frequencies, magnitude_db)
-        and magnitude in dB as 2D array.
+        (times, frequencies, magnitude_db) as 2D spectrogram.

     Example:
-        >>>
-        >>> t, f, Sxx = spectrogram_chunked(trace, chunk_size=50_000_000, nperseg=4096)
-        >>> print(f"Spectrogram shape: {Sxx.shape}")
-
-    References:
-        scipy.signal.stft documentation
+        >>> t, f, Sxx = spectrogram_chunked(trace, chunk_size=50_000_000)
     """
     data = trace.data
     n = len(data)
     sample_rate = trace.metadata.sample_rate

-
+    nperseg, noverlap = _set_spectrogram_defaults(nperseg, noverlap, n)
+    chunk_overlap = int(overlap_factor * nperseg)
+
+    if n <= chunk_size:
+        return spectrogram(trace, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft)
+
+    chunks_stft, chunks_times, freq = _process_spectrogram_chunks(
+        data, n, chunk_size, chunk_overlap, sample_rate, window, nperseg, noverlap, nfft
+    )
+
+    Sxx = np.concatenate(chunks_stft, axis=1)
+    times = np.concatenate(chunks_times)
+
+    Sxx = np.maximum(Sxx, 1e-20)
+    Sxx_db: NDArray[np.float64] = np.asarray(10 * np.log10(Sxx), dtype=np.float64)
+
+    return times, freq, Sxx_db
+
+
+def _set_spectrogram_defaults(nperseg: int | None, noverlap: int | None, n: int) -> tuple[int, int]:
+    """Set default spectrogram parameters.
+
+    Args:
+        nperseg: Segment length or None.
+        noverlap: Overlap or None.
+        n: Data length.
+
+    Returns:
+        Tuple of (nperseg, noverlap).
+    """
     if nperseg is None:
         nperseg = min(256, n // 4)
         nperseg = max(nperseg, 16)
-
     if noverlap is None:
         noverlap = nperseg - nperseg // 8
+    return nperseg, noverlap

-    # Calculate chunk overlap (overlap_factor * nperseg on each boundary)
-    chunk_overlap = int(overlap_factor * nperseg)

-
-
-
+def _process_spectrogram_chunks(
+    data: NDArray[np.float64],
+    n: int,
+    chunk_size: int,
+    chunk_overlap: int,
+    sample_rate: float,
+    window: str,
+    nperseg: int,
+    noverlap: int,
+    nfft: int | None,
+) -> tuple[list[NDArray[np.float64]], list[NDArray[np.float64]], NDArray[np.float64]]:
+    """Process all spectrogram chunks.

-
+    Args:
+        data: Signal data.
+        n: Data length.
+        chunk_size: Chunk size in samples.
+        chunk_overlap: Overlap between chunks.
+        sample_rate: Sampling rate.
+        window: Window function.
+        nperseg: Segment length.
+        noverlap: Segment overlap.
+        nfft: FFT length.
+
+    Returns:
+        Tuple of (chunks_stft, chunks_times, freq).
+    """
     chunks_stft = []
     chunks_times = []
     chunk_start = 0
+    freq: NDArray[np.float64] | None = None

     while chunk_start < n:
-        # Determine chunk end with overlap
         chunk_end = min(chunk_start + chunk_size, n)
+        chunk_data = _extract_spectrogram_chunk(data, chunk_start, chunk_end, chunk_overlap, n)

-
-        chunk_data_start = chunk_start - chunk_overlap if chunk_start > 0 else 0
-
-        chunk_data_end = chunk_end + chunk_overlap if chunk_end < n else n
-
-        chunk_data = data[chunk_data_start:chunk_data_end]
-
-        # Compute STFT for chunk
-        freq, times_chunk, Sxx_chunk = sp_signal.spectrogram(
+        freq_local, times_chunk, Sxx_chunk = sp_signal.spectrogram(
             chunk_data,
             fs=sample_rate,
             window=window,
@@ -1375,48 +1525,117 @@ def spectrogram_chunked(
             scaling="spectrum",
         )

-
-
-
-
-
-
-
-
-
-
-
-
-            Sxx_chunk = Sxx_chunk[:, valid_mask]
-            times_chunk_adjusted = times_chunk_adjusted[valid_mask]
-        elif chunk_start > 0:
-            # Last chunk: trim left overlap
-            valid_time_start = chunk_start / sample_rate
-            valid_mask = times_chunk_adjusted >= valid_time_start
-            Sxx_chunk = Sxx_chunk[:, valid_mask]
-            times_chunk_adjusted = times_chunk_adjusted[valid_mask]
-        elif chunk_end < n:
-            # First chunk: trim right overlap
-            valid_time_end = chunk_end / sample_rate
-            valid_mask = times_chunk_adjusted < valid_time_end
-            Sxx_chunk = Sxx_chunk[:, valid_mask]
-            times_chunk_adjusted = times_chunk_adjusted[valid_mask]
-
-        chunks_stft.append(Sxx_chunk)
-        chunks_times.append(times_chunk_adjusted)
-
-        # Move to next chunk
+        if freq is None:
+            freq = freq_local
+
+        times_adjusted = _adjust_chunk_times(
+            times_chunk, chunk_data, data, chunk_start, chunk_end, chunk_overlap, sample_rate
+        )
+        Sxx_trimmed, times_trimmed = _trim_chunk_overlap(
+            Sxx_chunk, times_adjusted, chunk_start, chunk_end, n, sample_rate
+        )
+
+        chunks_stft.append(Sxx_trimmed)
+        chunks_times.append(times_trimmed)
         chunk_start += chunk_size

-
-
-    times = np.concatenate(chunks_times)
+    if freq is None:
+        raise ValueError("No chunks processed - data length too small")

-
-    Sxx = np.maximum(Sxx, 1e-20)
-    Sxx_db = 10 * np.log10(Sxx)
+    return chunks_stft, chunks_times, freq

-
+
+def _extract_spectrogram_chunk(
+    data: NDArray[np.float64],
+    chunk_start: int,
+    chunk_end: int,
+    chunk_overlap: int,
+    n: int,
+) -> NDArray[np.float64]:
+    """Extract chunk data with overlap.
+
+    Args:
+        data: Full data array.
+        chunk_start: Chunk start index.
+        chunk_end: Chunk end index.
+        chunk_overlap: Overlap size.
+        n: Total data length.
+
+    Returns:
+        Chunk data array.
+    """
+    chunk_data_start = chunk_start - chunk_overlap if chunk_start > 0 else 0
+    chunk_data_end = chunk_end + chunk_overlap if chunk_end < n else n
+    return data[chunk_data_start:chunk_data_end]
+
+
+def _adjust_chunk_times(
+    times_chunk: NDArray[np.float64],
+    chunk_data: NDArray[np.float64],
+    data: NDArray[np.float64],
+    chunk_start: int,
+    chunk_end: int,
+    chunk_overlap: int,
+    sample_rate: float,
+) -> NDArray[np.float64]:
+    """Adjust chunk times for global position.
+
+    Args:
+        times_chunk: Local chunk times.
+        chunk_data: Chunk data array.
+        data: Full data array.
+        chunk_start: Chunk start index.
+        chunk_end: Chunk end index.
+        chunk_overlap: Overlap size.
+        sample_rate: Sampling rate.
+
+    Returns:
+        Adjusted time array.
+    """
+    chunk_data_start = chunk_start - chunk_overlap if chunk_start > 0 else 0
+    time_offset = chunk_data_start / sample_rate
+    return times_chunk + time_offset
+
+
+def _trim_chunk_overlap(
+    Sxx_chunk: NDArray[np.float64],
+    times_adjusted: NDArray[np.float64],
+    chunk_start: int,
+    chunk_end: int,
+    n: int,
+    sample_rate: float,
+) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
+    """Trim overlap regions from chunk.
+
+    Args:
+        Sxx_chunk: Chunk spectrogram.
+        times_adjusted: Adjusted times.
+        chunk_start: Chunk start index.
+        chunk_end: Chunk end index.
+        n: Total data length.
+        sample_rate: Sampling rate.
+
+    Returns:
+        Tuple of (trimmed spectrogram, trimmed times).
+    """
+    if chunk_start > 0 and chunk_end < n:
+        # Middle chunk: trim both sides
+        valid_time_start = chunk_start / sample_rate
+        valid_time_end = chunk_end / sample_rate
+        valid_mask = (times_adjusted >= valid_time_start) & (times_adjusted < valid_time_end)
+    elif chunk_start > 0:
+        # Last chunk: trim left overlap
+        valid_time_start = chunk_start / sample_rate
+        valid_mask = times_adjusted >= valid_time_start
+    elif chunk_end < n:
+        # First chunk: trim right overlap
+        valid_time_end = chunk_end / sample_rate
+        valid_mask = times_adjusted < valid_time_end
+    else:
+        # Single chunk
+        return Sxx_chunk, times_adjusted
+
+    return Sxx_chunk[:, valid_mask], times_adjusted[valid_mask]


 def psd_chunked(
@@ -1469,51 +1688,74 @@ def psd_chunked(
     sample_rate = trace.metadata.sample_rate

     # Set default parameters
+    nperseg, noverlap, nfft = _set_psd_defaults(nperseg, noverlap, nfft, n, chunk_size)
+
+    # If data fits in one chunk, use standard PSD
+    if n <= chunk_size:
+        return psd(
+            trace, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, scaling=scaling
+        )
+
+    # Process chunks and accumulate
+    psd_sum, total_segments, freq = _process_psd_chunks(
+        data, sample_rate, chunk_size, nperseg, noverlap, nfft, window, scaling, n
+    )
+
+    # Fallback if processing failed
+    if psd_sum is None or total_segments == 0 or freq is None:
+        return psd(
+            trace, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, scaling=scaling
+        )
+
+    # Average and convert to dB
+    psd_avg = psd_sum / total_segments
+    psd_avg = np.maximum(psd_avg, 1e-20)
+    psd_db = 10 * np.log10(psd_avg)
+
+    return freq, psd_db
+
+
+def _set_psd_defaults(
+    nperseg: int | None,
+    noverlap: int | None,
+    nfft: int | None,
+    n: int,
+    chunk_size: int,
+) -> tuple[int, int, int]:
+    """Set default PSD parameters."""
     if nperseg is None:
         nperseg = max(256, min(n // 8, chunk_size // 8))
         nperseg = min(nperseg, n)
-
     if noverlap is None:
         noverlap = nperseg // 2
-
     if nfft is None:
         nfft = nperseg
+    return nperseg, noverlap, nfft

-    # If data fits in one chunk, use standard PSD
-    if n <= chunk_size:
-        return psd(
-            trace,
-            window=window,
-            nperseg=nperseg,
-            noverlap=noverlap,
-            nfft=nfft,
-            scaling=scaling,
-        )

-
-
+def _process_psd_chunks(
+    data: NDArray[np.float64],
+    sample_rate: float,
+    chunk_size: int,
+    nperseg: int,
+    noverlap: int,
+    nfft: int,
+    window: str,
+    scaling: str,
+    n: int,
+) -> tuple[NDArray[np.float64] | None, int, NDArray[np.float64] | None]:
+    """Process chunks and accumulate PSD estimates."""
     chunk_overlap = nperseg
-
-    # Accumulate PSD estimates
     psd_sum: NDArray[np.float64] | None = None
     total_segments = 0
     freq: NDArray[np.float64] | None = None
-
     chunk_start = 0
-    while chunk_start < n:
-        # Determine chunk boundaries with overlap
-        chunk_data_start = max(0, chunk_start - chunk_overlap)
-        chunk_end = min(chunk_start + chunk_size, n)
-        chunk_data_end = min(chunk_end + chunk_overlap, n)
-
-        # Extract chunk
-        chunk_data = data[chunk_data_start:chunk_data_end]

+    while chunk_start < n:
+        chunk_data = _extract_chunk_with_overlap(data, chunk_start, chunk_size, chunk_overlap, n)
         if len(chunk_data) < nperseg:
-            # Last chunk too small, skip
             break

-        # Compute Welch PSD for chunk
         f, psd_linear = sp_signal.welch(
             chunk_data,
             fs=sample_rate,
@@ -1525,9 +1767,7 @@ def psd_chunked(
             detrend="constant",
         )

-
-        hop = nperseg - noverlap
-        num_segments = max(1, (len(chunk_data) - noverlap) // hop)
+        num_segments = max(1, (len(chunk_data) - noverlap) // (nperseg - noverlap))

         if psd_sum is None:
             psd_sum = psd_linear * num_segments
@@ -1536,29 +1776,23 @@ def psd_chunked(
             psd_sum += psd_linear * num_segments

         total_segments += num_segments
-
-        # Move to next chunk
         chunk_start += chunk_size

-
-    # Fallback to standard PSD if something went wrong
-        return psd(
-            trace,
-            window=window,
-            nperseg=nperseg,
-            noverlap=noverlap,
-            nfft=nfft,
-            scaling=scaling,
-        )
+    return psd_sum, total_segments, freq

-    # Average across all segments
-    psd_avg = psd_sum / total_segments

-
-
-
-
-
+def _extract_chunk_with_overlap(
+    data: NDArray[np.float64],
+    chunk_start: int,
+    chunk_size: int,
+    chunk_overlap: int,
+    n: int,
+) -> NDArray[np.float64]:
+    """Extract chunk with overlap on both sides."""
+    chunk_data_start = max(0, chunk_start - chunk_overlap)
+    chunk_end = min(chunk_start + chunk_size, n)
+    chunk_data_end = min(chunk_end + chunk_overlap, n)
+    return data[chunk_data_start:chunk_data_end]


 def fft_chunked(
@@ -1571,7 +1805,6 @@ def fft_chunked(
 ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
     """Compute FFT for very long signals using segmented processing.

-
     Divides signal into overlapping segments, computes FFT for each,
     and averages the magnitude spectra to reduce variance.

@@ -1583,15 +1816,13 @@ def fft_chunked(
         nfft: FFT length. If None, uses segment_size.

     Returns:
-        (frequencies, magnitude_db) -
+        (frequencies, magnitude_db) - Averaged magnitude spectrum in dB.

     Raises:
         AnalysisError: If no segments were processed (empty trace).

     Example:
-        >>>
-        >>> freq, mag = fft_chunked(trace, segment_size=1_000_000, overlap_pct=50)
-        >>> print(f"Frequency resolution: {freq[1] - freq[0]:.3f} Hz")
+        >>> freq, mag = fft_chunked(trace, segment_size=1_000_000)

     References:
         Welch's method for spectral estimation
@@ -1601,47 +1832,65 @@ def fft_chunked(
     sample_rate = trace.metadata.sample_rate

     if n < segment_size:
-        # Use standard FFT if data fits in one segment
         result = fft(trace, window=window, nfft=nfft)
-        return result[0], result[1]
+        return result[0], result[1]

-    # Calculate overlap
     overlap_samples = int(segment_size * overlap_pct / 100.0)
     hop = segment_size - overlap_samples
-
-    # Determine number of segments
     num_segments = max(1, (n - overlap_samples) // hop)

     if nfft is None:
         nfft = int(2 ** np.ceil(np.log2(segment_size)))

-
+    freq, magnitude_sum = _accumulate_fft_segments(
+        data, n, num_segments, hop, segment_size, window, nfft, sample_rate
+    )
+
+    magnitude_avg = magnitude_sum / num_segments
+    magnitude_avg = np.maximum(magnitude_avg, 1e-20)
+    magnitude_db = 20 * np.log10(magnitude_avg)
+
+    return freq, magnitude_db
+
+
+def _accumulate_fft_segments(
+    data: NDArray[np.float64],
+    n: int,
+    num_segments: int,
+    hop: int,
+    segment_size: int,
+    window: str,
+    nfft: int,
+    sample_rate: float,
+) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
+    """Accumulate FFT magnitudes from all segments.
+
+    Args:
+        data: Signal data.
+        n: Data length.
+        num_segments: Number of segments.
+        hop: Hop size between segments.
+        segment_size: Size of each segment.
+        window: Window function name.
+        nfft: FFT length.
+        sample_rate: Sampling rate.
+
+    Returns:
+        Tuple of (freq, magnitude_sum).
+
+    Raises:
+        AnalysisError: If no segments processed.
+    """
     freq: NDArray[np.float64] | None = None
     magnitude_sum: NDArray[np.float64] | None = None
     w = get_window(window, segment_size)
     window_gain = np.sum(w) / segment_size

     for i in range(num_segments):
-
-        end = min(start + segment_size, n)
-
-        if end - start < segment_size:
-            # Last segment might be shorter, pad with zeros
-            segment = np.zeros(segment_size)
-            segment[: end - start] = data[start:end]
-        else:
-            segment = data[start:end]
-
-        # Detrend
+        segment = _extract_fft_segment(data, i, hop, segment_size, n)
         segment = segment - np.mean(segment)
-
-        # Window
         segment_windowed = segment * w
-
-        # FFT
         spectrum = np.fft.rfft(segment_windowed, n=nfft)
-
-        # Magnitude
         magnitude = np.abs(spectrum) / (segment_size * window_gain)

         if magnitude_sum is None:
@@ -1650,18 +1899,41 @@ def fft_chunked(
         else:
             magnitude_sum += magnitude

-
-    if magnitude_sum is None:
+    if magnitude_sum is None or freq is None:
         raise AnalysisError("No segments were processed - input trace may be empty")
-    if freq is None:
-        raise AnalysisError("Frequency array was not initialized - internal error")
-    magnitude_avg = magnitude_sum / num_segments

-
-    magnitude_avg = np.maximum(magnitude_avg, 1e-20)
-    magnitude_db = 20 * np.log10(magnitude_avg)
+    return freq, magnitude_sum

-
+
+def _extract_fft_segment(
+    data: NDArray[np.float64],
+    segment_idx: int,
+    hop: int,
+    segment_size: int,
+    n: int,
+) -> NDArray[np.float64]:
+    """Extract segment for FFT processing.
+
+    Args:
+        data: Full data array.
+        segment_idx: Segment index.
+        hop: Hop size.
+        segment_size: Segment size.
+        n: Total data length.
+
+    Returns:
+        Segment data (padded if needed).
+    """
+    start = segment_idx * hop
+    end = min(start + segment_size, n)
+
+    if end - start < segment_size:
+        segment = np.zeros(segment_size)
+        segment[: end - start] = data[start:end]
+    else:
+        segment = data[start:end]
+
+    return segment


 __all__ = [
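
Note: the refactored fft_chunked / _accumulate_fft_segments path above averages per-segment magnitude spectra instead of transforming the whole record at once. A minimal, self-contained sketch of that averaging idea in plain NumPy (illustrative function and variable names only, not the oscura API):

import numpy as np


def averaged_magnitude_spectrum(data: np.ndarray, segment_size: int, hop: int) -> np.ndarray:
    """Average |FFT| over overlapping, windowed segments (Welch-style magnitude averaging)."""
    w = np.hanning(segment_size)
    gain = w.sum() / segment_size  # amplitude correction for the window
    noverlap = segment_size - hop
    num_segments = max(1, (len(data) - noverlap) // hop)
    acc = np.zeros(segment_size // 2 + 1)  # rfft output length
    for i in range(num_segments):
        seg = data[i * hop : i * hop + segment_size]
        if len(seg) < segment_size:  # zero-pad a short trailing segment
            seg = np.pad(seg, (0, segment_size - len(seg)))
        seg = (seg - seg.mean()) * w  # mean-detrend, then window
        acc += np.abs(np.fft.rfft(seg)) / (segment_size * gain)
    return acc / num_segments


# Example: locate a 1 kHz tone sampled at 48 kHz
fs = 48_000.0
t = np.arange(200_000) / fs
x = np.sin(2 * np.pi * 1_000.0 * t)
avg = averaged_magnitude_spectrum(x, segment_size=4096, hop=2048)
freqs = np.fft.rfftfreq(4096, d=1.0 / fs)
print(f"peak at {freqs[np.argmax(avg)]:.1f} Hz")  # approximately 1000 Hz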
|