oscura-0.0.1-py3-none-any.whl → oscura-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +813 -8
- oscura/__main__.py +392 -0
- oscura/analyzers/__init__.py +37 -0
- oscura/analyzers/digital/__init__.py +177 -0
- oscura/analyzers/digital/bus.py +691 -0
- oscura/analyzers/digital/clock.py +805 -0
- oscura/analyzers/digital/correlation.py +720 -0
- oscura/analyzers/digital/edges.py +632 -0
- oscura/analyzers/digital/extraction.py +413 -0
- oscura/analyzers/digital/quality.py +878 -0
- oscura/analyzers/digital/signal_quality.py +877 -0
- oscura/analyzers/digital/thresholds.py +708 -0
- oscura/analyzers/digital/timing.py +1104 -0
- oscura/analyzers/eye/__init__.py +46 -0
- oscura/analyzers/eye/diagram.py +434 -0
- oscura/analyzers/eye/metrics.py +555 -0
- oscura/analyzers/jitter/__init__.py +83 -0
- oscura/analyzers/jitter/ber.py +333 -0
- oscura/analyzers/jitter/decomposition.py +759 -0
- oscura/analyzers/jitter/measurements.py +413 -0
- oscura/analyzers/jitter/spectrum.py +220 -0
- oscura/analyzers/measurements.py +40 -0
- oscura/analyzers/packet/__init__.py +171 -0
- oscura/analyzers/packet/daq.py +1077 -0
- oscura/analyzers/packet/metrics.py +437 -0
- oscura/analyzers/packet/parser.py +327 -0
- oscura/analyzers/packet/payload.py +2156 -0
- oscura/analyzers/packet/payload_analysis.py +1312 -0
- oscura/analyzers/packet/payload_extraction.py +236 -0
- oscura/analyzers/packet/payload_patterns.py +670 -0
- oscura/analyzers/packet/stream.py +359 -0
- oscura/analyzers/patterns/__init__.py +266 -0
- oscura/analyzers/patterns/clustering.py +1036 -0
- oscura/analyzers/patterns/discovery.py +539 -0
- oscura/analyzers/patterns/learning.py +797 -0
- oscura/analyzers/patterns/matching.py +1091 -0
- oscura/analyzers/patterns/periodic.py +650 -0
- oscura/analyzers/patterns/sequences.py +767 -0
- oscura/analyzers/power/__init__.py +116 -0
- oscura/analyzers/power/ac_power.py +391 -0
- oscura/analyzers/power/basic.py +383 -0
- oscura/analyzers/power/conduction.py +314 -0
- oscura/analyzers/power/efficiency.py +297 -0
- oscura/analyzers/power/ripple.py +356 -0
- oscura/analyzers/power/soa.py +372 -0
- oscura/analyzers/power/switching.py +479 -0
- oscura/analyzers/protocol/__init__.py +150 -0
- oscura/analyzers/protocols/__init__.py +150 -0
- oscura/analyzers/protocols/base.py +500 -0
- oscura/analyzers/protocols/can.py +620 -0
- oscura/analyzers/protocols/can_fd.py +448 -0
- oscura/analyzers/protocols/flexray.py +405 -0
- oscura/analyzers/protocols/hdlc.py +399 -0
- oscura/analyzers/protocols/i2c.py +368 -0
- oscura/analyzers/protocols/i2s.py +296 -0
- oscura/analyzers/protocols/jtag.py +393 -0
- oscura/analyzers/protocols/lin.py +445 -0
- oscura/analyzers/protocols/manchester.py +333 -0
- oscura/analyzers/protocols/onewire.py +501 -0
- oscura/analyzers/protocols/spi.py +334 -0
- oscura/analyzers/protocols/swd.py +325 -0
- oscura/analyzers/protocols/uart.py +393 -0
- oscura/analyzers/protocols/usb.py +495 -0
- oscura/analyzers/signal_integrity/__init__.py +63 -0
- oscura/analyzers/signal_integrity/embedding.py +294 -0
- oscura/analyzers/signal_integrity/equalization.py +370 -0
- oscura/analyzers/signal_integrity/sparams.py +484 -0
- oscura/analyzers/spectral/__init__.py +53 -0
- oscura/analyzers/spectral/chunked.py +273 -0
- oscura/analyzers/spectral/chunked_fft.py +571 -0
- oscura/analyzers/spectral/chunked_wavelet.py +391 -0
- oscura/analyzers/spectral/fft.py +92 -0
- oscura/analyzers/statistical/__init__.py +250 -0
- oscura/analyzers/statistical/checksum.py +923 -0
- oscura/analyzers/statistical/chunked_corr.py +228 -0
- oscura/analyzers/statistical/classification.py +778 -0
- oscura/analyzers/statistical/entropy.py +1113 -0
- oscura/analyzers/statistical/ngrams.py +614 -0
- oscura/analyzers/statistics/__init__.py +119 -0
- oscura/analyzers/statistics/advanced.py +885 -0
- oscura/analyzers/statistics/basic.py +263 -0
- oscura/analyzers/statistics/correlation.py +630 -0
- oscura/analyzers/statistics/distribution.py +298 -0
- oscura/analyzers/statistics/outliers.py +463 -0
- oscura/analyzers/statistics/streaming.py +93 -0
- oscura/analyzers/statistics/trend.py +520 -0
- oscura/analyzers/validation.py +598 -0
- oscura/analyzers/waveform/__init__.py +36 -0
- oscura/analyzers/waveform/measurements.py +943 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +371 -0
- oscura/analyzers/waveform/spectral.py +1689 -0
- oscura/analyzers/waveform/wavelets.py +298 -0
- oscura/api/__init__.py +62 -0
- oscura/api/dsl.py +538 -0
- oscura/api/fluent.py +571 -0
- oscura/api/operators.py +498 -0
- oscura/api/optimization.py +392 -0
- oscura/api/profiling.py +396 -0
- oscura/automotive/__init__.py +73 -0
- oscura/automotive/can/__init__.py +52 -0
- oscura/automotive/can/analysis.py +356 -0
- oscura/automotive/can/checksum.py +250 -0
- oscura/automotive/can/correlation.py +212 -0
- oscura/automotive/can/discovery.py +355 -0
- oscura/automotive/can/message_wrapper.py +375 -0
- oscura/automotive/can/models.py +385 -0
- oscura/automotive/can/patterns.py +381 -0
- oscura/automotive/can/session.py +452 -0
- oscura/automotive/can/state_machine.py +300 -0
- oscura/automotive/can/stimulus_response.py +461 -0
- oscura/automotive/dbc/__init__.py +15 -0
- oscura/automotive/dbc/generator.py +156 -0
- oscura/automotive/dbc/parser.py +146 -0
- oscura/automotive/dtc/__init__.py +30 -0
- oscura/automotive/dtc/database.py +3036 -0
- oscura/automotive/j1939/__init__.py +14 -0
- oscura/automotive/j1939/decoder.py +745 -0
- oscura/automotive/loaders/__init__.py +35 -0
- oscura/automotive/loaders/asc.py +98 -0
- oscura/automotive/loaders/blf.py +77 -0
- oscura/automotive/loaders/csv_can.py +136 -0
- oscura/automotive/loaders/dispatcher.py +136 -0
- oscura/automotive/loaders/mdf.py +331 -0
- oscura/automotive/loaders/pcap.py +132 -0
- oscura/automotive/obd/__init__.py +14 -0
- oscura/automotive/obd/decoder.py +707 -0
- oscura/automotive/uds/__init__.py +48 -0
- oscura/automotive/uds/decoder.py +265 -0
- oscura/automotive/uds/models.py +64 -0
- oscura/automotive/visualization.py +369 -0
- oscura/batch/__init__.py +55 -0
- oscura/batch/advanced.py +627 -0
- oscura/batch/aggregate.py +300 -0
- oscura/batch/analyze.py +139 -0
- oscura/batch/logging.py +487 -0
- oscura/batch/metrics.py +556 -0
- oscura/builders/__init__.py +41 -0
- oscura/builders/signal_builder.py +1131 -0
- oscura/cli/__init__.py +14 -0
- oscura/cli/batch.py +339 -0
- oscura/cli/characterize.py +273 -0
- oscura/cli/compare.py +775 -0
- oscura/cli/decode.py +551 -0
- oscura/cli/main.py +247 -0
- oscura/cli/shell.py +350 -0
- oscura/comparison/__init__.py +66 -0
- oscura/comparison/compare.py +397 -0
- oscura/comparison/golden.py +487 -0
- oscura/comparison/limits.py +391 -0
- oscura/comparison/mask.py +434 -0
- oscura/comparison/trace_diff.py +30 -0
- oscura/comparison/visualization.py +481 -0
- oscura/compliance/__init__.py +70 -0
- oscura/compliance/advanced.py +756 -0
- oscura/compliance/masks.py +363 -0
- oscura/compliance/reporting.py +483 -0
- oscura/compliance/testing.py +298 -0
- oscura/component/__init__.py +38 -0
- oscura/component/impedance.py +365 -0
- oscura/component/reactive.py +598 -0
- oscura/component/transmission_line.py +312 -0
- oscura/config/__init__.py +191 -0
- oscura/config/defaults.py +254 -0
- oscura/config/loader.py +348 -0
- oscura/config/memory.py +271 -0
- oscura/config/migration.py +458 -0
- oscura/config/pipeline.py +1077 -0
- oscura/config/preferences.py +530 -0
- oscura/config/protocol.py +875 -0
- oscura/config/schema.py +713 -0
- oscura/config/settings.py +420 -0
- oscura/config/thresholds.py +599 -0
- oscura/convenience.py +457 -0
- oscura/core/__init__.py +299 -0
- oscura/core/audit.py +457 -0
- oscura/core/backend_selector.py +405 -0
- oscura/core/cache.py +590 -0
- oscura/core/cancellation.py +439 -0
- oscura/core/confidence.py +225 -0
- oscura/core/config.py +506 -0
- oscura/core/correlation.py +216 -0
- oscura/core/cross_domain.py +422 -0
- oscura/core/debug.py +301 -0
- oscura/core/edge_cases.py +541 -0
- oscura/core/exceptions.py +535 -0
- oscura/core/gpu_backend.py +523 -0
- oscura/core/lazy.py +832 -0
- oscura/core/log_query.py +540 -0
- oscura/core/logging.py +931 -0
- oscura/core/logging_advanced.py +952 -0
- oscura/core/memoize.py +171 -0
- oscura/core/memory_check.py +274 -0
- oscura/core/memory_guard.py +290 -0
- oscura/core/memory_limits.py +336 -0
- oscura/core/memory_monitor.py +453 -0
- oscura/core/memory_progress.py +465 -0
- oscura/core/memory_warnings.py +315 -0
- oscura/core/numba_backend.py +362 -0
- oscura/core/performance.py +352 -0
- oscura/core/progress.py +524 -0
- oscura/core/provenance.py +358 -0
- oscura/core/results.py +331 -0
- oscura/core/types.py +504 -0
- oscura/core/uncertainty.py +383 -0
- oscura/discovery/__init__.py +52 -0
- oscura/discovery/anomaly_detector.py +672 -0
- oscura/discovery/auto_decoder.py +415 -0
- oscura/discovery/comparison.py +497 -0
- oscura/discovery/quality_validator.py +528 -0
- oscura/discovery/signal_detector.py +769 -0
- oscura/dsl/__init__.py +73 -0
- oscura/dsl/commands.py +246 -0
- oscura/dsl/interpreter.py +455 -0
- oscura/dsl/parser.py +689 -0
- oscura/dsl/repl.py +172 -0
- oscura/exceptions.py +59 -0
- oscura/exploratory/__init__.py +111 -0
- oscura/exploratory/error_recovery.py +642 -0
- oscura/exploratory/fuzzy.py +513 -0
- oscura/exploratory/fuzzy_advanced.py +786 -0
- oscura/exploratory/legacy.py +831 -0
- oscura/exploratory/parse.py +358 -0
- oscura/exploratory/recovery.py +275 -0
- oscura/exploratory/sync.py +382 -0
- oscura/exploratory/unknown.py +707 -0
- oscura/export/__init__.py +25 -0
- oscura/export/wireshark/README.md +265 -0
- oscura/export/wireshark/__init__.py +47 -0
- oscura/export/wireshark/generator.py +312 -0
- oscura/export/wireshark/lua_builder.py +159 -0
- oscura/export/wireshark/templates/dissector.lua.j2 +92 -0
- oscura/export/wireshark/type_mapping.py +165 -0
- oscura/export/wireshark/validator.py +105 -0
- oscura/exporters/__init__.py +94 -0
- oscura/exporters/csv.py +303 -0
- oscura/exporters/exporters.py +44 -0
- oscura/exporters/hdf5.py +219 -0
- oscura/exporters/html_export.py +701 -0
- oscura/exporters/json_export.py +291 -0
- oscura/exporters/markdown_export.py +367 -0
- oscura/exporters/matlab_export.py +354 -0
- oscura/exporters/npz_export.py +219 -0
- oscura/exporters/spice_export.py +210 -0
- oscura/extensibility/__init__.py +131 -0
- oscura/extensibility/docs.py +752 -0
- oscura/extensibility/extensions.py +1125 -0
- oscura/extensibility/logging.py +259 -0
- oscura/extensibility/measurements.py +485 -0
- oscura/extensibility/plugins.py +414 -0
- oscura/extensibility/registry.py +346 -0
- oscura/extensibility/templates.py +913 -0
- oscura/extensibility/validation.py +651 -0
- oscura/filtering/__init__.py +89 -0
- oscura/filtering/base.py +563 -0
- oscura/filtering/convenience.py +564 -0
- oscura/filtering/design.py +725 -0
- oscura/filtering/filters.py +32 -0
- oscura/filtering/introspection.py +605 -0
- oscura/guidance/__init__.py +24 -0
- oscura/guidance/recommender.py +429 -0
- oscura/guidance/wizard.py +518 -0
- oscura/inference/__init__.py +251 -0
- oscura/inference/active_learning/README.md +153 -0
- oscura/inference/active_learning/__init__.py +38 -0
- oscura/inference/active_learning/lstar.py +257 -0
- oscura/inference/active_learning/observation_table.py +230 -0
- oscura/inference/active_learning/oracle.py +78 -0
- oscura/inference/active_learning/teachers/__init__.py +15 -0
- oscura/inference/active_learning/teachers/simulator.py +192 -0
- oscura/inference/adaptive_tuning.py +453 -0
- oscura/inference/alignment.py +653 -0
- oscura/inference/bayesian.py +943 -0
- oscura/inference/binary.py +1016 -0
- oscura/inference/crc_reverse.py +711 -0
- oscura/inference/logic.py +288 -0
- oscura/inference/message_format.py +1305 -0
- oscura/inference/protocol.py +417 -0
- oscura/inference/protocol_dsl.py +1084 -0
- oscura/inference/protocol_library.py +1230 -0
- oscura/inference/sequences.py +809 -0
- oscura/inference/signal_intelligence.py +1509 -0
- oscura/inference/spectral.py +215 -0
- oscura/inference/state_machine.py +634 -0
- oscura/inference/stream.py +918 -0
- oscura/integrations/__init__.py +59 -0
- oscura/integrations/llm.py +1827 -0
- oscura/jupyter/__init__.py +32 -0
- oscura/jupyter/display.py +268 -0
- oscura/jupyter/magic.py +334 -0
- oscura/loaders/__init__.py +526 -0
- oscura/loaders/binary.py +69 -0
- oscura/loaders/configurable.py +1255 -0
- oscura/loaders/csv.py +26 -0
- oscura/loaders/csv_loader.py +473 -0
- oscura/loaders/hdf5.py +9 -0
- oscura/loaders/hdf5_loader.py +510 -0
- oscura/loaders/lazy.py +370 -0
- oscura/loaders/mmap_loader.py +583 -0
- oscura/loaders/numpy_loader.py +436 -0
- oscura/loaders/pcap.py +432 -0
- oscura/loaders/preprocessing.py +368 -0
- oscura/loaders/rigol.py +287 -0
- oscura/loaders/sigrok.py +321 -0
- oscura/loaders/tdms.py +367 -0
- oscura/loaders/tektronix.py +711 -0
- oscura/loaders/validation.py +584 -0
- oscura/loaders/vcd.py +464 -0
- oscura/loaders/wav.py +233 -0
- oscura/math/__init__.py +45 -0
- oscura/math/arithmetic.py +824 -0
- oscura/math/interpolation.py +413 -0
- oscura/onboarding/__init__.py +39 -0
- oscura/onboarding/help.py +498 -0
- oscura/onboarding/tutorials.py +405 -0
- oscura/onboarding/wizard.py +466 -0
- oscura/optimization/__init__.py +19 -0
- oscura/optimization/parallel.py +440 -0
- oscura/optimization/search.py +532 -0
- oscura/pipeline/__init__.py +43 -0
- oscura/pipeline/base.py +338 -0
- oscura/pipeline/composition.py +242 -0
- oscura/pipeline/parallel.py +448 -0
- oscura/pipeline/pipeline.py +375 -0
- oscura/pipeline/reverse_engineering.py +1119 -0
- oscura/plugins/__init__.py +122 -0
- oscura/plugins/base.py +272 -0
- oscura/plugins/cli.py +497 -0
- oscura/plugins/discovery.py +411 -0
- oscura/plugins/isolation.py +418 -0
- oscura/plugins/lifecycle.py +959 -0
- oscura/plugins/manager.py +493 -0
- oscura/plugins/registry.py +421 -0
- oscura/plugins/versioning.py +372 -0
- oscura/py.typed +0 -0
- oscura/quality/__init__.py +65 -0
- oscura/quality/ensemble.py +740 -0
- oscura/quality/explainer.py +338 -0
- oscura/quality/scoring.py +616 -0
- oscura/quality/warnings.py +456 -0
- oscura/reporting/__init__.py +248 -0
- oscura/reporting/advanced.py +1234 -0
- oscura/reporting/analyze.py +448 -0
- oscura/reporting/argument_preparer.py +596 -0
- oscura/reporting/auto_report.py +507 -0
- oscura/reporting/batch.py +615 -0
- oscura/reporting/chart_selection.py +223 -0
- oscura/reporting/comparison.py +330 -0
- oscura/reporting/config.py +615 -0
- oscura/reporting/content/__init__.py +39 -0
- oscura/reporting/content/executive.py +127 -0
- oscura/reporting/content/filtering.py +191 -0
- oscura/reporting/content/minimal.py +257 -0
- oscura/reporting/content/verbosity.py +162 -0
- oscura/reporting/core.py +508 -0
- oscura/reporting/core_formats/__init__.py +17 -0
- oscura/reporting/core_formats/multi_format.py +210 -0
- oscura/reporting/engine.py +836 -0
- oscura/reporting/export.py +366 -0
- oscura/reporting/formatting/__init__.py +129 -0
- oscura/reporting/formatting/emphasis.py +81 -0
- oscura/reporting/formatting/numbers.py +403 -0
- oscura/reporting/formatting/standards.py +55 -0
- oscura/reporting/formatting.py +466 -0
- oscura/reporting/html.py +578 -0
- oscura/reporting/index.py +590 -0
- oscura/reporting/multichannel.py +296 -0
- oscura/reporting/output.py +379 -0
- oscura/reporting/pdf.py +373 -0
- oscura/reporting/plots.py +731 -0
- oscura/reporting/pptx_export.py +360 -0
- oscura/reporting/renderers/__init__.py +11 -0
- oscura/reporting/renderers/pdf.py +94 -0
- oscura/reporting/sections.py +471 -0
- oscura/reporting/standards.py +680 -0
- oscura/reporting/summary_generator.py +368 -0
- oscura/reporting/tables.py +397 -0
- oscura/reporting/template_system.py +724 -0
- oscura/reporting/templates/__init__.py +15 -0
- oscura/reporting/templates/definition.py +205 -0
- oscura/reporting/templates/index.html +649 -0
- oscura/reporting/templates/index.md +173 -0
- oscura/schemas/__init__.py +158 -0
- oscura/schemas/bus_configuration.json +322 -0
- oscura/schemas/device_mapping.json +182 -0
- oscura/schemas/packet_format.json +418 -0
- oscura/schemas/protocol_definition.json +363 -0
- oscura/search/__init__.py +16 -0
- oscura/search/anomaly.py +292 -0
- oscura/search/context.py +149 -0
- oscura/search/pattern.py +160 -0
- oscura/session/__init__.py +34 -0
- oscura/session/annotations.py +289 -0
- oscura/session/history.py +313 -0
- oscura/session/session.py +445 -0
- oscura/streaming/__init__.py +43 -0
- oscura/streaming/chunked.py +611 -0
- oscura/streaming/progressive.py +393 -0
- oscura/streaming/realtime.py +622 -0
- oscura/testing/__init__.py +54 -0
- oscura/testing/synthetic.py +808 -0
- oscura/triggering/__init__.py +68 -0
- oscura/triggering/base.py +229 -0
- oscura/triggering/edge.py +353 -0
- oscura/triggering/pattern.py +344 -0
- oscura/triggering/pulse.py +581 -0
- oscura/triggering/window.py +453 -0
- oscura/ui/__init__.py +48 -0
- oscura/ui/formatters.py +526 -0
- oscura/ui/progressive_display.py +340 -0
- oscura/utils/__init__.py +99 -0
- oscura/utils/autodetect.py +338 -0
- oscura/utils/buffer.py +389 -0
- oscura/utils/lazy.py +407 -0
- oscura/utils/lazy_imports.py +147 -0
- oscura/utils/memory.py +836 -0
- oscura/utils/memory_advanced.py +1326 -0
- oscura/utils/memory_extensions.py +465 -0
- oscura/utils/progressive.py +352 -0
- oscura/utils/windowing.py +362 -0
- oscura/visualization/__init__.py +321 -0
- oscura/visualization/accessibility.py +526 -0
- oscura/visualization/annotations.py +374 -0
- oscura/visualization/axis_scaling.py +305 -0
- oscura/visualization/colors.py +453 -0
- oscura/visualization/digital.py +337 -0
- oscura/visualization/eye.py +420 -0
- oscura/visualization/histogram.py +281 -0
- oscura/visualization/interactive.py +858 -0
- oscura/visualization/jitter.py +702 -0
- oscura/visualization/keyboard.py +394 -0
- oscura/visualization/layout.py +365 -0
- oscura/visualization/optimization.py +1028 -0
- oscura/visualization/palettes.py +446 -0
- oscura/visualization/plot.py +92 -0
- oscura/visualization/power.py +290 -0
- oscura/visualization/power_extended.py +626 -0
- oscura/visualization/presets.py +467 -0
- oscura/visualization/protocols.py +932 -0
- oscura/visualization/render.py +207 -0
- oscura/visualization/rendering.py +444 -0
- oscura/visualization/reverse_engineering.py +791 -0
- oscura/visualization/signal_integrity.py +808 -0
- oscura/visualization/specialized.py +553 -0
- oscura/visualization/spectral.py +811 -0
- oscura/visualization/styles.py +381 -0
- oscura/visualization/thumbnails.py +311 -0
- oscura/visualization/time_axis.py +351 -0
- oscura/visualization/waveform.py +367 -0
- oscura/workflow/__init__.py +13 -0
- oscura/workflow/dag.py +377 -0
- oscura/workflows/__init__.py +58 -0
- oscura/workflows/compliance.py +280 -0
- oscura/workflows/digital.py +272 -0
- oscura/workflows/multi_trace.py +502 -0
- oscura/workflows/power.py +178 -0
- oscura/workflows/protocol.py +492 -0
- oscura/workflows/reverse_engineering.py +639 -0
- oscura/workflows/signal_integrity.py +227 -0
- oscura-0.1.0.dist-info/METADATA +300 -0
- oscura-0.1.0.dist-info/RECORD +463 -0
- oscura-0.1.0.dist-info/entry_points.txt +2 -0
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/licenses/LICENSE +1 -1
- oscura-0.0.1.dist-info/METADATA +0 -63
- oscura-0.0.1.dist-info/RECORD +0 -5
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1509 @@
|
|
|
1
|
+
"""Signal classification and measurement intelligence for TraceKit.
|
|
2
|
+
|
|
3
|
+
This module provides intelligent signal type detection, quality assessment,
|
|
4
|
+
and measurement suitability checking to help users understand why they might
|
|
5
|
+
get NaN results and which measurements are appropriate for their signals.
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
Example:
|
|
9
|
+
>>> import oscura as tk
|
|
10
|
+
>>> trace = tk.load('signal.wfm')
|
|
11
|
+
>>> classification = tk.classify_signal(trace)
|
|
12
|
+
>>> print(f"Signal type: {classification['type']}")
|
|
13
|
+
>>> print(f"Characteristics: {classification['characteristics']}")
|
|
14
|
+
>>> quality = tk.assess_signal_quality(trace)
|
|
15
|
+
>>> print(f"SNR: {quality['snr']:.1f} dB")
|
|
16
|
+
>>> suggestions = tk.suggest_measurements(trace)
|
|
17
|
+
>>> print(f"Recommended measurements: {suggestions}")
|
|
18
|
+
|
|
19
|
+
References:
|
|
20
|
+
IEEE 181-2011: Standard for Transitional Waveform Definitions
|
|
21
|
+
IEEE 1057-2017: Standard for Digitizing Waveform Recorders
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
from __future__ import annotations
|
|
25
|
+
|
|
26
|
+
from dataclasses import dataclass
|
|
27
|
+
from typing import TYPE_CHECKING, Any, cast
|
|
28
|
+
|
|
29
|
+
import numpy as np
|
|
30
|
+
|
|
31
|
+
if TYPE_CHECKING:
|
|
32
|
+
from numpy.typing import NDArray
|
|
33
|
+
|
|
34
|
+
from oscura.core.types import WaveformTrace
|
|
35
|
+
from oscura.reporting.config import AnalysisDomain
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def classify_signal(
|
|
39
|
+
trace: WaveformTrace | NDArray[np.floating[Any]],
|
|
40
|
+
sample_rate: float = 1.0,
|
|
41
|
+
*,
|
|
42
|
+
digital_threshold_ratio: float = 0.8,
|
|
43
|
+
dc_threshold_percent: float = 90.0,
|
|
44
|
+
periodicity_threshold: float = 0.7,
|
|
45
|
+
) -> dict[str, Any]:
|
|
46
|
+
"""Classify signal type and characteristics.
|
|
47
|
+
|
|
48
|
+
Automatically detects whether a signal is digital, analog, or mixed,
|
|
49
|
+
identifies key characteristics like periodicity and noise, and estimates
|
|
50
|
+
fundamental properties.
|
|
51
|
+
|
|
52
|
+
Args:
|
|
53
|
+
trace: Input waveform trace or numpy array to classify.
|
|
54
|
+
sample_rate: Sample rate in Hz (only used if trace is ndarray).
|
|
55
|
+
digital_threshold_ratio: Ratio of samples at two levels to consider digital (0-1).
|
|
56
|
+
dc_threshold_percent: Percentage of DC component to classify as DC signal.
|
|
57
|
+
periodicity_threshold: Correlation threshold for periodic detection (0-1).
|
|
58
|
+
|
|
59
|
+
Returns:
|
|
60
|
+
Dictionary containing:
|
|
61
|
+
- signal_type: Signal type ("digital", "analog", "mixed", "dc")
|
|
62
|
+
- is_digital: Boolean indicating if signal is digital
|
|
63
|
+
- is_periodic: Boolean indicating if signal is periodic
|
|
64
|
+
- characteristics: List of characteristics like "periodic", "noisy", "pulsed"
|
|
65
|
+
- dc_component: True if significant DC offset present
|
|
66
|
+
- frequency_estimate: Estimated fundamental frequency in Hz (or None)
|
|
67
|
+
- dominant_frequency: Same as frequency_estimate (for compatibility)
|
|
68
|
+
- snr_db: Estimated SNR in dB (or None)
|
|
69
|
+
- confidence: Classification confidence (0.0-1.0)
|
|
70
|
+
- noise_level: Estimated noise level in signal units
|
|
71
|
+
- levels: For digital signals, dict with "low" and "high" levels
|
|
72
|
+
|
|
73
|
+
Example:
|
|
74
|
+
>>> trace = tk.load('square_wave.wfm')
|
|
75
|
+
>>> info = tk.classify_signal(trace)
|
|
76
|
+
>>> print(f"Type: {info['signal_type']}")
|
|
77
|
+
Type: digital
|
|
78
|
+
>>> print(f"Characteristics: {info['characteristics']}")
|
|
79
|
+
Characteristics: ['periodic', 'clean']
|
|
80
|
+
>>> print(f"Frequency: {info['frequency_estimate']:.3e} Hz")
|
|
81
|
+
Frequency: 1.000e+06 Hz
|
|
82
|
+
|
|
83
|
+
References:
|
|
84
|
+
IEEE 181-2011: Digital waveform characterization
|
|
85
|
+
"""
|
|
86
|
+
# Handle both WaveformTrace and ndarray inputs
|
|
87
|
+
if isinstance(trace, np.ndarray):
|
|
88
|
+
data = trace
|
|
89
|
+
trace_sample_rate = sample_rate
|
|
90
|
+
else:
|
|
91
|
+
data = trace.data
|
|
92
|
+
trace_sample_rate = trace.metadata.sample_rate
|
|
93
|
+
|
|
94
|
+
n = len(data)
|
|
95
|
+
|
|
96
|
+
if n < 10:
|
|
97
|
+
return {
|
|
98
|
+
"type": "unknown",
|
|
99
|
+
"signal_type": "unknown",
|
|
100
|
+
"is_digital": False,
|
|
101
|
+
"is_periodic": False,
|
|
102
|
+
"characteristics": ["insufficient_data"],
|
|
103
|
+
"dc_component": False,
|
|
104
|
+
"frequency_estimate": None,
|
|
105
|
+
"dominant_frequency": None,
|
|
106
|
+
"snr_db": None,
|
|
107
|
+
"confidence": 0.0,
|
|
108
|
+
"noise_level": 0.0,
|
|
109
|
+
"levels": None,
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
# Calculate basic statistics
|
|
113
|
+
mean_val = float(np.mean(data))
|
|
114
|
+
std_val = float(np.std(data))
|
|
115
|
+
min_val = float(np.min(data))
|
|
116
|
+
max_val = float(np.max(data))
|
|
117
|
+
amplitude = max_val - min_val
|
|
118
|
+
|
|
119
|
+
# Initialize result
|
|
120
|
+
characteristics = []
|
|
121
|
+
signal_type = "analog"
|
|
122
|
+
confidence = 0.5
|
|
123
|
+
|
|
124
|
+
# 1. Check for DC signal (very low variation)
|
|
125
|
+
# Use coefficient of variation (CV) for DC detection
|
|
126
|
+
cv = std_val / (abs(mean_val) + amplitude / 2 + 1e-12)
|
|
127
|
+
if amplitude < 1e-9 or cv < 0.005: # Less than 0.5% variation
|
|
128
|
+
signal_type = "dc"
|
|
129
|
+
characteristics.append("constant")
|
|
130
|
+
confidence = 0.95
|
|
131
|
+
return {
|
|
132
|
+
"type": signal_type,
|
|
133
|
+
"signal_type": signal_type,
|
|
134
|
+
"is_digital": False,
|
|
135
|
+
"is_periodic": False,
|
|
136
|
+
"characteristics": characteristics,
|
|
137
|
+
"dc_component": True,
|
|
138
|
+
"frequency_estimate": None,
|
|
139
|
+
"dominant_frequency": None,
|
|
140
|
+
"snr_db": None,
|
|
141
|
+
"confidence": confidence,
|
|
142
|
+
"noise_level": std_val,
|
|
143
|
+
"levels": None,
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
# 2. Check for digital signal (bimodal distribution)
|
|
147
|
+
is_digital, digital_levels, digital_confidence = _detect_digital_signal(
|
|
148
|
+
data, digital_threshold_ratio
|
|
149
|
+
)
|
|
150
|
+
|
|
151
|
+
if is_digital:
|
|
152
|
+
signal_type = "digital"
|
|
153
|
+
confidence = digital_confidence
|
|
154
|
+
characteristics.append("digital_levels")
|
|
155
|
+
|
|
156
|
+
# 3. Assess noise level
|
|
157
|
+
noise_level = _estimate_noise_level(data)
|
|
158
|
+
noise_ratio = noise_level / (amplitude + 1e-12)
|
|
159
|
+
|
|
160
|
+
if noise_ratio < 0.05:
|
|
161
|
+
characteristics.append("clean")
|
|
162
|
+
elif noise_ratio < 0.15:
|
|
163
|
+
characteristics.append("low_noise")
|
|
164
|
+
elif noise_ratio < 0.30:
|
|
165
|
+
characteristics.append("moderate_noise")
|
|
166
|
+
else:
|
|
167
|
+
characteristics.append("noisy")
|
|
168
|
+
|
|
169
|
+
# 4. Check for periodicity
|
|
170
|
+
is_periodic, period_estimate, periodicity_score = _detect_periodicity(
|
|
171
|
+
data, trace_sample_rate, periodicity_threshold
|
|
172
|
+
)
|
|
173
|
+
|
|
174
|
+
# For digital signals, also try edge-based periodicity detection
|
|
175
|
+
# This works better for signals with few periods
|
|
176
|
+
if not is_periodic and is_digital:
|
|
177
|
+
edge_periodic, edge_period, edge_confidence = _detect_edge_periodicity(
|
|
178
|
+
data, trace_sample_rate, digital_levels
|
|
179
|
+
)
|
|
180
|
+
if edge_periodic:
|
|
181
|
+
is_periodic = edge_periodic
|
|
182
|
+
period_estimate = edge_period
|
|
183
|
+
periodicity_score = edge_confidence
|
|
184
|
+
|
|
185
|
+
# Also try FFT-based frequency detection
|
|
186
|
+
# FFT is more reliable for undersampled signals where autocorrelation may detect harmonics
|
|
187
|
+
if n >= 64:
|
|
188
|
+
fft_periodic, fft_period, fft_confidence = _detect_periodicity_fft(data, trace_sample_rate)
|
|
189
|
+
if fft_periodic:
|
|
190
|
+
# If autocorrelation also found periodicity, compare results
|
|
191
|
+
if is_periodic and period_estimate is not None:
|
|
192
|
+
# If frequencies differ significantly (>20%), prefer the higher frequency
|
|
193
|
+
# (lower frequencies are often harmonics or aliasing artifacts)
|
|
194
|
+
auto_freq = 1.0 / period_estimate if period_estimate > 0 else 0
|
|
195
|
+
fft_freq = 1.0 / fft_period if fft_period is not None and fft_period > 0 else 0
|
|
196
|
+
freq_ratio = max(auto_freq, fft_freq) / (min(auto_freq, fft_freq) + 1e-12)
|
|
197
|
+
|
|
198
|
+
if freq_ratio > 1.2: # More than 20% difference
|
|
199
|
+
# Prefer higher frequency (more likely to be correct)
|
|
200
|
+
if fft_freq > auto_freq:
|
|
201
|
+
period_estimate = fft_period
|
|
202
|
+
periodicity_score = fft_confidence
|
|
203
|
+
else:
|
|
204
|
+
# Only FFT detected periodicity
|
|
205
|
+
is_periodic = fft_periodic
|
|
206
|
+
period_estimate = fft_period
|
|
207
|
+
periodicity_score = fft_confidence
|
|
208
|
+
|
|
209
|
+
if is_periodic:
|
|
210
|
+
characteristics.append("periodic")
|
|
211
|
+
frequency_estimate = (
|
|
212
|
+
1.0 / period_estimate if period_estimate is not None and period_estimate > 0 else None
|
|
213
|
+
)
|
|
214
|
+
confidence = max(confidence, periodicity_score)
|
|
215
|
+
else:
|
|
216
|
+
characteristics.append("aperiodic")
|
|
217
|
+
frequency_estimate = None
|
|
218
|
+
|
|
219
|
+
# 5. Check for DC component
|
|
220
|
+
dc_component = abs(mean_val) > (amplitude * dc_threshold_percent / 100.0)
|
|
221
|
+
|
|
222
|
+
# 6. Detect pulsed/transient characteristics
|
|
223
|
+
edge_count = _count_edges(data, digital_levels if is_digital else None)
|
|
224
|
+
samples_per_edge = n / max(edge_count, 1)
|
|
225
|
+
|
|
226
|
+
if edge_count > 2 and samples_per_edge > 100:
|
|
227
|
+
characteristics.append("pulsed")
|
|
228
|
+
elif edge_count < 3 and amplitude > std_val * 2:
|
|
229
|
+
characteristics.append("transient")
|
|
230
|
+
|
|
231
|
+
# 7. Check for mixed signal (both digital transitions and analog variation)
|
|
232
|
+
if is_digital and digital_levels is not None:
|
|
233
|
+
# Check if there's significant variation within digital levels
|
|
234
|
+
low_region = data[data < (digital_levels["low"] + digital_levels["high"]) / 2]
|
|
235
|
+
high_region = data[data >= (digital_levels["low"] + digital_levels["high"]) / 2]
|
|
236
|
+
|
|
237
|
+
if len(low_region) > 0 and len(high_region) > 0:
|
|
238
|
+
low_std = np.std(low_region)
|
|
239
|
+
high_std = np.std(high_region)
|
|
240
|
+
level_separation = digital_levels["high"] - digital_levels["low"]
|
|
241
|
+
|
|
242
|
+
if low_std > level_separation * 0.1 or high_std > level_separation * 0.1:
|
|
243
|
+
signal_type = "mixed"
|
|
244
|
+
characteristics.append("analog_variation")
|
|
245
|
+
|
|
246
|
+
# Calculate SNR estimate
|
|
247
|
+
snr_db = None
|
|
248
|
+
if amplitude > noise_level * 10:
|
|
249
|
+
signal_power = amplitude**2 / 8 # Approximate for most waveforms
|
|
250
|
+
noise_power = noise_level**2
|
|
251
|
+
if noise_power > 1e-20:
|
|
252
|
+
snr_db = 10 * np.log10(signal_power / noise_power)
|
|
253
|
+
|
|
254
|
+
return {
|
|
255
|
+
"type": signal_type,
|
|
256
|
+
"signal_type": signal_type,
|
|
257
|
+
"is_digital": is_digital,
|
|
258
|
+
"is_periodic": is_periodic,
|
|
259
|
+
"characteristics": characteristics,
|
|
260
|
+
"dc_component": dc_component,
|
|
261
|
+
"frequency_estimate": frequency_estimate,
|
|
262
|
+
"dominant_frequency": frequency_estimate,
|
|
263
|
+
"snr_db": float(snr_db) if snr_db is not None else None,
|
|
264
|
+
"confidence": float(confidence),
|
|
265
|
+
"noise_level": float(noise_level),
|
|
266
|
+
"levels": digital_levels if is_digital else None,
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def assess_signal_quality(
|
|
271
|
+
trace: WaveformTrace,
|
|
272
|
+
) -> dict[str, Any]:
|
|
273
|
+
"""Assess signal quality metrics.
|
|
274
|
+
|
|
275
|
+
Analyzes signal quality including SNR, noise level, clipping, saturation,
|
|
276
|
+
and other quality indicators that affect measurement accuracy.
|
|
277
|
+
|
|
278
|
+
Args:
|
|
279
|
+
trace: Input waveform trace to assess.
|
|
280
|
+
|
|
281
|
+
Returns:
|
|
282
|
+
Dictionary containing:
|
|
283
|
+
- snr: Signal-to-noise ratio in dB (or None if not applicable)
|
|
284
|
+
- noise_level: RMS noise level in signal units
|
|
285
|
+
- clipping: True if signal shows clipping
|
|
286
|
+
- saturation: True if signal appears saturated
|
|
287
|
+
- warnings: List of quality warning strings
|
|
288
|
+
- dynamic_range: Signal dynamic range in dB
|
|
289
|
+
- crest_factor: Peak-to-RMS ratio
|
|
290
|
+
|
|
291
|
+
Example:
|
|
292
|
+
>>> trace = tk.load('noisy_sine.wfm')
|
|
293
|
+
>>> quality = tk.assess_signal_quality(trace)
|
|
294
|
+
>>> print(f"SNR: {quality['snr']:.1f} dB")
|
|
295
|
+
SNR: 42.3 dB
|
|
296
|
+
>>> if quality['warnings']:
|
|
297
|
+
... print(f"Warnings: {quality['warnings']}")
|
|
298
|
+
|
|
299
|
+
References:
|
|
300
|
+
IEEE 1057-2017: ADC quality metrics
|
|
301
|
+
"""
|
|
302
|
+
data = trace.data
|
|
303
|
+
n = len(data)
|
|
304
|
+
warnings = []
|
|
305
|
+
|
|
306
|
+
if n < 10:
|
|
307
|
+
warnings.append("Insufficient data for quality assessment")
|
|
308
|
+
return {
|
|
309
|
+
"snr": None,
|
|
310
|
+
"noise_level": 0.0,
|
|
311
|
+
"clipping": False,
|
|
312
|
+
"saturation": False,
|
|
313
|
+
"warnings": warnings,
|
|
314
|
+
"dynamic_range": None,
|
|
315
|
+
"crest_factor": None,
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
# Calculate statistics
|
|
319
|
+
min_val = float(np.min(data))
|
|
320
|
+
max_val = float(np.max(data))
|
|
321
|
+
mean_val = float(np.mean(data))
|
|
322
|
+
rms_val = float(np.sqrt(np.mean(data**2)))
|
|
323
|
+
amplitude = max_val - min_val
|
|
324
|
+
|
|
325
|
+
# 1. Detect clipping (samples stuck at extremes)
|
|
326
|
+
# Real clipping shows as CONSECUTIVE samples at extremes, not just many samples near extremes
|
|
327
|
+
clipping = False
|
|
328
|
+
if amplitude > 1e-9:
|
|
329
|
+
tolerance = amplitude * 0.01 # 1% tolerance
|
|
330
|
+
|
|
331
|
+
# Find consecutive runs at extremes
|
|
332
|
+
at_min = data <= (min_val + tolerance)
|
|
333
|
+
at_max = data >= (max_val - tolerance)
|
|
334
|
+
|
|
335
|
+
# Check for long consecutive runs (clipping) vs brief peaks (natural waveform)
|
|
336
|
+
# For analog signals like sine waves, peaks naturally have ~5-10% of samples near extremes
|
|
337
|
+
# Real clipping typically shows >15-20% consecutive samples
|
|
338
|
+
# For digital signals, even short runs at extremes can indicate clipping
|
|
339
|
+
min_run_length = max(int(n * 0.15), 100) # 15% of data or 100 samples minimum
|
|
340
|
+
|
|
341
|
+
# Find maximum consecutive run lengths
|
|
342
|
+
max_min_run = 0
|
|
343
|
+
max_max_run = 0
|
|
344
|
+
|
|
345
|
+
current_min_run = 0
|
|
346
|
+
current_max_run = 0
|
|
347
|
+
|
|
348
|
+
for i in range(n):
|
|
349
|
+
if at_min[i]:
|
|
350
|
+
current_min_run += 1
|
|
351
|
+
max_min_run = max(max_min_run, current_min_run)
|
|
352
|
+
else:
|
|
353
|
+
current_min_run = 0
|
|
354
|
+
|
|
355
|
+
if at_max[i]:
|
|
356
|
+
current_max_run += 1
|
|
357
|
+
max_max_run = max(max_max_run, current_max_run)
|
|
358
|
+
else:
|
|
359
|
+
current_max_run = 0
|
|
360
|
+
|
|
361
|
+
# Clipping detected if we have long consecutive runs at extremes
|
|
362
|
+
if max_min_run >= min_run_length:
|
|
363
|
+
clipping = True
|
|
364
|
+
warnings.append(
|
|
365
|
+
f"Signal clipping detected at minimum ({max_min_run} consecutive samples)"
|
|
366
|
+
)
|
|
367
|
+
if max_max_run >= min_run_length:
|
|
368
|
+
clipping = True
|
|
369
|
+
warnings.append(
|
|
370
|
+
f"Signal clipping detected at maximum ({max_max_run} consecutive samples)"
|
|
371
|
+
)
|
|
372
|
+
|
|
373
|
+
# 2. Detect saturation (signal stuck at one level)
|
|
374
|
+
# For digital signals, 2 unique values is normal, not saturation
|
|
375
|
+
saturation = False
|
|
376
|
+
unique_values = len(np.unique(data))
|
|
377
|
+
classification = classify_signal(trace)
|
|
378
|
+
|
|
379
|
+
# Different thresholds for digital vs analog signals
|
|
380
|
+
if classification["type"] == "digital":
|
|
381
|
+
# Digital signals should have 2+ levels; saturation is when stuck at 1 level
|
|
382
|
+
if unique_values < 2:
|
|
383
|
+
saturation = True
|
|
384
|
+
warnings.append(f"Signal saturation detected (only {unique_values} unique value)")
|
|
385
|
+
else:
|
|
386
|
+
# Analog signals should have many unique values
|
|
387
|
+
if unique_values < max(10, n // 1000):
|
|
388
|
+
saturation = True
|
|
389
|
+
warnings.append(f"Signal saturation detected (only {unique_values} unique values)")
|
|
390
|
+
|
|
391
|
+
# 3. Estimate noise level
|
|
392
|
+
noise_level = _estimate_noise_level(data)
|
|
393
|
+
|
|
394
|
+
# 4. Calculate SNR
|
|
395
|
+
snr = None
|
|
396
|
+
if amplitude > noise_level * 10: # Only calculate if signal > noise
|
|
397
|
+
# Remove DC and calculate signal power
|
|
398
|
+
data_ac = data - mean_val
|
|
399
|
+
signal_power = np.mean(data_ac**2)
|
|
400
|
+
noise_power = noise_level**2
|
|
401
|
+
|
|
402
|
+
if noise_power > 1e-20:
|
|
403
|
+
snr = 10 * np.log10(signal_power / noise_power)
|
|
404
|
+
else:
|
|
405
|
+
snr = float("inf")
|
|
406
|
+
|
|
407
|
+
# 5. Calculate dynamic range
|
|
408
|
+
dynamic_range = None
|
|
409
|
+
if min_val != 0 and max_val != 0 and max_val > 1e-20:
|
|
410
|
+
with np.errstate(invalid="ignore", divide="ignore"):
|
|
411
|
+
ratio = max_val / (abs(min_val) + 1e-20)
|
|
412
|
+
if ratio > 0 and np.isfinite(ratio):
|
|
413
|
+
dynamic_range = 20 * np.log10(ratio)
|
|
414
|
+
|
|
415
|
+
# 6. Calculate crest factor (peak-to-RMS)
|
|
416
|
+
crest_factor = None
|
|
417
|
+
if rms_val > 1e-12:
|
|
418
|
+
crest_factor = max(abs(max_val), abs(min_val)) / rms_val
|
|
419
|
+
|
|
420
|
+
# 7. Check for quantization issues
|
|
421
|
+
if n > 100:
|
|
422
|
+
# Estimate quantization step
|
|
423
|
+
sorted_data = np.sort(data)
|
|
424
|
+
diffs = np.diff(sorted_data)
|
|
425
|
+
diffs = diffs[diffs > 1e-15] # Remove near-zero differences
|
|
426
|
+
|
|
427
|
+
if len(diffs) > 10:
|
|
428
|
+
min_step = np.min(diffs)
|
|
429
|
+
if amplitude / min_step < 256:
|
|
430
|
+
warnings.append(
|
|
431
|
+
f"Low resolution detected ({int(amplitude / min_step)} levels), "
|
|
432
|
+
"may affect measurement accuracy"
|
|
433
|
+
)
|
|
434
|
+
|
|
435
|
+
# 8. Check sample rate adequacy
|
|
436
|
+
classification = classify_signal(trace)
|
|
437
|
+
if classification["frequency_estimate"] is not None:
|
|
438
|
+
# Check if sample rate is at least 10x the detected frequency
|
|
439
|
+
nyquist_rate = 2 * classification["frequency_estimate"]
|
|
440
|
+
if trace.metadata.sample_rate < nyquist_rate * 5:
|
|
441
|
+
warnings.append(
|
|
442
|
+
f"Sample rate ({trace.metadata.sample_rate:.3e} Hz) may be "
|
|
443
|
+
f"insufficient for signal frequency ({classification['frequency_estimate']:.3e} Hz). "
|
|
444
|
+
"Recommend at least 10x oversampling"
|
|
445
|
+
)
|
|
446
|
+
|
|
447
|
+
# Additional check: if samples per period is very low, we might be undersampling
|
|
448
|
+
# This catches cases where frequency detection may be wrong due to aliasing
|
|
449
|
+
samples_per_period = trace.metadata.sample_rate / classification["frequency_estimate"]
|
|
450
|
+
if samples_per_period < 10 and "sample rate" not in "".join(warnings).lower():
|
|
451
|
+
warnings.append(
|
|
452
|
+
f"Very low oversampling detected ({samples_per_period:.1f} samples per period). "
|
|
453
|
+
f"Signal may be undersampled or frequency detection may be inaccurate. "
|
|
454
|
+
"Recommend at least 10 samples per period"
|
|
455
|
+
)
|
|
456
|
+
|
|
457
|
+
return {
|
|
458
|
+
"snr": float(snr) if snr is not None else None,
|
|
459
|
+
"noise_level": float(noise_level),
|
|
460
|
+
"clipping": bool(clipping),
|
|
461
|
+
"saturation": bool(saturation),
|
|
462
|
+
"warnings": warnings,
|
|
463
|
+
"dynamic_range": float(dynamic_range) if dynamic_range is not None else None,
|
|
464
|
+
"crest_factor": float(crest_factor) if crest_factor is not None else None,
|
|
465
|
+
}
|
|
466
|
+
|
|
467
|
+
|
|
468
|
+
def check_measurement_suitability(
|
|
469
|
+
trace: WaveformTrace,
|
|
470
|
+
measurement_name: str,
|
|
471
|
+
) -> dict[str, Any]:
|
|
472
|
+
"""Check if a measurement is suitable for this signal.
|
|
473
|
+
|
|
474
|
+
Analyzes signal characteristics to determine if a specific measurement
|
|
475
|
+
will produce valid results, and provides warnings and suggestions.
|
|
476
|
+
|
|
477
|
+
Args:
|
|
478
|
+
trace: Input waveform trace.
|
|
479
|
+
measurement_name: Name of measurement to check (e.g., "frequency", "rise_time").
|
|
480
|
+
|
|
481
|
+
Returns:
|
|
482
|
+
Dictionary containing:
|
|
483
|
+
- suitable: True if measurement is appropriate for this signal
|
|
484
|
+
- confidence: Confidence in suitability assessment (0.0-1.0)
|
|
485
|
+
- warnings: List of warning strings
|
|
486
|
+
- suggestions: List of suggestion strings
|
|
487
|
+
- expected_result: "valid", "nan", or "unreliable"
|
|
488
|
+
|
|
489
|
+
Example:
|
|
490
|
+
>>> trace = tk.load('dc_signal.wfm')
|
|
491
|
+
>>> check = tk.check_measurement_suitability(trace, "frequency")
|
|
492
|
+
>>> if not check['suitable']:
|
|
493
|
+
... print(f"Warning: {check['warnings']}")
|
|
494
|
+
Warning: ['Frequency measurement not suitable for DC signal']
|
|
495
|
+
|
|
496
|
+
References:
|
|
497
|
+
IEEE 181-2011: Measurement applicability
|
|
498
|
+
"""
|
|
499
|
+
classification = classify_signal(trace)
|
|
500
|
+
quality = assess_signal_quality(trace)
|
|
501
|
+
|
|
502
|
+
warnings = []
|
|
503
|
+
suggestions = []
|
|
504
|
+
suitable = True
|
|
505
|
+
confidence = 0.8
|
|
506
|
+
expected_result = "valid"
|
|
507
|
+
|
|
508
|
+
signal_type = classification["type"]
|
|
509
|
+
characteristics = classification["characteristics"]
|
|
510
|
+
|
|
511
|
+
# Define measurement requirements
|
|
512
|
+
frequency_measurements = ["frequency", "period"]
|
|
513
|
+
edge_measurements = ["rise_time", "fall_time"]
|
|
514
|
+
amplitude_measurements = ["amplitude", "overshoot", "undershoot", "preshoot"]
|
|
515
|
+
duty_measurements = ["duty_cycle", "pulse_width"]
|
|
516
|
+
_statistical_measurements = ["mean", "rms"]
|
|
517
|
+
spectral_measurements = ["thd", "snr", "sinad", "enob", "sfdr", "fft", "psd"]
|
|
518
|
+
|
|
519
|
+
# Check DC signals
|
|
520
|
+
if signal_type == "dc":
|
|
521
|
+
if measurement_name in frequency_measurements:
|
|
522
|
+
suitable = False
|
|
523
|
+
warnings.append(f"{measurement_name} measurement not suitable for DC signal")
|
|
524
|
+
suggestions.append("Use 'mean' or 'rms' measurements for DC signals")
|
|
525
|
+
expected_result = "nan"
|
|
526
|
+
elif measurement_name in edge_measurements:
|
|
527
|
+
suitable = False
|
|
528
|
+
warnings.append(f"{measurement_name} requires signal transitions")
|
|
529
|
+
suggestions.append("Signal appears to be DC with no edges")
|
|
530
|
+
expected_result = "nan"
|
|
531
|
+
elif measurement_name in duty_measurements:
|
|
532
|
+
suitable = False
|
|
533
|
+
warnings.append(f"{measurement_name} requires periodic signal")
|
|
534
|
+
expected_result = "nan"
|
|
535
|
+
|
|
536
|
+
# Check aperiodic signals
|
|
537
|
+
if "aperiodic" in characteristics:
|
|
538
|
+
if measurement_name in frequency_measurements + duty_measurements:
|
|
539
|
+
suitable = False
|
|
540
|
+
confidence = 0.6
|
|
541
|
+
warnings.append(f"{measurement_name} requires periodic signal")
|
|
542
|
+
suggestions.append("Signal does not appear periodic")
|
|
543
|
+
expected_result = "nan"
|
|
544
|
+
elif measurement_name in spectral_measurements:
|
|
545
|
+
warnings.append("Spectral measurements on aperiodic signals may not show clear peaks")
|
|
546
|
+
suggestions.append("Consider time-domain or statistical analysis")
|
|
547
|
+
expected_result = "unreliable"
|
|
548
|
+
|
|
549
|
+
# Check digital vs analog
|
|
550
|
+
if signal_type == "digital":
|
|
551
|
+
if measurement_name in amplitude_measurements and measurement_name != "amplitude":
|
|
552
|
+
warnings.append(
|
|
553
|
+
f"{measurement_name} designed for analog signals with overshoot/ringing"
|
|
554
|
+
)
|
|
555
|
+
suggestions.append("Digital signals may show zero overshoot/undershoot")
|
|
556
|
+
expected_result = "unreliable"
|
|
557
|
+
confidence = 0.5
|
|
558
|
+
|
|
559
|
+
# Check for sufficient transitions
|
|
560
|
+
if measurement_name in edge_measurements + duty_measurements:
|
|
561
|
+
data = trace.data
|
|
562
|
+
edge_count = _count_edges(data, classification.get("levels"))
|
|
563
|
+
if edge_count < 2:
|
|
564
|
+
suitable = False
|
|
565
|
+
warnings.append(f"{measurement_name} requires at least 2 signal edges")
|
|
566
|
+
suggestions.append(f"Signal has only {edge_count} detected edge(s)")
|
|
567
|
+
expected_result = "nan"
|
|
568
|
+
|
|
569
|
+
# Check signal quality impacts
|
|
570
|
+
if quality["clipping"]:
|
|
571
|
+
if measurement_name in edge_measurements + amplitude_measurements:
|
|
572
|
+
warnings.append("Signal clipping detected, may affect measurement accuracy")
|
|
573
|
+
# Don't override "nan" - if measurement is fundamentally unsuitable, keep it as "nan"
|
|
574
|
+
if expected_result != "nan":
|
|
575
|
+
expected_result = "unreliable"
|
|
576
|
+
confidence = min(confidence, 0.6)
|
|
577
|
+
|
|
578
|
+
if quality["saturation"]:
|
|
579
|
+
warnings.append("Signal saturation detected, measurements may be unreliable")
|
|
580
|
+
# Don't override "nan" - if measurement is fundamentally unsuitable, keep it as "nan"
|
|
581
|
+
if expected_result != "nan":
|
|
582
|
+
expected_result = "unreliable"
|
|
583
|
+
confidence = min(confidence, 0.5)
|
|
584
|
+
|
|
585
|
+
if quality["snr"] is not None and quality["snr"] < 20:
|
|
586
|
+
if measurement_name in edge_measurements:
|
|
587
|
+
warnings.append(
|
|
588
|
+
f"Low SNR ({quality['snr']:.1f} dB) may affect edge timing measurements"
|
|
589
|
+
)
|
|
590
|
+
suggestions.append("Consider filtering signal to improve SNR")
|
|
591
|
+
confidence = min(confidence, 0.7)
|
|
592
|
+
|
|
593
|
+
# Check sample rate for timing measurements
|
|
594
|
+
if measurement_name in edge_measurements + frequency_measurements:
|
|
595
|
+
if classification["frequency_estimate"] is not None:
|
|
596
|
+
nyquist_rate = 2 * classification["frequency_estimate"]
|
|
597
|
+
if trace.metadata.sample_rate < nyquist_rate * 5:
|
|
598
|
+
warnings.append("Sample rate may be too low for accurate timing measurements")
|
|
599
|
+
suggestions.append(
|
|
600
|
+
f"Recommend sample rate > {nyquist_rate * 10:.3e} Hz (10x signal frequency)"
|
|
601
|
+
)
|
|
602
|
+
expected_result = "unreliable"
|
|
603
|
+
confidence = min(confidence, 0.6)
|
|
604
|
+
|
|
605
|
+
# Check data length
|
|
606
|
+
n = len(trace.data)
|
|
607
|
+
if measurement_name in spectral_measurements:
|
|
608
|
+
if n < 256:
|
|
609
|
+
warnings.append(f"Signal length ({n} samples) may be too short for spectral analysis")
|
|
610
|
+
suggestions.append("Recommend at least 1024 samples for FFT-based measurements")
|
|
611
|
+
expected_result = "unreliable"
|
|
612
|
+
confidence = min(confidence, 0.5)
|
|
613
|
+
|
|
614
|
+
if measurement_name in frequency_measurements:
|
|
615
|
+
if classification["frequency_estimate"] is not None:
|
|
616
|
+
min_samples = trace.metadata.sample_rate / classification["frequency_estimate"]
|
|
617
|
+
# Require at least 0.5 periods for basic detection
|
|
618
|
+
# Having 1+ complete periods is ideal, but FFT can work with less
|
|
619
|
+
if n < min_samples * 0.5:
|
|
620
|
+
warnings.append(
|
|
621
|
+
f"Signal length ({n} samples) captures < 0.5 periods, "
|
|
622
|
+
"frequency measurement may fail"
|
|
623
|
+
)
|
|
624
|
+
suggestions.append("Capture at least 2 periods for reliable frequency measurement")
|
|
625
|
+
expected_result = "unreliable"
|
|
626
|
+
confidence = min(confidence, 0.5)
|
|
627
|
+
elif n < min_samples * 2:
|
|
628
|
+
# Between 0.5 and 2 periods: usable but not ideal
|
|
629
|
+
suggestions.append("Capture at least 10 periods for best accuracy")
|
|
630
|
+
confidence = min(confidence, 0.75)
|
|
631
|
+
|
|
632
|
+
return {
|
|
633
|
+
"suitable": suitable,
|
|
634
|
+
"confidence": float(confidence),
|
|
635
|
+
"warnings": warnings,
|
|
636
|
+
"suggestions": suggestions,
|
|
637
|
+
"expected_result": expected_result,
|
|
638
|
+
}
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
def suggest_measurements(
|
|
642
|
+
trace: WaveformTrace,
|
|
643
|
+
*,
|
|
644
|
+
max_suggestions: int = 10,
|
|
645
|
+
) -> list[dict[str, Any]]:
|
|
646
|
+
"""Suggest appropriate measurements for a signal.
|
|
647
|
+
|
|
648
|
+
Analyzes signal characteristics and recommends the most suitable
|
|
649
|
+
measurements, ranked by relevance and reliability.
|
|
650
|
+
|
|
651
|
+
Args:
|
|
652
|
+
trace: Input waveform trace.
|
|
653
|
+
max_suggestions: Maximum number of suggestions to return.
|
|
654
|
+
|
|
655
|
+
Returns:
|
|
656
|
+
List of dictionaries, each containing:
|
|
657
|
+
- name: Measurement name
|
|
658
|
+
- category: Measurement category (e.g., "timing", "amplitude", "spectral")
|
|
659
|
+
- priority: Priority ranking (1=highest)
|
|
660
|
+
- rationale: Why this measurement is recommended
|
|
661
|
+
- confidence: Confidence in recommendation (0.0-1.0)
|
|
662
|
+
|
|
663
|
+
Example:
|
|
664
|
+
>>> trace = tk.load('square_wave.wfm')
|
|
665
|
+
>>> suggestions = tk.suggest_measurements(trace)
|
|
666
|
+
>>> for s in suggestions[:3]:
|
|
667
|
+
... print(f"{s['name']}: {s['rationale']}")
|
|
668
|
+
frequency: Periodic digital signal detected
|
|
669
|
+
duty_cycle: Suitable for pulse analysis
|
|
670
|
+
rise_time: Digital edges detected
|
|
671
|
+
|
|
672
|
+
References:
|
|
673
|
+
Best practices for waveform analysis
|
|
674
|
+
"""
|
|
675
|
+
classification = classify_signal(trace)
|
|
676
|
+
quality = assess_signal_quality(trace)
|
|
677
|
+
|
|
678
|
+
signal_type = classification["type"]
|
|
679
|
+
characteristics = classification["characteristics"]
|
|
680
|
+
|
|
681
|
+
suggestions = []
|
|
682
|
+
|
|
683
|
+
# Always suggest basic statistical measurements
|
|
684
|
+
suggestions.append(
|
|
685
|
+
{
|
|
686
|
+
"name": "mean",
|
|
687
|
+
"category": "statistical",
|
|
688
|
+
"priority": 1,
|
|
689
|
+
"rationale": "Basic DC level measurement, always applicable",
|
|
690
|
+
"confidence": 1.0,
|
|
691
|
+
}
|
|
692
|
+
)
|
|
693
|
+
|
|
694
|
+
suggestions.append(
|
|
695
|
+
{
|
|
696
|
+
"name": "rms",
|
|
697
|
+
"category": "statistical",
|
|
698
|
+
"priority": 2,
|
|
699
|
+
"rationale": "RMS voltage measurement, useful for all signal types",
|
|
700
|
+
"confidence": 1.0,
|
|
701
|
+
}
|
|
702
|
+
)
|
|
703
|
+
|
|
704
|
+
# DC signals
|
|
705
|
+
if signal_type == "dc":
|
|
706
|
+
suggestions.append(
|
|
707
|
+
{
|
|
708
|
+
"name": "amplitude",
|
|
709
|
+
"category": "amplitude",
|
|
710
|
+
"priority": 3,
|
|
711
|
+
"rationale": "Measure noise/variation level in DC signal",
|
|
712
|
+
"confidence": 0.9,
|
|
713
|
+
}
|
|
714
|
+
)
|
|
715
|
+
# Don't suggest frequency, edges, etc.
|
|
716
|
+
return sorted(suggestions, key=lambda x: cast("int", x["priority"]))[:max_suggestions]
|
|
717
|
+
|
|
718
|
+
# Amplitude measurements
|
|
719
|
+
suggestions.append(
|
|
720
|
+
{
|
|
721
|
+
"name": "amplitude",
|
|
722
|
+
"category": "amplitude",
|
|
723
|
+
"priority": 3,
|
|
724
|
+
"rationale": f"Peak-to-peak amplitude for {signal_type} signal",
|
|
725
|
+
"confidence": 0.95,
|
|
726
|
+
}
|
|
727
|
+
)
|
|
728
|
+
|
|
729
|
+
# Periodic signals
|
|
730
|
+
if "periodic" in characteristics:
|
|
731
|
+
suggestions.append(
|
|
732
|
+
{
|
|
733
|
+
"name": "frequency",
|
|
734
|
+
"category": "timing",
|
|
735
|
+
"priority": 4,
|
|
736
|
+
"rationale": "Periodic signal detected, frequency measurement applicable",
|
|
737
|
+
"confidence": classification["confidence"],
|
|
738
|
+
}
|
|
739
|
+
)
|
|
740
|
+
|
|
741
|
+
suggestions.append(
|
|
742
|
+
{
|
|
743
|
+
"name": "period",
|
|
744
|
+
"category": "timing",
|
|
745
|
+
"priority": 5,
|
|
746
|
+
"rationale": "Period measurement for periodic signal",
|
|
747
|
+
"confidence": classification["confidence"],
|
|
748
|
+
}
|
|
749
|
+
)
|
|
750
|
+
|
|
751
|
+
# Digital signals with edges
|
|
752
|
+
if signal_type in ("digital", "mixed"):
|
|
753
|
+
edge_count = _count_edges(trace.data, classification.get("levels"))
|
|
754
|
+
|
|
755
|
+
if edge_count >= 2:
|
|
756
|
+
suggestions.append(
|
|
757
|
+
{
|
|
758
|
+
"name": "rise_time",
|
|
759
|
+
"category": "timing",
|
|
760
|
+
"priority": 6,
|
|
761
|
+
"rationale": f"Digital edges detected ({edge_count} edges)",
|
|
762
|
+
"confidence": 0.9 if quality["snr"] and quality["snr"] > 20 else 0.7,
|
|
763
|
+
}
|
|
764
|
+
)
|
|
765
|
+
|
|
766
|
+
suggestions.append(
|
|
767
|
+
{
|
|
768
|
+
"name": "fall_time",
|
|
769
|
+
"category": "timing",
|
|
770
|
+
"priority": 7,
|
|
771
|
+
"rationale": f"Digital edges detected ({edge_count} edges)",
|
|
772
|
+
"confidence": 0.9 if quality["snr"] and quality["snr"] > 20 else 0.7,
|
|
773
|
+
}
|
|
774
|
+
)
|
|
775
|
+
|
|
776
|
+
if "periodic" in characteristics and edge_count >= 2:
|
|
777
|
+
# Need at least 2 edges (1 complete cycle) for duty cycle
|
|
778
|
+
suggestions.append(
|
|
779
|
+
{
|
|
780
|
+
"name": "duty_cycle",
|
|
781
|
+
"category": "timing",
|
|
782
|
+
"priority": 8,
|
|
783
|
+
"rationale": "Periodic pulse train detected",
|
|
784
|
+
"confidence": 0.85 if edge_count >= 4 else 0.75,
|
|
785
|
+
}
|
|
786
|
+
)
|
|
787
|
+
|
|
788
|
+
suggestions.append(
|
|
789
|
+
{
|
|
790
|
+
"name": "pulse_width",
|
|
791
|
+
"category": "timing",
|
|
792
|
+
"priority": 9,
|
|
793
|
+
"rationale": "Pulse measurements suitable for periodic digital signal",
|
|
794
|
+
"confidence": 0.85 if edge_count >= 4 else 0.75,
|
|
795
|
+
}
|
|
796
|
+
)
|
|
797
|
+
|
|
798
|
+
# Analog signals
|
|
799
|
+
if signal_type in ("analog", "mixed"):
|
|
800
|
+
if not quality["clipping"]:
|
|
801
|
+
suggestions.append(
|
|
802
|
+
{
|
|
803
|
+
"name": "overshoot",
|
|
804
|
+
"category": "amplitude",
|
|
805
|
+
"priority": 10,
|
|
806
|
+
"rationale": "Analog signal, overshoot measurement applicable",
|
|
807
|
+
"confidence": 0.8,
|
|
808
|
+
}
|
|
809
|
+
)
|
|
810
|
+
|
|
811
|
+
suggestions.append(
|
|
812
|
+
{
|
|
813
|
+
"name": "undershoot",
|
|
814
|
+
"category": "amplitude",
|
|
815
|
+
"priority": 11,
|
|
816
|
+
"rationale": "Analog signal, undershoot measurement applicable",
|
|
817
|
+
"confidence": 0.8,
|
|
818
|
+
}
|
|
819
|
+
)
|
|
820
|
+
|
|
821
|
+
# Spectral measurements for clean, periodic signals
|
|
822
|
+
if "periodic" in characteristics and "clean" in characteristics:
|
|
823
|
+
if len(trace.data) >= 256:
|
|
824
|
+
suggestions.append(
|
|
825
|
+
{
|
|
826
|
+
"name": "thd",
|
|
827
|
+
"category": "spectral",
|
|
828
|
+
"priority": 12,
|
|
829
|
+
"rationale": "Clean periodic signal suitable for harmonic analysis",
|
|
830
|
+
"confidence": 0.85,
|
|
831
|
+
}
|
|
832
|
+
)
|
|
833
|
+
|
|
834
|
+
suggestions.append(
|
|
835
|
+
{
|
|
836
|
+
"name": "snr",
|
|
837
|
+
"category": "spectral",
|
|
838
|
+
"priority": 13,
|
|
839
|
+
"rationale": "Spectral SNR measurement for signal quality",
|
|
840
|
+
"confidence": 0.8,
|
|
841
|
+
}
|
|
842
|
+
)
|
|
843
|
+
|
|
844
|
+
# Sort by priority and limit
|
|
845
|
+
suggestions = sorted(suggestions, key=lambda x: cast("int", x["priority"]))
|
|
846
|
+
return suggestions[:max_suggestions]
|
|
847
|
+
|
|
848
|
+
|
|
849
|
+
+# =============================================================================
+# Helper Functions
+# =============================================================================
+
+
+def _detect_digital_signal(
+    data: NDArray[np.floating[Any]],
+    threshold_ratio: float,
+) -> tuple[bool, dict[str, float] | None, float]:
+    """Detect if signal is digital based on bimodal distribution.
+
+    Args:
+        data: Signal data array.
+        threshold_ratio: Ratio of samples at two levels to consider digital.
+
+    Returns:
+        Tuple of (is_digital, levels_dict, confidence).
+    """
+    # Use histogram to find peaks
+    # Use more bins for better resolution on digital signals
+    n_bins = min(100, len(np.unique(data)))
+    hist, bin_edges = np.histogram(data, bins=n_bins)
+    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
+
+    # Find peaks (local maxima or significant bins)
+    peaks = []
+
+    # Special case: if only 2 bins (perfect digital signal), both are peaks
+    if len(hist) == 2:
+        for i in range(len(hist)):
+            if hist[i] > len(data) * 0.01:
+                peaks.append((i, hist[i], bin_centers[i]))
+    else:
+        # Find local maxima in histogram
+        for i in range(1, len(hist) - 1):
+            if hist[i] > hist[i - 1] and hist[i] > hist[i + 1]:
+                # Lower threshold for peak detection
+                if hist[i] > len(data) * 0.01:  # At least 1% of samples
+                    peaks.append((i, hist[i], bin_centers[i]))
+
+    # If we have at least 2 peaks, the two dominant ones suggest digital
+    if len(peaks) >= 2:
+        # Sort by count
+        peaks = sorted(peaks, key=lambda x: x[1], reverse=True)
+
+        # Take top 2 peaks
+        peak1, peak2 = peaks[0], peaks[1]
+
+        # Check if these two peaks account for most samples
+        total_in_peaks = peak1[1] + peak2[1]
+        ratio = total_in_peaks / len(data)
+
+        # Also check that peaks are well separated
+        peak_separation = abs(peak1[2] - peak2[2])
+        data_range = np.ptp(data)
+
+        # Peaks should be separated by at least 30% of data range
+        if ratio >= threshold_ratio and peak_separation > data_range * 0.3:
+            low_level = min(peak1[2], peak2[2])
+            high_level = max(peak1[2], peak2[2])
+
+            confidence = min(0.95, ratio)
+
+            return True, {"low": float(low_level), "high": float(high_level)}, confidence
+
+    return False, None, 0.0
+
+
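For intuition, a hedged sketch of how the detector behaves on an ideal two-level waveform (hypothetical call to the private helper; the expected values follow from the code above):

    import numpy as np

    t = np.arange(1000)
    square = np.where((t // 50) % 2 == 0, 0.0, 3.3)
    is_digital, levels, conf = _detect_digital_signal(square, threshold_ratio=0.8)
    # Two unique values -> a 2-bin histogram, so both bins count as peaks:
    # is_digital=True, conf=0.95 (ratio 1.0, capped), and levels holds the two
    # bin centers (~0.83 and ~2.48 here), approximating the low/high rails.
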
+def _estimate_noise_level(data: NDArray[np.floating[Any]]) -> float:
+    """Estimate noise level using median absolute deviation.
+
+    Args:
+        data: Signal data array.
+
+    Returns:
+        Estimated RMS noise level.
+    """
+    if len(data) < 10:
+        return 0.0
+
+    # Use differencing to remove slow variations
+    diffs = np.diff(data)
+
+    # MAD (Median Absolute Deviation) is robust to outliers
+    median_diff = np.median(diffs)
+    mad = np.median(np.abs(diffs - median_diff))
+
+    # Convert MAD to RMS noise estimate
+    # For Gaussian noise: sigma ≈ 1.4826 * MAD
+    # Divide by sqrt(2) because diff amplifies noise by sqrt(2)
+    noise_estimate = (1.4826 * mad) / np.sqrt(2)
+
+    return float(noise_estimate)
+
+
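The two correction factors are worth a quick check: differencing i.i.d. Gaussian noise inflates sigma by sqrt(2), and 1.4826 * MAD recovers sigma for a normal distribution, so on white noise the estimator should return roughly the true sigma (hypothetical sanity check):

    import numpy as np

    rng = np.random.default_rng(0)
    noise = rng.normal(0.0, 0.05, 100_000)
    # diff(noise) ~ N(0, 2*sigma^2); 1.4826*MAD ~= sqrt(2)*sigma; /sqrt(2) -> sigma
    print(round(_estimate_noise_level(noise), 3))  # ~0.05
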
+def _detect_periodicity(
+    data: NDArray[np.floating[Any]],
+    sample_rate: float,
+    threshold: float,
+) -> tuple[bool, float | None, float]:
+    """Detect if signal is periodic using autocorrelation.
+
+    Args:
+        data: Signal data array.
+        sample_rate: Sampling rate in Hz.
+        threshold: Correlation threshold for periodic detection.
+
+    Returns:
+        Tuple of (is_periodic, period_seconds, confidence).
+    """
+    n = len(data)
+
+    if n < 20:
+        return False, None, 0.0
+
+    # Remove DC for autocorrelation
+    data_ac = data - np.mean(data)
+
+    # Check if there's any variation
+    if np.std(data_ac) < 1e-12:
+        return False, None, 0.0
+
+    # Compute autocorrelation for lags up to n-10 to detect signals with ~1 period
+    # This allows finding periodicity even when we have just 1 period of data
+    # Keep at least 10 samples of overlap for correlation
+    max_lag = min(n - 10, 20000)  # Limit for performance
+
+    autocorr = np.correlate(data_ac, data_ac, mode="full")
+    autocorr = autocorr[n - 1 : n - 1 + max_lag]
+
+    # Normalize
+    if abs(autocorr[0]) > 1e-12:
+        autocorr = autocorr / autocorr[0]
+    else:
+        return False, None, 0.0
+
+    # Find peaks in autocorrelation (exclude lag=0 and very small lags)
+    # Start searching from lag > n/100 to avoid noise
+    min_lag = max(3, n // 100)
+    peaks = []
+
+    for i in range(min_lag, len(autocorr) - 2):
+        # Use stronger peak detection
+        if (
+            autocorr[i] > autocorr[i - 1]
+            and autocorr[i] > autocorr[i + 1]
+            and autocorr[i] > autocorr[i - 2]
+            and autocorr[i] > autocorr[i + 2]
+        ):
+            if autocorr[i] > threshold:
+                peaks.append((i, autocorr[i]))
+
+    if peaks:
+        # Take first significant peak as period
+        period_samples = peaks[0][0]
+        confidence = float(peaks[0][1])
+
+        period_seconds = period_samples / sample_rate
+
+        return True, period_seconds, confidence
+
+    return False, None, 0.0
+
+
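A minimal sketch of the autocorrelation path on a clean sine (hypothetical call; the first accepted peak sits one period from lag zero):

    import numpy as np

    fs = 10_000.0
    t = np.arange(2_000) / fs
    sine = np.sin(2 * np.pi * 100.0 * t)          # 100 samples per period
    ok, period, conf = _detect_periodicity(sine, sample_rate=fs, threshold=0.5)
    # Expect ok=True with period ~= 100 / fs = 0.01 s and confidence near 1.
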
+def _count_edges(
+    data: NDArray[np.floating[Any]],
+    levels: dict[str, float] | None,
+) -> int:
+    """Count number of edges in signal.
+
+    Args:
+        data: Signal data array.
+        levels: Optional digital levels dict with "low" and "high" keys.
+
+    Returns:
+        Number of edges detected.
+    """
+    if len(data) < 3:
+        return 0
+
+    if levels is not None:
+        # Use provided levels
+        threshold = (levels["low"] + levels["high"]) / 2
+    else:
+        # Use median as threshold
+        threshold = float(np.median(data))
+
+    # Find crossings
+    above = data > threshold
+    crossings = np.diff(above.astype(int))
+
+    # Count non-zero crossings (both rising and falling)
+    edge_count = np.sum(np.abs(crossings))
+
+    return int(edge_count)
+
+
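The edge count is simply the number of threshold crossings in either direction; a tiny hypothetical example:

    import numpy as np

    data = np.array([0.0, 0.0, 3.3, 3.3, 0.0, 0.0, 3.3, 3.3, 0.0])
    # Midpoint threshold 1.65 V -> two rising and two falling crossings.
    print(_count_edges(data, levels={"low": 0.0, "high": 3.3}))  # 4
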
+def _detect_periodicity_fft(
+    data: NDArray[np.floating[Any]],
+    sample_rate: float,
+) -> tuple[bool, float | None, float]:
+    """Detect periodicity using FFT (frequency domain analysis).
+
+    This method works well for signals with few periods where autocorrelation
+    may fail. It finds the dominant frequency component in the signal.
+
+    Args:
+        data: Signal data array.
+        sample_rate: Sampling rate in Hz.
+
+    Returns:
+        Tuple of (is_periodic, period_seconds, confidence).
+    """
+    n = len(data)
+
+    if n < 64:
+        return False, None, 0.0
+
+    # Remove DC component
+    data_ac = data - np.mean(data)
+
+    # Check if there's any variation
+    if np.std(data_ac) < 1e-12:
+        return False, None, 0.0
+
+    # Compute FFT
+    fft = np.fft.rfft(data_ac)
+    freqs = np.fft.rfftfreq(n, 1.0 / sample_rate)
+
+    # Compute power spectrum
+    power = np.abs(fft) ** 2
+
+    # Skip DC component (index 0)
+    if len(power) < 3:
+        return False, None, 0.0
+
+    power = power[1:]
+    freqs = freqs[1:]
+
+    # Find peak in power spectrum
+    peak_idx = np.argmax(power)
+    peak_power = power[peak_idx]
+    peak_freq = freqs[peak_idx]
+
+    # Check if peak is significant compared to total power
+    total_power = np.sum(power)
+    if total_power < 1e-20:
+        return False, None, 0.0
+
+    power_ratio = peak_power / total_power
+
+    # For periodic signals, the dominant frequency should have significant power
+    # Require at least 10% of total power in the peak
+    if power_ratio < 0.1:
+        return False, None, 0.0
+
+    # Check that frequency is reasonable (not too low or too high)
+    nyquist = sample_rate / 2
+    if peak_freq < sample_rate / n or peak_freq > nyquist * 0.9:
+        return False, None, 0.0
+
+    # Estimate period
+    period_seconds = 1.0 / peak_freq
+
+    # Confidence based on how dominant the peak is
+    # High power ratio -> high confidence
+    confidence = min(0.95, 0.5 + power_ratio)
+
+    return True, period_seconds, float(confidence)
+
+
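A hedged sketch of the FFT path; when the tone lands exactly on an FFT bin, the dominant peak carries essentially all of the power (hypothetical call):

    import numpy as np

    fs = 1_000.0
    t = np.arange(200) / fs                       # 10 periods of 50 Hz
    sine = np.sin(2 * np.pi * 50.0 * t)
    ok, period, conf = _detect_periodicity_fft(sine, sample_rate=fs)
    # 50 Hz sits on a bin (resolution fs/n = 5 Hz), so expect ok=True,
    # period ~= 0.02 s, and confidence capped at 0.95.
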
+def _detect_edge_periodicity(
+    data: NDArray[np.floating[Any]],
+    sample_rate: float,
+    levels: dict[str, float] | None,
+) -> tuple[bool, float | None, float]:
+    """Detect periodicity in digital signals by analyzing edge spacing.
+
+    This method works well for signals with few periods where autocorrelation
+    may fail. It detects regular patterns in edge timing.
+
+    Args:
+        data: Signal data array.
+        sample_rate: Sampling rate in Hz.
+        levels: Digital levels dict with "low" and "high" keys.
+
+    Returns:
+        Tuple of (is_periodic, period_seconds, confidence).
+    """
+    if len(data) < 10 or levels is None:
+        return False, None, 0.0
+
+    threshold = (levels["low"] + levels["high"]) / 2
+
+    # Find edge positions
+    above = data > threshold
+    crossings = np.diff(above.astype(int))
+    edge_positions = np.where(crossings != 0)[0]
+
+    if len(edge_positions) < 2:
+        # Need at least 2 edges (1 complete cycle) for detection
+        return False, None, 0.0
+
+    # Calculate intervals between edges
+    intervals = np.diff(edge_positions)
+
+    if len(intervals) < 1:
+        return False, None, 0.0
+
+    # For a periodic signal, intervals should form a repeating pattern.
+    # For a square wave they alternate between high-time and low-time, so
+    # check whether the intervals show a regular pattern.
+
+    # Calculate coefficient of variation of intervals
+    mean_interval = np.mean(intervals)
+    std_interval = np.std(intervals)
+
+    if mean_interval < 1:
+        return False, None, 0.0
+
+    cv = std_interval / mean_interval
+
+    # Special case: exactly 1 interval (2 edges, half period of square wave)
+    if len(intervals) == 1:
+        # This represents half a period for a square wave
+        period_samples = 2 * intervals[0]
+        period_seconds = period_samples / sample_rate
+        # Lower confidence since we only have half a period
+        return True, period_seconds, 0.7
+
+    # For highly periodic signals, CV should be low
+    if cv > 0.3:
+        # High variation - check if it's an alternating pattern (square wave)
+        if len(intervals) >= 4:
+            # Check if odd and even intervals are each consistent
+            odd_intervals = intervals[::2]
+            even_intervals = intervals[1::2]
+
+            odd_cv = np.std(odd_intervals) / (np.mean(odd_intervals) + 1e-12)
+            even_cv = np.std(even_intervals) / (np.mean(even_intervals) + 1e-12)
+
+            if odd_cv < 0.2 and even_cv < 0.2:
+                # Alternating pattern detected (square wave)
+                # Period is sum of two consecutive intervals
+                period_samples = np.mean(odd_intervals) + np.mean(even_intervals)
+                period_seconds = period_samples / sample_rate
+                confidence = 1.0 - max(odd_cv, even_cv)
+                return True, period_seconds, float(confidence)
+        elif len(intervals) == 2:
+            # Only 2 intervals - assume alternating pattern for square wave
+            period_samples = intervals[0] + intervals[1]
+            period_seconds = period_samples / sample_rate
+            # Moderate confidence with only 2 intervals
+            return True, period_seconds, 0.75
+
+        return False, None, 0.0
+
+    # Regular, uniform intervals detected. For a square wave with ~50% duty
+    # cycle the full period is two intervals, so assume that initially.
+    period_samples = 2 * mean_interval
+    num_periods = len(data) / period_samples
+
+    # Consider it periodic if we captured at least half a period
+    if num_periods >= 0.5:
+        period_seconds = period_samples / sample_rate
+        confidence = 1.0 - min(cv / 0.3, 0.5)  # Scale confidence by CV
+        return True, period_seconds, float(confidence)
+
+    return False, None, 0.0
+
+
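The odd/even interval split is what lets asymmetric duty cycles pass; a hypothetical check on a 25% duty-cycle pulse train:

    import numpy as np

    period = 40                                    # samples per cycle
    t = np.arange(10 * period)
    pulses = np.where((t % period) < 10, 3.3, 0.0)
    ok, seconds, conf = _detect_edge_periodicity(
        pulses, sample_rate=1_000.0, levels={"low": 0.0, "high": 3.3}
    )
    # Intervals alternate 10/30 samples (overall CV > 0.3), but each parity
    # class is constant, so expect ok=True with seconds ~= 40 / 1000 = 0.04.
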
+@dataclass
+class AnalysisRecommendation:
+    """Recommendation for an analysis to run.
+
+    Attributes:
+        domain: Analysis domain to run.
+        priority: Priority ranking (1=highest).
+        confidence: Expected confidence if run (0.0-1.0).
+        reasoning: Human-readable explanation.
+        estimated_runtime_ms: Estimated runtime in milliseconds.
+        prerequisites_met: Whether all prerequisites are satisfied.
+    """
+
+    domain: AnalysisDomain
+    priority: int  # 1=highest priority
+    confidence: float  # Expected confidence if run
+    reasoning: str
+    estimated_runtime_ms: int = 100
+    prerequisites_met: bool = True
+
+
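Since this is a plain dataclass, a recommendation can also be constructed directly, for example to seed a custom pipeline (hypothetical usage; AnalysisDomain values are taken from oscura.reporting.config, as in the functions below):

    from oscura.reporting.config import AnalysisDomain

    rec = AnalysisRecommendation(
        domain=AnalysisDomain.SPECTRAL,
        priority=2,
        confidence=0.85,
        reasoning="Known periodic capture - run spectral analysis early",
    )
    # estimated_runtime_ms and prerequisites_met fall back to 100 and True.
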
+def recommend_analyses(
+    data: NDArray[np.floating[Any]],
+    sample_rate: float = 1.0,
+    *,
+    time_budget_seconds: float | None = None,
+    confidence_target: float = 0.7,
+    exclude_domains: list[AnalysisDomain] | None = None,
+) -> list[AnalysisRecommendation]:
+    """Recommend which analyses to run based on signal characteristics.
+
+    Uses signal classification, quality metrics, and heuristics to
+    recommend the most valuable analyses for a given signal.
+
+    Args:
+        data: Input signal data.
+        sample_rate: Sample rate in Hz.
+        time_budget_seconds: Optional time budget (prioritizes faster analyses).
+        confidence_target: Minimum expected confidence threshold.
+        exclude_domains: Domains to exclude from recommendations.
+
+    Returns:
+        List of AnalysisRecommendation sorted by priority.
+
+    Example:
+        >>> import numpy as np
+        >>> import oscura as tk
+        >>> # Generate test signal
+        >>> t = np.linspace(0, 1, 10000)
+        >>> signal = np.sin(2 * np.pi * 100 * t)
+        >>> recommendations = tk.recommend_analyses(signal, sample_rate=10000)
+        >>> for rec in recommendations[:3]:
+        ...     print(f"{rec.domain.value}: {rec.reasoning}")
+        waveform: Basic waveform measurements are always applicable
+        statistics: Statistical analysis provides foundational metrics
+        spectral: Spectral analysis reveals frequency content - signal appears periodic
+    """
+    # Avoid circular import
+    from oscura.reporting.config import AnalysisDomain
+
+    recommendations = []
+    exclude = set(exclude_domains or [])
+
+    # Classify signal
+    classification = classify_signal(data, sample_rate)
+    _signal_type = classification.get("signal_type", "unknown")  # Reserved for future use
+    is_digital = classification.get("is_digital", False)
+    is_periodic = classification.get("is_periodic", False)
+    _snr_db = classification.get("snr_db", 20)  # Reserved for future use
+    dominant_freq = classification.get("dominant_frequency")
+
+    # Always recommend these foundational domains
+    if AnalysisDomain.WAVEFORM not in exclude:
+        recommendations.append(
+            AnalysisRecommendation(
+                domain=AnalysisDomain.WAVEFORM,
+                priority=1,
+                confidence=0.95,
+                reasoning="Basic waveform measurements are always applicable",
+                estimated_runtime_ms=50,
+            )
+        )
+
+    if AnalysisDomain.STATISTICS not in exclude:
+        recommendations.append(
+            AnalysisRecommendation(
+                domain=AnalysisDomain.STATISTICS,
+                priority=1,
+                confidence=0.95,
+                reasoning="Statistical analysis provides foundational metrics",
+                estimated_runtime_ms=30,
+            )
+        )
+
+    # Spectral analysis - good for most signals
+    if AnalysisDomain.SPECTRAL not in exclude:
+        spectral_conf = 0.85 if is_periodic else 0.70
+        recommendations.append(
+            AnalysisRecommendation(
+                domain=AnalysisDomain.SPECTRAL,
+                priority=2 if is_periodic else 3,
+                confidence=spectral_conf,
+                reasoning="Spectral analysis reveals frequency content"
+                + (" - signal appears periodic" if is_periodic else ""),
+                estimated_runtime_ms=100,
+            )
+        )
+
+    # Digital-specific analyses
+    if is_digital:
+        if AnalysisDomain.DIGITAL not in exclude:
+            recommendations.append(
+                AnalysisRecommendation(
+                    domain=AnalysisDomain.DIGITAL,
+                    priority=1,
+                    confidence=0.90,
+                    reasoning="Digital signal detected - edge and timing analysis recommended",
+                    estimated_runtime_ms=80,
+                )
+            )
+
+        if AnalysisDomain.TIMING not in exclude:
+            recommendations.append(
+                AnalysisRecommendation(
+                    domain=AnalysisDomain.TIMING,
+                    priority=2,
+                    confidence=0.85,
+                    reasoning="Timing analysis valuable for digital signals",
+                    estimated_runtime_ms=60,
+                )
+            )
+
+        if AnalysisDomain.PROTOCOLS not in exclude and dominant_freq:
+            # Check if frequency matches common baud rates
+            common_bauds = [9600, 19200, 38400, 57600, 115200]
+            if any(abs(dominant_freq * 2 - b) / b < 0.1 for b in common_bauds):
+                recommendations.append(
+                    AnalysisRecommendation(
+                        domain=AnalysisDomain.PROTOCOLS,
+                        priority=3,
+                        confidence=0.70,
+                        reasoning=f"Frequency {dominant_freq:.0f} Hz suggests serial protocol",
+                        estimated_runtime_ms=150,
+                    )
+                )
+
+    # Periodic signal analyses
+    if is_periodic:
+        if AnalysisDomain.JITTER not in exclude and is_digital:
+            recommendations.append(
+                AnalysisRecommendation(
+                    domain=AnalysisDomain.JITTER,
+                    priority=3,
+                    confidence=0.80,
+                    reasoning="Periodic digital signal - jitter analysis applicable",
+                    estimated_runtime_ms=120,
+                )
+            )
+
+        if AnalysisDomain.EYE not in exclude and is_digital:
+            recommendations.append(
+                AnalysisRecommendation(
+                    domain=AnalysisDomain.EYE,
+                    priority=3,
+                    confidence=0.75,
+                    reasoning="Eye diagram analysis for signal integrity assessment",
+                    estimated_runtime_ms=200,
+                )
+            )
+
+    # Pattern analysis - good for complex signals
+    if AnalysisDomain.PATTERNS not in exclude and len(data) > 1000:
+        pattern_conf = 0.70 if is_periodic else 0.50
+        recommendations.append(
+            AnalysisRecommendation(
+                domain=AnalysisDomain.PATTERNS,
+                priority=4,
+                confidence=pattern_conf,
+                reasoning="Pattern analysis can reveal repeating structures",
+                estimated_runtime_ms=500,
+            )
+        )
+
+    # Entropy analysis - useful for random/encrypted data
+    if AnalysisDomain.ENTROPY not in exclude:
+        recommendations.append(
+            AnalysisRecommendation(
+                domain=AnalysisDomain.ENTROPY,
+                priority=5,
+                confidence=0.80,
+                reasoning="Entropy analysis characterizes randomness and complexity",
+                estimated_runtime_ms=100,
+            )
+        )
+
+    # Apply confidence threshold filter
+    recommendations = [r for r in recommendations if r.confidence >= confidence_target]
+
+    # Apply time budget filter if specified
+    if time_budget_seconds is not None:
+        budget_ms = time_budget_seconds * 1000
+        cumulative = 0
+        filtered = []
+        for rec in sorted(recommendations, key=lambda x: (x.priority, -x.confidence)):
+            if cumulative + rec.estimated_runtime_ms <= budget_ms:
+                filtered.append(rec)
+                cumulative += rec.estimated_runtime_ms
+        recommendations = filtered
+
+    # Sort by priority, then by confidence
+    recommendations.sort(key=lambda x: (x.priority, -x.confidence))
+
+    return recommendations
+
+
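The time-budget filter is greedy in (priority, -confidence) order, so a tight budget keeps only cheap, high-priority domains; a hypothetical run:

    import numpy as np
    import oscura as tk

    t = np.linspace(0, 1, 10_000)
    signal = np.sin(2 * np.pi * 100 * t)
    recs = tk.recommend_analyses(signal, sample_rate=10_000, time_budget_seconds=0.1)
    # Estimated runtimes of the kept recommendations sum to at most 100 ms.
    print(sum(r.estimated_runtime_ms for r in recs))
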
+def get_optimal_domain_order(
+    recommendations: list[AnalysisRecommendation],
+) -> list[AnalysisDomain]:
+    """Get optimal order for running analyses.
+
+    Considers dependencies and priorities to determine best order.
+
+    Args:
+        recommendations: List of analysis recommendations.
+
+    Returns:
+        Ordered list of domains to analyze.
+
+    Example:
+        >>> import numpy as np
+        >>> import oscura as tk
+        >>> # Generate test signal
+        >>> t = np.linspace(0, 1, 10000)
+        >>> signal = np.sin(2 * np.pi * 100 * t)
+        >>> recommendations = tk.recommend_analyses(signal, sample_rate=10000)
+        >>> order = tk.get_optimal_domain_order(recommendations)
+        >>> print([d.value for d in order])
+        ['waveform', 'statistics', 'spectral', 'patterns', 'entropy']
+    """
+    # Avoid circular import
+    from oscura.reporting.config import AnalysisDomain
+
+    # Define dependencies
+    dependencies = {
+        AnalysisDomain.JITTER: [AnalysisDomain.TIMING],
+        AnalysisDomain.EYE: [AnalysisDomain.DIGITAL],
+        AnalysisDomain.PROTOCOLS: [AnalysisDomain.DIGITAL],
+        AnalysisDomain.INFERENCE: [AnalysisDomain.PATTERNS],
+    }
+
+    # Build order respecting dependencies
+    ordered = []
+    remaining = {r.domain for r in recommendations}
+
+    while remaining:
+        # Find domains with satisfied dependencies
+        ready = []
+        for domain in remaining:
+            deps = dependencies.get(domain, [])
+            if all(d not in remaining or d in ordered for d in deps):
+                ready.append(domain)
+
+        if not ready:
+            # No ready domains - just add remaining (circular deps)
+            ready = list(remaining)
+
+        # Add highest priority ready domain
+        for rec in sorted(recommendations, key=lambda x: (x.priority, -x.confidence)):
+            if rec.domain in ready:
+                ordered.append(rec.domain)
+                remaining.discard(rec.domain)
+                break
+
+    return ordered
+
+
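One consequence of the dependency table worth noting: whenever both TIMING and JITTER are recommended, TIMING is always scheduled first. A hypothetical check on a synthetic digital capture:

    import numpy as np
    import oscura as tk
    from oscura.reporting.config import AnalysisDomain

    t = np.arange(10_000)
    square = np.where((t // 50) % 2 == 0, 0.0, 3.3)
    recs = tk.recommend_analyses(square, sample_rate=1e6)
    order = tk.get_optimal_domain_order(recs)
    if AnalysisDomain.JITTER in order:
        assert order.index(AnalysisDomain.TIMING) < order.index(AnalysisDomain.JITTER)
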
+__all__ = [
+    "AnalysisRecommendation",
+    "assess_signal_quality",
+    "check_measurement_suitability",
+    "classify_signal",
+    "get_optimal_domain_order",
+    "recommend_analyses",
+    "suggest_measurements",
+]