oscura 0.0.1__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +813 -8
- oscura/__main__.py +392 -0
- oscura/analyzers/__init__.py +37 -0
- oscura/analyzers/digital/__init__.py +177 -0
- oscura/analyzers/digital/bus.py +691 -0
- oscura/analyzers/digital/clock.py +805 -0
- oscura/analyzers/digital/correlation.py +720 -0
- oscura/analyzers/digital/edges.py +632 -0
- oscura/analyzers/digital/extraction.py +413 -0
- oscura/analyzers/digital/quality.py +878 -0
- oscura/analyzers/digital/signal_quality.py +877 -0
- oscura/analyzers/digital/thresholds.py +708 -0
- oscura/analyzers/digital/timing.py +1104 -0
- oscura/analyzers/eye/__init__.py +46 -0
- oscura/analyzers/eye/diagram.py +434 -0
- oscura/analyzers/eye/metrics.py +555 -0
- oscura/analyzers/jitter/__init__.py +83 -0
- oscura/analyzers/jitter/ber.py +333 -0
- oscura/analyzers/jitter/decomposition.py +759 -0
- oscura/analyzers/jitter/measurements.py +413 -0
- oscura/analyzers/jitter/spectrum.py +220 -0
- oscura/analyzers/measurements.py +40 -0
- oscura/analyzers/packet/__init__.py +171 -0
- oscura/analyzers/packet/daq.py +1077 -0
- oscura/analyzers/packet/metrics.py +437 -0
- oscura/analyzers/packet/parser.py +327 -0
- oscura/analyzers/packet/payload.py +2156 -0
- oscura/analyzers/packet/payload_analysis.py +1312 -0
- oscura/analyzers/packet/payload_extraction.py +236 -0
- oscura/analyzers/packet/payload_patterns.py +670 -0
- oscura/analyzers/packet/stream.py +359 -0
- oscura/analyzers/patterns/__init__.py +266 -0
- oscura/analyzers/patterns/clustering.py +1036 -0
- oscura/analyzers/patterns/discovery.py +539 -0
- oscura/analyzers/patterns/learning.py +797 -0
- oscura/analyzers/patterns/matching.py +1091 -0
- oscura/analyzers/patterns/periodic.py +650 -0
- oscura/analyzers/patterns/sequences.py +767 -0
- oscura/analyzers/power/__init__.py +116 -0
- oscura/analyzers/power/ac_power.py +391 -0
- oscura/analyzers/power/basic.py +383 -0
- oscura/analyzers/power/conduction.py +314 -0
- oscura/analyzers/power/efficiency.py +297 -0
- oscura/analyzers/power/ripple.py +356 -0
- oscura/analyzers/power/soa.py +372 -0
- oscura/analyzers/power/switching.py +479 -0
- oscura/analyzers/protocol/__init__.py +150 -0
- oscura/analyzers/protocols/__init__.py +150 -0
- oscura/analyzers/protocols/base.py +500 -0
- oscura/analyzers/protocols/can.py +620 -0
- oscura/analyzers/protocols/can_fd.py +448 -0
- oscura/analyzers/protocols/flexray.py +405 -0
- oscura/analyzers/protocols/hdlc.py +399 -0
- oscura/analyzers/protocols/i2c.py +368 -0
- oscura/analyzers/protocols/i2s.py +296 -0
- oscura/analyzers/protocols/jtag.py +393 -0
- oscura/analyzers/protocols/lin.py +445 -0
- oscura/analyzers/protocols/manchester.py +333 -0
- oscura/analyzers/protocols/onewire.py +501 -0
- oscura/analyzers/protocols/spi.py +334 -0
- oscura/analyzers/protocols/swd.py +325 -0
- oscura/analyzers/protocols/uart.py +393 -0
- oscura/analyzers/protocols/usb.py +495 -0
- oscura/analyzers/signal_integrity/__init__.py +63 -0
- oscura/analyzers/signal_integrity/embedding.py +294 -0
- oscura/analyzers/signal_integrity/equalization.py +370 -0
- oscura/analyzers/signal_integrity/sparams.py +484 -0
- oscura/analyzers/spectral/__init__.py +53 -0
- oscura/analyzers/spectral/chunked.py +273 -0
- oscura/analyzers/spectral/chunked_fft.py +571 -0
- oscura/analyzers/spectral/chunked_wavelet.py +391 -0
- oscura/analyzers/spectral/fft.py +92 -0
- oscura/analyzers/statistical/__init__.py +250 -0
- oscura/analyzers/statistical/checksum.py +923 -0
- oscura/analyzers/statistical/chunked_corr.py +228 -0
- oscura/analyzers/statistical/classification.py +778 -0
- oscura/analyzers/statistical/entropy.py +1113 -0
- oscura/analyzers/statistical/ngrams.py +614 -0
- oscura/analyzers/statistics/__init__.py +119 -0
- oscura/analyzers/statistics/advanced.py +885 -0
- oscura/analyzers/statistics/basic.py +263 -0
- oscura/analyzers/statistics/correlation.py +630 -0
- oscura/analyzers/statistics/distribution.py +298 -0
- oscura/analyzers/statistics/outliers.py +463 -0
- oscura/analyzers/statistics/streaming.py +93 -0
- oscura/analyzers/statistics/trend.py +520 -0
- oscura/analyzers/validation.py +598 -0
- oscura/analyzers/waveform/__init__.py +36 -0
- oscura/analyzers/waveform/measurements.py +943 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +371 -0
- oscura/analyzers/waveform/spectral.py +1689 -0
- oscura/analyzers/waveform/wavelets.py +298 -0
- oscura/api/__init__.py +62 -0
- oscura/api/dsl.py +538 -0
- oscura/api/fluent.py +571 -0
- oscura/api/operators.py +498 -0
- oscura/api/optimization.py +392 -0
- oscura/api/profiling.py +396 -0
- oscura/automotive/__init__.py +73 -0
- oscura/automotive/can/__init__.py +52 -0
- oscura/automotive/can/analysis.py +356 -0
- oscura/automotive/can/checksum.py +250 -0
- oscura/automotive/can/correlation.py +212 -0
- oscura/automotive/can/discovery.py +355 -0
- oscura/automotive/can/message_wrapper.py +375 -0
- oscura/automotive/can/models.py +385 -0
- oscura/automotive/can/patterns.py +381 -0
- oscura/automotive/can/session.py +452 -0
- oscura/automotive/can/state_machine.py +300 -0
- oscura/automotive/can/stimulus_response.py +461 -0
- oscura/automotive/dbc/__init__.py +15 -0
- oscura/automotive/dbc/generator.py +156 -0
- oscura/automotive/dbc/parser.py +146 -0
- oscura/automotive/dtc/__init__.py +30 -0
- oscura/automotive/dtc/database.py +3036 -0
- oscura/automotive/j1939/__init__.py +14 -0
- oscura/automotive/j1939/decoder.py +745 -0
- oscura/automotive/loaders/__init__.py +35 -0
- oscura/automotive/loaders/asc.py +98 -0
- oscura/automotive/loaders/blf.py +77 -0
- oscura/automotive/loaders/csv_can.py +136 -0
- oscura/automotive/loaders/dispatcher.py +136 -0
- oscura/automotive/loaders/mdf.py +331 -0
- oscura/automotive/loaders/pcap.py +132 -0
- oscura/automotive/obd/__init__.py +14 -0
- oscura/automotive/obd/decoder.py +707 -0
- oscura/automotive/uds/__init__.py +48 -0
- oscura/automotive/uds/decoder.py +265 -0
- oscura/automotive/uds/models.py +64 -0
- oscura/automotive/visualization.py +369 -0
- oscura/batch/__init__.py +55 -0
- oscura/batch/advanced.py +627 -0
- oscura/batch/aggregate.py +300 -0
- oscura/batch/analyze.py +139 -0
- oscura/batch/logging.py +487 -0
- oscura/batch/metrics.py +556 -0
- oscura/builders/__init__.py +41 -0
- oscura/builders/signal_builder.py +1131 -0
- oscura/cli/__init__.py +14 -0
- oscura/cli/batch.py +339 -0
- oscura/cli/characterize.py +273 -0
- oscura/cli/compare.py +775 -0
- oscura/cli/decode.py +551 -0
- oscura/cli/main.py +247 -0
- oscura/cli/shell.py +350 -0
- oscura/comparison/__init__.py +66 -0
- oscura/comparison/compare.py +397 -0
- oscura/comparison/golden.py +487 -0
- oscura/comparison/limits.py +391 -0
- oscura/comparison/mask.py +434 -0
- oscura/comparison/trace_diff.py +30 -0
- oscura/comparison/visualization.py +481 -0
- oscura/compliance/__init__.py +70 -0
- oscura/compliance/advanced.py +756 -0
- oscura/compliance/masks.py +363 -0
- oscura/compliance/reporting.py +483 -0
- oscura/compliance/testing.py +298 -0
- oscura/component/__init__.py +38 -0
- oscura/component/impedance.py +365 -0
- oscura/component/reactive.py +598 -0
- oscura/component/transmission_line.py +312 -0
- oscura/config/__init__.py +191 -0
- oscura/config/defaults.py +254 -0
- oscura/config/loader.py +348 -0
- oscura/config/memory.py +271 -0
- oscura/config/migration.py +458 -0
- oscura/config/pipeline.py +1077 -0
- oscura/config/preferences.py +530 -0
- oscura/config/protocol.py +875 -0
- oscura/config/schema.py +713 -0
- oscura/config/settings.py +420 -0
- oscura/config/thresholds.py +599 -0
- oscura/convenience.py +457 -0
- oscura/core/__init__.py +299 -0
- oscura/core/audit.py +457 -0
- oscura/core/backend_selector.py +405 -0
- oscura/core/cache.py +590 -0
- oscura/core/cancellation.py +439 -0
- oscura/core/confidence.py +225 -0
- oscura/core/config.py +506 -0
- oscura/core/correlation.py +216 -0
- oscura/core/cross_domain.py +422 -0
- oscura/core/debug.py +301 -0
- oscura/core/edge_cases.py +541 -0
- oscura/core/exceptions.py +535 -0
- oscura/core/gpu_backend.py +523 -0
- oscura/core/lazy.py +832 -0
- oscura/core/log_query.py +540 -0
- oscura/core/logging.py +931 -0
- oscura/core/logging_advanced.py +952 -0
- oscura/core/memoize.py +171 -0
- oscura/core/memory_check.py +274 -0
- oscura/core/memory_guard.py +290 -0
- oscura/core/memory_limits.py +336 -0
- oscura/core/memory_monitor.py +453 -0
- oscura/core/memory_progress.py +465 -0
- oscura/core/memory_warnings.py +315 -0
- oscura/core/numba_backend.py +362 -0
- oscura/core/performance.py +352 -0
- oscura/core/progress.py +524 -0
- oscura/core/provenance.py +358 -0
- oscura/core/results.py +331 -0
- oscura/core/types.py +504 -0
- oscura/core/uncertainty.py +383 -0
- oscura/discovery/__init__.py +52 -0
- oscura/discovery/anomaly_detector.py +672 -0
- oscura/discovery/auto_decoder.py +415 -0
- oscura/discovery/comparison.py +497 -0
- oscura/discovery/quality_validator.py +528 -0
- oscura/discovery/signal_detector.py +769 -0
- oscura/dsl/__init__.py +73 -0
- oscura/dsl/commands.py +246 -0
- oscura/dsl/interpreter.py +455 -0
- oscura/dsl/parser.py +689 -0
- oscura/dsl/repl.py +172 -0
- oscura/exceptions.py +59 -0
- oscura/exploratory/__init__.py +111 -0
- oscura/exploratory/error_recovery.py +642 -0
- oscura/exploratory/fuzzy.py +513 -0
- oscura/exploratory/fuzzy_advanced.py +786 -0
- oscura/exploratory/legacy.py +831 -0
- oscura/exploratory/parse.py +358 -0
- oscura/exploratory/recovery.py +275 -0
- oscura/exploratory/sync.py +382 -0
- oscura/exploratory/unknown.py +707 -0
- oscura/export/__init__.py +25 -0
- oscura/export/wireshark/README.md +265 -0
- oscura/export/wireshark/__init__.py +47 -0
- oscura/export/wireshark/generator.py +312 -0
- oscura/export/wireshark/lua_builder.py +159 -0
- oscura/export/wireshark/templates/dissector.lua.j2 +92 -0
- oscura/export/wireshark/type_mapping.py +165 -0
- oscura/export/wireshark/validator.py +105 -0
- oscura/exporters/__init__.py +94 -0
- oscura/exporters/csv.py +303 -0
- oscura/exporters/exporters.py +44 -0
- oscura/exporters/hdf5.py +219 -0
- oscura/exporters/html_export.py +701 -0
- oscura/exporters/json_export.py +291 -0
- oscura/exporters/markdown_export.py +367 -0
- oscura/exporters/matlab_export.py +354 -0
- oscura/exporters/npz_export.py +219 -0
- oscura/exporters/spice_export.py +210 -0
- oscura/extensibility/__init__.py +131 -0
- oscura/extensibility/docs.py +752 -0
- oscura/extensibility/extensions.py +1125 -0
- oscura/extensibility/logging.py +259 -0
- oscura/extensibility/measurements.py +485 -0
- oscura/extensibility/plugins.py +414 -0
- oscura/extensibility/registry.py +346 -0
- oscura/extensibility/templates.py +913 -0
- oscura/extensibility/validation.py +651 -0
- oscura/filtering/__init__.py +89 -0
- oscura/filtering/base.py +563 -0
- oscura/filtering/convenience.py +564 -0
- oscura/filtering/design.py +725 -0
- oscura/filtering/filters.py +32 -0
- oscura/filtering/introspection.py +605 -0
- oscura/guidance/__init__.py +24 -0
- oscura/guidance/recommender.py +429 -0
- oscura/guidance/wizard.py +518 -0
- oscura/inference/__init__.py +251 -0
- oscura/inference/active_learning/README.md +153 -0
- oscura/inference/active_learning/__init__.py +38 -0
- oscura/inference/active_learning/lstar.py +257 -0
- oscura/inference/active_learning/observation_table.py +230 -0
- oscura/inference/active_learning/oracle.py +78 -0
- oscura/inference/active_learning/teachers/__init__.py +15 -0
- oscura/inference/active_learning/teachers/simulator.py +192 -0
- oscura/inference/adaptive_tuning.py +453 -0
- oscura/inference/alignment.py +653 -0
- oscura/inference/bayesian.py +943 -0
- oscura/inference/binary.py +1016 -0
- oscura/inference/crc_reverse.py +711 -0
- oscura/inference/logic.py +288 -0
- oscura/inference/message_format.py +1305 -0
- oscura/inference/protocol.py +417 -0
- oscura/inference/protocol_dsl.py +1084 -0
- oscura/inference/protocol_library.py +1230 -0
- oscura/inference/sequences.py +809 -0
- oscura/inference/signal_intelligence.py +1509 -0
- oscura/inference/spectral.py +215 -0
- oscura/inference/state_machine.py +634 -0
- oscura/inference/stream.py +918 -0
- oscura/integrations/__init__.py +59 -0
- oscura/integrations/llm.py +1827 -0
- oscura/jupyter/__init__.py +32 -0
- oscura/jupyter/display.py +268 -0
- oscura/jupyter/magic.py +334 -0
- oscura/loaders/__init__.py +526 -0
- oscura/loaders/binary.py +69 -0
- oscura/loaders/configurable.py +1255 -0
- oscura/loaders/csv.py +26 -0
- oscura/loaders/csv_loader.py +473 -0
- oscura/loaders/hdf5.py +9 -0
- oscura/loaders/hdf5_loader.py +510 -0
- oscura/loaders/lazy.py +370 -0
- oscura/loaders/mmap_loader.py +583 -0
- oscura/loaders/numpy_loader.py +436 -0
- oscura/loaders/pcap.py +432 -0
- oscura/loaders/preprocessing.py +368 -0
- oscura/loaders/rigol.py +287 -0
- oscura/loaders/sigrok.py +321 -0
- oscura/loaders/tdms.py +367 -0
- oscura/loaders/tektronix.py +711 -0
- oscura/loaders/validation.py +584 -0
- oscura/loaders/vcd.py +464 -0
- oscura/loaders/wav.py +233 -0
- oscura/math/__init__.py +45 -0
- oscura/math/arithmetic.py +824 -0
- oscura/math/interpolation.py +413 -0
- oscura/onboarding/__init__.py +39 -0
- oscura/onboarding/help.py +498 -0
- oscura/onboarding/tutorials.py +405 -0
- oscura/onboarding/wizard.py +466 -0
- oscura/optimization/__init__.py +19 -0
- oscura/optimization/parallel.py +440 -0
- oscura/optimization/search.py +532 -0
- oscura/pipeline/__init__.py +43 -0
- oscura/pipeline/base.py +338 -0
- oscura/pipeline/composition.py +242 -0
- oscura/pipeline/parallel.py +448 -0
- oscura/pipeline/pipeline.py +375 -0
- oscura/pipeline/reverse_engineering.py +1119 -0
- oscura/plugins/__init__.py +122 -0
- oscura/plugins/base.py +272 -0
- oscura/plugins/cli.py +497 -0
- oscura/plugins/discovery.py +411 -0
- oscura/plugins/isolation.py +418 -0
- oscura/plugins/lifecycle.py +959 -0
- oscura/plugins/manager.py +493 -0
- oscura/plugins/registry.py +421 -0
- oscura/plugins/versioning.py +372 -0
- oscura/py.typed +0 -0
- oscura/quality/__init__.py +65 -0
- oscura/quality/ensemble.py +740 -0
- oscura/quality/explainer.py +338 -0
- oscura/quality/scoring.py +616 -0
- oscura/quality/warnings.py +456 -0
- oscura/reporting/__init__.py +248 -0
- oscura/reporting/advanced.py +1234 -0
- oscura/reporting/analyze.py +448 -0
- oscura/reporting/argument_preparer.py +596 -0
- oscura/reporting/auto_report.py +507 -0
- oscura/reporting/batch.py +615 -0
- oscura/reporting/chart_selection.py +223 -0
- oscura/reporting/comparison.py +330 -0
- oscura/reporting/config.py +615 -0
- oscura/reporting/content/__init__.py +39 -0
- oscura/reporting/content/executive.py +127 -0
- oscura/reporting/content/filtering.py +191 -0
- oscura/reporting/content/minimal.py +257 -0
- oscura/reporting/content/verbosity.py +162 -0
- oscura/reporting/core.py +508 -0
- oscura/reporting/core_formats/__init__.py +17 -0
- oscura/reporting/core_formats/multi_format.py +210 -0
- oscura/reporting/engine.py +836 -0
- oscura/reporting/export.py +366 -0
- oscura/reporting/formatting/__init__.py +129 -0
- oscura/reporting/formatting/emphasis.py +81 -0
- oscura/reporting/formatting/numbers.py +403 -0
- oscura/reporting/formatting/standards.py +55 -0
- oscura/reporting/formatting.py +466 -0
- oscura/reporting/html.py +578 -0
- oscura/reporting/index.py +590 -0
- oscura/reporting/multichannel.py +296 -0
- oscura/reporting/output.py +379 -0
- oscura/reporting/pdf.py +373 -0
- oscura/reporting/plots.py +731 -0
- oscura/reporting/pptx_export.py +360 -0
- oscura/reporting/renderers/__init__.py +11 -0
- oscura/reporting/renderers/pdf.py +94 -0
- oscura/reporting/sections.py +471 -0
- oscura/reporting/standards.py +680 -0
- oscura/reporting/summary_generator.py +368 -0
- oscura/reporting/tables.py +397 -0
- oscura/reporting/template_system.py +724 -0
- oscura/reporting/templates/__init__.py +15 -0
- oscura/reporting/templates/definition.py +205 -0
- oscura/reporting/templates/index.html +649 -0
- oscura/reporting/templates/index.md +173 -0
- oscura/schemas/__init__.py +158 -0
- oscura/schemas/bus_configuration.json +322 -0
- oscura/schemas/device_mapping.json +182 -0
- oscura/schemas/packet_format.json +418 -0
- oscura/schemas/protocol_definition.json +363 -0
- oscura/search/__init__.py +16 -0
- oscura/search/anomaly.py +292 -0
- oscura/search/context.py +149 -0
- oscura/search/pattern.py +160 -0
- oscura/session/__init__.py +34 -0
- oscura/session/annotations.py +289 -0
- oscura/session/history.py +313 -0
- oscura/session/session.py +445 -0
- oscura/streaming/__init__.py +43 -0
- oscura/streaming/chunked.py +611 -0
- oscura/streaming/progressive.py +393 -0
- oscura/streaming/realtime.py +622 -0
- oscura/testing/__init__.py +54 -0
- oscura/testing/synthetic.py +808 -0
- oscura/triggering/__init__.py +68 -0
- oscura/triggering/base.py +229 -0
- oscura/triggering/edge.py +353 -0
- oscura/triggering/pattern.py +344 -0
- oscura/triggering/pulse.py +581 -0
- oscura/triggering/window.py +453 -0
- oscura/ui/__init__.py +48 -0
- oscura/ui/formatters.py +526 -0
- oscura/ui/progressive_display.py +340 -0
- oscura/utils/__init__.py +99 -0
- oscura/utils/autodetect.py +338 -0
- oscura/utils/buffer.py +389 -0
- oscura/utils/lazy.py +407 -0
- oscura/utils/lazy_imports.py +147 -0
- oscura/utils/memory.py +836 -0
- oscura/utils/memory_advanced.py +1326 -0
- oscura/utils/memory_extensions.py +465 -0
- oscura/utils/progressive.py +352 -0
- oscura/utils/windowing.py +362 -0
- oscura/visualization/__init__.py +321 -0
- oscura/visualization/accessibility.py +526 -0
- oscura/visualization/annotations.py +374 -0
- oscura/visualization/axis_scaling.py +305 -0
- oscura/visualization/colors.py +453 -0
- oscura/visualization/digital.py +337 -0
- oscura/visualization/eye.py +420 -0
- oscura/visualization/histogram.py +281 -0
- oscura/visualization/interactive.py +858 -0
- oscura/visualization/jitter.py +702 -0
- oscura/visualization/keyboard.py +394 -0
- oscura/visualization/layout.py +365 -0
- oscura/visualization/optimization.py +1028 -0
- oscura/visualization/palettes.py +446 -0
- oscura/visualization/plot.py +92 -0
- oscura/visualization/power.py +290 -0
- oscura/visualization/power_extended.py +626 -0
- oscura/visualization/presets.py +467 -0
- oscura/visualization/protocols.py +932 -0
- oscura/visualization/render.py +207 -0
- oscura/visualization/rendering.py +444 -0
- oscura/visualization/reverse_engineering.py +791 -0
- oscura/visualization/signal_integrity.py +808 -0
- oscura/visualization/specialized.py +553 -0
- oscura/visualization/spectral.py +811 -0
- oscura/visualization/styles.py +381 -0
- oscura/visualization/thumbnails.py +311 -0
- oscura/visualization/time_axis.py +351 -0
- oscura/visualization/waveform.py +367 -0
- oscura/workflow/__init__.py +13 -0
- oscura/workflow/dag.py +377 -0
- oscura/workflows/__init__.py +58 -0
- oscura/workflows/compliance.py +280 -0
- oscura/workflows/digital.py +272 -0
- oscura/workflows/multi_trace.py +502 -0
- oscura/workflows/power.py +178 -0
- oscura/workflows/protocol.py +492 -0
- oscura/workflows/reverse_engineering.py +639 -0
- oscura/workflows/signal_integrity.py +227 -0
- oscura-0.1.0.dist-info/METADATA +300 -0
- oscura-0.1.0.dist-info/RECORD +463 -0
- oscura-0.1.0.dist-info/entry_points.txt +2 -0
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/licenses/LICENSE +1 -1
- oscura-0.0.1.dist-info/METADATA +0 -63
- oscura-0.0.1.dist-info/RECORD +0 -5
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,836 @@
|
|
|
1
|
+
"""Analysis Engine for orchestrating comprehensive analysis execution.
|
|
2
|
+
|
|
3
|
+
This module provides the AnalysisEngine class that orchestrates running all
|
|
4
|
+
applicable analyses on input data, handling progress tracking, timeouts,
|
|
5
|
+
and error collection.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import importlib
|
|
11
|
+
import inspect
|
|
12
|
+
import logging
|
|
13
|
+
import time
|
|
14
|
+
import traceback
|
|
15
|
+
import types
|
|
16
|
+
from collections.abc import Callable
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
import numpy as np
|
|
21
|
+
|
|
22
|
+
from oscura.reporting.argument_preparer import ArgumentPreparer
|
|
23
|
+
from oscura.reporting.config import (
|
|
24
|
+
ANALYSIS_CAPABILITIES,
|
|
25
|
+
AnalysisConfig,
|
|
26
|
+
AnalysisDomain,
|
|
27
|
+
AnalysisError,
|
|
28
|
+
InputType,
|
|
29
|
+
ProgressInfo,
|
|
30
|
+
get_available_analyses,
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
logger = logging.getLogger(__name__)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
# Functions that require context-specific parameters that cannot be auto-detected.
# NOTE(review): consumption of this set is outside the visible chunk — presumably
# the engine skips these during automatic execution; confirm against run().
NON_INFERRABLE_FUNCTIONS: set[str] = {
    # INFERENCE domain - require specific data types
    "oscura.inference.protocol_dsl.decode_protocol",
    "oscura.inference.protocol_dsl.match_pattern",
    "oscura.inference.protocol_dsl.validate_message",
    # PACKET domain - require PacketInfo objects
    "oscura.analyzers.packet.timing.analyze_inter_packet_timing",
    "oscura.analyzers.packet.timing.detect_bursts",
    # POWER domain - require voltage+current pairs
    "oscura.analyzers.power.consumption.calculate_power",
    "oscura.analyzers.power.consumption.analyze_power_efficiency",
}
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class AnalysisEngine:
|
|
52
|
+
"""Engine for orchestrating comprehensive analysis execution.
|
|
53
|
+
|
|
54
|
+
The AnalysisEngine accepts input data (from file or in-memory), detects
|
|
55
|
+
the input type, determines applicable analysis domains, and executes
|
|
56
|
+
all relevant analysis functions with progress tracking and error handling.
|
|
57
|
+
|
|
58
|
+
Example:
|
|
59
|
+
>>> from oscura.reporting import AnalysisEngine, AnalysisConfig
|
|
60
|
+
>>> config = AnalysisConfig(timeout_per_analysis=30.0)
|
|
61
|
+
>>> engine = AnalysisEngine(config)
|
|
62
|
+
>>> result = engine.run(input_path=Path("data.wfm"))
|
|
63
|
+
>>> print(f"Ran {result['stats']['total_analyses']} analyses")
|
|
64
|
+
>>> print(f"Success rate: {result['stats']['success_rate']:.1f}%")
|
|
65
|
+
"""
|
|
66
|
+
|
|
67
|
+
def __init__(self, config: AnalysisConfig | None = None) -> None:
    """Initialize the analysis engine.

    Args:
        config: Analysis configuration. If None (or falsy), defaults are used.
    """
    # Falsy configs fall back to a freshly-constructed default configuration,
    # mirroring the `config or AnalysisConfig()` idiom.
    self.config = AnalysisConfig() if not config else config
    # Wall-clock start of the current run(); set properly when run() begins.
    self._start_time = 0.0
    # Remembered input path for the current run (None for in-memory data).
    self._input_path: Path | None = None
    # Lazily created in run(); prepares per-function call arguments.
    self._arg_preparer: ArgumentPreparer | None = None
|
|
77
|
+
|
|
78
|
+
def detect_input_type(self, input_path: Path | None, data: Any) -> InputType:
    """Detect input type from file path or data characteristics.

    The file extension (when a path is supplied) takes precedence; otherwise
    the in-memory object is duck-typed.

    Args:
        input_path: Path to input file (None if in-memory data).
        data: Input data object.

    Returns:
        Detected input type.

    Raises:
        ValueError: If input type cannot be determined.
    """
    # Extension-based detection: build a suffix -> InputType table once per call.
    if input_path is not None:
        suffix_table: dict[str, InputType] = {}
        for suffixes, kind in (
            # Waveform formats
            ({".wfm", ".csv", ".npz", ".h5", ".hdf5", ".wav", ".tdms"}, InputType.WAVEFORM),
            # Digital formats
            ({".vcd", ".sr"}, InputType.DIGITAL),
            # Packet formats
            ({".pcap", ".pcapng"}, InputType.PCAP),
            # Binary formats
            ({".bin", ".raw"}, InputType.BINARY),
            # S-parameter/Touchstone formats (.s1p .. .s8p)
            ({f".s{ports}p" for ports in range(1, 9)}, InputType.SPARAMS),
        ):
            for sfx in suffixes:
                suffix_table[sfx] = kind
        matched = suffix_table.get(input_path.suffix.lower())
        if matched is not None:
            return matched
        # Unrecognized extension: fall through to object-based detection.

    # Duck-typed detection from the data object itself.
    if hasattr(data, "s_matrix") and hasattr(data, "frequencies"):
        # SParameterData
        return InputType.SPARAMS
    if hasattr(data, "data") and hasattr(data, "metadata"):
        # WaveformTrace or DigitalTrace: a truthy metadata.is_digital flags digital.
        if getattr(data.metadata, "is_digital", False):
            return InputType.DIGITAL
        return InputType.WAVEFORM
    if isinstance(data, (bytes, bytearray)):
        return InputType.BINARY
    if isinstance(data, list):
        # Assume packet list
        return InputType.PACKETS
    if isinstance(data, np.ndarray):
        # Assume waveform
        return InputType.WAVEFORM

    raise ValueError("Unable to determine input type from path or data characteristics")
|
|
130
|
+
|
|
131
|
+
def run(
|
|
132
|
+
self,
|
|
133
|
+
input_path: Path | None = None,
|
|
134
|
+
data: Any = None,
|
|
135
|
+
progress_callback: Callable[[ProgressInfo], None] | None = None,
|
|
136
|
+
) -> dict[str, Any]:
|
|
137
|
+
"""Run comprehensive analysis on input data.
|
|
138
|
+
|
|
139
|
+
Args:
|
|
140
|
+
input_path: Path to input file (or None for in-memory data).
|
|
141
|
+
data: Input data object (or None to load from input_path).
|
|
142
|
+
progress_callback: Optional callback for progress updates.
|
|
143
|
+
|
|
144
|
+
Returns:
|
|
145
|
+
Dictionary with keys:
|
|
146
|
+
- 'results': Dict mapping AnalysisDomain to analysis results
|
|
147
|
+
- 'errors': List of AnalysisError objects
|
|
148
|
+
- 'stats': Execution statistics dict
|
|
149
|
+
|
|
150
|
+
Raises:
|
|
151
|
+
ValueError: If neither input_path nor data provided.
|
|
152
|
+
FileNotFoundError: If input_path doesn't exist.
|
|
153
|
+
|
|
154
|
+
Example:
|
|
155
|
+
>>> def progress(info: ProgressInfo):
|
|
156
|
+
... print(f"{info.phase}: {info.percent:.1f}%")
|
|
157
|
+
>>> result = engine.run(input_path=Path("data.wfm"), progress_callback=progress)
|
|
158
|
+
"""
|
|
159
|
+
if input_path is None and data is None:
|
|
160
|
+
raise ValueError("Must provide either input_path or data")
|
|
161
|
+
|
|
162
|
+
self._start_time = time.time()
|
|
163
|
+
self._input_path = input_path
|
|
164
|
+
|
|
165
|
+
# Initialize argument preparer with input path and default sample rate
|
|
166
|
+
default_sample_rate = self.config.default_sample_rate or 1e6
|
|
167
|
+
self._arg_preparer = ArgumentPreparer(
|
|
168
|
+
input_path=input_path, default_sample_rate=default_sample_rate
|
|
169
|
+
)
|
|
170
|
+
|
|
171
|
+
# Check available memory and adjust parallelism if needed
|
|
172
|
+
from oscura.core.memory_guard import check_memory_available
|
|
173
|
+
|
|
174
|
+
min_required_mb = 500 # Minimum 500MB needed for analysis
|
|
175
|
+
if not check_memory_available(min_required_mb):
|
|
176
|
+
logger.warning(
|
|
177
|
+
f"Low memory available (< {min_required_mb} MB). "
|
|
178
|
+
f"Reducing parallel workers to conserve memory."
|
|
179
|
+
)
|
|
180
|
+
# Temporarily reduce parallelism to conserve memory
|
|
181
|
+
self.config.parallel_domains = False
|
|
182
|
+
|
|
183
|
+
# Phase 1: Load data
|
|
184
|
+
if progress_callback:
|
|
185
|
+
progress_callback(
|
|
186
|
+
ProgressInfo(
|
|
187
|
+
phase="loading",
|
|
188
|
+
domain=None,
|
|
189
|
+
function=None,
|
|
190
|
+
percent=0.0,
|
|
191
|
+
message="Loading input data",
|
|
192
|
+
elapsed_seconds=0.0,
|
|
193
|
+
estimated_remaining_seconds=None,
|
|
194
|
+
)
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
if data is None:
|
|
198
|
+
if input_path is None or not input_path.exists():
|
|
199
|
+
raise FileNotFoundError(f"Input file not found: {input_path}")
|
|
200
|
+
|
|
201
|
+
# Load using oscura loaders
|
|
202
|
+
from oscura.loaders import load
|
|
203
|
+
|
|
204
|
+
data = load(input_path)
|
|
205
|
+
|
|
206
|
+
# Phase 2: Detect input type
|
|
207
|
+
input_type = self.detect_input_type(input_path, data)
|
|
208
|
+
|
|
209
|
+
if progress_callback:
|
|
210
|
+
progress_callback(
|
|
211
|
+
ProgressInfo(
|
|
212
|
+
phase="detecting",
|
|
213
|
+
domain=None,
|
|
214
|
+
function=None,
|
|
215
|
+
percent=5.0,
|
|
216
|
+
message=f"Detected input type: {input_type.value}",
|
|
217
|
+
elapsed_seconds=time.time() - self._start_time,
|
|
218
|
+
estimated_remaining_seconds=None,
|
|
219
|
+
)
|
|
220
|
+
)
|
|
221
|
+
|
|
222
|
+
# Phase 3: Determine applicable domains
|
|
223
|
+
applicable_domains = get_available_analyses(input_type)
|
|
224
|
+
|
|
225
|
+
# Filter by configuration
|
|
226
|
+
enabled_domains = [d for d in applicable_domains if self.config.is_domain_enabled(d)]
|
|
227
|
+
|
|
228
|
+
if progress_callback:
|
|
229
|
+
progress_callback(
|
|
230
|
+
ProgressInfo(
|
|
231
|
+
phase="planning",
|
|
232
|
+
domain=None,
|
|
233
|
+
function=None,
|
|
234
|
+
percent=10.0,
|
|
235
|
+
message=f"Planning analysis across {len(enabled_domains)} domains",
|
|
236
|
+
elapsed_seconds=time.time() - self._start_time,
|
|
237
|
+
estimated_remaining_seconds=None,
|
|
238
|
+
)
|
|
239
|
+
)
|
|
240
|
+
|
|
241
|
+
# Phase 4: Execute analyses
|
|
242
|
+
results: dict[AnalysisDomain, dict[str, Any]] = {}
|
|
243
|
+
errors: list[AnalysisError] = []
|
|
244
|
+
|
|
245
|
+
total_domains = len(enabled_domains)
|
|
246
|
+
|
|
247
|
+
# Execute domains in parallel if enabled and multiple domains exist
|
|
248
|
+
if self.config.parallel_domains and len(enabled_domains) > 1:
|
|
249
|
+
import concurrent.futures
|
|
250
|
+
|
|
251
|
+
# Use ThreadPoolExecutor with bounded workers from config
|
|
252
|
+
max_workers = min(self.config.max_parallel_workers, len(enabled_domains))
|
|
253
|
+
|
|
254
|
+
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
|
|
255
|
+
# Submit all domain executions
|
|
256
|
+
futures = {
|
|
257
|
+
executor.submit(self._execute_domain, domain, data): domain
|
|
258
|
+
for domain in enabled_domains
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
# Process results as they complete
|
|
262
|
+
for completed, future in enumerate(concurrent.futures.as_completed(futures), 1):
|
|
263
|
+
domain = futures[future]
|
|
264
|
+
domain_percent = 10.0 + (completed / total_domains) * 80.0
|
|
265
|
+
|
|
266
|
+
if progress_callback:
|
|
267
|
+
progress_callback(
|
|
268
|
+
ProgressInfo(
|
|
269
|
+
phase="analyzing",
|
|
270
|
+
domain=domain,
|
|
271
|
+
function=None,
|
|
272
|
+
percent=domain_percent,
|
|
273
|
+
message=f"Completed domain: {domain.value}",
|
|
274
|
+
elapsed_seconds=time.time() - self._start_time,
|
|
275
|
+
estimated_remaining_seconds=None,
|
|
276
|
+
)
|
|
277
|
+
)
|
|
278
|
+
|
|
279
|
+
try:
|
|
280
|
+
# Retrieve result with timeout
|
|
281
|
+
timeout_seconds = self.config.timeout_per_analysis or 30.0
|
|
282
|
+
domain_results, domain_errors = future.result(timeout=timeout_seconds * 10)
|
|
283
|
+
if domain_results:
|
|
284
|
+
results[domain] = domain_results
|
|
285
|
+
errors.extend(domain_errors)
|
|
286
|
+
except concurrent.futures.TimeoutError:
|
|
287
|
+
logger.error(f"Domain {domain.value} exceeded timeout")
|
|
288
|
+
errors.append(
|
|
289
|
+
AnalysisError(
|
|
290
|
+
domain=domain,
|
|
291
|
+
function=f"{domain.value}.*",
|
|
292
|
+
error_type="TimeoutError",
|
|
293
|
+
error_message="Domain execution exceeded timeout",
|
|
294
|
+
traceback=None,
|
|
295
|
+
duration_ms=timeout_seconds * 10 * 1000,
|
|
296
|
+
)
|
|
297
|
+
)
|
|
298
|
+
except Exception as e:
|
|
299
|
+
logger.error(f"Domain {domain.value} failed: {e}")
|
|
300
|
+
errors.append(
|
|
301
|
+
AnalysisError(
|
|
302
|
+
domain=domain,
|
|
303
|
+
function=f"{domain.value}.*",
|
|
304
|
+
error_type=type(e).__name__,
|
|
305
|
+
error_message=str(e),
|
|
306
|
+
traceback=traceback.format_exc(),
|
|
307
|
+
duration_ms=0.0,
|
|
308
|
+
)
|
|
309
|
+
)
|
|
310
|
+
else:
|
|
311
|
+
# Sequential fallback (existing code)
|
|
312
|
+
for idx, domain in enumerate(enabled_domains):
|
|
313
|
+
domain_percent = 10.0 + (idx / total_domains) * 80.0
|
|
314
|
+
|
|
315
|
+
if progress_callback:
|
|
316
|
+
progress_callback(
|
|
317
|
+
ProgressInfo(
|
|
318
|
+
phase="analyzing",
|
|
319
|
+
domain=domain,
|
|
320
|
+
function=None,
|
|
321
|
+
percent=domain_percent,
|
|
322
|
+
message=f"Analyzing domain: {domain.value}",
|
|
323
|
+
elapsed_seconds=time.time() - self._start_time,
|
|
324
|
+
estimated_remaining_seconds=None,
|
|
325
|
+
)
|
|
326
|
+
)
|
|
327
|
+
|
|
328
|
+
domain_results, domain_errors = self._execute_domain(domain, data)
|
|
329
|
+
if domain_results:
|
|
330
|
+
results[domain] = domain_results
|
|
331
|
+
errors.extend(domain_errors)
|
|
332
|
+
|
|
333
|
+
# Phase 5: Complete
|
|
334
|
+
total_duration = time.time() - self._start_time
|
|
335
|
+
|
|
336
|
+
if progress_callback:
|
|
337
|
+
progress_callback(
|
|
338
|
+
ProgressInfo(
|
|
339
|
+
phase="complete",
|
|
340
|
+
domain=None,
|
|
341
|
+
function=None,
|
|
342
|
+
percent=100.0,
|
|
343
|
+
message="Analysis complete",
|
|
344
|
+
elapsed_seconds=total_duration,
|
|
345
|
+
estimated_remaining_seconds=0.0,
|
|
346
|
+
)
|
|
347
|
+
)
|
|
348
|
+
|
|
349
|
+
# Calculate statistics
|
|
350
|
+
total_analyses = sum(len(dr) for dr in results.values())
|
|
351
|
+
successful_analyses = sum(
|
|
352
|
+
1 for dr in results.values() for v in dr.values() if not isinstance(v, Exception)
|
|
353
|
+
)
|
|
354
|
+
failed_analyses = len(errors)
|
|
355
|
+
|
|
356
|
+
stats = {
|
|
357
|
+
"input_type": input_type.value,
|
|
358
|
+
"total_domains": len(enabled_domains),
|
|
359
|
+
"total_analyses": total_analyses,
|
|
360
|
+
"successful_analyses": successful_analyses,
|
|
361
|
+
"failed_analyses": failed_analyses,
|
|
362
|
+
"success_rate": (successful_analyses / total_analyses * 100.0)
|
|
363
|
+
if total_analyses > 0
|
|
364
|
+
else 0.0,
|
|
365
|
+
"duration_seconds": total_duration,
|
|
366
|
+
}
|
|
367
|
+
|
|
368
|
+
return {
|
|
369
|
+
"results": results,
|
|
370
|
+
"errors": errors,
|
|
371
|
+
"stats": stats,
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
def _execute_domain(
    self, domain: AnalysisDomain, data: Any
) -> tuple[dict[str, Any], list[AnalysisError]]:
    """Execute all analyses for a specific domain.

    Imports every module registered for the domain, discovers its public
    functions, and executes each one at most once. Errors are collected
    rather than raised so one failing analysis cannot abort the run
    (unless ``continue_on_error`` is disabled).

    Args:
        domain: Analysis domain to execute.
        data: Input data object.

    Returns:
        Tuple of (results_dict, errors_list).
    """
    results: dict[str, Any] = {}
    errors: list[AnalysisError] = []

    # Preprocess data for specific domains (e.g. EYE needs an EyeDiagram).
    data = self._preprocess_for_domain(domain, data)

    # Get domain capabilities
    cap = ANALYSIS_CAPABILITIES.get(domain, {})
    module_names = cap.get("modules", [])

    # Fallback to old single-module "module" key format
    if not module_names:
        single_module = cap.get("module", "")
        if single_module:
            module_names = [single_module]

    if not module_names:
        logger.debug(f"No modules configured for domain {domain.value}")
        return results, errors

    # Get domain-specific config; a per-domain timeout overrides the global one.
    domain_config = self.config.get_domain_config(domain)
    timeout = domain_config.timeout or self.config.timeout_per_analysis

    # Track executed functions to prevent duplicates across modules
    executed_functions: set[str] = set()

    # Iterate through all modules for this domain
    for module_name in module_names:
        try:
            module = importlib.import_module(module_name)
        except ImportError as e:
            logger.warning(f"Failed to import module {module_name}: {e}")
            # BUG FIX: previously the import error was recorded only when
            # continue_on_error was False, and execution continued in either
            # case — so with continue_on_error=True the failure was silently
            # dropped, and with it False the run did not actually stop.
            # Record the error unconditionally and honor continue_on_error,
            # matching the function-level handler below.
            errors.append(
                AnalysisError(
                    domain=domain,
                    function=module_name,
                    error_type="ImportError",
                    error_message=str(e),
                    traceback=traceback.format_exc(),
                    duration_ms=0.0,
                )
            )
            if not self.config.continue_on_error:
                return results, errors
            continue

        # Discover public functions in the module
        for func_name, func_obj in inspect.getmembers(module):
            # Skip private functions and non-functions
            if func_name.startswith("_") or not inspect.isfunction(func_obj):
                continue

            # Skip functions not defined in this module (imported from elsewhere)
            if func_obj.__module__ != module_name:
                continue

            # Skip if already executed (prevent duplicates)
            func_path = f"{module_name}.{func_name}"
            if func_path in executed_functions:
                logger.debug(f"Skipping duplicate function: {func_path}")
                continue
            executed_functions.add(func_path)

            # Execute the function; failures become AnalysisError records.
            try:
                result = self._execute_function(module_name, func_name, data, timeout)
                results[f"{module_name}.{func_name}"] = result
            except Exception as e:
                error = AnalysisError(
                    domain=domain,
                    function=f"{module_name}.{func_name}",
                    error_type=type(e).__name__,
                    error_message=str(e),
                    traceback=traceback.format_exc(),
                    duration_ms=0.0,
                )
                errors.append(error)

                if not self.config.continue_on_error:
                    # Stop execution for this domain
                    return results, errors

    return results, errors
|
|
469
|
+
|
|
470
|
+
def _preprocess_for_domain(self, domain: AnalysisDomain, data: Any) -> Any:
    """Convert input data into the form a domain's analyzers expect.

    Most domains consume the raw input directly; the EYE domain is the
    exception and needs an EyeDiagram built from waveform data.

    Args:
        domain: Target analysis domain.
        data: Input data object.

    Returns:
        Preprocessed data suitable for the domain.
    """
    # Everything except EYE passes through untouched.
    if domain != AnalysisDomain.EYE:
        return data
    return self._preprocess_for_eye_domain(data)
|
|
489
|
+
|
|
490
|
+
def _get_effective_sample_rate(self, data: Any, context: str = "general") -> float:
    """Resolve the sample rate to use for *data*.

    Resolution order:
      1. ``data.metadata.sample_rate`` when present and positive.
      2. ``AnalysisConfig.get_effective_sample_rate`` (config default or a
         context-appropriate constant).

    Args:
        data: Input data object (may carry ``.metadata.sample_rate``).
        context: Analysis context used to pick a default.
            Options: "general" (1 MHz), "highspeed" (1 GHz), "binary" (1 Hz).

    Returns:
        Effective sample rate in Hz.

    Note:
        A debug message is logged when falling back to a default, since
        accurate time-domain analysis wants the rate in the data metadata.
    """
    # Prefer the rate recorded on the data itself.
    metadata = getattr(data, "metadata", None)
    declared_rate = getattr(metadata, "sample_rate", None)
    if declared_rate is not None and declared_rate > 0:
        return float(declared_rate)

    # Otherwise defer to the configuration, which applies its own priority
    # between an explicit default and the per-context constant.
    fallback_rate = self.config.get_effective_sample_rate(
        data_sample_rate=declared_rate,
        context=context,
    )

    # Falling back means the metadata was missing or unusable; surface that.
    logger.debug(
        f"Using default sample rate {fallback_rate:.2e} Hz (context: {context}). "
        f"For accurate analysis, provide sample_rate in data metadata."
    )

    return fallback_rate
|
|
531
|
+
|
|
532
|
+
def _preprocess_for_eye_domain(self, data: Any) -> Any:
    """Preprocess data for eye diagram analysis.

    Attempts to generate an EyeDiagram from waveform data using
    automatic unit interval detection via FFT-based period detection
    with fallback to zero-crossing analysis.

    Args:
        data: Input waveform data.

    Returns:
        EyeDiagram object if successful, original data otherwise.
    """
    # Check if already an EyeDiagram (duck-typed on its two known attributes)
    if hasattr(data, "samples_per_ui") and hasattr(data, "time_axis"):
        return data

    # Try to extract waveform data
    if hasattr(data, "data") and hasattr(data, "metadata"):
        # WaveformTrace-like: samples plus metadata carrying the sample rate
        raw_data = data.data
        sample_rate = getattr(data.metadata, "sample_rate", None)
    elif isinstance(data, np.ndarray):
        raw_data = data
        sample_rate = None
    else:
        # Can't preprocess, return as-is
        return data

    if raw_data is None or len(raw_data) == 0:
        return data

    try:
        # Deferred imports keep eye-diagram machinery optional at load time.
        from oscura.analyzers.eye.diagram import generate_eye
        from oscura.core.types import TraceMetadata, WaveformTrace

        # Get effective sample rate using config-aware method
        # Use "highspeed" context for eye diagram (typically high-speed serial)
        if sample_rate is None or sample_rate <= 0:
            sample_rate = self._get_effective_sample_rate(data, context="highspeed")

        # Estimate unit interval using FFT-based period detection
        unit_interval = self._detect_unit_interval_fft(raw_data, sample_rate)

        # If FFT detection fails, try zero-crossing analysis
        if unit_interval is None:
            unit_interval = self._detect_unit_interval_zero_crossing(raw_data, sample_rate)

        # If both methods fail, use default fallback
        if unit_interval is None:
            # Fallback: assume 100 UI in the data
            unit_interval = len(raw_data) / sample_rate / 100
            logger.debug("Using default unit interval fallback (100 UI in data)")

        # Ensure unit interval is reasonable
        # NOTE(review): for very short captures min_ui can exceed max_ui, in
        # which case np.clip returns max_ui — confirm that is acceptable.
        min_ui = 10 / sample_rate  # At least 10 samples per UI
        max_ui = len(raw_data) / sample_rate / 10  # At least 10 UI in data
        unit_interval = np.clip(unit_interval, min_ui, max_ui)

        # Create a WaveformTrace if we only have raw data
        if not hasattr(data, "data"):
            metadata = TraceMetadata(sample_rate=sample_rate)
            trace = WaveformTrace(data=raw_data.astype(np.float64), metadata=metadata)
        else:
            trace = data

        # Generate eye diagram (2 UI wide, with histogram for metrics)
        eye_diagram = generate_eye(
            trace=trace,
            unit_interval=unit_interval,
            n_ui=2,
            generate_histogram=True,
        )

        logger.debug(
            f"Generated eye diagram: {eye_diagram.n_traces} traces, "
            f"{eye_diagram.samples_per_ui} samples/UI"
        )
        return eye_diagram

    except Exception as e:
        # Best-effort: eye preprocessing must never break the wider run.
        logger.debug(f"Could not generate eye diagram: {e}")
        # Return original data if preprocessing fails
        return data
|
|
616
|
+
|
|
617
|
+
def _detect_unit_interval_fft(
    self, raw_data: np.ndarray[Any, Any], sample_rate: float
) -> float | None:
    """Detect unit interval via the dominant FFT frequency.

    Takes the real FFT of the mean-removed waveform, locates the
    strongest non-DC bin, and returns its period as the unit interval.

    Args:
        raw_data: Input waveform samples.
        sample_rate: Sample rate in Hz.

    Returns:
        Estimated unit interval in seconds, or None if detection fails.
    """
    try:
        # A DC offset would dominate the spectrum, so subtract the mean.
        centered = raw_data - np.mean(raw_data)

        spectrum = np.fft.rfft(centered)
        bin_freqs = np.fft.rfftfreq(len(centered), d=1.0 / sample_rate)

        # Drop the DC bin (index 0) before searching for the peak.
        mags = np.abs(spectrum[1:])
        candidate_freqs = bin_freqs[1:]

        if len(mags) == 0:
            return None

        # Strongest remaining bin is the dominant frequency.
        dominant_freq = candidate_freqs[np.argmax(mags)]
        if dominant_freq <= 0:
            return None

        # For NRZ data the UI would be 1/(2f); for periodic signals it is
        # 1/f. The period is used as the unit interval for general signals.
        unit_interval = float(1.0 / dominant_freq)

        # Sanity window: at least one full cycle in the record, and at
        # least 20 samples per cycle.
        min_freq = sample_rate / len(raw_data)
        max_freq = sample_rate / 20

        if not (min_freq <= dominant_freq <= max_freq):
            logger.debug(
                f"FFT dominant frequency {dominant_freq:.2f} Hz out of range "
                f"[{min_freq:.2f}, {max_freq:.2f}] Hz"
            )
            return None

        logger.debug(
            f"FFT detected dominant frequency: {dominant_freq:.2f} Hz, "
            f"unit interval: {unit_interval * 1e6:.3f} us"
        )
        return unit_interval

    except Exception as e:
        logger.debug(f"FFT-based unit interval detection failed: {e}")
        return None
|
|
679
|
+
|
|
680
|
+
def _detect_unit_interval_zero_crossing(
    self, raw_data: np.ndarray[Any, Any], sample_rate: float
) -> float | None:
    """Estimate the unit interval from mean-crossing spacing.

    Sign changes of the mean-removed signal mark half-periods; the
    unit interval is twice the average spacing between them.

    Args:
        raw_data: Input waveform samples.
        sample_rate: Sample rate in Hz.

    Returns:
        Estimated unit interval in seconds, or None if detection fails.
    """
    try:
        # Indices where the mean-removed signal changes sign.
        crossings = np.where(np.diff(np.sign(raw_data - np.mean(raw_data))))[0]

        # Too few crossings makes the average spacing unreliable.
        if len(crossings) <= 10:
            logger.debug(f"Insufficient zero crossings ({len(crossings)}) for detection")
            return None

        # Adjacent crossings are half a period apart on average.
        avg_half_period = float(np.mean(np.diff(crossings))) / sample_rate
        unit_interval = avg_half_period * 2  # Full period

        logger.debug(
            f"Zero-crossing detected unit interval: {unit_interval * 1e6:.3f} us "
            f"({len(crossings)} crossings)"
        )
        return unit_interval

    except Exception as e:
        logger.debug(f"Zero-crossing unit interval detection failed: {e}")
        return None
|
|
716
|
+
|
|
717
|
+
def _execute_function(
    self, module_name: str, func_name: str, data: Any, timeout: float | None
) -> Any:
    """Execute a single analysis function with quality scoring.

    Args:
        module_name: Name of the module containing the function.
        func_name: Name of the function to execute.
        data: Input data object.
        timeout: Timeout in seconds (None for no timeout).

    Returns:
        Analysis result with optional quality score attached.

    Raises:
        ValueError: If function is non-inferrable or invalid.
        RuntimeError: If the argument preparer has not been set up by run().
    """
    # Check if function is in non-inferrable skip list
    func_path = f"{module_name}.{func_name}"
    if func_path in NON_INFERRABLE_FUNCTIONS:
        logger.debug(f"Skipping non-inferrable function: {func_path}")
        raise ValueError(
            f"Function {func_path} requires context-specific parameters that cannot be auto-detected"
        )

    # Module was already imported by the caller, so this hits the cache.
    module = importlib.import_module(module_name)
    func = getattr(module, func_name)

    # Prepare function arguments using ArgumentPreparer
    if self._arg_preparer is None:
        raise RuntimeError("ArgumentPreparer not initialized - call run() first")

    args, kwargs = self._arg_preparer.prepare_arguments(func, data)

    # NOTE(review): only args is checked; presumably prepare_arguments
    # returns (None, None) together for inapplicable functions — confirm.
    if args is None:
        # Function not applicable to this data type
        raise ValueError(f"Function {func_name} not applicable to data type")

    start_time = time.time()

    # Execute with timeout if specified
    if timeout is not None:
        # Note: Python doesn't have built-in function timeout without threads/processes
        # For simplicity, we'll just execute directly and check elapsed time afterward
        # A production implementation would use threading.Timer or signal.alarm
        # (i.e. the timeout here is advisory: it warns but never interrupts).
        result = func(*args, **kwargs)

        elapsed = time.time() - start_time
        if elapsed > timeout:
            logger.warning(
                f"Function {module_name}.{func_name} exceeded timeout "
                f"({elapsed:.2f}s > {timeout:.2f}s)"
            )
    else:
        result = func(*args, **kwargs)

    # Consume generators to avoid serialization issues
    if isinstance(result, types.GeneratorType):
        try:
            result = list(result)
            logger.debug(f"Consumed generator from {module_name}.{func_name}")
        except Exception as e:
            # A generator can raise while being drained; keep a marker string
            # instead of propagating so the analysis result stays recordable.
            logger.warning(f"Failed to consume generator from {module_name}.{func_name}: {e}")
            result = f"<generator error: {type(e).__name__}>"

    # Add quality scoring if enabled in config (may wrap non-dict results)
    if self.config.enable_quality_scoring:
        result = self._add_quality_score(result, func_path, data)

    return result
|
|
787
|
+
|
|
788
|
+
def _add_quality_score(self, result: Any, method_name: str, data: Any) -> Any:
    """Attach a quality score to an analysis result.

    Scoring is best-effort: any failure leaves the result unmodified.
    Dict results carry the score in-band under ``_quality_score``;
    other non-None results are wrapped in a dict alongside the score.

    Args:
        result: Analysis result to score.
        method_name: Name of the analysis method.
        data: Input data object.

    Returns:
        Result with quality score attached (if applicable).
    """
    try:
        from oscura.quality import score_analysis_result

        # Quality assessment needs a raw numeric array to inspect.
        if hasattr(data, "data"):
            samples = data.data
        elif isinstance(data, np.ndarray):
            samples = data
        else:
            # Non-array inputs cannot be assessed; hand the result back.
            return result

        # Score the result against the underlying samples.
        score = score_analysis_result(
            result=result,
            method_name=method_name,
            data=samples,
        )

        if isinstance(result, dict):
            # Dicts take the score in place.
            result["_quality_score"] = score.to_dict()
        elif result is not None:
            # Anything else is wrapped so the score can ride along.
            return {"value": result, "_quality_score": score.to_dict()}

    except Exception as e:
        # Never let scoring break the analysis itself.
        logger.debug(f"Failed to add quality score: {e}")

    return result
|
|
832
|
+
|
|
833
|
+
|
|
834
|
+
# Public API of this module: only the engine class is exported.
__all__ = [
    "AnalysisEngine",
]
|