oscura-0.0.1-py3-none-any.whl → oscura-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +813 -8
- oscura/__main__.py +392 -0
- oscura/analyzers/__init__.py +37 -0
- oscura/analyzers/digital/__init__.py +177 -0
- oscura/analyzers/digital/bus.py +691 -0
- oscura/analyzers/digital/clock.py +805 -0
- oscura/analyzers/digital/correlation.py +720 -0
- oscura/analyzers/digital/edges.py +632 -0
- oscura/analyzers/digital/extraction.py +413 -0
- oscura/analyzers/digital/quality.py +878 -0
- oscura/analyzers/digital/signal_quality.py +877 -0
- oscura/analyzers/digital/thresholds.py +708 -0
- oscura/analyzers/digital/timing.py +1104 -0
- oscura/analyzers/eye/__init__.py +46 -0
- oscura/analyzers/eye/diagram.py +434 -0
- oscura/analyzers/eye/metrics.py +555 -0
- oscura/analyzers/jitter/__init__.py +83 -0
- oscura/analyzers/jitter/ber.py +333 -0
- oscura/analyzers/jitter/decomposition.py +759 -0
- oscura/analyzers/jitter/measurements.py +413 -0
- oscura/analyzers/jitter/spectrum.py +220 -0
- oscura/analyzers/measurements.py +40 -0
- oscura/analyzers/packet/__init__.py +171 -0
- oscura/analyzers/packet/daq.py +1077 -0
- oscura/analyzers/packet/metrics.py +437 -0
- oscura/analyzers/packet/parser.py +327 -0
- oscura/analyzers/packet/payload.py +2156 -0
- oscura/analyzers/packet/payload_analysis.py +1312 -0
- oscura/analyzers/packet/payload_extraction.py +236 -0
- oscura/analyzers/packet/payload_patterns.py +670 -0
- oscura/analyzers/packet/stream.py +359 -0
- oscura/analyzers/patterns/__init__.py +266 -0
- oscura/analyzers/patterns/clustering.py +1036 -0
- oscura/analyzers/patterns/discovery.py +539 -0
- oscura/analyzers/patterns/learning.py +797 -0
- oscura/analyzers/patterns/matching.py +1091 -0
- oscura/analyzers/patterns/periodic.py +650 -0
- oscura/analyzers/patterns/sequences.py +767 -0
- oscura/analyzers/power/__init__.py +116 -0
- oscura/analyzers/power/ac_power.py +391 -0
- oscura/analyzers/power/basic.py +383 -0
- oscura/analyzers/power/conduction.py +314 -0
- oscura/analyzers/power/efficiency.py +297 -0
- oscura/analyzers/power/ripple.py +356 -0
- oscura/analyzers/power/soa.py +372 -0
- oscura/analyzers/power/switching.py +479 -0
- oscura/analyzers/protocol/__init__.py +150 -0
- oscura/analyzers/protocols/__init__.py +150 -0
- oscura/analyzers/protocols/base.py +500 -0
- oscura/analyzers/protocols/can.py +620 -0
- oscura/analyzers/protocols/can_fd.py +448 -0
- oscura/analyzers/protocols/flexray.py +405 -0
- oscura/analyzers/protocols/hdlc.py +399 -0
- oscura/analyzers/protocols/i2c.py +368 -0
- oscura/analyzers/protocols/i2s.py +296 -0
- oscura/analyzers/protocols/jtag.py +393 -0
- oscura/analyzers/protocols/lin.py +445 -0
- oscura/analyzers/protocols/manchester.py +333 -0
- oscura/analyzers/protocols/onewire.py +501 -0
- oscura/analyzers/protocols/spi.py +334 -0
- oscura/analyzers/protocols/swd.py +325 -0
- oscura/analyzers/protocols/uart.py +393 -0
- oscura/analyzers/protocols/usb.py +495 -0
- oscura/analyzers/signal_integrity/__init__.py +63 -0
- oscura/analyzers/signal_integrity/embedding.py +294 -0
- oscura/analyzers/signal_integrity/equalization.py +370 -0
- oscura/analyzers/signal_integrity/sparams.py +484 -0
- oscura/analyzers/spectral/__init__.py +53 -0
- oscura/analyzers/spectral/chunked.py +273 -0
- oscura/analyzers/spectral/chunked_fft.py +571 -0
- oscura/analyzers/spectral/chunked_wavelet.py +391 -0
- oscura/analyzers/spectral/fft.py +92 -0
- oscura/analyzers/statistical/__init__.py +250 -0
- oscura/analyzers/statistical/checksum.py +923 -0
- oscura/analyzers/statistical/chunked_corr.py +228 -0
- oscura/analyzers/statistical/classification.py +778 -0
- oscura/analyzers/statistical/entropy.py +1113 -0
- oscura/analyzers/statistical/ngrams.py +614 -0
- oscura/analyzers/statistics/__init__.py +119 -0
- oscura/analyzers/statistics/advanced.py +885 -0
- oscura/analyzers/statistics/basic.py +263 -0
- oscura/analyzers/statistics/correlation.py +630 -0
- oscura/analyzers/statistics/distribution.py +298 -0
- oscura/analyzers/statistics/outliers.py +463 -0
- oscura/analyzers/statistics/streaming.py +93 -0
- oscura/analyzers/statistics/trend.py +520 -0
- oscura/analyzers/validation.py +598 -0
- oscura/analyzers/waveform/__init__.py +36 -0
- oscura/analyzers/waveform/measurements.py +943 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +371 -0
- oscura/analyzers/waveform/spectral.py +1689 -0
- oscura/analyzers/waveform/wavelets.py +298 -0
- oscura/api/__init__.py +62 -0
- oscura/api/dsl.py +538 -0
- oscura/api/fluent.py +571 -0
- oscura/api/operators.py +498 -0
- oscura/api/optimization.py +392 -0
- oscura/api/profiling.py +396 -0
- oscura/automotive/__init__.py +73 -0
- oscura/automotive/can/__init__.py +52 -0
- oscura/automotive/can/analysis.py +356 -0
- oscura/automotive/can/checksum.py +250 -0
- oscura/automotive/can/correlation.py +212 -0
- oscura/automotive/can/discovery.py +355 -0
- oscura/automotive/can/message_wrapper.py +375 -0
- oscura/automotive/can/models.py +385 -0
- oscura/automotive/can/patterns.py +381 -0
- oscura/automotive/can/session.py +452 -0
- oscura/automotive/can/state_machine.py +300 -0
- oscura/automotive/can/stimulus_response.py +461 -0
- oscura/automotive/dbc/__init__.py +15 -0
- oscura/automotive/dbc/generator.py +156 -0
- oscura/automotive/dbc/parser.py +146 -0
- oscura/automotive/dtc/__init__.py +30 -0
- oscura/automotive/dtc/database.py +3036 -0
- oscura/automotive/j1939/__init__.py +14 -0
- oscura/automotive/j1939/decoder.py +745 -0
- oscura/automotive/loaders/__init__.py +35 -0
- oscura/automotive/loaders/asc.py +98 -0
- oscura/automotive/loaders/blf.py +77 -0
- oscura/automotive/loaders/csv_can.py +136 -0
- oscura/automotive/loaders/dispatcher.py +136 -0
- oscura/automotive/loaders/mdf.py +331 -0
- oscura/automotive/loaders/pcap.py +132 -0
- oscura/automotive/obd/__init__.py +14 -0
- oscura/automotive/obd/decoder.py +707 -0
- oscura/automotive/uds/__init__.py +48 -0
- oscura/automotive/uds/decoder.py +265 -0
- oscura/automotive/uds/models.py +64 -0
- oscura/automotive/visualization.py +369 -0
- oscura/batch/__init__.py +55 -0
- oscura/batch/advanced.py +627 -0
- oscura/batch/aggregate.py +300 -0
- oscura/batch/analyze.py +139 -0
- oscura/batch/logging.py +487 -0
- oscura/batch/metrics.py +556 -0
- oscura/builders/__init__.py +41 -0
- oscura/builders/signal_builder.py +1131 -0
- oscura/cli/__init__.py +14 -0
- oscura/cli/batch.py +339 -0
- oscura/cli/characterize.py +273 -0
- oscura/cli/compare.py +775 -0
- oscura/cli/decode.py +551 -0
- oscura/cli/main.py +247 -0
- oscura/cli/shell.py +350 -0
- oscura/comparison/__init__.py +66 -0
- oscura/comparison/compare.py +397 -0
- oscura/comparison/golden.py +487 -0
- oscura/comparison/limits.py +391 -0
- oscura/comparison/mask.py +434 -0
- oscura/comparison/trace_diff.py +30 -0
- oscura/comparison/visualization.py +481 -0
- oscura/compliance/__init__.py +70 -0
- oscura/compliance/advanced.py +756 -0
- oscura/compliance/masks.py +363 -0
- oscura/compliance/reporting.py +483 -0
- oscura/compliance/testing.py +298 -0
- oscura/component/__init__.py +38 -0
- oscura/component/impedance.py +365 -0
- oscura/component/reactive.py +598 -0
- oscura/component/transmission_line.py +312 -0
- oscura/config/__init__.py +191 -0
- oscura/config/defaults.py +254 -0
- oscura/config/loader.py +348 -0
- oscura/config/memory.py +271 -0
- oscura/config/migration.py +458 -0
- oscura/config/pipeline.py +1077 -0
- oscura/config/preferences.py +530 -0
- oscura/config/protocol.py +875 -0
- oscura/config/schema.py +713 -0
- oscura/config/settings.py +420 -0
- oscura/config/thresholds.py +599 -0
- oscura/convenience.py +457 -0
- oscura/core/__init__.py +299 -0
- oscura/core/audit.py +457 -0
- oscura/core/backend_selector.py +405 -0
- oscura/core/cache.py +590 -0
- oscura/core/cancellation.py +439 -0
- oscura/core/confidence.py +225 -0
- oscura/core/config.py +506 -0
- oscura/core/correlation.py +216 -0
- oscura/core/cross_domain.py +422 -0
- oscura/core/debug.py +301 -0
- oscura/core/edge_cases.py +541 -0
- oscura/core/exceptions.py +535 -0
- oscura/core/gpu_backend.py +523 -0
- oscura/core/lazy.py +832 -0
- oscura/core/log_query.py +540 -0
- oscura/core/logging.py +931 -0
- oscura/core/logging_advanced.py +952 -0
- oscura/core/memoize.py +171 -0
- oscura/core/memory_check.py +274 -0
- oscura/core/memory_guard.py +290 -0
- oscura/core/memory_limits.py +336 -0
- oscura/core/memory_monitor.py +453 -0
- oscura/core/memory_progress.py +465 -0
- oscura/core/memory_warnings.py +315 -0
- oscura/core/numba_backend.py +362 -0
- oscura/core/performance.py +352 -0
- oscura/core/progress.py +524 -0
- oscura/core/provenance.py +358 -0
- oscura/core/results.py +331 -0
- oscura/core/types.py +504 -0
- oscura/core/uncertainty.py +383 -0
- oscura/discovery/__init__.py +52 -0
- oscura/discovery/anomaly_detector.py +672 -0
- oscura/discovery/auto_decoder.py +415 -0
- oscura/discovery/comparison.py +497 -0
- oscura/discovery/quality_validator.py +528 -0
- oscura/discovery/signal_detector.py +769 -0
- oscura/dsl/__init__.py +73 -0
- oscura/dsl/commands.py +246 -0
- oscura/dsl/interpreter.py +455 -0
- oscura/dsl/parser.py +689 -0
- oscura/dsl/repl.py +172 -0
- oscura/exceptions.py +59 -0
- oscura/exploratory/__init__.py +111 -0
- oscura/exploratory/error_recovery.py +642 -0
- oscura/exploratory/fuzzy.py +513 -0
- oscura/exploratory/fuzzy_advanced.py +786 -0
- oscura/exploratory/legacy.py +831 -0
- oscura/exploratory/parse.py +358 -0
- oscura/exploratory/recovery.py +275 -0
- oscura/exploratory/sync.py +382 -0
- oscura/exploratory/unknown.py +707 -0
- oscura/export/__init__.py +25 -0
- oscura/export/wireshark/README.md +265 -0
- oscura/export/wireshark/__init__.py +47 -0
- oscura/export/wireshark/generator.py +312 -0
- oscura/export/wireshark/lua_builder.py +159 -0
- oscura/export/wireshark/templates/dissector.lua.j2 +92 -0
- oscura/export/wireshark/type_mapping.py +165 -0
- oscura/export/wireshark/validator.py +105 -0
- oscura/exporters/__init__.py +94 -0
- oscura/exporters/csv.py +303 -0
- oscura/exporters/exporters.py +44 -0
- oscura/exporters/hdf5.py +219 -0
- oscura/exporters/html_export.py +701 -0
- oscura/exporters/json_export.py +291 -0
- oscura/exporters/markdown_export.py +367 -0
- oscura/exporters/matlab_export.py +354 -0
- oscura/exporters/npz_export.py +219 -0
- oscura/exporters/spice_export.py +210 -0
- oscura/extensibility/__init__.py +131 -0
- oscura/extensibility/docs.py +752 -0
- oscura/extensibility/extensions.py +1125 -0
- oscura/extensibility/logging.py +259 -0
- oscura/extensibility/measurements.py +485 -0
- oscura/extensibility/plugins.py +414 -0
- oscura/extensibility/registry.py +346 -0
- oscura/extensibility/templates.py +913 -0
- oscura/extensibility/validation.py +651 -0
- oscura/filtering/__init__.py +89 -0
- oscura/filtering/base.py +563 -0
- oscura/filtering/convenience.py +564 -0
- oscura/filtering/design.py +725 -0
- oscura/filtering/filters.py +32 -0
- oscura/filtering/introspection.py +605 -0
- oscura/guidance/__init__.py +24 -0
- oscura/guidance/recommender.py +429 -0
- oscura/guidance/wizard.py +518 -0
- oscura/inference/__init__.py +251 -0
- oscura/inference/active_learning/README.md +153 -0
- oscura/inference/active_learning/__init__.py +38 -0
- oscura/inference/active_learning/lstar.py +257 -0
- oscura/inference/active_learning/observation_table.py +230 -0
- oscura/inference/active_learning/oracle.py +78 -0
- oscura/inference/active_learning/teachers/__init__.py +15 -0
- oscura/inference/active_learning/teachers/simulator.py +192 -0
- oscura/inference/adaptive_tuning.py +453 -0
- oscura/inference/alignment.py +653 -0
- oscura/inference/bayesian.py +943 -0
- oscura/inference/binary.py +1016 -0
- oscura/inference/crc_reverse.py +711 -0
- oscura/inference/logic.py +288 -0
- oscura/inference/message_format.py +1305 -0
- oscura/inference/protocol.py +417 -0
- oscura/inference/protocol_dsl.py +1084 -0
- oscura/inference/protocol_library.py +1230 -0
- oscura/inference/sequences.py +809 -0
- oscura/inference/signal_intelligence.py +1509 -0
- oscura/inference/spectral.py +215 -0
- oscura/inference/state_machine.py +634 -0
- oscura/inference/stream.py +918 -0
- oscura/integrations/__init__.py +59 -0
- oscura/integrations/llm.py +1827 -0
- oscura/jupyter/__init__.py +32 -0
- oscura/jupyter/display.py +268 -0
- oscura/jupyter/magic.py +334 -0
- oscura/loaders/__init__.py +526 -0
- oscura/loaders/binary.py +69 -0
- oscura/loaders/configurable.py +1255 -0
- oscura/loaders/csv.py +26 -0
- oscura/loaders/csv_loader.py +473 -0
- oscura/loaders/hdf5.py +9 -0
- oscura/loaders/hdf5_loader.py +510 -0
- oscura/loaders/lazy.py +370 -0
- oscura/loaders/mmap_loader.py +583 -0
- oscura/loaders/numpy_loader.py +436 -0
- oscura/loaders/pcap.py +432 -0
- oscura/loaders/preprocessing.py +368 -0
- oscura/loaders/rigol.py +287 -0
- oscura/loaders/sigrok.py +321 -0
- oscura/loaders/tdms.py +367 -0
- oscura/loaders/tektronix.py +711 -0
- oscura/loaders/validation.py +584 -0
- oscura/loaders/vcd.py +464 -0
- oscura/loaders/wav.py +233 -0
- oscura/math/__init__.py +45 -0
- oscura/math/arithmetic.py +824 -0
- oscura/math/interpolation.py +413 -0
- oscura/onboarding/__init__.py +39 -0
- oscura/onboarding/help.py +498 -0
- oscura/onboarding/tutorials.py +405 -0
- oscura/onboarding/wizard.py +466 -0
- oscura/optimization/__init__.py +19 -0
- oscura/optimization/parallel.py +440 -0
- oscura/optimization/search.py +532 -0
- oscura/pipeline/__init__.py +43 -0
- oscura/pipeline/base.py +338 -0
- oscura/pipeline/composition.py +242 -0
- oscura/pipeline/parallel.py +448 -0
- oscura/pipeline/pipeline.py +375 -0
- oscura/pipeline/reverse_engineering.py +1119 -0
- oscura/plugins/__init__.py +122 -0
- oscura/plugins/base.py +272 -0
- oscura/plugins/cli.py +497 -0
- oscura/plugins/discovery.py +411 -0
- oscura/plugins/isolation.py +418 -0
- oscura/plugins/lifecycle.py +959 -0
- oscura/plugins/manager.py +493 -0
- oscura/plugins/registry.py +421 -0
- oscura/plugins/versioning.py +372 -0
- oscura/py.typed +0 -0
- oscura/quality/__init__.py +65 -0
- oscura/quality/ensemble.py +740 -0
- oscura/quality/explainer.py +338 -0
- oscura/quality/scoring.py +616 -0
- oscura/quality/warnings.py +456 -0
- oscura/reporting/__init__.py +248 -0
- oscura/reporting/advanced.py +1234 -0
- oscura/reporting/analyze.py +448 -0
- oscura/reporting/argument_preparer.py +596 -0
- oscura/reporting/auto_report.py +507 -0
- oscura/reporting/batch.py +615 -0
- oscura/reporting/chart_selection.py +223 -0
- oscura/reporting/comparison.py +330 -0
- oscura/reporting/config.py +615 -0
- oscura/reporting/content/__init__.py +39 -0
- oscura/reporting/content/executive.py +127 -0
- oscura/reporting/content/filtering.py +191 -0
- oscura/reporting/content/minimal.py +257 -0
- oscura/reporting/content/verbosity.py +162 -0
- oscura/reporting/core.py +508 -0
- oscura/reporting/core_formats/__init__.py +17 -0
- oscura/reporting/core_formats/multi_format.py +210 -0
- oscura/reporting/engine.py +836 -0
- oscura/reporting/export.py +366 -0
- oscura/reporting/formatting/__init__.py +129 -0
- oscura/reporting/formatting/emphasis.py +81 -0
- oscura/reporting/formatting/numbers.py +403 -0
- oscura/reporting/formatting/standards.py +55 -0
- oscura/reporting/formatting.py +466 -0
- oscura/reporting/html.py +578 -0
- oscura/reporting/index.py +590 -0
- oscura/reporting/multichannel.py +296 -0
- oscura/reporting/output.py +379 -0
- oscura/reporting/pdf.py +373 -0
- oscura/reporting/plots.py +731 -0
- oscura/reporting/pptx_export.py +360 -0
- oscura/reporting/renderers/__init__.py +11 -0
- oscura/reporting/renderers/pdf.py +94 -0
- oscura/reporting/sections.py +471 -0
- oscura/reporting/standards.py +680 -0
- oscura/reporting/summary_generator.py +368 -0
- oscura/reporting/tables.py +397 -0
- oscura/reporting/template_system.py +724 -0
- oscura/reporting/templates/__init__.py +15 -0
- oscura/reporting/templates/definition.py +205 -0
- oscura/reporting/templates/index.html +649 -0
- oscura/reporting/templates/index.md +173 -0
- oscura/schemas/__init__.py +158 -0
- oscura/schemas/bus_configuration.json +322 -0
- oscura/schemas/device_mapping.json +182 -0
- oscura/schemas/packet_format.json +418 -0
- oscura/schemas/protocol_definition.json +363 -0
- oscura/search/__init__.py +16 -0
- oscura/search/anomaly.py +292 -0
- oscura/search/context.py +149 -0
- oscura/search/pattern.py +160 -0
- oscura/session/__init__.py +34 -0
- oscura/session/annotations.py +289 -0
- oscura/session/history.py +313 -0
- oscura/session/session.py +445 -0
- oscura/streaming/__init__.py +43 -0
- oscura/streaming/chunked.py +611 -0
- oscura/streaming/progressive.py +393 -0
- oscura/streaming/realtime.py +622 -0
- oscura/testing/__init__.py +54 -0
- oscura/testing/synthetic.py +808 -0
- oscura/triggering/__init__.py +68 -0
- oscura/triggering/base.py +229 -0
- oscura/triggering/edge.py +353 -0
- oscura/triggering/pattern.py +344 -0
- oscura/triggering/pulse.py +581 -0
- oscura/triggering/window.py +453 -0
- oscura/ui/__init__.py +48 -0
- oscura/ui/formatters.py +526 -0
- oscura/ui/progressive_display.py +340 -0
- oscura/utils/__init__.py +99 -0
- oscura/utils/autodetect.py +338 -0
- oscura/utils/buffer.py +389 -0
- oscura/utils/lazy.py +407 -0
- oscura/utils/lazy_imports.py +147 -0
- oscura/utils/memory.py +836 -0
- oscura/utils/memory_advanced.py +1326 -0
- oscura/utils/memory_extensions.py +465 -0
- oscura/utils/progressive.py +352 -0
- oscura/utils/windowing.py +362 -0
- oscura/visualization/__init__.py +321 -0
- oscura/visualization/accessibility.py +526 -0
- oscura/visualization/annotations.py +374 -0
- oscura/visualization/axis_scaling.py +305 -0
- oscura/visualization/colors.py +453 -0
- oscura/visualization/digital.py +337 -0
- oscura/visualization/eye.py +420 -0
- oscura/visualization/histogram.py +281 -0
- oscura/visualization/interactive.py +858 -0
- oscura/visualization/jitter.py +702 -0
- oscura/visualization/keyboard.py +394 -0
- oscura/visualization/layout.py +365 -0
- oscura/visualization/optimization.py +1028 -0
- oscura/visualization/palettes.py +446 -0
- oscura/visualization/plot.py +92 -0
- oscura/visualization/power.py +290 -0
- oscura/visualization/power_extended.py +626 -0
- oscura/visualization/presets.py +467 -0
- oscura/visualization/protocols.py +932 -0
- oscura/visualization/render.py +207 -0
- oscura/visualization/rendering.py +444 -0
- oscura/visualization/reverse_engineering.py +791 -0
- oscura/visualization/signal_integrity.py +808 -0
- oscura/visualization/specialized.py +553 -0
- oscura/visualization/spectral.py +811 -0
- oscura/visualization/styles.py +381 -0
- oscura/visualization/thumbnails.py +311 -0
- oscura/visualization/time_axis.py +351 -0
- oscura/visualization/waveform.py +367 -0
- oscura/workflow/__init__.py +13 -0
- oscura/workflow/dag.py +377 -0
- oscura/workflows/__init__.py +58 -0
- oscura/workflows/compliance.py +280 -0
- oscura/workflows/digital.py +272 -0
- oscura/workflows/multi_trace.py +502 -0
- oscura/workflows/power.py +178 -0
- oscura/workflows/protocol.py +492 -0
- oscura/workflows/reverse_engineering.py +639 -0
- oscura/workflows/signal_integrity.py +227 -0
- oscura-0.1.0.dist-info/METADATA +300 -0
- oscura-0.1.0.dist-info/RECORD +463 -0
- oscura-0.1.0.dist-info/entry_points.txt +2 -0
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/licenses/LICENSE +1 -1
- oscura-0.0.1.dist-info/METADATA +0 -63
- oscura-0.0.1.dist-info/RECORD +0 -5
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/WHEEL +0 -0
oscura/exploratory/error_recovery.py
@@ -0,0 +1,642 @@
"""Error recovery and graceful degradation for signal analysis.

This module provides error recovery mechanisms for handling corrupted,
noisy, or incomplete signal data.

Example:
    >>> from oscura.exploratory.error_recovery import recover_corrupted_data
    >>> recovered, stats = recover_corrupted_data(trace)
    >>> print(f"Recovered {stats.recovered_samples} samples")
"""

from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, TypeVar

import numpy as np

from oscura.core.types import WaveformTrace

logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from collections.abc import Callable

    from numpy.typing import NDArray

T = TypeVar("T")


@dataclass
class RecoveryStats:
    """Statistics from data recovery.

    Attributes:
        total_samples: Total samples in original data.
        corrupted_samples: Number of detected corrupted samples.
        recovered_samples: Number of successfully recovered samples.
        unrecoverable_samples: Number that could not be recovered.
        recovery_method: Method used for recovery.
        confidence: Confidence in recovered data.
    """

    total_samples: int
    corrupted_samples: int
    recovered_samples: int
    unrecoverable_samples: int
    recovery_method: str
    confidence: float


def recover_corrupted_data(
    trace: WaveformTrace,
    *,
    corruption_threshold: float = 3.0,
    recovery_method: str = "interpolate",
    max_gap_samples: int = 100,
) -> tuple[WaveformTrace, RecoveryStats]:
    """Recover corrupted data.

    Detects and attempts to recover corrupted samples using
    interpolation or other techniques.

    Args:
        trace: Trace with potentially corrupted data.
        corruption_threshold: Threshold for detecting corruption (in std devs).
        recovery_method: 'interpolate', 'median', or 'zero'.
        max_gap_samples: Maximum gap that can be recovered.

    Returns:
        Tuple of (recovered_trace, recovery_stats).

    Example:
        >>> recovered, stats = recover_corrupted_data(trace)
        >>> print(f"Recovered {stats.recovered_samples} samples")
        >>> print(f"Confidence: {stats.confidence:.1%}")

    References:
        ERROR-001: Error Recovery from Corrupted Data
    """
    data = trace.data.copy()
    n = len(data)

    # Detect corrupted samples using statistical outlier detection
    # Filter out nan/inf for initial statistics calculation
    valid_mask = np.isfinite(data)
    valid_data = data[valid_mask] if np.any(valid_mask) else data

    median = np.median(valid_data) if len(valid_data) > 0 else 0.0
    mad = np.median(np.abs(valid_data - median)) if len(valid_data) > 0 else 0.0

    if mad < 1e-10:
        mad = np.std(valid_data) if len(valid_data) > 0 else 1.0

    # Z-score based on MAD
    z_scores = np.abs(data - median) / (1.4826 * mad + 1e-10)

    # Find corrupted samples
    corrupted_mask = z_scores > corruption_threshold

    # Also detect NaN and Inf
    corrupted_mask |= np.isnan(data)
    corrupted_mask |= np.isinf(data)

    corrupted_indices = np.where(corrupted_mask)[0]
    n_corrupted = len(corrupted_indices)

    if n_corrupted == 0:
        return trace, RecoveryStats(
            total_samples=n,
            corrupted_samples=0,
            recovered_samples=0,
            unrecoverable_samples=0,
            recovery_method="none",
            confidence=1.0,
        )

    # Group corrupted samples into contiguous regions
    gaps = []
    if len(corrupted_indices) > 0:
        gap_start = corrupted_indices[0]
        gap_end = corrupted_indices[0]

        for idx in corrupted_indices[1:]:
            if idx == gap_end + 1:
                gap_end = idx
            else:
                gaps.append((gap_start, gap_end))
                gap_start = idx
                gap_end = idx

        gaps.append((gap_start, gap_end))

    # Attempt recovery
    recovered = 0
    unrecoverable = 0

    for start, end in gaps:
        gap_length = end - start + 1

        if gap_length > max_gap_samples:
            unrecoverable += gap_length
            continue

        if recovery_method == "interpolate":
            # Linear interpolation from surrounding samples
            left_idx = max(0, start - 1)
            right_idx = min(n - 1, end + 1)

            if left_idx < start and right_idx > end:
                # Can interpolate
                left_val = data[left_idx]
                right_val = data[right_idx]
                for i, idx in enumerate(range(start, end + 1)):
                    t = (i + 1) / (gap_length + 1)
                    data[idx] = left_val * (1 - t) + right_val * t
                recovered += gap_length
            else:
                # Edge case - use nearest valid value
                if left_idx >= start:
                    data[start : end + 1] = data[right_idx]
                else:
                    data[start : end + 1] = data[left_idx]
                recovered += gap_length

        elif recovery_method == "median":
            # Replace with local median
            window_start = max(0, start - 50)
            window_end = min(n, end + 50)
            window_data = data[window_start:window_end]
            valid_data = window_data[~corrupted_mask[window_start:window_end]]

            if len(valid_data) > 0:
                fill_value = np.median(valid_data)
                data[start : end + 1] = fill_value
                recovered += gap_length
            else:
                unrecoverable += gap_length

        elif recovery_method == "zero":
            # Replace with zero
            data[start : end + 1] = 0
            recovered += gap_length

        else:
            unrecoverable += gap_length

    # Create recovered trace
    recovered_trace = WaveformTrace(
        data=data,
        metadata=trace.metadata,
    )

    # Calculate confidence
    recovery_ratio = recovered / max(n_corrupted, 1)
    gap_sizes = [end - start + 1 for start, end in gaps]
    avg_gap_size = np.mean(gap_sizes) if gap_sizes else 0
    confidence = recovery_ratio * (1 - avg_gap_size / max_gap_samples)

    return recovered_trace, RecoveryStats(
        total_samples=n,
        corrupted_samples=n_corrupted,
        recovered_samples=recovered,
        unrecoverable_samples=unrecoverable,
        recovery_method=recovery_method,
        confidence=max(0.0, min(1.0, confidence)),
    )


@dataclass
class GracefulDegradationResult:
    """Result of gracefully degraded analysis.

    Attributes:
        result: Analysis result (may be partial).
        quality_level: 'full', 'degraded', or 'minimal'.
        available_features: Features that could be computed.
        missing_features: Features that failed.
        warnings: List of warnings about degradation.
    """

    result: dict[str, Any]
    quality_level: str
    available_features: list[str]
    missing_features: list[str]
    warnings: list[str]


def graceful_degradation(
    analysis_func: Callable[..., dict[str, Any]],
    trace: WaveformTrace,
    *,
    required_features: list[str] | None = None,
    optional_features: list[str] | None = None,
    **kwargs: Any,
) -> GracefulDegradationResult:
    """Execute analysis with graceful degradation.

    Attempts to provide partial results when full analysis fails.

    Args:
        analysis_func: Analysis function to call.
        trace: Trace to analyze.
        required_features: Features that must succeed.
        optional_features: Features that can fail.
        **kwargs: Additional arguments to analysis function.

    Returns:
        GracefulDegradationResult with partial or full results.

    Example:
        >>> result = graceful_degradation(analyze_signal, trace)
        >>> print(f"Quality: {result.quality_level}")
        >>> print(f"Available: {result.available_features}")

    References:
        ERROR-002: Graceful Degradation
    """
    if required_features is None:
        required_features = []
    if optional_features is None:
        optional_features = []

    result: dict[str, Any] = {}
    available = []
    missing = []
    warnings = []

    # Try full analysis first
    try:
        result = analysis_func(trace, **kwargs)
        available = list(result.keys())
        quality_level = "full"

    except Exception as e:
        logger.warning("Full analysis failed: %s", e, exc_info=True)
        warnings.append(f"Full analysis failed: {e!s}")

        # Try reduced analysis
        for feature in required_features + optional_features:
            try:
                # Attempt to compute individual feature
                if hasattr(trace, feature):
                    result[feature] = getattr(trace, feature)
                    available.append(feature)
                else:
                    missing.append(feature)
            except Exception as fe:
                logger.debug("Feature %s failed: %s", feature, fe, exc_info=True)
                missing.append(feature)
                if feature in required_features:
                    warnings.append(f"Required feature {feature} failed: {fe!s}")

        # Determine quality level
        if all(f in available for f in required_features):
            quality_level = "degraded"
        elif len(available) > 0:
            quality_level = "minimal"
        else:
            quality_level = "failed"
            warnings.append("Analysis completely failed")

    return GracefulDegradationResult(
        result=result,
        quality_level=quality_level,
        available_features=available,
        missing_features=missing,
        warnings=warnings,
    )


@dataclass
class PartialDecodeResult:
    """Result of partial protocol decode.

    Attributes:
        complete_packets: Successfully decoded packets.
        partial_packets: Partially decoded packets.
        error_regions: Regions that could not be decoded.
        decode_rate: Percentage of signal successfully decoded.
        confidence: Confidence in decoded data.
    """

    complete_packets: list[dict[str, Any]]
    partial_packets: list[dict[str, Any]]
    error_regions: list[dict[str, Any]]
    decode_rate: float
    confidence: float


def partial_decode(
    trace: WaveformTrace,
    decode_func: Callable[[WaveformTrace], list[dict[str, Any]]],
    *,
    segment_size: int = 10000,
    min_valid_ratio: float = 0.5,
) -> PartialDecodeResult:
    """Decode protocol with partial result support.

    Continues decoding after errors to capture as much data as possible.

    Args:
        trace: Trace to decode.
        decode_func: Protocol decode function.
        segment_size: Size of segments to try independently.
        min_valid_ratio: Minimum valid ratio to accept segment.

    Returns:
        PartialDecodeResult with all decoded data.

    Example:
        >>> result = partial_decode(trace, uart_decode)
        >>> print(f"Decoded {len(result.complete_packets)} complete packets")
        >>> print(f"Decode rate: {result.decode_rate:.1%}")

    References:
        ERROR-003: Partial Decode Support
    """
    data = trace.data
    n = len(data)

    complete_packets: list[dict[str, Any]] = []
    partial_packets: list[dict[str, Any]] = []
    error_regions: list[dict[str, Any]] = []

    total_samples = 0
    decoded_samples = 0

    # Try to decode entire trace first
    try:
        full_result = decode_func(trace)
        if full_result:
            complete_packets.extend(full_result)
            decoded_samples = n
            total_samples = n
    except Exception as e:
        logger.info("Full decode failed, falling back to segment decode: %s", e)
        # Fall back to segment-by-segment decode
        for start in range(0, n, segment_size):
            end = min(start + segment_size, n)
            segment_data = data[start:end]

            # Create segment trace
            segment_trace = WaveformTrace(
                data=segment_data,
                metadata=trace.metadata,
            )

            total_samples += len(segment_data)

            try:
                segment_result = decode_func(segment_trace)

                if segment_result:
                    # Adjust timestamps
                    for packet in segment_result:
                        if "timestamp" in packet:
                            packet["timestamp"] += start / trace.metadata.sample_rate
                        if "sample" in packet:
                            packet["sample"] += start

                    # Check if segment is valid
                    valid_ratio = len(segment_result) / max(len(segment_data) / 100, 1)

                    if valid_ratio >= min_valid_ratio:
                        complete_packets.extend(segment_result)
                        decoded_samples += len(segment_data)
                    else:
                        partial_packets.extend(segment_result)
                        decoded_samples += len(segment_data) // 2

            except Exception as e:
                logger.debug("Segment decode failed at sample %d: %s", start, e)
                error_regions.append(
                    {
                        "start_sample": start,
                        "end_sample": end,
                        "error": str(e),
                    }
                )

    # Calculate statistics
    decode_rate = decoded_samples / max(total_samples, 1)

    # Calculate confidence
    error_ratio = len(error_regions) / max((n // segment_size), 1)
    confidence = decode_rate * (1 - error_ratio)

    return PartialDecodeResult(
        complete_packets=complete_packets,
        partial_packets=partial_packets,
        error_regions=error_regions,
        decode_rate=decode_rate,
        confidence=confidence,
    )


@dataclass
class ErrorContext:
    """Preserved error context for debugging.

    Attributes:
        error_type: Type of error that occurred.
        error_message: Error message.
        location: Where in the signal the error occurred.
        context_before: Signal context before error.
        context_after: Signal context after error.
        parameters: Parameters at time of error.
        suggestions: Suggestions for fixing the error.
    """

    error_type: str
    error_message: str
    location: int | None
    context_before: NDArray[np.float64] | None
    context_after: NDArray[np.float64] | None
    parameters: dict[str, Any]
    suggestions: list[str]

    @classmethod
    def capture(
        cls,
        exception: Exception,
        trace: WaveformTrace | None = None,
        location: int | None = None,
        context_samples: int = 100,
        parameters: dict[str, Any] | None = None,
    ) -> ErrorContext:
        """Capture error context from exception.

        Args:
            exception: The exception that occurred.
            trace: Signal trace (for context extraction).
            location: Sample index where error occurred.
            context_samples: Number of context samples to capture.
            parameters: Analysis parameters at time of error.

        Returns:
            ErrorContext with all available information.
        """
        context_before = None
        context_after = None

        if trace is not None and location is not None:
            data = trace.data
            n = len(data)

            if location >= 0 and location < n:
                start = max(0, location - context_samples)
                end = min(n, location + context_samples)
                context_before = data[start:location]
                context_after = data[location:end]

        # Generate suggestions based on error type
        suggestions = []
        error_str = str(exception)

        if "insufficient" in error_str.lower():
            suggestions.append("Try providing more data samples")
            suggestions.append("Check if trace is complete")

        if "threshold" in error_str.lower():
            suggestions.append("Try adjusting threshold parameter")
            suggestions.append("Check signal levels are as expected")

        if "timeout" in error_str.lower():
            suggestions.append("Increase timeout parameter")
            suggestions.append("Process in smaller chunks")

        if "memory" in error_str.lower():
            suggestions.append("Use chunked processing")
            suggestions.append("Reduce analysis window size")

        if not suggestions:
            suggestions.append("Check input data format")
            suggestions.append("Verify analysis parameters")

        return cls(
            error_type=type(exception).__name__,
            error_message=str(exception),
            location=location,
            context_before=context_before,
            context_after=context_after,
            parameters=parameters or {},
            suggestions=suggestions,
        )


@dataclass
class RetryResult:
    """Result of retry with parameter adjustment.

    Attributes:
        success: True if retry succeeded.
        result: Analysis result (if successful).
        attempts: Number of attempts made.
        final_parameters: Parameters that worked.
        adjustments_made: List of adjustments made.
    """

    success: bool
    result: Any
    attempts: int
    final_parameters: dict[str, Any]
    adjustments_made: list[str]


def retry_with_adjustment[T](
    func: Callable[..., T],
    trace: WaveformTrace,
    initial_params: dict[str, Any],
    *,
    max_retries: int = 3,
    adjustment_rules: dict[str, Callable[[Any, int], Any]] | None = None,
) -> RetryResult:
    """Retry analysis with automatic parameter adjustment.

    Adjusts parameters and retries when analysis fails.

    Args:
        func: Analysis function to retry.
        trace: Trace to analyze.
        initial_params: Initial parameters.
        max_retries: Maximum retry attempts.
        adjustment_rules: Rules for adjusting parameters.

    Returns:
        RetryResult with outcome of retries.

    Example:
        >>> rules = {
        ...     'threshold': lambda v, n: v * 0.9,  # Reduce by 10% each retry
        ...     'window_size': lambda v, n: v * 2,  # Double each retry
        ... }
        >>> result = retry_with_adjustment(analyze, trace, params, adjustment_rules=rules)
        >>> if result.success:
        ...     print(f"Succeeded after {result.attempts} attempts")

    References:
        ERROR-005: Automatic Retry with Parameter Adjustment
    """
    if adjustment_rules is None:
        # Default adjustment rules
        adjustment_rules = {
            "threshold": lambda v, n: v * (0.9**n),
            "tolerance": lambda v, n: v * (1.2**n),
            "window_size": lambda v, n: int(v * (1.5**n)),
            "min_samples": lambda v, n: max(1, int(v * (0.8**n))),
        }

    params = initial_params.copy()
    adjustments_made = []  # type: ignore[var-annotated]

    for attempt in range(max_retries + 1):
        try:
            result = func(trace, **params)
            return RetryResult(
                success=True,
                result=result,
                attempts=attempt + 1,
                final_parameters=params,
                adjustments_made=adjustments_made,
            )

        except Exception as e:
            logger.debug("Retry attempt %d failed: %s", attempt + 1, e)
            if attempt >= max_retries:
                logger.warning("Max retries (%d) reached, giving up: %s", max_retries, e)
                break

            # Adjust parameters for next attempt
            for param_name, adjust_func in adjustment_rules.items():
                if param_name in params:
                    old_val = params[param_name]
                    new_val = adjust_func(old_val, attempt + 1)
                    params[param_name] = new_val
                    adjustments_made.append(
                        f"Attempt {attempt + 1}: {param_name} {old_val} -> {new_val}"
                    )

    return RetryResult(
        success=False,
        result=None,
        attempts=max_retries + 1,
        final_parameters=params,
        adjustments_made=adjustments_made,
    )


__all__ = [
    "ErrorContext",
    "GracefulDegradationResult",
    "PartialDecodeResult",
    "RecoveryStats",
    "RetryResult",
    "graceful_degradation",
    "partial_decode",
    "recover_corrupted_data",
    "retry_with_adjustment",
]
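The helpers added in `error_recovery.py` compose naturally into a fallback chain: repair the raw samples, retry the analysis with progressively relaxed parameters, and only then settle for a degraded result. The sketch below illustrates that flow using the functions from this diff; `analyze_signal` and `robust_analyze` are hypothetical placeholders and not part of the published package API.

```python
# Sketch only: `analyze_signal` is a stand-in for any analysis callable that
# returns a dict of features; `trace` is assumed to come from an oscura loader.
from oscura.exploratory.error_recovery import (
    ErrorContext,
    graceful_degradation,
    recover_corrupted_data,
    retry_with_adjustment,
)


def analyze_signal(trace, *, threshold=0.5):
    """Placeholder analysis returning simple per-trace features."""
    data = trace.data
    return {"mean": float(data.mean()), "peak": float(abs(data).max())}


def robust_analyze(trace):
    # 1. Repair NaN/Inf and statistical outliers before any analysis.
    repaired, stats = recover_corrupted_data(trace, recovery_method="interpolate")
    if stats.corrupted_samples:
        print(
            f"Recovered {stats.recovered_samples}/{stats.corrupted_samples} samples "
            f"(confidence {stats.confidence:.1%})"
        )

    # 2. Retry the analysis, loosening the threshold on each failed attempt.
    retry = retry_with_adjustment(
        analyze_signal,
        repaired,
        {"threshold": 0.5},
        max_retries=3,
        adjustment_rules={"threshold": lambda v, n: v * (0.9 ** n)},
    )
    if retry.success:
        return retry.result

    # 3. Last resort: accept a partial result rather than failing outright.
    degraded = graceful_degradation(analyze_signal, repaired, required_features=["mean"])
    if degraded.quality_level == "failed":
        ctx = ErrorContext.capture(RuntimeError("analysis failed"), trace=repaired, location=0)
        print("Suggestions:", ctx.suggestions)
    return degraded.result
```

Ordering the retry step before graceful degradation mirrors how the module itself escalates: parameter adjustment preserves full-quality output when possible, while degradation is reserved for cases where no parameter change can make the analysis succeed.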