oscura-0.0.1-py3-none-any.whl → oscura-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oscura/__init__.py +813 -8
- oscura/__main__.py +392 -0
- oscura/analyzers/__init__.py +37 -0
- oscura/analyzers/digital/__init__.py +177 -0
- oscura/analyzers/digital/bus.py +691 -0
- oscura/analyzers/digital/clock.py +805 -0
- oscura/analyzers/digital/correlation.py +720 -0
- oscura/analyzers/digital/edges.py +632 -0
- oscura/analyzers/digital/extraction.py +413 -0
- oscura/analyzers/digital/quality.py +878 -0
- oscura/analyzers/digital/signal_quality.py +877 -0
- oscura/analyzers/digital/thresholds.py +708 -0
- oscura/analyzers/digital/timing.py +1104 -0
- oscura/analyzers/eye/__init__.py +46 -0
- oscura/analyzers/eye/diagram.py +434 -0
- oscura/analyzers/eye/metrics.py +555 -0
- oscura/analyzers/jitter/__init__.py +83 -0
- oscura/analyzers/jitter/ber.py +333 -0
- oscura/analyzers/jitter/decomposition.py +759 -0
- oscura/analyzers/jitter/measurements.py +413 -0
- oscura/analyzers/jitter/spectrum.py +220 -0
- oscura/analyzers/measurements.py +40 -0
- oscura/analyzers/packet/__init__.py +171 -0
- oscura/analyzers/packet/daq.py +1077 -0
- oscura/analyzers/packet/metrics.py +437 -0
- oscura/analyzers/packet/parser.py +327 -0
- oscura/analyzers/packet/payload.py +2156 -0
- oscura/analyzers/packet/payload_analysis.py +1312 -0
- oscura/analyzers/packet/payload_extraction.py +236 -0
- oscura/analyzers/packet/payload_patterns.py +670 -0
- oscura/analyzers/packet/stream.py +359 -0
- oscura/analyzers/patterns/__init__.py +266 -0
- oscura/analyzers/patterns/clustering.py +1036 -0
- oscura/analyzers/patterns/discovery.py +539 -0
- oscura/analyzers/patterns/learning.py +797 -0
- oscura/analyzers/patterns/matching.py +1091 -0
- oscura/analyzers/patterns/periodic.py +650 -0
- oscura/analyzers/patterns/sequences.py +767 -0
- oscura/analyzers/power/__init__.py +116 -0
- oscura/analyzers/power/ac_power.py +391 -0
- oscura/analyzers/power/basic.py +383 -0
- oscura/analyzers/power/conduction.py +314 -0
- oscura/analyzers/power/efficiency.py +297 -0
- oscura/analyzers/power/ripple.py +356 -0
- oscura/analyzers/power/soa.py +372 -0
- oscura/analyzers/power/switching.py +479 -0
- oscura/analyzers/protocol/__init__.py +150 -0
- oscura/analyzers/protocols/__init__.py +150 -0
- oscura/analyzers/protocols/base.py +500 -0
- oscura/analyzers/protocols/can.py +620 -0
- oscura/analyzers/protocols/can_fd.py +448 -0
- oscura/analyzers/protocols/flexray.py +405 -0
- oscura/analyzers/protocols/hdlc.py +399 -0
- oscura/analyzers/protocols/i2c.py +368 -0
- oscura/analyzers/protocols/i2s.py +296 -0
- oscura/analyzers/protocols/jtag.py +393 -0
- oscura/analyzers/protocols/lin.py +445 -0
- oscura/analyzers/protocols/manchester.py +333 -0
- oscura/analyzers/protocols/onewire.py +501 -0
- oscura/analyzers/protocols/spi.py +334 -0
- oscura/analyzers/protocols/swd.py +325 -0
- oscura/analyzers/protocols/uart.py +393 -0
- oscura/analyzers/protocols/usb.py +495 -0
- oscura/analyzers/signal_integrity/__init__.py +63 -0
- oscura/analyzers/signal_integrity/embedding.py +294 -0
- oscura/analyzers/signal_integrity/equalization.py +370 -0
- oscura/analyzers/signal_integrity/sparams.py +484 -0
- oscura/analyzers/spectral/__init__.py +53 -0
- oscura/analyzers/spectral/chunked.py +273 -0
- oscura/analyzers/spectral/chunked_fft.py +571 -0
- oscura/analyzers/spectral/chunked_wavelet.py +391 -0
- oscura/analyzers/spectral/fft.py +92 -0
- oscura/analyzers/statistical/__init__.py +250 -0
- oscura/analyzers/statistical/checksum.py +923 -0
- oscura/analyzers/statistical/chunked_corr.py +228 -0
- oscura/analyzers/statistical/classification.py +778 -0
- oscura/analyzers/statistical/entropy.py +1113 -0
- oscura/analyzers/statistical/ngrams.py +614 -0
- oscura/analyzers/statistics/__init__.py +119 -0
- oscura/analyzers/statistics/advanced.py +885 -0
- oscura/analyzers/statistics/basic.py +263 -0
- oscura/analyzers/statistics/correlation.py +630 -0
- oscura/analyzers/statistics/distribution.py +298 -0
- oscura/analyzers/statistics/outliers.py +463 -0
- oscura/analyzers/statistics/streaming.py +93 -0
- oscura/analyzers/statistics/trend.py +520 -0
- oscura/analyzers/validation.py +598 -0
- oscura/analyzers/waveform/__init__.py +36 -0
- oscura/analyzers/waveform/measurements.py +943 -0
- oscura/analyzers/waveform/measurements_with_uncertainty.py +371 -0
- oscura/analyzers/waveform/spectral.py +1689 -0
- oscura/analyzers/waveform/wavelets.py +298 -0
- oscura/api/__init__.py +62 -0
- oscura/api/dsl.py +538 -0
- oscura/api/fluent.py +571 -0
- oscura/api/operators.py +498 -0
- oscura/api/optimization.py +392 -0
- oscura/api/profiling.py +396 -0
- oscura/automotive/__init__.py +73 -0
- oscura/automotive/can/__init__.py +52 -0
- oscura/automotive/can/analysis.py +356 -0
- oscura/automotive/can/checksum.py +250 -0
- oscura/automotive/can/correlation.py +212 -0
- oscura/automotive/can/discovery.py +355 -0
- oscura/automotive/can/message_wrapper.py +375 -0
- oscura/automotive/can/models.py +385 -0
- oscura/automotive/can/patterns.py +381 -0
- oscura/automotive/can/session.py +452 -0
- oscura/automotive/can/state_machine.py +300 -0
- oscura/automotive/can/stimulus_response.py +461 -0
- oscura/automotive/dbc/__init__.py +15 -0
- oscura/automotive/dbc/generator.py +156 -0
- oscura/automotive/dbc/parser.py +146 -0
- oscura/automotive/dtc/__init__.py +30 -0
- oscura/automotive/dtc/database.py +3036 -0
- oscura/automotive/j1939/__init__.py +14 -0
- oscura/automotive/j1939/decoder.py +745 -0
- oscura/automotive/loaders/__init__.py +35 -0
- oscura/automotive/loaders/asc.py +98 -0
- oscura/automotive/loaders/blf.py +77 -0
- oscura/automotive/loaders/csv_can.py +136 -0
- oscura/automotive/loaders/dispatcher.py +136 -0
- oscura/automotive/loaders/mdf.py +331 -0
- oscura/automotive/loaders/pcap.py +132 -0
- oscura/automotive/obd/__init__.py +14 -0
- oscura/automotive/obd/decoder.py +707 -0
- oscura/automotive/uds/__init__.py +48 -0
- oscura/automotive/uds/decoder.py +265 -0
- oscura/automotive/uds/models.py +64 -0
- oscura/automotive/visualization.py +369 -0
- oscura/batch/__init__.py +55 -0
- oscura/batch/advanced.py +627 -0
- oscura/batch/aggregate.py +300 -0
- oscura/batch/analyze.py +139 -0
- oscura/batch/logging.py +487 -0
- oscura/batch/metrics.py +556 -0
- oscura/builders/__init__.py +41 -0
- oscura/builders/signal_builder.py +1131 -0
- oscura/cli/__init__.py +14 -0
- oscura/cli/batch.py +339 -0
- oscura/cli/characterize.py +273 -0
- oscura/cli/compare.py +775 -0
- oscura/cli/decode.py +551 -0
- oscura/cli/main.py +247 -0
- oscura/cli/shell.py +350 -0
- oscura/comparison/__init__.py +66 -0
- oscura/comparison/compare.py +397 -0
- oscura/comparison/golden.py +487 -0
- oscura/comparison/limits.py +391 -0
- oscura/comparison/mask.py +434 -0
- oscura/comparison/trace_diff.py +30 -0
- oscura/comparison/visualization.py +481 -0
- oscura/compliance/__init__.py +70 -0
- oscura/compliance/advanced.py +756 -0
- oscura/compliance/masks.py +363 -0
- oscura/compliance/reporting.py +483 -0
- oscura/compliance/testing.py +298 -0
- oscura/component/__init__.py +38 -0
- oscura/component/impedance.py +365 -0
- oscura/component/reactive.py +598 -0
- oscura/component/transmission_line.py +312 -0
- oscura/config/__init__.py +191 -0
- oscura/config/defaults.py +254 -0
- oscura/config/loader.py +348 -0
- oscura/config/memory.py +271 -0
- oscura/config/migration.py +458 -0
- oscura/config/pipeline.py +1077 -0
- oscura/config/preferences.py +530 -0
- oscura/config/protocol.py +875 -0
- oscura/config/schema.py +713 -0
- oscura/config/settings.py +420 -0
- oscura/config/thresholds.py +599 -0
- oscura/convenience.py +457 -0
- oscura/core/__init__.py +299 -0
- oscura/core/audit.py +457 -0
- oscura/core/backend_selector.py +405 -0
- oscura/core/cache.py +590 -0
- oscura/core/cancellation.py +439 -0
- oscura/core/confidence.py +225 -0
- oscura/core/config.py +506 -0
- oscura/core/correlation.py +216 -0
- oscura/core/cross_domain.py +422 -0
- oscura/core/debug.py +301 -0
- oscura/core/edge_cases.py +541 -0
- oscura/core/exceptions.py +535 -0
- oscura/core/gpu_backend.py +523 -0
- oscura/core/lazy.py +832 -0
- oscura/core/log_query.py +540 -0
- oscura/core/logging.py +931 -0
- oscura/core/logging_advanced.py +952 -0
- oscura/core/memoize.py +171 -0
- oscura/core/memory_check.py +274 -0
- oscura/core/memory_guard.py +290 -0
- oscura/core/memory_limits.py +336 -0
- oscura/core/memory_monitor.py +453 -0
- oscura/core/memory_progress.py +465 -0
- oscura/core/memory_warnings.py +315 -0
- oscura/core/numba_backend.py +362 -0
- oscura/core/performance.py +352 -0
- oscura/core/progress.py +524 -0
- oscura/core/provenance.py +358 -0
- oscura/core/results.py +331 -0
- oscura/core/types.py +504 -0
- oscura/core/uncertainty.py +383 -0
- oscura/discovery/__init__.py +52 -0
- oscura/discovery/anomaly_detector.py +672 -0
- oscura/discovery/auto_decoder.py +415 -0
- oscura/discovery/comparison.py +497 -0
- oscura/discovery/quality_validator.py +528 -0
- oscura/discovery/signal_detector.py +769 -0
- oscura/dsl/__init__.py +73 -0
- oscura/dsl/commands.py +246 -0
- oscura/dsl/interpreter.py +455 -0
- oscura/dsl/parser.py +689 -0
- oscura/dsl/repl.py +172 -0
- oscura/exceptions.py +59 -0
- oscura/exploratory/__init__.py +111 -0
- oscura/exploratory/error_recovery.py +642 -0
- oscura/exploratory/fuzzy.py +513 -0
- oscura/exploratory/fuzzy_advanced.py +786 -0
- oscura/exploratory/legacy.py +831 -0
- oscura/exploratory/parse.py +358 -0
- oscura/exploratory/recovery.py +275 -0
- oscura/exploratory/sync.py +382 -0
- oscura/exploratory/unknown.py +707 -0
- oscura/export/__init__.py +25 -0
- oscura/export/wireshark/README.md +265 -0
- oscura/export/wireshark/__init__.py +47 -0
- oscura/export/wireshark/generator.py +312 -0
- oscura/export/wireshark/lua_builder.py +159 -0
- oscura/export/wireshark/templates/dissector.lua.j2 +92 -0
- oscura/export/wireshark/type_mapping.py +165 -0
- oscura/export/wireshark/validator.py +105 -0
- oscura/exporters/__init__.py +94 -0
- oscura/exporters/csv.py +303 -0
- oscura/exporters/exporters.py +44 -0
- oscura/exporters/hdf5.py +219 -0
- oscura/exporters/html_export.py +701 -0
- oscura/exporters/json_export.py +291 -0
- oscura/exporters/markdown_export.py +367 -0
- oscura/exporters/matlab_export.py +354 -0
- oscura/exporters/npz_export.py +219 -0
- oscura/exporters/spice_export.py +210 -0
- oscura/extensibility/__init__.py +131 -0
- oscura/extensibility/docs.py +752 -0
- oscura/extensibility/extensions.py +1125 -0
- oscura/extensibility/logging.py +259 -0
- oscura/extensibility/measurements.py +485 -0
- oscura/extensibility/plugins.py +414 -0
- oscura/extensibility/registry.py +346 -0
- oscura/extensibility/templates.py +913 -0
- oscura/extensibility/validation.py +651 -0
- oscura/filtering/__init__.py +89 -0
- oscura/filtering/base.py +563 -0
- oscura/filtering/convenience.py +564 -0
- oscura/filtering/design.py +725 -0
- oscura/filtering/filters.py +32 -0
- oscura/filtering/introspection.py +605 -0
- oscura/guidance/__init__.py +24 -0
- oscura/guidance/recommender.py +429 -0
- oscura/guidance/wizard.py +518 -0
- oscura/inference/__init__.py +251 -0
- oscura/inference/active_learning/README.md +153 -0
- oscura/inference/active_learning/__init__.py +38 -0
- oscura/inference/active_learning/lstar.py +257 -0
- oscura/inference/active_learning/observation_table.py +230 -0
- oscura/inference/active_learning/oracle.py +78 -0
- oscura/inference/active_learning/teachers/__init__.py +15 -0
- oscura/inference/active_learning/teachers/simulator.py +192 -0
- oscura/inference/adaptive_tuning.py +453 -0
- oscura/inference/alignment.py +653 -0
- oscura/inference/bayesian.py +943 -0
- oscura/inference/binary.py +1016 -0
- oscura/inference/crc_reverse.py +711 -0
- oscura/inference/logic.py +288 -0
- oscura/inference/message_format.py +1305 -0
- oscura/inference/protocol.py +417 -0
- oscura/inference/protocol_dsl.py +1084 -0
- oscura/inference/protocol_library.py +1230 -0
- oscura/inference/sequences.py +809 -0
- oscura/inference/signal_intelligence.py +1509 -0
- oscura/inference/spectral.py +215 -0
- oscura/inference/state_machine.py +634 -0
- oscura/inference/stream.py +918 -0
- oscura/integrations/__init__.py +59 -0
- oscura/integrations/llm.py +1827 -0
- oscura/jupyter/__init__.py +32 -0
- oscura/jupyter/display.py +268 -0
- oscura/jupyter/magic.py +334 -0
- oscura/loaders/__init__.py +526 -0
- oscura/loaders/binary.py +69 -0
- oscura/loaders/configurable.py +1255 -0
- oscura/loaders/csv.py +26 -0
- oscura/loaders/csv_loader.py +473 -0
- oscura/loaders/hdf5.py +9 -0
- oscura/loaders/hdf5_loader.py +510 -0
- oscura/loaders/lazy.py +370 -0
- oscura/loaders/mmap_loader.py +583 -0
- oscura/loaders/numpy_loader.py +436 -0
- oscura/loaders/pcap.py +432 -0
- oscura/loaders/preprocessing.py +368 -0
- oscura/loaders/rigol.py +287 -0
- oscura/loaders/sigrok.py +321 -0
- oscura/loaders/tdms.py +367 -0
- oscura/loaders/tektronix.py +711 -0
- oscura/loaders/validation.py +584 -0
- oscura/loaders/vcd.py +464 -0
- oscura/loaders/wav.py +233 -0
- oscura/math/__init__.py +45 -0
- oscura/math/arithmetic.py +824 -0
- oscura/math/interpolation.py +413 -0
- oscura/onboarding/__init__.py +39 -0
- oscura/onboarding/help.py +498 -0
- oscura/onboarding/tutorials.py +405 -0
- oscura/onboarding/wizard.py +466 -0
- oscura/optimization/__init__.py +19 -0
- oscura/optimization/parallel.py +440 -0
- oscura/optimization/search.py +532 -0
- oscura/pipeline/__init__.py +43 -0
- oscura/pipeline/base.py +338 -0
- oscura/pipeline/composition.py +242 -0
- oscura/pipeline/parallel.py +448 -0
- oscura/pipeline/pipeline.py +375 -0
- oscura/pipeline/reverse_engineering.py +1119 -0
- oscura/plugins/__init__.py +122 -0
- oscura/plugins/base.py +272 -0
- oscura/plugins/cli.py +497 -0
- oscura/plugins/discovery.py +411 -0
- oscura/plugins/isolation.py +418 -0
- oscura/plugins/lifecycle.py +959 -0
- oscura/plugins/manager.py +493 -0
- oscura/plugins/registry.py +421 -0
- oscura/plugins/versioning.py +372 -0
- oscura/py.typed +0 -0
- oscura/quality/__init__.py +65 -0
- oscura/quality/ensemble.py +740 -0
- oscura/quality/explainer.py +338 -0
- oscura/quality/scoring.py +616 -0
- oscura/quality/warnings.py +456 -0
- oscura/reporting/__init__.py +248 -0
- oscura/reporting/advanced.py +1234 -0
- oscura/reporting/analyze.py +448 -0
- oscura/reporting/argument_preparer.py +596 -0
- oscura/reporting/auto_report.py +507 -0
- oscura/reporting/batch.py +615 -0
- oscura/reporting/chart_selection.py +223 -0
- oscura/reporting/comparison.py +330 -0
- oscura/reporting/config.py +615 -0
- oscura/reporting/content/__init__.py +39 -0
- oscura/reporting/content/executive.py +127 -0
- oscura/reporting/content/filtering.py +191 -0
- oscura/reporting/content/minimal.py +257 -0
- oscura/reporting/content/verbosity.py +162 -0
- oscura/reporting/core.py +508 -0
- oscura/reporting/core_formats/__init__.py +17 -0
- oscura/reporting/core_formats/multi_format.py +210 -0
- oscura/reporting/engine.py +836 -0
- oscura/reporting/export.py +366 -0
- oscura/reporting/formatting/__init__.py +129 -0
- oscura/reporting/formatting/emphasis.py +81 -0
- oscura/reporting/formatting/numbers.py +403 -0
- oscura/reporting/formatting/standards.py +55 -0
- oscura/reporting/formatting.py +466 -0
- oscura/reporting/html.py +578 -0
- oscura/reporting/index.py +590 -0
- oscura/reporting/multichannel.py +296 -0
- oscura/reporting/output.py +379 -0
- oscura/reporting/pdf.py +373 -0
- oscura/reporting/plots.py +731 -0
- oscura/reporting/pptx_export.py +360 -0
- oscura/reporting/renderers/__init__.py +11 -0
- oscura/reporting/renderers/pdf.py +94 -0
- oscura/reporting/sections.py +471 -0
- oscura/reporting/standards.py +680 -0
- oscura/reporting/summary_generator.py +368 -0
- oscura/reporting/tables.py +397 -0
- oscura/reporting/template_system.py +724 -0
- oscura/reporting/templates/__init__.py +15 -0
- oscura/reporting/templates/definition.py +205 -0
- oscura/reporting/templates/index.html +649 -0
- oscura/reporting/templates/index.md +173 -0
- oscura/schemas/__init__.py +158 -0
- oscura/schemas/bus_configuration.json +322 -0
- oscura/schemas/device_mapping.json +182 -0
- oscura/schemas/packet_format.json +418 -0
- oscura/schemas/protocol_definition.json +363 -0
- oscura/search/__init__.py +16 -0
- oscura/search/anomaly.py +292 -0
- oscura/search/context.py +149 -0
- oscura/search/pattern.py +160 -0
- oscura/session/__init__.py +34 -0
- oscura/session/annotations.py +289 -0
- oscura/session/history.py +313 -0
- oscura/session/session.py +445 -0
- oscura/streaming/__init__.py +43 -0
- oscura/streaming/chunked.py +611 -0
- oscura/streaming/progressive.py +393 -0
- oscura/streaming/realtime.py +622 -0
- oscura/testing/__init__.py +54 -0
- oscura/testing/synthetic.py +808 -0
- oscura/triggering/__init__.py +68 -0
- oscura/triggering/base.py +229 -0
- oscura/triggering/edge.py +353 -0
- oscura/triggering/pattern.py +344 -0
- oscura/triggering/pulse.py +581 -0
- oscura/triggering/window.py +453 -0
- oscura/ui/__init__.py +48 -0
- oscura/ui/formatters.py +526 -0
- oscura/ui/progressive_display.py +340 -0
- oscura/utils/__init__.py +99 -0
- oscura/utils/autodetect.py +338 -0
- oscura/utils/buffer.py +389 -0
- oscura/utils/lazy.py +407 -0
- oscura/utils/lazy_imports.py +147 -0
- oscura/utils/memory.py +836 -0
- oscura/utils/memory_advanced.py +1326 -0
- oscura/utils/memory_extensions.py +465 -0
- oscura/utils/progressive.py +352 -0
- oscura/utils/windowing.py +362 -0
- oscura/visualization/__init__.py +321 -0
- oscura/visualization/accessibility.py +526 -0
- oscura/visualization/annotations.py +374 -0
- oscura/visualization/axis_scaling.py +305 -0
- oscura/visualization/colors.py +453 -0
- oscura/visualization/digital.py +337 -0
- oscura/visualization/eye.py +420 -0
- oscura/visualization/histogram.py +281 -0
- oscura/visualization/interactive.py +858 -0
- oscura/visualization/jitter.py +702 -0
- oscura/visualization/keyboard.py +394 -0
- oscura/visualization/layout.py +365 -0
- oscura/visualization/optimization.py +1028 -0
- oscura/visualization/palettes.py +446 -0
- oscura/visualization/plot.py +92 -0
- oscura/visualization/power.py +290 -0
- oscura/visualization/power_extended.py +626 -0
- oscura/visualization/presets.py +467 -0
- oscura/visualization/protocols.py +932 -0
- oscura/visualization/render.py +207 -0
- oscura/visualization/rendering.py +444 -0
- oscura/visualization/reverse_engineering.py +791 -0
- oscura/visualization/signal_integrity.py +808 -0
- oscura/visualization/specialized.py +553 -0
- oscura/visualization/spectral.py +811 -0
- oscura/visualization/styles.py +381 -0
- oscura/visualization/thumbnails.py +311 -0
- oscura/visualization/time_axis.py +351 -0
- oscura/visualization/waveform.py +367 -0
- oscura/workflow/__init__.py +13 -0
- oscura/workflow/dag.py +377 -0
- oscura/workflows/__init__.py +58 -0
- oscura/workflows/compliance.py +280 -0
- oscura/workflows/digital.py +272 -0
- oscura/workflows/multi_trace.py +502 -0
- oscura/workflows/power.py +178 -0
- oscura/workflows/protocol.py +492 -0
- oscura/workflows/reverse_engineering.py +639 -0
- oscura/workflows/signal_integrity.py +227 -0
- oscura-0.1.0.dist-info/METADATA +300 -0
- oscura-0.1.0.dist-info/RECORD +463 -0
- oscura-0.1.0.dist-info/entry_points.txt +2 -0
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/licenses/LICENSE +1 -1
- oscura-0.0.1.dist-info/METADATA +0 -63
- oscura-0.0.1.dist-info/RECORD +0 -5
- {oscura-0.0.1.dist-info → oscura-0.1.0.dist-info}/WHEEL +0 -0
oscura/loaders/sigrok.py
ADDED
@@ -0,0 +1,321 @@

"""Sigrok session file (.sr) loader.

This module provides loading of sigrok session files containing
logic analyzer captures. Sigrok sessions are ZIP archives containing
metadata and binary signal data.


Example:
    >>> from oscura.loaders.sigrok import load_sigrok
    >>> trace = load_sigrok("capture.sr")
    >>> print(f"Sample rate: {trace.metadata.sample_rate} Hz")
    >>> print(f"Channels: {len(trace.data)}")
"""

from __future__ import annotations

import zipfile
from pathlib import Path
from typing import TYPE_CHECKING, Any

import numpy as np
from numpy.typing import NDArray

from oscura.core.exceptions import FormatError, LoaderError
from oscura.core.types import DigitalTrace, TraceMetadata

if TYPE_CHECKING:
    from os import PathLike


def load_sigrok(
    path: str | PathLike[str],
    *,
    channel: str | int | None = None,
) -> DigitalTrace:
    """Load a sigrok session file (.sr).

    Sigrok session files are ZIP archives containing:
    - metadata: JSON file with capture settings
    - logic-1-*: Binary files with sample data

    Args:
        path: Path to the sigrok .sr session file.
        channel: Optional channel name or index to load. If None,
            loads the first channel or merges all channels.

    Returns:
        DigitalTrace containing the digital signal data and metadata.

    Raises:
        LoaderError: If the file cannot be loaded.
        FormatError: If the file is not a valid sigrok session.

    Example:
        >>> trace = load_sigrok("capture.sr")
        >>> print(f"Sample rate: {trace.metadata.sample_rate} Hz")
        >>> print(f"Duration: {trace.duration:.6f} seconds")

    References:
        sigrok session file format specification
    """
    path = Path(path)

    if not path.exists():
        raise LoaderError(
            "File not found",
            file_path=str(path),
        )

    if not zipfile.is_zipfile(path):
        raise FormatError(
            "File is not a valid sigrok session (not a ZIP archive)",
            file_path=str(path),
            expected="ZIP archive",
        )

    try:
        with zipfile.ZipFile(path, "r") as zf:
            # Parse metadata
            metadata_dict = _parse_metadata(zf, path)

            # Get sample rate from metadata
            sample_rate = metadata_dict.get("samplerate", 1_000_000)

            # Get channel information
            channels = metadata_dict.get("channels", [])
            total_channels = metadata_dict.get("total probes", len(channels))

            # Find and read logic data files
            logic_files = [name for name in zf.namelist() if name.startswith("logic-1")]

            if not logic_files:
                raise FormatError(
                    "No logic data found in sigrok session",
                    file_path=str(path),
                    expected="logic-1-* data files",
                )

            # Read and combine logic data
            data = _read_logic_data(zf, logic_files, total_channels)

            # Select specific channel if requested
            if channel is not None:
                if isinstance(channel, int):
                    if channel < 0 or channel >= data.shape[0]:
                        raise LoaderError(
                            f"Channel index {channel} out of range",
                            file_path=str(path),
                            details=f"Available channels: 0-{data.shape[0] - 1}",
                        )
                    channel_data = data[channel]
                    channel_name = channels[channel] if channel < len(channels) else f"D{channel}"
                elif isinstance(channel, str):
                    if channel in channels:
                        idx = channels.index(channel)
                        channel_data = data[idx]
                        channel_name = channel
                    else:
                        raise LoaderError(
                            f"Channel '{channel}' not found",
                            file_path=str(path),
                            details=f"Available channels: {channels}",
                        )
                else:
                    channel_data = data[0]  # type: ignore[unreachable]
                    channel_name = channels[0] if channels else "D0"
            else:
                # Default to first channel
                channel_data = data[0] if data.ndim > 1 else data
                channel_name = channels[0] if channels else "D0"

            # Compute edges
            edges = _compute_edges(channel_data, sample_rate)

            # Build metadata
            trace_metadata = TraceMetadata(
                sample_rate=float(sample_rate),
                source_file=str(path),
                channel_name=channel_name,
                trigger_info=metadata_dict.get("trigger", None),
            )

            return DigitalTrace(
                data=channel_data,
                metadata=trace_metadata,
                edges=edges,
            )

    except zipfile.BadZipFile as e:
        raise FormatError(
            "Corrupted sigrok session file",
            file_path=str(path),
            expected="Valid ZIP archive",
        ) from e
    except Exception as e:
        if isinstance(e, LoaderError | FormatError):
            raise
        raise LoaderError(
            "Failed to load sigrok session",
            file_path=str(path),
            details=str(e),
            fix_hint="Ensure the file is a valid sigrok session (.sr) file.",
        ) from e


def _parse_metadata(zf: zipfile.ZipFile, path: Path) -> dict[str, Any]:
    """Parse sigrok session metadata.

    Args:
        zf: Open ZipFile object.
        path: Path to the session file (for error messages).

    Returns:
        Dictionary of metadata values.
    """
    metadata: dict[str, Any] = {}

    # Try to read metadata file (JSON format in newer versions)
    if "metadata" in zf.namelist():
        try:
            with zf.open("metadata") as f:
                content = f.read().decode("utf-8")
                # Parse key=value format (sigrok classic format)
                for line in content.strip().split("\n"):
                    line = line.strip()
                    if "=" in line:
                        key, value = line.split("=", 1)
                        key = key.strip()
                        value = value.strip()
                        # Try to convert numeric values
                        try:
                            if "." in value:
                                metadata[key] = float(value)
                            else:
                                metadata[key] = int(value)
                        except ValueError:
                            metadata[key] = value
        except Exception:
            pass  # Use defaults if metadata parsing fails

    # Extract channel names from probe entries
    channels: list[str] = []
    for key, value in metadata.items():
        if key.startswith("probe"):
            try:
                idx = int(key.replace("probe", ""))
                while len(channels) <= idx:
                    channels.append(f"D{len(channels)}")
                channels[idx] = value
            except ValueError:
                pass

    if channels:
        metadata["channels"] = channels

    return metadata


def _read_logic_data(
    zf: zipfile.ZipFile,
    logic_files: list[str],
    total_channels: int,
) -> NDArray[np.bool_]:
    """Read and decode logic data from sigrok session.

    Args:
        zf: Open ZipFile object.
        logic_files: List of logic data file names.
        total_channels: Total number of digital channels.

    Returns:
        Boolean array of shape (channels, samples).
    """
    # Sort logic files to ensure correct order
    logic_files = sorted(logic_files)

    # Determine bytes per sample based on channel count
    bytes_per_sample = (total_channels + 7) // 8

    # Read all logic data
    all_data = []
    for logic_file in logic_files:
        with zf.open(logic_file) as f:
            raw_data = f.read()
            all_data.append(raw_data)

    # Combine data
    combined = b"".join(all_data)

    # Convert to numpy array
    if bytes_per_sample == 1:
        raw = np.frombuffer(combined, dtype=np.uint8)
    elif bytes_per_sample == 2:
        raw = np.frombuffer(combined, dtype=np.uint16)
    elif bytes_per_sample <= 4:
        # Pad to 4 bytes and read as uint32
        padded = combined + b"\x00" * (len(combined) % 4)
        raw = np.frombuffer(padded, dtype=np.uint32)
    else:
        # Handle larger sample widths
        raw = np.frombuffer(combined, dtype=np.uint8)

    # Extract individual channel bits
    n_samples = len(raw)
    channels_data = np.zeros((total_channels, n_samples), dtype=np.bool_)

    for ch in range(total_channels):
        if bytes_per_sample <= 4:
            channels_data[ch] = (raw >> ch) & 1
        else:
            # For larger widths, calculate byte and bit position
            byte_idx = ch // 8
            bit_idx = ch % 8
            byte_data = raw[byte_idx::bytes_per_sample]
            channels_data[ch, : len(byte_data)] = (byte_data >> bit_idx) & 1

    return channels_data


def _compute_edges(
    data: NDArray[np.bool_],
    sample_rate: float,
) -> list[tuple[float, bool]]:
    """Compute edge timestamps from digital data.

    Args:
        data: Boolean array of digital samples.
        sample_rate: Sample rate in Hz.

    Returns:
        List of (timestamp, is_rising) tuples.
    """
    edges: list[tuple[float, bool]] = []

    if len(data) < 2:
        return edges

    # Find transitions
    diff = np.diff(data.astype(np.int8))
    rising_indices = np.where(diff == 1)[0]
    falling_indices = np.where(diff == -1)[0]

    time_per_sample = 1.0 / sample_rate

    # Add rising edges
    for idx in rising_indices:
        timestamp = (idx + 1) * time_per_sample
        edges.append((timestamp, True))

    # Add falling edges
    for idx in falling_indices:
        timestamp = (idx + 1) * time_per_sample
        edges.append((timestamp, False))

    # Sort by timestamp
    edges.sort(key=lambda x: x[0])

    return edges


__all__ = ["load_sigrok"]
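For context, a minimal usage sketch of the loader added above (the capture path and channel name are hypothetical; the fields used here are exactly those populated by load_sigrok on its returned DigitalTrace):

    from oscura.loaders.sigrok import load_sigrok

    # Load one named channel from a hypothetical capture file.
    trace = load_sigrok("capture.sr", channel="D0")

    print(f"Sample rate: {trace.metadata.sample_rate} Hz")
    print(f"Samples: {len(trace.data)}")

    # Edges are (timestamp_seconds, is_rising) tuples computed by _compute_edges.
    for timestamp, is_rising in trace.edges[:5]:
        print(f"{'rising' if is_rising else 'falling'} edge at {timestamp:.9f} s")

If no samplerate key is present in the session metadata, the loader falls back to the 1 MHz default shown in the source.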
oscura/loaders/tdms.py
ADDED
@@ -0,0 +1,367 @@

"""NI TDMS (Technical Data Management Streaming) file loader.

This module provides loading of NI LabVIEW TDMS files using the
npTDMS library when available.


Example:
    >>> from oscura.loaders.tdms import load_tdms
    >>> trace = load_tdms("measurement.tdms")
    >>> print(f"Sample rate: {trace.metadata.sample_rate} Hz")
"""

from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING, Any

import numpy as np

from oscura.core.exceptions import FormatError, LoaderError
from oscura.core.types import TraceMetadata, WaveformTrace

if TYPE_CHECKING:
    from os import PathLike

# Try to import npTDMS for TDMS support
try:
    from nptdms import TdmsFile

    NPTDMS_AVAILABLE = True
except ImportError:
    NPTDMS_AVAILABLE = False


def load_tdms(
    path: str | PathLike[str],
    *,
    channel: str | int | None = None,
    group: str | None = None,
) -> WaveformTrace:
    """Load an NI TDMS file.

    TDMS files contain hierarchical data with groups and channels.
    Each channel can have associated properties including sample rate.

    Args:
        path: Path to the TDMS file.
        channel: Channel name or index to load. If None, loads the
            first channel found.
        group: Group name to select from. If None, uses the first group.

    Returns:
        WaveformTrace containing the channel data and metadata.

    Raises:
        Exception: If the file cannot be read or parsed.
        LoaderError: If the file cannot be loaded or npTDMS is not installed.

    Example:
        >>> trace = load_tdms("measurement.tdms", group="Voltage", channel="CH1")
        >>> print(f"Sample rate: {trace.metadata.sample_rate} Hz")
        >>> print(f"Duration: {trace.duration:.6f} seconds")

    References:
        NI TDMS File Format: https://www.ni.com/en-us/support/documentation/
    """
    path = Path(path)

    if not path.exists():
        raise LoaderError(
            "File not found",
            file_path=str(path),
        )

    if not NPTDMS_AVAILABLE:
        raise LoaderError(
            "npTDMS library required for TDMS files",
            file_path=str(path),
            fix_hint="Install npTDMS: pip install npTDMS",
        )

    try:
        return _load_with_nptdms(path, channel=channel, group=group)
    except Exception as e:
        if isinstance(e, LoaderError | FormatError):
            raise
        raise LoaderError(
            "Failed to load TDMS file",
            file_path=str(path),
            details=str(e),
            fix_hint="Ensure the file is a valid NI TDMS format.",
        ) from e


def _load_with_nptdms(
    path: Path,
    *,
    channel: str | int | None = None,
    group: str | None = None,
) -> WaveformTrace:
    """Load TDMS using npTDMS library.

    Args:
        path: Path to the TDMS file.
        channel: Channel name or index.
        group: Group name to select.

    Returns:
        WaveformTrace with channel data and metadata.

    Raises:
        FormatError: If file is not valid TDMS format or has no data.
        LoaderError: If channel or group not found.
    """
    try:
        tdms_file = TdmsFile.read(str(path))
    except Exception as e:
        raise FormatError(
            "Failed to parse TDMS file",
            file_path=str(path),
            expected="Valid NI TDMS format",
        ) from e

    # Get available groups
    groups = list(tdms_file.groups())

    if not groups:
        raise FormatError(
            "No groups found in TDMS file",
            file_path=str(path),
        )

    # Select group
    if group is not None:
        target_group = None
        for g in groups:
            if g.name == group:
                target_group = g
                break
        if target_group is None:
            available_groups = [g.name for g in groups]
            raise LoaderError(
                f"Group '{group}' not found",
                file_path=str(path),
                details=f"Available groups: {available_groups}",
            )
    else:
        target_group = groups[0]

    # Get channels in group
    channels = list(target_group.channels())

    if not channels:
        raise FormatError(
            f"No channels found in group '{target_group.name}'",
            file_path=str(path),
        )

    # Select channel
    if channel is not None:
        if isinstance(channel, int):
            if channel < 0 or channel >= len(channels):
                raise LoaderError(
                    f"Channel index {channel} out of range",
                    file_path=str(path),
                    details=f"Available channels: 0-{len(channels) - 1}",
                )
            target_channel = channels[channel]
        elif isinstance(channel, str):
            target_channel = None
            for ch in channels:
                if ch.name == channel:
                    target_channel = ch
                    break
            if target_channel is None:
                available_channels = [ch.name for ch in channels]
                raise LoaderError(
                    f"Channel '{channel}' not found",
                    file_path=str(path),
                    details=f"Available channels: {available_channels}",
                )
        else:
            target_channel = channels[0]  # type: ignore[unreachable]
    else:
        target_channel = channels[0]

    # Get channel data
    data = target_channel.data
    if data is None or len(data) == 0:
        raise FormatError(
            f"Channel '{target_channel.name}' has no data",
            file_path=str(path),
        )

    # Convert to float64
    data = np.asarray(data, dtype=np.float64)

    # Extract sample rate from properties
    sample_rate = _get_sample_rate(target_channel, target_group, tdms_file)

    # Extract other metadata
    vertical_scale = target_channel.properties.get("NI_Scale[0]_Linear_Slope")
    vertical_offset = target_channel.properties.get("NI_Scale[0]_Linear_Y_Intercept")

    # Get units if available
    target_channel.properties.get("unit_string", None)

    # Build metadata
    metadata = TraceMetadata(
        sample_rate=sample_rate,
        vertical_scale=float(vertical_scale) if vertical_scale is not None else None,
        vertical_offset=float(vertical_offset) if vertical_offset is not None else None,
        source_file=str(path),
        channel_name=target_channel.name,
        trigger_info=_extract_tdms_properties(target_channel),
    )

    return WaveformTrace(data=data, metadata=metadata)


def _get_sample_rate(
    channel: Any,
    group: Any,
    tdms_file: Any,
) -> float:
    """Extract sample rate from TDMS channel properties.

    Checks multiple common property names used by different NI software.

    Args:
        channel: TDMS channel object.
        group: TDMS group object.
        tdms_file: TDMS file object.

    Returns:
        Sample rate in Hz.
    """
    # Common property names for sample rate
    sample_rate_keys = [
        "wf_samples",  # DAQmx
        "wf_increment",  # Waveform dt (inverse of sample rate)
        "NI_RF_IQ_Rate",  # RF signal analyzer
        "SamplingFrequency",  # SignalExpress
        "dt",  # Delta time
        "Fs",  # Sample rate
        "SampleRate",
        "sample_rate",
    ]

    # Check channel properties
    for key in sample_rate_keys:
        value = channel.properties.get(key)
        if value is not None:
            if key in ("wf_increment", "dt"):
                # These are time intervals, invert for sample rate
                if value > 0:
                    return 1.0 / float(value)
            else:
                return float(value)

    # Check group properties
    for key in sample_rate_keys:
        value = group.properties.get(key)
        if value is not None:
            if key in ("wf_increment", "dt"):
                if value > 0:
                    return 1.0 / float(value)
            else:
                return float(value)

    # Check file properties
    for key in sample_rate_keys:
        value = tdms_file.properties.get(key)
        if value is not None:
            if key in ("wf_increment", "dt"):
                if value > 0:
                    return 1.0 / float(value)
            else:
                return float(value)

    # Default sample rate if not found
    return 1.0e6  # 1 MHz default


def _extract_tdms_properties(channel: Any) -> dict[str, Any] | None:
    """Extract relevant properties from TDMS channel.

    Args:
        channel: TDMS channel object.

    Returns:
        Dictionary of properties, or None if no useful properties found.
    """
    props: dict[str, Any] = {}

    # Common useful properties
    useful_keys = [
        "unit_string",
        "NI_ChannelName",
        "wf_start_time",
        "wf_start_offset",
        "description",
        "NI_Scale[0]_Linear_Slope",
        "NI_Scale[0]_Linear_Y_Intercept",
    ]

    for key in useful_keys:
        value = channel.properties.get(key)
        if value is not None:
            props[key] = value

    return props if props else None


def list_tdms_channels(
    path: str | PathLike[str],
) -> dict[str, list[str]]:
    """List all groups and channels in a TDMS file.

    Args:
        path: Path to the TDMS file.

    Returns:
        Dictionary mapping group names to lists of channel names.

    Raises:
        LoaderError: If the file cannot be loaded.

    Example:
        >>> channels = list_tdms_channels("measurement.tdms")
        >>> for group, chans in channels.items():
        ...     print(f"Group '{group}': {chans}")
    """
    path = Path(path)

    if not path.exists():
        raise LoaderError(
            "File not found",
            file_path=str(path),
        )

    if not NPTDMS_AVAILABLE:
        raise LoaderError(
            "npTDMS library required for TDMS files",
            file_path=str(path),
            fix_hint="Install npTDMS: pip install npTDMS",
        )

    try:
        tdms_file = TdmsFile.read(str(path))
        result: dict[str, list[str]] = {}

        for group in tdms_file.groups():
            channel_names = [ch.name for ch in group.channels()]
            result[group.name] = channel_names

        return result

    except Exception as e:
        raise LoaderError(
            "Failed to read TDMS file",
            file_path=str(path),
            details=str(e),
        ) from e


__all__ = ["list_tdms_channels", "load_tdms"]
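Similarly, a minimal usage sketch for the TDMS loader added above (the file, group, and channel names are hypothetical; list_tdms_channels and load_tdms are the functions exported by this module and require the optional npTDMS dependency):

    from oscura.loaders.tdms import list_tdms_channels, load_tdms

    # Discover the group/channel hierarchy first (requires `pip install npTDMS`).
    for group_name, channel_names in list_tdms_channels("measurement.tdms").items():
        print(f"Group '{group_name}': {channel_names}")

    # Load one channel; the sample rate is resolved from properties such as
    # wf_increment or dt (time steps, which _get_sample_rate inverts) and
    # otherwise falls back to the 1 MHz default.
    trace = load_tdms("measurement.tdms", group="Voltage", channel="CH1")
    print(f"Sample rate: {trace.metadata.sample_rate} Hz")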