zea 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- zea/__init__.py +54 -19
- zea/agent/__init__.py +12 -12
- zea/agent/masks.py +2 -1
- zea/agent/selection.py +166 -0
- zea/backend/__init__.py +89 -0
- zea/backend/jax/__init__.py +14 -51
- zea/backend/tensorflow/__init__.py +0 -49
- zea/backend/tensorflow/dataloader.py +2 -1
- zea/backend/torch/__init__.py +27 -62
- zea/beamform/beamformer.py +100 -50
- zea/beamform/lens_correction.py +9 -2
- zea/beamform/pfield.py +9 -2
- zea/config.py +34 -25
- zea/data/__init__.py +22 -16
- zea/data/convert/camus.py +2 -1
- zea/data/convert/echonet.py +4 -4
- zea/data/convert/echonetlvh/convert_raw_to_usbmd.py +1 -1
- zea/data/convert/matlab.py +11 -4
- zea/data/data_format.py +31 -30
- zea/data/datasets.py +7 -5
- zea/data/file.py +104 -2
- zea/data/layers.py +5 -6
- zea/datapaths.py +16 -4
- zea/display.py +7 -5
- zea/interface.py +14 -16
- zea/internal/_generate_keras_ops.py +6 -7
- zea/internal/cache.py +2 -49
- zea/internal/config/validation.py +1 -2
- zea/internal/core.py +69 -6
- zea/internal/device.py +6 -2
- zea/internal/dummy_scan.py +330 -0
- zea/internal/operators.py +114 -2
- zea/internal/parameters.py +101 -70
- zea/internal/registry.py +1 -1
- zea/internal/setup_zea.py +5 -6
- zea/internal/utils.py +282 -0
- zea/io_lib.py +247 -19
- zea/keras_ops.py +74 -4
- zea/log.py +9 -7
- zea/metrics.py +365 -65
- zea/models/__init__.py +30 -20
- zea/models/base.py +30 -14
- zea/models/carotid_segmenter.py +19 -4
- zea/models/diffusion.py +187 -26
- zea/models/echonet.py +22 -8
- zea/models/echonetlvh.py +31 -18
- zea/models/lpips.py +19 -2
- zea/models/lv_segmentation.py +96 -0
- zea/models/preset_utils.py +5 -5
- zea/models/presets.py +36 -0
- zea/models/regional_quality.py +142 -0
- zea/models/taesd.py +21 -5
- zea/models/unet.py +15 -1
- zea/ops.py +414 -207
- zea/probes.py +6 -6
- zea/scan.py +109 -49
- zea/simulator.py +24 -21
- zea/tensor_ops.py +411 -206
- zea/tools/hf.py +1 -1
- zea/tools/selection_tool.py +47 -86
- zea/utils.py +92 -480
- zea/visualize.py +177 -39
- {zea-0.0.5.dist-info → zea-0.0.7.dist-info}/METADATA +9 -3
- zea-0.0.7.dist-info/RECORD +114 -0
- {zea-0.0.5.dist-info → zea-0.0.7.dist-info}/WHEEL +1 -1
- zea-0.0.5.dist-info/RECORD +0 -110
- {zea-0.0.5.dist-info → zea-0.0.7.dist-info}/entry_points.txt +0 -0
- {zea-0.0.5.dist-info → zea-0.0.7.dist-info/licenses}/LICENSE +0 -0
zea/models/regional_quality.py
ADDED
@@ -0,0 +1,142 @@
+"""
+MobileNetV2-based image quality model for myocardial regions in apical views.
+
+To try this model, simply load one of the available presets:
+
+.. doctest::
+
+    >>> from zea.models.regional_quality import MobileNetv2RegionalQuality
+
+    >>> model = MobileNetv2RegionalQuality.from_preset("mobilenetv2_regional_quality")
+
+The model predicts the regional image quality of the myocardial regions in
+apical views. It can also be used to get the overall image quality by
+averaging the regional scores.
+
+At the time of writing (17 September 2025) and to the best of our knowledge,
+it is the state-of-the-art model for regional image quality scoring in 2-D echocardiography.
+
+.. important::
+    This is a ``zea`` implementation of the model.
+    For the original paper and code, see `here <https://github.com/GillesVanDeVyver/arqee>`_.
+
+    Van De Vyver, et al. "Regional Image Quality Scoring for 2-D Echocardiography Using Deep Learning."
+    *Ultrasound in Medicine & Biology 51.4 (2025): 638-649*
+
+.. seealso::
+    A tutorial notebook where this model is used:
+    :doc:`../notebooks/metrics/myocardial_quality_example`.
+
+.. note::
+    The model is originally a PyTorch model converted to ONNX, so `onnxruntime` must be installed for inference.
+
+    You can install it using pip:
+
+    .. code-block:: bash
+
+        pip install onnxruntime
+
+"""  # noqa: E501
+
+import numpy as np
+from keras import ops
+
+from zea.internal.registry import model_registry
+from zea.models.base import BaseModel
+from zea.models.preset_utils import get_preset_loader, register_presets
+from zea.models.presets import regional_quality_presets
+
+# Visualization colors and labels for regional quality (arqee-inspired)
+QUALITY_COLORS = np.array(
+    [
+        [0.929, 0.106, 0.141],  # not visible, red
+        [0.957, 0.396, 0.137],  # poor, orange
+        [1, 0.984, 0.090],  # ok, yellow
+        [0.553, 0.776, 0.098],  # good, light green
+        [0.09, 0.407, 0.216],  # excellent, dark green
+    ]
+)
+REGION_LABELS = [
+    "basal_left",
+    "mid_left",
+    "apical_left",
+    "apical_right",
+    "mid_right",
+    "basal_right",
+    "annulus_left",
+    "annulus_right",
+]
+QUALITY_CLASSES = ["not visible", "poor", "ok", "good", "excellent"]
+
+
+@model_registry(name="mobilenetv2_regional_quality")
+class MobileNetv2RegionalQuality(BaseModel):
+    """
+    MobileNetV2-based regional image quality scoring model for myocardial regions in apical views.
+
+    This class loads an ONNX model and provides inference for regional image quality scoring tasks.
+    """
+
+    def preprocess_input(self, inputs):
+        """
+        Normalize input image(s) to the [0, 255] range.
+
+        Args:
+            inputs (np.ndarray): Input image(s), any numeric range.
+
+        Returns:
+            np.ndarray: Normalized image(s) in [0, 255] range.
+        """
+        inputs = ops.convert_to_numpy(inputs).astype("float32")
+        max_val = np.max(inputs)
+        min_val = np.min(inputs)
+        denom = max_val - min_val
+        if denom > 0.0:
+            inputs = (inputs - min_val) / denom * 255.0
+        else:
+            inputs = np.zeros_like(inputs, dtype=np.float32)
+        return inputs
+
+    def call(self, inputs):
+        """
+        Predict regional image quality scores for input image(s).
+
+        Args:
+            inputs (np.ndarray): Input image or batch of images.
+                Shape: [batch, 1, 256, 256]
+
+        Returns:
+            np.ndarray: Regional quality scores.
+                Shape is [batch, 8] with regions in order:
+                basal_left, mid_left, apical_left, apical_right,
+                mid_right, basal_right, annulus_left, annulus_right
+        """
+        if not hasattr(self, "onnx_sess"):
+            raise ValueError("Model weights not loaded. Please call custom_load_weights() first.")
+        input_name = self.onnx_sess.get_inputs()[0].name
+        output_name = self.onnx_sess.get_outputs()[0].name
+        inputs = self.preprocess_input(inputs)
+
+        output = self.onnx_sess.run([output_name], {input_name: inputs})[0]
+        slope = self.slope_intercept[0]
+        intercept = self.slope_intercept[1]
+        output_debiased = (output - intercept) / slope
+        return output_debiased
+
+    def custom_load_weights(self, preset, **kwargs):
+        """Load ONNX model weights and bias correction for regional image quality scoring."""
+        try:
+            import onnxruntime
+        except ImportError:
+            raise ImportError(
+                "onnxruntime is not installed. Please run "
+                "`pip install onnxruntime` to use this model."
+            )
+        loader = get_preset_loader(preset)
+        filename = loader.get_file("model.onnx")
+        self.onnx_sess = onnxruntime.InferenceSession(filename)
+        filename = loader.get_file("slope_intercept_bias_correction.npy")
+        self.slope_intercept = np.load(filename)
+
+
+register_presets(regional_quality_presets, MobileNetv2RegionalQuality)
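To make the inference flow of the new module concrete, here is a minimal usage sketch (not part of the diff). It assumes `from_preset` triggers `custom_load_weights`, so the ONNX session and bias correction are in place; the random array stands in for a real apical-view frame, and rounding the debiased scores to the five quality classes is our illustration, not package code.

import numpy as np

from zea.models.regional_quality import (
    MobileNetv2RegionalQuality,
    QUALITY_CLASSES,
    REGION_LABELS,
)

# Load the preset; this is expected to fetch model.onnx and the
# slope/intercept bias correction via custom_load_weights().
model = MobileNetv2RegionalQuality.from_preset("mobilenetv2_regional_quality")

# Stand-in for a real apical-view frame: [batch, 1, 256, 256], any value
# range (preprocess_input() rescales to [0, 255] internally).
frame = np.random.rand(1, 1, 256, 256).astype("float32")

scores = model.call(frame)  # [batch, 8] debiased regional scores

# Illustrative readout: snap each score to the nearest of the five
# quality classes (this mapping is an assumption, not package code).
for label, score in zip(REGION_LABELS, scores[0]):
    idx = int(np.clip(round(float(score)), 0, len(QUALITY_CLASSES) - 1))
    print(f"{label}: {score:.2f} ({QUALITY_CLASSES[idx]})")

# Overall image quality by averaging the regional scores, as the module
# docstring suggests.
print(f"overall: {float(np.mean(scores[0])):.2f}")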
zea/models/taesd.py
CHANGED
@@ -1,9 +1,19 @@
-"""
+"""
+Tiny Autoencoder (TAESD) model.
+
+.. doctest::
+
+    >>> from zea.models.taesd import TinyAutoencoder
+
+    >>> model = TinyAutoencoder.from_preset("taesdxl")  # doctest: +SKIP

-
+.. important::
+    This is a ``zea`` implementation of the model.
+    For the original code, see `here <https://github.com/madebyollin/taesd>`_.

-
-:
+.. seealso::
+    A tutorial notebook where this model is used:
+    :doc:`../notebooks/models/taesd_autoencoder_example`.

 """

@@ -23,7 +33,13 @@ tf = _import_tf()

 @model_registry(name="taesdxl")
 class TinyAutoencoder(BaseModel):
-    """
+    """Tiny Autoencoder model.
+
+    .. note::
+
+        This model currently only supports TensorFlow and Jax backends.
+
+    """

     def __init__(self, **kwargs):
         """
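Since the new docstring note restricts TinyAutoencoder to the TensorFlow and JAX backends, a short sketch of honoring that when loading the preset (not part of the diff; pinning the backend via `KERAS_BACKEND` before import is standard Keras 3 practice):

import os

# Pin a supported backend before Keras is first imported; per the note
# above, the torch backend is not supported by this model.
os.environ["KERAS_BACKEND"] = "tensorflow"  # or "jax"

from zea.models.taesd import TinyAutoencoder

# Preset name comes from @model_registry(name="taesdxl"); weights are
# downloaded on first use.
model = TinyAutoencoder.from_preset("taesdxl")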
zea/models/unet.py
CHANGED
@@ -1,4 +1,18 @@
-"""UNet models and architectures"""
+"""UNet models and architectures.
+
+To try this model, simply load one of the available presets:
+
+.. doctest::
+
+    >>> from zea.models.unet import UNet
+
+    >>> model = UNet.from_preset("unet-echonet-inpainter")
+
+.. seealso::
+    A tutorial notebook where this model is used:
+    :doc:`../notebooks/models/unet_example`.
+
+"""

 import keras
 from keras import layers
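Similarly for UNet, a sketch that extends the new doctest with a forward pass (not part of the diff). The 112x112 grayscale NHWC input is an assumption based on EchoNet conventions; the expected shape and value range are not stated in this diff.

import numpy as np

from zea.models.unet import UNet

model = UNet.from_preset("unet-echonet-inpainter")

# Assumed input: a batch of EchoNet-style grayscale frames, NHWC layout.
frames = np.random.rand(1, 112, 112, 1).astype("float32")
output = model(frames)  # inpainted frames, same spatial shape (assumed)
print(output.shape)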