zea 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. zea/__init__.py +54 -19
  2. zea/agent/__init__.py +12 -12
  3. zea/agent/masks.py +2 -1
  4. zea/agent/selection.py +166 -0
  5. zea/backend/__init__.py +89 -0
  6. zea/backend/jax/__init__.py +14 -51
  7. zea/backend/tensorflow/__init__.py +0 -49
  8. zea/backend/tensorflow/dataloader.py +2 -1
  9. zea/backend/torch/__init__.py +27 -62
  10. zea/beamform/beamformer.py +100 -50
  11. zea/beamform/lens_correction.py +9 -2
  12. zea/beamform/pfield.py +9 -2
  13. zea/config.py +34 -25
  14. zea/data/__init__.py +22 -16
  15. zea/data/convert/camus.py +2 -1
  16. zea/data/convert/echonet.py +4 -4
  17. zea/data/convert/echonetlvh/convert_raw_to_usbmd.py +1 -1
  18. zea/data/convert/matlab.py +11 -4
  19. zea/data/data_format.py +31 -30
  20. zea/data/datasets.py +7 -5
  21. zea/data/file.py +104 -2
  22. zea/data/layers.py +5 -6
  23. zea/datapaths.py +16 -4
  24. zea/display.py +7 -5
  25. zea/interface.py +14 -16
  26. zea/internal/_generate_keras_ops.py +6 -7
  27. zea/internal/cache.py +2 -49
  28. zea/internal/config/validation.py +1 -2
  29. zea/internal/core.py +69 -6
  30. zea/internal/device.py +6 -2
  31. zea/internal/dummy_scan.py +330 -0
  32. zea/internal/operators.py +114 -2
  33. zea/internal/parameters.py +101 -70
  34. zea/internal/registry.py +1 -1
  35. zea/internal/setup_zea.py +5 -6
  36. zea/internal/utils.py +282 -0
  37. zea/io_lib.py +247 -19
  38. zea/keras_ops.py +74 -4
  39. zea/log.py +9 -7
  40. zea/metrics.py +365 -65
  41. zea/models/__init__.py +30 -20
  42. zea/models/base.py +30 -14
  43. zea/models/carotid_segmenter.py +19 -4
  44. zea/models/diffusion.py +187 -26
  45. zea/models/echonet.py +22 -8
  46. zea/models/echonetlvh.py +31 -18
  47. zea/models/lpips.py +19 -2
  48. zea/models/lv_segmentation.py +96 -0
  49. zea/models/preset_utils.py +5 -5
  50. zea/models/presets.py +36 -0
  51. zea/models/regional_quality.py +142 -0
  52. zea/models/taesd.py +21 -5
  53. zea/models/unet.py +15 -1
  54. zea/ops.py +414 -207
  55. zea/probes.py +6 -6
  56. zea/scan.py +109 -49
  57. zea/simulator.py +24 -21
  58. zea/tensor_ops.py +411 -206
  59. zea/tools/hf.py +1 -1
  60. zea/tools/selection_tool.py +47 -86
  61. zea/utils.py +92 -480
  62. zea/visualize.py +177 -39
  63. {zea-0.0.5.dist-info → zea-0.0.7.dist-info}/METADATA +9 -3
  64. zea-0.0.7.dist-info/RECORD +114 -0
  65. {zea-0.0.5.dist-info → zea-0.0.7.dist-info}/WHEEL +1 -1
  66. zea-0.0.5.dist-info/RECORD +0 -110
  67. {zea-0.0.5.dist-info → zea-0.0.7.dist-info}/entry_points.txt +0 -0
  68. {zea-0.0.5.dist-info → zea-0.0.7.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,142 @@
1
"""
MobileNetv2 based image quality model for myocardial regions in apical views.

To try this model, simply load one of the available presets:

.. doctest::

    >>> from zea.models.regional_quality import MobileNetv2RegionalQuality

    >>> model = MobileNetv2RegionalQuality.from_preset("mobilenetv2_regional_quality")

The model predicts the regional image quality of the myocardial regions in
apical views. It can also be used to get the overall image quality by
averaging the regional scores.

At the time of writing (17 September 2025) and to the best of our knowledge,
it is the state-of-the-art model for regional image quality scoring of
myocardial segments on the CAMUS dataset.

.. important::
    This is a ``zea`` implementation of the model.
    For the original paper and code, see `here <https://github.com/GillesVanDeVyver/arqee>`_.

    Van De Vyver, et al. "Regional Image Quality Scoring for 2-D Echocardiography Using Deep Learning."
    *Ultrasound in Medicine & Biology 51.4 (2025): 638-649*

.. seealso::
    A tutorial notebook where this model is used:
    :doc:`../notebooks/metrics/myocardial_quality_example`.

.. note::
    The model is originally a PyTorch model converted to ONNX. To use this model,
    you must have `onnxruntime` installed. This is required for ONNX model inference.

    You can install it using pip:

    .. code-block:: bash

        pip install onnxruntime

"""  # noqa: E501

import numpy as np
from keras import ops

from zea.internal.registry import model_registry
from zea.models.base import BaseModel
from zea.models.preset_utils import get_preset_loader, register_presets
from zea.models.presets import regional_quality_presets

# Visualization colors and helper for regional quality (arqee-inspired).
# One RGB row per quality class, ordered worst -> best (same order as QUALITY_CLASSES).
QUALITY_COLORS = np.array(
    [
        [0.929, 0.106, 0.141],  # not visible, red
        [0.957, 0.396, 0.137],  # poor, orange
        [1, 0.984, 0.090],  # ok, yellow
        [0.553, 0.776, 0.098],  # good, light green
        [0.09, 0.407, 0.216],  # excellent, dark green
    ]
)
# Myocardial regions in the order the model outputs them (see MobileNetv2RegionalQuality.call).
REGION_LABELS = [
    "basal_left",
    "mid_left",
    "apical_left",
    "apical_right",
    "mid_right",
    "basal_right",
    "annulus_left",
    "annulus_right",
]
# Discrete quality levels corresponding to the rows of QUALITY_COLORS.
QUALITY_CLASSES = ["not visible", "poor", "ok", "good", "excellent"]
70
+
71
+
72
@model_registry(name="mobilenetv2_regional_quality")
class MobileNetv2RegionalQuality(BaseModel):
    """
    MobileNetV2 based regional image quality scoring model for myocardial regions in apical views.

    This class loads an ONNX model and provides inference for regional image quality scoring tasks.
    Call :meth:`custom_load_weights` (or build via ``from_preset``) before inference.
    """

    def preprocess_input(self, inputs):
        """
        Normalize input image(s) to the [0, 255] range expected by the ONNX model.

        Args:
            inputs (np.ndarray): Input image(s), any numeric range.

        Returns:
            np.ndarray: float32 image(s) rescaled to [0, 255]. A constant-valued
            input (zero dynamic range) is mapped to all zeros instead of
            dividing by zero.
        """
        inputs = ops.convert_to_numpy(inputs).astype("float32")
        min_val = np.min(inputs)
        denom = np.max(inputs) - min_val
        # NOTE(review): min/max are taken over the whole array, i.e. across the
        # batch when a batch is passed — assumed intentional; confirm if
        # per-image normalization is desired.
        if denom > 0.0:
            return (inputs - min_val) / denom * 255.0
        return np.zeros_like(inputs, dtype=np.float32)

    def call(self, inputs):
        """
        Predict regional image quality scores for input image(s).

        Args:
            inputs (np.ndarray): Input image or batch of images.
                Shape: [batch, 1, 256, 256]

        Returns:
            np.ndarray: Regional quality scores.
                Shape is [batch, 8] with regions in order:
                basal_left, mid_left, apical_left, apical_right,
                mid_right, basal_right, annulus_left, annulus_right

        Raises:
            ValueError: If the ONNX session has not been loaded yet
                (``custom_load_weights`` was never called).
        """
        if not hasattr(self, "onnx_sess"):
            raise ValueError("Model weights not loaded. Please call custom_load_weights() first.")
        input_name = self.onnx_sess.get_inputs()[0].name
        output_name = self.onnx_sess.get_outputs()[0].name
        inputs = self.preprocess_input(inputs)

        output = self.onnx_sess.run([output_name], {input_name: inputs})[0]
        # Invert the linear bias correction stored alongside the weights
        # (presumably fit as raw = slope * true + intercept — see the
        # slope_intercept_bias_correction.npy asset loaded below).
        slope, intercept = self.slope_intercept[0], self.slope_intercept[1]
        return (output - intercept) / slope

    def custom_load_weights(self, preset, **kwargs):
        """Load ONNX model weights and bias correction for regional image quality scoring.

        Args:
            preset: Preset identifier understood by ``get_preset_loader``.
            **kwargs: Unused; accepted for interface compatibility.

        Raises:
            ImportError: If ``onnxruntime`` is not installed.
        """
        try:
            import onnxruntime
        except ImportError as err:
            # Chain the original error so the traceback shows the real cause.
            raise ImportError(
                "onnxruntime is not installed. Please run "
                "`pip install onnxruntime` to use this model."
            ) from err
        loader = get_preset_loader(preset)
        filename = loader.get_file("model.onnx")
        self.onnx_sess = onnxruntime.InferenceSession(filename)
        # [slope, intercept] pair consumed by ``call`` to debias raw outputs.
        filename = loader.get_file("slope_intercept_bias_correction.npy")
        self.slope_intercept = np.load(filename)


register_presets(regional_quality_presets, MobileNetv2RegionalQuality)
zea/models/taesd.py CHANGED
@@ -1,9 +1,19 @@
1
- """Tiny Autoencoder (TAESD) model converted to Tensorflow.
1
+ """
2
+ Tiny Autoencoder (TAESD) model.
3
+
4
+ .. doctest::
5
+
6
+ >>> from zea.models.taesd import TinyAutoencoder
7
+
8
+ >>> model = TinyAutoencoder.from_preset("taesdxl") # doctest: +SKIP
2
9
 
3
- For the original implementation, see the `TAESD repository <https://github.com/madebyollin/taesd>`_.
10
+ .. important::
11
+ This is a ``zea`` implementation of the model.
12
+ For the original code, see `here <https://github.com/madebyollin/taesd>`_.
4
13
 
5
- You can see an example of how to use this model in the example notebook:
6
- :doc:`../notebooks/models/taesd_autoencoder_example`.
14
+ .. seealso::
15
+ A tutorial notebook where this model is used:
16
+ :doc:`../notebooks/models/taesd_autoencoder_example`.
7
17
 
8
18
  """
9
19
 
@@ -23,7 +33,13 @@ tf = _import_tf()
23
33
 
24
34
  @model_registry(name="taesdxl")
25
35
  class TinyAutoencoder(BaseModel):
26
- """[TAESD](https://github.com/madebyollin/taesd) model in TensorFlow."""
36
+ """Tiny Autoencoder model.
37
+
38
+ .. note::
39
+
40
+ This model currently only supports TensorFlow and JAX backends.
41
+
42
+ """
27
43
 
28
44
  def __init__(self, **kwargs):
29
45
  """
zea/models/unet.py CHANGED
@@ -1,4 +1,18 @@
1
- """UNet models and architectures"""
1
+ """UNet models and architectures.
2
+
3
+ To try this model, simply load one of the available presets:
4
+
5
+ .. doctest::
6
+
7
+ >>> from zea.models.unet import UNet
8
+
9
+ >>> model = UNet.from_preset("unet-echonet-inpainter")
10
+
11
+ .. seealso::
12
+ A tutorial notebook where this model is used:
13
+ :doc:`../notebooks/models/unet_example`.
14
+
15
+ """
2
16
 
3
17
  import keras
4
18
  from keras import layers