zea 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. zea/__init__.py +3 -3
  2. zea/agent/masks.py +2 -2
  3. zea/agent/selection.py +3 -3
  4. zea/backend/__init__.py +1 -1
  5. zea/backend/tensorflow/dataloader.py +1 -1
  6. zea/beamform/beamformer.py +4 -2
  7. zea/beamform/pfield.py +2 -2
  8. zea/data/augmentations.py +1 -1
  9. zea/data/convert/__main__.py +93 -52
  10. zea/data/convert/camus.py +8 -2
  11. zea/data/convert/echonet.py +1 -1
  12. zea/data/convert/echonetlvh/__init__.py +1 -1
  13. zea/data/convert/verasonics.py +810 -772
  14. zea/data/data_format.py +0 -2
  15. zea/data/file.py +28 -0
  16. zea/data/preset_utils.py +1 -1
  17. zea/display.py +1 -1
  18. zea/doppler.py +5 -5
  19. zea/func/__init__.py +109 -0
  20. zea/{tensor_ops.py → func/tensor.py} +32 -8
  21. zea/func/ultrasound.py +500 -0
  22. zea/internal/_generate_keras_ops.py +5 -5
  23. zea/metrics.py +6 -5
  24. zea/models/diffusion.py +1 -1
  25. zea/models/echonetlvh.py +1 -1
  26. zea/models/gmm.py +1 -1
  27. zea/ops/__init__.py +188 -0
  28. zea/ops/base.py +442 -0
  29. zea/{keras_ops.py → ops/keras_ops.py} +2 -2
  30. zea/ops/pipeline.py +1472 -0
  31. zea/ops/tensor.py +356 -0
  32. zea/ops/ultrasound.py +890 -0
  33. zea/probes.py +2 -10
  34. zea/scan.py +17 -20
  35. zea/tools/fit_scan_cone.py +1 -1
  36. zea/tools/selection_tool.py +1 -1
  37. zea/tracking/lucas_kanade.py +1 -1
  38. zea/tracking/segmentation.py +1 -1
  39. {zea-0.0.8.dist-info → zea-0.0.9.dist-info}/METADATA +3 -1
  40. {zea-0.0.8.dist-info → zea-0.0.9.dist-info}/RECORD +43 -37
  41. zea/ops.py +0 -3534
  42. {zea-0.0.8.dist-info → zea-0.0.9.dist-info}/WHEEL +0 -0
  43. {zea-0.0.8.dist-info → zea-0.0.9.dist-info}/entry_points.txt +0 -0
  44. {zea-0.0.8.dist-info → zea-0.0.9.dist-info}/licenses/LICENSE +0 -0
zea/data/data_format.py CHANGED
@@ -256,8 +256,6 @@ def _write_datasets(
256
256
  n_tx = _first_not_none_shape([raw_data, aligned_data], axis=1)
257
257
  if n_ax is None:
258
258
  n_ax = _first_not_none_shape([raw_data, aligned_data, beamformed_data], axis=-3)
259
- if n_ax is None:
260
- n_ax = _first_not_none_shape([envelope_data, image, image_sc], axis=-2)
261
259
  if n_el is None:
262
260
  n_el = _first_not_none_shape([raw_data], axis=-2)
263
261
  if n_ch is None:
zea/data/file.py CHANGED
@@ -43,13 +43,16 @@ class File(h5py.File):
43
43
  **kwargs: Additional keyword arguments to pass to h5py.File.
44
44
  """
45
45
 
46
+ # Resolve huggingface path
46
47
  if str(name).startswith(HF_PREFIX):
47
48
  name = _hf_resolve_path(str(name))
48
49
 
50
+ # Disable locking for read mode by default
49
51
  if "locking" not in kwargs and "mode" in kwargs and kwargs["mode"] == "r":
50
52
  # If the file is opened in read mode, disable locking
51
53
  kwargs["locking"] = False
52
54
 
55
+ # Initialize the h5py.File
53
56
  super().__init__(name, *args, **kwargs)
54
57
 
55
58
  @property
@@ -285,6 +288,31 @@ class File(h5py.File):
285
288
  else:
286
289
  log.warning("Could not find scan parameters in file.")
287
290
 
291
+ scan_parameters = self._check_focus_distances(scan_parameters)
292
+
293
+ return scan_parameters
294
+
295
+ def _check_focus_distances(self, scan_parameters):
296
+ if "focus_distances" in scan_parameters:
297
+ focus_distances = scan_parameters["focus_distances"]
298
+ # check if focus distances are in wavelengths
299
+ if np.any(np.logical_and(focus_distances >= 1, focus_distances != np.inf)):
300
+ log.warning(
301
+ f"We have detected that focus distances in '{self.path}' are "
302
+ "(probably) stored wavelengths. Please update your file! "
303
+ "Converting to meters automatically for now."
304
+ )
305
+ assert "sound_speed" in scan_parameters, (
306
+ "Cannot convert focus distances from wavelengths to meters "
307
+ "because sound_speed is not defined in the scan parameters."
308
+ )
309
+ assert "center_frequency" in scan_parameters, (
310
+ "Cannot convert focus distances from wavelengths to meters "
311
+ "because center_frequency is not defined in the scan parameters."
312
+ )
313
+ wavelength = scan_parameters["sound_speed"] / scan_parameters["center_frequency"]
314
+ focus_distances = focus_distances * wavelength
315
+ scan_parameters["focus_distances"] = focus_distances
288
316
  return scan_parameters
289
317
 
290
318
  def get_scan_parameters(self, event=None) -> dict:
zea/data/preset_utils.py CHANGED
@@ -86,7 +86,7 @@ def _download_files_in_path(
86
86
  return downloaded_files
87
87
 
88
88
 
89
- def _hf_resolve_path(hf_path: str, cache_dir=HF_DATASETS_DIR):
89
+ def _hf_resolve_path(hf_path: str, cache_dir=HF_DATASETS_DIR) -> Path:
90
90
  """Resolve a Hugging Face path to a local cache directory path.
91
91
 
92
92
  Downloads files from a HuggingFace dataset repository and returns
zea/display.py CHANGED
@@ -8,7 +8,7 @@ import scipy
8
8
  from keras import ops
9
9
  from PIL import Image
10
10
 
11
- from zea.tensor_ops import translate
11
+ from zea.func.tensor import translate
12
12
  from zea.tools.fit_scan_cone import fit_and_crop_around_scan_cone
13
13
 
14
14
 
zea/doppler.py CHANGED
@@ -3,7 +3,7 @@
3
3
  import numpy as np
4
4
  from keras import ops
5
5
 
6
- from zea import tensor_ops
6
+ from zea.func import tensor
7
7
 
8
8
 
9
9
  def color_doppler(
@@ -61,11 +61,11 @@ def color_doppler(
61
61
  if hamming_size[0] != 1 and hamming_size[1] != 1:
62
62
  h_row = np.hamming(hamming_size[0])
63
63
  h_col = np.hamming(hamming_size[1])
64
- autocorr = tensor_ops.apply_along_axis(
65
- lambda x: tensor_ops.correlate(x, h_row, mode="same"), 0, autocorr
64
+ autocorr = tensor.apply_along_axis(
65
+ lambda x: tensor.correlate(x, h_row, mode="same"), 0, autocorr
66
66
  )
67
- autocorr = tensor_ops.apply_along_axis(
68
- lambda x: tensor_ops.correlate(x, h_col, mode="same"), 1, autocorr
67
+ autocorr = tensor.apply_along_axis(
68
+ lambda x: tensor.correlate(x, h_col, mode="same"), 1, autocorr
69
69
  )
70
70
 
71
71
  # Doppler velocity
zea/func/__init__.py ADDED
@@ -0,0 +1,109 @@
1
+ """Functional API of zea.
2
+
3
+ This module provides a collection of functions for various operations on tensors
4
+ and ultrasound data. These functions can be used standalone, in contrast to the :mod:`zea.ops` module which provides operation classes for building processing pipelines.
5
+ """
6
+
7
+ from .tensor import (
8
+ L1,
9
+ L2,
10
+ add_salt_and_pepper_noise,
11
+ apply_along_axis,
12
+ batch_cov,
13
+ boolean_mask,
14
+ check_patches_fit,
15
+ compute_required_patch_overlap,
16
+ compute_required_patch_shape,
17
+ correlate,
18
+ extend_n_dims,
19
+ find_contour,
20
+ flatten,
21
+ fori_loop,
22
+ func_with_one_batch_dim,
23
+ gaussian_filter,
24
+ images_to_patches,
25
+ interpolate_data,
26
+ is_jax_prng_key,
27
+ is_monotonic,
28
+ linear_sum_assignment,
29
+ map_indices_for_interpolation,
30
+ matrix_power,
31
+ normalize,
32
+ pad_array_to_divisible,
33
+ patches_to_images,
34
+ resample,
35
+ reshape_axis,
36
+ simple_map,
37
+ sinc,
38
+ split_seed,
39
+ split_volume_data_from_axis,
40
+ stack_volume_data_along_axis,
41
+ translate,
42
+ vmap,
43
+ )
44
+ from .ultrasound import (
45
+ channels_to_complex,
46
+ complex_to_channels,
47
+ compute_time_to_peak,
48
+ compute_time_to_peak_stack,
49
+ demodulate,
50
+ demodulate_not_jitable,
51
+ envelope_detect,
52
+ get_band_pass_filter,
53
+ get_low_pass_iq_filter,
54
+ hilbert,
55
+ log_compress,
56
+ upmix,
57
+ )
58
+
59
+ __all__ = [
60
+ # Tensor functions
61
+ "L1",
62
+ "L2",
63
+ "add_salt_and_pepper_noise",
64
+ "apply_along_axis",
65
+ "batch_cov",
66
+ "boolean_mask",
67
+ "check_patches_fit",
68
+ "compute_required_patch_overlap",
69
+ "compute_required_patch_shape",
70
+ "correlate",
71
+ "extend_n_dims",
72
+ "find_contour",
73
+ "flatten",
74
+ "fori_loop",
75
+ "func_with_one_batch_dim",
76
+ "gaussian_filter",
77
+ "images_to_patches",
78
+ "interpolate_data",
79
+ "is_jax_prng_key",
80
+ "is_monotonic",
81
+ "linear_sum_assignment",
82
+ "map_indices_for_interpolation",
83
+ "matrix_power",
84
+ "normalize",
85
+ "pad_array_to_divisible",
86
+ "patches_to_images",
87
+ "resample",
88
+ "reshape_axis",
89
+ "simple_map",
90
+ "sinc",
91
+ "split_seed",
92
+ "split_volume_data_from_axis",
93
+ "stack_volume_data_along_axis",
94
+ "translate",
95
+ "vmap",
96
+ # Ultrasound functions
97
+ "channels_to_complex",
98
+ "complex_to_channels",
99
+ "compute_time_to_peak",
100
+ "compute_time_to_peak_stack",
101
+ "demodulate",
102
+ "demodulate_not_jitable",
103
+ "envelope_detect",
104
+ "get_band_pass_filter",
105
+ "get_low_pass_iq_filter",
106
+ "hilbert",
107
+ "upmix",
108
+ "log_compress",
109
+ ]
@@ -329,7 +329,7 @@ def _map(fun, in_axes=0, out_axes=0, map_fn=None, _use_torch_vmap=False):
329
329
  For jax, this uses the native vmap implementation.
330
330
  For other backends, this a wrapper that uses `ops.vectorized_map` under the hood.
331
331
 
332
- Probably you want to use `zea.tensor_ops.vmap` instead, which uses this function
332
+ Probably you want to use `zea.func.vmap` instead, which uses this function
333
333
  with additional batching/chunking support.
334
334
 
335
335
  Args:
@@ -845,7 +845,7 @@ def stack_volume_data_along_axis(data, batch_axis: int, stack_axis: int, number:
845
845
  .. doctest::
846
846
 
847
847
  >>> import keras
848
- >>> from zea.tensor_ops import stack_volume_data_along_axis
848
+ >>> from zea.func import stack_volume_data_along_axis
849
849
 
850
850
  >>> data = keras.random.uniform((10, 20, 30))
851
851
  >>> # stacking along 1st axis with 2 frames per block
@@ -889,7 +889,7 @@ def split_volume_data_from_axis(data, batch_axis: int, stack_axis: int, number:
889
889
  .. doctest::
890
890
 
891
891
  >>> import keras
892
- >>> from zea.tensor_ops import split_volume_data_from_axis
892
+ >>> from zea.func import split_volume_data_from_axis
893
893
 
894
894
  >>> data = keras.random.uniform((20, 10, 30))
895
895
  >>> split_data = split_volume_data_from_axis(data, 0, 1, 2, 2)
@@ -1012,7 +1012,7 @@ def check_patches_fit(
1012
1012
  Example:
1013
1013
  .. doctest::
1014
1014
 
1015
- >>> from zea.tensor_ops import check_patches_fit
1015
+ >>> from zea.func import check_patches_fit
1016
1016
  >>> image_shape = (10, 10)
1017
1017
  >>> patch_shape = (4, 4)
1018
1018
  >>> overlap = (2, 2)
@@ -1080,7 +1080,7 @@ def images_to_patches(
1080
1080
  .. doctest::
1081
1081
 
1082
1082
  >>> import keras
1083
- >>> from zea.tensor_ops import images_to_patches
1083
+ >>> from zea.func import images_to_patches
1084
1084
 
1085
1085
  >>> images = keras.random.uniform((2, 8, 8, 3))
1086
1086
  >>> patches = images_to_patches(images, patch_shape=(4, 4), overlap=(2, 2))
@@ -1166,7 +1166,7 @@ def patches_to_images(
1166
1166
  .. doctest::
1167
1167
 
1168
1168
  >>> import keras
1169
- >>> from zea.tensor_ops import patches_to_images
1169
+ >>> from zea.func import patches_to_images
1170
1170
 
1171
1171
  >>> patches = keras.random.uniform((2, 3, 3, 4, 4, 3))
1172
1172
  >>> images = patches_to_images(patches, image_shape=(8, 8, 3), overlap=(2, 2))
@@ -1254,7 +1254,7 @@ def reshape_axis(data, newshape: tuple, axis: int):
1254
1254
  .. doctest::
1255
1255
 
1256
1256
  >>> import keras
1257
- >>> from zea.tensor_ops import reshape_axis
1257
+ >>> from zea.func import reshape_axis
1258
1258
 
1259
1259
  >>> data = keras.random.uniform((3, 4, 5))
1260
1260
  >>> newshape = (2, 2)
@@ -1641,7 +1641,7 @@ def find_contour(binary_mask):
1641
1641
  Example:
1642
1642
  .. doctest::
1643
1643
 
1644
- >>> from zea.tensor_ops import find_contour
1644
+ >>> from zea.func import find_contour
1645
1645
  >>> import keras
1646
1646
  >>> mask = keras.ops.zeros((10, 10))
1647
1647
  >>> mask = keras.ops.scatter_update(
@@ -1700,3 +1700,27 @@ def translate(array, range_from=None, range_to=(0, 255)):
1700
1700
 
1701
1701
  # Convert the 0-1 range into a value in the right range.
1702
1702
  return right_min + (value_scaled * (right_max - right_min))
1703
+
1704
+
1705
+ def normalize(data, output_range, input_range=None):
1706
+ """Normalize data to a given range.
1707
+
1708
+ Equivalent to `translate` with clipping.
1709
+
1710
+ Args:
1711
+ data (ops.Tensor): Input data to normalize.
1712
+ output_range (tuple): Range to which data should be mapped, e.g., (0, 1).
1713
+ input_range (tuple, optional): Range of input data.
1714
+ If None, the range will be computed from the data.
1715
+ Defaults to None.
1716
+ """
1717
+ if input_range is None:
1718
+ input_range = (None, None)
1719
+ minval, maxval = input_range
1720
+ if minval is None:
1721
+ minval = ops.min(data)
1722
+ if maxval is None:
1723
+ maxval = ops.max(data)
1724
+ data = ops.clip(data, minval, maxval)
1725
+ normalized_data = translate(data, (minval, maxval), output_range)
1726
+ return normalized_data