zea-0.0.3-py3-none-any.whl → zea-0.0.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zea/__init__.py +8 -7
- zea/__main__.py +8 -26
- zea/agent/selection.py +24 -18
- zea/data/__main__.py +6 -3
- zea/data/data_format.py +28 -26
- zea/data/file.py +19 -74
- zea/display.py +1 -5
- zea/doppler.py +75 -0
- zea/internal/_generate_keras_ops.py +125 -0
- zea/internal/core.py +11 -4
- zea/internal/device.py +33 -16
- zea/internal/notebooks.py +39 -0
- zea/internal/operators.py +10 -0
- zea/internal/parameters.py +75 -19
- zea/internal/viewer.py +25 -25
- zea/io_lib.py +60 -62
- zea/keras_ops.py +1989 -0
- zea/log.py +8 -0
- zea/models/__init__.py +6 -3
- zea/models/deeplabv3.py +131 -0
- zea/models/diffusion.py +47 -21
- zea/models/echonetlvh.py +290 -0
- zea/models/presets.py +14 -0
- zea/ops.py +64 -49
- zea/scan.py +10 -3
- zea/tensor_ops.py +150 -0
- zea/tools/fit_scan_cone.py +2 -2
- zea/tools/selection_tool.py +28 -9
- zea/utils.py +31 -0
- zea/visualize.py +10 -4
- {zea-0.0.3.dist-info → zea-0.0.5.dist-info}/METADATA +5 -2
- {zea-0.0.3.dist-info → zea-0.0.5.dist-info}/RECORD +35 -30
- zea/internal/convert.py +0 -150
- {zea-0.0.3.dist-info → zea-0.0.5.dist-info}/LICENSE +0 -0
- {zea-0.0.3.dist-info → zea-0.0.5.dist-info}/WHEEL +0 -0
- {zea-0.0.3.dist-info → zea-0.0.5.dist-info}/entry_points.txt +0 -0
zea/__init__.py
CHANGED
@@ -7,10 +7,10 @@ from . import log
 
 # dynamically add __version__ attribute (see pyproject.toml)
 # __version__ = __import__("importlib.metadata").metadata.version(__package__)
-__version__ = "0.0.3"
+__version__ = "0.0.5"
 
 
-def setup():
+def _bootstrap_backend():
     """Setup function to initialize the zea package."""
 
     def _check_backend_installed():
@@ -40,14 +40,14 @@ def setup():
 
     _check_backend_installed()
 
-    import keras
+    from keras.backend import backend as keras_backend
 
-    log.info(f"Using backend {keras.backend.backend()}")
+    log.info(f"Using backend {keras_backend()!r}")
 
 
 # call and clean up namespace
-setup()
-del setup
+_bootstrap_backend()
+del _bootstrap_backend
 
 from . import (
     agent,
@@ -55,6 +55,7 @@ from . import (
     data,
     display,
     io_lib,
+    keras_ops,
     metrics,
     models,
     simulator,
@@ -68,7 +69,7 @@ from .data.file import File, load_file
 from .datapaths import set_data_paths
 from .interface import Interface
 from .internal.device import init_device
-from .internal.setup_zea import setup_config
+from .internal.setup_zea import setup, setup_config
 from .ops import Pipeline
 from .probes import Probe
 from .scan import Scan
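The bootstrap above runs at import time, and the `--backend` CLI flag is removed in `zea/__main__.py` below, so the Keras backend is now selected solely via the `KERAS_BACKEND` environment variable (named in the removed help text). A minimal sketch of the intended flow; the backend names are standard Keras values, not confirmed by this diff:

import os

# Select the Keras backend before zea (and therefore keras) is imported.
os.environ["KERAS_BACKEND"] = "jax"  # or "tensorflow", "torch", "numpy"

import zea  # _bootstrap_backend() logs e.g.: Using backend 'jax'

print(zea.__version__)  # "0.0.5"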
zea/__main__.py
CHANGED
@@ -9,30 +9,22 @@ import argparse
 import sys
 from pathlib import Path
 
-from zea import log
 from zea.visualize import set_mpl_style
 
 
-def get_args():
+def get_parser():
     """Command line argument parser"""
-    parser = argparse.ArgumentParser(
-
+    parser = argparse.ArgumentParser(
+        description="Load and process ultrasound data based on a configuration file."
+    )
+    parser.add_argument("-c", "--config", type=str, default=None, help="path to the config file.")
     parser.add_argument(
         "-t",
         "--task",
         default="view",
         choices=["view"],
         type=str,
-        help="
-    )
-    parser.add_argument(
-        "--backend",
-        default=None,
-        type=str,
-        help=(
-            "Keras backend to use. Default is the one set by the environment "
-            "variable KERAS_BACKEND."
-        ),
+        help="Which task to run. Currently only 'view' is supported.",
     )
     parser.add_argument(
         "--skip_validate_file",
@@ -40,27 +32,18 @@ def get_args():
         action="store_true",
         help="Skip zea file integrity checks. Use with caution.",
     )
-    parser
-    args = parser.parse_args()
-    return args
+    return parser
 
 
 def main():
     """main entrypoint for zea"""
-    args = get_args()
+    args = get_parser().parse_args()
 
     set_mpl_style()
 
-    if args.backend:
-        from zea.internal.setup_zea import set_backend
-
-        set_backend(args.backend)
-
    wd = Path(__file__).parent.resolve()
    sys.path.append(str(wd))
 
-    import keras
-
     from zea.interface import Interface
     from zea.internal.setup_zea import setup
 
@@ -72,7 +55,6 @@ def main():
             validate_file=not args.skip_validate_file,
         )
 
-        log.info(f"Using {keras.backend.backend()} backend")
         cli.run(plot=True)
     else:
         raise ValueError(f"Unknown task {args.task}, see `zea --help` for available tasks.")
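Returning the parser instead of parsed arguments makes the CLI testable without touching `sys.argv`. A hedged sketch of that use (the config path here is made up):

from zea.__main__ import get_parser

parser = get_parser()
args = parser.parse_args(["--task", "view", "-c", "config.yaml"])
assert args.task == "view" and not args.skip_validate_file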
zea/agent/selection.py
CHANGED
@@ -155,7 +155,9 @@ class GreedyEntropy(LinesActionModel):
         # TODO: I think we only need to compute the lower triangular
         # of this matrix, since it's symmetric
         squared_l2_error_matrices = (particles[:, :, None, ...] - particles[:, None, :, ...]) ** 2
-        gaussian_error_per_pixel_i_j = ops.exp(-(squared_l2_error_matrices) / (2 * entropy_sigma**2))
+        gaussian_error_per_pixel_i_j = ops.exp(
+            -(squared_l2_error_matrices) / (2 * entropy_sigma**2)
+        )
         # Vertically stack all columns corresponding with the same line
         # This way we can just sum across the height axis and get the entropy
         # for each pixel in a given line
@@ -176,33 +178,35 @@ class GreedyEntropy(LinesActionModel):
         # [n_particles, n_particles, batch, height, width]
         return gaussian_error_per_pixel_stacked
 
-    def
-    """
-
+    def compute_pixelwise_entropy(self, particles):
+        """
         This function computes the entropy for each line using a Gaussian Mixture Model
         approximation of the posterior distribution.
-        For more details see Section
+        For more details see Section VI. B here: https://arxiv.org/pdf/2410.13310
 
         Args:
             particles (Tensor): Particles of shape (batch_size, n_particles, height, width)
 
         Returns:
-            Tensor: batch of entropies per
+            Tensor: batch of entropies per pixel, of shape (batch, height, width)
         """
-        gaussian_error_per_pixel_stacked = self.compute_pairwise_pixel_gaussian_error(
+        n_particles = ops.shape(particles)[1]
+        gaussian_error_per_pixel_stacked = self.compute_pairwise_pixel_gaussian_error(
             particles,
             self.stack_n_cols,
             self.n_possible_actions,
             self.entropy_sigma,
         )
-        gaussian_error_per_line = ops.sum(gaussian_error_per_pixel_stacked, axis=3)
         # sum out first dimension of (n_particles x n_particles) error matrix
-        # [n_particles, batch,
-
+        # [n_particles, batch, height, width]
+        pixelwise_entropy_sum_j = ops.sum(
+            (1 / n_particles) * gaussian_error_per_pixel_stacked, axis=1
+        )
+        log_pixelwise_entropy_sum_j = ops.log(pixelwise_entropy_sum_j)
         # sum out second dimension of (n_particles x n_particles) error matrix
-        # [batch,
-
-        return
+        # [batch, height, width]
+        pixelwise_entropy = -ops.sum((1 / n_particles) * log_pixelwise_entropy_sum_j, axis=1)
+        return pixelwise_entropy
 
     def select_line_and_reweight_entropy(self, entropy_per_line):
         """Select the line with maximum entropy and reweight the entropies.
@@ -260,17 +264,19 @@ class GreedyEntropy(LinesActionModel):
             particles (Tensor): Particles of shape (batch_size, n_particles, height, width)
 
         Returns:
-
+            Tuple[Tensor, Tensor]:
             - Newly selected lines as k-hot vectors, shaped (batch_size, n_possible_actions)
-
+            - Masks of shape (batch_size, img_height, img_width)
         """
-
+
+        pixelwise_entropy = self.compute_pixelwise_entropy(particles)
+        linewise_entropy = ops.sum(pixelwise_entropy, axis=1)
 
         # Greedily select best line, reweight entropies, and repeat
         all_selected_lines = []
         for _ in range(self.n_actions):
-            max_entropy_line,
-            self.select_line_and_reweight_entropy,
+            max_entropy_line, linewise_entropy = ops.vectorized_map(
+                self.select_line_and_reweight_entropy, linewise_entropy
             )
             all_selected_lines.append(max_entropy_line)
 
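The new `compute_pixelwise_entropy` implements the Gaussian-mixture entropy estimator from the cited paper: with particles x_1..x_N per pixel, H ≈ -(1/N) Σ_i log((1/N) Σ_j exp(-(x_i - x_j)² / (2σ²))). A standalone numpy sketch of the same computation, with the axis layout simplified to (batch, n, h, w) rather than the stacked layout used in the diff:

import numpy as np

def pixelwise_entropy(particles, sigma=1.0):
    """particles: (batch, n_particles, height, width) -> (batch, height, width)."""
    n = particles.shape[1]
    # Pairwise squared errors between particles, per pixel: (batch, n, n, h, w)
    sq_err = (particles[:, :, None] - particles[:, None, :]) ** 2
    gauss = np.exp(-sq_err / (2 * sigma**2))
    # Inner sum over j: the GMM density evaluated at each particle i
    density_i = gauss.sum(axis=2) / n  # (batch, n, h, w)
    # Outer Monte Carlo average over i of -log density
    return -np.log(density_i).sum(axis=1) / n  # (batch, h, w)

Summing the result over the height axis, as `action_selection` does, then gives one entropy value per candidate line.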
zea/data/__main__.py
CHANGED
@@ -9,8 +9,8 @@ import argparse
 from zea import Folder
 
 
-def main():
-    parser = argparse.ArgumentParser(description="Copy a zea.Folder to a new location.")
+def get_parser():
+    parser = argparse.ArgumentParser(description="Copy a :class:`zea.Folder` to a new location.")
     parser.add_argument("src", help="Source folder path")
     parser.add_argument("dst", help="Destination folder path")
     parser.add_argument("key", help="Key to access in the hdf5 files")
@@ -20,8 +20,11 @@ def main():
         choices=["a", "w", "r+", "x"],
         help="Mode in which to open the destination files (default: 'a')",
    )
+    return parser
+
 
-    args = parser.parse_args()
+def main():
+    args = get_parser().parse_args()
 
     src_folder = Folder(args.src, args.key, validate=False)
     src_folder.copy(args.dst, args.key, mode=args.mode)
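The same parser-factory refactor as in `zea/__main__.py`, so the copy utility's argument handling can be exercised directly. A sketch with hypothetical paths; the `--mode` flag name is assumed from `args.mode` in `main()`:

from zea.data.__main__ import get_parser

args = get_parser().parse_args(["/data/src", "/data/dst", "data/image", "--mode", "a"])
assert (args.src, args.dst, args.key, args.mode) == ("/data/src", "/data/dst", "data/image", "a")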
zea/data/data_format.py
CHANGED
@@ -468,32 +468,34 @@ def _write_datasets(
         ),
         unit="-",
     )
-
-    for n in range(len(waveforms_one_way)):
-        _add_dataset(
-            group_name=scan_group_name + "/waveforms_one_way",
-            name=f"waveform_{str(n).zfill(3)}",
-            data=waveforms_one_way[n],
-            description=(
-                "One-way waveform as simulated by the Verasonics system, "
-                "sampled at 250MHz. This is the waveform after being filtered "
-                "by the tranducer bandwidth once."
-            ),
-            unit="V",
-        )
-
-    for n in range(len(waveforms_two_way)):
-        _add_dataset(
-            group_name=scan_group_name + "/waveforms_two_way",
-            name=f"waveform_{str(n).zfill(3)}",
-            data=waveforms_two_way[n],
-            description=(
-                "Two-way waveform as simulated by the Verasonics system, "
-                "sampled at 250MHz. This is the waveform after being filtered "
-                "by the tranducer bandwidth twice."
-            ),
-            unit="V",
-        )
+
+    if waveforms_one_way is not None:
+        for n in range(len(waveforms_one_way)):
+            _add_dataset(
+                group_name=scan_group_name + "/waveforms_one_way",
+                name=f"waveform_{str(n).zfill(3)}",
+                data=waveforms_one_way[n],
+                description=(
+                    "One-way waveform as simulated by the Verasonics system, "
+                    "sampled at 250MHz. This is the waveform after being filtered "
+                    "by the tranducer bandwidth once."
+                ),
+                unit="V",
+            )
+
+    if waveforms_two_way is not None:
+        for n in range(len(waveforms_two_way)):
+            _add_dataset(
+                group_name=scan_group_name + "/waveforms_two_way",
+                name=f"waveform_{str(n).zfill(3)}",
+                data=waveforms_two_way[n],
+                description=(
+                    "Two-way waveform as simulated by the Verasonics system, "
+                    "sampled at 250MHz. This is the waveform after being filtered "
+                    "by the tranducer bandwidth twice."
+                ),
+                unit="V",
+            )
 
     # Add additional elements
     if additional_elements is not None:
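The per-transmit waveform datasets are now written only when present. Reading them back is straightforward; a hedged sketch assuming the scan group is named "scan" and using a hypothetical file name, with dataset names taken from the f-string above:

import h5py

with h5py.File("example.hdf5", "r") as f:
    group = f["scan/waveforms_one_way"]
    # Datasets are named waveform_000, waveform_001, ... so sorted() keeps order
    waveforms = [group[name][()] for name in sorted(group)]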
zea/data/file.py
CHANGED
@@ -290,7 +290,7 @@ class File(h5py.File):
         """
         scan_parameters = {}
         if "scan" in self:
-            scan_parameters = recursively_load_dict_contents_from_group(
+            scan_parameters = self.recursively_load_dict_contents_from_group("scan")
         elif "event" in list(self.keys())[0]:
             if event is None:
                 raise ValueError(
@@ -305,38 +305,17 @@ class File(h5py.File):
                     f"Found number of events: {len(self.keys())}."
                 )
 
-            scan_parameters = recursively_load_dict_contents_from_group(
+            scan_parameters = self.recursively_load_dict_contents_from_group(f"event_{event}/scan")
         else:
             log.warning("Could not find scan parameters in file.")
 
         return scan_parameters
 
     def get_scan_parameters(self, event=None) -> dict:
-        """Returns a dictionary of
-
+        """Returns a dictionary of scan parameters stored in the file."""
+        return self.get_parameters(event)
 
-        Returns:
-            dict: The default parameters (the keys are identical to the
-                __init__ parameters of the Scan class).
-        """
-        file_scan_parameters = self.get_parameters(event)
-
-        scan_parameters = {}
-        for parameter, value in file_scan_parameters.items():
-            if parameter in Scan.VALID_PARAMS:
-                param_type = Scan.VALID_PARAMS[parameter]["type"]
-                if param_type in (bool, int, float):
-                    scan_parameters[parameter] = param_type(value)
-                elif isinstance(param_type, tuple) and float in param_type:
-                    scan_parameters[parameter] = float(value)
-                else:
-                    scan_parameters[parameter] = value
-
-        if len(scan_parameters) == 0:
-            log.info(f"Could not find proper scan parameters in {self}.")
-        return scan_parameters
-
-    def scan(self, event=None, **kwargs) -> Scan:
+    def scan(self, event=None, safe=True, **kwargs) -> Scan:
         """Returns a Scan object initialized with the parameters from the file.
 
         Args:
@@ -348,6 +327,9 @@ class File(h5py.File):
             ...
 
                 Defaults to None. In that case no event structure is expected.
+            safe (bool, optional): If True, will only use parameters that are
+                defined in the Scan class. If False, will use all parameters
+                from the file. Defaults to True.
             **kwargs: Additional keyword arguments to pass to the Scan object.
                 These will override the parameters from the file if they are
                 present in the file.
@@ -355,7 +337,7 @@ class File(h5py.File):
         Returns:
             Scan: The scan object.
         """
-        return Scan.merge(self.get_scan_parameters(event), kwargs)
+        return Scan.merge(self.get_scan_parameters(event), kwargs, safe=safe)
 
     def get_probe_parameters(self, event=None) -> dict:
         """Returns a dictionary of probe parameters to initialize a probe
@@ -388,21 +370,24 @@ class File(h5py.File):
         probe_parameters_file = self.get_probe_parameters(event)
         return Probe.from_parameters(self.probe_name, probe_parameters_file)
 
-    def recursively_load_dict_contents_from_group(self, path: str
+    def recursively_load_dict_contents_from_group(self, path: str) -> dict:
         """Load dict from contents of group
 
         Values inside the group are converted to numpy arrays
-        or primitive types (int, float, str). Single element
-        arrays are converted to the corresponding primitive type (if squeeze=True)
+        or primitive types (int, float, str).
 
         Args:
            path (str): path to group
-            squeeze (bool, optional): squeeze arrays with single element.
-                Defaults to False.
        Returns:
            dict: dictionary with contents of group
        """
-
+        ans = {}
+        for key, item in self[path].items():
+            if isinstance(item, h5py.Dataset):
+                ans[key] = item[()]
+            elif isinstance(item, h5py.Group):
+                ans[key] = self.recursively_load_dict_contents_from_group(path + "/" + key + "/")
+        return ans
 
     @classmethod
     def get_shape(cls, path: str, key: str) -> tuple:
@@ -519,54 +504,14 @@ def load_file(
     # the number of selected transmits
     if data_type in ["raw_data", "aligned_data"]:
        indices = File._prepare_indices(indices)
-        n_tx = data.shape[1]
        if isinstance(indices, tuple) and len(indices) > 1:
-            tx_idx = indices[1]
-            transmits = np.arange(n_tx)[tx_idx]
-            scan_kwargs["selected_transmits"] = transmits
+            scan_kwargs["selected_transmits"] = indices[1]
 
     scan = file.scan(**scan_kwargs)
 
     return data, scan, probe
 
 
-def recursively_load_dict_contents_from_group(
-    h5file: h5py._hl.files.File, path: str, squeeze: bool = False
-) -> dict:
-    """Load dict from contents of group
-
-    Values inside the group are converted to numpy arrays
-    or primitive types (int, float, str). Single element
-    arrays are converted to the corresponding primitive type (if squeeze=True)
-
-    Args:
-        h5file (h5py._hl.files.File): h5py file object
-        path (str): path to group
-        squeeze (bool, optional): squeeze arrays with single element.
-            Defaults to False.
-    Returns:
-        dict: dictionary with contents of group
-    """
-    ans = {}
-    for key, item in h5file[path].items():
-        if isinstance(item, h5py._hl.dataset.Dataset):
-            ans[key] = item[()]
-            # all ones in shape
-            if squeeze:
-                if ans[key].shape == () or all(i == 1 for i in ans[key].shape):
-                    # check for strings
-                    if isinstance(ans[key], str):
-                        ans[key] = str(ans[key])
-                    # check for integers
-                    elif int(ans[key]) == float(ans[key]):
-                        ans[key] = int(ans[key])
-                    else:
-                        ans[key] = float(ans[key])
-        elif isinstance(item, h5py._hl.group.Group):
-            ans[key] = recursively_load_dict_contents_from_group(h5file, path + "/" + key + "/")
-    return ans
-
-
 def _print_hdf5_attrs(hdf5_obj, prefix=""):
     """Recursively prints all keys, attributes, and shapes in an HDF5 file.
 
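A usage sketch of the new `safe` flag (file path hypothetical): with `safe=True`, the default, `Scan.merge` keeps only parameters the `Scan` class defines, which is roughly what the deleted `get_scan_parameters` filtering used to do; `safe=False` forwards everything stored in the file.

from zea import File

with File("example.hdf5", "r") as f:
    scan = f.scan()                  # filtered to known Scan parameters
    scan_all = f.scan(safe=False)    # all parameters found in the file
    params = f.recursively_load_dict_contents_from_group("scan")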
zea/display.py
CHANGED
@@ -3,7 +3,6 @@
 from functools import partial
 from typing import Tuple, Union
 
-import keras
 import numpy as np
 import scipy
 from keras import ops
@@ -342,6 +341,7 @@ def map_coordinates(inputs, coordinates, order, fill_mode="constant", fill_value
     """map_coordinates using keras.ops or scipy.ndimage when order > 1."""
     if order > 1:
         inputs = ops.convert_to_numpy(inputs)
+        coordinates = ops.convert_to_numpy(coordinates)
         out = scipy.ndimage.map_coordinates(
             inputs, coordinates, order=order, mode=fill_mode, cval=fill_value
         )
@@ -359,10 +359,6 @@ def map_coordinates(inputs, coordinates, order, fill_mode="constant", fill_value
 def _interpolate_batch(images, coordinates, fill_value=0.0, order=1, vectorize=True):
     """Interpolate a batch of images."""
 
-    # TODO: figure out why tensorflow map_coordinates is broken
-    if keras.backend.backend() == "tensorflow":
-        assert order > 1, "Some bug in tensorflow in map_coordinates, set order > 1 to use scipy."
-
     image_shape = images.shape
     num_image_dims = coordinates.shape[0]
 
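The added conversion matters because `scipy.ndimage.map_coordinates` expects numpy arrays: previously only `inputs` was converted, so a backend tensor passed as `coordinates` could fail on non-numpy backends. A minimal sketch of the scipy path taken when order > 1 (shapes made up):

import numpy as np
import scipy.ndimage

inputs = np.arange(64, dtype=float).reshape(8, 8)
# coordinates has shape (ndim, ...): here (2, 4, 4) -> output shape (4, 4)
coords = np.stack(np.meshgrid(np.linspace(0, 7, 4), np.linspace(0, 7, 4), indexing="ij"))
out = scipy.ndimage.map_coordinates(inputs, coords, order=3, mode="constant", cval=0.0)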
zea/doppler.py
ADDED
@@ -0,0 +1,75 @@
+"""Doppler functions for processing I/Q ultrasound data."""
+
+import numpy as np
+from keras import ops
+
+from zea import tensor_ops
+
+
+def color_doppler(
+    data,
+    center_frequency,
+    pulse_repetition_frequency,
+    sound_speed,
+    hamming_size=None,
+    lag=1,
+):
+    """Compute Color Doppler from packet of I/Q Data.
+
+    Args:
+        data (ndarray): I/Q complex data of shape (n_frames, grid_size_z, grid_size_x).
+            n_frames corresponds to the ensemble length used to compute
+            the Doppler signal.
+        center_frequency (float): Center frequency of the ultrasound probe in Hz.
+        pulse_repetition_frequency (float): Pulse repetition frequency in Hz.
+        sound_speed (float): Speed of sound in the medium in m/s.
+        hamming_size (int or tuple, optional): Size of the Hamming window to apply
+            for spatial averaging. If None, no window is applied.
+            If an integer, it is applied to both dimensions. If a tuple, it should
+            contain two integers for the row and column dimensions.
+        lag (int, optional): Lag for the auto-correlation computation.
+            Defaults to 1, meaning Doppler is computed from the current frame
+            and the next frame.
+
+    Returns:
+        doppler_velocities (ndarray): Doppler velocity map of shape (grid_size_z, grid_size_x) in
+            meters/second.
+
+    """
+    assert data.ndim == 3, "Data must be a 3-D array"
+    if not (isinstance(lag, int) and lag >= 1):
+        raise ValueError("lag must be an integer >= 1")
+    n_frames = data.shape[0]
+    assert n_frames > lag, "Data must have more frames than the lag"
+
+    if hamming_size is None:
+        hamming_size = np.array([1, 1], dtype=int)
+    elif np.isscalar(hamming_size):
+        hamming_size = np.array([int(hamming_size), int(hamming_size)], dtype=int)
+    else:
+        assert len(hamming_size) == 2, "hamming_size must be an integer or a tuple of two integers"
+        hamming_size = np.array(hamming_size, dtype=int)
+    if not np.all(hamming_size > 0):
+        raise ValueError("hamming_size must contain integers > 0")
+
+    # Auto-correlation method
+    iq1 = data[: n_frames - lag]
+    iq2 = data[lag:]
+    autocorr = ops.sum(iq1 * ops.conj(iq2), axis=0)  # Ensemble auto-correlation
+
+    # Spatial weighted average
+    if hamming_size[0] != 1 and hamming_size[1] != 1:
+        h_row = np.hamming(hamming_size[0])
+        h_col = np.hamming(hamming_size[1])
+        autocorr = tensor_ops.apply_along_axis(
+            lambda x: tensor_ops.correlate(x, h_row, mode="same"), 0, autocorr
+        )
+        autocorr = tensor_ops.apply_along_axis(
+            lambda x: tensor_ops.correlate(x, h_col, mode="same"), 1, autocorr
+        )
+
+    # Doppler velocity
+    nyquist_velocity = sound_speed * pulse_repetition_frequency / (4 * center_frequency * lag)
+    phase = ops.arctan2(ops.imag(autocorr), ops.real(autocorr))
+    doppler_velocities = -nyquist_velocity * phase / np.pi
+    return doppler_velocities
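A hedged usage sketch of the new `color_doppler` on synthetic I/Q data: each frame's phase advances by π/4, so the autocorrelation estimator above should return roughly one quarter of the Nyquist velocity everywhere (all acquisition parameters below are made up for illustration):

import numpy as np

from zea.doppler import color_doppler

sound_speed = 1540.0        # m/s
center_frequency = 5e6      # Hz
prf = 4e3                   # pulse repetition frequency, Hz

n_frames, nz, nx = 10, 32, 32
dphi = np.pi / 4            # inter-frame phase shift
iq = np.exp(1j * dphi * np.arange(n_frames))[:, None, None] * np.ones((1, nz, nx))

velocities = color_doppler(iq, center_frequency, prf, sound_speed)
nyquist = sound_speed * prf / (4 * center_frequency)  # ~0.308 m/s
print(np.asarray(velocities)[0, 0], nyquist / 4)      # both ~0.077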