zea 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zea/__init__.py +3 -3
- zea/agent/masks.py +2 -2
- zea/agent/selection.py +3 -3
- zea/backend/__init__.py +1 -1
- zea/backend/tensorflow/dataloader.py +1 -5
- zea/beamform/beamformer.py +4 -2
- zea/beamform/pfield.py +2 -2
- zea/beamform/pixelgrid.py +1 -1
- zea/data/__init__.py +0 -9
- zea/data/augmentations.py +222 -29
- zea/data/convert/__init__.py +1 -6
- zea/data/convert/__main__.py +164 -0
- zea/data/convert/camus.py +106 -40
- zea/data/convert/echonet.py +184 -83
- zea/data/convert/echonetlvh/README.md +2 -3
- zea/data/convert/echonetlvh/{convert_raw_to_usbmd.py → __init__.py} +174 -103
- zea/data/convert/echonetlvh/manual_rejections.txt +73 -0
- zea/data/convert/echonetlvh/precompute_crop.py +43 -64
- zea/data/convert/picmus.py +37 -40
- zea/data/convert/utils.py +86 -0
- zea/data/convert/verasonics.py +1247 -0
- zea/data/data_format.py +124 -6
- zea/data/dataloader.py +12 -7
- zea/data/datasets.py +109 -70
- zea/data/file.py +119 -82
- zea/data/file_operations.py +496 -0
- zea/data/preset_utils.py +2 -2
- zea/display.py +8 -9
- zea/doppler.py +5 -5
- zea/func/__init__.py +109 -0
- zea/{tensor_ops.py → func/tensor.py} +113 -69
- zea/func/ultrasound.py +500 -0
- zea/internal/_generate_keras_ops.py +5 -5
- zea/internal/checks.py +6 -12
- zea/internal/operators.py +4 -0
- zea/io_lib.py +108 -160
- zea/metrics.py +6 -5
- zea/models/__init__.py +1 -1
- zea/models/diffusion.py +63 -12
- zea/models/echonetlvh.py +1 -1
- zea/models/gmm.py +1 -1
- zea/models/lv_segmentation.py +2 -0
- zea/ops/__init__.py +188 -0
- zea/ops/base.py +442 -0
- zea/{keras_ops.py → ops/keras_ops.py} +2 -2
- zea/ops/pipeline.py +1472 -0
- zea/ops/tensor.py +356 -0
- zea/ops/ultrasound.py +890 -0
- zea/probes.py +2 -10
- zea/scan.py +35 -28
- zea/tools/fit_scan_cone.py +90 -160
- zea/tools/selection_tool.py +1 -1
- zea/tracking/__init__.py +16 -0
- zea/tracking/base.py +94 -0
- zea/tracking/lucas_kanade.py +474 -0
- zea/tracking/segmentation.py +110 -0
- zea/utils.py +11 -2
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/METADATA +5 -1
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/RECORD +62 -48
- zea/data/convert/matlab.py +0 -1237
- zea/ops.py +0 -3294
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/WHEEL +0 -0
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/entry_points.txt +0 -0
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,496 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module provides some utilities to edit zea data files.
|
|
3
|
+
|
|
4
|
+
Available operations
|
|
5
|
+
--------------------
|
|
6
|
+
|
|
7
|
+
- `sum`: Sum multiple raw data files into one.
|
|
8
|
+
|
|
9
|
+
- `compound_frames`: Compound frames in a raw data file to increase SNR.
|
|
10
|
+
|
|
11
|
+
- `compound_transmits`: Compound transmits in a raw data file to increase SNR.
|
|
12
|
+
|
|
13
|
+
- `resave`: Resave a zea data file. This can be used to change the file format version.
|
|
14
|
+
|
|
15
|
+
- `extract`: Extract frames and transmits from a raw data file.
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
import argparse
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
|
|
21
|
+
import numpy as np
|
|
22
|
+
|
|
23
|
+
from zea import Probe, Scan
|
|
24
|
+
from zea.data.data_format import generate_zea_dataset, load_additional_elements, load_description
|
|
25
|
+
from zea.data.file import load_file_all_data_types
|
|
26
|
+
from zea.internal.checks import _IMAGE_DATA_TYPES, _NON_IMAGE_DATA_TYPES
|
|
27
|
+
from zea.internal.core import DataTypes
|
|
28
|
+
from zea.log import logger
|
|
29
|
+
|
|
30
|
+
ALL_DATA_TYPES_EXCEPT_RAW = set(_IMAGE_DATA_TYPES + _NON_IMAGE_DATA_TYPES) - {"raw_data"}
|
|
31
|
+
|
|
32
|
+
OPERATION_NAMES = [
|
|
33
|
+
"sum",
|
|
34
|
+
"compound_frames",
|
|
35
|
+
"compound_transmits",
|
|
36
|
+
"resave",
|
|
37
|
+
"extract",
|
|
38
|
+
]
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def save_file(
    path,
    scan: Scan,
    probe: Probe,
    raw_data: np.ndarray = None,
    aligned_data: np.ndarray = None,
    beamformed_data: np.ndarray = None,
    envelope_data: np.ndarray = None,
    image: np.ndarray = None,
    image_sc: np.ndarray = None,
    additional_elements=None,
    description="",
    **kwargs,
):
    """Saves data to a zea data file (h5py file).

    Thin wrapper around ``generate_zea_dataset`` that forwards the acquisition
    parameters stored on ``scan`` and ``probe``.

    Args:
        path (str, pathlike): The path to the hdf5 file.
        scan (Scan): The scan object containing the parameters of the acquisition.
        probe (Probe): The probe object containing the parameters of the probe.
        raw_data (np.ndarray, optional): Raw channel data to save.
        aligned_data (np.ndarray, optional): Time-aligned channel data to save.
        beamformed_data (np.ndarray, optional): Beamformed data to save.
        envelope_data (np.ndarray, optional): Envelope-detected data to save.
        image (np.ndarray, optional): Image data to save.
        image_sc (np.ndarray, optional): Scan-converted image data to save.
        additional_elements (list of DatasetElement, optional): Additional elements to save in the
            file. Defaults to None.
        description (str, optional): Free-text description stored in the file.
        **kwargs: NOTE(review): extra keyword arguments are accepted but silently
            discarded (they are not forwarded to ``generate_zea_dataset``) —
            confirm this is intended.
    """

    # The probe name is hard-coded to "generic"; the actual geometry is taken
    # from the probe object instead.
    generate_zea_dataset(
        path=path,
        raw_data=raw_data,
        aligned_data=aligned_data,
        beamformed_data=beamformed_data,
        image=image,
        image_sc=image_sc,
        envelope_data=envelope_data,
        probe_name="generic",
        probe_geometry=probe.probe_geometry,
        sampling_frequency=scan.sampling_frequency,
        center_frequency=scan.center_frequency,
        initial_times=scan.initial_times,
        t0_delays=scan.t0_delays,
        sound_speed=scan.sound_speed,
        focus_distances=scan.focus_distances,
        polar_angles=scan.polar_angles,
        azimuth_angles=scan.azimuth_angles,
        tx_apodizations=scan.tx_apodizations,
        bandwidth_percent=scan.bandwidth_percent,
        time_to_next_transmit=scan.time_to_next_transmit,
        tgc_gain_curve=scan.tgc_gain_curve,
        element_width=scan.element_width,
        tx_waveform_indices=scan.tx_waveform_indices,
        waveforms_one_way=scan.waveforms_one_way,
        waveforms_two_way=scan.waveforms_two_way,
        description=description,
        additional_elements=additional_elements,
    )
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def sum_data(input_paths: list[Path], output_path: Path, overwrite=False):
    """
    Sums multiple raw data files and saves the result to a new file.

    All files must have identical scan and probe parameters and identically
    shaped data arrays.

    Args:
        input_paths (list[Path]): List of paths to the input raw data files.
        output_path (Path): Path to the output file where the summed data will be saved.
        overwrite (bool, optional): Whether to overwrite the output file if it exists. Defaults to
            False.

    Raises:
        ValueError: If array shapes, scan parameters, or probe parameters differ
            between files.
    """

    data_dict, scan, probe = load_file_all_data_types(input_paths[0])
    description = load_description(input_paths[0])
    additional_elements = load_additional_elements(input_paths[0])

    # Channel/beam-domain data sums linearly; images are assumed to be
    # log-compressed and therefore summed in the linear (exp) domain.
    linear_keys = ("raw_data", "aligned_data", "beamformed_data", "envelope_data")
    log_keys = ("image", "image_sc")

    for file in input_paths[1:]:
        new_data, new_scan, new_probe = load_file_all_data_types(file)

        for key in linear_keys:
            if data_dict[key] is not None:
                _assert_shapes_equal(data_dict[key], new_data[key], key)
                data_dict[key] += new_data[key]

        for key in log_keys:
            if data_dict[key] is not None:
                _assert_shapes_equal(data_dict[key], new_data[key], key)
                data_dict[key] = np.log(np.exp(new_data[key]) + np.exp(data_dict[key]))

        # Raise (rather than assert) so the checks survive `python -O`.
        if scan != new_scan:
            raise ValueError("Scan parameters do not match.")
        if probe != new_probe:
            raise ValueError("Probe parameters do not match.")

    if overwrite:
        _delete_file_if_exists(output_path)

    save_file(
        path=output_path,
        scan=scan,
        probe=probe,
        additional_elements=additional_elements,
        description=description,
        **data_dict,
    )
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def _assert_shapes_equal(array0, array1, name="array"):
|
|
163
|
+
shape0, shape1 = array0.shape, array1.shape
|
|
164
|
+
assert shape0 == shape1, f"{name} shapes do not match. Got {shape0} and {shape1}."
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def compound_frames(input_path: Path, output_path: Path, overwrite=False):
    """
    Compounds frames in a raw data file by averaging them.

    Args:
        input_path (Path): Path to the input raw data file.
        output_path (Path): Path to the output file where the compounded data will be saved.
        overwrite (bool, optional): Whether to overwrite the output file if it exists. Defaults to
            False.
    """

    data_dict, scan, probe = load_file_all_data_types(input_path)
    additional_elements = load_additional_elements(input_path)
    description = load_description(input_path)

    # Frames are assumed to lie along the first axis of every data array.
    compounded = {}
    for data_type in DataTypes:
        key = data_type.value
        value = data_dict[key]
        if value is None:
            compounded[key] = None
        elif key in ("image", "image_sc"):
            # Images are averaged in the linear (exp) domain, then re-compressed.
            compounded[key] = np.log(np.mean(np.exp(value), axis=0, keepdims=True))
        else:
            compounded[key] = np.mean(value, axis=0, keepdims=True)

    # Only a single (averaged) frame remains after compounding.
    scan = _scan_reduce_frames(scan, [0])

    if overwrite:
        _delete_file_if_exists(output_path)

    save_file(
        path=output_path,
        scan=scan,
        probe=probe,
        additional_elements=additional_elements,
        description=description,
        **compounded,
    )
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def compound_transmits(input_path: Path, output_path: Path, overwrite=False):
    """
    Compounds transmits in a raw data file by averaging them.

    Note
    ----
    This function assumes that all transmits are identical. If this is not the case the function
    will result in incorrect scan parameters.

    Args:
        input_path (Path): Path to the input raw data file.
        output_path (Path): Path to the output file where the compounded data will be saved.
        overwrite (bool, optional): Whether to overwrite the output file if it exists. Defaults to
            False.
    """

    data_dict, scan, probe = load_file_all_data_types(input_path)
    additional_elements = load_additional_elements(input_path)
    description = load_description(input_path)

    # Warn (but proceed) when per-transmit parameters differ between transmits.
    if not _all_tx_are_identical(scan):
        logger.warning(
            "Not all transmits are identical. Compounding transmits may lead to unexpected results."
        )

    # Transmits are assumed to lie along the second axis of the channel-domain arrays.
    for key in ("raw_data", "aligned_data"):
        if data_dict[key] is not None:
            data_dict[key] = np.mean(data_dict[key], axis=1, keepdims=True)

    # Only a single (averaged) transmit remains.
    scan.set_transmits([0])

    if overwrite:
        _delete_file_if_exists(output_path)

    save_file(
        path=output_path,
        scan=scan,
        probe=probe,
        additional_elements=additional_elements,
        description=description,
        **data_dict,
    )
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def _all_tx_are_identical(scan: Scan):
|
|
257
|
+
"""Checks if all transmits in a Scan object are identical."""
|
|
258
|
+
attributes_to_check = [
|
|
259
|
+
scan.polar_angles,
|
|
260
|
+
scan.azimuth_angles,
|
|
261
|
+
scan.t0_delays,
|
|
262
|
+
scan.tx_apodizations,
|
|
263
|
+
scan.focus_distances,
|
|
264
|
+
scan.initial_times,
|
|
265
|
+
]
|
|
266
|
+
|
|
267
|
+
for attr in attributes_to_check:
|
|
268
|
+
if attr is not None and not _check_all_identical(attr, axis=0):
|
|
269
|
+
return False
|
|
270
|
+
return True
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def _check_all_identical(array, axis=0):
|
|
274
|
+
"""Checks if all elements along a given axis are identical."""
|
|
275
|
+
first = array.take(0, axis=axis)
|
|
276
|
+
return np.all(np.equal(array, first), axis=axis).all()
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def resave(input_path: Path, output_path: Path, overwrite=False):
    """
    Resaves a zea data file to a new location.

    Args:
        input_path (Path): Path to the input zea data file.
        output_path (Path): Path to the output file where the data will be saved.
        overwrite (bool, optional): Whether to overwrite the output file if it exists. Defaults to
            False.
    """

    data_dict, scan, probe = load_file_all_data_types(input_path)
    additional_elements = load_additional_elements(input_path)
    description = load_description(input_path)
    # Write out every transmit, regardless of any prior selection on the scan.
    scan.set_transmits("all")

    if overwrite:
        _delete_file_if_exists(output_path)

    save_file(
        path=output_path,
        scan=scan,
        probe=probe,
        additional_elements=additional_elements,
        description=description,
        **data_dict,
    )
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
def extract_frames_transmits(
    input_path: Path,
    output_path: Path,
    frame_indices=slice(None),
    transmit_indices=slice(None),
    overwrite=False,
):
    """
    Extracts frames and transmits in a raw data file.

    Note that the frame indices cannot both be lists. At least one of them must be a slice.
    Please refer to the documentation of :func:`zea.data.file.load_file_all_data_types` for more
    information on the supported index types.

    Args:
        input_path (Path): Path to the input raw data file.
        output_path (Path): Path to the output file where the extracted data will be saved.
        frame_indices (list, array-like, or slice): Indices of the frames to keep.
        transmit_indices (list, array-like, or slice): Indices of the transmits to keep.
        overwrite (bool, optional): Whether to overwrite the output file if it exists. Defaults to
            False.
    """
    # Loading applies the (frame, transmit) selection directly on read.
    data_dict, scan, probe = load_file_all_data_types(
        input_path, indices=(frame_indices, transmit_indices)
    )

    additional_elements = load_additional_elements(input_path)
    description = load_description(input_path)

    # Keep the per-frame scan metadata in sync with the extracted frames.
    scan = _scan_reduce_frames(scan, frame_indices)

    if overwrite:
        _delete_file_if_exists(output_path)

    save_file(
        path=output_path,
        scan=scan,
        probe=probe,
        additional_elements=additional_elements,
        description=description,
        **data_dict,
    )
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
def _delete_file_if_exists(path: Path):
|
|
351
|
+
"""Deletes a file if it exists."""
|
|
352
|
+
if path.exists():
|
|
353
|
+
path.unlink()
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def _interpret_index(input_str):
|
|
357
|
+
if "-" in input_str:
|
|
358
|
+
start, end = map(int, input_str.split("-"))
|
|
359
|
+
return list(range(start, end + 1))
|
|
360
|
+
else:
|
|
361
|
+
return [int(x) for x in input_str.split(" ")]
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
def _interpret_indices(input_str_list):
|
|
365
|
+
if isinstance(input_str_list, str) and input_str_list == "all":
|
|
366
|
+
return slice(None)
|
|
367
|
+
|
|
368
|
+
if len(input_str_list) == 1 and "-" in input_str_list[0]:
|
|
369
|
+
start, end = map(int, input_str_list[0].split("-"))
|
|
370
|
+
return slice(start, end + 1)
|
|
371
|
+
|
|
372
|
+
indices = []
|
|
373
|
+
for part in input_str_list:
|
|
374
|
+
indices.extend(_interpret_index(part))
|
|
375
|
+
return indices
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def _scan_reduce_frames(scan, frame_indices):
    """Slice per-frame scan metadata down to ``frame_indices``.

    Temporarily selects all transmits so that ``time_to_next_transmit`` is
    indexed against the full array, then restores the caller's transmit
    selection. The scan object is modified in place and also returned.

    Args:
        scan: Scan object whose per-frame metadata should be reduced.
        frame_indices: Indices (list, array-like, or slice) of frames to keep.

    Returns:
        The same scan object, with reduced per-frame metadata.
    """
    # Remember the current selection so it can be restored afterwards.
    transmit_indices = scan.selected_transmits
    scan.set_transmits("all")
    if scan.time_to_next_transmit is not None:
        # NOTE(review): assumes the first axis of time_to_next_transmit is the
        # frame axis — confirm against the data format spec.
        scan.time_to_next_transmit = scan.time_to_next_transmit[frame_indices]
    scan.set_transmits(transmit_indices)
    return scan
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
def get_parser():
    """Command line argument parser with subcommands.

    Returns:
        argparse.ArgumentParser: Parser with one subcommand per operation
        (sum, compound_frames, compound_transmits, resave, extract).
    """

    parser = argparse.ArgumentParser(
        description="Manipulate zea data files.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="operation", required=True)
    _add_parser_sum(subparsers)
    _add_parser_compound_frames(subparsers)
    _add_parser_compound_transmits(subparsers)
    _add_parser_resave(subparsers)
    _add_parser_extract(subparsers)

    return parser


def _add_overwrite_argument(parser):
    """Add the --overwrite flag shared by every subcommand."""
    parser.add_argument(
        "--overwrite", action="store_true", default=False, help="Overwrite existing output file."
    )


def _add_io_arguments(parser):
    """Add the positional input/output path arguments and --overwrite flag
    shared by the single-input subcommands."""
    parser.add_argument("input_path", type=Path, help="Input HDF5 file.")
    parser.add_argument("output_path", type=Path, help="Output HDF5 file.")
    _add_overwrite_argument(parser)


def _add_parser_sum(subparsers):
    """Register the `sum` subcommand (multiple inputs, one output)."""
    sum_parser = subparsers.add_parser("sum", help="Sum the raw data of multiple files.")
    sum_parser.add_argument("input_paths", type=Path, nargs="+", help="Paths to the input files.")
    sum_parser.add_argument("output_path", type=Path, help="Output HDF5 file.")
    _add_overwrite_argument(sum_parser)


def _add_parser_compound_frames(subparsers):
    """Register the `compound_frames` subcommand."""
    cf_parser = subparsers.add_parser("compound_frames", help="Compound frames to increase SNR.")
    _add_io_arguments(cf_parser)


def _add_parser_compound_transmits(subparsers):
    """Register the `compound_transmits` subcommand."""
    ct_parser = subparsers.add_parser(
        "compound_transmits", help="Compound transmits to increase SNR."
    )
    _add_io_arguments(ct_parser)


def _add_parser_resave(subparsers):
    """Register the `resave` subcommand."""
    resave_parser = subparsers.add_parser("resave", help="Resave a file to change format version.")
    _add_io_arguments(resave_parser)


def _add_parser_extract(subparsers):
    """Register the `extract` subcommand (subset of frames/transmits)."""
    extract_parser = subparsers.add_parser("extract", help="Extract subset of frames or transmits.")
    extract_parser.add_argument("input_path", type=Path, help="Input HDF5 file.")
    extract_parser.add_argument("output_path", type=Path, help="Output HDF5 file.")
    extract_parser.add_argument(
        "--transmits",
        type=str,
        nargs="+",
        default="all",
        help="Target transmits. Can be a list of integers or ranges (e.g. 0-3 7).",
    )
    extract_parser.add_argument(
        "--frames",
        type=str,
        nargs="+",
        default="all",
        help="Target frames. Can be a list of integers or ranges (e.g. 0-3 7).",
    )
    _add_overwrite_argument(extract_parser)
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()

    # Refuse to clobber an existing output file unless --overwrite was given.
    if args.output_path.exists() and not args.overwrite:
        logger.error(
            f"Output file {args.output_path} already exists. Use --overwrite to overwrite it."
        )
        # Use SystemExit instead of the site-provided exit() builtin, which is
        # a REPL convenience and not guaranteed to be available.
        raise SystemExit(1)

    if args.operation == "compound_frames":
        compound_frames(
            input_path=args.input_path, output_path=args.output_path, overwrite=args.overwrite
        )
    elif args.operation == "compound_transmits":
        compound_transmits(
            input_path=args.input_path, output_path=args.output_path, overwrite=args.overwrite
        )
    elif args.operation == "resave":
        resave(input_path=args.input_path, output_path=args.output_path, overwrite=args.overwrite)
    elif args.operation == "extract":
        extract_frames_transmits(
            input_path=args.input_path,
            output_path=args.output_path,
            frame_indices=_interpret_indices(args.frames),
            transmit_indices=_interpret_indices(args.transmits),
            overwrite=args.overwrite,
        )
    else:
        # Subparsers are required, so the only remaining operation is "sum".
        sum_data(
            input_paths=args.input_paths, output_path=args.output_path, overwrite=args.overwrite
        )
|
zea/data/preset_utils.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"""Preset utils for zea datasets hosted on Hugging Face.
|
|
2
2
|
|
|
3
|
-
See https://huggingface.co/
|
|
3
|
+
See https://huggingface.co/zeahub/
|
|
4
4
|
"""
|
|
5
5
|
|
|
6
6
|
from pathlib import Path
|
|
@@ -86,7 +86,7 @@ def _download_files_in_path(
|
|
|
86
86
|
return downloaded_files
|
|
87
87
|
|
|
88
88
|
|
|
89
|
-
def _hf_resolve_path(hf_path: str, cache_dir=HF_DATASETS_DIR):
|
|
89
|
+
def _hf_resolve_path(hf_path: str, cache_dir=HF_DATASETS_DIR) -> Path:
|
|
90
90
|
"""Resolve a Hugging Face path to a local cache directory path.
|
|
91
91
|
|
|
92
92
|
Downloads files from a HuggingFace dataset repository and returns
|
zea/display.py
CHANGED
|
@@ -8,8 +8,7 @@ import scipy
|
|
|
8
8
|
from keras import ops
|
|
9
9
|
from PIL import Image
|
|
10
10
|
|
|
11
|
-
from zea import
|
|
12
|
-
from zea.tensor_ops import translate
|
|
11
|
+
from zea.func.tensor import translate
|
|
13
12
|
from zea.tools.fit_scan_cone import fit_and_crop_around_scan_cone
|
|
14
13
|
|
|
15
14
|
|
|
@@ -441,11 +440,7 @@ def cartesian_to_polar_matrix(
|
|
|
441
440
|
Returns:
|
|
442
441
|
polar_matrix (Array): The image re-sampled in polar coordinates with shape `polar_shape`.
|
|
443
442
|
"""
|
|
444
|
-
|
|
445
|
-
log.info(
|
|
446
|
-
f"Cartesian matrix with dtype {ops.dtype(cartesian_matrix)} has been cast to float32."
|
|
447
|
-
)
|
|
448
|
-
cartesian_matrix = ops.cast(cartesian_matrix, "float32")
|
|
443
|
+
assert "float" in ops.dtype(cartesian_matrix), "Input image must be float type"
|
|
449
444
|
|
|
450
445
|
# Assume that polar grid is same shape as cartesian grid unless specified
|
|
451
446
|
cartesian_rows, cartesian_cols = ops.shape(cartesian_matrix)
|
|
@@ -503,6 +498,7 @@ def inverse_scan_convert_2d(
|
|
|
503
498
|
output_size=None,
|
|
504
499
|
interpolation_order=1,
|
|
505
500
|
find_scan_cone=True,
|
|
501
|
+
image_range: tuple | None = None,
|
|
506
502
|
):
|
|
507
503
|
"""
|
|
508
504
|
Convert a Cartesian-format ultrasound image to a polar representation.
|
|
@@ -512,7 +508,7 @@ def inverse_scan_convert_2d(
|
|
|
512
508
|
Optionally, it can detect and crop around the scan cone before conversion.
|
|
513
509
|
|
|
514
510
|
Args:
|
|
515
|
-
cartesian_image (tensor): 2D image array in Cartesian coordinates.
|
|
511
|
+
cartesian_image (tensor): 2D image array in Cartesian coordinates of type float.
|
|
516
512
|
fill_value (float): Value used to fill regions outside the original image
|
|
517
513
|
during interpolation.
|
|
518
514
|
angle (float): Angular field of view (in radians) used for the polar transformation.
|
|
@@ -525,12 +521,15 @@ def inverse_scan_convert_2d(
|
|
|
525
521
|
in the Cartesian image before polar conversion, ensuring that the scan cone is
|
|
526
522
|
centered without padding. Can be set to False if the image is already cropped
|
|
527
523
|
and centered.
|
|
524
|
+
image_range (tuple, optional): Tuple (vmin, vmax) for display scaling
|
|
525
|
+
when detecting the scan cone.
|
|
528
526
|
|
|
529
527
|
Returns:
|
|
530
528
|
polar_image (Array): 2D image in polar coordinates (sector-shaped scan).
|
|
531
529
|
"""
|
|
532
530
|
if find_scan_cone:
|
|
533
|
-
|
|
531
|
+
assert image_range is not None, "image_range must be provided when find_scan_cone is True"
|
|
532
|
+
cartesian_image = fit_and_crop_around_scan_cone(cartesian_image, image_range)
|
|
534
533
|
polar_image = cartesian_to_polar_matrix(
|
|
535
534
|
cartesian_image,
|
|
536
535
|
fill_value=fill_value,
|
zea/doppler.py
CHANGED
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
import numpy as np
|
|
4
4
|
from keras import ops
|
|
5
5
|
|
|
6
|
-
from zea import
|
|
6
|
+
from zea.func import tensor
|
|
7
7
|
|
|
8
8
|
|
|
9
9
|
def color_doppler(
|
|
@@ -61,11 +61,11 @@ def color_doppler(
|
|
|
61
61
|
if hamming_size[0] != 1 and hamming_size[1] != 1:
|
|
62
62
|
h_row = np.hamming(hamming_size[0])
|
|
63
63
|
h_col = np.hamming(hamming_size[1])
|
|
64
|
-
autocorr =
|
|
65
|
-
lambda x:
|
|
64
|
+
autocorr = tensor.apply_along_axis(
|
|
65
|
+
lambda x: tensor.correlate(x, h_row, mode="same"), 0, autocorr
|
|
66
66
|
)
|
|
67
|
-
autocorr =
|
|
68
|
-
lambda x:
|
|
67
|
+
autocorr = tensor.apply_along_axis(
|
|
68
|
+
lambda x: tensor.correlate(x, h_col, mode="same"), 1, autocorr
|
|
69
69
|
)
|
|
70
70
|
|
|
71
71
|
# Doppler velocity
|
zea/func/__init__.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
"""Functional API of zea.
|
|
2
|
+
|
|
3
|
+
This module provides a collection of functions for various operations on tensors
|
|
4
|
+
and ultrasound data. These functions can be used standalone, in contrast to the :mod:`zea.ops` module which provides operation classes for building processing pipelines.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .tensor import (
|
|
8
|
+
L1,
|
|
9
|
+
L2,
|
|
10
|
+
add_salt_and_pepper_noise,
|
|
11
|
+
apply_along_axis,
|
|
12
|
+
batch_cov,
|
|
13
|
+
boolean_mask,
|
|
14
|
+
check_patches_fit,
|
|
15
|
+
compute_required_patch_overlap,
|
|
16
|
+
compute_required_patch_shape,
|
|
17
|
+
correlate,
|
|
18
|
+
extend_n_dims,
|
|
19
|
+
find_contour,
|
|
20
|
+
flatten,
|
|
21
|
+
fori_loop,
|
|
22
|
+
func_with_one_batch_dim,
|
|
23
|
+
gaussian_filter,
|
|
24
|
+
images_to_patches,
|
|
25
|
+
interpolate_data,
|
|
26
|
+
is_jax_prng_key,
|
|
27
|
+
is_monotonic,
|
|
28
|
+
linear_sum_assignment,
|
|
29
|
+
map_indices_for_interpolation,
|
|
30
|
+
matrix_power,
|
|
31
|
+
normalize,
|
|
32
|
+
pad_array_to_divisible,
|
|
33
|
+
patches_to_images,
|
|
34
|
+
resample,
|
|
35
|
+
reshape_axis,
|
|
36
|
+
simple_map,
|
|
37
|
+
sinc,
|
|
38
|
+
split_seed,
|
|
39
|
+
split_volume_data_from_axis,
|
|
40
|
+
stack_volume_data_along_axis,
|
|
41
|
+
translate,
|
|
42
|
+
vmap,
|
|
43
|
+
)
|
|
44
|
+
from .ultrasound import (
|
|
45
|
+
channels_to_complex,
|
|
46
|
+
complex_to_channels,
|
|
47
|
+
compute_time_to_peak,
|
|
48
|
+
compute_time_to_peak_stack,
|
|
49
|
+
demodulate,
|
|
50
|
+
demodulate_not_jitable,
|
|
51
|
+
envelope_detect,
|
|
52
|
+
get_band_pass_filter,
|
|
53
|
+
get_low_pass_iq_filter,
|
|
54
|
+
hilbert,
|
|
55
|
+
log_compress,
|
|
56
|
+
upmix,
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
__all__ = [
|
|
60
|
+
# Tensor functions
|
|
61
|
+
"L1",
|
|
62
|
+
"L2",
|
|
63
|
+
"add_salt_and_pepper_noise",
|
|
64
|
+
"apply_along_axis",
|
|
65
|
+
"batch_cov",
|
|
66
|
+
"boolean_mask",
|
|
67
|
+
"check_patches_fit",
|
|
68
|
+
"compute_required_patch_overlap",
|
|
69
|
+
"compute_required_patch_shape",
|
|
70
|
+
"correlate",
|
|
71
|
+
"extend_n_dims",
|
|
72
|
+
"find_contour",
|
|
73
|
+
"flatten",
|
|
74
|
+
"fori_loop",
|
|
75
|
+
"func_with_one_batch_dim",
|
|
76
|
+
"gaussian_filter",
|
|
77
|
+
"images_to_patches",
|
|
78
|
+
"interpolate_data",
|
|
79
|
+
"is_jax_prng_key",
|
|
80
|
+
"is_monotonic",
|
|
81
|
+
"linear_sum_assignment",
|
|
82
|
+
"map_indices_for_interpolation",
|
|
83
|
+
"matrix_power",
|
|
84
|
+
"normalize",
|
|
85
|
+
"pad_array_to_divisible",
|
|
86
|
+
"patches_to_images",
|
|
87
|
+
"resample",
|
|
88
|
+
"reshape_axis",
|
|
89
|
+
"simple_map",
|
|
90
|
+
"sinc",
|
|
91
|
+
"split_seed",
|
|
92
|
+
"split_volume_data_from_axis",
|
|
93
|
+
"stack_volume_data_along_axis",
|
|
94
|
+
"translate",
|
|
95
|
+
"vmap",
|
|
96
|
+
# Ultrasound functions
|
|
97
|
+
"channels_to_complex",
|
|
98
|
+
"complex_to_channels",
|
|
99
|
+
"compute_time_to_peak",
|
|
100
|
+
"compute_time_to_peak_stack",
|
|
101
|
+
"demodulate",
|
|
102
|
+
"demodulate_not_jitable",
|
|
103
|
+
"envelope_detect",
|
|
104
|
+
"get_band_pass_filter",
|
|
105
|
+
"get_low_pass_iq_filter",
|
|
106
|
+
"hilbert",
|
|
107
|
+
"upmix",
|
|
108
|
+
"log_compress",
|
|
109
|
+
]
|