zea 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zea/__init__.py +3 -3
- zea/agent/masks.py +2 -2
- zea/agent/selection.py +3 -3
- zea/backend/__init__.py +1 -1
- zea/backend/tensorflow/dataloader.py +1 -5
- zea/beamform/beamformer.py +4 -2
- zea/beamform/pfield.py +2 -2
- zea/beamform/pixelgrid.py +1 -1
- zea/data/__init__.py +0 -9
- zea/data/augmentations.py +222 -29
- zea/data/convert/__init__.py +1 -6
- zea/data/convert/__main__.py +164 -0
- zea/data/convert/camus.py +106 -40
- zea/data/convert/echonet.py +184 -83
- zea/data/convert/echonetlvh/README.md +2 -3
- zea/data/convert/echonetlvh/{convert_raw_to_usbmd.py → __init__.py} +174 -103
- zea/data/convert/echonetlvh/manual_rejections.txt +73 -0
- zea/data/convert/echonetlvh/precompute_crop.py +43 -64
- zea/data/convert/picmus.py +37 -40
- zea/data/convert/utils.py +86 -0
- zea/data/convert/verasonics.py +1247 -0
- zea/data/data_format.py +124 -6
- zea/data/dataloader.py +12 -7
- zea/data/datasets.py +109 -70
- zea/data/file.py +119 -82
- zea/data/file_operations.py +496 -0
- zea/data/preset_utils.py +2 -2
- zea/display.py +8 -9
- zea/doppler.py +5 -5
- zea/func/__init__.py +109 -0
- zea/{tensor_ops.py → func/tensor.py} +113 -69
- zea/func/ultrasound.py +500 -0
- zea/internal/_generate_keras_ops.py +5 -5
- zea/internal/checks.py +6 -12
- zea/internal/operators.py +4 -0
- zea/io_lib.py +108 -160
- zea/metrics.py +6 -5
- zea/models/__init__.py +1 -1
- zea/models/diffusion.py +63 -12
- zea/models/echonetlvh.py +1 -1
- zea/models/gmm.py +1 -1
- zea/models/lv_segmentation.py +2 -0
- zea/ops/__init__.py +188 -0
- zea/ops/base.py +442 -0
- zea/{keras_ops.py → ops/keras_ops.py} +2 -2
- zea/ops/pipeline.py +1472 -0
- zea/ops/tensor.py +356 -0
- zea/ops/ultrasound.py +890 -0
- zea/probes.py +2 -10
- zea/scan.py +35 -28
- zea/tools/fit_scan_cone.py +90 -160
- zea/tools/selection_tool.py +1 -1
- zea/tracking/__init__.py +16 -0
- zea/tracking/base.py +94 -0
- zea/tracking/lucas_kanade.py +474 -0
- zea/tracking/segmentation.py +110 -0
- zea/utils.py +11 -2
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/METADATA +5 -1
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/RECORD +62 -48
- zea/data/convert/matlab.py +0 -1237
- zea/ops.py +0 -3294
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/WHEEL +0 -0
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/entry_points.txt +0 -0
- {zea-0.0.7.dist-info → zea-0.0.9.dist-info}/licenses/LICENSE +0 -0
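The largest structural change in 0.0.9 is the module reorganization: the monolithic `zea/ops.py` (3294 lines removed) is split into the new `zea/ops/` package, `zea/tensor_ops.py` moves to `zea/func/tensor.py`, and `zea/keras_ops.py` moves to `zea/ops/keras_ops.py`. A minimal sketch of the import updates this implies for downstream code, inferred purely from the file moves above (the actual public re-exports in `zea/ops/__init__.py` and `zea/func/__init__.py` are not visible in this diff):

```python
# Sketch only: module paths inferred from the file moves listed above.
# 0.0.7 layout:
#   from zea import ops, tensor_ops, keras_ops
# 0.0.9 layout implied by the renames:
from zea.func import tensor      # was zea/tensor_ops.py
from zea.ops import keras_ops    # was zea/keras_ops.py
from zea.ops import base, pipeline, ultrasound  # zea/ops.py split into a package
```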
zea/data/convert/echonetlvh/precompute_crop.py
CHANGED

@@ -3,58 +3,27 @@ Script to precompute cone parameters for the EchoNet-LVH dataset.
 
 This script should be run separately before the main conversion process.
 """
 
-import argparse
 import csv
 import json
-import os
 from pathlib import Path
 
 from tqdm import tqdm
 
-
-os.environ["KERAS_BACKEND"] = "numpy"
-
+from zea import log
 from zea.tools.fit_scan_cone import fit_and_crop_around_scan_cone
 
 
-def get_args():
-    """Parse command line arguments."""
-    parser = argparse.ArgumentParser(
-        description="Precompute cone parameters for EchoNet-LVH dataset"
-    )
-    parser.add_argument(
-        "--source",
-        type=str,
-        required=True,
-    )
-    parser.add_argument(
-        "--output",
-        type=str,
-        required=True,
-    )
-    parser.add_argument(
-        "--batch",
-        type=str,
-        help="Specify which BatchX directory to process, e.g. --batch=Batch2",
-    )
-    parser.add_argument(
-        "--max_files",
-        type=int,
-        default=None,
-        help="Maximum number of files to process (for testing)",
-    )
-    parser.add_argument(
-        "--force",
-        action="store_true",
-        help="Force recomputation even if parameters already exist",
-    )
-    return parser.parse_args()
-
-
 def load_splits(source_dir):
-    """
+    """
+    Load splits from MeasurementsList.csv and return avi filenames
+
+    Args:
+        source_dir: Source directory containing MeasurementsList.csv
+    Returns:
+        Dictionary with keys 'train', 'val', 'test', 'rejected' and values as lists of avi filenames
+    """
     csv_path = Path(source_dir) / "MeasurementsList.csv"
-    splits = {"train": [], "val": [], "test": []}
+    splits = {"train": [], "val": [], "test": [], "rejected": []}
     # Read CSV using built-in csv module
     with open(csv_path, newline="", encoding="utf-8") as csvfile:
         reader = csv.DictReader(csvfile)
@@ -71,7 +40,17 @@ def load_splits(source_dir):
 
 
 def find_avi_file(source_dir, hashed_filename, batch=None):
-    """
+    """
+    Find AVI file in the specified batch directory or any batch if not specified.
+
+    Args:
+        source_dir: Source directory containing BatchX subdirectories
+        hashed_filename: Hashed filename (with or without .avi extension)
+        batch: Specific batch directory to search in (e.g., "Batch2"), or None to search all batches
+
+    Returns:
+        Path to the AVI file if found, else None
+    """
     # If filename already has .avi extension, strip it
     if hashed_filename.endswith(".avi"):
         hashed_filename = hashed_filename[:-4]
@@ -98,7 +77,7 @@ def load_first_frame(avi_file):
         avi_file: Path to the video file
 
     Returns:
-        First frame as numpy array
+        First frame as numpy array of shape (H, W) and dtype np.uint8 (grayscale)
     """
     try:
         import cv2
@@ -129,9 +108,20 @@ def precompute_cone_parameters(args):
     This function loads the first frame from each AVI file, applies fit_scan_cone
     to determine cropping parameters, and saves these parameters to a CSV file
     for later use during the actual data conversion.
+
+    Args:
+        args: Argument parser namespace with the following attributes:
+            src: Source directory containing EchoNet-LVH data
+            dst: Destination directory to save cone parameters
+            batch: Specific batch to process (e.g., "Batch2") or None for all
+            max_files: Maximum number of files to process (or None for all)
+            force: Whether to recompute parameters if they already exist
+    Returns:
+        Path to the CSV file containing cone parameters
     """
-
-
+
+    source_path = Path(args.src)
+    output_path = Path(args.dst)
     output_path.mkdir(parents=True, exist_ok=True)
 
     # Output file for cone parameters
@@ -140,7 +130,7 @@ def precompute_cone_parameters(args):
 
     # Check if parameters already exist
     if cone_params_csv.exists() and not args.force:
-
+        log.warning(f"Parameters already exist at {cone_params_csv}. Use --force to recompute.")
         return cone_params_csv
 
     # Get list of files to process
@@ -151,21 +141,21 @@ def precompute_cone_parameters(args):
         for avi_filename in split_files:
             # Strip .avi if present
             base_filename = avi_filename[:-4] if avi_filename.endswith(".avi") else avi_filename
-            avi_file = find_avi_file(args.
+            avi_file = find_avi_file(args.src, base_filename, batch=args.batch)
             if avi_file:
                 files_to_process.append((avi_file, avi_filename))
             else:
-
-                f"
+                log.warning(
+                    f"Could not find AVI file for {base_filename} in batch "
                     f"{args.batch if args.batch else 'any'}"
                 )
 
     # Limit files if max_files is specified
     if args.max_files is not None:
         files_to_process = files_to_process[: args.max_files]
-
+        log.info(f"Limited to processing {args.max_files} files due to max_files parameter")
 
-
+    log.info(f"Computing cone parameters for {len(files_to_process)} files")
 
     # Dictionary to store parameters for each file
     all_cone_params = {}
@@ -217,7 +207,7 @@ def precompute_cone_parameters(args):
             all_cone_params[avi_filename] = essential_params
 
         except Exception as e:
-
+            log.error(f"Error processing {avi_file}: {str(e)}")
 
             # Write failure record
             failure_record = {
@@ -236,16 +226,5 @@ def precompute_cone_parameters(args):
     with open(cone_params_json, "w", encoding="utf-8") as jsonfile:
         json.dump(all_cone_params, jsonfile)
 
-
+    log.info(f"Cone parameters saved to {cone_params_csv} and {cone_params_json}")
     return cone_params_csv
-
-
-if __name__ == "__main__":
-    args = get_args()
-    print("Using Keras backend: numpy (forced for best performance)")
-
-    # Precompute cone parameters
-    cone_params_csv = precompute_cone_parameters(args)
-
-    print(f"Precomputation completed. Parameters saved to {cone_params_csv}")
-    print("You can now run the main conversion script.")
zea/data/convert/picmus.py
CHANGED
@@ -1,15 +1,11 @@
 """
 Script to convert the PICMUS database to the zea format.
 
-
-
-
-    --src_dir /mnt/data/PICMUS \
-    --output_dir converted_PICMUS_dir
-```
+For more information about the dataset, refer to the following link:
+
+- The original dataset can be found at `this link <https://www.creatis.insa-lyon.fr/Challenge/IEEE_IUS_2016/download>`_.
 """
 
-import argparse
 import logging
 import os
 from pathlib import Path
@@ -19,11 +15,13 @@ import numpy as np
 
 from zea import log
 from zea.beamform.delays import compute_t0_delays_planewave
+from zea.data.convert.utils import unzip
 from zea.data.data_format import generate_zea_dataset
 
 
-def
-    """
+def convert(source_path, output_path, overwrite=False):
+    """
+    Converts and writes a single PICMUS file to the zea format.
 
     Args:
         source_path (str, pathlike): The path to the original PICMUS file.
@@ -112,37 +110,37 @@ def convert_picmus(source_path, output_path, overwrite=False):
     )
 
 
-def
-    """
-
-
-        "Converts the PICMUS database to the zea format. The "
-        "src_dir is scanned for hdf5 files ending in iq or rf. These files are"
-        "converted and stored in output_dir under the same relative path as "
-        "they came from in src_dir."
-        )
-    )
-    parser.add_argument(
-        "--src_dir",
-        type=str,
-        help="Source directory where the original PICMUS data is stored.",
-    )
-
-    parser.add_argument("--output_dir", type=str, help="Output directory of the converted database")
-    return parser.parse_args()
-
-
-if __name__ == "__main__":
-    # Parse the arguments
-    args = get_args()
+def convert_picmus(args):
+    """
+    Convert PICMUS HDF5 files under a source directory into the zea dataset format,
+    preserving relative paths in the destination.
 
+    Args:
+        args (argparse.Namespace): An object with the following attributes.
+
+        - src (str or Path): Path to the PICMUS source directory or archive.
+        - dst (str or Path): Path to the output directory where converted .hdf5 files
+          will be written.
+
+    Note:
+        - Scans `src` (after unzipping if needed) for `.hdf5` files containing IQ/RF data and
+          converts each to the zea format.
+        - Preserves the relative directory structure under `dst` and places each converted
+          file in its own subdirectory named after the file stem.
+        - Fails fast if `src` does not exist or if `dst` already exists.
+    """
     # Get the source and output directories
-    base_dir = Path(args.
-
+    base_dir = Path(args.src)
+    dst = Path(args.dst)
 
     # Check if the source directory exists and create the output directory
     assert base_dir.exists(), f"Source directory {base_dir} does not exist."
-
+
+    assert not dst.exists(), f"Destination directory {dst} already exists. Exiting."
+
+    # Unzip the PICMUS dataset if necessary
+    base_dir = unzip(base_dir, "picmus")
+    dst.mkdir(parents=True, exist_ok=False)
 
     # Traverse the source directory and convert all files
     for file in base_dir.rglob("*.hdf5"):
@@ -151,9 +149,8 @@ if __name__ == "__main__":
         # Select only the data files that actually contain rf or iq data
         # (There are also files containing the geometry of the phantoms or
         # images)
-
-
-        ) and "img" in str_file:
+        is_data_file = str_file.endswith("iq.hdf5") or str_file.endswith("rf.hdf5")
+        if not is_data_file or "img" in str_file:
             log.info("Skipping %s", file.name)
             continue
 
@@ -161,7 +158,7 @@ if __name__ == "__main__":
 
         # Find the folder relative to the base directory to retain the
         # folder structure in the output directory
-        output_file =
+        output_file = dst / file.relative_to(base_dir)
 
         # Define the output path
         # NOTE: I added output_file.stem to put each file in its own
@@ -175,7 +172,7 @@ if __name__ == "__main__":
         # Create the output directory if it does not exist already
         output_file.parent.mkdir(parents=True, exist_ok=True)
 
-
+            convert(file, output_file, overwrite=True)
         except Exception:
             output_file.parent.rmdir()
             log.error("Failed to convert %s", str_file)
zea/data/convert/utils.py
ADDED

@@ -0,0 +1,86 @@
+import zipfile
+from pathlib import Path
+
+import imageio
+import numpy as np
+from PIL import Image
+
+from zea import log
+
+
+def load_avi(file_path, mode="L"):
+    """Load a .avi file and return a numpy array of frames.
+
+    Args:
+        file_path (str): The path to the video file.
+        mode (str, optional): Color mode: "L" (grayscale) or "RGB".
+            Defaults to "L".
+
+    Returns:
+        numpy.ndarray: Array of frames (num_frames, H, W) or (num_frames, H, W, C)
+    """
+    frames = []
+    with imageio.get_reader(file_path) as reader:
+        for frame in reader:
+            img = Image.fromarray(frame)
+            img = img.convert(mode)
+            img = np.array(img)
+            frames.append(img)
+    return np.stack(frames)
+
+
+def unzip(src: str | Path, dataset: str) -> Path:
+    """
+    Checks if the data folder exists in src;
+    otherwise, unzips dataset.zip in src.
+
+    Args:
+        src (str | Path): The source directory containing the zip file or unzipped folder.
+        dataset (str): The name of the dataset to unzip.
+            Options are "picmus", "camus", "echonet", "echonetlvh".
+
+    Returns:
+        Path: The path to the unzipped dataset directory.
+    """
+    src = Path(src)
+    if dataset == "picmus":
+        zip_name = "picmus.zip"
+        folder_name = "archive_to_download"
+        unzip_dir = src / folder_name
+    elif dataset == "camus":
+        zip_name = "CAMUS_public.zip"
+        folder_name = "CAMUS_public"
+        unzip_dir = src / folder_name
+    elif dataset == "echonet":
+        zip_name = "EchoNet-Dynamic.zip"
+        folder_name = "EchoNet-Dynamic"
+        unzip_dir = src / folder_name / "Videos"
+    elif dataset == "echonetlvh":
+        zip_name = "EchoNet-LVH.zip"
+        folder_name = "Batch1"
+        unzip_dir = src
+    else:
+        raise ValueError(f"Dataset {dataset} not recognized for unzip.")
+
+    if (src / folder_name).exists():
+        if dataset == "echonetlvh":
+            # EchoNetLVH dataset unzips into four folders. Check they all exist.
+            assert (src / "Batch2").exists(), f"Missing Batch2 folder in {src}."
+            assert (src / "Batch3").exists(), f"Missing Batch3 folder in {src}."
+            assert (src / "Batch4").exists(), f"Missing Batch4 folder in {src}."
+            assert (src / "MeasurementsList.csv").exists(), (
+                f"Missing MeasurementsList.csv in {src}."
+            )
+            log.info(f"Found Batch1, Batch2, Batch3, Batch4 and MeasurementsList.csv in {src}.")
+        return unzip_dir
+
+    zip_path = src / zip_name
+    if not zip_path.exists():
+        raise FileNotFoundError(f"Could not find {zip_name} or {folder_name} folder in {src}.")
+
+    log.info(f"Unzipping {zip_path} to {src}...")
+    with zipfile.ZipFile(zip_path, "r") as zip_ref:
+        zip_ref.extractall(src)
+    log.info("Unzipping completed.")
+    log.info(f"Starting conversion from {src / folder_name}.")
+    return unzip_dir