zea 0.0.7__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. zea/__init__.py +1 -1
  2. zea/backend/tensorflow/dataloader.py +0 -4
  3. zea/beamform/pixelgrid.py +1 -1
  4. zea/data/__init__.py +0 -9
  5. zea/data/augmentations.py +221 -28
  6. zea/data/convert/__init__.py +1 -6
  7. zea/data/convert/__main__.py +123 -0
  8. zea/data/convert/camus.py +99 -39
  9. zea/data/convert/echonet.py +183 -82
  10. zea/data/convert/echonetlvh/README.md +2 -3
  11. zea/data/convert/echonetlvh/{convert_raw_to_usbmd.py → __init__.py} +173 -102
  12. zea/data/convert/echonetlvh/manual_rejections.txt +73 -0
  13. zea/data/convert/echonetlvh/precompute_crop.py +43 -64
  14. zea/data/convert/picmus.py +37 -40
  15. zea/data/convert/utils.py +86 -0
  16. zea/data/convert/{matlab.py → verasonics.py} +33 -61
  17. zea/data/data_format.py +124 -4
  18. zea/data/dataloader.py +12 -7
  19. zea/data/datasets.py +109 -70
  20. zea/data/file.py +91 -82
  21. zea/data/file_operations.py +496 -0
  22. zea/data/preset_utils.py +1 -1
  23. zea/display.py +7 -8
  24. zea/internal/checks.py +6 -12
  25. zea/internal/operators.py +4 -0
  26. zea/io_lib.py +108 -160
  27. zea/models/__init__.py +1 -1
  28. zea/models/diffusion.py +62 -11
  29. zea/models/lv_segmentation.py +2 -0
  30. zea/ops.py +398 -158
  31. zea/scan.py +18 -8
  32. zea/tensor_ops.py +82 -62
  33. zea/tools/fit_scan_cone.py +90 -160
  34. zea/tracking/__init__.py +16 -0
  35. zea/tracking/base.py +94 -0
  36. zea/tracking/lucas_kanade.py +474 -0
  37. zea/tracking/segmentation.py +110 -0
  38. zea/utils.py +11 -2
  39. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/METADATA +3 -1
  40. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/RECORD +43 -35
  41. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/WHEEL +0 -0
  42. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/entry_points.txt +0 -0
  43. {zea-0.0.7.dist-info → zea-0.0.8.dist-info}/licenses/LICENSE +0 -0
zea/tools/fit_scan_cone.py CHANGED
@@ -11,20 +11,13 @@ This module provides functionality to:
 """
 
 import argparse
-import os
 from pathlib import Path
 
-if __name__ == "__main__":
-    os.environ["KERAS_BACKEND"] = (
-        "numpy"  # recommend using numpy for this since some line fitting is performed on CPU
-    )
-
-    import keras
 import matplotlib.pyplot as plt
 import numpy as np
-from keras import ops
 
 from zea import log
+from zea.tensor_ops import translate
 
 
 def filter_edge_points_by_boundary(edge_points, is_left=True, min_cone_half_angle_deg=20):
@@ -33,35 +26,32 @@ def filter_edge_points_by_boundary(edge_points, is_left=True, min_cone_half_angl
     Enforces minimum cone angle constraint to ensure valid cone shapes.
 
     Args:
-        edge_points: Tensor of shape (N, 2) containing (x, y) coordinates of edge points
+        edge_points: Array of shape (N, 2) containing (x, y) coordinates of edge points
         is_left: Boolean indicating whether these are left (True) or right (False) edge points
         min_cone_half_angle_deg: Minimum expected half-angle of the cone in degrees
 
     Returns:
-        Tensor of shape (M, 2) containing filtered edge points that satisfy the boundary constraints
+        Array of shape (M, 2) containing filtered edge points that satisfy the boundary constraints
     """
-    if ops.shape(edge_points)[0] == 0:
+    if len(edge_points) == 0:
        return edge_points
 
     # Convert minimum angle to slope
-    min_slope = ops.tan(np.radians(min_cone_half_angle_deg))
+    min_slope = np.tan(np.radians(min_cone_half_angle_deg))
 
     # Sort by y coordinate (top to bottom)
-    sorted_indices = ops.argsort(edge_points[:, 1])
-    sorted_points = ops.take(edge_points, sorted_indices, axis=0)
+    sorted_indices = np.argsort(edge_points[:, 1])
+    sorted_points = edge_points[sorted_indices]
 
     filtered_points = []
 
-    # Convert to numpy for the iterative logic (this part is hard to vectorize)
-    sorted_points_np = ops.convert_to_numpy(sorted_points)
-
-    for i, point in enumerate(sorted_points_np):
+    for i, point in enumerate(sorted_points):
         x, y = point
         is_boundary_point = True
 
         # Check all points above this one
         for j in range(i):
-            above_x, above_y = sorted_points_np[j]
+            above_x, above_y = sorted_points[j]
             dy = y - above_y
             min_dx_required = min_slope * dy
@@ -79,7 +69,7 @@ def filter_edge_points_by_boundary(edge_points, is_left=True, min_cone_half_angl
         if is_boundary_point:
             filtered_points.append(point)
 
-    return ops.convert_to_tensor(filtered_points) if filtered_points else ops.zeros((0, 2))
+    return np.array(filtered_points) if filtered_points else np.zeros((0, 2))
 
 
 def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
@@ -93,7 +83,7 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
     5. Calculates cone parameters including apex position, opening angle, and crop boundaries
 
     Args:
-        image: 2D Keras tensor (grayscale image)
+        image: 2D numpy array (grayscale image)
         min_cone_half_angle_deg: Minimum expected half-angle of the cone in degrees
         threshold: Threshold for binary image (pixels above this are considered data)
@@ -110,48 +100,29 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
     Raises:
         ValueError: If input image is not 2D or cone detection fails
     """
-    try:
-        import cv2
-    except ImportError as exc:
-        raise ImportError(
-            "OpenCV is required for cone detection and visualization. "
-            "Please install it with 'pip install opencv-python' or "
-            "'pip install opencv-python-headless'."
-        ) from exc
 
-    if len(ops.shape(image)) != 2:
+    if len(image.shape) != 2:
         raise ValueError("Input image must be 2D (grayscale)")
 
-    # Ensure image is in proper range for cv2.threshold
-    if image.dtype != "uint8":
-        image = ops.cast(image * 255, "uint8")
+    h, w = image.shape
 
-    h, w = ops.shape(image)
-
-    # OpenCV threshold requires numpy array
-    image_np = ops.convert_to_numpy(image)
-    _, thresh_np = cv2.threshold(image_np, threshold, 255, cv2.THRESH_BINARY)
-    thresh = ops.convert_to_tensor(thresh_np)
+    # Apply threshold
+    thresh = np.where(image > threshold, 255, 0)
 
     # Find non-zero pixel bounds
-    non_zero_indices = ops.convert_to_tensor(ops.where(thresh > 0))
-    if ops.shape(non_zero_indices)[0] == 0:
+    non_zero_indices = np.argwhere(thresh > 0)
+    if len(non_zero_indices) == 0:
         return None
 
-    min_y = ops.min(non_zero_indices[:, 0])
-    max_y = ops.max(non_zero_indices[:, 0])
+    min_y = np.min(non_zero_indices[:, 0])
+    max_y = np.max(non_zero_indices[:, 0])
 
     # Collect left and right edge points
     left_edge_points = []
     right_edge_points = []
 
-    # Convert back to numpy for row-wise processing (this is hard to vectorize efficiently)
-    thresh_np = ops.convert_to_numpy(thresh)
-    min_y_np = int(ops.convert_to_numpy(min_y))
-    max_y_np = int(ops.convert_to_numpy(max_y))
-
-    for y in range(min_y_np, max_y_np + 1):
-        row = thresh_np[y, :]
+    for y in range(min_y, max_y + 1):
+        row = thresh[y, :]
         non_zero_x = np.where(row > 0)[0]
 
         if len(non_zero_x) > 0:
@@ -163,8 +134,8 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
     if len(left_edge_points) < 10:
         return None
 
-    left_edge_points = ops.convert_to_tensor(left_edge_points, dtype="float32")
-    right_edge_points = ops.convert_to_tensor(right_edge_points, dtype="float32")
+    left_edge_points = np.array(left_edge_points, dtype=np.float32)
+    right_edge_points = np.array(right_edge_points, dtype=np.float32)
 
     # Filter edge points to keep only boundary points
     filtered_left_points = filter_edge_points_by_boundary(
@@ -176,50 +147,38 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
         min_cone_half_angle_deg=min_cone_half_angle_deg,
     )
 
-    if ops.shape(filtered_left_points)[0] < 3 or ops.shape(filtered_right_points)[0] < 3:
+    if len(filtered_left_points) < 3 or len(filtered_right_points) < 3:
         return None
 
     # Fit lines using least squares: x = a + b*y
-    # Convert to numpy for lstsq
-    filtered_left_np = ops.convert_to_numpy(filtered_left_points)
-    filtered_right_np = ops.convert_to_numpy(filtered_right_points)
-
     # Left line
-    A_left = np.vstack([np.ones(len(filtered_left_np)), filtered_left_np[:, 1]]).T
-    left_coeffs, _, _, _ = np.linalg.lstsq(A_left, filtered_left_np[:, 0], rcond=None)
+    A_left = np.vstack([np.ones(len(filtered_left_points)), filtered_left_points[:, 1]]).T
+    left_coeffs, _, _, _ = np.linalg.lstsq(A_left, filtered_left_points[:, 0], rcond=None)
     left_a, left_b = left_coeffs
 
     # Right line
-    A_right = np.vstack([np.ones(len(filtered_right_np)), filtered_right_np[:, 1]]).T
-    right_coeffs, _, _, _ = np.linalg.lstsq(A_right, filtered_right_np[:, 0], rcond=None)
+    A_right = np.vstack([np.ones(len(filtered_right_points)), filtered_right_points[:, 1]]).T
+    right_coeffs, _, _, _ = np.linalg.lstsq(A_right, filtered_right_points[:, 0], rcond=None)
     right_a, right_b = right_coeffs
 
-    # Convert back to tensors
-    left_a = ops.convert_to_tensor(left_a)
-    left_b = ops.convert_to_tensor(left_b)
-    right_a = ops.convert_to_tensor(right_a)
-    right_b = ops.convert_to_tensor(right_b)
-
     # Calculate apex as intersection of fitted lines
-    if ops.abs(left_b - right_b) < 1e-6:  # Lines are parallel
+    if np.abs(left_b - right_b) < 1e-6:  # Lines are parallel
         return None
 
     apex_y = (right_a - left_a) / (left_b - right_b)
     apex_x = left_a + left_b * apex_y
 
     # Calculate cone height
-    max_y = ops.cast(max_y, apex_y.dtype)
     cone_height = max_y - apex_y
 
     # Calculate opening angle from the line slopes
-    # Convert slopes to angles and calculate opening angle
-    left_angle = ops.arctan(left_b)  # angle of left line from horizontal
-    right_angle = ops.arctan(right_b)  # angle of right line from horizontal
-    opening_angle = ops.abs(left_angle - right_angle)
+    left_angle = np.arctan(left_b)  # angle of left line from horizontal
+    right_angle = np.arctan(right_b)  # angle of right line from horizontal
+    opening_angle = np.abs(left_angle - right_angle)
 
     min_non_zero_pixel_idx = (0, 0)
-    for i in reversed(range(0, thresh_np.shape[0])):
-        row = thresh_np[i]
+    for i in reversed(range(0, thresh.shape[0])):
+        row = thresh[i]
         non_zero_pixel_col = np.where(row > 0)[0]
         if np.any(non_zero_pixel_col):
             min_non_zero_pixel_idx = (i, non_zero_pixel_col[0])
@@ -227,15 +186,13 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
 
     circle_radius = float(
         np.sqrt(
-            (min_non_zero_pixel_idx[1] - ops.convert_to_numpy(apex_x)) ** 2
-            + (min_non_zero_pixel_idx[0] - ops.convert_to_numpy(apex_y)) ** 2
+            (min_non_zero_pixel_idx[1] - apex_x) ** 2 + (min_non_zero_pixel_idx[0] - apex_y) ** 2
         )
     )
-    circle_center_x = float(ops.convert_to_numpy(apex_x))
-    circle_center_y = float(ops.convert_to_numpy(apex_y))
+    circle_center_x = float(apex_x)
+    circle_center_y = float(apex_y)
 
     # Calculate where the circle intersects with the cone lines
-    # For line: x = a + b*y and circle: (x - cx)^2 + (y - cy)^2 = r^2
     def line_circle_intersection(a, b, cx, cy, r):
         """Find intersection of line x = a + b*y with circle centered at (cx, cy) with radius r"""
         # Substitute line equation into circle equation
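For reference, substituting the line x = a + b·y into the circle equation reduces this helper to a quadratic in y; a sketch of the algebra (the elided body presumably solves it via the standard discriminant):

    % line: x = a + b*y,  circle: (x - c_x)^2 + (y - c_y)^2 = r^2
    (1 + b^2)\,y^2 + 2\bigl(b\,(a - c_x) - c_y\bigr)\,y + \bigl((a - c_x)^2 + c_y^2 - r^2\bigr) = 0
    % Real intersections require a non-negative discriminant; the root with the
    % larger y (image rows grow downward) is the lower sector boundary.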
@@ -261,30 +218,16 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
 
     # Find intersection points
     left_intersect = line_circle_intersection(
-        ops.convert_to_numpy(left_a),
-        ops.convert_to_numpy(left_b),
-        circle_center_x,
-        circle_center_y,
-        circle_radius,
+        left_a, left_b, circle_center_x, circle_center_y, circle_radius
     )
     right_intersect = line_circle_intersection(
-        ops.convert_to_numpy(right_a),
-        ops.convert_to_numpy(right_b),
-        circle_center_x,
-        circle_center_y,
-        circle_radius,
+        right_a, right_b, circle_center_x, circle_center_y, circle_radius
     )
 
     if left_intersect is None or right_intersect is None:
         # Fallback to line endpoints at max_y
-        left_y_bottom, left_x_bottom = (
-            ops.convert_to_numpy(max_y),
-            ops.convert_to_numpy(left_a + left_b * max_y),
-        )
-        right_y_bottom, right_x_bottom = (
-            ops.convert_to_numpy(max_y),
-            ops.convert_to_numpy(right_a + right_b * max_y),
-        )
+        left_y_bottom, left_x_bottom = max_y, left_a + left_b * max_y
+        right_y_bottom, right_x_bottom = max_y, right_a + right_b * max_y
     else:
         left_y_bottom, left_x_bottom = left_intersect
         right_y_bottom, right_x_bottom = right_intersect
@@ -297,7 +240,7 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
 
     crop_left = int(left_x_bottom) - padding_x
     crop_right = int(right_x_bottom) + padding_x
-    crop_top = int(ops.convert_to_numpy(apex_y)) - padding_y
+    crop_top = int(apex_y) - padding_y
     crop_bottom = int(sector_bottom)
 
     # Calculate final dimensions
@@ -311,50 +254,42 @@ def detect_cone_parameters(image, min_cone_half_angle_deg=20, threshold=15):
     sector_right_y = right_y_bottom
 
     # Calculate symmetry ratio (how symmetric the cone is)
-    symmetry_ratio = float(
-        ops.convert_to_numpy(
-            ops.abs(left_b + right_b) / (ops.abs(left_b) + ops.abs(right_b) + 1e-8)
-        )
-    )
+    symmetry_ratio = float(np.abs(left_b + right_b) / (np.abs(left_b) + np.abs(right_b) + 1e-8))
 
     # Calculate data coverage in the crop region
-    h_np = int(ops.convert_to_numpy(h))
-    w_np = int(ops.convert_to_numpy(w))
     crop_left_clipped = max(0, crop_left)
-    crop_right_clipped = min(w_np, crop_right)
+    crop_right_clipped = min(w, crop_right)
     crop_top_clipped = max(0, crop_top)
-    crop_bottom_clipped = min(h_np, crop_bottom)
+    crop_bottom_clipped = min(h, crop_bottom)
 
     data_coverage = 0.0
     assert crop_left_clipped < crop_right_clipped and crop_top_clipped < crop_bottom_clipped
-    crop_region = thresh_np[
-        crop_top_clipped:crop_bottom_clipped, crop_left_clipped:crop_right_clipped
-    ]
+    crop_region = thresh[crop_top_clipped:crop_bottom_clipped, crop_left_clipped:crop_right_clipped]
     data_coverage = float(np.sum(crop_region > 0) / crop_region.size)
 
     return {
-        "apex_x": float(ops.convert_to_numpy(apex_x)),
-        "apex_y": float(ops.convert_to_numpy(apex_y)),
+        "apex_x": float(apex_x),
+        "apex_y": float(apex_y),
         "crop_left": crop_left,
         "crop_right": crop_right,
         "crop_top": crop_top,
         "crop_bottom": crop_bottom,
-        "original_width": int(ops.convert_to_numpy(w)),
-        "original_height": int(ops.convert_to_numpy(h)),
+        "original_width": int(w),
+        "original_height": int(h),
         # Additional parameters for debug and analysis
-        "cone_height": float(ops.convert_to_numpy(cone_height)),
-        "opening_angle": float(ops.convert_to_numpy(opening_angle)),
+        "cone_height": float(cone_height),
+        "opening_angle": float(opening_angle),
         "new_width": new_width,
         "new_height": new_height,
         "symmetry_ratio": symmetry_ratio,
-        "first_data_row": int(ops.convert_to_numpy(min_y)),
+        "first_data_row": int(min_y),
         "data_coverage": data_coverage,
-        "apex_above_image": bool(ops.convert_to_numpy(apex_y) < 0),
+        "apex_above_image": bool(apex_y < 0),
         # Line parameters for reconstruction if needed
-        "left_slope": float(ops.convert_to_numpy(left_b)),
-        "right_slope": float(ops.convert_to_numpy(right_b)),
-        "left_intercept": float(ops.convert_to_numpy(left_a)),
-        "right_intercept": float(ops.convert_to_numpy(right_a)),
+        "left_slope": float(left_b),
+        "right_slope": float(right_b),
+        "left_intercept": float(left_a),
+        "right_intercept": float(right_a),
         # Circle parameters for the bottom boundary
         "circle_center_x": circle_center_x,
         "circle_center_y": circle_center_y,
@@ -378,11 +313,11 @@ def crop_and_center_cone(image, cone_params):
     3. Centers the apex horizontally in the final image
 
     Args:
-        image: 2D Keras tensor (grayscale image)
+        image: 2D numpy array (grayscale image)
         cone_params: Dictionary of cone parameters from detect_cone_parameters()
 
     Returns:
-        Keras tensor of the cropped and centered image with the cone apex at the top center
+        numpy array of the cropped and centered image with the cone apex at the top center
     """
     # Get crop boundaries
     crop_left = cone_params["crop_left"]
@@ -395,76 +330,70 @@ def crop_and_center_cone(image, cone_params):
         cropped = image[0:crop_bottom, crop_left:crop_right]
         # Add top padding
         top_padding = -crop_top
-        cropped_width = ops.shape(cropped)[1]
-        top_pad = ops.zeros((top_padding, cropped_width), dtype=cropped.dtype)
-        cropped = ops.concatenate([top_pad, cropped], axis=0)
+        cropped_width = cropped.shape[1]
+        top_pad = np.zeros((top_padding, cropped_width), dtype=cropped.dtype)
+        cropped = np.concatenate([top_pad, cropped], axis=0)
     else:
         cropped = image[crop_top:crop_bottom, crop_left:crop_right]
 
     # Now handle horizontal centering
     # Calculate where the apex is in the cropped image
     apex_x_in_crop = cone_params["apex_x"] - crop_left
-    cropped_height = ops.shape(cropped)[0]
-    cropped_width = ops.shape(cropped)[1]
+    cropped_height = cropped.shape[0]
+    cropped_width = cropped.shape[1]
 
     # Calculate the target center position
-    target_center_x = ops.cast(cropped_width / 2, "float32")
+    target_center_x = cropped_width / 2
 
     # Calculate how much padding we need on each side
-    # We want: left_padding + apex_x_in_crop = final_width / 2
-    # And: final_width = cropped_width + left_padding + right_padding
-    # For symmetric padding: left_padding = right_padding
-    # So: left_padding + apex_x_in_crop = (cropped_width + 2*left_padding) / 2
-    # Solving: left_padding = cropped_width/2 - apex_x_in_crop
-
     left_padding_needed = target_center_x - apex_x_in_crop
 
     # Ensure we have non-negative padding
-    left_padding = ops.maximum(0, ops.cast(left_padding_needed, "int32"))
-    right_padding = ops.maximum(0, ops.cast(-left_padding_needed, "int32"))
+    left_padding = max(0, int(left_padding_needed))
+    right_padding = max(0, int(-left_padding_needed))
 
     # Apply horizontal padding if needed
     if left_padding > 0 or right_padding > 0:
         if left_padding > 0:
-            left_pad = ops.zeros((cropped_height, left_padding), dtype=cropped.dtype)
-            cropped = ops.concatenate([left_pad, cropped], axis=1)
+            left_pad = np.zeros((cropped_height, left_padding), dtype=cropped.dtype)
+            cropped = np.concatenate([left_pad, cropped], axis=1)
 
         if right_padding > 0:
-            right_pad = ops.zeros((cropped_height, right_padding), dtype=cropped.dtype)
-            cropped = ops.concatenate([cropped, right_pad], axis=1)
+            right_pad = np.zeros((cropped_height, right_padding), dtype=cropped.dtype)
+            cropped = np.concatenate([cropped, right_pad], axis=1)
 
     return cropped
 
 
 def fit_and_crop_around_scan_cone(
-    image_tensor, min_cone_half_angle_deg=20, threshold=15, return_params=False
+    image, image_range, min_cone_half_angle_deg=20, threshold=15, return_params=False
 ):
     """
     Detect scan cone in ultrasound image and return cropped/padded image with centered apex.
 
     Args:
-        image_tensor: Keras tensor (2D grayscale image)
+        image: numpy array (2D grayscale image)
+        image_range: tuple (vmin, vmax) for display scaling
         min_cone_half_angle_deg: Minimum expected half-angle of the cone in degrees (default: 20)
-        threshold: Threshold for binary image - pixels above this are considered data (default: 15)
+        threshold: Threshold for binary image - pixels above this are considered data.
+            This is always on a scale of 0-255 (default: 15).
         return_params: If True, also return cone parameters (default: False)
 
     Returns:
-        - If return_params is False: Keras tensor (cropped and padded image with apex at center)
-        - If return_params is True: Tuple of (cropped_tensor, cone_parameters_dict)
+        - If return_params is False: numpy array (cropped and padded image with apex at center)
+        - If return_params is True: Tuple of (cropped_array, cone_parameters_dict)
 
     Raises:
         ValueError: If cone detection fails or image is not 2D
     """
-    if keras.backend.backend() != "numpy":
-        log.info(f"❗️ It is recommended to use {log.blue('numpy')} backend for `fit_scan_cone()`.")
-
     # Ensure image is 2D
-    if len(ops.shape(image_tensor)) != 2:
-        raise ValueError(f"Input must be 2D grayscale image, got shape {ops.shape(image_tensor)}")
+    if len(image.shape) != 2:
+        raise ValueError(f"Input must be 2D grayscale image, got shape {image.shape}")
 
     # Detect cone parameters
+    threshold = translate(threshold, range_from=(0, 255), range_to=image_range)
     cone_params = detect_cone_parameters(
-        image_tensor,
+        image,
         min_cone_half_angle_deg=min_cone_half_angle_deg,
         threshold=threshold,
     )
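The new `translate` call rescales the 0-255 threshold into the image's own value range before detection. Assuming `translate` is a plain linear range map (a sketch for illustration, not the actual `zea.tensor_ops` implementation):

    def translate_sketch(value, range_from, range_to):
        """Linearly map value from range_from=(lo, hi) to range_to=(lo, hi)."""
        (f_lo, f_hi), (t_lo, t_hi) = range_from, range_to
        return t_lo + (value - f_lo) * (t_hi - t_lo) / (f_hi - f_lo)

    # e.g. a threshold of 15 on a 0-255 scale maps to ~0.059 for an image in (0, 1)
    print(translate_sketch(15, (0, 255), (0, 1)))  # 0.0588...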
@@ -473,7 +402,7 @@ def fit_and_crop_around_scan_cone(
         raise ValueError("Failed to detect ultrasound cone in image")
 
     # Crop and center the image
-    cropped_image = crop_and_center_cone(image_tensor, cone_params)
+    cropped_image = crop_and_center_cone(image, cone_params)
 
     if return_params:
         return cropped_image, cone_params
@@ -567,7 +496,6 @@ def visualize_scan_cone(image, cone_params, output_dir="output"):
         marker="*",  # Star marker
         markersize=15,
         color="#FFD700",  # Gold
-        # markeredgecolor="white",
         markeredgewidth=2,
         label="Cone apex",
     )
@@ -673,13 +601,14 @@ def main(avi_path):
     # Convert to grayscale
     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
-    # Convert to tensor
-    frame_tensor = ops.convert_to_tensor(frame)
-
     try:
         # Fit scan cone
         _, cone_params = fit_and_crop_around_scan_cone(
-            frame_tensor, min_cone_half_angle_deg=20, threshold=15, return_params=True
+            frame,
+            image_range=(0, 255),
+            min_cone_half_angle_deg=20,
+            threshold=15,
+            return_params=True,
         )
 
         # Create visualization
@@ -698,6 +627,7 @@ def get_args():
     parser.add_argument(
         "--input_file",
         type=str,
+        help="Path to input AVI file for visualization",
        required=True,
    )
    return parser.parse_args()
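Putting the new signature together: callers now pass the raw frame plus its value range, and the threshold is always interpreted on a 0-255 scale. A minimal usage sketch with a synthetic sector image (illustrative, not from the package's tests):

    import numpy as np
    from zea.tools.fit_scan_cone import fit_and_crop_around_scan_cone

    # Synthetic sector: a bright wedge opening downward from an apex near the top.
    h, w = 400, 600
    ys, xs = np.mgrid[0:h, 0:w]
    apex_y, apex_x = 20.0, 300.0
    inside = np.abs(xs - apex_x) <= np.tan(np.radians(35)) * (ys - apex_y)
    frame = np.where(inside, 200, 0).astype(np.uint8)

    cropped, params = fit_and_crop_around_scan_cone(
        frame,
        image_range=(0, 255),  # frame values already span 0-255
        return_params=True,
    )
    print(cropped.shape, round(params["apex_x"]), round(params["apex_y"]))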
zea/tracking/__init__.py ADDED
@@ -0,0 +1,16 @@
+"""Tracking module.
+
+This module provides point tracking algorithms for 2D and 3D data.
+
+Classes:
+    - BaseTracker: Abstract base class for trackers.
+    - LucasKanadeTracker: Pyramidal Lucas-Kanade tracker (2D/3D).
+    - SegmentationTracker: Segmentation-based tracker using contour matching.
+
+"""
+
+from .base import BaseTracker
+from .lucas_kanade import LucasKanadeTracker
+from .segmentation import SegmentationTracker
+
+__all__ = ["BaseTracker", "LucasKanadeTracker", "SegmentationTracker"]
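A minimal sketch of the new public API (frames here are synthetic; constructor arguments beyond `ndim` are tracker-specific and omitted, so exact defaults are an assumption):

    import numpy as np
    from zea.tracking import LucasKanadeTracker

    tracker = LucasKanadeTracker(ndim=2)  # pyramidal Lucas-Kanade, per the docstring

    # Two frames in which a small bright blob shifts by (1, 2) pixels.
    frame0 = np.zeros((64, 64), dtype=np.float32)
    frame0[30:34, 30:34] = 1.0
    frame1 = np.roll(frame0, shift=(1, 2), axis=(0, 1))

    points = np.array([[32.0, 32.0]])  # one (y, x) seed point
    new_points = tracker.track(frame0, frame1, points)  # expected near [[33., 34.]]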
zea/tracking/base.py ADDED
@@ -0,0 +1,94 @@
+"""Base tracker class for point tracking algorithms."""
+
+from abc import ABC, abstractmethod
+from typing import List
+
+from keras import ops
+
+
+class BaseTracker(ABC):
+    """Abstract base class for point tracking algorithms.
+
+    This class defines the interface for tracking algorithms in the zea package.
+    Implementations should handle both 2D and 3D tracking where applicable.
+
+    Args:
+        ndim: Number of dimensions (2 for 2D, 3 for 3D).
+        **kwargs: Tracker-specific parameters.
+    """
+
+    def __init__(self, ndim: int = 2, **kwargs):
+        """Initialize the tracker with parameters."""
+        self.ndim = ndim
+
+        if self.ndim not in [2, 3]:
+            raise ValueError(f"Only 2D and 3D tracking supported, got {ndim}D")
+
+    @abstractmethod
+    def track(
+        self,
+        prev_frame,
+        next_frame,
+        points,
+    ):
+        """
+        Track points from prev_frame to next_frame.
+
+        Args:
+            prev_frame: Previous frame/volume of shape (H, W) or (D, H, W).
+            next_frame: Next frame/volume of shape (H, W) or (D, H, W).
+            points: Points to track, shape (N, ndim) in (y, x) or (z, y, x) format.
+
+        Returns:
+            new_points: Tracked point locations, shape (N, ndim).
+        """
+        pass
+
+    def track_sequence(
+        self,
+        frames: List,
+        initial_points,
+    ) -> List:
+        """
+        Track points through a sequence of frames.
+
+        Args:
+            frames: List of frames/volumes to track through.
+            initial_points: Starting points in first frame, shape (N, ndim).
+
+        Returns:
+            List of N arrays, where each array has shape (T, ndim) containing
+            the trajectory of one point through all T frames.
+
+        """
+
+        n_frames = len(frames)
+        n_points = int(ops.shape(initial_points)[0])
+
+        frames_t = [ops.convert_to_tensor(f, dtype="float32") for f in frames]
+        current_points = ops.convert_to_tensor(initial_points, dtype="float32")
+
+        trajectories = [ops.zeros((n_frames, self.ndim), dtype="float32") for _ in range(n_points)]
+
+        # Set initial positions
+        for i in range(n_points):
+            trajectories[i] = ops.scatter_update(
+                trajectories[i], [[0]], ops.expand_dims(current_points[i], 0)
+            )
+
+        # Track frame by frame
+        for t in range(n_frames - 1):
+            new_points = self.track(frames_t[t], frames_t[t + 1], current_points)
+
+            for i in range(n_points):
+                trajectories[i] = ops.scatter_update(
+                    trajectories[i], [[t + 1]], ops.expand_dims(new_points[i], 0)
+                )
+
+            current_points = new_points
+
+        return trajectories
+
+    def __repr__(self):
+        """String representation of the tracker."""
+        return f"{self.__class__.__name__}(ndim={self.ndim})"
+ return f"{self.__class__.__name__}(ndim={self.ndim})"