byotrack 1.2.0.dev0__tar.gz → 1.2.0.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/PKG-INFO +2 -2
  2. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/__init__.py +1 -1
  3. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/detector/detections.py +57 -2
  4. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/dataset/ctc.py +5 -3
  5. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/frame_by_frame/base.py +7 -0
  6. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/frame_by_frame/kalman_linker.py +25 -3
  7. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/frame_by_frame/koft.py +21 -2
  8. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/frame_by_frame/nearest_neighbor.py +8 -1
  9. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/metrics/ctc.py +14 -2
  10. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack.egg-info/PKG-INFO +2 -2
  11. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack.egg-info/requires.txt +1 -1
  12. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/setup.cfg +1 -1
  13. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/LICENSE +0 -0
  14. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/README.md +0 -0
  15. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/__init__.py +0 -0
  16. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/detector/__init__.py +0 -0
  17. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/detector/detector.py +0 -0
  18. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/features_extractor.py +0 -0
  19. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/linker.py +0 -0
  20. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/optical_flow/__init__.py +0 -0
  21. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/optical_flow/optical_flow.py +0 -0
  22. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/parameters.py +0 -0
  23. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/refiner.py +0 -0
  24. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/tracker.py +0 -0
  25. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/api/tracks.py +0 -0
  26. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/dataset/__init__.py +0 -0
  27. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/example_data.py +0 -0
  28. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/fiji/__init__.py +0 -0
  29. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/fiji/io.py +0 -0
  30. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/fiji/run.py +0 -0
  31. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/icy/__init__.py +0 -0
  32. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/icy/io.py +0 -0
  33. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/icy/run.py +0 -0
  34. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/__init__.py +0 -0
  35. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/detector/__init__.py +0 -0
  36. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/detector/stardist.py +0 -0
  37. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/detector/wavelet.py +0 -0
  38. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/__init__.py +0 -0
  39. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/frame_by_frame/__init__.py +0 -0
  40. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/frame_by_frame/greedy_lap.py +0 -0
  41. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/icy_emht/__init__.py +0 -0
  42. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/icy_emht/emht_protocol.xml +0 -0
  43. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/icy_emht/emht_protocol_with_full_specs.xml +0 -0
  44. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/icy_emht/icy_emht.py +0 -0
  45. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/trackmate/__init__.py +0 -0
  46. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/trackmate/_trackmate.py +0 -0
  47. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/linker/trackmate/trackmate.py +0 -0
  48. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/optical_flow/__init__.py +0 -0
  49. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/optical_flow/opencv.py +0 -0
  50. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/optical_flow/skimage.py +0 -0
  51. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/__init__.py +0 -0
  52. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/cleaner.py +0 -0
  53. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/interpolater.py +0 -0
  54. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/propagation.py +0 -0
  55. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/stitching/__init__.py +0 -0
  56. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/stitching/dist_stitcher.py +0 -0
  57. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/implementation/refiner/stitching/emc2.py +0 -0
  58. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/metrics/__init__.py +0 -0
  59. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/py.typed +0 -0
  60. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/utils.py +0 -0
  61. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/video/__init__.py +0 -0
  62. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/video/reader.py +0 -0
  63. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/video/transforms.py +0 -0
  64. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/video/video.py +0 -0
  65. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack/visualize.py +0 -0
  66. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack.egg-info/SOURCES.txt +0 -0
  67. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack.egg-info/dependency_links.txt +0 -0
  68. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/byotrack.egg-info/top_level.txt +0 -0
  69. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/pyproject.toml +0 -0
  70. {byotrack-1.2.0.dev0 → byotrack-1.2.0.dev1}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: byotrack
3
- Version: 1.2.0.dev0
3
+ Version: 1.2.0.dev1
4
4
  Summary: Biological particle tracking with Python
5
5
  Home-page: https://github.com/raphaelreme/byotrack
6
6
  Author: Raphael Reme
@@ -28,7 +28,7 @@ Requires-Dist: platformdirs
28
28
  Requires-Dist: pylapy[scipy]
29
29
  Requires-Dist: tifffile[all]
30
30
  Requires-Dist: torch
31
- Requires-Dist: torch_tps
31
+ Requires-Dist: torch-tps
32
32
  Requires-Dist: tqdm
33
33
  Provides-Extra: full
34
34
  Requires-Dist: matplotlib; extra == "full"
@@ -83,4 +83,4 @@ from byotrack.api.tracks import Track
83
83
  from byotrack.video import Video, VideoTransformConfig
84
84
 
85
85
 
86
- __version__ = "1.2.0.dev0"
86
+ __version__ = "1.2.0.dev1"
@@ -38,7 +38,7 @@ def _check_confidence(confidence: torch.Tensor) -> None:
38
38
 
39
39
  @numba.njit(parallel=False)
40
40
  def _position_from_segmentation(segmentation: np.ndarray) -> np.ndarray:
41
- """Return the centre of each instance in the segmentation"""
41
+ """Return the center (mean) of each instance in the segmentation"""
42
42
  # A bit slower than previous version in 2D, but still fine
43
43
 
44
44
  n = segmentation.max()
@@ -57,6 +57,42 @@ def _position_from_segmentation(segmentation: np.ndarray) -> np.ndarray:
57
57
  return m_1.astype(np.float32) / m_0.reshape(-1, 1)
58
58
 
59
59
 
60
+ @numba.njit(parallel=False)
61
+ def _median_from_segmentation(segmentation: np.ndarray) -> np.ndarray:
62
+ """Return the center (median) of each instance in the segmentation"""
63
+
64
+ # Flatten space axes
65
+ flat_segmentation = segmentation.reshape(-1)
66
+
67
+ n = segmentation.max()
68
+ counts = np.zeros(n, dtype=np.uint)
69
+
70
+ for i in range(flat_segmentation.shape[0]):
71
+ instance = flat_segmentation[i] - 1
72
+ if instance != -1:
73
+ counts[instance] += 1
74
+
75
+ m = np.max(counts)
76
+
77
+ # Reset counts and allocate position
78
+ counts[:] = 0
79
+ positions = np.empty((n, m, len(segmentation.shape)), dtype=np.uint)
80
+
81
+ for index in np.ndindex(*segmentation.shape):
82
+ instance = segmentation[index] - 1
83
+ if instance != -1:
84
+ positions[instance, counts[instance]] = index
85
+ counts[instance] += 1
86
+
87
+ # Compute medians
88
+ median = np.zeros((n, len(segmentation.shape)), dtype=np.float32)
89
+ for instance in range(n):
90
+ for axis in range(len(segmentation.shape)):
91
+ median[instance, axis] = np.median(positions[instance, : counts[instance], axis])
92
+
93
+ return median
94
+
95
+
60
96
  @numba.njit
61
97
  def _bbox_from_segmentation(segmentation: np.ndarray) -> np.ndarray:
62
98
  # A bit slower than previous version in 2D, but still fine
@@ -242,12 +278,15 @@ class Detections:
242
278
  Shape: ([D, ]H, W), dtype: int32
243
279
  confidence (torch.Tensor): Confidence for each instance
244
280
  Shape: (N,), dtype: float32
281
+ use_median_position (bool): Use median instead of mean to compute positions from segmentation.
282
+ Default: True (Usually more robust)
245
283
 
246
284
  """
247
285
 
248
- def __init__(self, data: Dict[str, torch.Tensor], frame_id: int = -1) -> None:
286
+ def __init__(self, data: Dict[str, torch.Tensor], frame_id: int = -1, use_median_position=True) -> None:
249
287
  self.length = -1
250
288
  self.dim = -1
289
+ self._use_median_position = use_median_position
251
290
 
252
291
  if "position" in data:
253
292
  _check_position(data["position"])
@@ -321,6 +360,20 @@ class Detections:
321
360
 
322
361
  return confidence
323
362
 
363
+ @property
364
+ def use_median_position(self) -> bool:
365
+ return self._use_median_position
366
+
367
+ @use_median_position.setter
368
+ def use_median_position(self, value: bool) -> None:
369
+ if value is self._use_median_position:
370
+ return
371
+
372
+ self._use_median_position = value
373
+
374
+ # Invalidate computed positions
375
+ self._lazy_extrapolated_data.pop("position")
376
+
324
377
  def _extrapolate_shape(self) -> Tuple[int, ...]:
325
378
  """Extrapolate shape from data
326
379
 
@@ -354,6 +407,8 @@ class Detections:
354
407
 
355
408
  """
356
409
  if "segmentation" in self.data:
410
+ if self.use_median_position:
411
+ return torch.tensor(_median_from_segmentation(self.data["segmentation"].numpy()))
357
412
  return torch.tensor(_position_from_segmentation(self.data["segmentation"].numpy()))
358
413
 
359
414
  return self.data["bbox"][:, : self.dim] + (self.data["bbox"][:, self.dim :] - 1) / 2
@@ -40,6 +40,8 @@ class GroundTruthDetector(byotrack.BatchDetector):
40
40
 
41
41
  """
42
42
 
43
+ progress_bar_description = "Detections (Load from CTC format)"
44
+
43
45
  def detect(self, batch: np.ndarray) -> List[byotrack.Detections]:
44
46
  assert batch.shape[-1] == 1, "Multichannel segmentation are not supported"
45
47
  assert np.issubdtype(batch.dtype, np.integer)
@@ -131,7 +133,7 @@ def load_tracks( # pylint: disable=too-many-locals,too-many-branches,too-many-s
131
133
  segmentation_paths = path.glob("mask*.tif") if is_res else path.glob("man_*.tif")
132
134
  loader = byotrack.video.reader.FrameTiffLoader()
133
135
 
134
- for path_ in tqdm.tqdm(byotrack.utils.sorted_alphanumeric(segmentation_paths)):
136
+ for path_ in tqdm.tqdm(byotrack.utils.sorted_alphanumeric(segmentation_paths), desc="Loading CTC tracks"):
135
137
  if is_res:
136
138
  frame_id = int(path_.stem[len("mask") :])
137
139
  elif "seg" in path.stem:
@@ -225,7 +227,7 @@ def save_detections(
225
227
 
226
228
  os.makedirs(path, exist_ok=True)
227
229
 
228
- for frame_id, detections in enumerate(tqdm.tqdm(detections_sequence)):
230
+ for frame_id, detections in enumerate(tqdm.tqdm(detections_sequence, desc="Saving Detections to CTC")):
229
231
  segmentation = detections.segmentation.numpy().astype(np.uint16)
230
232
 
231
233
  if as_res:
@@ -472,7 +474,7 @@ def save_tracks( # pylint: disable=too-many-branches,too-many-locals,too-many-s
472
474
  else:
473
475
  assert shape is not None, "Without detections_sequence, you need to provide the shape argument"
474
476
 
475
- for frame_id in tqdm.trange(last + 1):
477
+ for frame_id in tqdm.trange(last + 1, desc="Saving tracks to CTC"):
476
478
  has_detections = len(detections_sequence) > frame_id
477
479
 
478
480
  disk_positions = []
@@ -221,6 +221,10 @@ class FrameByFrameLinkerParameters: # pylint: disable=too-many-instance-attribu
221
221
  association_method (AssociationMethod): The frame-by-frame association to use. See `AssociationMethod`.
222
222
  It can be provided as a string. (Choice: GREEDY, OPT_HARD, OPT_SMOOTH)
223
223
  Default: OPT_SMOOTH
224
+ anisotropy (Tuple[float, float, float]): Anisotropy of images (Ratio of the pixel sizes
225
+ for each axis, depth first). This will be used to scale distances.
226
+ Default: (1., 1., 1.)
227
+
224
228
  """
225
229
 
226
230
  def __init__(
@@ -230,6 +234,7 @@ class FrameByFrameLinkerParameters: # pylint: disable=too-many-instance-attribu
230
234
  n_valid=3,
231
235
  n_gap=3,
232
236
  association_method: Union[str, AssociationMethod] = AssociationMethod.OPT_SMOOTH,
237
+ anisotropy: Tuple[float, float, float] = (1.0, 1.0, 1.0),
233
238
  ):
234
239
  self.association_threshold = association_threshold
235
240
  self.n_valid = n_valid
@@ -239,11 +244,13 @@ class FrameByFrameLinkerParameters: # pylint: disable=too-many-instance-attribu
239
244
  if isinstance(association_method, AssociationMethod)
240
245
  else AssociationMethod[association_method.upper()]
241
246
  )
247
+ self.anisotropy = anisotropy
242
248
 
243
249
  association_threshold: float = 5.0
244
250
  n_valid: int = 3
245
251
  n_gap: int = 3
246
252
  association_method: AssociationMethod = AssociationMethod.OPT_SMOOTH
253
+ anisotropy: Tuple[float, float, float] = (1.0, 1.0, 1.0)
247
254
 
248
255
 
249
256
  class FrameByFrameLinker(byotrack.OnlineLinker):
@@ -1,6 +1,7 @@
1
1
  import dataclasses
2
2
  import enum
3
3
  from typing import List, Optional, Tuple, Union
4
+ import warnings
4
5
 
5
6
  import numpy as np
6
7
  import torch
@@ -86,6 +87,11 @@ class KalmanLinkerParameters(FrameByFrameLinkerParameters):
86
87
  association_method (AssociationMethod): The frame-by-frame association to use. See `AssociationMethod`.
87
88
  It can be provided as a string. (Choice: GREEDY, OPT_HARD, OPT_SMOOTH)
88
89
  Default: OPT_SMOOTH
90
+ anisotropy (Tuple[float, float, float]): Anisotropy of images (Ratio of the pixel sizes
91
+ for each axis, depth first). This will be used to scale distances. It will only impact
92
+ EUCLIDEAN[_SQ] costs. For probabilistic cost, anisotropy should be already integrated
93
+ in the stds of the kalman filter (providing one std for each dimension).
94
+ Default: (1., 1., 1.)
89
95
  cost_method (CostMethod): The cost method to use. It can be provided as a string.
90
96
  See `CostMethod`. It also indicates what is the correct unit of `association_threshold`.
91
97
  Default: EUCLIDEAN
@@ -96,7 +102,7 @@ class KalmanLinkerParameters(FrameByFrameLinkerParameters):
96
102
 
97
103
  """
98
104
 
99
- def __init__(
105
+ def __init__( # pylint: disable=too-many-arguments
100
106
  self,
101
107
  association_threshold: float = 5.0,
102
108
  *,
@@ -106,6 +112,7 @@ class KalmanLinkerParameters(FrameByFrameLinkerParameters):
106
112
  n_valid=3,
107
113
  n_gap=3,
108
114
  association_method: Union[str, AssociationMethod] = AssociationMethod.OPT_SMOOTH,
115
+ anisotropy: Tuple[float, float, float] = (1.0, 1.0, 1.0),
109
116
  cost: Union[str, Cost] = Cost.EUCLIDEAN,
110
117
  track_building: Union[str, TrackBuilding] = TrackBuilding.FILTERED,
111
118
  ):
@@ -113,9 +120,19 @@ class KalmanLinkerParameters(FrameByFrameLinkerParameters):
113
120
  association_threshold=association_threshold,
114
121
  n_valid=n_valid,
115
122
  n_gap=n_gap,
123
+ anisotropy=anisotropy,
116
124
  association_method=association_method,
117
125
  )
118
126
 
127
+ if isinstance(detection_std, float) and min(anisotropy) != max(anisotropy):
128
+ warnings.warn(
129
+ "A single `detection_std` is provided, but images are anisotrope. Consider giving one std by dimension."
130
+ )
131
+ if isinstance(process_std, float) and min(anisotropy) != max(anisotropy):
132
+ warnings.warn(
133
+ "A single `process_std` is provided, but images are anisotrope. Consider giving one std by dimension."
134
+ )
135
+
119
136
  self.detection_std = detection_std
120
137
  self.process_std = process_std
121
138
  self.kalman_order = kalman_order
@@ -282,12 +299,17 @@ class KalmanLinker(FrameByFrameLinker):
282
299
  if self.projections is None:
283
300
  raise RuntimeError("Projections should already be initialized.")
284
301
 
302
+ anisotropy = torch.tensor(self.specs.anisotropy)[: detections.dim]
303
+
285
304
  if self.specs.cost == Cost.EUCLIDEAN:
286
- return torch.cdist(self.projections.mean[..., 0], detections.position), self.specs.association_threshold
305
+ return (
306
+ torch.cdist(self.projections.mean[..., 0] * anisotropy, detections.position * anisotropy),
307
+ self.specs.association_threshold,
308
+ )
287
309
 
288
310
  if self.specs.cost == Cost.EUCLIDEAN_SQ:
289
311
  return (
290
- torch.cdist(self.projections.mean[..., 0], detections.position).pow_(2),
312
+ torch.cdist(self.projections.mean[..., 0] * anisotropy, detections.position * anisotropy).pow_(2),
291
313
  self.specs.association_threshold**2,
292
314
  )
293
315
  if self.specs.cost == Cost.MAHALANOBIS:
@@ -2,6 +2,7 @@
2
2
 
3
3
  import dataclasses
4
4
  from typing import Optional, Tuple, Union
5
+ import warnings
5
6
 
6
7
  import numpy as np
7
8
  import torch
@@ -44,6 +45,11 @@ class KOFTLinkerParameters(KalmanLinkerParameters):
44
45
  association_method (AssociationMethod): The frame-by-frame association to use. See `AssociationMethod`.
45
46
  It can be provided as a string. (Choice: GREEDY, OPT_HARD, OPT_SMOOTH)
46
47
  Default: OPT_SMOOTH
48
+ anisotropy (Tuple[float, float, float]): Anisotropy of images (Ratio of the pixel sizes
49
+ for each axis, depth first). This will be used to scale distances. It will only impact
50
+ EUCLIDEAN[_SQ] costs. For probabilistic cost, anisotropy should be already integrated
51
+ in the different std of the kalman filter.
52
+ Default: (1., 1., 1.)
47
53
  cost_method (CostMethod): The cost method to use. It can be provided as a string.
48
54
  See `CostMethod`. It also indicates what is the correct unit of `association_threshold`.
49
55
  Default: EUCLIDEAN
@@ -71,6 +77,7 @@ class KOFTLinkerParameters(KalmanLinkerParameters):
71
77
  n_valid=3,
72
78
  n_gap=3,
73
79
  association_method: Union[str, AssociationMethod] = AssociationMethod.OPT_SMOOTH,
80
+ anisotropy: Tuple[float, float, float] = (1.0, 1.0, 1.0),
74
81
  cost: Union[str, Cost] = Cost.EUCLIDEAN,
75
82
  track_building: Union[str, TrackBuilding] = TrackBuilding.FILTERED,
76
83
  extract_flows_on_detections=False,
@@ -84,10 +91,16 @@ class KOFTLinkerParameters(KalmanLinkerParameters):
84
91
  n_valid=n_valid,
85
92
  n_gap=n_gap,
86
93
  association_method=association_method,
94
+ anisotropy=anisotropy,
87
95
  cost=cost,
88
96
  track_building=track_building,
89
97
  )
90
98
 
99
+ if isinstance(flow_std, float) and min(anisotropy) != max(anisotropy):
100
+ warnings.warn(
101
+ "A single flow_std is provided, but the images are anisotrope. Consider giving one std by dimension."
102
+ )
103
+
91
104
  self.flow_std = flow_std
92
105
  self.extract_flows_on_detections = extract_flows_on_detections
93
106
  self.always_measure_velocity = always_measure_velocity
@@ -226,15 +239,21 @@ class KOFTLinker(KalmanLinker):
226
239
  if self.projections is None:
227
240
  raise RuntimeError("Projections should already be initialized.")
228
241
 
242
+ anisotropy = torch.tensor(self.specs.anisotropy)[: detections.dim]
243
+
229
244
  if self.specs.cost == Cost.EUCLIDEAN:
230
245
  return (
231
- torch.cdist(self.projections.mean[:, : detections.dim, 0], detections.position),
246
+ torch.cdist(
247
+ self.projections.mean[:, : detections.dim, 0] * anisotropy, detections.position * anisotropy
248
+ ),
232
249
  self.specs.association_threshold,
233
250
  )
234
251
 
235
252
  if self.specs.cost == Cost.EUCLIDEAN_SQ:
236
253
  return (
237
- torch.cdist(self.projections.mean[:, : detections.dim, 0], detections.position).pow_(2),
254
+ torch.cdist(
255
+ self.projections.mean[:, : detections.dim, 0] * anisotropy, detections.position * anisotropy
256
+ ).pow_(2),
238
257
  self.specs.association_threshold**2,
239
258
  )
240
259
 
@@ -45,6 +45,7 @@ class NearestNeighborParameters(FrameByFrameLinkerParameters):
45
45
  n_valid=3,
46
46
  n_gap=3,
47
47
  association_method: Union[str, AssociationMethod] = AssociationMethod.OPT_SMOOTH,
48
+ anisotropy: Tuple[float, float, float] = (1.0, 1.0, 1.0),
48
49
  ema=0.0,
49
50
  fill_gap=False,
50
51
  ):
@@ -53,6 +54,7 @@ class NearestNeighborParameters(FrameByFrameLinkerParameters):
53
54
  n_valid=n_valid,
54
55
  n_gap=n_gap,
55
56
  association_method=association_method,
57
+ anisotropy=anisotropy,
56
58
  )
57
59
  self.ema = ema
58
60
  self.fill_gap = fill_gap
@@ -110,7 +112,12 @@ class NearestNeighborLinker(FrameByFrameLinker):
110
112
  if self.active_positions is None:
111
113
  self.active_positions = torch.empty((0, detections.position.shape[1]))
112
114
 
113
- return torch.cdist(self.active_positions, detections.position), self.specs.association_threshold
115
+ anisotropy = torch.tensor(self.specs.anisotropy)[: -detections.dim]
116
+
117
+ return (
118
+ torch.cdist(self.active_positions * anisotropy, detections.position * anisotropy),
119
+ self.specs.association_threshold,
120
+ )
114
121
 
115
122
  def post_association(self, _: np.ndarray, detections: byotrack.Detections, links: torch.Tensor):
116
123
  if self.active_positions is None:
@@ -125,8 +125,14 @@ class CTCMetrics(CTCSoftwareRunner):
125
125
  and may add points to tracks to fill missing detections. Usually, this leads to better DET/SEG metrics
126
126
  as it reduces false positive and false negative.
127
127
 
128
+ It will store the logs of the last called metric in `self.last_log`
129
+
128
130
  """
129
131
 
132
+ def __init__(self, ctc_software):
133
+ super().__init__(ctc_software)
134
+ self.last_log = ""
135
+
130
136
  def compute_tracking_metric(
131
137
  self,
132
138
  metric: str,
@@ -210,7 +216,10 @@ class CTCMetrics(CTCSoftwareRunner):
210
216
  **kwargs,
211
217
  )
212
218
 
213
- return self.run(metric, output_path, 1)
219
+ results = self.run(metric, output_path, 1)
220
+ self.last_log = (output_path / "01_RES" / f"{metric}_log.txt").read_text("utf-8")
221
+
222
+ return results
214
223
 
215
224
  def compute_detection_metric(
216
225
  self,
@@ -259,7 +268,10 @@ class CTCMetrics(CTCSoftwareRunner):
259
268
  ground_truth_path, ground_truth_detections_sequence, as_res=False, as_seg=metric == "SEG"
260
269
  )
261
270
 
262
- return self.run(metric, output_path, 1)
271
+ results = self.run(metric, output_path, 1)
272
+ self.last_log = (output_path / "01_RES" / f"{metric}_log.txt").read_text("utf-8")
273
+
274
+ return results
263
275
 
264
276
  @staticmethod
265
277
  def copy_ground_truth(
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: byotrack
3
- Version: 1.2.0.dev0
3
+ Version: 1.2.0.dev1
4
4
  Summary: Biological particle tracking with Python
5
5
  Home-page: https://github.com/raphaelreme/byotrack
6
6
  Author: Raphael Reme
@@ -28,7 +28,7 @@ Requires-Dist: platformdirs
28
28
  Requires-Dist: pylapy[scipy]
29
29
  Requires-Dist: tifffile[all]
30
30
  Requires-Dist: torch
31
- Requires-Dist: torch_tps
31
+ Requires-Dist: torch-tps
32
32
  Requires-Dist: tqdm
33
33
  Provides-Extra: full
34
34
  Requires-Dist: matplotlib; extra == "full"
@@ -6,7 +6,7 @@ platformdirs
6
6
  pylapy[scipy]
7
7
  tifffile[all]
8
8
  torch
9
- torch_tps
9
+ torch-tps
10
10
  tqdm
11
11
 
12
12
  [full]
@@ -34,7 +34,7 @@ install_requires =
34
34
  pylapy[scipy]
35
35
  tifffile[all]
36
36
  torch
37
- torch_tps
37
+ torch-tps
38
38
  tqdm
39
39
  include_package_data = False
40
40
 
File without changes
File without changes
File without changes