supervisely-6.73.418-py3-none-any.whl → supervisely-6.73.419-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. supervisely/api/entity_annotation/figure_api.py +89 -45
  2. supervisely/nn/inference/inference.py +61 -45
  3. supervisely/nn/inference/instance_segmentation/instance_segmentation.py +1 -0
  4. supervisely/nn/inference/object_detection/object_detection.py +1 -0
  5. supervisely/nn/inference/session.py +4 -4
  6. supervisely/nn/model/model_api.py +31 -20
  7. supervisely/nn/model/prediction.py +11 -0
  8. supervisely/nn/model/prediction_session.py +33 -6
  9. supervisely/nn/tracker/__init__.py +1 -2
  10. supervisely/nn/tracker/base_tracker.py +44 -0
  11. supervisely/nn/tracker/botsort/__init__.py +1 -0
  12. supervisely/nn/tracker/botsort/botsort_config.yaml +31 -0
  13. supervisely/nn/tracker/botsort/osnet_reid/osnet.py +566 -0
  14. supervisely/nn/tracker/botsort/osnet_reid/osnet_reid_interface.py +88 -0
  15. supervisely/nn/tracker/botsort/tracker/__init__.py +0 -0
  16. supervisely/nn/tracker/{bot_sort → botsort/tracker}/basetrack.py +1 -2
  17. supervisely/nn/tracker/{utils → botsort/tracker}/gmc.py +51 -59
  18. supervisely/nn/tracker/{deep_sort/deep_sort → botsort/tracker}/kalman_filter.py +71 -33
  19. supervisely/nn/tracker/botsort/tracker/matching.py +202 -0
  20. supervisely/nn/tracker/{bot_sort/bot_sort.py → botsort/tracker/mc_bot_sort.py} +68 -81
  21. supervisely/nn/tracker/botsort_tracker.py +259 -0
  22. supervisely/project/project.py +1 -1
  23. {supervisely-6.73.418.dist-info → supervisely-6.73.419.dist-info}/METADATA +3 -1
  24. {supervisely-6.73.418.dist-info → supervisely-6.73.419.dist-info}/RECORD +29 -42
  25. supervisely/nn/tracker/bot_sort/__init__.py +0 -21
  26. supervisely/nn/tracker/bot_sort/fast_reid_interface.py +0 -152
  27. supervisely/nn/tracker/bot_sort/matching.py +0 -127
  28. supervisely/nn/tracker/bot_sort/sly_tracker.py +0 -401
  29. supervisely/nn/tracker/deep_sort/__init__.py +0 -6
  30. supervisely/nn/tracker/deep_sort/deep_sort/__init__.py +0 -1
  31. supervisely/nn/tracker/deep_sort/deep_sort/detection.py +0 -49
  32. supervisely/nn/tracker/deep_sort/deep_sort/iou_matching.py +0 -81
  33. supervisely/nn/tracker/deep_sort/deep_sort/linear_assignment.py +0 -202
  34. supervisely/nn/tracker/deep_sort/deep_sort/nn_matching.py +0 -176
  35. supervisely/nn/tracker/deep_sort/deep_sort/track.py +0 -166
  36. supervisely/nn/tracker/deep_sort/deep_sort/tracker.py +0 -145
  37. supervisely/nn/tracker/deep_sort/deep_sort.py +0 -301
  38. supervisely/nn/tracker/deep_sort/generate_clip_detections.py +0 -90
  39. supervisely/nn/tracker/deep_sort/preprocessing.py +0 -70
  40. supervisely/nn/tracker/deep_sort/sly_tracker.py +0 -273
  41. supervisely/nn/tracker/tracker.py +0 -285
  42. supervisely/nn/tracker/utils/kalman_filter.py +0 -492
  43. supervisely/nn/tracking/__init__.py +0 -1
  44. supervisely/nn/tracking/boxmot.py +0 -114
  45. supervisely/nn/tracking/tracking.py +0 -24
  46. /supervisely/nn/tracker/{utils → botsort/osnet_reid}/__init__.py +0 -0
  47. {supervisely-6.73.418.dist-info → supervisely-6.73.419.dist-info}/LICENSE +0 -0
  48. {supervisely-6.73.418.dist-info → supervisely-6.73.419.dist-info}/WHEEL +0 -0
  49. {supervisely-6.73.418.dist-info → supervisely-6.73.419.dist-info}/entry_points.txt +0 -0
  50. {supervisely-6.73.418.dist-info → supervisely-6.73.419.dist-info}/top_level.txt +0 -0
supervisely/nn/tracker/deep_sort/deep_sort.py +0 -301
@@ -1,301 +0,0 @@
-from __future__ import absolute_import
-
-import numpy as np
-
-from ..utils.kalman_filter import KalmanFilterXYAH as KalmanFilter
-from .deep_sort import iou_matching, linear_assignment
-
-
-class TrackState:
-    """
-    Enumeration type for the single target track state. Newly created tracks are
-    classified as `tentative` until enough evidence has been collected. Then,
-    the track state is changed to `confirmed`. Tracks that are no longer alive
-    are classified as `deleted` to mark them for removal from the set of active
-    tracks.
-
-    """
-
-    Tentative = 1
-    Confirmed = 2
-    Deleted = 3
-
-
-class Track:
-    """
-    A single target track with state space `(x, y, a, h)` and associated
-    velocities, where `(x, y)` is the center of the bounding box, `a` is the
-    aspect ratio and `h` is the height.
-
-    Parameters
-    ----------
-    mean : ndarray
-        Mean vector of the initial state distribution.
-    covariance : ndarray
-        Covariance matrix of the initial state distribution.
-    track_id : int
-        A unique track identifier.
-    n_init : int
-        Number of consecutive detections before the track is confirmed. The
-        track state is set to `Deleted` if a miss occurs within the first
-        `n_init` frames.
-    max_age : int
-        The maximum number of consecutive misses before the track state is
-        set to `Deleted`.
-    feature : Optional[ndarray]
-        Feature vector of the detection this track originates from. If not None,
-        this feature is added to the `features` cache.
-
-    Attributes
-    ----------
-    mean : ndarray
-        Mean vector of the initial state distribution.
-    covariance : ndarray
-        Covariance matrix of the initial state distribution.
-    track_id : int
-        A unique track identifier.
-    hits : int
-        Total number of measurement updates.
-    age : int
-        Total number of frames since first occurance.
-    time_since_update : int
-        Total number of frames since last measurement update.
-    state : TrackState
-        The current track state.
-    features : List[ndarray]
-        A cache of features. On each measurement update, the associated feature
-        vector is added to this list.
-
-    """
-
-    def __init__(self, mean, covariance, track_id, n_init, max_age, feature=None):
-        self.mean = mean
-        self.covariance = covariance
-        self.track_id = track_id
-        self.hits = 1
-        self.age = 1
-        self.time_since_update = 0
-
-        self.state = TrackState.Tentative
-        self.features = []
-        if feature is not None:
-            self.features.append(feature)
-
-        self._n_init = n_init
-        self._max_age = max_age
-
-    def to_tlwh(self):
-        """Get current position in bounding box format `(top left x, top left y,
-        width, height)`.
-
-        Returns
-        -------
-        ndarray
-            The bounding box.
-
-        """
-        ret = self.mean[:4].copy()
-        ret[2] *= ret[3]
-        ret[:2] -= ret[2:] / 2
-        return ret
-
-    def to_tlbr(self):
-        """Get current position in bounding box format `(min x, miny, max x,
-        max y)`.
-
-        Returns
-        -------
-        ndarray
-            The bounding box.
-
-        """
-        ret = self.to_tlwh()
-        ret[2:] = ret[:2] + ret[2:]
-        return ret
-
-    def predict(self, kf):
-        """Propagate the state distribution to the current time step using a
-        Kalman filter prediction step.
-
-        Parameters
-        ----------
-        kf : kalman_filter.KalmanFilter
-            The Kalman filter.
-
-        """
-        self.mean, self.covariance = kf.predict(self.mean, self.covariance)
-        self.age += 1
-        self.time_since_update += 1
-
-    def update(self, kf, detection):
-        """Perform Kalman filter measurement update step and update the feature
-        cache.
-
-        Parameters
-        ----------
-        kf : kalman_filter.KalmanFilter
-            The Kalman filter.
-        detection : Detection
-            The associated detection.
-
-        """
-        self.mean, self.covariance = kf.update(self.mean, self.covariance, detection.to_xyah())
-        self.features.append(detection.feature)
-
-        self.hits += 1
-        self.time_since_update = 0
-        if self.state == TrackState.Tentative and self.hits >= self._n_init:
-            self.state = TrackState.Confirmed
-
-    def mark_missed(self):
-        """Mark this track as missed (no association at the current time step)."""
-        if self.state == TrackState.Tentative:
-            self.state = TrackState.Deleted
-        elif self.time_since_update > self._max_age:
-            self.state = TrackState.Deleted
-
-    def is_tentative(self):
-        """Returns True if this track is tentative (unconfirmed)."""
-        return self.state == TrackState.Tentative
-
-    def is_confirmed(self):
-        """Returns True if this track is confirmed."""
-        return self.state == TrackState.Confirmed
-
-    def is_deleted(self):
-        """Returns True if this track is dead and should be deleted."""
-        return self.state == TrackState.Deleted
-
-
-class Tracker:
-    """
-    This is the multi-target tracker.
-
-    Parameters
-    ----------
-    metric : nn_matching.NearestNeighborDistanceMetric
-        A distance metric for measurement-to-track association.
-    max_age : int
-        Maximum number of missed misses before a track is deleted.
-    n_init : int
-        Number of consecutive detections before the track is confirmed. The
-        track state is set to `Deleted` if a miss occurs within the first
-        `n_init` frames.
-
-    Attributes
-    ----------
-    metric : nn_matching.NearestNeighborDistanceMetric
-        The distance metric used for measurement to track association.
-    max_age : int
-        Maximum number of missed misses before a track is deleted.
-    n_init : int
-        Number of frames that a track remains in initialization phase.
-    kf : kalman_filter.KalmanFilter
-        A Kalman filter to filter target trajectories in image space.
-    tracks : List[Track]
-        The list of active tracks at the current time step.
-
-    """
-
-    def __init__(self, metric, max_iou_distance=0.7, max_age=30, n_init=3):
-        self.metric = metric
-        self.max_iou_distance = max_iou_distance
-        self.max_age = max_age
-        self.n_init = n_init
-
-        self.kf = KalmanFilter()
-        self.tracks = []
-        self._next_id = 1
-
-    def predict(self):
-        """Propagate track state distributions one time step forward.
-
-        This function should be called once every time step, before `update`.
-        """
-        for track in self.tracks:
-            track.predict(self.kf)
-
-    def update(self, detections):
-        """Perform measurement update and track management.
-
-        Parameters
-        ----------
-        detections : List[deep_sort.detection.Detection]
-            A list of detections at the current time step.
-
-        """
-        # Run matching cascade.
-        matches, unmatched_tracks, unmatched_detections = self._match(detections)
-
-        # Update track set.
-        for track_idx, detection_idx in matches:
-            self.tracks[track_idx].update(self.kf, detections[detection_idx])
-        for track_idx in unmatched_tracks:
-            self.tracks[track_idx].mark_missed()
-        for detection_idx in unmatched_detections:
-            self._initiate_track(detections[detection_idx])
-        self.tracks = [t for t in self.tracks if not t.is_deleted()]
-
-        # Update distance metric.
-        active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
-        features, targets = [], []
-        for track in self.tracks:
-            if not track.is_confirmed():
-                continue
-            features += track.features
-            targets += [track.track_id for _ in track.features]
-            track.features = []
-        self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets)
-
-    def _match(self, detections):
-
-        def gated_metric(tracks, dets, track_indices, detection_indices):
-            features = np.array([dets[i].feature for i in detection_indices])
-            targets = np.array([tracks[i].track_id for i in track_indices])
-            cost_matrix = self.metric.distance(features, targets)
-            cost_matrix = linear_assignment.gate_cost_matrix(
-                self.kf, cost_matrix, tracks, dets, track_indices, detection_indices
-            )
-
-            return cost_matrix
-
-        # Split track set into confirmed and unconfirmed tracks.
-        confirmed_tracks = [i for i, t in enumerate(self.tracks) if t.is_confirmed()]
-        unconfirmed_tracks = [i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
-
-        # Associate confirmed tracks using appearance features.
-        matches_a, unmatched_tracks_a, unmatched_detections = linear_assignment.matching_cascade(
-            gated_metric,
-            self.metric.matching_threshold,
-            self.max_age,
-            self.tracks,
-            detections,
-            confirmed_tracks,
-        )
-
-        # Associate remaining tracks together with unconfirmed tracks using IOU.
-        iou_track_candidates = unconfirmed_tracks + [
-            k for k in unmatched_tracks_a if self.tracks[k].time_since_update == 1
-        ]
-        unmatched_tracks_a = [
-            k for k in unmatched_tracks_a if self.tracks[k].time_since_update != 1
-        ]
-        matches_b, unmatched_tracks_b, unmatched_detections = linear_assignment.min_cost_matching(
-            iou_matching.iou_cost,
-            self.max_iou_distance,
-            self.tracks,
-            detections,
-            iou_track_candidates,
-            unmatched_detections,
-        )
-
-        matches = matches_a + matches_b
-        unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
-        return matches, unmatched_tracks, unmatched_detections
-
-    def _initiate_track(self, detection):
-        mean, covariance = self.kf.initiate(detection.to_xyah())
-        self.tracks.append(
-            Track(mean, covariance, self._next_id, self.n_init, self.max_age, detection.feature)
-        )
-        self._next_id += 1
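For context, the removed tracker was driven with a per-frame predict/update loop. The sketch below is not taken from this diff: the import paths and the Detection / NearestNeighborDistanceMetric signatures are assumptions based on the upstream deep_sort project and on the sibling modules deleted in the same release, and where Tracker was actually re-exported from depended on the removed __init__.py.

# Illustrative sketch only: pre-6.73.419 DeepSORT usage; paths and signatures are assumptions.
import numpy as np

from supervisely.nn.tracker.deep_sort import Tracker  # hypothetical re-export, removed in 6.73.419
from supervisely.nn.tracker.deep_sort.deep_sort.detection import Detection
from supervisely.nn.tracker.deep_sort.deep_sort.nn_matching import NearestNeighborDistanceMetric

metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)
tracker = Tracker(metric, max_iou_distance=0.7, max_age=30, n_init=3)

# one dummy frame: two (x, y, w, h) boxes with random 128-d appearance features
frames = [(np.array([[10.0, 20.0, 50.0, 80.0], [200.0, 40.0, 60.0, 90.0]]), np.random.rand(2, 128))]

for frame_boxes, frame_features in frames:
    detections = [Detection(tlwh, 1.0, feat) for tlwh, feat in zip(frame_boxes, frame_features)]
    tracker.predict()           # Kalman prediction for every existing track
    tracker.update(detections)  # matching cascade + IoU fallback + track management
    for track in tracker.tracks:
        if track.is_confirmed() and track.time_since_update == 0:
            print(track.track_id, track.to_tlbr())  # stable ID and current box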
supervisely/nn/tracker/deep_sort/generate_clip_detections.py +0 -90
@@ -1,90 +0,0 @@
-import numpy as np
-import torch  # pylint: disable=import-error
-from PIL import Image
-
-
-def extract_image_patch(image, bbox, patch_shape=None):
-    """Extract image patch from bounding box.
-
-    Parameters
-    ----------
-    image : ndarray
-        The full image.
-    bbox : array_like
-        The bounding box in format (x, y, width, height).
-    patch_shape : Optional[array_like]
-        This parameter can be used to enforce a desired patch shape
-        (height, width). First, the `bbox` is adapted to the aspect ratio
-        of the patch shape, then it is clipped at the image boundaries.
-        If None, the shape is computed from :arg:`bbox`.
-
-    Returns
-    -------
-    ndarray | NoneType
-        An image patch showing the :arg:`bbox`, optionally reshaped to
-        :arg:`patch_shape`.
-        Returns None if the bounding box is empty or fully outside of the image
-        boundaries.
-
-    """
-    bbox = np.array(bbox)
-    if patch_shape is not None:
-        # correct aspect ratio to patch shape
-        target_aspect = float(patch_shape[1]) / patch_shape[0]
-        new_width = target_aspect * bbox[3]
-        bbox[0] -= (new_width - bbox[2]) / 2
-        bbox[2] = new_width
-
-    # convert to top left, bottom right
-    bbox[2:] += bbox[:2]
-    bbox = bbox.astype(np.int)
-
-    # clip at image boundaries
-    bbox[:2] = np.maximum(0, bbox[:2])
-    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
-    if np.any(bbox[:2] >= bbox[2:]):
-        return None
-    sx, sy, ex, ey = bbox
-    image = image[sy:ey, sx:ex]
-    # image = cv2.resize(image, tuple(patch_shape[::-1]))
-    return image
-
-
-class ImageEncoder(object):
-
-    def __init__(self, model, transform, device):
-
-        self.model = model
-        self.transform = transform
-        self.device = str(device)
-
-    def __call__(self, data_x, batch_size=32):
-        out = []
-
-        for patch in range(len(data_x)):
-            if self.device == "cpu":
-                img = self.transform(Image.fromarray(data_x[patch]))
-            else:
-                img = self.transform(Image.fromarray(data_x[patch])).cuda()
-            out.append(img)
-
-        features = self.model.encode_image(torch.stack(out)).cpu().detach().numpy()
-        return features
-
-
-def create_box_encoder(model, transform, batch_size=32, device="cpu"):
-    image_encoder = ImageEncoder(model, transform, device)
-
-    def encoder(image, boxes):
-        image_patches = []
-        for box in boxes:
-            # print("extracting box {} from image {}".format(box, image.shape))
-            patch = extract_image_patch(image, box)
-            if patch is None:
-                print("WARNING: Failed to extract image patch: %s." % str(box))
-                patch = np.random.uniform(0.0, 255.0, image.shape).astype(np.uint8)
-            image_patches.append(patch)
-        # image_patches = np.array(image_patches)
-        return image_encoder(image_patches, batch_size)
-
-    return encoder
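The removed helper wrapped any model exposing encode_image plus its preprocessing transform: the returned closure crops each (x, y, w, h) box and batches the crops through the encoder. A minimal wiring sketch follows; the choice of the OpenAI clip package and the ViT-B/32 checkpoint is an assumption, not something this diff states, and note that extract_image_patch uses the long-removed np.int alias, so the helper only ran on older NumPy releases.

# Sketch only: wiring the removed create_box_encoder to a CLIP-style backbone.
# The "clip" package / ViT-B/32 checkpoint is an assumption; any object with an
# encode_image(tensor) method and a matching PIL-to-tensor transform would do.
import numpy as np
import clip  # pip install git+https://github.com/openai/CLIP.git

from supervisely.nn.tracker.deep_sort.generate_clip_detections import create_box_encoder  # removed in 6.73.419

model, preprocess = clip.load("ViT-B/32", device="cpu")
encoder = create_box_encoder(model, preprocess, batch_size=32, device="cpu")

frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # H x W x 3 image
boxes = [(50, 60, 120, 200), (300, 100, 80, 160)]                  # (x, y, w, h) per detection
features = encoder(frame, boxes)  # ndarray of shape (len(boxes), embedding_dim)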
supervisely/nn/tracker/deep_sort/preprocessing.py +0 -70
@@ -1,70 +0,0 @@
-import numpy as np
-
-
-def non_max_suppression(boxes, classes, max_bbox_overlap, scores=None):
-    """Suppress overlapping detections.
-
-    Original code from [1]_ has been adapted to include confidence score.
-
-    .. [1] http://www.pyimagesearch.com/2015/02/16/
-           faster-non-maximum-suppression-python/
-
-    Examples
-    --------
-
-        >>> boxes = [d.roi for d in detections]
-        >>> classes = [d.classes for d in detections]
-        >>> scores = [d.confidence for d in detections]
-        >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
-        >>> detections = [detections[i] for i in indices]
-
-    Parameters
-    ----------
-    boxes : ndarray
-        Array of ROIs (x, y, width, height).
-    max_bbox_overlap : float
-        ROIs that overlap more than this values are suppressed.
-    scores : Optional[array_like]
-        Detector confidence score.
-
-    Returns
-    -------
-    List[int]
-        Returns indices of detections that have survived non-maxima suppression.
-
-    """
-    if len(boxes) == 0:
-        return []
-
-    boxes = boxes.astype(float)
-    pick = []
-
-    x1 = boxes[:, 0]
-    y1 = boxes[:, 1]
-    x2 = boxes[:, 2] + boxes[:, 0]
-    y2 = boxes[:, 3] + boxes[:, 1]
-
-    area = (x2 - x1 + 1) * (y2 - y1 + 1)
-    if scores is not None:
-        idxs = np.argsort(scores)
-    else:
-        idxs = np.argsort(y2)
-
-    while len(idxs) > 0:
-        last = len(idxs) - 1
-        i = idxs[last]
-        pick.append(i)
-
-        xx1 = np.maximum(x1[i], x1[idxs[:last]])
-        yy1 = np.maximum(y1[i], y1[idxs[:last]])
-        xx2 = np.minimum(x2[i], x2[idxs[:last]])
-        yy2 = np.minimum(y2[i], y2[idxs[:last]])
-
-        w = np.maximum(0, xx2 - xx1 + 1)
-        h = np.maximum(0, yy2 - yy1 + 1)
-
-        overlap = (w * h) / area[idxs[:last]]
-
-        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > max_bbox_overlap)[0])))
-
-    return pick
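Two details of the removed helper are easy to miss: the classes argument is accepted but never used, and the docstring example still shows the old three-argument call. A short usage sketch with the actual signature (the sample values below are illustrative):

# Usage sketch for the removed non_max_suppression helper.
# Boxes are (x, y, w, h); `classes` is accepted but ignored by the implementation.
import numpy as np

from supervisely.nn.tracker.deep_sort.preprocessing import non_max_suppression  # removed in 6.73.419

boxes = np.array(
    [[10, 10, 50, 80],    # overlaps heavily with the next box
     [12, 14, 50, 80],
     [200, 40, 60, 90]],  # far away from the others
    dtype=float,
)
classes = np.array([0, 0, 1])
scores = np.array([0.9, 0.6, 0.8])

keep = non_max_suppression(boxes, classes, max_bbox_overlap=0.5, scores=scores)
# keep == [0, 2]: the lower-scoring duplicate of box 0 is suppressed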