nedo-vision-worker-core 0.2.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nedo-vision-worker-core has been flagged as possibly problematic.
- nedo_vision_worker_core/__init__.py +23 -0
- nedo_vision_worker_core/ai/FrameDrawer.py +144 -0
- nedo_vision_worker_core/ai/ImageDebugger.py +126 -0
- nedo_vision_worker_core/ai/VideoDebugger.py +69 -0
- nedo_vision_worker_core/ai/__init__.py +1 -0
- nedo_vision_worker_core/cli.py +197 -0
- nedo_vision_worker_core/config/ConfigurationManager.py +173 -0
- nedo_vision_worker_core/config/__init__.py +1 -0
- nedo_vision_worker_core/core_service.py +237 -0
- nedo_vision_worker_core/database/DatabaseManager.py +236 -0
- nedo_vision_worker_core/database/__init__.py +1 -0
- nedo_vision_worker_core/detection/BaseDetector.py +22 -0
- nedo_vision_worker_core/detection/DetectionManager.py +83 -0
- nedo_vision_worker_core/detection/RFDETRDetector.py +62 -0
- nedo_vision_worker_core/detection/YOLODetector.py +57 -0
- nedo_vision_worker_core/detection/__init__.py +1 -0
- nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +29 -0
- nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +47 -0
- nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +44 -0
- nedo_vision_worker_core/detection/detection_processing/__init__.py +1 -0
- nedo_vision_worker_core/doctor.py +342 -0
- nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
- nedo_vision_worker_core/icons/boots-green.png +0 -0
- nedo_vision_worker_core/icons/boots-red.png +0 -0
- nedo_vision_worker_core/icons/gloves-green.png +0 -0
- nedo_vision_worker_core/icons/gloves-red.png +0 -0
- nedo_vision_worker_core/icons/goggles-green.png +0 -0
- nedo_vision_worker_core/icons/goggles-red.png +0 -0
- nedo_vision_worker_core/icons/helmet-green.png +0 -0
- nedo_vision_worker_core/icons/helmet-red.png +0 -0
- nedo_vision_worker_core/icons/mask-red.png +0 -0
- nedo_vision_worker_core/icons/vest-green.png +0 -0
- nedo_vision_worker_core/icons/vest-red.png +0 -0
- nedo_vision_worker_core/models/__init__.py +20 -0
- nedo_vision_worker_core/models/ai_model.py +41 -0
- nedo_vision_worker_core/models/auth.py +14 -0
- nedo_vision_worker_core/models/config.py +9 -0
- nedo_vision_worker_core/models/dataset_source.py +30 -0
- nedo_vision_worker_core/models/logs.py +9 -0
- nedo_vision_worker_core/models/ppe_detection.py +39 -0
- nedo_vision_worker_core/models/ppe_detection_label.py +20 -0
- nedo_vision_worker_core/models/restricted_area_violation.py +20 -0
- nedo_vision_worker_core/models/user.py +10 -0
- nedo_vision_worker_core/models/worker_source.py +19 -0
- nedo_vision_worker_core/models/worker_source_pipeline.py +21 -0
- nedo_vision_worker_core/models/worker_source_pipeline_config.py +24 -0
- nedo_vision_worker_core/models/worker_source_pipeline_debug.py +15 -0
- nedo_vision_worker_core/models/worker_source_pipeline_detection.py +14 -0
- nedo_vision_worker_core/pipeline/PipelineConfigManager.py +32 -0
- nedo_vision_worker_core/pipeline/PipelineManager.py +133 -0
- nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +40 -0
- nedo_vision_worker_core/pipeline/PipelineProcessor.py +338 -0
- nedo_vision_worker_core/pipeline/PipelineSyncThread.py +202 -0
- nedo_vision_worker_core/pipeline/__init__.py +1 -0
- nedo_vision_worker_core/preprocessing/ImageResizer.py +42 -0
- nedo_vision_worker_core/preprocessing/ImageRoi.py +61 -0
- nedo_vision_worker_core/preprocessing/Preprocessor.py +16 -0
- nedo_vision_worker_core/preprocessing/__init__.py +1 -0
- nedo_vision_worker_core/repositories/AIModelRepository.py +31 -0
- nedo_vision_worker_core/repositories/PPEDetectionRepository.py +146 -0
- nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +90 -0
- nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +81 -0
- nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +71 -0
- nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +79 -0
- nedo_vision_worker_core/repositories/WorkerSourceRepository.py +19 -0
- nedo_vision_worker_core/repositories/__init__.py +1 -0
- nedo_vision_worker_core/streams/RTMPStreamer.py +146 -0
- nedo_vision_worker_core/streams/StreamSyncThread.py +66 -0
- nedo_vision_worker_core/streams/VideoStream.py +324 -0
- nedo_vision_worker_core/streams/VideoStreamManager.py +121 -0
- nedo_vision_worker_core/streams/__init__.py +1 -0
- nedo_vision_worker_core/tracker/SFSORT.py +325 -0
- nedo_vision_worker_core/tracker/TrackerManager.py +163 -0
- nedo_vision_worker_core/tracker/__init__.py +1 -0
- nedo_vision_worker_core/util/BoundingBoxMetrics.py +53 -0
- nedo_vision_worker_core/util/DrawingUtils.py +354 -0
- nedo_vision_worker_core/util/ModelReadinessChecker.py +188 -0
- nedo_vision_worker_core/util/PersonAttributeMatcher.py +70 -0
- nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +45 -0
- nedo_vision_worker_core/util/TablePrinter.py +28 -0
- nedo_vision_worker_core/util/__init__.py +1 -0
- nedo_vision_worker_core-0.2.0.dist-info/METADATA +347 -0
- nedo_vision_worker_core-0.2.0.dist-info/RECORD +95 -0
- nedo_vision_worker_core-0.2.0.dist-info/WHEEL +5 -0
- nedo_vision_worker_core-0.2.0.dist-info/entry_points.txt +2 -0
- nedo_vision_worker_core-0.2.0.dist-info/top_level.txt +1 -0
nedo_vision_worker_core/tracker/SFSORT.py
@@ -0,0 +1,325 @@

import numpy as np

use_lap = True
try:
    import lap
except ImportError:
    from scipy.optimize import linear_sum_assignment
    use_lap = False

# ******************************************************************** #
# ***************************** Classes ****************************** #
# ******************************************************************** #

class DotAccess(dict):
    """Provides dot.notation access to dictionary attributes"""
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


class TrackState:
    """Enumeration of possible states of a track"""
    Active = 0
    Lost_Central = 1
    Lost_Marginal = 2


class Track:
    """Handles basic track attributes and operations"""

    def __init__(self, bbox, frame_id, track_id):
        """Track initialization"""
        self.track_id = track_id
        self.bbox = bbox
        self.state = TrackState.Active
        self.last_frame = frame_id

    def update(self, box, frame_id):
        """Updates a matched track"""
        self.bbox = box
        self.state = TrackState.Active
        self.last_frame = frame_id


class SFSORT:
    """Multi-Object Tracking System"""

    def __init__(self, args):
        """Initialize a tracker with given arguments"""
        args = DotAccess(args)

        # Register tracking arguments, setting default values if the argument is not provided
        if args.high_th is None:
            self.high_th = 0.6
        else:
            self.high_th = self.clamp(args.high_th, 0, 1)

        if args.match_th_first is None:
            self.match_th_first = 0.67
        else:
            self.match_th_first = self.clamp(args.match_th_first, 0, 0.67)

        if args.new_track_th is None:
            self.new_track_th = 0.7
        else:
            self.new_track_th = self.clamp(args.new_track_th, self.high_th, 1)

        if args.low_th is None:
            self.low_th = 0.1
        else:
            self.low_th = self.clamp(args.low_th, 0, self.high_th)

        if args.match_th_second is None:
            self.match_th_second = 0.3
        else:
            self.match_th_second = self.clamp(args.match_th_second, 0, 1)

        self.dynamic_tuning = False
        if args.dynamic_tuning is not None:
            self.cth = 0.5
            self.high_th_m = 0.0
            self.new_track_th_m = 0.0
            self.match_th_first_m = 0.0
            if args.dynamic_tuning:
                self.dynamic_tuning = True
                if args.cth is not None:
                    self.cth = self.clamp(args.cth, args.low_th, 1)
                if args.high_th_m is not None:
                    self.high_th_m = self.clamp(args.high_th_m, 0.02, 0.1)
                if args.new_track_th_m is not None:
                    self.new_track_th_m = self.clamp(args.new_track_th_m, 0.02, 0.08)
                if args.match_th_first_m is not None:
                    self.match_th_first_m = self.clamp(args.match_th_first_m, 0.02, 0.08)

        if args.marginal_timeout is None:
            self.marginal_timeout = 0
        else:
            self.marginal_timeout = self.clamp(args.marginal_timeout, 0, 500)

        if args.central_timeout is None:
            self.central_timeout = 0
        else:
            self.central_timeout = self.clamp(args.central_timeout, 0, 1000)

        self.l_margin = 0
        self.r_margin = 0
        if args.frame_width:
            self.r_margin = args.frame_width
            if args.horizontal_margin is not None:
                self.l_margin = self.clamp(args.horizontal_margin, 0, args.frame_width)
                self.r_margin = self.clamp(args.frame_width - args.horizontal_margin, 0, args.frame_width)

        self.t_margin = 0
        self.b_margin = 0
        if args.frame_height:
            self.b_margin = args.frame_height
            if args.vertical_margin is not None:
                self.t_margin = self.clamp(args.vertical_margin, 0, args.frame_height)
                self.b_margin = self.clamp(args.frame_height - args.vertical_margin, 0, args.frame_height)

        # Initialize the tracker
        self.frame_no = 0
        self.id_counter = 0
        self.active_tracks = []
        self.lost_tracks = []

    def update(self, boxes, scores):
        """Updates tracker with new detections"""
        # Adjust dynamic arguments
        hth = self.high_th
        nth = self.new_track_th
        mth = self.match_th_first
        if self.dynamic_tuning:
            count = len(scores[scores > self.cth])
            if count < 1:
                count = 1

            lnc = np.log10(count)
            hth = self.clamp(hth - (self.high_th_m * lnc), 0, 1)
            nth = self.clamp(nth + (self.new_track_th_m * lnc), hth, 1)
            mth = self.clamp(mth - (self.match_th_first_m * lnc), 0, 0.67)

        # Increase frame number
        self.frame_no += 1

        # Variable: Active tracks in the next frame
        next_active_tracks = []

        # Remove long-time lost tracks
        all_lost_tracks = self.lost_tracks.copy()
        for track in all_lost_tracks:
            if track.state == TrackState.Lost_Central:
                if self.frame_no - track.last_frame > self.central_timeout:
                    self.lost_tracks.remove(track)
            else:
                if self.frame_no - track.last_frame > self.marginal_timeout:
                    self.lost_tracks.remove(track)

        # Gather out all previous tracks
        track_pool = self.active_tracks + self.lost_tracks

        # Try to associate tracks with high score detections
        unmatched_tracks = np.array([])
        high_score = scores > hth
        if high_score.any():
            definite_boxes = boxes[high_score]
            definite_scores = scores[high_score]
            if track_pool:
                cost = self.calculate_cost(track_pool, definite_boxes)
                matches, unmatched_tracks, unmatched_detections = self.linear_assignment(cost, mth)
                # Update/Activate matched tracks
                for track_idx, detection_idx in matches:
                    box = definite_boxes[detection_idx]
                    track = track_pool[track_idx]
                    track.update(box, self.frame_no)
                    next_active_tracks.append(track)
                    # Remove re-identified tracks from lost list
                    if track in self.lost_tracks:
                        self.lost_tracks.remove(track)
                # Identify eligible unmatched detections as new tracks
                for detection_idx in unmatched_detections:
                    if definite_scores[detection_idx] > nth:
                        box = definite_boxes[detection_idx]
                        track = Track(box, self.frame_no, self.id_counter)
                        next_active_tracks.append(track)
                        self.id_counter += 1
            else:
                # Associate tracks of the first frame after object-free/null frames
                for detection_idx, score in enumerate(definite_scores):
                    if score > nth:
                        box = definite_boxes[detection_idx]
                        track = Track(box, self.frame_no, self.id_counter)
                        next_active_tracks.append(track)
                        self.id_counter += 1

        # Add unmatched tracks to the lost list
        unmatched_track_pool = []
        for track_address in unmatched_tracks:
            unmatched_track_pool.append(track_pool[track_address])
        next_lost_tracks = unmatched_track_pool.copy()

        # Try to associate remained tracks with intermediate score detections
        intermediate_score = np.logical_and((self.low_th < scores), (scores < hth))
        if intermediate_score.any():
            if len(unmatched_tracks):
                possible_boxes = boxes[intermediate_score]
                cost = self.calculate_cost(unmatched_track_pool, possible_boxes, iou_only=True)
                matches, unmatched_tracks, unmatched_detections = self.linear_assignment(cost, self.match_th_second)
                # Update/Activate matched tracks
                for track_idx, detection_idx in matches:
                    box = possible_boxes[detection_idx]
                    track = unmatched_track_pool[track_idx]
                    track.update(box, self.frame_no)
                    next_active_tracks.append(track)
                    # Remove re-identified tracks from lost list
                    if track in self.lost_tracks:
                        self.lost_tracks.remove(track)
                    next_lost_tracks.remove(track)

        # All tracks are lost if there are no detections!
        if not (high_score.any() or intermediate_score.any()):
            next_lost_tracks = track_pool.copy()

        # Update the list of lost tracks
        for track in next_lost_tracks:
            if track not in self.lost_tracks:
                self.lost_tracks.append(track)
                u = track.bbox[0] + (track.bbox[2] - track.bbox[0]) / 2
                v = track.bbox[1] + (track.bbox[3] - track.bbox[1]) / 2
                if (self.l_margin < u < self.r_margin) and (self.t_margin < v < self.b_margin):
                    track.state = TrackState.Lost_Central
                else:
                    track.state = TrackState.Lost_Marginal

        # Update the list of active tracks
        self.active_tracks = next_active_tracks.copy()

        return np.asarray([[x.bbox, x.track_id] for x in next_active_tracks], dtype=object)

    @staticmethod
    def clamp(value, min_value, max_value):
        """Clamps a value within the specified minimum and maximum bounds."""
        return max(min_value, min(value, max_value))

    @staticmethod
    def calculate_cost(tracks, boxes, iou_only=False):
        """Calculates the association cost based on IoU and box similarity"""
        eps = 1e-7
        active_boxes = [track.bbox for track in tracks]

        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = np.array(active_boxes).T
        b2_x1, b2_y1, b2_x2, b2_y2 = np.array(boxes).T

        h_intersection = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0)
        w_intersection = (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0)

        # Calculate the intersection area
        intersection = h_intersection * w_intersection

        # Calculate the union area
        box1_height = b1_x2 - b1_x1
        box2_height = b2_x2 - b2_x1
        box1_width = b1_y2 - b1_y1
        box2_width = b2_y2 - b2_y1

        box1_area = box1_height * box1_width
        box2_area = box2_height * box2_width

        union = (box2_area + box1_area[:, None] - intersection + eps)

        # Calculate the IoU
        iou = intersection / union

        if iou_only:
            return 1.0 - iou

        # Calculate the DIoU
        centerx1 = (b1_x1 + b1_x2) / 2.0
        centery1 = (b1_y1 + b1_y2) / 2.0
        centerx2 = (b2_x1 + b2_x2) / 2.0
        centery2 = (b2_y1 + b2_y2) / 2.0
        inner_diag = np.abs(centerx1[:, None] - centerx2) + np.abs(centery1[:, None] - centery2)

        xxc1 = np.minimum(b1_x1[:, None], b2_x1)
        yyc1 = np.minimum(b1_y1[:, None], b2_y1)
        xxc2 = np.maximum(b1_x2[:, None], b2_x2)
        yyc2 = np.maximum(b1_y2[:, None], b2_y2)
        outer_diag = np.abs(xxc2 - xxc1) + np.abs(yyc2 - yyc1)

        diou = iou - (inner_diag / outer_diag)

        # Calculate the BBSI
        delta_w = np.abs(box2_width - box1_width[:, None])
        sw = w_intersection / np.abs(w_intersection + delta_w + eps)

        delta_h = np.abs(box2_height - box1_height[:, None])
        sh = h_intersection / np.abs(h_intersection + delta_h + eps)

        bbsi = diou + sh + sw

        # Normalize the BBSI
        cost = bbsi / 3.0

        return 1.0 - cost

    @staticmethod
    def linear_assignment(cost_matrix, thresh):
        """Linear assignment"""
        if cost_matrix.size == 0:
            return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))

        if use_lap:
            _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
            matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
            unmatched_a = np.where(x < 0)[0]
            unmatched_b = np.where(y < 0)[0]
        else:
            row_ind, col_ind = linear_sum_assignment(cost_matrix)
            matches = np.array([[row, col] for row, col in zip(row_ind, col_ind) if cost_matrix[row, col] <= thresh])
            matched_rows = set(row_ind)
            matched_cols = set(col_ind)
            unmatched_a = np.array([i for i in range(cost_matrix.shape[0]) if i not in matched_rows])
            unmatched_b = np.array([j for j in range(cost_matrix.shape[1]) if j not in matched_cols])

        return matches, unmatched_a, unmatched_b
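Taken on its own, SFSORT is a detector-agnostic association stage: construct it once with a dict of thresholds, then call update(boxes, scores) per frame. Below is a minimal usage sketch (not part of the package) showing that call pattern. The threshold values mirror the ones TrackerManager passes in further down, the boxes are hypothetical [x1, y1, x2, y2] pixel coordinates, and the import path is inferred from the wheel layout.

import numpy as np
from nedo_vision_worker_core.tracker.SFSORT import SFSORT

# Thresholds follow the defaults TrackerManager uses; frame_width/frame_height
# are omitted here, so the margin logic falls back to its zero defaults.
tracker = SFSORT({
    "dynamic_tuning": True, "cth": 0.5,
    "high_th": 0.6, "match_th_first": 0.67, "match_th_second": 0.2,
    "low_th": 0.1, "new_track_th": 0.7,
    "marginal_timeout": 7, "central_timeout": 30,
})

# One frame of hypothetical detections: Nx4 boxes ([x1, y1, x2, y2]) and N scores.
boxes = np.array([[100, 120, 180, 300], [400, 90, 470, 310]], dtype=np.float32)
scores = np.array([0.91, 0.83], dtype=np.float32)

# update() returns an object array whose rows are [bbox, track_id].
for bbox, track_id in tracker.update(boxes, scores):
    print(track_id, bbox)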
nedo_vision_worker_core/tracker/TrackerManager.py
@@ -0,0 +1,163 @@

import uuid
import time
import numpy as np
from .SFSORT import SFSORT

class TrackerManager:
    def __init__(self, attribute_labels=None, exclusive_attribute_groups=None):
        self.tracker = SFSORT({
            "dynamic_tuning": True,
            "cth": 0.5,
            "high_th": 0.6,
            "match_th_first": 0.67,
            "match_th_second": 0.2,
            "low_th": 0.1,
            "new_track_th": 0.7,
            "marginal_timeout": 7,
            "central_timeout": 30
        })
        self.track_uuid_map = {}
        self.track_count_map = {}
        self.track_attributes_presence = {}
        self.track_last_seen = {}
        self.track_timeout_seconds = 5
        self.attribute_labels = attribute_labels or []
        self.exclusive_attribute_groups = exclusive_attribute_groups or []

    def track_objects(self, detections):
        if not detections:
            self._cleanup_stale_tracks()
            return []

        bboxes = np.array([d["bbox"] for d in detections], dtype=np.float32)
        confidences = np.array([d["confidence"] for d in detections], dtype=np.float32)
        tracks = self.tracker.update(bboxes, confidences)

        results = self._generate_tracking_results(detections, tracks)
        self._cleanup_stale_tracks()
        return results

    def _generate_tracking_results(self, detections, tracks):
        tracked_results = []
        detection_map = {tuple(d["bbox"]): d for d in detections}

        for track in tracks:
            track_id = int(track[1])
            bbox = track[0].tolist()
            data = detection_map.get(tuple(bbox))

            if not data:
                continue

            obj_uuid = self._assign_uuid(track_id)

            self.track_count_map[obj_uuid] += 1
            self.track_last_seen[obj_uuid] = time.time()  # Time-based last seen

            attributes = data.get("attributes", [])
            filtered_attributes = self._filter_exclusive_attributes(attributes)
            self._update_attribute_presence(obj_uuid, filtered_attributes)

            for attr in filtered_attributes:
                label = attr["label"]
                if label in self.track_attributes_presence[obj_uuid]:
                    attr["count"] = self.track_attributes_presence[obj_uuid][label]

            tracked_results.append({
                "uuid": obj_uuid,
                "track_id": track_id,
                "detections": self.track_count_map[obj_uuid],
                "label": data["label"],
                "confidence": data["confidence"],
                "bbox": bbox,
                "attributes": filtered_attributes
            })

        return tracked_results

    def _assign_uuid(self, track_id):
        if track_id not in self.track_uuid_map:
            new_uuid = str(uuid.uuid4())
            self.track_uuid_map[track_id] = new_uuid
            self.track_count_map[new_uuid] = 0
            self.track_attributes_presence[new_uuid] = {attr: 0 for attr in self.attribute_labels}
        return self.track_uuid_map[track_id]

    def _filter_exclusive_attributes(self, attributes):
        if not attributes:
            return []

        # Group attributes by label to handle multiple instances
        attrs_by_label = {}
        for attr in attributes:
            label = attr["label"]
            if label not in attrs_by_label:
                attrs_by_label[label] = []
            attrs_by_label[label].append(attr)

        # Multi-instance classes that can have multiple instances per person
        MULTI_INSTANCE_CLASSES = ["boots", "gloves", "goggles", "no_gloves"]
        # Negative classes that are exclusive with their positive counterparts
        NEGATIVE_CLASSES = ["no_helmet", "no_vest", "no_goggles", "no_boots"]

        # For exclusive groups, keep only the highest confidence per group
        filtered_attrs = []
        for group in self.exclusive_attribute_groups:
            group_attrs = []
            for label in group:
                if label in attrs_by_label:
                    group_attrs.extend(attrs_by_label[label])
            if group_attrs:
                # Keep only the highest confidence in this exclusive group
                best = max(group_attrs, key=lambda a: a["confidence"])
                filtered_attrs.append(best)

        # For multi-instance classes, add all instances (but respect exclusive logic above)
        exclusive_labels = set(l for group in self.exclusive_attribute_groups for l in group)
        for label, attrs in attrs_by_label.items():
            # Skip if already handled by exclusive logic
            if label in exclusive_labels:
                continue
            # Add all instances of multi-instance classes
            if label in MULTI_INSTANCE_CLASSES:
                filtered_attrs.extend(attrs)

        # Special case: for multi-instance classes, we want to allow multiple instances
        # even if they were in exclusive groups, but we need to handle the negative classes properly
        for label, attrs in attrs_by_label.items():
            # If this is a multi-instance class and not a negative class, add all instances
            if label in MULTI_INSTANCE_CLASSES and label not in NEGATIVE_CLASSES:
                # Check which attributes were already added
                for attr in attrs:
                    already_added = any(
                        a["label"] == label and list(a["bbox"]) == list(attr["bbox"])
                        for a in filtered_attrs
                    )
                    if not already_added:
                        filtered_attrs.append(attr)

        return filtered_attrs

    def _update_attribute_presence(self, uuid, attributes):
        current_frame_attrs = set(attr["label"] for attr in attributes)
        for label in self.attribute_labels:
            if label in current_frame_attrs:
                self.track_attributes_presence[uuid][label] += 1
            else:
                self.track_attributes_presence[uuid][label] = 0

    def _cleanup_stale_tracks(self):
        now = time.time()
        expired = [
            uuid for uuid, last_seen in self.track_last_seen.items()
            if now - last_seen > self.track_timeout_seconds
        ]

        for obj_uuid in expired:
            track_ids_to_remove = [tid for tid, uid in self.track_uuid_map.items() if uid == obj_uuid]
            for tid in track_ids_to_remove:
                self.track_uuid_map.pop(tid, None)

            self.track_count_map.pop(obj_uuid, None)
            self.track_attributes_presence.pop(obj_uuid, None)
            self.track_last_seen.pop(obj_uuid, None)
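TrackerManager wraps SFSORT with stable per-object UUIDs, per-attribute presence counters, and time-based track expiry. The sketch below (not part of the package) feeds it one frame of hypothetical person detections; the dict keys ("bbox", "confidence", "label", "attributes") are the ones _generate_tracking_results reads, while the label names and attribute groups are illustrative assumptions only.

from nedo_vision_worker_core.tracker.TrackerManager import TrackerManager

# Attribute labels and exclusive groups are illustrative; within each exclusive
# group the manager keeps only the highest-confidence attribute per frame.
manager = TrackerManager(
    attribute_labels=["helmet", "no_helmet", "vest", "no_vest"],
    exclusive_attribute_groups=[["helmet", "no_helmet"], ["vest", "no_vest"]],
)

detections = [{
    "bbox": [100, 120, 180, 300],   # person box, [x1, y1, x2, y2]
    "confidence": 0.91,
    "label": "person",
    "attributes": [
        {"label": "helmet", "confidence": 0.88, "bbox": [110, 120, 150, 160]},
        {"label": "no_helmet", "confidence": 0.30, "bbox": [110, 120, 150, 160]},
    ],
}]

# Each result carries a stable uuid, the SFSORT track_id, how many frames the
# object has been seen ("detections"), and the filtered attributes.
for obj in manager.track_objects(detections):
    print(obj["uuid"], obj["track_id"], obj["detections"],
          [a["label"] for a in obj["attributes"]])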
nedo_vision_worker_core/tracker/__init__.py
@@ -0,0 +1 @@

(a single blank line)
nedo_vision_worker_core/util/BoundingBoxMetrics.py
@@ -0,0 +1,53 @@

class BoundingBoxMetrics:
    """Computes IoU and coverage for bounding boxes."""

    @staticmethod
    def compute_iou(box1, box2):
        """
        Computes Intersection over Union (IoU) between two bounding boxes.
        Args:
            box1, box2: [x1, y1, x2, y2] (coordinates of two bounding boxes)
        Returns:
            IoU score (float between 0 and 1)
        """
        x1, y1, x2, y2 = box1
        x1_p, y1_p, x2_p, y2_p = box2

        # Compute intersection
        inter_x1 = max(x1, x1_p)
        inter_y1 = max(y1, y1_p)
        inter_x2 = min(x2, x2_p)
        inter_y2 = min(y2, y2_p)

        inter_area = max(0, inter_x2 - inter_x1) * max(0, inter_y2 - inter_y1)

        # Compute union
        box1_area = (x2 - x1) * (y2 - y1)
        box2_area = (x2_p - x1_p) * (y2_p - y1_p)
        union_area = box1_area + box2_area - inter_area

        return inter_area / union_area if union_area > 0 else 0

    @staticmethod
    def compute_coverage(box1, box2):
        """
        Computes the coverage percentage of box2 inside box1.
        Coverage is defined as the intersection area over box2's area.
        Args:
            box1, box2: [x1, y1, x2, y2] (coordinates of two bounding boxes)
        Returns:
            Coverage ratio (float between 0 and 1)
        """
        x1, y1, x2, y2 = box1
        x1_o, y1_o, x2_o, y2_o = box2

        # Compute intersection
        inter_x1 = max(x1, x1_o)
        inter_y1 = max(y1, y1_o)
        inter_x2 = min(x2, x2_o)
        inter_y2 = min(y2, y2_o)

        inter_area = max(0, inter_x2 - inter_x1) * max(0, inter_y2 - inter_y1)
        box2_area = (x2_o - x1_o) * (y2_o - y1_o)

        return inter_area / box2_area if box2_area > 0 else 0
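As a quick worked check (not part of the package): two 100x100 boxes offset by half their width share a 50x100 intersection, so compute_iou gives 5000 / 15000 = 1/3 while compute_coverage gives 5000 / 10000 = 0.5, i.e. half of the second box lies inside the first. The import path is inferred from the wheel layout.

from nedo_vision_worker_core.util.BoundingBoxMetrics import BoundingBoxMetrics

a = [0, 0, 100, 100]    # area 10000
b = [50, 0, 150, 100]   # area 10000, overlaps a over x in [50, 100]

iou = BoundingBoxMetrics.compute_iou(a, b)       # 5000 / (10000 + 10000 - 5000)
cov = BoundingBoxMetrics.compute_coverage(a, b)  # 5000 / 10000
print(round(iou, 3), cov)                        # 0.333 0.5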