dnt 0.2.0.tar.gz → 0.2.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dnt might be problematic.
- {dnt-0.2.0/dnt.egg-info → dnt-0.2.1}/PKG-INFO +1 -1
- dnt-0.2.1/dnt/analysis/__init__.py +6 -0
- dnt-0.2.1/dnt/analysis/speed.py +15 -0
- dnt-0.2.1/dnt/analysis/stop.py +241 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/label/labeler.py +96 -19
- {dnt-0.2.0 → dnt-0.2.1/dnt.egg-info}/PKG-INFO +1 -1
- {dnt-0.2.0 → dnt-0.2.1}/dnt.egg-info/SOURCES.txt +3 -0
- {dnt-0.2.0 → dnt-0.2.1}/setup.py +1 -1
- {dnt-0.2.0 → dnt-0.2.1}/LICENSE +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/MANIFEST.in +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/README.md +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/detect/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/detect/yolov8/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/detect/yolov8/detector.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/filter/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/filter/filter.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/label/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/convert.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/data/coco.names +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/data/openimages.names +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/data/voc.names +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/files.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/filter.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/shared/util.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/configs/deep_sort.yaml +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/configs/fastreid.yaml +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/checkpoint/ckpt.t7 +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/evaluate.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/feature_extractor.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/model.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/original_model.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/test.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep/train.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/deep_sort.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/detection.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/iou_matching.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/kalman_filter.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/linear_assignment.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/nn_matching.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/preprocessing.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/track.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/deep_sort/sort/tracker.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/dsort.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/asserts.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/draw.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/evaluation.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/io.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/json_logger.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/log.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/parser.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/dsort/utils/tools.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/sort/__init__.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/sort/sort.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt/track/tracker.py +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt.egg-info/dependency_links.txt +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt.egg-info/requires.txt +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/dnt.egg-info/top_level.txt +0 -0
- {dnt-0.2.0 → dnt-0.2.1}/setup.cfg +0 -0
dnt-0.2.1/dnt/analysis/speed.py
@@ -0,0 +1,15 @@
+import pandas as pd
+
+def frame_reflines(tracks, ref_lines, tolerance):
+    '''
+    tracks - a dataframe containing vehicle tracks without header:
+             frame, veh_id, leftup_x, leftup_y, w, h
+    ref_lines - a list of ref_lines:
+                [(pt1_x, pt1_y, pt2_x, pt2_y), (....), ...]
+    tolerance - allowed error to intersect (reserved)
+
+    *************************************************************
+    return - a dataframe containing frames passing reference lines:
+             veh_id, ref_line_index, frame
+    '''
+
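The frame_reflines function added in speed.py is a stub in 0.2.1: the hunk above contains only the import, the signature, and the docstring, with no body. Below is a minimal sketch of what such a function could compute, assuming the column layout stated in the docstring (frame, veh_id, leftup_x, leftup_y, w, h) and testing whether the segment between consecutive box centers crosses a reference line with shapely. It is an illustration under those assumptions, not the package's implementation, and frame_reflines_sketch is a hypothetical name.

# Illustrative sketch only; speed.py ships no implementation in 0.2.1.
import pandas as pd
from shapely.geometry import LineString

def frame_reflines_sketch(tracks: pd.DataFrame, ref_lines, tolerance=0.0) -> pd.DataFrame:
    # Assumed columns: 0=frame, 1=veh_id, 2=leftup_x, 3=leftup_y, 4=w, 5=h.
    lines = [LineString([(x1, y1), (x2, y2)]) for x1, y1, x2, y2 in ref_lines]
    results = []

    for veh_id, track in tracks.groupby(1):
        track = track.sort_values(by=0)
        frames = track[0].tolist()
        # Bounding-box centers, one per frame.
        centers = list(zip((track[2] + track[4] / 2).tolist(),
                           (track[3] + track[5] / 2).tolist()))

        for i in range(1, len(centers)):
            segment = LineString([centers[i - 1], centers[i]])
            for j, line in enumerate(lines):
                # A crossing (or near-crossing within tolerance) at frame i.
                if segment.intersects(line) or segment.distance(line) <= tolerance:
                    results.append([veh_id, j, frames[i]])

    return pd.DataFrame(results, columns=['veh_id', 'ref_line_index', 'frame'])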
dnt-0.2.1/dnt/analysis/stop.py
@@ -0,0 +1,241 @@
+from shapely.geometry import Point, Polygon, LineString, box
+import geopandas as gpd, pandas as pd
+from tqdm import tqdm
+
+class StopAnalyzer():
+    def __init__(self, h_coords, v_coords, event_dict, stop_iou=0.97, frame_buffer=5, verbose=True):
+        self.hzones = StopAnalyzer.gen_zones(h_coords)
+        self.vzones = StopAnalyzer.gen_zones(v_coords)
+        self.event_dict = event_dict
+        self.stop_iou = stop_iou
+        self.frame_buffer = frame_buffer
+        self.verbose = verbose
+
+    def analysis(self, track_file, result_file=None, output_file=None, video_index=None, video_tot=None):
+
+        tracks = pd.read_csv(track_file, header=None, sep=',')
+
+        tracks = self.__stop_scan(tracks, video_index, video_tot)
+        tracks = self.__event_identify(tracks, video_index, video_tot)
+        results = self.__event_count(tracks, video_index, video_tot)
+
+        if result_file:
+            results.to_csv(result_file, index=False)
+
+        if output_file:
+            tracks.to_csv(output_file, header=None, index=False)
+
+    def __stop_scan(self, tracks, video_index, video_tot):
+        vehicles = tracks[1].unique()
+
+        pbar = tqdm(total=len(vehicles), unit=' tracks')
+        if video_index and video_tot:
+            pbar.set_description_str("Scan stops {} of {}".format(video_index, video_tot))
+        else:
+            pbar.set_description_str("Scan stops ")
+
+        for vehicle in vehicles:
+            track = tracks[tracks[1] == vehicle].sort_values(by=0)
+
+            for i in range(self.frame_buffer, len(track)):
+
+                index = track.iloc[i].name
+
+                bb0 = [track.iloc[i-self.frame_buffer, 2], track.iloc[i-self.frame_buffer, 3],
+                       track.iloc[i-self.frame_buffer, 4], track.iloc[i-self.frame_buffer, 5]]
+                bb1 = [track.iloc[i, 2], track.iloc[i, 3], track.iloc[i, 4], track.iloc[i, 5]]
+                tracks.at[index, 6] = StopAnalyzer.iou(bb0, bb1)
+
+                center = Point(track.iloc[i,2]+track.iloc[i,4], track.iloc[i,3]+track.iloc[i,5])
+                for j in range(len(self.vzones)):
+                    if center.within(self.vzones[j]):
+                        tracks.loc[tracks[1]==vehicle, 7]=j
+                        break
+
+                bb = box(track.iat[i, 2], track.iat[i, 3], track.iat[i, 2] + track.iat[i, 4], track.iat[i, 3] + track.iat[i, 5])
+                for j in range(len(self.hzones)):
+                    if bb.intersects(self.hzones[j]):
+                        if j > tracks.at[index, 8]:
+                            tracks.at[index, 8] = j
+            if self.verbose:
+                pbar.update()
+
+        pbar.close()
+
+        return tracks
+
+    def __event_identify(self, tracks, video_index, video_tot):
+
+        pbar = tqdm(total=len(tracks), unit=' frames')
+        if video_index and video_tot:
+            pbar.set_description_str("Identify events {} of {}".format(video_index, video_tot))
+        else:
+            pbar.set_description_str("Identify events ")
+
+        for i in range(len(tracks)):
+
+            if tracks.iat[i, 6] >= self.stop_iou:
+
+                vzone = tracks.iat[i, 7]
+                hzone = tracks.iat[i, 8]
+                frame = tracks.iat[i, 0]
+                veh = tracks.iat[i, 1]
+
+                vehs_inlane = tracks.loc[(tracks[0]==frame) & (tracks[1]!=veh) & (tracks[7]==vzone) & (tracks[8]>hzone)]
+
+                if len(vehs_inlane)==0:
+
+                    pre_key = -1
+                    for key in self.event_dict:
+
+                        if key >= pre_key:
+                            pre_key = key
+                            if tracks.iat[i, 8] in self.event_dict[key]:
+                                tracks.iat[i, 9] = key
+
+            if self.verbose:
+                pbar.update()
+
+        pbar.close()
+
+        return tracks
+
+    def __event_count(self, tracks, video_index, video_tot):
+
+
+        pbar = tqdm(unit='events')
+        results = []
+
+        for key in self.event_dict:
+
+            vehicles = tracks.loc[tracks[9]==key][1].unique()
+
+            if video_index and video_tot:
+                pbar.set_description_str("Count event {} for {} of {}".format(key, video_index, video_tot))
+            else:
+                pbar.set_description_str("Count event {}".format(key))
+
+            pbar.total = len(vehicles)
+            for vehicle in vehicles:
+
+                track = tracks[(tracks[1] == vehicle) & (tracks[9] == key)]
+
+                start_frame = int(track[0].min())
+                end_frame = int(track[0].max())
+                vzone = track[7].mode()[0]
+                results.append([key, vehicle, vzone, start_frame, end_frame])
+
+                pbar.update()
+
+        results = pd.DataFrame(results, columns=['EVENT', 'TRACKID', 'LANE', 'START_FRAME', 'END_FRAME'])
+        pbar.close()
+
+        return results
+
+    @staticmethod
+    def export_label(track_file, analysis_file, label_file, vid_field=1, label_field=10, frame_field=0,
+                     event_label=None, vid_disp=True, verbose=True):
+
+        tracks = pd.read_csv(track_file, header=None)
+        results = pd.read_csv(analysis_file)
+
+        pbar = tqdm(total=len(tracks), desc='Generate labels ', unit=' frames')
+        for index, track in tracks.iterrows():
+
+            vid = int(track[vid_field])
+
+            selected = results.loc[(results['START_FRAME']<=track[frame_field]) &
+                                   (results['END_FRAME']>=track[frame_field]) &
+                                   (results['TRACKID']==track[vid_field])]
+            if len(selected)>0:
+
+                event = selected.iloc[0]['EVENT']
+
+                if event_label:
+                    tracks.at[index, label_field] = str(vid)+"-"+str(event_label[event])
+                else:
+                    tracks.at[index, label_field] = str(vid)+"-"+str(event)
+
+            else:
+                if vid_disp:
+                    tracks.at[index, label_field] = str(vid)
+                else:
+                    tracks.at[index, label_field] = '-1'
+
+            if verbose:
+                pbar.update()
+
+        pbar.close()
+
+        tracks.to_csv(label_file, header=None, index=False)
+
+        if verbose:
+            print('Write to {}'.format(label_file))
+
+
+    @staticmethod
+    def iou(bb1, bb2):
+        """
+        Calculate the Intersection over Union (IoU) of two bounding boxes.
+        Parameters
+        ----------
+        bb1 : [x1, y1, w, h]
+            x1, y1 - top left corner
+            w, h - width and height
+        bb2 : [x1, y1, w, h]
+            x1, y1 - top left corner
+            w, h - width and height
+        Returns
+        -------
+        iou: float [0, 1]
+        """
+
+        assert bb1[0] < bb1[0] + bb1[2]
+        assert bb1[1] < bb1[1] + bb1[3]
+        assert bb2[0] < bb2[0] + bb2[2]
+        assert bb2[1] < bb2[1] + bb2[3]
+
+        # determine the coordinates of the intersection rectangle
+        x_left = max(bb1[0], bb2[0])
+        y_top = max(bb1[1], bb2[1])
+        x_right = min(bb1[0]+bb1[2], bb2[0]+bb2[2])
+        y_bottom = min(bb1[1]+bb1[3], bb2[1]+bb2[3])
+
+        if x_right < x_left or y_bottom < y_top:
+            return 0.0
+
+        # The intersection of two axis-aligned bounding boxes is always an
+        # axis-aligned bounding box
+        intersection_area = (x_right - x_left) * (y_bottom - y_top)
+
+        # compute the area of both AABBs
+        bb1_area = bb1[2] * bb1[3]
+        bb2_area = bb2[2] * bb2[3]
+
+        # compute the intersection over union by taking the intersection
+        # area and dividing it by the sum of prediction + ground-truth
+        # areas - the intersection area
+        iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
+        assert iou >= 0.0
+        assert iou <= 1.0
+        return iou
+
+    @staticmethod
+    def gen_zones(line_coords):
+        """
+        Generate a list of shapely polygons
+        Inputs:
+            line_coords: a list of line coords ([[(x11, y11),(x12, y12)], [(x21, y21),(x22, y22)], ...])
+        Returns:
+            A list of Polygons
+        """
+
+        zones = []
+        for i in range(1, len(line_coords)):
+            shell = line_coords[i-1] + [line_coords[i][1], line_coords[i][0]]
+            zones.append(Polygon(shell))
+
+        return zones
+
+if __name__ == "__main__":
+    pass
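A hedged usage sketch of the new StopAnalyzer, pieced together from the code above: gen_zones turns each pair of consecutive lines into a polygon zone, __event_identify expects event_dict to map an event key to the horizontal-zone indices that trigger it, and __event_count writes EVENT, TRACKID, LANE, START_FRAME and END_FRAME. The import path is assumed from the new file layout; the coordinates, event keys, and file names are placeholders.

# Hypothetical example; zone coordinates, event keys, and paths are placeholders.
from dnt.analysis.stop import StopAnalyzer

# Each entry is one line as [(x1, y1), (x2, y2)]; gen_zones() builds a polygon
# zone between every pair of consecutive lines.
h_coords = [[(0, 400), (1280, 400)],
            [(0, 500), (1280, 500)],
            [(0, 600), (1280, 600)]]
v_coords = [[(300, 0), (300, 720)],
            [(500, 0), (500, 720)],
            [(700, 0), (700, 720)]]

# Event key -> horizontal-zone indices that count as that event.
event_dict = {0: [0], 1: [1]}

analyzer = StopAnalyzer(h_coords, v_coords, event_dict, stop_iou=0.97, frame_buffer=5)
analyzer.analysis('tracks.txt',
                  result_file='stop_events.csv',      # EVENT, TRACKID, LANE, START_FRAME, END_FRAME
                  output_file='tracks_labeled.txt')   # tracks with the added columns 6-9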
{dnt-0.2.0 → dnt-0.2.1}/dnt/label/labeler.py
@@ -8,7 +8,7 @@ import os
 class Labeler:
     def __init__(self, method='opencv', frame_field=0,
                  label_fields=[1], color_field=1, zoom_factor=1, line_thickness=1,
-                 color_bgr = (0, 255, 0), compress_message=False):
+                 color_bgr = (0, 255, 0), compress_message=False, nodraw_empty=True):
 
         self.method = method
         self.frame_field = frame_field
@@ -18,8 +18,10 @@ class Labeler:
         self.line_thickness=line_thickness
         self.color_bgr = color_bgr
         self.compress_message=compress_message
+        self.nodraw_empty = nodraw_empty
 
-    def draw(self, label_file, input_video, output_video, start_frame=None, end_frame=None,
+    def draw(self, label_file, input_video, output_video, start_frame=None, end_frame=None,
+             video_index=None, video_tot=None):
         tracks = pd.read_csv(label_file, header=None)
 
         cmap = plt.get_cmap('tab20b')
@@ -62,23 +64,31 @@ class Labeler:
             #boxes = tracks.loc[tracks.columns[0]==pos_frame].values.tolist()
 
             for box in boxes:
-                x1 = box[2]
-                y1 = box[3]
-                x2 = x1 + box[4]
-                y2 = y1 + box[5]
+                x1 = int(box[2])
+                y1 = int(box[3])
+                x2 = x1 + int(box[4])
+                y2 = y1 + int(box[5])
 
                 color = colors[int(box[self.color_field]) % len(colors)]
                 color = [i * 255 for i in color]
-
+
                 label_txt = ''
                 for field in self.label_fields:
-
+                    if (str(box[field]).strip() == '-1'):
+                        if self.nodraw_empty:
+                            label_txt += ''
+                        else:
+                            label_txt += str(box[field]) + ' '
+                    else:
+                        label_txt += str(box[field]) + ' '
+
                 label_txt = label_txt.strip()
 
-
-
+                if label_txt:
+                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, self.line_thickness)
+                    cv2.rectangle(frame, (x1, int(y1-30*self.zoom_factor)), (x1+len(label_txt)*int(17*self.zoom_factor), y1),
                                   color, -1)
-
+                    cv2.putText(frame,str(label_txt),(int(x1), int(y1-int(10*self.zoom_factor))),
                                 0, 0.75*self.zoom_factor, (255,255,255), 1)
 
             writer.write(frame)
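The label handling above pairs with StopAnalyzer.export_label in stop.py, which writes '-1' into the label field when a frame matches no event and vid_disp is False; with nodraw_empty set, such fields contribute nothing to label_txt, and a box with an empty label is no longer drawn at all. A standalone restatement of that label-building logic, for reference only (build_label is not part of labeler.py):

# Stand-alone restatement of the logic in the hunk above; not part of labeler.py.
def build_label(box, label_fields, nodraw_empty=True):
    label_txt = ''
    for field in label_fields:
        if str(box[field]).strip() == '-1' and nodraw_empty:
            continue                      # placeholder label: contribute nothing
        label_txt += str(box[field]) + ' '
    return label_txt.strip()              # empty result means no box or text is drawn

# build_label(['f', 'id', 0, 0, 0, 0, '12-1'], [6]) -> '12-1'
# build_label(['f', 'id', 0, 0, 0, 0, '-1'], [6])   -> ''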
@@ -127,7 +137,7 @@ class Labeler:
                 break
 
             for line in lines:
-                cv2.line(frame, (line[0], line[1]), (line[2], line[3]), self.color_bgr, self.line_thickness)
+                cv2.line(frame, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])), self.color_bgr, self.line_thickness)
 
             writer.write(frame)
             key = cv2.waitKey(1) & 0xFF
@@ -243,6 +253,72 @@ class Labeler:
 
         return results
 
+    def draw_shapes(self, input_video, output_video, points=None, lines=None, polygons=None,
+                    point_color=None, line_color=None, polygon_color=None,
+                    start_frame=None, end_frame=None, video_index=None, video_tot=None):
+        cap = cv2.VideoCapture(input_video)
+        if not cap.isOpened():
+            raise IOError("Couldn't open webcam or video")
+
+        if start_frame is None:
+            start_frame = 0
+        if end_frame is None:
+            end_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))-1
+
+        tot_frames = end_frame - start_frame + 1
+        fps = int(cap.get(cv2.CAP_PROP_FPS))
+        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        writer = cv2.VideoWriter(output_video, fourcc, fps, (width, height))
+
+        pbar = tqdm(total=tot_frames, unit=" frames")
+        if self.compress_message:
+            pbar.set_description_str("Labeling")
+        else:
+            if video_index and video_tot:
+                pbar.set_description_str("Labeling {} of {}".format(video_index, video_tot))
+            else:
+                pbar.set_description_str("Labeling {} ".format(input_video))
+
+        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
+        while cap.isOpened():
+            pos_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
+            ret, frame = cap.read()
+            if (not ret) or (pos_frame>end_frame):
+                break
+
+            if points is not None:
+                if point_color is None:
+                    point_color = self.color_bgr
+                for point in points:
+                    cv2.circle(frame, (point[0], point[1]), radius=self.line_thickness, color=point_color, thickness=-1)
+
+            if lines is not None:
+                if line_color is None:
+                    line_color = self.color_bgr
+                for line in lines:
+                    cv2.line(frame, (line[0], line[1]), (line[2], line[3]), line_color, self.line_thickness)
+
+            if polygons is not None:
+                if polygon_color is None:
+                    polygon_color = self.color_bgr
+                for polygon in polygons:
+                    pts = np.array(polygon, np.int32)
+                    pts = pts.reshape((-1, 1, 2))
+                    cv2.polylines(frame, [pts], isClosed=True, color=polygon_color, thickness=self.line_thickness)
+
+            writer.write(frame)
+            key = cv2.waitKey(1) & 0xFF
+            if key == ord("q"):
+                break
+
+            pbar.update()
+
+        cv2.destroyAllWindows()
+        cap.release()
+        writer.release()
+
     @staticmethod
     def export_frames(input_video, frames, output_path):
 
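A hedged example of calling the new draw_shapes method; the file names and coordinates below are placeholders, and the import path is assumed from the package layout. Note that the polygon branch uses np.array, so labeler.py is assumed to import numpy as np alongside cv2.

# Hypothetical call; paths and coordinates are placeholders.
from dnt.label.labeler import Labeler

labeler = Labeler(zoom_factor=1, line_thickness=2, color_bgr=(0, 255, 0))
labeler.draw_shapes(
    input_video='input.mp4',
    output_video='annotated.mp4',
    points=[(640, 360)],                                          # filled circles
    lines=[(0, 400, 1280, 400)],                                  # (x1, y1, x2, y2)
    polygons=[[(100, 100), (300, 100), (300, 300), (100, 300)]],
    point_color=(0, 0, 255),
    line_color=(255, 0, 0),
    polygon_color=(0, 255, 255),
    start_frame=0,
    end_frame=300,
)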
@@ -281,11 +357,12 @@ class Labeler:
         return frame
 
 if __name__=='__main__':
-    video_file = "/mnt/
-    iou_file = "/mnt/
-    track_file = "/mnt/
-
-
-
-    labeler
+    video_file = "/mnt/d/videos/hfst/Standard_SCU7WH_2022-09-16_0630.02.001.mp4"
+    iou_file = "/mnt/d/videos/hfst/Standard_SCU7WH_2022-09-16_0630.02.001_iou.txt"
+    track_file = "/mnt/d/videos/hfst/tracks/Standard_SCU7WH_2022-09-16_0630.02.001_track.txt"
+    label_video = "/mnt/d/videos/hfst/labels/Standard_SCU7WH_2022-09-16_0630.02.001_track.mp4"
+    label_file = "/mnt/d/videos/hfst/tracks/Standard_SCU7WH_2022-09-16_0630.02.001_label.txt"
+
+    labeler = Labeler(video_file, zoom_factor=0.5, nodraw_empty=True, label_fields=[6])
+    labeler.draw(label_file, video_file, label_video)
 
{dnt-0.2.0 → dnt-0.2.1}/dnt.egg-info/SOURCES.txt
@@ -8,6 +8,9 @@ dnt.egg-info/SOURCES.txt
 dnt.egg-info/dependency_links.txt
 dnt.egg-info/requires.txt
 dnt.egg-info/top_level.txt
+dnt/analysis/__init__.py
+dnt/analysis/speed.py
+dnt/analysis/stop.py
 dnt/detect/__init__.py
 dnt/detect/yolov8/__init__.py
 dnt/detect/yolov8/detector.py
{dnt-0.2.0 → dnt-0.2.1}/setup.py RENAMED
{dnt-0.2.0 → dnt-0.2.1}/LICENSE RENAMED