vidformer 0.6.5.tar.gz → 0.7.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vidformer-0.6.5 → vidformer-0.7.0}/PKG-INFO +1 -1
- {vidformer-0.6.5 → vidformer-0.7.0}/vidformer/__init__.py +1 -1
- {vidformer-0.6.5 → vidformer-0.7.0}/vidformer/cv2/vf_cv2.py +49 -15
- {vidformer-0.6.5 → vidformer-0.7.0}/vidformer/vf.py +7 -2
- {vidformer-0.6.5 → vidformer-0.7.0}/README.md +0 -0
- {vidformer-0.6.5 → vidformer-0.7.0}/cv2-functions.md +0 -0
- {vidformer-0.6.5 → vidformer-0.7.0}/pyproject.toml +0 -0
- {vidformer-0.6.5 → vidformer-0.7.0}/vidformer/cv2/__init__.py +0 -0
vidformer/cv2/vf_cv2.py

@@ -45,6 +45,8 @@ LINE_4 = 4
 LINE_8 = 8
 LINE_AA = 16
 
+_inline_mat = vf.Filter("_inline_mat")
+
 _filter_scale = vf.Filter("Scale")
 _filter_rectangle = vf.Filter("cv2.rectangle")
 _filter_putText = vf.Filter("cv2.putText")

@@ -96,8 +98,9 @@ class Frame:
             return
 
         self._modified = True
-        self._f = _filter_scale(self._f, pix_fmt="rgb24")
-        self._fmt["pix_fmt"] = "rgb24"
+        if self._fmt["pix_fmt"] != "rgb24":
+            self._f = _filter_scale(self._f, pix_fmt="rgb24")
+            self._fmt["pix_fmt"] = "rgb24"
 
     def numpy(self):
         """

@@ -113,9 +116,41 @@ class Frame:
         assert len(frame_raster_rgb24) == self.shape[0] * self.shape[1] * 3
         raw_data_array = np.frombuffer(frame_raster_rgb24, dtype=np.uint8)
         frame = raw_data_array.reshape(self.shape)
+        frame = frame[:, :, ::-1]  # convert RGB to BGR
         return frame
 
 
+def _inline_frame(arr):
+    assert arr.dtype == np.uint8
+    assert arr.ndim == 3
+    assert arr.shape[2] == 3
+
+    # convert BGR to RGB
+    arr = arr[:, :, ::-1]
+
+    width = arr.shape[1]
+    height = arr.shape[0]
+    pix_fmt = "rgb24"
+
+    f = _inline_mat(arr.tobytes(), width=width, height=height, pix_fmt=pix_fmt)
+    fmt = {"width": width, "height": height, "pix_fmt": pix_fmt}
+    return Frame(f, fmt)
+
+
+def _framify(obj, field_name=None):
+    if isinstance(obj, Frame):
+        return obj
+    elif isinstance(obj, np.ndarray):
+        return _inline_frame(obj)
+    else:
+        if field_name is not None:
+            raise Exception(
+                f"Unsupported type for field {field_name}, expected Frame or np.ndarray"
+            )
+        else:
+            raise Exception("Unsupported type, expected Frame or np.ndarray")
+
+
 class VideoCapture:
     def __init__(self, path):
         self._path = path

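The `_inline_frame` and `_framify` helpers added above let the cv2 shim accept plain NumPy arrays wherever a vidformer `Frame` was previously required, by shipping the raw pixel bytes through the new `_inline_mat` filter. A rough usage sketch of what this enables, assuming a vidformer source/server is already configured (the array contents and output path below are purely illustrative):

import numpy as np
import vidformer.cv2 as vf_cv2

# A plain BGR uint8 image, as ordinary OpenCV code would produce.
img = np.zeros((480, 640, 3), dtype=np.uint8)
img[:, :, 2] = 255  # solid red in BGR order

# 0.7.0 converts the array internally via _framify/_inline_frame;
# 0.6.5 raised unless a vidformer.cv2.Frame was passed.
vf_cv2.imwrite("red.png", img)
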
@@ -178,9 +213,9 @@ class VideoWriter:
         self._pix_fmt = "yuv420p"
 
     def write(self, frame):
-
-
-        if frame.
+        frame = _framify(frame, "frame")
+
+        if frame._fmt["pix_fmt"] != self._pix_fmt:
             f_obj = _filter_scale(frame._f, pix_fmt=self._pix_fmt)
             self._frames.append(f_obj)
         else:

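Since `write` now routes its argument through `_framify`, frames produced as NumPy arrays can be fed straight into a `VideoWriter`. A minimal sketch, assuming the constructor and `VideoWriter_fourcc` mirror OpenCV's (path, fourcc, fps, (width, height)) API, which this hunk does not itself show:

import numpy as np
import vidformer.cv2 as vf_cv2

out = vf_cv2.VideoWriter(
    "out.mp4", vf_cv2.VideoWriter_fourcc(*"mp4v"), 30, (640, 480)
)
for i in range(90):
    frame = np.full((480, 640, 3), i * 2, dtype=np.uint8)  # brightening gray frames
    out.write(frame)  # 0.7.0 accepts ndarrays via _framify; 0.6.5 required a Frame
out.release()
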
@@ -225,8 +260,7 @@ def imwrite(path, img, *args):
     if len(args) > 0:
         raise NotImplementedError("imwrite does not support additional arguments")
 
-    if not isinstance(img, Frame):
-        raise Exception("img must be a vidformer.cv2.Frame object")
+    img = _framify(img)
 
     fmt = img._fmt.copy()
     width = fmt["width"]

@@ -282,7 +316,7 @@ def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
     cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )
     """
 
-
+    img = _framify(img)
     img._mut()
 
     assert len(pt1) == 2

@@ -326,7 +360,7 @@ def putText(
     cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
     """
 
-
+    img = _framify(img)
     img._mut()
 
     assert isinstance(text, str)

@@ -365,7 +399,7 @@ def arrowedLine(
     """
     cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] )
     """
-
+    img = _framify(img)
     img._mut()
 
     assert len(pt1) == 2

@@ -399,7 +433,7 @@ def arrowedLine(
 
 
 def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
-
+    img = _framify(img)
     img._mut()
 
     assert len(pt1) == 2

@@ -429,7 +463,7 @@ def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
 
 
 def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):
-
+    img = _framify(img)
     img._mut()
 
     assert len(center) == 2

@@ -480,15 +514,15 @@ def addWeighted(src1, alpha, src2, beta, gamma, dst=None, dtype=-1):
     """
     cv.addWeighted( src1, alpha, src2, beta, gamma[, dst[, dtype]] ) -> dst
     """
-
-
+    src1 = _framify(src1, "src1")
+    src2 = _framify(src2, "src2")
     src1._mut()
     src2._mut()
 
     if dst is None:
         dst = Frame(src1._f, src1._fmt.copy())
     else:
-        assert isinstance(dst, Frame)
+        assert isinstance(dst, Frame), "dst must be a Frame"
         dst._mut()
 
     assert isinstance(alpha, float) or isinstance(alpha, int)

vidformer/vf.py

@@ -638,6 +638,11 @@ def _json_arg(arg, skip_data_anot=False):
         if skip_data_anot:
             return {"String": arg}
         return {"Data": {"String": arg}}
+    elif type(arg) == bytes:
+        arg = list(arg)
+        if skip_data_anot:
+            return {"Bytes": arg}
+        return {"Data": {"Bytes": arg}}
     elif type(arg) == float:
         if skip_data_anot:
             return {"Float": arg}

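For context, this `bytes` branch is presumably what allows payloads like the `arr.tobytes()` argument passed to `_inline_mat` to be serialized: byte strings are expanded into a JSON-friendly list of integers. A small standalone illustration of the encoding the new branch produces (a paraphrase of the diff, not the full `_json_arg`):

def _json_arg_bytes(arg, skip_data_anot=False):
    # bytes become a plain list of ints so they survive JSON serialization
    arg = list(arg)  # e.g. b"\x00\xff" -> [0, 255]
    if skip_data_anot:
        return {"Bytes": arg}
    return {"Data": {"Bytes": arg}}

assert _json_arg_bytes(b"\x00\xff") == {"Data": {"Bytes": [0, 255]}}
assert _json_arg_bytes(b"\x00\xff", skip_data_anot=True) == {"Bytes": [0, 255]}
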
@@ -837,7 +842,7 @@ class UDF:
             assert type(obj[type_key]) == bool
             return obj[type_key]
         else:
-            assert False
+            assert False, f"Unknown type: {type_key}"
 
     def _deser_filter(self, obj):
         assert type(obj) == dict

@@ -873,7 +878,7 @@ class UDF:
             assert type(obj[type_key]) == bool
             return obj[type_key]
         else:
-            assert False
+            assert False, f"Unknown type: {type_key}"
 
     def _host(self, socket_path: str):
         if os.path.exists(socket_path):