vidformer 1.2.0__py3-none-any.whl
- vidformer/__init__.py +900 -0
- vidformer/cv2/__init__.py +937 -0
- vidformer/supervision/__init__.py +635 -0
- vidformer-1.2.0.dist-info/METADATA +37 -0
- vidformer-1.2.0.dist-info/RECORD +6 -0
- vidformer-1.2.0.dist-info/WHEEL +4 -0
@@ -0,0 +1,937 @@
"""
vidformer.cv2 is the cv2 frontend for [vidformer](https://github.com/ixlab/vidformer).

> ⚠️ This module is a work in progress. See the [implemented functions list](https://ixlab.github.io/vidformer/opencv-filters.html).

**Quick links:**
* [📦 PyPI](https://pypi.org/project/vidformer/)
* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/pdoc/)
* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py/pdoc/vidformer/cv2.html)
* [📘 Documentation - vidformer.supervision](https://ixlab.github.io/vidformer/vidformer-py/pdoc/vidformer/supervision.html)
* [🧑💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
"""

import vidformer as vf

try:
    import cv2 as _opencv2
except Exception:
    _opencv2 = None

import re
import zlib
from bisect import bisect_right
from fractions import Fraction
import os

import numpy as np

CAP_PROP_POS_MSEC = 0
CAP_PROP_POS_FRAMES = 1
CAP_PROP_FRAME_WIDTH = 3
CAP_PROP_FRAME_HEIGHT = 4
CAP_PROP_FPS = 5
CAP_PROP_FRAME_COUNT = 7

FONT_HERSHEY_SIMPLEX = 0
FONT_HERSHEY_PLAIN = 1
FONT_HERSHEY_DUPLEX = 2
FONT_HERSHEY_COMPLEX = 3
FONT_HERSHEY_TRIPLEX = 4
FONT_HERSHEY_COMPLEX_SMALL = 5
FONT_HERSHEY_SCRIPT_SIMPLEX = 6
FONT_HERSHEY_SCRIPT_COMPLEX = 7
FONT_ITALIC = 16

FILLED = -1
LINE_4 = 4
LINE_8 = 8
LINE_AA = 16

_inline_mat = vf.Filter("_inline_mat")
_slice_mat = vf.Filter("_slice_mat")
_slice_write_mat = vf.Filter("_slice_write_mat")
_black = vf.Filter("_black")


_filter_scale = vf.Filter("Scale")
_filter_rectangle = vf.Filter("cv2.rectangle")
_filter_putText = vf.Filter("cv2.putText")
_filter_arrowedLine = vf.Filter("cv2.arrowedLine")
_filter_line = vf.Filter("cv2.line")
_filter_circle = vf.Filter("cv2.circle")
_filter_addWeighted = vf.Filter("cv2.addWeighted")
_filter_ellipse = vf.Filter("cv2.ellipse")
_set_to = vf.Filter("cv2.setTo")

def _ts_to_fps(timestamps):
    if len(timestamps) < 2:
        return 0
    fps = Fraction(len(timestamps), timestamps[-1] - timestamps[0])
    if fps.denominator == 1:
        return fps.numerator
    return float(fps)


def _fps_to_ts(fps, n_frames):
    assert type(fps) is int
    return [Fraction(i, fps) for i in range(n_frames)]
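
# Note: _ts_to_fps estimates the rate as len(timestamps) / (last - first),
# which overestimates by a factor of N/(N-1) for N frames; for example, 300
# frames spaced 1/30 s apart span 299/30 s and yield ≈30.1, not exactly 30.
# The error is negligible for long videos.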

_global_cv2_server = None


def _server():
    global _global_cv2_server
    if _global_cv2_server is None:
        if "VF_IGNI_ENDPOINT" in os.environ:
            server_endpoint = os.environ["VF_IGNI_ENDPOINT"]
            if "VF_IGNI_API_KEY" not in os.environ:
                raise Exception("VF_IGNI_API_KEY must be set")
            api_key = os.environ["VF_IGNI_API_KEY"]
            _global_cv2_server = vf.Server(server_endpoint, api_key)
        else:
            raise Exception(
                "No server set for the cv2 frontend (https://ixlab.github.io/vidformer/install.html). Set VF_IGNI_ENDPOINT and VF_IGNI_API_KEY environment variables or use cv2.set_server() before use."
            )
    return _global_cv2_server


def set_server(server):
    """Set the server to use for the cv2 frontend."""
    global _global_cv2_server
    assert isinstance(server, vf.Server)
    _global_cv2_server = server


def get_server():
    """Get the server used by the cv2 frontend."""
    return _server()
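
# A minimal sketch of explicit configuration, as an alternative to the
# environment variables (the endpoint and API key are placeholders):
#
#   import vidformer as vf
#   import vidformer.cv2 as cv2
#   cv2.set_server(vf.Server("http://localhost:8080", "my-api-key"))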

_PIX_FMT_MAP = {
    "rgb24": "rgb24",
    "yuv420p": "rgb24",
    "yuv422p": "rgb24",
    "yuv422p10le": "rgb24",
    "yuv444p": "rgb24",
    "yuvj420p": "rgb24",
    "yuvj422p": "rgb24",
    "yuvj444p": "rgb24",
    "gray": "gray",
}


def _top_level_pix_fmt(pix_fmt):
    if pix_fmt in _PIX_FMT_MAP:
        return _PIX_FMT_MAP[pix_fmt]
    raise Exception(f"Unsupported pix_fmt {pix_fmt}")

class Frame:
    def __init__(self, f, fmt):
        self._f = f
        self._fmt = fmt
        channels = 3 if _top_level_pix_fmt(fmt["pix_fmt"]) == "rgb24" else 1
        self.shape = (fmt["height"], fmt["width"], channels)

        # denotes that the frame has not yet been modified
        # when a frame is modified, it is converted to rgb24 first
        self._modified = False

    def _mut(self):
        if self._modified:
            assert self._fmt["pix_fmt"] in ["rgb24", "gray"]
            return

        self._modified = True
        if (
            self._fmt["pix_fmt"] != "rgb24"
            and _top_level_pix_fmt(self._fmt["pix_fmt"]) == "rgb24"
        ):
            self._f = _filter_scale(self._f, pix_fmt="rgb24")
            self._fmt["pix_fmt"] = "rgb24"
        elif (
            self._fmt["pix_fmt"] != "gray"
            and _top_level_pix_fmt(self._fmt["pix_fmt"]) == "gray"
        ):
            self._f = _filter_scale(self._f, pix_fmt="gray")
            self._fmt["pix_fmt"] = "gray"

    def copy(self):
        return Frame(self._f, self._fmt.copy())

    def numpy(self):
        """
        Return the frame as a numpy array.
        """

        self._mut()
        server = _server()
        frame = server.frame(
            self.shape[1], self.shape[0], self._fmt["pix_fmt"], self._f
        )
        assert type(frame) is bytes
        assert len(frame) == self.shape[0] * self.shape[1] * self.shape[2]
        raw_data_array = np.frombuffer(frame, dtype=np.uint8)
        frame = raw_data_array.reshape(self.shape)
        if self.shape[2] == 3:
            frame = frame[:, :, ::-1]  # convert RGB to BGR
        return frame
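
    # Illustrative use (sketch; assumes a VideoCapture from a configured
    # server): numpy() is what actually materializes pixels, so frames stay
    # lazy until it is called.
    #
    #   ret, frame = cap.read()
    #   arr = frame.numpy()  # np.ndarray of shape (height, width, channels), BGR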

    def __getitem__(self, key):
        if not isinstance(key, tuple):
            raise NotImplementedError("Only 2D slicing is supported")

        if len(key) != 2:
            raise NotImplementedError("Only 2D slicing is supported")

        if not all(isinstance(x, slice) for x in key):
            raise NotImplementedError("Only 2D slicing is supported")

        miny = key[0].start if key[0].start is not None else 0
        maxy = key[0].stop if key[0].stop is not None else self.shape[0]
        minx = key[1].start if key[1].start is not None else 0
        maxx = key[1].stop if key[1].stop is not None else self.shape[1]

        # handle negative indices
        if miny < 0:
            miny = self.shape[0] + miny
        if maxy < 0:
            maxy = self.shape[0] + maxy
        if minx < 0:
            minx = self.shape[1] + minx
        if maxx < 0:
            maxx = self.shape[1] + maxx

        if (
            maxy <= miny
            or maxx <= minx
            or miny < 0
            or minx < 0
            or maxy > self.shape[0]
            or maxx > self.shape[1]
        ):
            raise NotImplementedError("Invalid slice")

        f = _slice_mat(self._f, miny, maxy, minx, maxx)
        fmt = self._fmt.copy()
        fmt["width"] = maxx - minx
        fmt["height"] = maxy - miny
        return Frame(f, fmt)
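
    # Illustrative use (sketch): 2D slicing is lazy and returns a new Frame,
    # and slice assignment writes a same-shaped region back:
    #
    #   top_left = frame[0:100, 0:200]   # 100x200 crop
    #   frame[0:100, 0:200] = top_left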

    def __setitem__(self, key, value):
        if type(key) is tuple:
            value = frameify(value, "value")

            if len(key) != 2:
                raise NotImplementedError("Only 2D slicing is supported")

            if not all(isinstance(x, slice) for x in key):
                raise NotImplementedError("Only 2D slicing is supported")

            miny = key[0].start if key[0].start is not None else 0
            maxy = key[0].stop if key[0].stop is not None else self.shape[0]
            minx = key[1].start if key[1].start is not None else 0
            maxx = key[1].stop if key[1].stop is not None else self.shape[1]

            # handle negative indices
            if miny < 0:
                miny = self.shape[0] + miny
            if maxy < 0:
                maxy = self.shape[0] + maxy
            if minx < 0:
                minx = self.shape[1] + minx
            if maxx < 0:
                maxx = self.shape[1] + maxx

            if (
                maxy <= miny
                or maxx <= minx
                or miny < 0
                or minx < 0
                or maxy > self.shape[0]
                or maxx > self.shape[1]
            ):
                raise NotImplementedError("Invalid slice")

            if value.shape[0] != maxy - miny or value.shape[1] != maxx - minx:
                raise NotImplementedError("Shape mismatch")

            self._mut()
            value._mut()

            self._f = _slice_write_mat(self._f, value._f, miny, maxy, minx, maxx)
        elif type(key) is Frame or type(key) is np.ndarray:
            key = frameify(key, "key")

            if key.shape[0] != self.shape[0] or key.shape[1] != self.shape[1]:
                raise NotImplementedError("Shape mismatch")

            if key.shape[2] != 1:
                raise NotImplementedError("Only 1-channel mask frames are supported")

            # Value should be a bgr or bgra color
            if (type(value) is not list and type(value) is not tuple) or len(
                value
            ) not in [3, 4]:
                raise NotImplementedError(
                    "Value should be a 3 or 4 element list or tuple"
                )
            value = [float(x) for x in value]
            if len(value) == 3:
                value.append(255.0)

            self._mut()
            key._mut()

            self._f = _set_to(self._f, value, key._f)
        else:
            raise NotImplementedError(
                "__setitem__ only supports slicing by a 2d tuple or a mask frame"
            )

def _inline_frame(arr):
    if arr.dtype != np.uint8:
        raise Exception("Only uint8 arrays are supported")
    if len(arr.shape) != 3:
        raise Exception("Only 3D arrays are supported")
    if arr.shape[2] != 3:
        raise Exception("To inline a frame, the array must have 3 channels")

    arr = arr[:, :, ::-1]
    if not arr.flags["C_CONTIGUOUS"]:
        arr = np.ascontiguousarray(arr)

    width = arr.shape[1]
    height = arr.shape[0]
    pix_fmt = "rgb24"

    data_gzip = zlib.compress(memoryview(arr), level=1)

    f = _inline_mat(
        data_gzip, width=width, height=height, pix_fmt=pix_fmt, compression="zlib"
    )
    fmt = {"width": width, "height": height, "pix_fmt": pix_fmt}

    # Return the resulting Frame object
    return Frame(f, fmt)

def _check_opencv2(method_name):
    if _opencv2 is None:
        raise NotImplementedError(
            f"{method_name} requires python OpenCV cv2. Either it's not installed or the import failed (such as a missing libGL.so.1)."
        )

class VideoCapture:
    def __init__(self, path: str):
        server = _server()
        if type(path) is str:
            match = re.match(r"(http|https)://([^/]+)(.*)", path)
            if match is not None:
                endpoint = f"{match.group(1)}://{match.group(2)}"
                path = match.group(3)
                if path.startswith("/"):
                    path = path[1:]
                self._path = path
                self._source = server.source(path, 0, "http", {"endpoint": endpoint})
            else:
                self._path = path
                self._source = server.source(path, 0, "fs", {"root": "."})
        elif isinstance(path, vf.Source):
            assert isinstance(server, vf.Server)
            self._path = path._name
            self._source = path
        self._next_frame_idx = 0

    def isOpened(self) -> bool:
        return True

    def get(self, prop):
        if prop == CAP_PROP_FPS:
            return _ts_to_fps(self._source.ts())
        elif prop == CAP_PROP_FRAME_WIDTH:
            return self._source.fmt()["width"]
        elif prop == CAP_PROP_FRAME_HEIGHT:
            return self._source.fmt()["height"]
        elif prop == CAP_PROP_FRAME_COUNT:
            return len(self._source)
        elif prop == CAP_PROP_POS_FRAMES:
            return self._next_frame_idx
        elif prop == CAP_PROP_POS_MSEC:
            ts = self._source.ts()
            if self._next_frame_idx >= len(ts):
                # Past the end, return the last timestamp
                if len(ts) > 0:
                    return float(ts[-1] * 1000)
                return 0.0
            return float(ts[self._next_frame_idx] * 1000)

        raise Exception(f"Unknown property {prop}")

    def set(self, prop, value):
        if prop == CAP_PROP_POS_FRAMES:
            assert value >= 0 and value < len(self._source.ts())
            self._next_frame_idx = value
        elif prop == CAP_PROP_POS_MSEC:
            t = Fraction(value, 1000)
            ts = self._source.ts()
            next_frame_idx = bisect_right(ts, t)
            self._next_frame_idx = next_frame_idx
        else:
            raise Exception(f"Unsupported property {prop}")

    def read(self):
        if self._next_frame_idx >= len(self._source):
            return False, None
        frame = self._source.iloc[self._next_frame_idx]
        self._next_frame_idx += 1
        frame = Frame(frame, self._source.fmt())
        return True, frame

    def __getitem__(self, key):
        if not isinstance(key, int):
            raise NotImplementedError("Only integer indexing is supported")
        if key < 0:
            key = len(self._source) + key
        if key < 0 or key >= len(self._source):
            raise IndexError("Index out of bounds")
        frame = self._source.iloc[key]
        frame = Frame(frame, self._source.fmt())
        return frame

    def __len__(self):
        return len(self._source)

    def release(self):
        pass
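
# Illustrative use (sketch; the URL is a placeholder). Unlike OpenCV, captures
# also support random access via integer indexing:
#
#   cap = VideoCapture("https://example.com/video.mp4")
#   fps = cap.get(CAP_PROP_FPS)
#   ret, first = cap.read()
#   last = cap[-1]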

class VideoWriter:
    def __init__(
        self,
        path,
        fourcc,
        fps,
        size,
        batch_size=1024,
        compression="gzip",
        ttl=3600,
        pix_fmt="yuv420p",
        vod_segment_length=Fraction(2, 1),
    ):
        server = _server()
        assert isinstance(server, vf.Server)
        assert path is None or type(path) is str
        self._path = path
        if isinstance(fps, int):
            self._f_time = Fraction(1, fps)
        elif isinstance(fps, Fraction):
            self._f_time = 1 / fps
        elif isinstance(fps, float):
            # 29.97
            if abs(fps - 30000 / 1001) < 1e-6:
                self._f_time = Fraction(1001, 30000)
            # 23.976
            elif abs(fps - 24000 / 1001) < 1e-6:
                self._f_time = Fraction(1001, 24000)
            # 59.94
            elif abs(fps - 60000 / 1001) < 1e-6:
                self._f_time = Fraction(1001, 60000)
            else:
                # Round to nearest integer fps
                self._f_time = Fraction(1, int(round(fps)))
        else:
            raise Exception("fps must be an integer, float, or Fraction")

        assert isinstance(size, tuple) or isinstance(size, list)
        assert len(size) == 2
        width, height = size
        assert ttl is None or isinstance(ttl, int)
        self._spec = server.create_spec(
            width, height, pix_fmt, vod_segment_length, 1 / self._f_time, ttl=ttl
        )
        self._batch_size = batch_size
        assert compression is None or compression in ["gzip"]
        self._compression = compression
        self._idx = 0
        self._feb = vf._FrameExpressionBlock()

    def _flush(self, terminal=False):
        server = _server()
        if len(self._feb) > 0:
            server.push_spec_part_block(
                self._spec,
                self._idx - len(self._feb),
                [self._feb],
                terminal=terminal,
                compression=self._compression,
            )
            self._feb = vf._FrameExpressionBlock()
        else:
            server.push_spec_part_block(
                self._spec,
                self._idx - len(self._feb),
                [],
                terminal=terminal,
            )

    def spec(self):
        return self._spec

    def write(self, frame):
        if frame is not None:
            frame = frameify(frame, "frame")
            if frame._fmt["width"] != self._spec._fmt["width"]:
                raise Exception(
                    f"Frame type error; expected width {self._spec._fmt['width']}, got {frame._fmt['width']}"
                )
            if frame._fmt["height"] != self._spec._fmt["height"]:
                raise Exception(
                    f"Frame type error; expected height {self._spec._fmt['height']}, got {frame._fmt['height']}"
                )
            if frame._fmt["pix_fmt"] != self._spec._fmt["pix_fmt"]:
                f_obj = _filter_scale(frame._f, pix_fmt=self._spec._fmt["pix_fmt"])
                frame = Frame(f_obj, self._spec._fmt)
        self._feb.insert_frame(frame._f if frame is not None else None)
        self._idx += 1

        if len(self._feb) >= self._batch_size:
            self._flush()

    def release(self):
        self._flush(True)
        if self._path is not None:
            server = _server()
            server.export_spec(self._spec.id(), self._path)
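
# Illustrative use (sketch; "out.mp4" is a placeholder, and the fourcc
# argument is unused, kept only for cv2 API compatibility). size is
# (width, height):
#
#   out = VideoWriter("out.mp4", None, 30, (1280, 720))
#   for _ in range(30):
#       out.write(zeros((720, 1280, 3)))
#   out.release()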

class VideoWriter_fourcc:
    def __init__(self, *args):
        self._args = args


def frameify(obj, field_name=None):
    """
    Turn an object (e.g., ndarray) into a Frame.
    """

    if isinstance(obj, Frame):
        return obj
    elif isinstance(obj, np.ndarray):
        return _inline_frame(obj)
    else:
        if field_name is not None:
            raise Exception(
                f"Unsupported type for field {field_name}, expected Frame or np.ndarray"
            )
        else:
            raise Exception("Unsupported type, expected Frame or np.ndarray")

def imread(path, *args):
    if len(args) > 0:
        raise NotImplementedError("imread does not support additional arguments")
    assert path.lower().endswith((".jpg", ".jpeg", ".png"))
    server = _server()

    cap = VideoCapture(path)
    assert cap.isOpened()
    assert len(cap._source) == 1
    ret, frame = cap.read()
    assert ret
    cap.release()
    return frame

def imwrite(path, img, *args):
    if len(args) > 0:
        raise NotImplementedError("imwrite does not support additional arguments")

    img = frameify(img)
    fmt = img._fmt.copy()
    width = fmt["width"]
    height = fmt["height"]

    if path.lower().endswith(".png"):
        out_pix_fmt = "rgb24"
        encoder = "png"
    elif path.lower().endswith((".jpg", ".jpeg")):
        encoder = "mjpeg"
        if img._fmt["pix_fmt"] not in ["yuvj420p", "yuvj422p", "yuvj444p"]:
            out_pix_fmt = "yuvj420p"
        else:
            out_pix_fmt = img._fmt["pix_fmt"]
    else:
        raise Exception("Unsupported image format")

    if img._fmt["pix_fmt"] != out_pix_fmt:
        f = _filter_scale(img._f, pix_fmt=out_pix_fmt)
        img = Frame(f, {"width": width, "height": height, "pix_fmt": out_pix_fmt})

    writer = VideoWriter(None, None, 1, (width, height), pix_fmt=out_pix_fmt)
    writer.write(img)
    writer.release()

    spec = writer.spec()
    server = _server()
    server.export_spec(spec.id(), path, encoder=encoder)
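
# Illustrative use (sketch; file names are placeholders). imwrite re-encodes
# through a single-frame, 1 fps spec:
#
#   img = imread("frame.png")
#   imwrite("frame.jpg", img)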

def vidplay(video, method="html"):
    """
    Play a vidformer video specification.
    """
    if isinstance(video, VideoWriter):
        return video.spec().play(method=method)
    elif isinstance(video, vf.Spec):
        return video.play(method=method)
    else:
        raise Exception("Unsupported video type to vidplay")

def zeros(shape, dtype=np.uint8):
    """
    Create a black frame. Mimics numpy.zeros.
    """
    assert isinstance(shape, tuple) or isinstance(shape, list)
    assert len(shape) == 3
    assert shape[2] in [1, 3]
    assert dtype == np.uint8

    height, width, channels = shape
    if channels == 1:
        pix_fmt = "gray"
    else:
        pix_fmt = "rgb24"

    f = _black(width=width, height=height, pix_fmt=pix_fmt)
    fmt = {"width": width, "height": height, "pix_fmt": pix_fmt}
    return Frame(f, fmt)
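
# Illustrative use (sketch): shapes are (height, width, channels), as in numpy:
#
#   canvas = zeros((480, 640, 3))   # 640x480 black rgb24 frame
#   mask = zeros((480, 640, 1))     # single-channel gray frame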

def resize(src, dsize):
    src = frameify(src)
    src._mut()

    assert isinstance(dsize, tuple) or isinstance(dsize, list)
    assert len(dsize) == 2
    width, height = dsize

    f = _filter_scale(src._f, width=width, height=height)
    fmt = {"width": width, "height": height, "pix_fmt": src._fmt["pix_fmt"]}
    return Frame(f, fmt)

def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
    """
    cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )
    """

    img = frameify(img)
    img._mut()

    assert len(pt1) == 2
    assert len(pt2) == 2
    pt1 = [int(x) for x in pt1]
    pt2 = [int(x) for x in pt2]

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if shift is not None:
        assert isinstance(shift, int)
        assert lineType is not None
        args.append(shift)

    img._f = _filter_rectangle(img._f, pt1, pt2, color, *args)

def putText(
    img,
    text,
    org,
    fontFace,
    fontScale,
    color,
    thickness=None,
    lineType=None,
    bottomLeftOrigin=None,
):
    """
    cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
    """

    img = frameify(img)
    img._mut()

    assert isinstance(text, str)

    assert len(org) == 2
    org = [int(x) for x in org]

    assert isinstance(fontFace, int)
    assert isinstance(fontScale, float) or isinstance(fontScale, int)
    fontScale = float(fontScale)

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if bottomLeftOrigin is not None:
        assert isinstance(bottomLeftOrigin, bool)
        assert lineType is not None
        args.append(bottomLeftOrigin)

    img._f = _filter_putText(img._f, text, org, fontFace, fontScale, color, *args)
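
# Illustrative use (sketch): drawing functions mutate the Frame's expression
# in place, mirroring OpenCV's in-place drawing API:
#
#   rectangle(frame, (10, 10), (200, 80), (0, 255, 0), 2)
#   putText(frame, "label", (15, 60), FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)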

def arrowedLine(
    img, pt1, pt2, color, thickness=None, line_type=None, shift=None, tipLength=None
):
    """
    cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] )
    """
    img = frameify(img)
    img._mut()

    assert len(pt1) == 2
    assert len(pt2) == 2
    assert all(isinstance(x, int) for x in pt1)
    assert all(isinstance(x, int) for x in pt2)

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if line_type is not None:
        assert isinstance(line_type, int)
        assert thickness is not None
        args.append(line_type)
    if shift is not None:
        assert isinstance(shift, int)
        assert line_type is not None
        args.append(shift)
    if tipLength is not None:
        assert isinstance(tipLength, float)
        assert shift is not None
        args.append(tipLength)

    img._f = _filter_arrowedLine(img._f, pt1, pt2, color, *args)

def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
    img = frameify(img)
    img._mut()

    assert len(pt1) == 2
    assert len(pt2) == 2
    pt1 = [int(x) for x in pt1]
    pt2 = [int(x) for x in pt2]

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if shift is not None:
        assert isinstance(shift, int)
        assert lineType is not None
        args.append(shift)

    img._f = _filter_line(img._f, pt1, pt2, color, *args)

def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):
    img = frameify(img)
    img._mut()

    assert len(center) == 2
    center = [int(x) for x in center]

    assert isinstance(radius, int)

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if shift is not None:
        assert isinstance(shift, int)
        assert lineType is not None
        args.append(shift)

    img._f = _filter_circle(img._f, center, radius, color, *args)

def getFontScaleFromHeight(*args, **kwargs):
    """
    cv.getFontScaleFromHeight( fontFace, pixelHeight[, thickness] )
    """
    _check_opencv2("getFontScaleFromHeight")
    return _opencv2.getFontScaleFromHeight(*args, **kwargs)


def getTextSize(*args, **kwargs):
    """
    cv.getTextSize( text, fontFace, fontScale, thickness )
    """
    _check_opencv2("getTextSize")
    return _opencv2.getTextSize(*args, **kwargs)

def addWeighted(src1, alpha, src2, beta, gamma, dst=None, dtype=-1):
    """
    cv.addWeighted( src1, alpha, src2, beta, gamma[, dst[, dtype]] ) -> dst
    """
    src1 = frameify(src1, "src1")
    src2 = frameify(src2, "src2")
    src1._mut()
    src2._mut()

    if dst is None:
        dst = Frame(src1._f, src1._fmt.copy())
    else:
        assert isinstance(dst, Frame), "dst must be a Frame"
        dst._mut()

    assert isinstance(alpha, float) or isinstance(alpha, int)
    assert isinstance(beta, float) or isinstance(beta, int)
    assert isinstance(gamma, float) or isinstance(gamma, int)
    alpha = float(alpha)
    beta = float(beta)
    gamma = float(gamma)

    if dtype != -1:
        raise Exception("addWeighted does not support the dtype argument")

    dst._f = _filter_addWeighted(src1._f, alpha, src2._f, beta, gamma)
    return dst
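
# Illustrative use (sketch): computes alpha*src1 + beta*src2 + gamma, e.g. a
# 70/30 blend of two same-sized frames:
#
#   blended = addWeighted(frame_a, 0.7, frame_b, 0.3, 0.0)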

def ellipse(
    img,
    center,
    axes,
    angle,
    startAngle,
    endAngle,
    color,
    thickness=1,
    lineType=LINE_8,
    shift=0,
):
    img = frameify(img)
    img._mut()

    assert len(center) == 2
    center = [int(x) for x in center]

    assert len(axes) == 2
    axes = [int(x) for x in axes]

    assert isinstance(angle, float) or isinstance(angle, int)
    assert isinstance(startAngle, float) or isinstance(startAngle, int)
    assert isinstance(endAngle, float) or isinstance(endAngle, int)
    angle = float(angle)
    startAngle = float(startAngle)
    endAngle = float(endAngle)

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)

    assert isinstance(thickness, int)
    assert isinstance(lineType, int)
    assert isinstance(shift, int)

    img._f = _filter_ellipse(
        img._f,
        center,
        axes,
        angle,
        startAngle,
        endAngle,
        color,
        thickness,
        lineType,
        shift,
    )

# Stubs for unimplemented functions


def clipLine(*args, **kwargs):
    raise NotImplementedError("clipLine is not yet implemented in the cv2 frontend")


def drawContours(*args, **kwargs):
    raise NotImplementedError("drawContours is not yet implemented in the cv2 frontend")


def drawMarker(*args, **kwargs):
    raise NotImplementedError("drawMarker is not yet implemented in the cv2 frontend")


def ellipse2Poly(*args, **kwargs):
    raise NotImplementedError("ellipse2Poly is not yet implemented in the cv2 frontend")


def fillConvexPoly(*args, **kwargs):
    raise NotImplementedError(
        "fillConvexPoly is not yet implemented in the cv2 frontend"
    )


def fillPoly(*args, **kwargs):
    raise NotImplementedError("fillPoly is not yet implemented in the cv2 frontend")


def polylines(*args, **kwargs):
    raise NotImplementedError("polylines is not yet implemented in the cv2 frontend")