vidformer 0.6.0__tar.gz → 0.6.2__tar.gz
- vidformer-0.6.2/PKG-INFO +36 -0
- vidformer-0.6.2/README.md +19 -0
- vidformer-0.6.2/cv2-functions.md +59 -0
- {vidformer-0.6.0 → vidformer-0.6.2}/pyproject.toml +2 -1
- {vidformer-0.6.0 → vidformer-0.6.2}/vidformer/__init__.py +1 -1
- vidformer-0.6.2/vidformer/cv2/vf_cv2.py +538 -0
- {vidformer-0.6.0 → vidformer-0.6.2}/vidformer/vf.py +58 -11
- vidformer-0.6.0/PKG-INFO +0 -24
- vidformer-0.6.0/README.md +0 -8
- vidformer-0.6.0/vidformer/cv2/vf_cv2.py +0 -236
- {vidformer-0.6.0 → vidformer-0.6.2}/vidformer/cv2/__init__.py +0 -0
vidformer-0.6.2/PKG-INFO
ADDED
@@ -0,0 +1,36 @@
+Metadata-Version: 2.1
+Name: vidformer
+Version: 0.6.2
+Summary: A Python library for creating and viewing videos with vidformer.
+Author-email: Dominik Winecki <dominikwinecki@gmail.com>
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+Requires-Dist: requests
+Requires-Dist: msgpack
+Requires-Dist: numpy
+Project-URL: Documentation, https://ixlab.github.io/vidformer/vidformer-py/
+Project-URL: Homepage, https://ixlab.github.io/vidformer/
+Project-URL: Issues, https://github.com/ixlab/vidformer/issues
+
+# vidformer-py
+
+[![PyPI version](https://img.shields.io/pypi/v/vidformer.svg)](https://pypi.org/project/vidformer/)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/ixlab/vidformer/blob/main/LICENSE)
+
+vidformer-py is a Python 🐍 interface for [vidformer](https://github.com/ixlab/vidformer).
+Our [getting started guide](https://ixlab.github.io/vidformer/getting-started.html) explains how to use it.
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑‍💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+
+**Publish:**
+```bash
+export FLIT_USERNAME='__token__' FLIT_PASSWORD='<token>'
+flit publish
+```
+
vidformer-0.6.2/README.md
ADDED
@@ -0,0 +1,19 @@
+# vidformer-py
+
+[![PyPI version](https://img.shields.io/pypi/v/vidformer.svg)](https://pypi.org/project/vidformer/)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/ixlab/vidformer/blob/main/LICENSE)
+
+vidformer-py is a Python 🐍 interface for [vidformer](https://github.com/ixlab/vidformer).
+Our [getting started guide](https://ixlab.github.io/vidformer/getting-started.html) explains how to use it.
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑‍💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+
+**Publish:**
+```bash
+export FLIT_USERNAME='__token__' FLIT_PASSWORD='<token>'
+flit publish
+```
vidformer-0.6.2/cv2-functions.md
ADDED
@@ -0,0 +1,59 @@
+# OpenCV/cv2 Functions
+
+See [vidformer.cv2 API docs](https://ixlab.github.io/vidformer/vidformer-py-cv2/).
+
+> ⚠️ The `cv2` module is a work in progress. If you find a bug or need a missing feature implemented feel free to [file an issue](https://github.com/ixlab/vidformer/issues) or contribute yourself!
+
+Legend:
+* ✅ - Support
+* 🔸 - Support via OpenCV cv2
+* ❌ - Not yet implemented
+
+## Vidformer-specific Functions
+
+* `cv2.vidplay(video2)` - Play a VideoWriter, Spec, or Source
+* `VideoWriter.spec()` - Return the Spec of an output video
+* `Frame.numpy()` - Return the frame as a numpy array
+* `cv2.setTo` - The OpenCV `Mat.setTo` function (not in cv2)
+
+## opencv
+
+|**Class**|**Status**|
+|---|---|
+|VideoCapture|✅|
+|VideoWriter|✅|
+|VideoWriter_fourcc|✅|
+
+|**Function**|**Status**|
+|---|---|
+|imread|✅|
+|imwrite|✅|
+
+
+## opencv.imgproc
+
+Drawing Functions:
+
+|**Function**|**Status**|
+|---|---|
+|arrowedLine|✅|
+|circle|✅|
+|clipLine|❌|
+|drawContours|❌|
+|drawMarker|❌|
+|ellipse|❌|
+|ellipse2Poly|❌|
+|fillConvexPoly|❌|
+|fillPoly|❌|
+|getFontScaleFromHeight|🔸|
+|getTextSize|🔸|
+|line|✅|
+|polylines|❌|
+|putText|✅|
+|rectangle|✅|
+
+## opencv.core
+
+|**Function**|**Status**|
+|---|---|
+|addWeighted|✅|
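To make the table above concrete, here is a minimal sketch of how the OpenCV-compatible calls and the vidformer-specific extras compose, based on the `vidformer.cv2` module added in this release. The input and output paths are placeholders, and a Yrden server is assumed to be reachable (for example through the `VIDFORMER_BIN` environment variable).

```python
import vidformer.cv2 as cv2

# Open a source video (placeholder path) exactly as with OpenCV.
cap = cv2.VideoCapture("example.mp4")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

out = cv2.VideoWriter("annotated.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # Drawing calls mutate the symbolic frame expression, not pixel buffers.
    cv2.rectangle(frame, (50, 50), (250, 150), (0, 255, 0), 2)
    out.write(frame)

cap.release()
out.release()  # renders and saves annotated.mp4

# Vidformer-specific extras from the list above:
spec = out.spec()   # the declarative Spec behind the output video
cv2.vidplay(out)    # play the result directly
```

The only deviation from plain OpenCV here is the last two lines; everything above them is the usual capture/annotate/write loop.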
{vidformer-0.6.0 → vidformer-0.6.2}/pyproject.toml
@@ -18,7 +18,8 @@ dependencies = [
 
 [project.urls]
 Homepage = "https://ixlab.github.io/vidformer/"
-Issues = "https://ixlab.github.io/vidformer/issues"
+Documentation = "https://ixlab.github.io/vidformer/vidformer-py/"
+Issues = "https://github.com/ixlab/vidformer/issues"
 
 
 [build-system]
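The added `Documentation` and `Issues` entries are what produce the corresponding `Project-URL` lines in the 0.6.2 PKG-INFO above. As a quick sanity check (plain standard-library code, not part of vidformer), the published metadata can be read back from an installed copy:

```python
from importlib.metadata import metadata

meta = metadata("vidformer")
print(meta["Version"])  # e.g. 0.6.2
for url in meta.get_all("Project-URL") or []:
    print(url)  # "Documentation, https://..." / "Homepage, ..." / "Issues, ..."
```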
vidformer-0.6.2/vidformer/cv2/vf_cv2.py
ADDED
@@ -0,0 +1,538 @@
+"""
+vidformer.cv2 is the cv2 frontend for [vidformer](https://github.com/ixlab/vidformer).
+
+> ⚠️ This module is a work in progress. See the [implemented functions list](https://ixlab.github.io/vidformer/opencv-filters.html).
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑‍💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+"""
+
+from .. import vf
+
+try:
+    import cv2 as _opencv2
+except:
+    _opencv2 = None
+
+import numpy as np
+
+import uuid
+from fractions import Fraction
+from bisect import bisect_right
+
+CAP_PROP_POS_MSEC = 0
+CAP_PROP_POS_FRAMES = 1
+CAP_PROP_FRAME_WIDTH = 3
+CAP_PROP_FRAME_HEIGHT = 4
+CAP_PROP_FPS = 5
+CAP_PROP_FRAME_COUNT = 7
+
+FONT_HERSHEY_SIMPLEX = 0
+FONT_HERSHEY_PLAIN = 1
+FONT_HERSHEY_DUPLEX = 2
+FONT_HERSHEY_COMPLEX = 3
+FONT_HERSHEY_TRIPLEX = 4
+FONT_HERSHEY_COMPLEX_SMALL = 5
+FONT_HERSHEY_SCRIPT_SIMPLEX = 6
+FONT_HERSHEY_SCRIPT_COMPLEX = 7
+FONT_ITALIC = 16
+
+FILLED = -1
+LINE_4 = 4
+LINE_8 = 8
+LINE_AA = 16
+
+_filter_scale = vf.Filter("Scale")
+_filter_rectangle = vf.Filter("cv2.rectangle")
+_filter_putText = vf.Filter("cv2.putText")
+_filter_arrowedLine = vf.Filter("cv2.arrowedLine")
+_filter_line = vf.Filter("cv2.line")
+_filter_circle = vf.Filter("cv2.circle")
+_filter_addWeighted = vf.Filter("cv2.addWeighted")
+
+
+def _ts_to_fps(timestamps):
+    return int(1 / (timestamps[1] - timestamps[0]))  # TODO: Fix for non-integer fps
+
+
+def _fps_to_ts(fps, n_frames):
+    assert type(fps) == int
+    return [Fraction(i, fps) for i in range(n_frames)]
+
+
+_global_cv2_server = None
+
+
+def _server():
+    global _global_cv2_server
+    if _global_cv2_server is None:
+        _global_cv2_server = vf.YrdenServer()
+    return _global_cv2_server
+
+
+def set_cv2_server(server: vf.YrdenServer):
+    """Set the server to use for the cv2 frontend."""
+    global _global_cv2_server
+    assert isinstance(server, vf.YrdenServer)
+    _global_cv2_server = server
+
+
+class Frame:
+    def __init__(self, f, fmt):
+        self._f = f
+        self._fmt = fmt
+        self.shape = (fmt["height"], fmt["width"], 3)
+
+        # denotes that the frame has not yet been modified
+        # when a frame is modified, it is converted to rgb24 first
+        self._modified = False
+
+    def _mut(self):
+        self._modified = True
+        self._f = _filter_scale(self._f, pix_fmt="rgb24")
+        self._fmt["pix_fmt"] = "rgb24"
+
+    def numpy(self):
+        """
+        Return the frame as a numpy array.
+        """
+
+        self._mut()
+        spec = vf.Spec([Fraction(0, 1)], lambda t, i: self._f, self._fmt)
+        loader = spec.load(_server())
+
+        frame_raster_rgb24 = loader[0]
+        assert type(frame_raster_rgb24) == bytes
+        assert len(frame_raster_rgb24) == self.shape[0] * self.shape[1] * 3
+        raw_data_array = np.frombuffer(frame_raster_rgb24, dtype=np.uint8)
+        frame = raw_data_array.reshape(self.shape)
+        return frame
+
+
+class VideoCapture:
+    def __init__(self, path):
+        self._path = path
+        server = _server()
+        self._source = vf.Source(server, str(uuid.uuid4()), path, 0)
+        self._next_frame_idx = 0
+
+    def isOpened(self):
+        return True
+
+    def get(self, prop):
+        if prop == CAP_PROP_FPS:
+            return _ts_to_fps(self._source.ts())
+        elif prop == CAP_PROP_FRAME_WIDTH:
+            return self._source.fmt()["width"]
+        elif prop == CAP_PROP_FRAME_HEIGHT:
+            return self._source.fmt()["height"]
+        elif prop == CAP_PROP_FRAME_COUNT:
+            return len(self._source.ts())
+        elif prop == CAP_PROP_POS_FRAMES:
+            return self._next_frame_idx
+
+        raise Exception(f"Unknown property {prop}")
+
+    def set(self, prop, value):
+        if prop == CAP_PROP_POS_FRAMES:
+            assert value >= 0 and value < len(self._source.ts())
+            self._next_frame_idx = value
+        elif prop == CAP_PROP_POS_MSEC:
+            t = Fraction(value, 1000)
+            ts = self._source.ts()
+            next_frame_idx = bisect_right(ts, t)
+            self._next_frame_idx = next_frame_idx
+        else:
+            raise Exception(f"Unsupported property {prop}")
+
+    def read(self):
+        if self._next_frame_idx >= len(self._source.ts()):
+            return False, None
+        frame = self._source.iloc[self._next_frame_idx]
+        self._next_frame_idx += 1
+        frame = Frame(frame, self._source.fmt())
+        return True, frame
+
+    def release(self):
+        pass
+
+
+class VideoWriter:
+    def __init__(self, path, fourcc, fps, size):
+        assert isinstance(fourcc, VideoWriter_fourcc)
+        if path is not None and not isinstance(path, str):
+            raise Exception("path must be a string or None")
+        self._path = path
+        self._fourcc = fourcc
+        self._fps = fps
+        self._size = size
+
+        self._frames = []
+        self._pix_fmt = "yuv420p"
+
+    def write(self, frame):
+        if not isinstance(frame, Frame):
+            raise Exception("frame must be a vidformer.cv2.Frame object")
+        if frame._modified:
+            f_obj = _filter_scale(frame._f, pix_fmt=self._pix_fmt)
+            self._frames.append(f_obj)
+        else:
+            self._frames.append(frame._f)
+
+    def release(self):
+        if self._path is None:
+            return
+
+        spec = self.spec()
+        server = _server()
+        spec.save(server, self._path)
+
+    def spec(self) -> vf.Spec:
+        fmt = {
+            "width": self._size[0],
+            "height": self._size[1],
+            "pix_fmt": self._pix_fmt,
+        }
+        domain = _fps_to_ts(self._fps, len(self._frames))
+        spec = vf.Spec(domain, lambda t, i: self._frames[i], fmt)
+        return spec
+
+
+class VideoWriter_fourcc:
+    def __init__(self, *args):
+        self._args = args
+
+
+def imread(path, *args):
+    if len(args) > 0:
+        raise NotImplementedError("imread does not support additional arguments")
+
+    assert path.lower().endswith((".jpg", ".jpeg", ".png"))
+    server = _server()
+    source = vf.Source(server, str(uuid.uuid4()), path, 0)
+    frame = Frame(source.iloc[0], source.fmt())
+    return frame
+
+
+def imwrite(path, img, *args):
+    if len(args) > 0:
+        raise NotImplementedError("imwrite does not support additional arguments")
+
+    if not isinstance(img, Frame):
+        raise Exception("img must be a vidformer.cv2.Frame object")
+
+    fmt = img._fmt.copy()
+    width = fmt["width"]
+    height = fmt["height"]
+    f = img._f
+
+    domain = [Fraction(0, 1)]
+
+    if path.lower().endswith(".png"):
+        img._mut()  # Make sure it's in rgb24
+        spec = vf.Spec(
+            domain,
+            lambda t, i: img._f,
+            {"width": width, "height": height, "pix_fmt": "rgb24"},
+        )
+        spec.save(_server(), path, encoder="png")
+    elif path.lower().endswith((".jpg", ".jpeg")):
+        if img._modified:
+            # it's rgb24, we need to convert to something jpeg can handle
+            f = _filter_scale(img._f, pix_fmt="yuv420p")
+            fmt["pix_fmt"] = "yuv420p"
+        else:
+            if fmt["pix_fmt"] not in ["yuvj420p", "yuvj422p", "yuvj444p"]:
+                f = _filter_scale(img._f, pix_fmt="yuvj420p")
+                fmt["pix_fmt"] = "yuvj420p"
+
+        spec = vf.Spec(domain, lambda t, i: f, fmt)
+        spec.save(_server(), path, encoder="mjpeg")
+    else:
+        raise Exception("Unsupported image format")
+
+
+def vidplay(video, *args, **kwargs):
+    """
+    Play a vidformer video specification.
+
+    Args:
+        video: one of [vidformer.Spec, vidformer.Source, vidformer.cv2.VideoWriter]
+    """
+
+    if isinstance(video, vf.Spec):
+        return video.play(_server(), *args, **kwargs)
+    elif isinstance(video, vf.Source):
+        return video.play(_server(), *args, **kwargs)
+    elif isinstance(video, VideoWriter):
+        return video.spec().play(_server(), *args, **kwargs)
+    else:
+        raise Exception("Unsupported video type to vidplay")
+
+
+def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
+    """
+    cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )
+    """
+
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(pt1) == 2
+    assert len(pt2) == 2
+    assert all(isinstance(x, int) for x in pt1)
+    assert all(isinstance(x, int) for x in pt2)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if lineType is not None:
+        assert isinstance(lineType, int)
+        assert thickness is not None
+        args.append(lineType)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+
+    img._f = _filter_rectangle(img._f, pt1, pt2, color, *args)
+
+
+def putText(
+    img,
+    text,
+    org,
+    fontFace,
+    fontScale,
+    color,
+    thickness=None,
+    lineType=None,
+    bottomLeftOrigin=None,
+):
+    """
+    cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
+    """
+
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert isinstance(text, str)
+
+    assert len(org) == 2
+    assert all(isinstance(x, int) for x in org)
+
+    assert isinstance(fontFace, int)
+    assert isinstance(fontScale, float) or isinstance(fontScale, int)
+    fontScale = float(fontScale)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if lineType is not None:
+        assert isinstance(lineType, int)
+        assert thickness is not None
+        args.append(lineType)
+    if bottomLeftOrigin is not None:
+        assert isinstance(bottomLeftOrigin, bool)
+        assert lineType is not None
+        args.append(bottomLeftOrigin)
+
+    img._f = _filter_putText(img._f, text, org, fontFace, fontScale, color, *args)
+
+
+def arrowedLine(
+    img, pt1, pt2, color, thickness=None, line_type=None, shift=None, tipLength=None
+):
+    """
+    cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] )
+    """
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(pt1) == 2
+    assert len(pt2) == 2
+    assert all(isinstance(x, int) for x in pt1)
+    assert all(isinstance(x, int) for x in pt2)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if line_type is not None:
+        assert isinstance(line_type, int)
+        assert thickness is not None
+        args.append(line_type)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+    if tipLength is not None:
+        assert isinstance(tipLength, float)
+        assert shift is not None
+        args.append(tipLength)
+
+    img._f = _filter_arrowedLine(img._f, pt1, pt2, color, *args)
+
+
+def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(pt1) == 2
+    assert len(pt2) == 2
+    assert all(isinstance(x, int) for x in pt1)
+    assert all(isinstance(x, int) for x in pt2)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if lineType is not None:
+        assert isinstance(lineType, int)
+        assert thickness is not None
+        args.append(lineType)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+
+    img._f = _filter_line(img._f, pt1, pt2, color, *args)
+
+
+def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(center) == 2
+    assert all(isinstance(x, int) for x in center)
+
+    assert isinstance(radius, int)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if lineType is not None:
+        assert isinstance(lineType, int)
+        assert thickness is not None
+        args.append(lineType)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+
+    img._f = _filter_circle(img._f, center, radius, color, *args)
+
+
+def getFontScaleFromHeight(*args, **kwargs):
+    """
+    cv.getFontScaleFromHeight( fontFace, pixelHeight[, thickness] )
+    """
+    if _opencv2 is None:
+        raise NotImplementedError("getFontScaleFromHeight requires the cv2 module")
+    return _opencv2.getFontScaleFromHeight(*args, **kwargs)
+
+
+def getTextSize(*args, **kwargs):
+    """
+    cv.getTextSize( text, fontFace, fontScale, thickness )
+    """
+    if _opencv2 is None:
+        raise NotImplementedError("getTextSize requires the cv2 module")
+    return _opencv2.getTextSize(*args, **kwargs)
+
+
+def addWeighted(src1, alpha, src2, beta, gamma, dst=None, dtype=-1):
+    """
+    cv.addWeighted( src1, alpha, src2, beta, gamma[, dst[, dtype]] ) -> dst
+    """
+    assert isinstance(src1, Frame)
+    assert isinstance(src2, Frame)
+    src1._mut()
+    src2._mut()
+
+    if dst is None:
+        dst = Frame(src1._f, src1._fmt.copy())
+    else:
+        assert isinstance(dst, Frame)
+        dst._mut()
+
+    assert isinstance(alpha, float) or isinstance(alpha, int)
+    assert isinstance(beta, float) or isinstance(beta, int)
+    assert isinstance(gamma, float) or isinstance(gamma, int)
+    alpha = float(alpha)
+    beta = float(beta)
+    gamma = float(gamma)
+
+    if dtype != -1:
+        raise Exception("addWeighted does not support the dtype argument")
+
+    dst._f = _filter_addWeighted(src1._f, alpha, src2._f, beta, gamma)
+    return dst
+
+
+# Stubs for unimplemented functions
+
+
+def clipLine(*args, **kwargs):
+    raise NotImplementedError("clipLine is not yet implemented in the cv2 frontend")
+
+
+def drawContours(*args, **kwargs):
+    raise NotImplementedError("drawContours is not yet implemented in the cv2 frontend")
+
+
+def drawMarker(*args, **kwargs):
+    raise NotImplementedError("drawMarker is not yet implemented in the cv2 frontend")
+
+
+def ellipse(*args, **kwargs):
+    raise NotImplementedError("ellipse is not yet implemented in the cv2 frontend")
+
+
+def ellipse2Poly(*args, **kwargs):
+    raise NotImplementedError("ellipse2Poly is not yet implemented in the cv2 frontend")
+
+
+def fillConvexPoly(*args, **kwargs):
+    raise NotImplementedError(
+        "fillConvexPoly is not yet implemented in the cv2 frontend"
+    )
+
+
+def fillPoly(*args, **kwargs):
+    raise NotImplementedError("fillPoly is not yet implemented in the cv2 frontend")
+
+
+def polylines(*args, **kwargs):
+    raise NotImplementedError("polylines is not yet implemented in the cv2 frontend")
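Besides the drop-in `VideoCapture`/`VideoWriter` classes, the new module adds image I/O (`imread`/`imwrite`), `Frame.numpy()`, and `addWeighted`. A rough sketch of those entry points under the same assumptions as before (placeholder file names, a locally reachable Yrden server):

```python
import vidformer as vf
import vidformer.cv2 as cv2

# Optional: point the cv2 frontend at an explicit server instead of the
# implicit global one it would otherwise create on first use.
cv2.set_cv2_server(vf.YrdenServer())

a = cv2.imread("background.png")   # placeholder image paths
b = cv2.imread("overlay.jpg")

blended = cv2.addWeighted(a, 0.7, b, 0.3, 0.0)
cv2.putText(blended, "hello", (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)

arr = blended.numpy()              # materialize as a (height, width, 3) uint8 array
print(arr.shape)

cv2.imwrite("result.jpg", blended)
```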
{vidformer-0.6.0 → vidformer-0.6.2}/vidformer/vf.py
@@ -1,3 +1,13 @@
+"""
+vidformer-py is a Python 🐍 interface for [vidformer](https://github.com/ixlab/vidformer).
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑‍💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+"""
+
 import subprocess
 from fractions import Fraction
 import random
@@ -43,19 +53,40 @@ def _check_hls_link_exists(url, max_attempts=150, delay=0.1):
 
 
 class Spec:
+    """
+    A video transformation specification.
+
+    See https://ixlab.github.io/vidformer/concepts.html for more information.
+    """
+
     def __init__(self, domain: list[Fraction], render, fmt: dict):
         self._domain = domain
         self._render = render
         self._fmt = fmt
 
     def __repr__(self):
-
-
-
-
-
-
-
+        if len(self._domain) <= 20:
+            lines = []
+            for i, t in enumerate(self._domain):
+                frame_expr = self._render(t, i)
+                lines.append(
+                    f"{t.numerator}/{t.denominator} => {frame_expr}",
+                )
+            return "\n".join(lines)
+        else:
+            lines = []
+            for i, t in enumerate(self._domain[:10]):
+                frame_expr = self._render(t, i)
+                lines.append(
+                    f"{t.numerator}/{t.denominator} => {frame_expr}",
+                )
+            lines.append("...")
+            for i, t in enumerate(self._domain[-10:]):
+                frame_expr = self._render(t, i)
+                lines.append(
+                    f"{t.numerator}/{t.denominator} => {frame_expr}",
+                )
+            return "\n".join(lines)
 
     def _sources(self):
         s = set()
@@ -351,12 +382,18 @@ class Loader:
 
 
 class YrdenServer:
-    """
+    """
+    A connection to a Yrden server.
+
+    A yrden server is the main API for local use of vidformer.
+    """
 
     def __init__(self, domain=None, port=None, bin=None):
-        """
+        """
+        Connect to a Yrden server
 
-        Can either connect to an existing server, if domain and port are provided, or start a new server using the provided binary
+        Can either connect to an existing server, if domain and port are provided, or start a new server using the provided binary.
+        If no domain or binary is provided, the `VIDFORMER_BIN` environment variable is used.
         """
 
         self._domain = domain
@@ -503,11 +540,13 @@ class SourceILoc:
 
     def __getitem__(self, idx):
         if type(idx) != int:
-            raise Exception("Source iloc index must be an integer")
+            raise Exception(f"Source iloc index must be an integer, got a {type(idx)}")
         return SourceExpr(self._source, idx, True)
 
 
 class Source:
+    """A video source."""
+
     def __init__(
         self, server: YrdenServer, name: str, path: str, stream: int, service=None
     ):
@@ -610,6 +649,8 @@ def _json_arg(arg, skip_data_anot=False):
 
 
 class Filter:
+    """A video filter."""
+
     def __init__(self, name: str, tl_func=None, **kwargs):
         self._name = name
 
@@ -854,6 +895,10 @@ class UDF:
 
 
 class UDFFrameType:
+    """
+    Frame type for use in UDFs.
+    """
+
     def __init__(self, width: int, height: int, pix_fmt: str):
         assert type(width) == int
         assert type(height) == int
@@ -886,6 +931,8 @@ class UDFFrameType:
 
 
 class UDFFrame:
+    """A symbolic reference to a frame for use in UDFs."""
+
     def __init__(self, data: np.ndarray, f_type: UDFFrameType):
         assert type(data) == np.ndarray
         assert type(f_type) == UDFFrameType
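The classes gaining docstrings here (`Spec`, `YrdenServer`, `Source`, `Filter`) form the core vidformer-py API that the cv2 frontend above builds on. A sketch of using them directly, assuming a Yrden binary is reachable via `VIDFORMER_BIN`; the file names are placeholders, and the `"Scale"` filter is used only because the cv2 frontend itself relies on it:

```python
import vidformer as vf

server = vf.YrdenServer()             # uses VIDFORMER_BIN unless domain/port/bin are given
src = vf.Source(server, "example", "example.mp4", stream=0)   # placeholder video path

scale = vf.Filter("Scale")            # same filter the cv2 frontend uses internally
domain = src.ts()                     # keep one output frame per source frame
fmt = {**src.fmt(), "pix_fmt": "yuv420p"}
spec = vf.Spec(domain, lambda t, i: scale(src.iloc[i], pix_fmt="yuv420p"), fmt)

print(spec)                           # the new __repr__ lists "t => frame expression" rows
spec.save(server, "copy.mp4")
```

`VideoWriter.spec()` in the cv2 frontend produces exactly this kind of object, so either API feeds the same `save`/`play` machinery.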
vidformer-0.6.0/PKG-INFO
DELETED
@@ -1,24 +0,0 @@
-Metadata-Version: 2.1
-Name: vidformer
-Version: 0.6.0
-Summary: A Python library for creating and viewing videos with vidformer.
-Author-email: Dominik Winecki <dominikwinecki@gmail.com>
-Requires-Python: >=3.8
-Description-Content-Type: text/markdown
-Classifier: Programming Language :: Python :: 3
-Classifier: Operating System :: OS Independent
-Requires-Dist: requests
-Requires-Dist: msgpack
-Requires-Dist: numpy
-Project-URL: Homepage, https://ixlab.github.io/vidformer/
-Project-URL: Issues, https://ixlab.github.io/vidformer/issues
-
-# vidformer-py
-
-## Publish
-
-```bash
-export FLIT_USERNAME='__token__' FLIT_PASSWORD='<token>'
-flit publish
-```
-
vidformer-0.6.0/README.md
DELETED
vidformer-0.6.0/vidformer/cv2/vf_cv2.py
DELETED
@@ -1,236 +0,0 @@
-from .. import vf
-
-import uuid
-from fractions import Fraction
-from bisect import bisect_right
-
-CAP_PROP_POS_MSEC = 0
-CAP_PROP_POS_FRAMES = 1
-CAP_PROP_FRAME_WIDTH = 3
-CAP_PROP_FRAME_HEIGHT = 4
-CAP_PROP_FPS = 5
-
-FONT_HERSHEY_SIMPLEX = 0
-FONT_HERSHEY_PLAIN = 1
-FONT_HERSHEY_DUPLEX = 2
-FONT_HERSHEY_COMPLEX = 3
-FONT_HERSHEY_TRIPLEX = 4
-FONT_HERSHEY_COMPLEX_SMALL = 5
-FONT_HERSHEY_SCRIPT_SIMPLEX = 6
-FONT_HERSHEY_SCRIPT_COMPLEX = 7
-FONT_ITALIC = 16
-
-FILLED = -1
-LINE_4 = 4
-LINE_8 = 8
-LINE_AA = 16
-
-_filter_scale = vf.Filter("Scale")
-_filter_rectangle = vf.Filter("cv2.rectangle")
-_filter_putText = vf.Filter("cv2.putText")
-
-
-def _ts_to_fps(timestamps):
-    return int(1 / (timestamps[1] - timestamps[0]))  # TODO: Fix for non-integer fps
-
-
-def _fps_to_ts(fps, n_frames):
-    assert type(fps) == int
-    return [Fraction(i, fps) for i in range(n_frames)]
-
-
-_global_cv2_server = None
-
-
-def _server():
-    global _global_cv2_server
-    if _global_cv2_server is None:
-        _global_cv2_server = vf.YrdenServer()
-    return _global_cv2_server
-
-
-def set_cv2_server(server):
-    """Set the server to use for the cv2 frontend."""
-    global _global_cv2_server
-    assert isinstance(server, vf.YrdenServer)
-    _global_cv2_server = server
-
-
-class _Frame:
-    def __init__(self, f):
-        self._f = f
-
-        # denotes that the frame has not yet been modified
-        # when a frame is modified, it is converted to rgb24 first
-        self._modified = False
-
-    def _mut(self):
-        self._modified = True
-        self._f = _filter_scale(self._f, pix_fmt="rgb24")
-
-
-class VideoCapture:
-    def __init__(self, path):
-        self._path = path
-        server = _server()
-        self._source = vf.Source(server, str(uuid.uuid4()), path, 0)
-        self._next_frame_idx = 0
-
-    def isOpened(self):
-        return True
-
-    def get(self, prop):
-        if prop == CAP_PROP_FPS:
-            return _ts_to_fps(self._source.ts())
-        elif prop == CAP_PROP_FRAME_WIDTH:
-            return self._source.fmt()["width"]
-        elif prop == CAP_PROP_FRAME_HEIGHT:
-            return self._source.fmt()["height"]
-
-        raise Exception(f"Unknown property {prop}")
-
-    def set(self, prop, value):
-        if prop == CAP_PROP_POS_FRAMES:
-            assert value >= 0 and value < len(self._source.ts())
-            self._next_frame_idx = value
-        elif prop == CAP_PROP_POS_MSEC:
-            t = Fraction(value, 1000)
-            ts = self._source.ts()
-            next_frame_idx = bisect_right(ts, t)
-            self._next_frame_idx = next_frame_idx
-        else:
-            raise Exception(f"Unsupported property {prop}")
-
-    def read(self):
-        if self._next_frame_idx >= len(self._source.ts()):
-            return False, None
-        frame = self._source.iloc[self._next_frame_idx]
-        self._next_frame_idx += 1
-        frame = _Frame(frame)
-        return True, frame
-
-    def release(self):
-        pass
-
-
-class VideoWriter:
-    def __init__(self, path, fourcc, fps, size):
-        assert isinstance(fourcc, VideoWriter_fourcc)
-        self._path = path
-        self._fourcc = fourcc
-        self._fps = fps
-        self._size = size
-
-        self._frames = []
-        self._pix_fmt = "yuv420p"
-
-    def write(self, frame):
-        if not isinstance(frame, _Frame):
-            raise Exception("frame must be a _Frame object")
-        if frame._modified:
-            f_obj = _filter_scale(frame._f, pix_fmt=self._pix_fmt)
-            self._frames.append(f_obj)
-        else:
-            self._frames.append(frame._f)
-
-    def release(self):
-        spec = self.vf_spec()
-        server = _server()
-        spec.save(server, self._path)
-
-    def vf_spec(self):
-        fmt = {
-            "width": self._size[0],
-            "height": self._size[1],
-            "pix_fmt": self._pix_fmt,
-        }
-        domain = _fps_to_ts(self._fps, len(self._frames))
-        spec = vf.Spec(domain, lambda t, i: self._frames[i], fmt)
-        return spec
-
-
-class VideoWriter_fourcc:
-    def __init__(self, *args):
-        self._args = args
-
-
-def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
-    """
-    cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )
-    """
-
-    assert isinstance(img, _Frame)
-    img._mut()
-
-    assert len(pt1) == 2
-    assert len(pt2) == 2
-    assert all(isinstance(x, int) for x in pt1)
-    assert all(isinstance(x, int) for x in pt2)
-
-    assert len(color) == 3 or len(color) == 4
-    color = [float(x) for x in color]
-    if len(color) == 3:
-        color.append(255.0)
-
-    args = []
-    if thickness is not None:
-        assert isinstance(thickness, int)
-        args.append(thickness)
-    if lineType is not None:
-        assert isinstance(lineType, int)
-        assert thickness is not None
-        args.append(lineType)
-    if shift is not None:
-        assert isinstance(shift, int)
-        assert shift is not None
-        args.append(shift)
-
-    img._f = _filter_rectangle(img._f, pt1, pt2, color, *args)
-
-
-def putText(
-    img,
-    text,
-    org,
-    fontFace,
-    fontScale,
-    color,
-    thickness=None,
-    lineType=None,
-    bottomLeftOrigin=None,
-):
-    """
-    cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
-    """
-
-    assert isinstance(img, _Frame)
-    img._mut()
-
-    assert isinstance(text, str)
-
-    assert len(org) == 2
-    assert all(isinstance(x, int) for x in org)
-
-    assert isinstance(fontFace, int)
-    assert isinstance(fontScale, float) or isinstance(fontScale, int)
-    fontScale = float(fontScale)
-
-    assert len(color) == 3 or len(color) == 4
-    color = [float(x) for x in color]
-    if len(color) == 3:
-        color.append(255.0)
-
-    args = []
-    if thickness is not None:
-        assert isinstance(thickness, int)
-        args.append(thickness)
-    if lineType is not None:
-        assert isinstance(lineType, int)
-        assert thickness is not None
-        args.append(lineType)
-    if bottomLeftOrigin is not None:
-        assert isinstance(bottomLeftOrigin, bool)
-        assert lineType is not None
-        args.append(bottomLeftOrigin)
-
-    img._f = _filter_putText(img._f, text, org, fontFace, fontScale, color, *args)
{vidformer-0.6.0 → vidformer-0.6.2}/vidformer/cv2/__init__.py
File without changes