vidformer 0.6.0-py3-none-any.whl → 0.6.2-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- vidformer/__init__.py +1 -1
- vidformer/cv2/vf_cv2.py +312 -10
- vidformer/vf.py +58 -11
- vidformer-0.6.2.dist-info/METADATA +36 -0
- vidformer-0.6.2.dist-info/RECORD +7 -0
- vidformer-0.6.0.dist-info/METADATA +0 -24
- vidformer-0.6.0.dist-info/RECORD +0 -7
- {vidformer-0.6.0.dist-info → vidformer-0.6.2.dist-info}/WHEEL +0 -0
vidformer/__init__.py
CHANGED
vidformer/cv2/vf_cv2.py
CHANGED
@@ -1,5 +1,24 @@
+"""
+vidformer.cv2 is the cv2 frontend for [vidformer](https://github.com/ixlab/vidformer).
+
+> ⚠️ This module is a work in progress. See the [implemented functions list](https://ixlab.github.io/vidformer/opencv-filters.html).
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+"""
+
 from .. import vf
 
+try:
+    import cv2 as _opencv2
+except:
+    _opencv2 = None
+
+import numpy as np
+
 import uuid
 from fractions import Fraction
 from bisect import bisect_right
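The module is meant to be imported in place of OpenCV's `cv2`. A minimal sketch of the intended usage follows; the binary path and video file are hypothetical, and `set_cv2_server` appears in a later hunk:

```python
import vidformer as vf
import vidformer.cv2 as cv2          # drop-in stand-in for "import cv2"

# Optionally direct the frontend at an explicit Yrden server (see set_cv2_server below);
# otherwise the module falls back to its default global server.
cv2.set_cv2_server(vf.YrdenServer(bin="./vidformer-cli"))   # hypothetical binary path

cap = cv2.VideoCapture("tos_720p.mp4")                      # hypothetical local video
print(cap.get(cv2.CAP_PROP_FPS))
```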
@@ -9,6 +28,7 @@ CAP_PROP_POS_FRAMES = 1
 CAP_PROP_FRAME_WIDTH = 3
 CAP_PROP_FRAME_HEIGHT = 4
 CAP_PROP_FPS = 5
+CAP_PROP_FRAME_COUNT = 7
 
 FONT_HERSHEY_SIMPLEX = 0
 FONT_HERSHEY_PLAIN = 1
@@ -28,6 +48,10 @@ LINE_AA = 16
 _filter_scale = vf.Filter("Scale")
 _filter_rectangle = vf.Filter("cv2.rectangle")
 _filter_putText = vf.Filter("cv2.putText")
+_filter_arrowedLine = vf.Filter("cv2.arrowedLine")
+_filter_line = vf.Filter("cv2.line")
+_filter_circle = vf.Filter("cv2.circle")
+_filter_addWeighted = vf.Filter("cv2.addWeighted")
 
 
 def _ts_to_fps(timestamps):
@@ -49,16 +73,18 @@ def _server():
     return _global_cv2_server
 
 
-def set_cv2_server(server):
+def set_cv2_server(server: vf.YrdenServer):
     """Set the server to use for the cv2 frontend."""
     global _global_cv2_server
     assert isinstance(server, vf.YrdenServer)
     _global_cv2_server = server
 
 
-class _Frame:
-    def __init__(self, f):
+class Frame:
+    def __init__(self, f, fmt):
         self._f = f
+        self._fmt = fmt
+        self.shape = (fmt["height"], fmt["width"], 3)
 
     # denotes that the frame has not yet been modified
     # when a frame is modified, it is converted to rgb24 first
@@ -67,6 +93,23 @@ class _Frame:
     def _mut(self):
         self._modified = True
         self._f = _filter_scale(self._f, pix_fmt="rgb24")
+        self._fmt["pix_fmt"] = "rgb24"
+
+    def numpy(self):
+        """
+        Return the frame as a numpy array.
+        """
+
+        self._mut()
+        spec = vf.Spec([Fraction(0, 1)], lambda t, i: self._f, self._fmt)
+        loader = spec.load(_server())
+
+        frame_raster_rgb24 = loader[0]
+        assert type(frame_raster_rgb24) == bytes
+        assert len(frame_raster_rgb24) == self.shape[0] * self.shape[1] * 3
+        raw_data_array = np.frombuffer(frame_raster_rgb24, dtype=np.uint8)
+        frame = raw_data_array.reshape(self.shape)
+        return frame
 
 
 class VideoCapture:
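The new `Frame.numpy()` materializes a single symbolic frame: it wraps the frame in a one-timestamp `Spec`, loads it through the server, and reshapes the raw rgb24 bytes to `frame.shape`. A hedged usage sketch, with a hypothetical video path:

```python
import vidformer.cv2 as cv2

cap = cv2.VideoCapture("tos_720p.mp4")   # hypothetical local video
ok, frame = cap.read()
assert ok

arr = frame.numpy()           # renders just this one frame on the server
print(arr.shape, arr.dtype)   # matches frame.shape, dtype uint8
cap.release()
```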
@@ -86,6 +129,10 @@ class VideoCapture:
             return self._source.fmt()["width"]
         elif prop == CAP_PROP_FRAME_HEIGHT:
             return self._source.fmt()["height"]
+        elif prop == CAP_PROP_FRAME_COUNT:
+            return len(self._source.ts())
+        elif prop == CAP_PROP_POS_FRAMES:
+            return self._next_frame_idx
 
         raise Exception(f"Unknown property {prop}")
 
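With the two added properties, `get` can report the total frame count (taken from the source timestamps) and the index of the next frame to be read. For example, assuming a hypothetical local video:

```python
import vidformer.cv2 as cv2

cap = cv2.VideoCapture("tos_720p.mp4")            # hypothetical local video
n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)      # len(source.ts())
cap.read()
cap.read()
pos = cap.get(cv2.CAP_PROP_POS_FRAMES)            # 2: index of the next frame to read
print(n_frames, pos)
```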
@@ -106,7 +153,7 @@
             return False, None
         frame = self._source.iloc[self._next_frame_idx]
         self._next_frame_idx += 1
-        frame = _Frame(frame)
+        frame = Frame(frame, self._source.fmt())
         return True, frame
 
     def release(self):
@@ -116,6 +163,8 @@
 class VideoWriter:
     def __init__(self, path, fourcc, fps, size):
         assert isinstance(fourcc, VideoWriter_fourcc)
+        if path is not None and not isinstance(path, str):
+            raise Exception("path must be a string or None")
         self._path = path
         self._fourcc = fourcc
         self._fps = fps
@@ -125,8 +174,8 @@
         self._pix_fmt = "yuv420p"
 
     def write(self, frame):
-        if not isinstance(frame, _Frame):
-            raise Exception("frame must be a _Frame object")
+        if not isinstance(frame, Frame):
+            raise Exception("frame must be a vidformer.cv2.Frame object")
         if frame._modified:
             f_obj = _filter_scale(frame._f, pix_fmt=self._pix_fmt)
             self._frames.append(f_obj)
@@ -134,11 +183,14 @@
             self._frames.append(frame._f)
 
     def release(self):
-        spec = self.spec()
+        if self._path is None:
+            return
+
+        spec = self.spec()
         server = _server()
         spec.save(server, self._path)
 
-    def spec(self):
+    def spec(self) -> vf.Spec:
         fmt = {
             "width": self._size[0],
             "height": self._size[1],
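Allowing `path=None` turns a `VideoWriter` into a pure spec builder: `release()` becomes a no-op and the accumulated frames can be retrieved with the new `spec()` method. A sketch under those assumptions (video path hypothetical):

```python
import vidformer.cv2 as cv2

cap = cv2.VideoCapture("tos_720p.mp4")   # hypothetical local video
fps = cap.get(cv2.CAP_PROP_FPS)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

out = cv2.VideoWriter(None, cv2.VideoWriter_fourcc(*"mp4v"), fps, size)
for _ in range(100):
    ok, frame = cap.read()
    if not ok:
        break
    out.write(frame)
out.release()       # no-op: path is None, so nothing is encoded

spec = out.spec()   # the accumulated frames as a vf.Spec (playable with vidplay, below)
```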
@@ -154,12 +206,79 @@ class VideoWriter_fourcc:
         self._args = args
 
 
+def imread(path, *args):
+    if len(args) > 0:
+        raise NotImplementedError("imread does not support additional arguments")
+
+    assert path.lower().endswith((".jpg", ".jpeg", ".png"))
+    server = _server()
+    source = vf.Source(server, str(uuid.uuid4()), path, 0)
+    frame = Frame(source.iloc[0], source.fmt())
+    return frame
+
+
+def imwrite(path, img, *args):
+    if len(args) > 0:
+        raise NotImplementedError("imwrite does not support additional arguments")
+
+    if not isinstance(img, Frame):
+        raise Exception("img must be a vidformer.cv2.Frame object")
+
+    fmt = img._fmt.copy()
+    width = fmt["width"]
+    height = fmt["height"]
+    f = img._f
+
+    domain = [Fraction(0, 1)]
+
+    if path.lower().endswith(".png"):
+        img._mut()  # Make sure it's in rgb24
+        spec = vf.Spec(
+            domain,
+            lambda t, i: img._f,
+            {"width": width, "height": height, "pix_fmt": "rgb24"},
+        )
+        spec.save(_server(), path, encoder="png")
+    elif path.lower().endswith((".jpg", ".jpeg")):
+        if img._modified:
+            # it's rgb24, we need to convert to something jpeg can handle
+            f = _filter_scale(img._f, pix_fmt="yuv420p")
+            fmt["pix_fmt"] = "yuv420p"
+        else:
+            if fmt["pix_fmt"] not in ["yuvj420p", "yuvj422p", "yuvj444p"]:
+                f = _filter_scale(img._f, pix_fmt="yuvj420p")
+                fmt["pix_fmt"] = "yuvj420p"
+
+        spec = vf.Spec(domain, lambda t, i: f, fmt)
+        spec.save(_server(), path, encoder="mjpeg")
+    else:
+        raise Exception("Unsupported image format")
+
+
+def vidplay(video, *args, **kwargs):
+    """
+    Play a vidformer video specification.
+
+    Args:
+        video: one of [vidformer.Spec, vidformer.Source, vidformer.cv2.VideoWriter]
+    """
+
+    if isinstance(video, vf.Spec):
+        return video.play(_server(), *args, **kwargs)
+    elif isinstance(video, vf.Source):
+        return video.play(_server(), *args, **kwargs)
+    elif isinstance(video, VideoWriter):
+        return video.spec().play(_server(), *args, **kwargs)
+    else:
+        raise Exception("Unsupported video type to vidplay")
+
+
 def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
     """
     cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )
     """
 
-    assert isinstance(img, _Frame)
+    assert isinstance(img, Frame)
     img._mut()
 
     assert len(pt1) == 2
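The new `imread`/`imwrite`/`vidplay` helpers round out the frontend: images load as single-frame sources, `imwrite` picks a png or mjpeg encoder from the file extension, and `vidplay` plays a `Spec`, a `Source`, or a `VideoWriter` on the active server. A hedged example with hypothetical file names:

```python
import vidformer.cv2 as cv2

img = cv2.imread("thumbnail.png")                 # hypothetical input image
cv2.rectangle(img, (50, 50), (200, 200), (0, 255, 0), 3)
cv2.putText(img, "detected", (55, 45), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0))
cv2.imwrite("annotated.jpg", img)                 # .jpg  -> mjpeg encoder
cv2.imwrite("annotated.png", img)                 # .png  -> png encoder

# vidplay accepts a Spec, a Source, or a VideoWriter (it plays writer.spec()).
cap = cv2.VideoCapture("tos_720p.mp4")            # hypothetical local video
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter(None, cv2.VideoWriter_fourcc(*"mp4v"), cap.get(cv2.CAP_PROP_FPS), (w, h))
ok, frame = cap.read()
while ok:
    out.write(frame)
    ok, frame = cap.read()
cv2.vidplay(out)
```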
@@ -203,7 +322,7 @@ def putText(
     cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
     """
 
-    assert isinstance(img, _Frame)
+    assert isinstance(img, Frame)
     img._mut()
 
     assert isinstance(text, str)
@@ -234,3 +353,186 @@ def putText(
         args.append(bottomLeftOrigin)
 
     img._f = _filter_putText(img._f, text, org, fontFace, fontScale, color, *args)
+
+
+def arrowedLine(
+    img, pt1, pt2, color, thickness=None, line_type=None, shift=None, tipLength=None
+):
+    """
+    cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] )
+    """
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(pt1) == 2
+    assert len(pt2) == 2
+    assert all(isinstance(x, int) for x in pt1)
+    assert all(isinstance(x, int) for x in pt2)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if line_type is not None:
+        assert isinstance(line_type, int)
+        assert thickness is not None
+        args.append(line_type)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+    if tipLength is not None:
+        assert isinstance(tipLength, float)
+        assert shift is not None
+        args.append(tipLength)
+
+    img._f = _filter_arrowedLine(img._f, pt1, pt2, color, *args)
+
+
+def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(pt1) == 2
+    assert len(pt2) == 2
+    assert all(isinstance(x, int) for x in pt1)
+    assert all(isinstance(x, int) for x in pt2)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if lineType is not None:
+        assert isinstance(lineType, int)
+        assert thickness is not None
+        args.append(lineType)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+
+    img._f = _filter_line(img._f, pt1, pt2, color, *args)
+
+
+def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):
+    assert isinstance(img, Frame)
+    img._mut()
+
+    assert len(center) == 2
+    assert all(isinstance(x, int) for x in center)
+
+    assert isinstance(radius, int)
+
+    assert len(color) == 3 or len(color) == 4
+    color = [float(x) for x in color]
+    if len(color) == 3:
+        color.append(255.0)
+
+    args = []
+    if thickness is not None:
+        assert isinstance(thickness, int)
+        args.append(thickness)
+    if lineType is not None:
+        assert isinstance(lineType, int)
+        assert thickness is not None
+        args.append(lineType)
+    if shift is not None:
+        assert isinstance(shift, int)
+        assert shift is not None
+        args.append(shift)
+
+    img._f = _filter_circle(img._f, center, radius, color, *args)
+
+
+def getFontScaleFromHeight(*args, **kwargs):
+    """
+    cv.getFontScaleFromHeight( fontFace, pixelHeight[, thickness] )
+    """
+    if _opencv2 is None:
+        raise NotImplementedError("getFontScaleFromHeight requires the cv2 module")
+    return _opencv2.getFontScaleFromHeight(*args, **kwargs)
+
+
+def getTextSize(*args, **kwargs):
+    """
+    cv.getTextSize( text, fontFace, fontScale, thickness )
+    """
+    if _opencv2 is None:
+        raise NotImplementedError("getTextSize requires the cv2 module")
+    return _opencv2.getTextSize(*args, **kwargs)
+
+
+def addWeighted(src1, alpha, src2, beta, gamma, dst=None, dtype=-1):
+    """
+    cv.addWeighted( src1, alpha, src2, beta, gamma[, dst[, dtype]] ) -> dst
+    """
+    assert isinstance(src1, Frame)
+    assert isinstance(src2, Frame)
+    src1._mut()
+    src2._mut()
+
+    if dst is None:
+        dst = Frame(src1._f, src1._fmt.copy())
+    else:
+        assert isinstance(dst, Frame)
+        dst._mut()
+
+    assert isinstance(alpha, float) or isinstance(alpha, int)
+    assert isinstance(beta, float) or isinstance(beta, int)
+    assert isinstance(gamma, float) or isinstance(gamma, int)
+    alpha = float(alpha)
+    beta = float(beta)
+    gamma = float(gamma)
+
+    if dtype != -1:
+        raise Exception("addWeighted does not support the dtype argument")
+
+    dst._f = _filter_addWeighted(src1._f, alpha, src2._f, beta, gamma)
+    return dst
+
+
+# Stubs for unimplemented functions
+
+
+def clipLine(*args, **kwargs):
+    raise NotImplementedError("clipLine is not yet implemented in the cv2 frontend")
+
+
+def drawContours(*args, **kwargs):
+    raise NotImplementedError("drawContours is not yet implemented in the cv2 frontend")
+
+
+def drawMarker(*args, **kwargs):
+    raise NotImplementedError("drawMarker is not yet implemented in the cv2 frontend")
+
+
+def ellipse(*args, **kwargs):
+    raise NotImplementedError("ellipse is not yet implemented in the cv2 frontend")
+
+
+def ellipse2Poly(*args, **kwargs):
+    raise NotImplementedError("ellipse2Poly is not yet implemented in the cv2 frontend")
+
+
+def fillConvexPoly(*args, **kwargs):
+    raise NotImplementedError(
+        "fillConvexPoly is not yet implemented in the cv2 frontend"
+    )
+
+
+def fillPoly(*args, **kwargs):
+    raise NotImplementedError("fillPoly is not yet implemented in the cv2 frontend")
+
+
+def polylines(*args, **kwargs):
+    raise NotImplementedError("polylines is not yet implemented in the cv2 frontend")
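Taken together, the new drawing functions and `addWeighted` mirror their OpenCV counterparts but only record filter applications on the symbolic frame. A sketch that annotates one frame and blends two frames (video path hypothetical):

```python
import vidformer.cv2 as cv2

cap = cv2.VideoCapture("tos_720p.mp4")      # hypothetical local video
_, a = cap.read()
_, b = cap.read()

cv2.line(a, (0, 0), (100, 100), (255, 0, 0), 2)
cv2.circle(a, (200, 200), 40, (0, 0, 255), 3)
cv2.arrowedLine(a, (10, 10), (120, 10), (0, 255, 0), 2)

blended = cv2.addWeighted(a, 0.5, b, 0.5, 0.0)   # dst = 0.5*a + 0.5*b + 0.0
arr = blended.numpy()                            # materialize the blended frame if needed
```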
vidformer/vf.py
CHANGED
@@ -1,3 +1,13 @@
+"""
+vidformer-py is a Python 🐍 interface for [vidformer](https://github.com/ixlab/vidformer).
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+"""
+
 import subprocess
 from fractions import Fraction
 import random
@@ -43,19 +53,40 @@ def _check_hls_link_exists(url, max_attempts=150, delay=0.1):
 
 
 class Spec:
+    """
+    A video transformation specification.
+
+    See https://ixlab.github.io/vidformer/concepts.html for more information.
+    """
+
     def __init__(self, domain: list[Fraction], render, fmt: dict):
         self._domain = domain
         self._render = render
         self._fmt = fmt
 
     def __repr__(self):
-        (previous __repr__ body, 7 lines, not shown in the rendered diff)
+        if len(self._domain) <= 20:
+            lines = []
+            for i, t in enumerate(self._domain):
+                frame_expr = self._render(t, i)
+                lines.append(
+                    f"{t.numerator}/{t.denominator} => {frame_expr}",
+                )
+            return "\n".join(lines)
+        else:
+            lines = []
+            for i, t in enumerate(self._domain[:10]):
+                frame_expr = self._render(t, i)
+                lines.append(
+                    f"{t.numerator}/{t.denominator} => {frame_expr}",
+                )
+            lines.append("...")
+            for i, t in enumerate(self._domain[-10:]):
+                frame_expr = self._render(t, i)
+                lines.append(
+                    f"{t.numerator}/{t.denominator} => {frame_expr}",
+                )
+            return "\n".join(lines)
 
 
     def _sources(self):
        s = set()
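The new `__repr__` prints one `timestamp => frame expression` line per domain entry, truncating to the first and last ten entries for long specs. A minimal sketch, assuming a local Yrden binary and video file (both paths hypothetical):

```python
import vidformer as vf

server = vf.YrdenServer(bin="./vidformer-cli")       # hypothetical binary path
src = vf.Source(server, "tos", "tos_720p.mp4", 0)    # hypothetical video file

domain = src.ts()[:5]                                # first five source timestamps (Fractions)
spec = vf.Spec(domain, lambda t, i: src.iloc[i], src.fmt())

print(spec)   # one "numerator/denominator => <frame expression>" line per timestamp;
              # domains longer than 20 entries print the first and last 10 with "..."
```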
@@ -351,12 +382,18 @@ class Loader:
 
 
 class YrdenServer:
-    """
+    """
+    A connection to a Yrden server.
+
+    A yrden server is the main API for local use of vidformer.
+    """
 
     def __init__(self, domain=None, port=None, bin=None):
-        """
+        """
+        Connect to a Yrden server
 
-        Can either connect to an existing server, if domain and port are provided, or start a new server using the provided binary
+        Can either connect to an existing server, if domain and port are provided, or start a new server using the provided binary.
+        If no domain or binary is provided, the `VIDFORMER_BIN` environment variable is used.
         """
 
         self._domain = domain
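The expanded docstring spells out the three ways to get a server. Roughly, and with the host, port, and binary names as assumptions:

```python
import vidformer as vf

# 1. Attach to an already-running Yrden server:
server = vf.YrdenServer(domain="localhost", port=8000)    # assumed host/port

# 2. Spawn one from an explicit binary path:
server = vf.YrdenServer(bin="./vidformer-cli")            # hypothetical path

# 3. Spawn one from the VIDFORMER_BIN environment variable:
#    export VIDFORMER_BIN=/path/to/vidformer-cli
server = vf.YrdenServer()
```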
@@ -503,11 +540,13 @@ class SourceILoc:
 
     def __getitem__(self, idx):
         if type(idx) != int:
-            raise Exception("Source iloc index must be an integer")
+            raise Exception(f"Source iloc index must be an integer, got a {type(idx)}")
         return SourceExpr(self._source, idx, True)
 
 
 class Source:
+    """A video source."""
+
     def __init__(
         self, server: YrdenServer, name: str, path: str, stream: int, service=None
     ):
@@ -610,6 +649,8 @@ def _json_arg(arg, skip_data_anot=False):
 
 
 class Filter:
+    """A video filter."""
+
     def __init__(self, name: str, tl_func=None, **kwargs):
         self._name = name
 
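A `Filter` is a callable handle to a named server-side filter; calling it on frame expressions yields a new frame expression, which is how the cv2 frontend builds its drawing operations. A hedged sketch using the `Scale` filter that `vf_cv2.py` already binds, with hypothetical file paths:

```python
import vidformer as vf

scale = vf.Filter("Scale")     # the same filter vf_cv2.py binds as _filter_scale

server = vf.YrdenServer()                             # assumes VIDFORMER_BIN is set
src = vf.Source(server, "tos", "tos_720p.mp4", 0)     # hypothetical video file

# Apply the filter inside a Spec's render function to re-render one frame as rgb24.
fmt = {**src.fmt(), "pix_fmt": "rgb24"}
spec = vf.Spec(src.ts()[:1], lambda t, i: scale(src.iloc[i], pix_fmt="rgb24"), fmt)
raw = spec.load(server)[0]     # raw rgb24 bytes, the same path Frame.numpy() uses in vf_cv2.py
```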
@@ -854,6 +895,10 @@ class UDF:
 
 
 class UDFFrameType:
+    """
+    Frame type for use in UDFs.
+    """
+
     def __init__(self, width: int, height: int, pix_fmt: str):
         assert type(width) == int
         assert type(height) == int
@@ -886,6 +931,8 @@ class UDFFrameType:
 
 
 class UDFFrame:
+    """A symbolic reference to a frame for use in UDFs."""
+
     def __init__(self, data: np.ndarray, f_type: UDFFrameType):
         assert type(data) == np.ndarray
         assert type(f_type) == UDFFrameType
vidformer-0.6.2.dist-info/METADATA
ADDED
@@ -0,0 +1,36 @@
+Metadata-Version: 2.1
+Name: vidformer
+Version: 0.6.2
+Summary: A Python library for creating and viewing videos with vidformer.
+Author-email: Dominik Winecki <dominikwinecki@gmail.com>
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+Requires-Dist: requests
+Requires-Dist: msgpack
+Requires-Dist: numpy
+Project-URL: Documentation, https://ixlab.github.io/vidformer/vidformer-py/
+Project-URL: Homepage, https://ixlab.github.io/vidformer/
+Project-URL: Issues, https://github.com/ixlab/vidformer/issues
+
+# vidformer-py
+
+[](https://pypi.org/project/vidformer/)
+[](https://github.com/ixlab/vidformer/blob/main/LICENSE)
+
+vidformer-py is a Python 🐍 interface for [vidformer](https://github.com/ixlab/vidformer).
+Our [getting started guide](https://ixlab.github.io/vidformer/getting-started.html) explains how to use it.
+
+**Quick links:**
+* [📦 PyPI](https://pypi.org/project/vidformer/)
+* [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
+* [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
+* [🧑💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
+
+**Publish:**
+```bash
+export FLIT_USERNAME='__token__' FLIT_PASSWORD='<token>'
+flit publish
+```
+
vidformer-0.6.2.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+vidformer/__init__.py,sha256=JnJ5T-Afn8dZXAkwmq38cFzrnK4aZZZSFhHdUqVLdno,113
+vidformer/vf.py,sha256=0idKHOkWXC5bki1thrgihKQwJ91gDCzf96tl3Gm5vaA,31059
+vidformer/cv2/__init__.py,sha256=wOjDsYyUKlP_Hye8-tyz-msu9xwaPMpN2sGMu3Lh3-w,22
+vidformer/cv2/vf_cv2.py,sha256=CO9LZ7FwOhrzNJ1eFjd9JddZm0PjY-XtOlKIYGB3iQQ,15486
+vidformer-0.6.2.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+vidformer-0.6.2.dist-info/METADATA,sha256=_jQnVbcd_OHAy57G7HXIijyXUOoR9Q8AzURvjn3ljDA,1487
+vidformer-0.6.2.dist-info/RECORD,,
@@ -1,24 +0,0 @@
|
|
1
|
-
Metadata-Version: 2.1
|
2
|
-
Name: vidformer
|
3
|
-
Version: 0.6.0
|
4
|
-
Summary: A Python library for creating and viewing videos with vidformer.
|
5
|
-
Author-email: Dominik Winecki <dominikwinecki@gmail.com>
|
6
|
-
Requires-Python: >=3.8
|
7
|
-
Description-Content-Type: text/markdown
|
8
|
-
Classifier: Programming Language :: Python :: 3
|
9
|
-
Classifier: Operating System :: OS Independent
|
10
|
-
Requires-Dist: requests
|
11
|
-
Requires-Dist: msgpack
|
12
|
-
Requires-Dist: numpy
|
13
|
-
Project-URL: Homepage, https://ixlab.github.io/vidformer/
|
14
|
-
Project-URL: Issues, https://ixlab.github.io/vidformer/issues
|
15
|
-
|
16
|
-
# vidformer-py
|
17
|
-
|
18
|
-
## Publish
|
19
|
-
|
20
|
-
```bash
|
21
|
-
export FLIT_USERNAME='__token__' FLIT_PASSWORD='<token>'
|
22
|
-
flit publish
|
23
|
-
```
|
24
|
-
|
vidformer-0.6.0.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-vidformer/__init__.py,sha256=MgZOCVL7wOxSseM5LArXHtW3FzPKLcmGzUaosUhSn6A,113
-vidformer/vf.py,sha256=gexrp0PQ8cbkixCPLY9BCquHeHWfD6iUcA_wbSxGmFQ,29511
-vidformer/cv2/__init__.py,sha256=wOjDsYyUKlP_Hye8-tyz-msu9xwaPMpN2sGMu3Lh3-w,22
-vidformer/cv2/vf_cv2.py,sha256=C3b8OEGSHh7AFbtddybP_DwR7rvy34lCewt9JWcLTaM,6196
-vidformer-0.6.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
-vidformer-0.6.0.dist-info/METADATA,sha256=hK3hz1DM6NQfkmMz2L5Uc098wgCvPJzaHbpkZy-DDt8,643
-vidformer-0.6.0.dist-info/RECORD,,
{vidformer-0.6.0.dist-info → vidformer-0.6.2.dist-info}/WHEEL
File without changes