vidformer 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vidformer/cv2/__init__.py CHANGED
@@ -1 +1,858 @@
1
- from .vf_cv2 import *
1
+ """
2
+ vidformer.cv2 is the cv2 frontend for [vidformer](https://github.com/ixlab/vidformer).
3
+
4
+ > ⚠️ This module is a work in progress. See the [implemented functions list](https://ixlab.github.io/vidformer/opencv-filters.html).
5
+
6
+ **Quick links:**
7
+ * [📦 PyPI](https://pypi.org/project/vidformer/)
8
+ * [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/pdoc/)
9
+ * [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py/pdoc/vidformer/cv2.html)
10
+ * [📘 Documentation - vidformer.supervision](https://ixlab.github.io/vidformer/vidformer-py/pdoc/vidformer/supervision.html)
11
+ * [🧑‍💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
12
+ """
13
+
14
+ import vidformer as vf
15
+
16
+ try:
17
+ import cv2 as _opencv2
18
+ except Exception:
19
+ _opencv2 = None
20
+
21
+ import numpy as np
22
+
23
+ import uuid
24
+ from fractions import Fraction
25
+ from bisect import bisect_right
26
+ import zlib
27
+ import re
28
+
29
# VideoCapture/VideoWriter property identifiers (values mirror OpenCV's
# CAP_PROP_* constants so code written against cv2 keeps working).
CAP_PROP_POS_MSEC = 0
CAP_PROP_POS_FRAMES = 1
CAP_PROP_FRAME_WIDTH = 3
CAP_PROP_FRAME_HEIGHT = 4
CAP_PROP_FPS = 5
CAP_PROP_FRAME_COUNT = 7

# Font face identifiers (values mirror OpenCV's FONT_* constants).
FONT_HERSHEY_SIMPLEX = 0
FONT_HERSHEY_PLAIN = 1
FONT_HERSHEY_DUPLEX = 2
FONT_HERSHEY_COMPLEX = 3
FONT_HERSHEY_TRIPLEX = 4
FONT_HERSHEY_COMPLEX_SMALL = 5
FONT_HERSHEY_SCRIPT_SIMPLEX = 6
FONT_HERSHEY_SCRIPT_COMPLEX = 7
FONT_ITALIC = 16

# Thickness / line-type flags (values mirror OpenCV's constants).
FILLED = -1
LINE_4 = 4
LINE_8 = 8
LINE_AA = 16
50
+
51
# Internal filters implementing Mat-like operations on frames.
_inline_mat = vf.Filter("_inline_mat")
_slice_mat = vf.Filter("_slice_mat")
_slice_write_mat = vf.Filter("_slice_write_mat")


# Filters backing the public cv2-style conversion and drawing API.
_filter_scale = vf.Filter("Scale")
_filter_rectangle = vf.Filter("cv2.rectangle")
_filter_putText = vf.Filter("cv2.putText")
_filter_arrowedLine = vf.Filter("cv2.arrowedLine")
_filter_line = vf.Filter("cv2.line")
_filter_circle = vf.Filter("cv2.circle")
_filter_addWeighted = vf.Filter("cv2.addWeighted")
_filter_ellipse = vf.Filter("cv2.ellipse")
64
+
65
+
66
+ def _ts_to_fps(timestamps):
67
+ return int(1 / (timestamps[1] - timestamps[0])) # TODO: Fix for non-integer fps
68
+
69
+
70
+ def _fps_to_ts(fps, n_frames):
71
+ assert type(fps) is int
72
+ return [Fraction(i, fps) for i in range(n_frames)]
73
+
74
+
75
# Module-wide server handle used by the cv2 shim; stays None until the first
# call to _server() (lazy default) or an explicit set_server().
_global_cv2_server = None
76
+
77
+
78
def _server():
    """Return the active cv2 server, creating a default YrdenServer lazily."""
    global _global_cv2_server
    if _global_cv2_server is None:
        # No server configured yet; fall back to a local Yrden server.
        _global_cv2_server = vf.YrdenServer()
    return _global_cv2_server
83
+
84
+
85
def set_server(server):
    """Set the server to use for the cv2 frontend."""
    global _global_cv2_server
    # Only the two known server implementations are accepted.
    assert isinstance(server, (vf.YrdenServer, vf.IgniServer))
    _global_cv2_server = server
90
+
91
+
92
class Frame:
    """A lazily evaluated video frame backed by a vidformer filter graph.

    `_f` is the filter node producing the pixels and `_fmt` the associated
    format dict ({"width", "height", "pix_fmt"}). `shape` mimics a numpy
    (height, width, 3) image shape.
    """

    def __init__(self, f, fmt):
        self._f = f
        self._fmt = fmt
        self.shape = (fmt["height"], fmt["width"], 3)

        # denotes that the frame has not yet been modified
        # when a frame is modified, it is converted to rgb24 first
        self._modified = False

    def _mut(self):
        """Mark the frame as modified, converting it to rgb24 first if needed."""
        if self._modified:
            assert self._fmt["pix_fmt"] == "rgb24"
            return

        self._modified = True
        if self._fmt["pix_fmt"] != "rgb24":
            self._f = _filter_scale(self._f, pix_fmt="rgb24")
            self._fmt["pix_fmt"] = "rgb24"

    def copy(self):
        """Return a copy sharing the filter node but with its own format dict."""
        return Frame(self._f, self._fmt.copy())

    def numpy(self):
        """
        Return the frame as a numpy array.
        """

        self._mut()
        spec = vf.YrdenSpec([Fraction(0, 1)], lambda t, i: self._f, self._fmt)
        loader = spec.load(_server())

        frame_raster_rgb24 = loader[0]
        assert type(frame_raster_rgb24) is bytes
        assert len(frame_raster_rgb24) == self.shape[0] * self.shape[1] * 3
        raw_data_array = np.frombuffer(frame_raster_rgb24, dtype=np.uint8)
        frame = raw_data_array.reshape(self.shape)
        frame = frame[:, :, ::-1]  # convert RGB to BGR
        return frame

    def _resolve_2d_slice(self, key):
        """Validate a 2D slice key and return absolute (miny, maxy, minx, maxx).

        Shared by __getitem__ and __setitem__ (previously duplicated).
        Raises NotImplementedError for anything other than a plain 2D slice,
        or for an empty/out-of-range region.
        """
        if not isinstance(key, tuple):
            raise NotImplementedError("Only 2D slicing is supported")

        if len(key) != 2:
            raise NotImplementedError("Only 2D slicing is supported")

        if not all(isinstance(x, slice) for x in key):
            raise NotImplementedError("Only 2D slicing is supported")

        miny = key[0].start if key[0].start is not None else 0
        maxy = key[0].stop if key[0].stop is not None else self.shape[0]
        minx = key[1].start if key[1].start is not None else 0
        maxx = key[1].stop if key[1].stop is not None else self.shape[1]

        # handle negative indices
        if miny < 0:
            miny = self.shape[0] + miny
        if maxy < 0:
            maxy = self.shape[0] + maxy
        if minx < 0:
            minx = self.shape[1] + minx
        if maxx < 0:
            maxx = self.shape[1] + maxx

        if (
            maxy <= miny
            or maxx <= minx
            or miny < 0
            or minx < 0
            or maxy > self.shape[0]
            or maxx > self.shape[1]
        ):
            raise NotImplementedError("Invalid slice")

        return miny, maxy, minx, maxx

    def __getitem__(self, key):
        """Return a new Frame viewing the sliced region."""
        miny, maxy, minx, maxx = self._resolve_2d_slice(key)

        f = _slice_mat(self._f, miny, maxy, minx, maxx)
        fmt = self._fmt.copy()
        fmt["width"] = maxx - minx
        fmt["height"] = maxy - miny
        return Frame(f, fmt)

    def __setitem__(self, key, value):
        """Write `value` (Frame or ndarray) into the sliced region in place."""
        value = frameify(value, "value")
        miny, maxy, minx, maxx = self._resolve_2d_slice(key)

        if value.shape[0] != maxy - miny or value.shape[1] != maxx - minx:
            raise NotImplementedError("Shape mismatch")

        self._mut()
        value._mut()

        self._f = _slice_write_mat(self._f, value._f, miny, maxy, minx, maxx)
217
+
218
+
219
def _inline_frame(arr):
    """Wrap a BGR uint8 ndarray as a Frame by embedding its pixels inline."""
    if arr.dtype != np.uint8:
        raise Exception("Only uint8 arrays are supported")
    if len(arr.shape) != 3:
        raise Exception("Only 3D arrays are supported")
    if arr.shape[2] != 3:
        raise Exception("To inline a frame, the array must have 3 channels")

    # BGR (OpenCV convention) -> RGB, then ensure a contiguous buffer so it
    # can be handed off as raw bytes.
    rgb = arr[:, :, ::-1]
    if not rgb.flags["C_CONTIGUOUS"]:
        rgb = np.ascontiguousarray(rgb)

    height, width = rgb.shape[0], rgb.shape[1]
    pix_fmt = "rgb24"

    # Cheap compression keeps the inlined payload small.
    compressed = zlib.compress(memoryview(rgb), level=1)

    node = _inline_mat(
        compressed, width=width, height=height, pix_fmt=pix_fmt, compression="zlib"
    )
    return Frame(node, {"width": width, "height": height, "pix_fmt": pix_fmt})
244
+
245
+
246
class VideoCapture:
    """cv2.VideoCapture-compatible reader backed by a vidformer source."""

    def __init__(self, path: str):
        """Open a source.

        `path` may be a path/URL string (Yrden server), an http(s) URL
        string (Igni server), or an existing vf.IgniSource.
        """
        server = _server()
        if type(path) is str:
            if isinstance(server, vf.YrdenServer):
                self._path = path
                # Register under a random name so repeated opens don't collide.
                self._source = vf.YrdenSource(server, str(uuid.uuid4()), path, 0)
            else:
                assert isinstance(server, vf.IgniServer)
                match = re.match(r"(http|https)://([^/]+)(.*)", path)
                if match is not None:
                    # Split the URL into endpoint (scheme://host) and path.
                    endpoint = f"{match.group(1)}://{match.group(2)}"
                    path = match.group(3)
                    if path.startswith("/"):
                        path = path[1:]
                    self._path = path
                    self._source = server.source(
                        path, 0, "http", {"endpoint": endpoint}
                    )
                else:
                    raise Exception(
                        "Using a VideoCapture source by name only works with http(s) URLs. You need to pass an IgniSource instead."
                    )
        elif isinstance(path, vf.IgniSource):
            assert isinstance(server, vf.IgniServer)
            self._path = path._name
            self._source = path
        # NOTE(review): any other path type falls through silently, leaving
        # self._source unset — confirm whether this should raise instead.
        self._next_frame_idx = 0

    def isOpened(self) -> bool:
        # Construction either succeeds or raises, so an instance is always open.
        return True

    def get(self, prop):
        """Query a CAP_PROP_* property; raises on unknown properties."""
        if prop == CAP_PROP_FPS:
            return _ts_to_fps(self._source.ts())
        elif prop == CAP_PROP_FRAME_WIDTH:
            return self._source.fmt()["width"]
        elif prop == CAP_PROP_FRAME_HEIGHT:
            return self._source.fmt()["height"]
        elif prop == CAP_PROP_FRAME_COUNT:
            return len(self._source.ts())
        elif prop == CAP_PROP_POS_FRAMES:
            return self._next_frame_idx

        raise Exception(f"Unknown property {prop}")

    def set(self, prop, value):
        """Seek by frame index (CAP_PROP_POS_FRAMES) or by time in
        milliseconds (CAP_PROP_POS_MSEC)."""
        if prop == CAP_PROP_POS_FRAMES:
            assert value >= 0 and value < len(self._source.ts())
            self._next_frame_idx = value
        elif prop == CAP_PROP_POS_MSEC:
            t = Fraction(value, 1000)
            ts = self._source.ts()
            # Index of the first frame whose timestamp is strictly after t.
            next_frame_idx = bisect_right(ts, t)
            self._next_frame_idx = next_frame_idx
        else:
            raise Exception(f"Unsupported property {prop}")

    def read(self):
        """Return (True, Frame) for the next frame, or (False, None) at EOF."""
        if self._next_frame_idx >= len(self._source):
            return False, None
        frame = self._source.iloc[self._next_frame_idx]
        self._next_frame_idx += 1
        frame = Frame(frame, self._source.fmt())
        return True, frame

    def release(self):
        """No-op; provided for cv2 API compatibility."""
        pass
314
+
315
+
316
class VideoWriter:
    """cv2.VideoWriter-compatible facade that dispatches to the Yrden or Igni
    backend, depending on the active server type."""

    def __init__(self, *args, **kwargs):
        server = _server()
        if isinstance(server, vf.YrdenServer):
            writer_cls = _YrdenVideoWriter
        elif isinstance(server, vf.IgniServer):
            writer_cls = _IgniVideoWriter
        else:
            raise Exception("Unsupported server type")
        self._writer = writer_cls(*args, **kwargs)

    def write(self, *args, **kwargs):
        """Forward to the backend writer's write()."""
        return self._writer.write(*args, **kwargs)

    def release(self, *args, **kwargs):
        """Forward to the backend writer's release()."""
        return self._writer.release(*args, **kwargs)

    def spec(self, *args, **kwargs):
        """Forward to the backend writer's spec()."""
        return self._writer.spec(*args, **kwargs)
334
+
335
+
336
class _IgniVideoWriter:
    """VideoWriter backend for Igni servers; streams frames to a server-side
    spec in batches rather than writing a local file."""

    def __init__(
        self,
        path,
        fourcc,
        fps,
        size,
        batch_size=1024,
        vod_segment_length=Fraction(2, 1),
    ):
        """
        Args:
            path: must be None (Igni cannot write to a local file).
            fourcc: unused here; accepted for cv2 API compatibility.
            fps: int or Fraction frame rate.
            size: (width, height) pair.
            batch_size: number of frames buffered before pushing to the server.
            vod_segment_length: segment duration forwarded to create_spec.
        """
        server = _server()
        assert isinstance(server, vf.IgniServer)
        if path is not None:
            raise Exception(
                "Igni does not support writing to a file. VideoWriter path must be None"
            )
        if isinstance(fps, int):
            self._f_time = Fraction(1, fps)
        elif isinstance(fps, Fraction):
            self._f_time = 1 / fps
        else:
            raise Exception("fps must be an integer or a Fraction")

        assert isinstance(size, tuple) or isinstance(size, list)
        assert len(size) == 2
        width, height = size
        self._spec = server.create_spec(
            width, height, "yuv420p", vod_segment_length, 1 / self._f_time
        )
        self._batch_size = batch_size
        # Index of the next frame to be written.
        self._idx = 0
        # Buffered (timestamp, frame-node-or-None) pairs awaiting a push.
        self._frame_buffer = []

    def _flush(self, terminal=False):
        # Push the buffered frames; the position argument is the index of the
        # first buffered frame.
        server = _server()
        server.push_spec_part(
            self._spec,
            self._idx - len(self._frame_buffer),
            self._frame_buffer,
            terminal=terminal,
        )
        self._frame_buffer = []

    def _explicit_terminate(self):
        # Mark the spec complete without pushing further frames.
        # NOTE(review): this passes self._spec._id while _flush passes
        # self._spec — confirm which form push_spec_part expects.
        server = _server()
        server.push_spec_part(self._spec._id, self._idx, [], terminal=True)

    def spec(self):
        """Return the server-side spec handle."""
        return self._spec

    def write(self, frame):
        """Buffer one frame (or None for a gap); flushes when the batch fills.

        Raises if the frame's dimensions don't match the spec; converts the
        pixel format when it differs.
        """
        if frame is not None:
            frame = frameify(frame, "frame")
            if frame._fmt["width"] != self._spec._fmt["width"]:
                raise Exception(
                    f"Frame type error; expected width {self._spec._fmt['width']}, got {frame._fmt['width']}"
                )
            if frame._fmt["height"] != self._spec._fmt["height"]:
                raise Exception(
                    f"Frame type error; expected height {self._spec._fmt['height']}, got {frame._fmt['height']}"
                )
            if frame._fmt["pix_fmt"] != self._spec._fmt["pix_fmt"]:
                f_obj = _filter_scale(frame._f, pix_fmt=self._spec._fmt["pix_fmt"])
                frame = Frame(f_obj, self._spec._fmt)
        t = self._f_time * self._idx
        self._frame_buffer.append((t, frame._f if frame is not None else None))
        self._idx += 1

        if len(self._frame_buffer) >= self._batch_size:
            self._flush()

    def release(self):
        """Flush any remaining frames and mark the spec terminal."""
        if len(self._frame_buffer) > 0:
            self._flush(True)
        else:
            self._explicit_terminate()
412
+
413
+
414
class _YrdenVideoWriter:
    """VideoWriter backend for Yrden servers; buffers frame nodes and renders
    them all at release() time."""

    def __init__(self, path, fourcc, fps, size):
        assert isinstance(fourcc, VideoWriter_fourcc)
        if path is not None and not isinstance(path, str):
            raise Exception("path must be a string or None")
        self._path = path
        self._fourcc = fourcc  # stored but not read within this class
        self._fps = fps
        self._size = size  # (width, height); see spec()

        # One filter node per written frame.
        self._frames = []
        self._pix_fmt = "yuv420p"

    def write(self, frame):
        """Append a frame, converting it to yuv420p if it isn't already."""
        frame = frameify(frame, "frame")

        if frame._fmt["pix_fmt"] != self._pix_fmt:
            f_obj = _filter_scale(frame._f, pix_fmt=self._pix_fmt)
            self._frames.append(f_obj)
        else:
            self._frames.append(frame._f)

    def release(self):
        """Render buffered frames to self._path (no-op when path is None)."""
        if self._path is None:
            return

        spec = self.spec()
        server = _server()
        spec.save(server, self._path)

    def spec(self) -> vf.YrdenSpec:
        """Build a YrdenSpec covering all buffered frames at the writer's fps."""
        fmt = {
            "width": self._size[0],
            "height": self._size[1],
            "pix_fmt": self._pix_fmt,
        }
        domain = _fps_to_ts(self._fps, len(self._frames))
        spec = vf.YrdenSpec(domain, lambda t, i: self._frames[i], fmt)
        return spec
453
+
454
+
455
class VideoWriter_fourcc:
    """Stand-in for cv2.VideoWriter_fourcc; stores the character codes
    without interpreting them."""

    def __init__(self, *args):
        # e.g. VideoWriter_fourcc(*"mp4v"); kept only for API compatibility.
        self._args = args
458
+
459
+
460
def frameify(obj, field_name=None):
    """
    Turn an object (e.g., ndarray) into a Frame.
    """

    if isinstance(obj, Frame):
        return obj
    if isinstance(obj, np.ndarray):
        return _inline_frame(obj)

    # Anything else is unsupported; mention the field name when we have one.
    if field_name is not None:
        raise Exception(
            f"Unsupported type for field {field_name}, expected Frame or np.ndarray"
        )
    raise Exception("Unsupported type, expected Frame or np.ndarray")
476
+
477
+
478
def imread(path, *args):
    """Load a single image file (.jpg/.jpeg/.png) as a Frame.

    Args:
        path: path to the image file.

    Returns:
        Frame: frame 0 of the decoded image source.

    Raises:
        NotImplementedError: if extra positional arguments are passed.
        ValueError: if the file extension is not a supported image format.
    """
    if len(args) > 0:
        raise NotImplementedError("imread does not support additional arguments")

    # Explicit check rather than `assert` so unsupported formats still fail
    # when Python runs with assertions disabled (-O).
    if not path.lower().endswith((".jpg", ".jpeg", ".png")):
        raise ValueError("imread only supports .jpg, .jpeg, and .png files")
    server = _server()
    source = vf.YrdenSource(server, str(uuid.uuid4()), path, 0)
    frame = Frame(source.iloc[0], source.fmt())
    return frame
487
+
488
+
489
def imwrite(path, img, *args):
    """Write a single frame to disk as .png or .jpg/.jpeg.

    Raises NotImplementedError for extra positional arguments and Exception
    for unsupported file extensions.
    """
    if len(args) > 0:
        raise NotImplementedError("imwrite does not support additional arguments")

    img = frameify(img)

    fmt = img._fmt.copy()
    width = fmt["width"]
    height = fmt["height"]
    f = img._f

    # Single-image "video" domain: one frame at t=0.
    domain = [Fraction(0, 1)]

    if path.lower().endswith(".png"):
        img._mut()  # Make sure it's in rgb24
        spec = vf.YrdenSpec(
            domain,
            lambda t, i: img._f,
            {"width": width, "height": height, "pix_fmt": "rgb24"},
        )
        spec.save(_server(), path, encoder="png")
    elif path.lower().endswith((".jpg", ".jpeg")):
        if img._modified:
            # it's rgb24, we need to convert to something jpeg can handle
            f = _filter_scale(img._f, pix_fmt="yuv420p")
            fmt["pix_fmt"] = "yuv420p"
        else:
            # Unmodified frames keep their source format unless mjpeg can't
            # take it, in which case convert to a full-range yuv format.
            if fmt["pix_fmt"] not in ["yuvj420p", "yuvj422p", "yuvj444p"]:
                f = _filter_scale(img._f, pix_fmt="yuvj420p")
                fmt["pix_fmt"] = "yuvj420p"

        spec = vf.YrdenSpec(domain, lambda t, i: f, fmt)
        spec.save(_server(), path, encoder="mjpeg")
    else:
        raise Exception("Unsupported image format")
524
+
525
+
526
def vidplay(video, *args, **kwargs):
    """
    Play a vidformer video specification.

    Args:
        video: one of [vidformer.Spec, vidformer.Source, vidformer.cv2.VideoWriter]
    """
    if isinstance(video, (vf.YrdenSpec, vf.YrdenSource)):
        return video.play(_server(), *args, **kwargs)
    if isinstance(video, VideoWriter):
        # Unwrap the facade and retry with the concrete writer.
        return vidplay(video._writer, *args, **kwargs)
    if isinstance(video, _YrdenVideoWriter):
        return video.spec().play(_server(), *args, **kwargs)
    if isinstance(video, _IgniVideoWriter):
        return video._spec.play(*args, **kwargs)
    if isinstance(video, vf.IgniSpec):
        return video.play(*args, **kwargs)
    raise Exception("Unsupported video type to vidplay")
547
+
548
+
549
def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
    """
    cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )

    Draws a rectangle on img in place. The optional arguments are positional
    in the underlying filter, so each one requires all earlier ones to be set.
    """

    img = frameify(img)
    img._mut()

    assert len(pt1) == 2
    assert len(pt2) == 2
    pt1 = [int(x) for x in pt1]
    pt2 = [int(x) for x in pt2]

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)  # default to fully opaque

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if shift is not None:
        assert isinstance(shift, int)
        # Bug fix: previously asserted `shift is not None` (a tautology).
        # shift requires lineType to be set, otherwise shift would be passed
        # in lineType's positional slot.
        assert lineType is not None
        args.append(shift)

    img._f = _filter_rectangle(img._f, pt1, pt2, color, *args)
581
+
582
+
583
def putText(
    img,
    text,
    org,
    fontFace,
    fontScale,
    color,
    thickness=None,
    lineType=None,
    bottomLeftOrigin=None,
):
    """
    cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
    """

    img = frameify(img)
    img._mut()

    assert isinstance(text, str)

    assert len(org) == 2
    org = [int(coord) for coord in org]

    assert isinstance(fontFace, int)
    assert isinstance(fontScale, (float, int))
    fontScale = float(fontScale)

    assert len(color) in (3, 4)
    color = [float(channel) for channel in color]
    if len(color) == 3:
        color.append(255.0)  # opaque alpha by default

    # The optional arguments are positional; each requires its predecessor.
    extra = []
    if thickness is not None:
        assert isinstance(thickness, int)
        extra.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        extra.append(lineType)
    if bottomLeftOrigin is not None:
        assert isinstance(bottomLeftOrigin, bool)
        assert lineType is not None
        extra.append(bottomLeftOrigin)

    img._f = _filter_putText(img._f, text, org, fontFace, fontScale, color, *extra)
629
+
630
+
631
def arrowedLine(
    img, pt1, pt2, color, thickness=None, line_type=None, shift=None, tipLength=None
):
    """
    cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] )

    Draws an arrowed line on img in place. The optional arguments are
    positional in the underlying filter, so each requires its predecessors.
    """
    img = frameify(img)
    img._mut()

    assert len(pt1) == 2
    assert len(pt2) == 2
    assert all(isinstance(x, int) for x in pt1)
    assert all(isinstance(x, int) for x in pt2)

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)  # default to fully opaque

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if line_type is not None:
        assert isinstance(line_type, int)
        assert thickness is not None
        args.append(line_type)
    if shift is not None:
        assert isinstance(shift, int)
        # Bug fix: previously asserted `shift is not None` (a tautology).
        # shift requires line_type, otherwise shift would be passed in
        # line_type's positional slot.
        assert line_type is not None
        args.append(shift)
    if tipLength is not None:
        assert isinstance(tipLength, float)
        assert shift is not None
        args.append(tipLength)

    img._f = _filter_arrowedLine(img._f, pt1, pt2, color, *args)
668
+
669
+
670
def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
    """Draw a line segment from pt1 to pt2 on img in place.

    The optional arguments are positional in the underlying filter, so each
    one requires all earlier ones to be set.
    """
    img = frameify(img)
    img._mut()

    assert len(pt1) == 2
    assert len(pt2) == 2
    pt1 = [int(x) for x in pt1]
    pt2 = [int(x) for x in pt2]

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)  # default to fully opaque

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if shift is not None:
        assert isinstance(shift, int)
        # Bug fix: previously asserted `shift is not None` (a tautology).
        # shift requires lineType, otherwise shift would be passed in
        # lineType's positional slot.
        assert lineType is not None
        args.append(shift)

    img._f = _filter_line(img._f, pt1, pt2, color, *args)
698
+
699
+
700
def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):
    """Draw a circle on img in place.

    The optional arguments are positional in the underlying filter, so each
    one requires all earlier ones to be set.
    """
    img = frameify(img)
    img._mut()

    assert len(center) == 2
    center = [int(x) for x in center]

    assert isinstance(radius, int)

    assert len(color) == 3 or len(color) == 4
    color = [float(x) for x in color]
    if len(color) == 3:
        color.append(255.0)  # default to fully opaque

    args = []
    if thickness is not None:
        assert isinstance(thickness, int)
        args.append(thickness)
    if lineType is not None:
        assert isinstance(lineType, int)
        assert thickness is not None
        args.append(lineType)
    if shift is not None:
        assert isinstance(shift, int)
        # Bug fix: previously asserted `shift is not None` (a tautology).
        # shift requires lineType, otherwise shift would be passed in
        # lineType's positional slot.
        assert lineType is not None
        args.append(shift)

    img._f = _filter_circle(img._f, center, radius, color, *args)
728
+
729
+
730
def getFontScaleFromHeight(*args, **kwargs):
    """
    cv.getFontScaleFromHeight( fontFace, pixelHeight[, thickness] )

    Pass-through to the real OpenCV binding; requires cv2 to be installed.
    """
    if _opencv2 is None:
        raise NotImplementedError("getFontScaleFromHeight requires the cv2 module")
    return _opencv2.getFontScaleFromHeight(*args, **kwargs)
737
+
738
+
739
def getTextSize(*args, **kwargs):
    """
    cv.getTextSize( text, fontFace, fontScale, thickness )

    Pass-through to the real OpenCV binding; requires cv2 to be installed.
    """
    if _opencv2 is None:
        raise NotImplementedError("getTextSize requires the cv2 module")
    return _opencv2.getTextSize(*args, **kwargs)
746
+
747
+
748
def addWeighted(src1, alpha, src2, beta, gamma, dst=None, dtype=-1):
    """
    cv.addWeighted( src1, alpha, src2, beta, gamma[, dst[, dtype]] ) -> dst

    Blends src1 and src2 via the cv2.addWeighted filter; writes into dst in
    place when one is given, otherwise allocates a new Frame.
    """
    src1 = frameify(src1, "src1")
    src2 = frameify(src2, "src2")
    src1._mut()
    src2._mut()

    if dst is None:
        # Seed the output from src1; its filter node is replaced below.
        dst = Frame(src1._f, src1._fmt.copy())
    else:
        assert isinstance(dst, Frame), "dst must be a Frame"
        dst._mut()

    assert isinstance(alpha, float) or isinstance(alpha, int)
    assert isinstance(beta, float) or isinstance(beta, int)
    assert isinstance(gamma, float) or isinstance(gamma, int)
    alpha = float(alpha)
    beta = float(beta)
    gamma = float(gamma)

    if dtype != -1:
        raise Exception("addWeighted does not support the dtype argument")

    dst._f = _filter_addWeighted(src1._f, alpha, src2._f, beta, gamma)
    return dst
775
+
776
+
777
def ellipse(
    img,
    center,
    axes,
    angle,
    startAngle,
    endAngle,
    color,
    thickness=1,
    lineType=LINE_8,
    shift=0,
):
    """Draw an ellipse (or elliptic arc) on img in place."""
    img = frameify(img)
    img._mut()

    assert len(center) == 2
    center = [int(c) for c in center]

    assert len(axes) == 2
    axes = [int(a) for a in axes]

    assert isinstance(angle, (float, int))
    assert isinstance(startAngle, (float, int))
    assert isinstance(endAngle, (float, int))
    angle = float(angle)
    startAngle = float(startAngle)
    endAngle = float(endAngle)

    assert len(color) in (3, 4)
    color = [float(channel) for channel in color]
    if len(color) == 3:
        color.append(255.0)  # opaque alpha by default

    assert isinstance(thickness, int)
    assert isinstance(lineType, int)
    assert isinstance(shift, int)

    img._f = _filter_ellipse(
        img._f,
        center,
        axes,
        angle,
        startAngle,
        endAngle,
        color,
        thickness,
        lineType,
        shift,
    )
826
+
827
+
828
+ # Stubs for unimplemented functions
829
+
830
+
831
def clipLine(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError("clipLine is not yet implemented in the cv2 frontend")
833
+
834
+
835
def drawContours(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError("drawContours is not yet implemented in the cv2 frontend")
837
+
838
+
839
def drawMarker(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError("drawMarker is not yet implemented in the cv2 frontend")
841
+
842
+
843
def ellipse2Poly(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError("ellipse2Poly is not yet implemented in the cv2 frontend")
845
+
846
+
847
def fillConvexPoly(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError(
        "fillConvexPoly is not yet implemented in the cv2 frontend"
    )
851
+
852
+
853
def fillPoly(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError("fillPoly is not yet implemented in the cv2 frontend")
855
+
856
+
857
def polylines(*args, **kwargs):
    """Not supported by the vidformer cv2 frontend yet."""
    raise NotImplementedError("polylines is not yet implemented in the cv2 frontend")