vidformer 0.9.0-py3-none-any.whl → 0.10.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
vidformer/cv2/vf_cv2.py DELETED
@@ -1,810 +0,0 @@
- """
- vidformer.cv2 is the cv2 frontend for [vidformer](https://github.com/ixlab/vidformer).
-
- > ⚠️ This module is a work in progress. See the [implemented functions list](https://ixlab.github.io/vidformer/opencv-filters.html).
-
- **Quick links:**
- * [📦 PyPI](https://pypi.org/project/vidformer/)
- * [📘 Documentation - vidformer-py](https://ixlab.github.io/vidformer/vidformer-py/)
- * [📘 Documentation - vidformer.cv2](https://ixlab.github.io/vidformer/vidformer-py-cv2/)
- * [🧑‍💻 Source Code](https://github.com/ixlab/vidformer/tree/main/vidformer-py/)
- """
-
- from .. import vf
- from .. import igni
-
- try:
-     import cv2 as _opencv2
- except:
-     _opencv2 = None
-
- import numpy as np
-
- import uuid
- from fractions import Fraction
- from bisect import bisect_right
- import zlib
- import re
-
- CAP_PROP_POS_MSEC = 0
- CAP_PROP_POS_FRAMES = 1
- CAP_PROP_FRAME_WIDTH = 3
- CAP_PROP_FRAME_HEIGHT = 4
- CAP_PROP_FPS = 5
- CAP_PROP_FRAME_COUNT = 7
-
- FONT_HERSHEY_SIMPLEX = 0
- FONT_HERSHEY_PLAIN = 1
- FONT_HERSHEY_DUPLEX = 2
- FONT_HERSHEY_COMPLEX = 3
- FONT_HERSHEY_TRIPLEX = 4
- FONT_HERSHEY_COMPLEX_SMALL = 5
- FONT_HERSHEY_SCRIPT_SIMPLEX = 6
- FONT_HERSHEY_SCRIPT_COMPLEX = 7
- FONT_ITALIC = 16
-
- FILLED = -1
- LINE_4 = 4
- LINE_8 = 8
- LINE_AA = 16
-
- _inline_mat = vf.Filter("_inline_mat")
- _slice_mat = vf.Filter("_slice_mat")
- _slice_write_mat = vf.Filter("_slice_write_mat")
-
-
- _filter_scale = vf.Filter("Scale")
- _filter_rectangle = vf.Filter("cv2.rectangle")
- _filter_putText = vf.Filter("cv2.putText")
- _filter_arrowedLine = vf.Filter("cv2.arrowedLine")
- _filter_line = vf.Filter("cv2.line")
- _filter_circle = vf.Filter("cv2.circle")
- _filter_addWeighted = vf.Filter("cv2.addWeighted")
-
-
- def _ts_to_fps(timestamps):
-     return int(1 / (timestamps[1] - timestamps[0]))  # TODO: Fix for non-integer fps
-
-
- def _fps_to_ts(fps, n_frames):
-     assert type(fps) == int
-     return [Fraction(i, fps) for i in range(n_frames)]
-
-
- _global_cv2_server = None
-
-
- def _server():
-     global _global_cv2_server
-     if _global_cv2_server is None:
-         _global_cv2_server = vf.YrdenServer()
-     return _global_cv2_server
-
-
- def set_cv2_server(server):
-     """Set the server to use for the cv2 frontend."""
-     global _global_cv2_server
-     assert isinstance(server, vf.YrdenServer) or isinstance(server, igni.IgniServer)
-     _global_cv2_server = server
-
-
- class Frame:
-     def __init__(self, f, fmt):
-         self._f = f
-         self._fmt = fmt
-         self.shape = (fmt["height"], fmt["width"], 3)
-
-         # denotes that the frame has not yet been modified
-         # when a frame is modified, it is converted to rgb24 first
-         self._modified = False
-
-     def _mut(self):
-         if self._modified:
-             assert self._fmt["pix_fmt"] == "rgb24"
-             return
-
-         self._modified = True
-         if self._fmt["pix_fmt"] != "rgb24":
-             self._f = _filter_scale(self._f, pix_fmt="rgb24")
-             self._fmt["pix_fmt"] = "rgb24"
-
-     def numpy(self):
-         """
-         Return the frame as a numpy array.
-         """
-
-         self._mut()
-         spec = vf.Spec([Fraction(0, 1)], lambda t, i: self._f, self._fmt)
-         loader = spec.load(_server())
-
-         frame_raster_rgb24 = loader[0]
-         assert type(frame_raster_rgb24) == bytes
-         assert len(frame_raster_rgb24) == self.shape[0] * self.shape[1] * 3
-         raw_data_array = np.frombuffer(frame_raster_rgb24, dtype=np.uint8)
-         frame = raw_data_array.reshape(self.shape)
-         frame = frame[:, :, ::-1]  # convert RGB to BGR
-         return frame
-
-     def __getitem__(self, key):
-         if not isinstance(key, tuple):
-             raise NotImplementedError("Only 2D slicing is supported")
-
-         if len(key) != 2:
-             raise NotImplementedError("Only 2D slicing is supported")
-
-         if not all(isinstance(x, slice) for x in key):
-             raise NotImplementedError("Only 2D slicing is supported")
-
-         miny = key[0].start if key[0].start is not None else 0
-         maxy = key[0].stop if key[0].stop is not None else self.shape[0]
-         minx = key[1].start if key[1].start is not None else 0
-         maxx = key[1].stop if key[1].stop is not None else self.shape[1]
-
-         # handle negative indices
-         if miny < 0:
-             miny = self.shape[0] + miny
-         if maxy < 0:
-             maxy = self.shape[0] + maxy
-         if minx < 0:
-             minx = self.shape[1] + minx
-         if maxx < 0:
-             maxx = self.shape[1] + maxx
-
-         if (
-             maxy <= miny
-             or maxx <= minx
-             or miny < 0
-             or minx < 0
-             or maxy > self.shape[0]
-             or maxx > self.shape[1]
-         ):
-             raise NotImplementedError("Invalid slice")
-
-         f = _slice_mat(self._f, miny, maxy, minx, maxx)
-         fmt = self._fmt.copy()
-         fmt["width"] = maxx - minx
-         fmt["height"] = maxy - miny
-         return Frame(f, fmt)
-
-     def __setitem__(self, key, value):
-         value = frameify(value, "value")
-
-         if not isinstance(key, tuple):
-             raise NotImplementedError("Only 2D slicing is supported")
-
-         if len(key) != 2:
-             raise NotImplementedError("Only 2D slicing is supported")
-
-         if not all(isinstance(x, slice) for x in key):
-             raise NotImplementedError("Only 2D slicing is supported")
-
-         miny = key[0].start if key[0].start is not None else 0
-         maxy = key[0].stop if key[0].stop is not None else self.shape[0]
-         minx = key[1].start if key[1].start is not None else 0
-         maxx = key[1].stop if key[1].stop is not None else self.shape[1]
-
-         # handle negative indices
-         if miny < 0:
-             miny = self.shape[0] + miny
-         if maxy < 0:
-             maxy = self.shape[0] + maxy
-         if minx < 0:
-             minx = self.shape[1] + minx
-         if maxx < 0:
-             maxx = self.shape[1] + maxx
-
-         if (
-             maxy <= miny
-             or maxx <= minx
-             or miny < 0
-             or minx < 0
-             or maxy > self.shape[0]
-             or maxx > self.shape[1]
-         ):
-             raise NotImplementedError("Invalid slice")
-
-         if value.shape[0] != maxy - miny or value.shape[1] != maxx - minx:
-             raise NotImplementedError("Shape mismatch")
-
-         self._mut()
-         value._mut()
-
-         self._f = _slice_write_mat(self._f, value._f, miny, maxy, minx, maxx)
-
-
- def _inline_frame(arr):
-     if arr.dtype != np.uint8:
-         raise Exception("Only uint8 arrays are supported")
-     if len(arr.shape) != 3:
-         raise Exception("Only 3D arrays are supported")
-     if arr.shape[2] != 3:
-         raise Exception("To inline a frame, the array must have 3 channels")
-
-     arr = arr[:, :, ::-1]
-     if not arr.flags["C_CONTIGUOUS"]:
-         arr = np.ascontiguousarray(arr)
-
-     width = arr.shape[1]
-     height = arr.shape[0]
-     pix_fmt = "rgb24"
-
-     data_gzip = zlib.compress(memoryview(arr), level=1)
-
-     f = _inline_mat(
-         data_gzip, width=width, height=height, pix_fmt=pix_fmt, compression="zlib"
-     )
-     fmt = {"width": width, "height": height, "pix_fmt": pix_fmt}
-
-     # Return the resulting Frame object
-     return Frame(f, fmt)
-
-
- class VideoCapture:
-     def __init__(self, path):
-         server = _server()
-         if type(path) == str:
-             if isinstance(server, vf.YrdenServer):
-                 self._path = path
-                 self._source = vf.Source(server, str(uuid.uuid4()), path, 0)
-             else:
-                 assert isinstance(server, igni.IgniServer)
-                 match = re.match(r"(http|https)://([^/]+)(.*)", path)
-                 if match is not None:
-                     endpoint = f"{match.group(1)}://{match.group(2)}"
-                     path = match.group(3)
-                     if path.startswith("/"):
-                         path = path[1:]
-                     self._path = path
-                     self._source = server.source(
-                         path, 0, "http", {"endpoint": endpoint}
-                     )
-                 else:
-                     raise Exception(
-                         "Using a VideoCapture source by name only works with http(s) URLs. You need to pass an IgniSource instead."
-                     )
-         elif isinstance(path, igni.IgniSource):
-             assert isinstance(server, igni.IgniServer)
-             self._path = path._name
-             self._source = path
-         self._next_frame_idx = 0
-
-     def isOpened(self):
-         return True
-
-     def get(self, prop):
-         if prop == CAP_PROP_FPS:
-             return _ts_to_fps(self._source.ts())
-         elif prop == CAP_PROP_FRAME_WIDTH:
-             return self._source.fmt()["width"]
-         elif prop == CAP_PROP_FRAME_HEIGHT:
-             return self._source.fmt()["height"]
-         elif prop == CAP_PROP_FRAME_COUNT:
-             return len(self._source.ts())
-         elif prop == CAP_PROP_POS_FRAMES:
-             return self._next_frame_idx
-
-         raise Exception(f"Unknown property {prop}")
-
-     def set(self, prop, value):
-         if prop == CAP_PROP_POS_FRAMES:
-             assert value >= 0 and value < len(self._source.ts())
-             self._next_frame_idx = value
-         elif prop == CAP_PROP_POS_MSEC:
-             t = Fraction(value, 1000)
-             ts = self._source.ts()
-             next_frame_idx = bisect_right(ts, t)
-             self._next_frame_idx = next_frame_idx
-         else:
-             raise Exception(f"Unsupported property {prop}")
-
-     def read(self):
-         if self._next_frame_idx >= len(self._source):
-             return False, None
-         frame = self._source.iloc[self._next_frame_idx]
-         self._next_frame_idx += 1
-         frame = Frame(frame, self._source.fmt())
-         return True, frame
-
-     def release(self):
-         pass
-
-
- class VideoWriter:
-     def __init__(self, *args, **kwargs):
-         server = _server()
-         if isinstance(server, vf.YrdenServer):
-             self._writer = _YrdenVideoWriter(*args, **kwargs)
-         elif isinstance(server, igni.IgniServer):
-             self._writer = _IgniVideoWriter(*args, **kwargs)
-         else:
-             raise Exception("Unsupported server type")
-
-     def spec(self):
-         return self._writer.spec()
-
-     def write(self, *args, **kwargs):
-         return self._writer.write(*args, **kwargs)
-
-     def release(self, *args, **kwargs):
-         return self._writer.release(*args, **kwargs)
-
-     def spec(self, *args, **kwargs):
-         return self._writer.spec(*args, **kwargs)
-
-
- class _IgniVideoWriter:
-     def __init__(
-         self,
-         path,
-         _fourcc,
-         fps,
-         size,
-         batch_size=1024,
-         vod_segment_length=Fraction(2, 1),
-     ):
-         server = _server()
-         assert isinstance(server, igni.IgniServer)
-         if path is not None:
-             raise Exception(
-                 "Igni does not support writing to a file. VideoWriter path must be None"
-             )
-         if isinstance(fps, int):
-             self._f_time = Fraction(1, fps)
-         elif isinstance(fps, Fraction):
-             self._f_time = 1 / fps
-         else:
-             raise Exception("fps must be an integer or a Fraction")
-
-         assert isinstance(size, tuple) or isinstance(size, list)
-         assert len(size) == 2
-         width, height = size
-         self._spec = server.create_spec(
-             width, height, "yuv420p", vod_segment_length, 1 / self._f_time
-         )
-         self._batch_size = batch_size
-         self._idx = 0
-         self._frame_buffer = []
-
-     def _flush(self, terminal=False):
-         server = _server()
-         server.push_spec_part(
-             self._spec,
-             self._idx - len(self._frame_buffer),
-             self._frame_buffer,
-             terminal=terminal,
-         )
-         self._frame_buffer = []
-
-     def _explicit_terminate(self):
-         server = _server()
-         server.push_spec_part(self._spec._id, self._idx, [], terminal=True)
-
-     def spec(self):
-         return self._spec
-
-     def write(self, frame):
-         if frame is not None:
-             frame = frameify(frame, "frame")
-             if frame._fmt["width"] != self._spec._fmt["width"]:
-                 raise Exception(
-                     f"Frame type error; expected width {self._spec._fmt['width']}, got {frame._fmt['width']}"
-                 )
-             if frame._fmt["height"] != self._spec._fmt["height"]:
-                 raise Exception(
-                     f"Frame type error; expected height {self._spec._fmt['height']}, got {frame._fmt['height']}"
-                 )
-             if frame._fmt["pix_fmt"] != self._spec._fmt["pix_fmt"]:
-                 f_obj = _filter_scale(frame._f, pix_fmt=self._spec._fmt["pix_fmt"])
-                 frame = Frame(f_obj, self._spec._fmt)
-         t = self._f_time * self._idx
-         self._frame_buffer.append((t, frame._f if frame is not None else None))
-         self._idx += 1
-
-         if len(self._frame_buffer) >= self._batch_size:
-             self._flush()
-
-     def release(self):
-         if len(self._frame_buffer) > 0:
-             self._flush(True)
-         else:
-             self._explicit_terminate()
-
-
- class _YrdenVideoWriter:
-     def __init__(self, path, fourcc, fps, size):
-         assert isinstance(fourcc, VideoWriter_fourcc)
-         if path is not None and not isinstance(path, str):
-             raise Exception("path must be a string or None")
-         self._path = path
-         self._fourcc = fourcc
-         self._fps = fps
-         self._size = size
-
-         self._frames = []
-         self._pix_fmt = "yuv420p"
-
-     def write(self, frame):
-         frame = frameify(frame, "frame")
-
-         if frame._fmt["pix_fmt"] != self._pix_fmt:
-             f_obj = _filter_scale(frame._f, pix_fmt=self._pix_fmt)
-             self._frames.append(f_obj)
-         else:
-             self._frames.append(frame._f)
-
-     def release(self):
-         if self._path is None:
-             return
-
-         spec = self.spec()
-         server = _server()
-         spec.save(server, self._path)
-
-     def spec(self) -> vf.Spec:
-         fmt = {
-             "width": self._size[0],
-             "height": self._size[1],
-             "pix_fmt": self._pix_fmt,
-         }
-         domain = _fps_to_ts(self._fps, len(self._frames))
-         spec = vf.Spec(domain, lambda t, i: self._frames[i], fmt)
-         return spec
-
-
- class VideoWriter_fourcc:
-     def __init__(self, *args):
-         self._args = args
-
-
- def frameify(obj, field_name=None):
-     """
-     Turn an object (e.g., ndarray) into a Frame.
-     """
-
-     if isinstance(obj, Frame):
-         return obj
-     elif isinstance(obj, np.ndarray):
-         return _inline_frame(obj)
-     else:
-         if field_name is not None:
-             raise Exception(
-                 f"Unsupported type for field {field_name}, expected Frame or np.ndarray"
-             )
-         else:
-             raise Exception("Unsupported type, expected Frame or np.ndarray")
-
-
- def imread(path, *args):
-     if len(args) > 0:
-         raise NotImplementedError("imread does not support additional arguments")
-
-     assert path.lower().endswith((".jpg", ".jpeg", ".png"))
-     server = _server()
-     source = vf.Source(server, str(uuid.uuid4()), path, 0)
-     frame = Frame(source.iloc[0], source.fmt())
-     return frame
-
-
- def imwrite(path, img, *args):
-     if len(args) > 0:
-         raise NotImplementedError("imwrite does not support additional arguments")
-
-     img = frameify(img)
-
-     fmt = img._fmt.copy()
-     width = fmt["width"]
-     height = fmt["height"]
-     f = img._f
-
-     domain = [Fraction(0, 1)]
-
-     if path.lower().endswith(".png"):
-         img._mut()  # Make sure it's in rgb24
-         spec = vf.Spec(
-             domain,
-             lambda t, i: img._f,
-             {"width": width, "height": height, "pix_fmt": "rgb24"},
-         )
-         spec.save(_server(), path, encoder="png")
-     elif path.lower().endswith((".jpg", ".jpeg")):
-         if img._modified:
-             # it's rgb24, we need to convert to something jpeg can handle
-             f = _filter_scale(img._f, pix_fmt="yuv420p")
-             fmt["pix_fmt"] = "yuv420p"
-         else:
-             if fmt["pix_fmt"] not in ["yuvj420p", "yuvj422p", "yuvj444p"]:
-                 f = _filter_scale(img._f, pix_fmt="yuvj420p")
-                 fmt["pix_fmt"] = "yuvj420p"
-
-         spec = vf.Spec(domain, lambda t, i: f, fmt)
-         spec.save(_server(), path, encoder="mjpeg")
-     else:
-         raise Exception("Unsupported image format")
-
-
- def vidplay(video, *args, **kwargs):
-     """
-     Play a vidformer video specification.
-
-     Args:
-         video: one of [vidformer.Spec, vidformer.Source, vidformer.cv2.VideoWriter]
-     """
-     if isinstance(video, vf.Spec):
-         return video.play(_server(), *args, **kwargs)
-     elif isinstance(video, vf.Source):
-         return video.play(_server(), *args, **kwargs)
-     elif isinstance(video, VideoWriter):
-         return vidplay(video._writer, *args, **kwargs)
-     elif isinstance(video, _YrdenVideoWriter):
-         return video.spec().play(_server(), *args, **kwargs)
-     elif isinstance(video, _IgniVideoWriter):
-         return video._spec.play(*args, **kwargs)
-     elif isinstance(video, igni.IgniSpec):
-         return video.play(*args, **kwargs)
-     else:
-         raise Exception("Unsupported video type to vidplay")
-
-
- def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
-     """
-     cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] )
-     """
-
-     img = frameify(img)
-     img._mut()
-
-     assert len(pt1) == 2
-     assert len(pt2) == 2
-     assert all(isinstance(x, int) for x in pt1)
-     assert all(isinstance(x, int) for x in pt2)
-
-     assert len(color) == 3 or len(color) == 4
-     color = [float(x) for x in color]
-     if len(color) == 3:
-         color.append(255.0)
-
-     args = []
-     if thickness is not None:
-         assert isinstance(thickness, int)
-         args.append(thickness)
-     if lineType is not None:
-         assert isinstance(lineType, int)
-         assert thickness is not None
-         args.append(lineType)
-     if shift is not None:
-         assert isinstance(shift, int)
-         assert shift is not None
-         args.append(shift)
-
-     img._f = _filter_rectangle(img._f, pt1, pt2, color, *args)
-
-
- def putText(
-     img,
-     text,
-     org,
-     fontFace,
-     fontScale,
-     color,
-     thickness=None,
-     lineType=None,
-     bottomLeftOrigin=None,
- ):
-     """
-     cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] )
-     """
-
-     img = frameify(img)
-     img._mut()
-
-     assert isinstance(text, str)
-
-     assert len(org) == 2
-     assert all(isinstance(x, int) for x in org)
-
-     assert isinstance(fontFace, int)
-     assert isinstance(fontScale, float) or isinstance(fontScale, int)
-     fontScale = float(fontScale)
-
-     assert len(color) == 3 or len(color) == 4
-     color = [float(x) for x in color]
-     if len(color) == 3:
-         color.append(255.0)
-
-     args = []
-     if thickness is not None:
-         assert isinstance(thickness, int)
-         args.append(thickness)
-     if lineType is not None:
-         assert isinstance(lineType, int)
-         assert thickness is not None
-         args.append(lineType)
-     if bottomLeftOrigin is not None:
-         assert isinstance(bottomLeftOrigin, bool)
-         assert lineType is not None
-         args.append(bottomLeftOrigin)
-
-     img._f = _filter_putText(img._f, text, org, fontFace, fontScale, color, *args)
-
-
- def arrowedLine(
-     img, pt1, pt2, color, thickness=None, line_type=None, shift=None, tipLength=None
- ):
-     """
-     cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] )
-     """
-     img = frameify(img)
-     img._mut()
-
-     assert len(pt1) == 2
-     assert len(pt2) == 2
-     assert all(isinstance(x, int) for x in pt1)
-     assert all(isinstance(x, int) for x in pt2)
-
-     assert len(color) == 3 or len(color) == 4
-     color = [float(x) for x in color]
-     if len(color) == 3:
-         color.append(255.0)
-
-     args = []
-     if thickness is not None:
-         assert isinstance(thickness, int)
-         args.append(thickness)
-     if line_type is not None:
-         assert isinstance(line_type, int)
-         assert thickness is not None
-         args.append(line_type)
-     if shift is not None:
-         assert isinstance(shift, int)
-         assert shift is not None
-         args.append(shift)
-     if tipLength is not None:
-         assert isinstance(tipLength, float)
-         assert shift is not None
-         args.append(tipLength)
-
-     img._f = _filter_arrowedLine(img._f, pt1, pt2, color, *args)
-
-
- def line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None):
-     img = frameify(img)
-     img._mut()
-
-     assert len(pt1) == 2
-     assert len(pt2) == 2
-     assert all(isinstance(x, int) for x in pt1)
-     assert all(isinstance(x, int) for x in pt2)
-
-     assert len(color) == 3 or len(color) == 4
-     color = [float(x) for x in color]
-     if len(color) == 3:
-         color.append(255.0)
-
-     args = []
-     if thickness is not None:
-         assert isinstance(thickness, int)
-         args.append(thickness)
-     if lineType is not None:
-         assert isinstance(lineType, int)
-         assert thickness is not None
-         args.append(lineType)
-     if shift is not None:
-         assert isinstance(shift, int)
-         assert shift is not None
-         args.append(shift)
-
-     img._f = _filter_line(img._f, pt1, pt2, color, *args)
-
-
- def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):
-     img = frameify(img)
-     img._mut()
-
-     assert len(center) == 2
-     assert all(isinstance(x, int) for x in center)
-
-     assert isinstance(radius, int)
-
-     assert len(color) == 3 or len(color) == 4
-     color = [float(x) for x in color]
-     if len(color) == 3:
-         color.append(255.0)
-
-     args = []
-     if thickness is not None:
-         assert isinstance(thickness, int)
-         args.append(thickness)
-     if lineType is not None:
-         assert isinstance(lineType, int)
-         assert thickness is not None
-         args.append(lineType)
-     if shift is not None:
-         assert isinstance(shift, int)
-         assert shift is not None
-         args.append(shift)
-
-     img._f = _filter_circle(img._f, center, radius, color, *args)
-
-
- def getFontScaleFromHeight(*args, **kwargs):
-     """
-     cv.getFontScaleFromHeight( fontFace, pixelHeight[, thickness] )
-     """
-     if _opencv2 is None:
-         raise NotImplementedError("getFontScaleFromHeight requires the cv2 module")
-     return _opencv2.getFontScaleFromHeight(*args, **kwargs)
-
-
- def getTextSize(*args, **kwargs):
-     """
-     cv.getTextSize( text, fontFace, fontScale, thickness )
-     """
-     if _opencv2 is None:
-         raise NotImplementedError("getTextSize requires the cv2 module")
-     return _opencv2.getTextSize(*args, **kwargs)
-
-
- def addWeighted(src1, alpha, src2, beta, gamma, dst=None, dtype=-1):
-     """
-     cv.addWeighted( src1, alpha, src2, beta, gamma[, dst[, dtype]] ) -> dst
-     """
-     src1 = frameify(src1, "src1")
-     src2 = frameify(src2, "src2")
-     src1._mut()
-     src2._mut()
-
-     if dst is None:
-         dst = Frame(src1._f, src1._fmt.copy())
-     else:
-         assert isinstance(dst, Frame), "dst must be a Frame"
-         dst._mut()
-
-     assert isinstance(alpha, float) or isinstance(alpha, int)
-     assert isinstance(beta, float) or isinstance(beta, int)
-     assert isinstance(gamma, float) or isinstance(gamma, int)
-     alpha = float(alpha)
-     beta = float(beta)
-     gamma = float(gamma)
-
-     if dtype != -1:
-         raise Exception("addWeighted does not support the dtype argument")
-
-     dst._f = _filter_addWeighted(src1._f, alpha, src2._f, beta, gamma)
-     return dst
-
-
- # Stubs for unimplemented functions
-
-
- def clipLine(*args, **kwargs):
-     raise NotImplementedError("clipLine is not yet implemented in the cv2 frontend")
-
-
- def drawContours(*args, **kwargs):
-     raise NotImplementedError("drawContours is not yet implemented in the cv2 frontend")
-
-
- def drawMarker(*args, **kwargs):
-     raise NotImplementedError("drawMarker is not yet implemented in the cv2 frontend")
-
-
- def ellipse(*args, **kwargs):
-     raise NotImplementedError("ellipse is not yet implemented in the cv2 frontend")
-
-
- def ellipse2Poly(*args, **kwargs):
-     raise NotImplementedError("ellipse2Poly is not yet implemented in the cv2 frontend")
-
-
- def fillConvexPoly(*args, **kwargs):
-     raise NotImplementedError(
-         "fillConvexPoly is not yet implemented in the cv2 frontend"
-     )
-
-
- def fillPoly(*args, **kwargs):
-     raise NotImplementedError("fillPoly is not yet implemented in the cv2 frontend")
-
-
- def polylines(*args, **kwargs):
-     raise NotImplementedError("polylines is not yet implemented in the cv2 frontend")