yta-video-opengl 0.0.12__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/t.py ADDED
@@ -0,0 +1,233 @@
1
+ """
2
+ This is an example of what a video has:
3
+ - fps = 60
4
+ - time_base = 1 / 15360
5
+ - tick = 1 / (fps * time_base) = 256
6
+
7
+ So, the first pts is 0 and the second
8
+ one is 256. The frame 16 will be 3840,
9
+ that is 256 * 15 (because first index
10
+ is 0).
11
+ """
12
+ from yta_validation.parameter import ParameterValidator
13
+ from yta_validation import PythonValidator
14
+ from yta_validation.number import NumberValidator
15
+ from quicktions import Fraction
16
+ from typing import Union
17
+
18
+
19
class T:
    """
    Helper to handle a 't' time moment through
    the fractions library, so the values stay
    exact and no issue related with commas can
    appear.

    Use this class whenever a specific 't' time
    moment has to be applied to a video or an
    audio frame, using the fps or the sample
    rate as the time_base to be precise.
    """

    @property
    def truncated(
        self
    ) -> Fraction:
        """
        The 't' as a Fraction, snapped down to the
        previous multiple of the given 'time_base'.
        """
        return round_t(self._t, self.time_base)

    @property
    def rounded(
        self
    ) -> Fraction:
        """
        The 't' as a Fraction, snapped to the
        nearest multiple of the given 'time_base'
        (it can match 'truncated' when the previous
        multiple is the closest one).
        """
        return round_t(self._t, self.time_base, do_truncate = False)

    @property
    def truncated_pts(
        self
    ) -> int:
        """
        The 'truncated' value expressed as a pts,
        the int value that the pyav library needs
        on audio and video frames so they get
        displayed in that moment.
        """
        return int(self.truncated / self.time_base)

    @property
    def rounded_pts(
        self
    ) -> int:
        """
        The 'rounded' value expressed as a pts,
        the int value that the pyav library needs
        on audio and video frames so they get
        displayed in that moment.
        """
        return int(self.rounded / self.time_base)

    def __init__(
        self,
        t: Union[int, float, Fraction],
        time_base: Fraction
    ):
        ParameterValidator.validate_mandatory_instance_of('t', t, [int, float, 'Fraction'])
        ParameterValidator.validate_mandatory_instance_of('time_base', time_base, 'Fraction')

        # The 't' time moment exactly as it was
        # received as parameter
        self._t: Union[int, float, Fraction] = t
        # The time_base used to snap the values to
        # multiples of it
        self.time_base: Fraction = time_base

    def next(
        self,
        n: int = 1
    ) -> 'T':
        """
        Get the value that is 'n' steps ahead of
        the 'truncated' property of this instance.

        Useful when the next value is needed for
        a range in an iteration or similar.
        """
        return T(self.truncated + n * self.time_base, self.time_base)

    def previous(
        self,
        n: int = 1
    ) -> 'T':
        """
        Get the value that is 'n' steps before the
        'truncated' property of this instance.

        Useful when the previous value is needed
        to check if the current one is the next
        one or similar.

        Be careful: when the 'truncated' value is
        0 this will produce an unexpected negative
        value.
        """
        return T(self.truncated - n * self.time_base, self.time_base)

    @staticmethod
    def from_fps(
        t: Union[int, float, Fraction],
        fps: Union[int, float, Fraction]
    ) -> 'T':
        """
        Get an instance by providing the 'fps' (or
        sample rate) value directly, which will be
        turned into a time base.
        """
        return T(t, fps_to_time_base(fps))

    @staticmethod
    def from_pts(
        pts: int,
        time_base: Fraction
    ) -> 'T':
        """
        Get an instance by providing the 'pts' and
        the 'time_base'.
        """
        return T(pts * time_base, time_base)
150
+
151
+
152
+ # TODO: Careful with this below
153
+ """
154
+ To obtain the pts step, or frame duration in
155
+ ticks, you need to apply 2 formulas that are
156
+ different according to if the frame is video
157
+ or audio:
158
+ - Audio: .samples
159
+ - Video: int(round((1 / .fps) / .time_base))
160
+ """
161
+
162
def get_ts(
    start: Union[int, float, Fraction],
    end: Union[int, float, Fraction],
    fps: Fraction
) -> list[Fraction]:
    """
    Get all the 't' time moments between the given
    'start' and the given 'end', truncated to
    multiples of the time base derived from the
    provided 'fps' for precision.

    The 'end' is not included: the range returned
    is [start, end) because the last frame is the
    start of another time range.
    """
    # Snap both limits to exact frame boundaries
    start = T.from_fps(start, fps).truncated
    end = T.from_fps(end, fps).truncated

    time_base = fps_to_time_base(fps)

    # Fraction floor division yields the exact
    # number of whole frames in [start, end)
    return [
        start + i * time_base
        for i in range((end - start) // time_base)
    ]
184
+
185
def round_t(
    t: Union[int, float, Fraction],
    time_base: Fraction = Fraction(1, 60),
    do_truncate: bool = True
) -> Fraction:
    """
    Round the given 't' time moment to the
    nearest multiple of the given 'time_base'
    (or to the previous multiple if
    'do_truncate' is True) using the fractions
    module to be precise.

    This method is very useful to snap 't' time
    moments in order to get the frames or the
    samples for specific and exact time moments
    according to their fps or sample rate (that
    should be passed as the 'time_base'
    parameter).

    Examples below, with `time_base = 1/5`:
    - `t = 0.25` => `0.2` (truncated or rounded)
    - `t = 0.35` => `0.2` (truncated)
    - `t = 0.45` => `0.4` (truncated or rounded)
    - `t = 0.55` => `0.6` (rounded)
    """
    # 'limit_denominator' removes the float
    # representation noise before snapping
    t = Fraction(t).limit_denominator()
    # How many (possibly fractional) steps of
    # 'time_base' fit into 't'
    steps = t / time_base

    snapped_steps = (
        # Floor division: previous whole multiple
        steps.numerator // steps.denominator
        if do_truncate else
        # Nearest whole multiple (ties to even)
        round(steps)
    )

    return snapped_steps * time_base
219
+
220
def fps_to_time_base(
    fps: Union[int, float, Fraction]
) -> Fraction:
    """
    Get the pyav time base from the given
    'fps'.
    """
    if NumberValidator.is_int(fps):
        return Fraction(1, fps)

    if PythonValidator.is_instance_of(fps, 'Fraction'):
        return Fraction(1, 1) / fps

    # A float: bound the denominator so values
    # like 29.97 turn into clean fractions
    return Fraction(1, 1) / Fraction.from_float(fps).limit_denominator(1000000)
yta_video_opengl/tests.py CHANGED
@@ -586,10 +586,12 @@ def video_modified_stored():
586
586
 
587
587
  video = Video(VIDEO_PATH, 0.25, 0.75)
588
588
  timeline = Timeline()
589
- timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.5)
589
+ timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.5)
590
590
  # This is successfully raising an exception
591
591
  #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
592
- timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.5)
592
+ timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.75)
593
+ timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
594
+ # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
593
595
  timeline.render(OUTPUT_PATH)
594
596
 
595
597
  return
yta_video_opengl/utils.py CHANGED
@@ -1,10 +1,11 @@
1
+ from yta_video_opengl.t import fps_to_time_base
1
2
  from yta_validation import PythonValidator
2
3
  from av.container import InputContainer
3
4
  from av.video.stream import VideoStream
4
5
  from av.audio.stream import AudioStream
5
6
  from av.video.frame import VideoFrame
7
+ from quicktions import Fraction
6
8
  from typing import Union
7
- from fractions import Fraction
8
9
 
9
10
  import av
10
11
  import numpy as np
@@ -311,9 +312,13 @@ def iterate_stream_frames_demuxing(
311
312
  pts_to_index(frame.pts, time_base, average_rate)
312
313
  )
313
314
 
315
+ # TODO: These methods below have to be
316
+ # removed because we created the new T
317
+ # class that is working with Fractions
318
+ # to be precise
314
319
  def t_to_pts(
315
- t: float,
316
- stream_time_base: 'Fraction'
320
+ t: Union[int, float, Fraction],
321
+ stream_time_base: Fraction
317
322
  ) -> int:
318
323
  """
319
324
  Transform a 't' time moment (in seconds) to
@@ -324,8 +329,8 @@ def t_to_pts(
324
329
 
325
330
  def pts_to_index(
326
331
  pts: int,
327
- stream_time_base: 'Fraction',
328
- fps: float
332
+ stream_time_base: Fraction,
333
+ fps: Union[float, Fraction]
329
334
  ) -> int:
330
335
  """
331
336
  Transform a 'pts' packet timestamp to a
@@ -335,8 +340,8 @@ def pts_to_index(
335
340
 
336
341
  def index_to_pts(
337
342
  index: int,
338
- stream_time_base: 'Fraction',
339
- fps: float
343
+ stream_time_base: Fraction,
344
+ fps: Union[float, Fraction]
340
345
  ) -> int:
341
346
  """
342
347
  Transform a frame index into a 'pts' packet
@@ -346,7 +351,7 @@ def index_to_pts(
346
351
 
347
352
  def pts_to_t(
348
353
  pts: int,
349
- stream_time_base: 'Fraction'
354
+ stream_time_base: Fraction
350
355
  ) -> float:
351
356
  """
352
357
  Transform a 'pts' packet timestamp to a 't'
@@ -357,59 +362,82 @@ def pts_to_t(
357
362
 
358
363
  # TODO: Move this to another utils
359
364
  def get_silent_audio_frame(
360
- sample_rate,
361
- layout="stereo",
362
- nb_samples=1024,
363
- format="s16"
365
+ sample_rate: int,
366
+ layout = 'stereo',
367
+ number_of_samples: int = 1024,
368
+ format = 's16'
364
369
  ):
365
- # Número de canales
366
- channels = len(av.AudioLayout(layout).channels)
367
-
368
- # Creamos un array de ceros (silencio)
369
- # dtype depende del formato
370
- # if format in ('s16', 's16p'):
371
- # dtype = np.int16
372
- # elif format in ('flt', 'fltp'):
373
- # dtype = np.float32
374
- # else:
375
- # raise ValueError(f"Formato no soportado: {format}")
376
-
377
- if format == "s16":
378
- dtype = np.int16
379
- elif format in ('flt', 'fltp'):
380
- dtype = np.float32
381
- else:
382
- raise ValueError(f"Formato no soportado: {format}")
383
-
384
- # Para formatos packed (1, samples * channels)
385
-
386
- if layout == 'stereo':
387
- silent_array = np.zeros((2, nb_samples), dtype = dtype)
388
- else:
389
- silent_array = np.zeros((1, nb_samples), dtype = dtype)
390
-
391
- # # Si es planar: (channels, samples) | Si es packed: (samples, channels)
392
- # if format.endswith("p"): # planar
393
- # silent_array = np.zeros((channels, nb_samples), dtype=dtype)
394
- # else: # packed
395
- # silent_array = np.zeros((nb_samples, channels), dtype=dtype)
396
-
397
- # Crear frame de audio
398
- frame = av.AudioFrame.from_ndarray(silent_array, format=format, layout=layout)
370
+ # TODO: This could be a utils or something to
371
+ # directly transform format into dtype
372
+ dtype = {
373
+ 's16': np.int16,
374
+ 'flt': np.float32,
375
+ 'fltp': np.float32
376
+ }.get(format, None)
377
+
378
+ if dtype is None:
379
+ raise Exception(f'The format "{format}" is not accepted.')
380
+
381
+ number_of_channels = len(av.AudioLayout(layout).channels)
382
+ # TODO: I think the option above is better
383
+ # number_of_channels = (
384
+ # 2
385
+ # if layout == 'stereo' else
386
+ # 1
387
+ # )
388
+
389
+ # For packed (or planar) formats we apply:
390
+ # (1, samples * channels). This is the same
391
+ # amount of data but planar, in 1D only
392
+ # TODO: This wasn't in the previous version
393
+ # and it was working, we were sending the
394
+ # same 'number_of_samples' even when 'fltp'
395
+ # that includes the 'p'
396
+ # TODO: This is making the audio last 2x
397
+ # if 'p' in format:
398
+ # number_of_samples *= number_of_channels
399
+
400
+ silent_array = np.zeros((number_of_channels, number_of_samples), dtype = dtype)
401
+ frame = av.AudioFrame.from_ndarray(silent_array, format = format, layout = layout)
399
402
  frame.sample_rate = sample_rate
400
403
 
401
404
  return frame
402
405
 
403
406
  def get_black_background_video_frame(
404
407
  size: tuple[int, int] = (1920, 1080),
405
- format: str = 'rgb24'
408
+ format: str = 'rgb24',
409
+ pts: Union[int, None] = None,
410
+ time_base: Union[Fraction, None] = None
406
411
  ):
407
- return av.VideoFrame.from_ndarray(
412
+ """
413
+ Get a pyav VideoFrame that is a completely black
414
+ frame. If the 'pts' and/or 'time_base' parameters
415
+ are provided, they will be set to the frame that
416
+ is returned with them. If not, remember to set
417
+ later because they are needed to be sent to the
418
+ pyav muxer.
419
+ """
420
+ frame = av.VideoFrame.from_ndarray(
408
421
  # TODO: What if we want alpha (?)
409
- array = np.zeros((size[0], size[1], 3), dtype = np.uint8),
422
+ # Size must be inverted
423
+ array = np.zeros((size[1], size[0], 3), dtype = np.uint8),
410
424
  format = format
411
425
  )
412
426
 
427
+ frame.pts = (
428
+ pts
429
+ if pts is not None else
430
+ frame.pts
431
+ )
432
+
433
+ frame.time_base = (
434
+ time_base
435
+ if time_base is not None else
436
+ frame.time_base
437
+ )
438
+
439
+ return frame
440
+
413
441
  def get_audio_frame_pts_range(
414
442
  frame: av.AudioFrame,
415
443
  do_in_seconds: bool = False
@@ -451,43 +479,37 @@ def get_audio_frame_pts_range(
451
479
  )
452
480
 
453
481
  def audio_frames_and_remainder_per_video_frame(
454
- fps: float,
455
- sample_rate: int,
456
- nb_samples: int
457
- ):
482
+ # TODO: Maybe force 'fps' as int (?)
483
+ video_fps: Union[float, Fraction],
484
+ sample_rate: int, # audio_fps
485
+ number_of_samples_per_audio_frame: int
486
+ ) -> tuple[int, int]:
458
487
  """
459
- Calcula cuántos audio frames completos y cuántas muestras sobrantes
460
- corresponden a la duración de 1 frame de vídeo.
461
-
462
- Args:
463
- fps (float): Frames por segundo del vídeo
464
- sample_rate (int): Frecuencia de muestreo del audio (Hz)
465
- nb_samples (int): Número de samples por AudioFrame (PyAV)
466
-
467
- Returns:
468
- (int, int): (frames_completos, muestras_restantes)
488
+ Get how many full silent audio frames we
489
+ need and the remainder for the last one
490
+ (that could be not complete), according
491
+ to the parameters provided.
492
+
493
+ This method returns a tuple containing
494
+ the number of full silent audio frames
495
+ we need and the number of samples we need
496
+ in the last non-full audio frame.
469
497
  """
470
- # Duración de un frame de vídeo en segundos
471
- video_frame_duration = 1.0 / fps
472
-
473
- # Total de samples de audio necesarios
474
- samples_needed = round(sample_rate * video_frame_duration)
475
-
476
- # Cuántos audio frames completos de nb_samples
477
- full_frames = samples_needed // nb_samples
498
+ # Video frame duration (in seconds)
499
+ time_base = fps_to_time_base(video_fps)
500
+ sample_rate = Fraction(int(sample_rate), 1)
501
+
502
+ # Example:
503
+ # 44_100 / 60 = 735 -> This means that we
504
+ # will have 735 samples of sound per each
505
+ # video frame
506
+ # The amount of samples per frame is actually
507
+ # the amount of samples we need, because we
508
+ # are generating it...
509
+ samples_per_frame = sample_rate * time_base
510
+ # The 'nb_samples' is the amount of samples
511
+ # we are including on each audio frame
512
+ full_audio_frames_needed = samples_per_frame // number_of_samples_per_audio_frame
513
+ remainder = samples_per_frame % number_of_samples_per_audio_frame
478
514
 
479
- # Restante que no completa un audio frame
480
- remainder = samples_needed % nb_samples
481
-
482
- return full_frames, remainder
483
-
484
- # # Usage below:
485
- # fps = 30
486
- # sample_rate = 44100
487
- # nb_samples = 1024
488
-
489
- # full, rem = audio_frames_and_remainder_per_video_frame(fps, sample_rate, nb_samples)
490
- # # This will return (1, 446)
491
-
492
-
493
-
515
+ return int(full_audio_frames_needed), int(remainder)
yta_video_opengl/video.py CHANGED
@@ -1,7 +1,8 @@
1
1
  from yta_video_opengl.reader import VideoReader
2
2
  from yta_video_opengl.writer import VideoWriter
3
- from yta_video_opengl.utils import iterate_stream_frames_demuxing
3
+ from yta_video_opengl.t import T
4
4
  from yta_validation import PythonValidator
5
+ from quicktions import Fraction
5
6
  from typing import Union
6
7
 
7
8
 
@@ -26,7 +27,8 @@ class Video:
26
27
  This timestamp is used to read the video
27
28
  file source.
28
29
  """
29
- return int(self.start / self.reader.time_base)
30
+ # TODO: What if 'time_base' is None (?)
31
+ return T(self.start, self.reader.time_base).truncated_pts
30
32
 
31
33
  @property
32
34
  def end_pts(
@@ -40,7 +42,8 @@ class Video:
40
42
  file source.
41
43
  """
42
44
  return (
43
- int(self.end / self.reader.time_base)
45
+ # TODO: What if 'time_base' is None (?)
46
+ T(self.end, self.reader.time_base).truncated_pts
44
47
  # TODO: What do we do if no duration (?)
45
48
  if self.duration is not None else
46
49
  None
@@ -54,7 +57,8 @@ class Video:
54
57
  The start packet time stamp (pts), needed
55
58
  to optimize the packet iteration process.
56
59
  """
57
- return int(self.start / self.reader.audio_time_base)
60
+ # TODO: What if 'audio_time_base' is None (?)
61
+ return T(self.start, self.reader.audio_time_base).truncated_pts
58
62
 
59
63
  @property
60
64
  def audio_end_pts(
@@ -65,7 +69,8 @@ class Video:
65
69
  optimize the packet iteration process.
66
70
  """
67
71
  return (
68
- int(self.end / self.reader.audio_time_base)
72
+ # TODO: What if 'audio_time_base' is None (?)
73
+ T(self.end, self.reader.audio_time_base).truncated_pts
69
74
  # TODO: What do we do if no duration (?)
70
75
  if self.duration is not None else
71
76
  None
@@ -74,7 +79,7 @@ class Video:
74
79
  @property
75
80
  def duration(
76
81
  self
77
- ):
82
+ ) -> Fraction:
78
83
  """
79
84
  The duration of the video.
80
85
  """
@@ -83,7 +88,7 @@ class Video:
83
88
  @property
84
89
  def number_of_frames(
85
90
  self
86
- ):
91
+ ) -> Union[int, None]:
87
92
  """
88
93
  The number of frames of the video.
89
94
  """
@@ -117,8 +122,8 @@ class Video:
117
122
  def __init__(
118
123
  self,
119
124
  filename: str,
120
- start: float = 0.0,
121
- end: Union[float, None] = None
125
+ start: Union[int, float, Fraction] = 0.0,
126
+ end: Union[int, float, Fraction, None] = None
122
127
  ):
123
128
  self.filename: str = filename
124
129
  """
@@ -130,12 +135,12 @@ class Video:
130
135
  """
131
136
  The pyav video reader.
132
137
  """
133
- self.start: float = start
138
+ self.start: Fraction = Fraction(start)
134
139
  """
135
140
  The time moment 't' in which the video
136
141
  should start.
137
142
  """
138
- self.end: Union[float, None] = (
143
+ self.end: Union[Fraction, None] = Fraction(
139
144
  # TODO: Is this 'end' ok (?)
140
145
  self.reader.duration
141
146
  if end is None else
@@ -145,12 +150,85 @@ class Video:
145
150
  The time moment 't' in which the video
146
151
  should end.
147
152
  """
153
+
154
+ # TODO: We need to implement the 'get_frame'
155
+ # methods because this Video can be subclipped
156
+ # and have a 'start' and' end' that are
157
+ # different from [0, end)
158
+ def _get_real_t(
159
+ self,
160
+ t: Union[int, float, Fraction]
161
+ ) -> Fraction:
162
+ """
163
+ Get the real 't' time moment based on the
164
+ video 'start' and 'end'. If they were
165
+ asking for the t=0.5s but our video was
166
+ subclipped to [1.0, 2.0), the 0.5s must be
167
+ actually the 1.5s of the video because of
168
+ the subclipped time range.
169
+ """
170
+ t = self.start + t
148
171
 
172
+ if t >= self.end:
173
+ raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
174
+
175
+ return t
176
+
177
+ def get_frame_from_t(
178
+ self,
179
+ t: Union[int, float, Fraction]
180
+ ) -> 'VideoFrame':
181
+ """
182
+ Get the video frame with the given 't' time
183
+ moment, using the video cache system.
184
+ """
185
+ return self.reader.video_cache.get_video_frame(self._get_real_t(t))
186
+
187
+ def get_audio_frame_from_t(
188
+ self,
189
+ t: Union[int, float, Fraction]
190
+ ) -> 'AudioFrame':
191
+ """
192
+ Get the audio frame with the given 't' time
193
+ moment, using the audio cache system. This
194
+ method is useful when we need to combine
195
+ many different frames so we can obtain them
196
+ one by one.
197
+
198
+ TODO: Is this actually necessary (?)
199
+ """
200
+ return self.reader.audio_cache.get_frame_from_t(self._get_real_t(t))
201
+
202
+ def get_audio_frames_from_t(
203
+ self,
204
+ t: Union[int, float, Fraction]
205
+ ):
206
+ """
207
+ Get the sequence of audio frames for the
208
+ given video 't' time moment, using the
209
+ audio cache system.
210
+
211
+ This is useful when we want to write a
212
+ video frame with its audio, so we obtain
213
+ all the audio frames associated to it
214
+ (remember that a video frame is associated
215
+ with more than 1 audio frame).
216
+ """
217
+ for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
218
+ yield frame
219
+
149
220
  def save_as(
150
221
  self,
151
222
  filename: str
152
223
  ) -> 'Video':
153
- writer = VideoWriter(filename)
224
+ """
225
+ Save the video locally as the given 'filename'.
226
+
227
+ TODO: By now we are doing tests inside so the
228
+ functionality is a manual test. Use it
229
+ carefully.
230
+ """
231
+ writer = VideoWriter(filename)
154
232
  writer.set_video_stream_from_template(self.reader.video_stream)
155
233
  writer.set_audio_stream_from_template(self.reader.audio_stream)
156
234