yta-video-opengl 0.0.22-py3-none-any.whl → 0.0.24-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. yta_video_opengl/editor.py +333 -0
  2. yta_video_opengl/nodes/__init__.py +32 -28
  3. yta_video_opengl/nodes/audio/__init__.py +164 -55
  4. yta_video_opengl/nodes/video/__init__.py +27 -1
  5. yta_video_opengl/nodes/video/{opengl.py → opengl/__init__.py} +8 -4
  6. yta_video_opengl/nodes/video/opengl/experimental.py +760 -0
  7. yta_video_opengl/tests.py +236 -358
  8. yta_video_opengl/utils.py +9 -421
  9. {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/METADATA +2 -6
  10. yta_video_opengl-0.0.24.dist-info/RECORD +13 -0
  11. yta_video_opengl/audio.py +0 -219
  12. yta_video_opengl/classes.py +0 -1276
  13. yta_video_opengl/complete/__init__.py +0 -0
  14. yta_video_opengl/complete/frame_combinator.py +0 -204
  15. yta_video_opengl/complete/frame_generator.py +0 -319
  16. yta_video_opengl/complete/frame_wrapper.py +0 -135
  17. yta_video_opengl/complete/timeline.py +0 -571
  18. yta_video_opengl/complete/track/__init__.py +0 -500
  19. yta_video_opengl/complete/track/media/__init__.py +0 -222
  20. yta_video_opengl/complete/track/parts.py +0 -267
  21. yta_video_opengl/complete/track/utils.py +0 -78
  22. yta_video_opengl/media.py +0 -347
  23. yta_video_opengl/reader/__init__.py +0 -710
  24. yta_video_opengl/reader/cache/__init__.py +0 -253
  25. yta_video_opengl/reader/cache/audio.py +0 -195
  26. yta_video_opengl/reader/cache/utils.py +0 -48
  27. yta_video_opengl/reader/cache/video.py +0 -113
  28. yta_video_opengl/t.py +0 -233
  29. yta_video_opengl/video.py +0 -277
  30. yta_video_opengl/writer.py +0 -278
  31. yta_video_opengl-0.0.22.dist-info/RECORD +0 -31
  32. {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/LICENSE +0 -0
  33. {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/WHEEL +0 -0
yta_video_opengl/utils.py CHANGED
@@ -1,13 +1,6 @@
-from yta_video_opengl.t import fps_to_time_base
 from yta_validation import PythonValidator
-from av.container import InputContainer
-from av.video.stream import VideoStream
-from av.audio.stream import AudioStream
-from av.video.frame import VideoFrame
-from quicktions import Fraction
 from typing import Union
 
-import av
 import numpy as np
 import moderngl
 
@@ -40,7 +33,7 @@ def frame_to_texture(
 # obtain a VideoFrame or a numpy array frame
 def texture_to_frame(
     texture: moderngl.Texture
-) -> 'VideoFrame':
+) -> np.ndarray:
     """
     Transform an opengl texture into a pyav
     VideoFrame instance.
@@ -54,9 +47,13 @@ def texture_to_frame(
 
     # This is if we need an 'av' VideoFrame (to
     # export through the demuxer, for example)
-    frame = av.VideoFrame.from_ndarray(frame, format = 'rgba')
-    # TODO: Make this customizable
-    frame = frame.reformat(format = 'yuv420p')
+    # TODO: I avoid this by now because we don't
+    # want to import the pyav library, so this
+    # below has to be done with the numpy array
+    # received...
+    # frame = av.VideoFrame.from_ndarray(frame, format = 'rgba')
+    # # TODO: Make this customizable
+    # frame = frame.reformat(format = 'yuv420p')
 
     return frame
 
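Since `texture_to_frame` now returns the raw numpy array instead of a pyav `VideoFrame`, the conversion that the commented-out lines used to perform moves to the caller. A minimal sketch of that caller-side step, using a hypothetical helper name and assuming PyAV is installed and the array is the RGBA buffer read from the texture (the `yuv420p` target is kept from the removed code):

import av
import numpy as np

def ndarray_to_video_frame(array: np.ndarray) -> 'av.VideoFrame':
    # Wrap the RGBA array returned by 'texture_to_frame'
    frame = av.VideoFrame.from_ndarray(array, format = 'rgba')
    # Reformat to a muxer-friendly pixel format
    return frame.reformat(format = 'yuv420p')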
@@ -103,413 +100,4 @@ def get_fullscreen_quad_vao(
         (vbo, '2f 2f', 'in_vert', 'in_texcoord'),
     ]
 
-    return context.vertex_array(program, vao_content, ibo)
-
-def iterate_streams_packets(
-    container: 'InputContainer',
-    video_stream: 'VideoStream',
-    audio_stream: 'AudioStream',
-    video_start_pts: int = 0,
-    video_end_pts: Union[int, None] = None,
-    audio_start_pts: int = 0,
-    audio_end_pts: Union[int, None] = None
-):
-    """
-    Iterate over the provided 'stream' packets
-    and yield the ones in the expected range.
-    This is nice when trying to copy a stream
-    without modifications.
-    """
-    # 'video_start_pts' and 'audio_start_pts' must
-    # be 0 or a positive pts
-
-    if (
-        video_stream is None and
-        audio_stream is None
-    ):
-        raise Exception('No streams provided.')
-
-    # We only need to seek on video
-    if video_stream is not None:
-        container.seek(video_start_pts, stream = video_stream)
-    if audio_stream is not None:
-        container.seek(audio_start_pts, stream = audio_stream)
-
-    stream = [
-        stream
-        for stream in (video_stream, audio_stream)
-        if stream
-    ]
-
-    """
-    Apparently, if we ignore some packets based
-    on the 'pts', we can be ignoring information
-    that is needed for the next frames to be
-    decoded, so we need to decode them all...
-
-    If we can find some strategy to seek not for
-    the immediate but some before and read from
-    that one to avoid reading all of the packets
-    we could save some time, but at what cost?
-    We cannot skip any crucial frame so we need
-    to know how many we can skip, and that sounds
-    a bit difficult depending on the codec.
-    """
-    stream_finished: str = ''
-    for packet in container.demux(stream):
-        if packet.pts is None:
-            continue
-
-        # TODO: We cannot skip like this, we need to
-        # look for the nearest keyframe to be able
-        # to decode the frames later. Take a look at
-        # the VideoFrameCache class and use it.
-
-        # start_pts = (
-        #     video_start_pts
-        #     if packet.stream.type == 'video' else
-        #     audio_start_pts
-        # )
-        # end_pts = (
-        #     video_end_pts
-        #     if packet.stream.type == 'video' else
-        #     audio_end_pts
-        # )
-
-        # if packet.pts < start_pts:
-        #     continue
-
-        # if (
-        #     end_pts is not None and
-        #     packet.pts > end_pts
-        # ):
-        #     if (
-        #         stream_finished != '' and
-        #         (
-        #             # Finish if only one stream
-        #             stream_finished != packet.stream.type or
-        #             video_stream is None or
-        #             audio_stream is None
-        #         )
-        #     ):
-        #         # We have yielded all the frames in the
-        #         # expected range, no more needed
-        #         return
-
-        #     stream_finished = packet.stream.type
-        #     continue
-
-        yield packet
-
-def iterate_stream_frames_demuxing(
-    container: 'InputContainer',
-    video_stream: 'VideoStream',
-    audio_stream: 'AudioStream',
-    video_start_pts: int = 0,
-    video_end_pts: Union[int, None] = None,
-    audio_start_pts: int = 0,
-    audio_end_pts: Union[int, None] = None
-):
-    """
-    Iterate over the provided 'stream' packets
-    and decode only the ones in the expected
-    range, so only those frames are decoded
-    (which is an expensive process).
-
-    This method returns a tuple of 3 elements:
-    - `frame` as a `VideoFrame` instance
-    - `t` as the frame time moment
-    - `index` as the frame index
-
-    You can easily transform the frame received
-    to a numpy array by using this:
-    - `frame.to_ndarray(format = format)`
-    """
-    # 'start_pts' must be 0 or a positive pts
-    # 'end_pts' must be None or a positive pts
-
-    # We cannot skip packets or we will lose
-    # information needed to build the video
-    for packet in iterate_streams_packets(
-        container = container,
-        video_stream = video_stream,
-        audio_stream = audio_stream,
-        video_start_pts = video_start_pts,
-        video_end_pts = video_end_pts,
-        audio_start_pts = audio_start_pts,
-        audio_end_pts = audio_end_pts
-    ):
-        # Only valid and in range packets here
-        # Here only the accepted ones
-        stream_finished: str = ''
-        for frame in packet.decode():
-            if frame.pts is None:
-                continue
-
-            time_base = (
-                video_stream.time_base
-                if PythonValidator.is_instance_of(frame, VideoFrame) else
-                audio_stream.time_base
-            )
-
-            average_rate = (
-                video_stream.average_rate
-                if PythonValidator.is_instance_of(frame, VideoFrame) else
-                audio_stream.rate
-            )
-
-            start_pts = (
-                video_start_pts
-                if packet.stream.type == 'video' else
-                audio_start_pts
-            )
-
-            end_pts = (
-                video_end_pts
-                if packet.stream.type == 'video' else
-                audio_end_pts
-            )
-
-            if frame.pts < start_pts:
-                continue
-
-            if (
-                end_pts is not None and
-                frame.pts > end_pts
-            ):
-                if (
-                    stream_finished != '' and
-                    (
-                        # Finish if only one stream
-                        stream_finished != packet.stream.type or
-                        video_stream is None or
-                        audio_stream is None
-                    )
-                ):
-                    # We have yielded all the frames in the
-                    # expected range, no more needed
-                    return
-
-                stream_finished = packet.stream.type
-                continue
-
-            time_base = (
-                video_stream.time_base
-                if PythonValidator.is_instance_of(frame, VideoFrame) else
-                audio_stream.time_base
-            )
-
-            average_rate = (
-                video_stream.average_rate
-                if PythonValidator.is_instance_of(frame, VideoFrame) else
-                audio_stream.rate
-            )
-
-            # TODO: Maybe send a @dataclass instead (?)
-            yield (
-                frame,
-                pts_to_t(frame.pts, time_base),
-                pts_to_index(frame.pts, time_base, average_rate)
-            )
-
-# TODO: These methods below have to be
-# removed because we created the new T
-# class that is working with Fractions
-# to be precise
-def t_to_pts(
-    t: Union[int, float, Fraction],
-    stream_time_base: Fraction
-) -> int:
-    """
-    Transform a 't' time moment (in seconds) to
-    a packet timestamp (pts) understandable by
-    the pyav library.
-    """
-    return int((t + 0.000001) / stream_time_base)
-
-def pts_to_index(
-    pts: int,
-    stream_time_base: Fraction,
-    fps: Union[float, Fraction]
-) -> int:
-    """
-    Transform a 'pts' packet timestamp to a
-    frame index.
-    """
-    return int(round(pts_to_t(pts, stream_time_base) * fps))
-
-def index_to_pts(
-    index: int,
-    stream_time_base: Fraction,
-    fps: Union[float, Fraction]
-) -> int:
-    """
-    Transform a frame index into a 'pts' packet
-    timestamp.
-    """
-    return int(index / fps / stream_time_base)
-
-def pts_to_t(
-    pts: int,
-    stream_time_base: Fraction
-) -> float:
-    """
-    Transform a 'pts' packet timestamp to a 't'
-    time moment.
-    """
-    return pts * stream_time_base
-
-
-# TODO: Move this to another utils
-def get_silent_audio_frame(
-    sample_rate: int,
-    layout = 'stereo',
-    number_of_samples: int = 1024,
-    format = 's16'
-):
-    # TODO: This could be a utils or something to
-    # directly transform format into dtype
-    dtype = {
-        's16': np.int16,
-        'flt': np.float32,
-        'fltp': np.float32
-    }.get(format, None)
-
-    if dtype is None:
-        raise Exception(f'The format "{format}" is not accepted.')
-
-    number_of_channels = len(av.AudioLayout(layout).channels)
-    # TODO: I think the option above is better
-    # number_of_channels = (
-    #     2
-    #     if layout == 'stereo' else
-    #     1
-    # )
-
-    # For packed (or planar) formats we apply:
-    # (1, samples * channels). This is the same
-    # amount of data but planar, in 1D only
-    # TODO: This wasn't in the previous version
-    # and it was working, we were sending the
-    # same 'number_of_samples' even when 'fltp'
-    # that includes the 'p'
-    # TODO: This is making the audio last 2x
-    # if 'p' in format:
-    #     number_of_samples *= number_of_channels
-
-    silent_array = np.zeros((number_of_channels, number_of_samples), dtype = dtype)
-    frame = av.AudioFrame.from_ndarray(silent_array, format = format, layout = layout)
-    frame.sample_rate = sample_rate
-
-    return frame
-
-def get_black_background_video_frame(
-    size: tuple[int, int] = (1920, 1080),
-    format: str = 'rgb24',
-    pts: Union[int, None] = None,
-    time_base: Union[Fraction, None] = None
-):
-    """
-    Get a pyav VideoFrame that is a completely black
-    frame. If the 'pts' and/or 'time_base' parameters
-    are provided, they will be set on the frame that
-    is returned. If not, remember to set them later
-    because they are needed by the pyav muxer.
-    """
-    frame = av.VideoFrame.from_ndarray(
-        # TODO: What if we want alpha (?)
-        # Size must be inverted
-        array = np.zeros((size[1], size[0], 3), dtype = np.uint8),
-        format = format
-    )
-
-    frame.pts = (
-        pts
-        if pts is not None else
-        frame.pts
-    )
-
-    frame.time_base = (
-        time_base
-        if time_base is not None else
-        frame.time_base
-    )
-
-    return frame
-
-def get_audio_frame_pts_range(
-    frame: av.AudioFrame,
-    do_in_seconds: bool = False
-):
-    """
-    Get the [start_pts, end_pts) range of the
-    pyav AudioFrame, or in seconds if the
-    'do_in_seconds' parameter is True.
-    """
-    if frame.pts is None:
-        raise Exception('No "pts" found.')
-
-    # First and last sample. Remember that
-    # the last one is not included
-    start_pts = frame.pts
-    end_pts = frame.pts + frame.samples
-
-    # Time base for the seconds conversion
-    time_base = (
-        frame.time_base
-        if frame.time_base else
-        Fraction(1, frame.sample_rate)
-    )
-
-    start_time = (
-        float(start_pts * time_base)
-        if do_in_seconds else
-        start_pts
-    )
-    end_time = (
-        float(end_pts * time_base)
-        if do_in_seconds else
-        end_pts
-    )
-
-    return (
-        start_time,
-        end_time
-    )
-
-def audio_frames_and_remainder_per_video_frame(
-    # TODO: Maybe force 'fps' as int (?)
-    video_fps: Union[float, Fraction],
-    sample_rate: int, # audio_fps
-    number_of_samples_per_audio_frame: int
-) -> tuple[int, int]:
-    """
-    Get how many full silent audio frames we
-    need and the remainder for the last one
-    (which might not be complete), according
-    to the parameters provided.
-
-    This method returns a tuple containing
-    the number of full silent audio frames
-    we need and the number of samples we need
-    in the last non-full audio frame.
-    """
-    # Video frame duration (in seconds)
-    time_base = fps_to_time_base(video_fps)
-    sample_rate = Fraction(int(sample_rate), 1)
-
-    # Example:
-    # 44_100 / 60 = 735 -> This means that we
-    # will have 735 samples of sound per video
-    # frame
-    # The amount of samples per frame is actually
-    # the amount of samples we need, because we
-    # are generating it...
-    samples_per_frame = sample_rate * time_base
-    # The 'nb_samples' is the amount of samples
-    # we are including on each audio frame
-    full_audio_frames_needed = samples_per_frame // number_of_samples_per_audio_frame
-    remainder = samples_per_frame % number_of_samples_per_audio_frame
-
-    return int(full_audio_frames_needed), int(remainder)
+    return context.vertex_array(program, vao_content, ibo)
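The deleted timestamp helpers encode the usual pyav conversions (t = pts * time_base, index = round(t * fps)), and audio_frames_and_remainder_per_video_frame splits the audio samples spanned by one video frame into full audio frames plus a remainder. A minimal standalone sketch of that arithmetic, using the stdlib fractions module instead of quicktions and assumed example values (a 1/90000 time base, 60 fps, 44100 Hz audio, 1024-sample audio frames; only the 44100 / 60 = 735 figure comes from the removed comments):

from fractions import Fraction

# t = pts * time_base, index = round(t * fps)
time_base = Fraction(1, 90000)  # assumed stream time base
fps = Fraction(60, 1)

pts = 90000                # one second into the stream
t = pts * time_base        # Fraction(1, 1) -> 1.0 s
index = round(t * fps)     # 60

# Samples one video frame spans, as in the removed
# 'audio_frames_and_remainder_per_video_frame':
# 44100 / 60 = 735 audio samples per video frame
samples_per_video_frame = Fraction(44100, 1) / fps
nb_samples = 1024  # samples carried by each audio frame
full_frames = samples_per_video_frame // nb_samples   # 0 full frames
remainder = samples_per_video_frame % nb_samples      # 735 samples left

assert (t, index, int(full_frames), int(remainder)) == (Fraction(1), 60, 0, 735)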
{yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/METADATA CHANGED
@@ -1,20 +1,16 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.22
+Version: 0.0.24
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
 Requires-Python: ==3.9
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
-Requires-Dist: av (>=0.0.1,<19.0.0)
 Requires-Dist: moderngl (>=0.0.1,<9.0.0)
 Requires-Dist: numpy (>=0.0.1,<9.0.0)
-Requires-Dist: pillow (>=0.0.1,<99.0.0)
-Requires-Dist: quicktions (>=0.0.1,<9.0.0)
-Requires-Dist: yta_timer (>0.0.1,<1.0.0)
+Requires-Dist: yta_programming (>=0.0.1,<1.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
-Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
 Description-Content-Type: text/markdown
 
 # Youtube Autonomous Video OpenGL Module
yta_video_opengl-0.0.24.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
+yta_video_opengl/editor.py,sha256=ILzeGAe6uO5IptwrplbvnFu8e0j8v9AOaAmuvFH4YjI,9218
+yta_video_opengl/nodes/__init__.py,sha256=Rbh6HxWpKrdF_qlcGVKYBGW6i10NodoExa5qGEMcmIM,3738
+yta_video_opengl/nodes/audio/__init__.py,sha256=upKsYZrrNW2DxP77RyMCw2I16Lj9JsHbXGALw5grxbQ,6143
+yta_video_opengl/nodes/video/__init__.py,sha256=Nf1CnUaz6m7mOvVsuoTKMsq5QIVqEqZaiGseNOre8_k,859
+yta_video_opengl/nodes/video/opengl/__init__.py,sha256=bVPdS_wpz4HNurmLRDEHUmNZU7Qz8LptKfJ9yN_2PJM,8587
+yta_video_opengl/nodes/video/opengl/experimental.py,sha256=XuriXcOuJU5P0FruKffudCpMuO-ht8weJud88Qn8GKk,22158
+yta_video_opengl/tests.py,sha256=uvTU1_cvz5r7fETd4X8hvsYw4jViMDA0x-LsDNplFPM,24265
+yta_video_opengl/utils.py,sha256=w6jYnZcBYPNAsTolkAlA6dxph0u_8-mcUzKvADlIlW8,3098
+yta_video_opengl-0.0.24.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+yta_video_opengl-0.0.24.dist-info/METADATA,sha256=WKcDQFnZS0TvesGJM9N-bbPjOx0DdTNZb6yU-4nzUqY,589
+yta_video_opengl-0.0.24.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+yta_video_opengl-0.0.24.dist-info/RECORD,,
yta_video_opengl/audio.py DELETED
@@ -1,219 +0,0 @@
-"""
-TODO: This class has not been refactored nor
-tested. I need to put some love on it to make
-it work and test that it is working properly.
-"""
-from yta_video_opengl.reader import VideoReader
-from yta_video_opengl.writer import VideoWriter
-from yta_video_opengl.t import T
-from yta_validation import PythonValidator
-from quicktions import Fraction
-from typing import Union
-
-
-# TODO: Where can I obtain this dynamically (?)
-PIXEL_FORMAT = 'yuv420p'
-
-# TODO: Maybe create a _Media(ABC) to put
-# some code shared with the Video class
-class Audio:
-    """
-    Class to wrap the functionality related to
-    handling and modifying an audio.
-    """
-
-    @property
-    def audio_start_pts(
-        self
-    ) -> int:
-        """
-        The start packet time stamp (pts), needed
-        to optimize the packet iteration process.
-        """
-        # TODO: What if 'audio_time_base' is None (?)
-        return T(self.start, self.reader.audio_time_base).truncated_pts
-
-    @property
-    def audio_end_pts(
-        self
-    ) -> Union[int, None]:
-        """
-        The end packet time stamp (pts), needed to
-        optimize the packet iteration process.
-        """
-        return (
-            # TODO: What if 'audio_time_base' is None (?)
-            T(self.end, self.reader.audio_time_base).truncated_pts
-            # TODO: What do we do if no duration (?)
-            if self.duration is not None else
-            None
-        )
-
-    @property
-    def duration(
-        self
-    ) -> Fraction:
-        """
-        The duration of the audio.
-        """
-        return self.end - self.start
-
-    @property
-    def frames(
-        self
-    ):
-        """
-        Iterator to yield all the frames, one by
-        one, within the range defined by the
-        'start' and 'end' parameters provided when
-        instantiating it.
-
-        The iterator will iterate first over the
-        audio frames.
-        """
-        for frame in self.reader.get_audio_frames(self.start, self.end):
-            yield frame
-
-    def __init__(
-        self,
-        filename: str,
-        start: Union[int, float, Fraction] = 0.0,
-        end: Union[int, float, Fraction, None] = None
-    ):
-        self.filename: str = filename
-        """
-        The filename of the original audio.
-        """
-        # TODO: Detect the 'pixel_format' from the
-        # extension (?)
-        self.reader: VideoReader = VideoReader(self.filename)
-        """
-        The pyav audio reader.
-        """
-        self.start: Fraction = Fraction(start)
-        """
-        The time moment 't' in which the audio
-        should start.
-        """
-        self.end: Union[Fraction, None] = Fraction(
-            # TODO: Is this 'end' ok (?)
-            self.reader.duration
-            if end is None else
-            end
-        )
-        """
-        The time moment 't' in which the audio
-        should end.
-        """
-
-    def _get_t(
-        self,
-        t: Union[int, float, Fraction]
-    ) -> Fraction:
-        """
-        Get the real 't' time moment based on the
-        audio 'start' and 'end'. If they were
-        asking for the t=0.5s but our audio was
-        subclipped to [1.0, 2.0), the 0.5s must
-        actually be the 1.5s of the audio because
-        of the subclipped time range.
-        """
-        t += self.start
-
-        print(f'Audio real t is {str(float(t))}')
-        if t >= self.end:
-            raise Exception(f'The "t" ({str(t)}) provided is out of range. This audio lasts from [{str(self.start)}, {str(self.end)}).')
-
-        return t
-
-    def get_audio_frame_from_t(
-        self,
-        t: Union[int, float, Fraction]
-    ) -> 'AudioFrame':
-        """
-        Get the audio frame with the given 't' time
-        moment, using the audio cache system. This
-        method is useful when we need to combine
-        many different frames so we can obtain them
-        one by one.
-
-        TODO: Is this actually necessary (?)
-        """
-        return self.reader.get_audio_frame_from_t(self._get_t(t))
-
-    def get_audio_frames_from_t(
-        self,
-        t: Union[int, float, Fraction]
-    ):
-        """
-        Get the sequence of audio frames for a
-        given video 't' time moment, using the
-        audio cache system.
-
-        This is useful when we want to write a
-        video frame with its audio, so we obtain
-        all the audio frames associated to it
-        (remember that a video frame is associated
-        with more than 1 audio frame).
-        """
-        print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_t(t)))}')
-        for frame in self.reader.get_audio_frames_from_t(self._get_t(t)):
-            yield frame
-
-    def save_as(
-        self,
-        filename: str
-    ) -> 'Video':
-        """
-        Save the audio locally as the given 'filename'.
-
-        TODO: By now we are doing tests inside so the
-        functionality is a manual test. Use it
-        carefully.
-        """
-        writer = VideoWriter(filename)
-        writer.set_audio_stream_from_template(self.reader.audio_stream)
-
-        from yta_video_opengl.nodes.audio import VolumeAudioNode
-        # Audio from 0 to 1
-        # TODO: This effect 'fn' is shitty
-        def fade_in_fn(t, index, start = 0.5, end = 1.0):
-            if t < start or t > end:
-                # Out of the range: touch nothing, keep
-                # the original volume (1.0)
-                progress = 1.0
-            else:
-                # Inside the range: interpolate linearly
-                # between 0 and 1
-                progress = (t - start) / (end - start)
-
-            return progress
-
-        #fade_in = SetVolumeAudioNode(lambda t, i: min(1, t / self.duration))
-        fade_in = VolumeAudioNode(lambda t, i: fade_in_fn(t, i, 0.5, 1.0))
-
-        for frame, t, index in self.frames:
-            if PythonValidator.is_instance_of(frame, 'VideoFrame'):
-                print(f'Saving video frame {str(index)}, with t = {str(t)}')
-
-                # TODO: Process any video frame change
-
-                writer.mux_video_frame(
-                    frame = frame
-                )
-            else:
-                print(f'Saving audio frame {str(index)} ({str(round(float(t * self.reader.fps), 2))}), with t = {str(t)}')
-
-                # TODO: Process any audio frame change
-                # Test setting audio
-                frame = fade_in.process(frame, t)
-
-                writer.mux_audio_frame(
-                    frame = frame
-                )
-
-        # Flush the remaining frames to write
-        writer.mux_audio_frame(None)
-        writer.mux_video_frame(None)
-
-        # TODO: Maybe move this to the '__del__' (?)
-        writer.output.close()
-        self.reader.container.close()
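The _get_t method above is the whole subclipping rule of the deleted class: a caller-facing t is shifted into source time by the clip's start and validated against its end. The same rule as a standalone sketch, with a hypothetical helper name and the [1.0, 2.0) example from the docstring (stdlib fractions stands in for quicktions):

from fractions import Fraction

def remap_subclip_t(t, start, end) -> Fraction:
    # A local t=0.5s inside a [1.0, 2.0) subclip is
    # source time 1.5s, exactly as Audio._get_t did
    t = Fraction(t) + Fraction(start)
    if t >= end:
        raise ValueError(f'"t" ({float(t)}) is out of range [{float(start)}, {float(end)}).')
    return t

assert remap_subclip_t(0.5, 1, 2) == Fraction(3, 2)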