yta-video-opengl 0.0.22-py3-none-any.whl → 0.0.24-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- yta_video_opengl/editor.py +333 -0
- yta_video_opengl/nodes/__init__.py +32 -28
- yta_video_opengl/nodes/audio/__init__.py +164 -55
- yta_video_opengl/nodes/video/__init__.py +27 -1
- yta_video_opengl/nodes/video/{opengl.py → opengl/__init__.py} +8 -4
- yta_video_opengl/nodes/video/opengl/experimental.py +760 -0
- yta_video_opengl/tests.py +236 -358
- yta_video_opengl/utils.py +9 -421
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/METADATA +2 -6
- yta_video_opengl-0.0.24.dist-info/RECORD +13 -0
- yta_video_opengl/audio.py +0 -219
- yta_video_opengl/classes.py +0 -1276
- yta_video_opengl/complete/__init__.py +0 -0
- yta_video_opengl/complete/frame_combinator.py +0 -204
- yta_video_opengl/complete/frame_generator.py +0 -319
- yta_video_opengl/complete/frame_wrapper.py +0 -135
- yta_video_opengl/complete/timeline.py +0 -571
- yta_video_opengl/complete/track/__init__.py +0 -500
- yta_video_opengl/complete/track/media/__init__.py +0 -222
- yta_video_opengl/complete/track/parts.py +0 -267
- yta_video_opengl/complete/track/utils.py +0 -78
- yta_video_opengl/media.py +0 -347
- yta_video_opengl/reader/__init__.py +0 -710
- yta_video_opengl/reader/cache/__init__.py +0 -253
- yta_video_opengl/reader/cache/audio.py +0 -195
- yta_video_opengl/reader/cache/utils.py +0 -48
- yta_video_opengl/reader/cache/video.py +0 -113
- yta_video_opengl/t.py +0 -233
- yta_video_opengl/video.py +0 -277
- yta_video_opengl/writer.py +0 -278
- yta_video_opengl-0.0.22.dist-info/RECORD +0 -31
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/WHEEL +0 -0
yta_video_opengl/reader/__init__.py (deleted)
@@ -1,710 +0,0 @@
-"""
-A video reader using the PyAv (av) library
-that, using ffmpeg, detects the video.
-"""
-from yta_video_opengl.reader.cache.video import VideoFrameCache
-from yta_video_opengl.reader.cache.audio import AudioFrameCache
-from yta_video_opengl.utils import iterate_stream_frames_demuxing
-from yta_video_opengl.t import T
-from yta_validation import PythonValidator
-from av.video.frame import VideoFrame
-from av.audio.frame import AudioFrame
-from av.packet import Packet
-from av.video.stream import VideoStream
-from av.audio.stream import AudioStream
-from av.container.input import InputContainer
-from quicktions import Fraction
-from av import open as av_open
-from typing import Union
-from dataclasses import dataclass
-
-
-@dataclass
-class VideoReaderFrame:
-    """
-    Class to wrap a frame of a video that is
-    being read, that can be a video or audio
-    frame, and has been decoded.
-    """
-
-    @property
-    def is_video(
-        self
-    ):
-        """
-        Flag to indicate if the instance is a video
-        frame.
-        """
-        return PythonValidator.is_instance_of(self.value, VideoFrame)
-
-    @property
-    def is_audio(
-        self
-    ):
-        """
-        Flag to indicate if the instance is an audio
-        frame.
-        """
-        return PythonValidator.is_instance_of(self.value, AudioFrame)
-
-    @property
-    def as_numpy(
-        self
-    ):
-        """
-        The frame as a numpy array.
-        """
-        return self.value.to_ndarray(format = self.pixel_format)
-
-    def __init__(
-        self,
-        # TODO: Add the type, please
-        frame: any,
-        t: Union[int, float, Fraction] = None,
-        pixel_format: str = 'rgb24'
-    ):
-        self.value: Union[AudioFrame, VideoFrame] = frame
-        """
-        The frame content, that can be audio or video
-        frame.
-        """
-        self.t: Fraction = Fraction(t)
-        """
-        The 't' time moment of the frame.
-        """
-        self.pixel_format: str = pixel_format
-        """
-        The pixel format of the frame.
-        """
-
-@dataclass
-class VideoReaderPacket:
-    """
-    Class to wrap a packet of a video that is
-    being read, that can contain video or audio
-    frames.
-    """
-
-    @property
-    def is_video(
-        self
-    ) -> bool:
-        """
-        Flag to indicate if the packet includes video
-        frames or not.
-        """
-        return self.value.stream.type == 'video'
-
-    @property
-    def is_audio(
-        self
-    ) -> bool:
-        """
-        Flag to indicate if the packet includes audio
-        frames or not.
-        """
-        return self.value.stream.type == 'audio'
-
-    def __init__(
-        self,
-        packet: Packet
-    ):
-        self.value: Packet = packet
-        """
-        The packet, that can include video or audio
-        frames and can be decoded.
-        """
-
-    def decode(
-        self
-    ) -> list['SubtitleSet']:
-        """
-        Get the frames but decoded, perfect to make
-        modifications and encode to save them again.
-        """
-        return self.value.decode()
-
-
-class VideoReader:
-    """
-    Class to read video files with the PyAv (av)
-    library that uses ffmpeg on the background.
-    """
-
-    @property
-    def frame_iterator(
-        self
-    ) -> 'Iterator[VideoFrame]':
-        """
-        Iterator to iterate over all the video frames
-        decodified.
-        """
-        return self.container.decode(self.video_stream)
-
-    # TODO: Remove this when possible, we are not
-    # using 'next' properties
-    @property
-    def next_frame(
-        self
-    ) -> Union[VideoFrame, None]:
-        """
-        Get the next video frame (decoded) from the
-        iterator.
-        """
-        return next(self.frame_iterator)
-
-    @property
-    def audio_frame_iterator(
-        self
-    ) -> 'Iterator[AudioFrame]':
-        """
-        Iterator to iterate over all the audio frames
-        decodified.
-        """
-        return self.container.decode(self.audio_stream)
-
-    @property
-    def packet_iterator(
-        self
-    ) -> 'Iterator[Packet]':
-        """
-        Iterator to iterate over all the video frames
-        as packets (not decodified).
-        """
-        return self.container.demux(self.video_stream)
-
-    @property
-    def audio_packet_iterator(
-        self
-    ) -> 'Iterator[Packet]':
-        """
-        Iterator to iterate over all the audio frames
-        as packets (not decodified).
-        """
-        return self.container.demux(self.audio_stream)
-
-    @property
-    def packet_with_audio_iterator(
-        self
-    ) -> 'Iterator[Packet]':
-        """
-        Iterator to iterate over all the video frames
-        as packets (not decodified) including also the
-        audio as packets.
-        """
-        return self.container.demux((self.video_stream, self.audio_stream))
-
-    @property
-    def has_video(
-        self
-    ) -> bool:
-        """
-        Flag to indicate if there is a video stream
-        or not.
-        """
-        return self.video_stream is not None
-
-    @property
-    def has_audio(
-        self
-    ) -> bool:
-        """
-        Flag to indicate if there is an audio stream
-        or not.
-        """
-        return self.audio_stream is not None
-
-    @property
-    def codec_name(
-        self
-    ) -> Union[str, None]:
-        """
-        Get the name of the video codec.
-        """
-        return (
-            self.video_stream.codec_context.name
-            if self.has_video else
-            None
-        )
-
-    @property
-    def audio_codec_name(
-        self
-    ) -> str:
-        """
-        Get the name of the audio codec.
-        """
-        return (
-            self.audio_stream.codec_context.name
-            if self.has_audio else
-            None
-        )
-
-    @property
-    def number_of_frames(
-        self
-    ) -> Union[int, None]:
-        """
-        The number of frames in the video.
-        """
-        return (
-            self.video_stream.frames
-            if self.has_video else
-            None
-        )
-
-    @property
-    def number_of_audio_frames(
-        self
-    ) -> Union[int, None]:
-        """
-        The number of frames in the audio.
-        """
-        return (
-            self.audio_stream.frames
-            if self.has_audio else
-            None
-        )
-
-    @property
-    def fps(
-        self
-    ) -> Union[Fraction, None]:
-        """
-        The fps of the video.
-        """
-        return (
-            self.video_stream.average_rate
-            if self.has_video else
-            None
-        )
-
-    @property
-    def audio_fps(
-        self
-    ) -> Union[int, None]:
-        """
-        The fps of the audio.
-        """
-        return (
-            self.audio_stream.rate
-            if self.has_audio else
-            None
-        )
-
-    @property
-    def time_base(
-        self
-    ) -> Union[Fraction, None]:
-        """
-        The time base of the video.
-        """
-        return (
-            self.video_stream.time_base
-            if self.has_video else
-            None
-        )
-
-    @property
-    def audio_time_base(
-        self
-    ) -> Union[Fraction, None]:
-        """
-        The time base of the audio.
-        """
-        return (
-            self.audio_stream.time_base
-            if self.has_audio else
-            None
-        )
-
-    @property
-    def duration(
-        self
-    ) -> Union[float, None]:
-        """
-        The duration of the video.
-        """
-        # TODO: What to do in the case we have
-        # video stream but not 'duration'
-        # attribute (?)
-        return (
-            float(self.video_stream.duration * self.video_stream.time_base)
-            if (
-                self.has_video and
-                self.video_stream.duration
-            ) else
-            None
-        )
-
-    @property
-    def audio_duration(
-        self
-    ) -> Union[float, None]:
-        """
-        The duration of the audio.
-        """
-        # TODO: What to do in the case we have
-        # audio stream but not 'duration'
-        # attribute (?)
-        return (
-            float(self.audio_stream.duration * self.audio_stream.time_base)
-            if (
-                self.has_audio and
-                self.audio_stream.duration
-            ) else
-            None
-        )
-
-    @property
-    def size(
-        self
-    ) -> Union[tuple[int, int], None]:
-        """
-        The size of the video in a (width, height) format.
-        """
-        return (
-            (
-                self.video_stream.width,
-                self.video_stream.height
-            )
-            if self.has_video else
-            None
-        )
-
-    @property
-    def width(
-        self
-    ) -> Union[int, None]:
-        """
-        The width of the video, in pixels.
-        """
-        return (
-            self.size[0]
-            if self.size is not None else
-            None
-        )
-
-    @property
-    def height(
-        self
-    ) -> Union[int, None]:
-        """
-        The height of the video, in pixels.
-        """
-        return (
-            self.size[1]
-            if self.size is not None else
-            None
-        )
-
-    # Any property related to audio has to
-    # start with 'audio_property_name'
-
-    def __init__(
-        self,
-        filename: str,
-        # Use 'rgba' if alpha channel
-        pixel_format: str = 'rgb24'
-    ):
-        self.filename: str = filename
-        """
-        The filename of the video source.
-        """
-        self.pixel_format: str = pixel_format
-        """
-        The pixel format.
-        """
-        self.container: InputContainer = None
-        """
-        The av input general container of the
-        video (that also includes the audio) we
-        are reading.
-        """
-        self.video_stream: Union[VideoStream, None] = None
-        """
-        The stream that includes the video. If
-        no video stream this will be None.
-        """
-        self.audio_stream: Union[AudioStream, None] = None
-        """
-        The stream that includes the audio. If
-        no audio stream this will be None.
-        """
-        self.video_cache: VideoFrameCache = None
-        """
-        The video frame cache system to optimize
-        the way we access to the frames.
-        """
-        self.audio_cache: AudioFrameCache = None
-        """
-        The audio frame cache system to optimize
-        the way we access to the frames.
-        """
-
-        # TODO: Maybe we can read the first
-        # frame, store it and reset, so we have
-        # it in memory since the first moment.
-        # We should do it here because if we
-        # iterate in some moment and then we
-        # want to obtain it... it will be
-        # difficult.
-        # Lets load the variables
-        self.reset()
-
-    def reset(
-        self
-    ) -> 'VideoReader':
-        """
-        Reset all the instances, closing the file
-        and opening again.
-
-        This will also return to the first frame.
-        """
-        if self.container is not None:
-            # TODO: Maybe accept forcing it (?)
-            self.container.seek(0)
-            #self.container.close()
-        else:
-            self.container = av_open(self.filename)
-
-        self.video_stream = (
-            self.container.streams.video[0]
-            if self.container.streams.video else
-            None
-        )
-        # TODO: Should this be 'AUTO' (?)
-        self.video_stream.thread_type = 'AUTO'
-
-        self.audio_stream = (
-            self.container.streams.audio[0]
-            if self.container.streams.audio else
-            None
-        )
-        # TODO: Should this be 'AUTO' (?)
-        self.audio_stream.thread_type = 'AUTO'
-
-        if (
-            self.video_stream is None and
-            self.audio_stream is None
-        ):
-            raise Exception(f'No video nor audio stream found in the "{self.filename}" file.')
-
-        self.video_cache = VideoFrameCache(self.container, self.video_stream)
-        self.audio_cache = AudioFrameCache(self.container, self.audio_stream)
-
-    def seek(
-        self,
-        pts: int,
-        stream = None
-    ) -> 'VideoReader':
-        """
-        Call the container '.seek()' method with
-        the given 'pts' packet time stamp. By
-        default, the video stream is the one in
-        which we apply the seek.
-        """
-        stream = (
-            self.video_stream
-            if stream is None else
-            stream
-        )
-
-        # TODO: Is 'offset' actually a 'pts' (?)
-        self.container.seek(
-            offset = pts,
-            stream = stream
-        )
-
-        return self
-
-    def iterate(
-        self
-    ) -> 'Iterator[Union[VideoFrame, AudioFrame]]':
-        """
-        Iterator to iterate over the video frames
-        (already decoded).
-        """
-        for frame in self.frame_iterator:
-            yield VideoReaderFrame(
-                frame = frame,
-                # TODO: Maybe use util to transform it (?)
-                t = frame.pts * self.time_base,
-                pixel_format = self.pixel_format
-            )
-
-    def iterate_with_audio(
-        self,
-        do_decode_video: bool = True,
-        do_decode_audio: bool = False
-    ) -> 'Iterator[Union[VideoReaderFrame, VideoReaderPacket, None]]':
-        """
-        Iterator to iterate over the video and audio
-        packets, decoded only if the parameters are
-        set as True.
-
-        If the packet is decoded, it will return each
-        frame individually as a VideoReaderFrame
-        instance. If not, the whole packet as a
-        VideoReaderPacket instance.
-        """
-        for packet in self.packet_with_audio_iterator:
-            is_video = packet.stream.type == 'video'
-
-            do_decode = (
-                (
-                    is_video and
-                    do_decode_video
-                ) or
-                (
-                    not is_video and
-                    do_decode_audio
-                )
-            )
-
-            if do_decode:
-                for frame in packet.decode():
-                    # Return each frame decoded
-                    yield VideoReaderFrame(frame)
-            else:
-                # Return the packet as it is
-                yield VideoReaderPacket(packet)
-
-    # These methods below are using the demux
-    def iterate_video_frames(
-        self,
-        start_pts: int = 0,
-        end_pts: Union[int, None] = None
-    ):
-        """
-        Iterate over the video stream packets and
-        decode only the ones in the expected range,
-        so only those frames are decoded (which is
-        an expensive process).
-
-        This method returns a tuple of 3 elements:
-        - `frame` as a `VideoFrame` instance
-        - `t` as the frame time moment
-        - `index` as the frame index
-        """
-        for frame in iterate_stream_frames_demuxing(
-            container = self.container,
-            video_stream = self.video_stream,
-            audio_stream = None,
-            start_pts = start_pts,
-            end_pts = end_pts
-        ):
-            yield frame
-
-    def iterate_audio_frames(
-        self,
-        start_pts: int = 0,
-        end_pts: Union[int, None] = None
-    ):
-        """
-        Iterate over the audio stream packets and
-        decode only the ones in the expected range,
-        so only those frames are decoded (which is
-        an expensive process).
-
-        This method returns a tuple of 3 elements:
-        - `frame` as a `AudioFrame` instance
-        - `t` as the frame time moment
-        - `index` as the frame index
-        """
-        for frame in iterate_stream_frames_demuxing(
-            container = self.container,
-            video_stream = None,
-            audio_stream = self.audio_stream,
-            start_pts = start_pts,
-            end_pts = end_pts
-        ):
-            yield frame
-
-    def get_frame(
-        self,
-        t: Union[int, float, Fraction]
-    ) -> VideoFrame:
-        """
-        Get the video frame that is in the 't' time
-        moment provided.
-        """
-        return self.video_cache.get_frame(t)
-
-    def get_frames(
-        self,
-        start: Union[int, float, Fraction] = 0.0,
-        end: Union[int, float, Fraction, None] = None
-    ):
-        """
-        Iterator to get the video frames in between
-        the provided 'start' and 'end' time moments.
-        """
-        for frame in self.video_cache.get_frames(start, end):
-            yield frame
-
-    def get_audio_frame_from_t(
-        self,
-        t: Union[int, float, Fraction]
-    ) -> 'AudioFrame':
-        """
-        Get the audio frame with the given 't' time
-        moment, using the audio cache system.
-        """
-        return self.audio_cache.get_frame(t)
-
-    def get_audio_frames_from_t(
-        self,
-        t: Union[int, float, Fraction]
-    ):
-        """
-        Get the sequence of audio frames for the
-        given video 't' time moment, using the
-        audio cache system.
-
-        This is useful when we want to write a
-        video frame with its audio, so we obtain
-        all the audio frames associated to it
-        (remember that a video frame is associated
-        with more than 1 audio frame).
-        """
-        t: T = T.from_fps(t, self.fps)
-        # We want all the audios that must be played
-        # during the video frame that starts in the
-        # 't' time moment
-        for frame in self.get_audio_frames(t.truncated, t.next(1).truncated):
-            yield frame
-
-    def get_audio_frames(
-        self,
-        start: Union[int, float, Fraction] = 0.0,
-        end: Union[int, float, Fraction, None] = None
-    ):
-        """
-        Iterator to get the audio frames in between
-        the provided 'start' and 'end' time moments.
-        """
-        for frame in self.audio_cache.get_frames(start, end):
-            yield frame
-
-    def close(
-        self
-    ) -> None:
-        """
-        Close the container to free it.
-        """
-        self.container.close()
-
-
-"""
-When reading packets directly from the stream
-we can receive packets with size=0, but we need
-to process them and decode (or yield them). It
-is only when we are passing packets to the mux
-when we need to ignore teh ones thar are empty
-(size=0).
-
-TODO: Do we need to ignore all? By now, ignoring
-not is causing exceptions, and ignoring them is
-making it work perfectly.
-"""
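The closing module docstring of the removed file records a subtle PyAV rule: packets with size=0 must still be passed to the decoder (they flush buffered frames), and should only be skipped when packets are muxed directly into an output container. The sketch below illustrates that pattern with PyAV alone; it is not part of yta-video-opengl, the file path is a placeholder, and it assumes the input has both a video and an audio stream.

import av

# Demux video and audio packets and decode them. Empty (size == 0)
# packets are still sent to the decoder, since they flush buffered
# frames; they would only be skipped if the packets were being
# written straight to an output container with mux().
# 'input.mp4' is a placeholder and is assumed to contain both streams.
with av.open('input.mp4') as container:
    video_stream = container.streams.video[0]
    audio_stream = container.streams.audio[0]

    for packet in container.demux(video_stream, audio_stream):
        # Decoding an empty packet is valid and may yield flushed frames.
        for frame in packet.decode():
            if packet.stream.type == 'video':
                pixels = frame.to_ndarray(format = 'rgb24')
            else:
                samples = frame.to_ndarray()

        # When remuxing packets without re-encoding, this is the point
        # where empty packets would be dropped instead:
        #     if packet.size == 0:
        #         continue
        #     output_container.mux(packet)

The removed VideoReader.iterate_with_audio followed the same structure, deciding per packet whether to decode (yielding VideoReaderFrame instances) or to pass the packet through as a VideoReaderPacket.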