yta-video-opengl 0.0.22__py3-none-any.whl → 0.0.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_video_opengl/__init__.py +3 -2
- yta_video_opengl/classes.py +1 -1
- yta_video_opengl/nodes/__init__.py +4 -4
- yta_video_opengl/tests.py +424 -550
- yta_video_opengl/utils.py +9 -421
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.23.dist-info}/METADATA +1 -6
- yta_video_opengl-0.0.23.dist-info/RECORD +12 -0
- yta_video_opengl/audio.py +0 -219
- yta_video_opengl/complete/__init__.py +0 -0
- yta_video_opengl/complete/frame_combinator.py +0 -204
- yta_video_opengl/complete/frame_generator.py +0 -319
- yta_video_opengl/complete/frame_wrapper.py +0 -135
- yta_video_opengl/complete/timeline.py +0 -571
- yta_video_opengl/complete/track/__init__.py +0 -500
- yta_video_opengl/complete/track/media/__init__.py +0 -222
- yta_video_opengl/complete/track/parts.py +0 -267
- yta_video_opengl/complete/track/utils.py +0 -78
- yta_video_opengl/media.py +0 -347
- yta_video_opengl/reader/__init__.py +0 -710
- yta_video_opengl/reader/cache/__init__.py +0 -253
- yta_video_opengl/reader/cache/audio.py +0 -195
- yta_video_opengl/reader/cache/utils.py +0 -48
- yta_video_opengl/reader/cache/video.py +0 -113
- yta_video_opengl/t.py +0 -233
- yta_video_opengl/video.py +0 -277
- yta_video_opengl/writer.py +0 -278
- yta_video_opengl-0.0.22.dist-info/RECORD +0 -31
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.23.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.23.dist-info}/WHEEL +0 -0
@@ -1,500 +0,0 @@
|
|
1
|
-
|
2
|
-
from yta_video_opengl.complete.track.media import AudioOnTrack, VideoOnTrack
|
3
|
-
from yta_video_opengl.complete.track.parts import _AudioPart, _VideoPart, NON_LIMITED_EMPTY_PART_END
|
4
|
-
from yta_video_opengl.complete.track.utils import generate_silent_frames
|
5
|
-
from yta_video_opengl.audio import Audio
|
6
|
-
from yta_video_opengl.t import T
|
7
|
-
from yta_validation.parameter import ParameterValidator
|
8
|
-
from quicktions import Fraction
|
9
|
-
from typing import Union
|
10
|
-
from abc import ABC, abstractmethod
|
11
|
-
|
12
|
-
|
13
|
-
class _Track(ABC):
    """
    Abstract class to be inherited by the
    different track classes we implement.
    """

    @property
    def parts(
        self
    ) -> list[Union['_AudioPart', '_VideoPart']]:
        """
        The list of parts that build this track,
        but with the empty parts detected to
        be fulfilled with black frames and silent
        audios.

        A part can be a video or an empty space.
        """
        # Lazily build (and cache) the parts the
        # first time they are requested
        if (
            not hasattr(self, '_parts') or
            self._parts is None
        ):
            self._recalculate_parts()

        return self._parts

    @property
    def end(
        self
    ) -> Fraction:
        """
        The end of the last media of this track,
        which is also the end of the track. This
        is the last time moment that has not to
        be rendered because its the end and it
        is not included -> [start, end).
        """
        return Fraction(
            max(
                (
                    media.end
                    for media in self.medias
                ),
                # An empty track ends at t = 0
                default = 0.0
            )
        )

    @property
    def medias(
        self
    ) -> list[Union['AudioOnTrack', 'VideoOnTrack']]:
        """
        The list of medias we have in the track
        but ordered using the 'start' attribute
        from first to last.
        """
        return sorted(self._medias, key = lambda media: media.start)

    @property
    def is_muted(
        self
    ) -> bool:
        """
        Flag to indicate if the track is muted or
        not. Being muted means that no audio frames
        will be returned from this track.
        """
        return self._is_muted

    def __init__(
        self,
        index: int
    ):
        self._medias: list[Union['AudioOnTrack', 'VideoOnTrack']] = []
        """
        The list of 'AudioOnTrack' or 'VideoOnTrack'
        instances that must be played on this track.
        """
        self._is_muted: bool = False
        """
        Internal flag to indicate if the track is
        muted or not.
        """
        self.index: int = index
        """
        The index of the track within the timeline.
        """

    def _is_free(
        self,
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction]
    ) -> bool:
        """
        Check if the time range in between the
        'start' and 'end' time given is free or
        there is some media playing at any moment.
        """
        # Two ranges [a, b) and [c, d) overlap
        # when a < d and b > c
        return not any(
            (
                media.start < end and
                media.end > start
            )
            for media in self.medias
        )

    def _get_part_at_t(
        self,
        t: Union[int, float, Fraction]
    ) -> Union['_AudioPart', '_VideoPart']:
        """
        Get the part at the given 't' time
        moment, that will always exist because
        we have an special non ended last
        empty part that would be returned if
        accessing to an empty 't'.

        Raises an exception when the 't' given
        exceeds the special non limited last
        empty part end.
        """
        for part in self.parts:
            if part.start <= t < part.end:
                return part

        # This will only happen if they are
        # asking for a value greater than the
        # NON_LIMITED_EMPTY_PART_END...
        raise Exception('NON_LIMITED_EMPTY_PART_END exceeded.')

    def mute(
        self
    ) -> '_Track':
        """
        Set the track as muted so no audio frame will
        be played from this track.
        """
        self._is_muted = True

        return self

    def unmute(
        self
    ) -> '_Track':
        """
        Set the track as unmuted so the audio frames
        will be played as normal.
        """
        self._is_muted = False

        return self

    def add_media(
        self,
        media: Union['Audio', 'Video'],
        t: Union[int, float, Fraction, None] = None
    ) -> '_Track':
        """
        Add the 'media' provided to the track. If
        a 't' time moment is provided, the media
        will be added to that time moment if
        possible. If there is no other media
        placed in the time gap between the given
        't' and the provided 'media' duration, it
        will be added succesfully. In the other
        case, an exception will be raised.

        If 't' is None, the first available 't'
        time moment will be used, that will be 0.0
        if no media, or the end of the last media.
        """
        ParameterValidator.validate_mandatory_instance_of('media', media, ['Audio', 'Video'])
        ParameterValidator.validate_positive_number('t', t, do_include_zero = True)

        if t is not None:
            # TODO: We can have many different strategies
            # that we could define in the '__init__' maybe
            # NOTE(review): 'self.fps' is expected to be
            # set by the inheriting class — confirm
            t: T = T.from_fps(t, self.fps)
            #if not self._is_free(t.truncated, t.next(1).truncated):
            if not self._is_free(t.truncated, t.truncated + media.duration):
                raise Exception('The media cannot be added at the "t" time moment, something blocks it.')
            t = t.truncated
        else:
            t = self.end

        self._medias.append(self._make_media_on_track(
            media = media,
            start = t
        ))

        # The cached parts are no longer valid
        self._recalculate_parts()

        # TODO: Maybe return the AudioOnTrack instead (?)
        return self

    @abstractmethod
    def _make_part(
        self,
        **kwargs
    ):
        """
        Factory method to return an instance of the
        '_Part' class that the specific '_Track' class
        is using.
        """
        pass

    @abstractmethod
    def _make_media_on_track(
        self,
        **kwargs
    ):
        """
        Factory method to return an instance of the
        'MediaOnTrack' class that the specific '_Track'
        class is using.
        """
        pass

    def _recalculate_parts(
        self
    ) -> '_Track':
        """
        Check the track and get all the parts. A
        part can be empty (no audio or video on
        that time period, which means silent audio
        or black frame), or a video or audio.
        """
        parts = []
        cursor = 0.0

        for media in self.medias:
            # Empty space between cursor and start of
            # the next clip
            if media.start > cursor:
                parts.append(self._make_part(
                    track = self,
                    start = cursor,
                    end = media.start,
                    media = None
                ))

            # The media itself
            parts.append(self._make_part(
                track = self,
                start = media.start,
                end = media.end,
                media = media
            ))

            cursor = media.end

        # Add the non limited last empty part
        parts.append(self._make_part(
            track = self,
            start = cursor,
            end = NON_LIMITED_EMPTY_PART_END,
            media = None
        ))

        self._parts = parts

        return self
class _TrackWithAudio(_Track):
    """
    Class that has the ability to obtain the
    audio frames from the source and must be
    inherited by those tracks that have audio
    (or video, that includes audio).
    """

    def __init__(
        self,
        index: int,
        fps: float,
        audio_fps: float,
        # TODO: Where does it come from (?)
        audio_samples_per_frame: int,
        audio_layout: str = 'stereo',
        audio_format: str = 'fltp'
    ):
        _Track.__init__(
            self,
            index = index
        )

        self.fps: float = float(fps)
        """
        The fps of the video that is associated
        with the Timeline this track belongs to,
        needed to calculate the base t time moments
        to be precise and to obtain or generate the
        frames.
        """
        self.audio_fps: float = float(audio_fps)
        """
        The fps of the audio track, needed to
        generate silent audios for the empty parts.
        """
        self.audio_samples_per_frame: int = audio_samples_per_frame
        """
        The number of samples per audio frame.
        """
        self.audio_layout: str = audio_layout
        """
        The layout of the audio, that can be 'mono'
        or 'stereo'.
        """
        self.audio_format: str = audio_format
        """
        The format of the audio, that can be 's16',
        'flt', 'fltp', etc.
        """

    # TODO: This is not working well when
    # source has different fps
    def get_audio_frames_at(
        self,
        t: Union[int, float, Fraction]
    ):
        """
        Get the sequence of audio frames that
        must be displayed at the 't' time
        moment provided, which the collection
        of audio frames corresponding to the
        audio that is being played at that time
        moment.

        Remember, this 't' time moment provided
        is about the track, and we make the
        conversion to the actual audio 't' to
        get the frame.
        """
        frames = (
            generate_silent_frames(
                fps = self.fps,
                audio_fps = self.audio_fps,
                audio_samples_per_frame = self.audio_samples_per_frame,
                layout = self.audio_layout,
                format = self.audio_format
            )
            if self.is_muted else
            # TODO: What if we find None here (?)
            self._get_part_at_t(t).get_audio_frames_at(t)
        )

        # Fix: leftover debug 'print' calls removed;
        # the frames are yielded lazily as before
        yield from frames
class _TrackWithVideo(_Track):
    """
    Class that has the ability to obtain the
    video frames from the source and must be
    inherited by those tracks that have video.
    """

    def __init__(
        self,
        index: int,
        size: tuple[int, int],
        fps: float
    ):
        _Track.__init__(
            self,
            index = index
        )

        # TODO: This is not needed actually...
        self.fps: float = float(fps)
        """
        The fps of the video that is associated
        with the Timeline this track belongs to,
        needed to calculate the base t time moments
        to be precise and to obtain or generate
        the frames.
        """
        self.size: tuple[int, int] = size
        """
        The size that the output video will have.
        """

    def get_frame_at(
        self,
        t: Union[int, float, Fraction]
    ) -> 'VideoFrameWrapped':
        """
        Get the frame that must be displayed at
        the 't' time moment provided, coming from
        whatever is being played on this track at
        that time moment.

        Remember, this 't' time moment provided
        is about the track, and we make the
        conversion to the actual video 't' to
        get the frame.
        """
        # TODO: What if the frame, that comes from
        # a video, doesn't have the expected size (?)
        part = self._get_part_at_t(t)

        return part.get_frame_at(t)
class AudioTrack(_TrackWithAudio):
    """
    Class to represent a track in which we place
    audios to build a video project.
    """

    def __init__(
        self,
        index: int,
        fps: float,
        audio_fps: float,
        # TODO: Where does it come from (?)
        audio_samples_per_frame: int,
        audio_layout: str = 'stereo',
        audio_format: str = 'fltp'
    ):
        # Fix: 'self' was missing in this explicit
        # parent '__init__' call, which raised a
        # TypeError when instantiating this class
        _TrackWithAudio.__init__(
            self,
            index = index,
            fps = fps,
            audio_fps = audio_fps,
            audio_samples_per_frame = audio_samples_per_frame,
            audio_layout = audio_layout,
            audio_format = audio_format
        )

    def _make_part(
        self,
        **kwargs
    ):
        """
        Factory method to return the '_AudioPart'
        instance that this track class is using.
        """
        return _AudioPart(**kwargs)

    def _make_media_on_track(
        self,
        **kwargs
    ):
        """
        Factory method to return the 'AudioOnTrack'
        instance that this track class is using.
        """
        return AudioOnTrack(**kwargs)
class VideoTrack(_TrackWithVideo, _TrackWithAudio):
    """
    Class to represent a track in which we place
    videos to build a video project.
    """

    def __init__(
        self,
        index: int,
        size: tuple[int, int],
        fps: float,
        audio_fps: float,
        # TODO: Where does it come from (?)
        audio_samples_per_frame: int,
        audio_layout: str = 'stereo',
        audio_format: str = 'fltp',
    ):
        # Both parents are initialized explicitly
        # because they do not cooperate via 'super()'
        _TrackWithVideo.__init__(
            self,
            index = index,
            size = size,
            fps = fps
        )
        _TrackWithAudio.__init__(
            self,
            index = index,
            fps = fps,
            audio_fps = audio_fps,
            audio_samples_per_frame = audio_samples_per_frame,
            audio_layout = audio_layout,
            audio_format = audio_format
        )

    def _make_part(
        self,
        **kwargs
    ):
        """
        Factory method to return the '_VideoPart'
        instance that this track class is using.
        """
        return _VideoPart(**kwargs)

    def _make_media_on_track(
        self,
        **kwargs
    ):
        """
        Factory method to return the 'VideoOnTrack'
        instance that this track class is using.
        """
        return VideoOnTrack(**kwargs)
@@ -1,222 +0,0 @@
|
|
1
|
-
"""
|
2
|
-
If we have a video placed in a timeline,
|
3
|
-
starting at the t=2s and the video lasts
|
4
|
-
2 seconds, the `t` time range in which the
|
5
|
-
video is playing is `[2s, 4s]`, so here
|
6
|
-
you have some examples with global `t`
|
7
|
-
values:
|
8
|
-
- `t=1`, the video is not playing because
|
9
|
-
it starts at `t=2`
|
10
|
-
- `t=3`, the video is playing, it started
|
11
|
-
at `t=2` and it has been playing during 1s
|
12
|
-
- `t=5`, the video is not playing because
|
13
|
-
it started at `t=2`, lasting 2s, so it
|
14
|
-
finished at `t=4`
|
15
|
-
"""
|
16
|
-
from yta_video_opengl.audio import Audio
|
17
|
-
from yta_video_opengl.video import Video
|
18
|
-
from yta_validation.parameter import ParameterValidator
|
19
|
-
from av.audio.frame import AudioFrame
|
20
|
-
from av.video.frame import VideoFrame
|
21
|
-
from quicktions import Fraction
|
22
|
-
from typing import Union
|
23
|
-
from abc import ABC
|
24
|
-
|
25
|
-
|
26
|
-
class _MediaOnTrack(ABC):
    """
    Class to be inherited by any media class
    that will be placed on a track and should
    manage this condition.
    """

    @property
    def end(
        self
    ) -> Fraction:
        """
        The end time moment 't' of the media once
        it has been placed on the track, which
        is affected by the media duration and its
        start time moment on the track.

        This end is different from the media end.
        """
        return self.start + self.media.duration

    def __init__(
        self,
        media: Union['Audio', 'Video'],
        start: Union[int, float, Fraction] = 0.0
    ):
        ParameterValidator.validate_mandatory_instance_of('media', media, [Audio, Video])
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)

        self.media: Union['Audio', 'Video'] = media
        """
        The media source, with all its properties,
        that is placed in the timeline.
        """
        self.start: Fraction = Fraction(start)
        """
        The time moment in which the media should
        start playing, within the timeline.

        This is the time respect to the timeline
        and its different from the media `start`
        time, which is related to the file.
        """

    def _get_t(
        self,
        t: Union[int, float, Fraction]
    ) -> Union[float, Fraction]:
        """
        The media 't' time moment for the given
        global 't' time moment. This 't' is the one
        to use inside the media content to display
        its frame.
        """
        # TODO: Should we make sure 't' is truncated (?)
        # Fix: annotated '-> float' before, but the
        # result is a Fraction (or float) depending
        # on the type of the 't' received
        return t - self.start

    def is_playing(
        self,
        t: Union[int, float, Fraction]
    ) -> bool:
        """
        Check if this media is playing at the general
        't' time moment, which is a global time moment
        for the whole project.
        """
        # TODO: Should we make sure 't' is truncated (?)
        # The media plays within the [start, end) range
        return self.start <= t < self.end
class _MediaOnTrackWithAudio(_MediaOnTrack):
    """
    Class that implements the ability of
    getting audio frames. This class must
    be inherited by any other class that
    has this same ability.
    """

    def __init__(
        self,
        media: Union[Audio, Video],
        start: Union[int, float, Fraction] = 0.0
    ):
        super().__init__(
            media = media,
            start = start
        )

    def get_audio_frame_at(
        self,
        t: Union[int, float, Fraction]
    ) -> Union[AudioFrame, None]:
        """
        Get the audio frame for the 't' time moment
        provided, that could be None if the media
        is not playing in that moment.
        """
        # TODO: Use 'T' here to be precise or the
        # argument itself must be precise (?)
        if not self.is_playing(t):
            return None

        return self.media.get_audio_frame_from_t(self._get_t(t))

    def get_audio_frames_at(
        self,
        t: Union[int, float, Fraction]
    ):
        """
        Iterate over the audio frames that must be
        played at the 't' time moment provided. No
        frame is yielded when the media is not
        playing at that moment.
        """
        # TODO: Use 'T' here to be precise or the
        # argument itself must be precise (?)
        if self.is_playing(t):
            yield from self.media.get_audio_frames_from_t(self._get_t(t))
class _MediaOnTrackWithVideo(_MediaOnTrack):
    """
    Class that implements the ability of
    getting video frames. This class must
    be inherited by any other class that
    has this same ability.
    """

    def __init__(
        self,
        media: Video,
        start: Union[int, float, Fraction] = 0.0
    ):
        super().__init__(
            media = media,
            start = start
        )

    def get_frame_at(
        self,
        t: Union[int, float, Fraction]
    ) -> Union[VideoFrame, None]:
        """
        Get the frame for the 't' time moment provided,
        that could be None if the video is not playing
        in that moment.
        """
        # TODO: Use 'T' here to be precise or the
        # argument itself must be precise (?)
        if not self.is_playing(t):
            return None

        return self.media.get_frame_from_t(self._get_t(t))
class AudioOnTrack(_MediaOnTrackWithAudio):
    """
    An audio in the timeline.
    """

    def __init__(
        self,
        media: Audio,
        start: Union[int, float, Fraction] = 0.0
    ):
        # Restrict the accepted media to 'Audio'
        # instances, as the parent is permissive
        ParameterValidator.validate_mandatory_instance_of('media', media, Audio)

        super().__init__(
            media = media,
            start = start
        )
class VideoOnTrack(_MediaOnTrackWithAudio, _MediaOnTrackWithVideo):
    """
    A video in the timeline.
    """

    def __init__(
        self,
        media: Video,
        start: Union[int, float, Fraction] = 0.0
    ):
        # Restrict the accepted media to 'Video'
        # instances before delegating to the parents
        ParameterValidator.validate_mandatory_instance_of('media', media, Video)

        super().__init__(
            media = media,
            start = start
        )