yta-video-opengl 0.0.21__py3-none-any.whl → 0.0.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_video_opengl/__init__.py +3 -2
- yta_video_opengl/classes.py +1 -1
- yta_video_opengl/nodes/__init__.py +4 -4
- yta_video_opengl/tests.py +424 -545
- yta_video_opengl/utils.py +9 -421
- {yta_video_opengl-0.0.21.dist-info → yta_video_opengl-0.0.23.dist-info}/METADATA +1 -5
- yta_video_opengl-0.0.23.dist-info/RECORD +12 -0
- yta_video_opengl/audio.py +0 -219
- yta_video_opengl/complete/__init__.py +0 -0
- yta_video_opengl/complete/frame_combinator.py +0 -204
- yta_video_opengl/complete/frame_generator.py +0 -318
- yta_video_opengl/complete/frame_wrapper.py +0 -135
- yta_video_opengl/complete/timeline.py +0 -571
- yta_video_opengl/complete/track/__init__.py +0 -500
- yta_video_opengl/complete/track/media/__init__.py +0 -222
- yta_video_opengl/complete/track/parts.py +0 -267
- yta_video_opengl/complete/track/utils.py +0 -78
- yta_video_opengl/reader/__init__.py +0 -710
- yta_video_opengl/reader/cache/__init__.py +0 -253
- yta_video_opengl/reader/cache/audio.py +0 -195
- yta_video_opengl/reader/cache/utils.py +0 -48
- yta_video_opengl/reader/cache/video.py +0 -113
- yta_video_opengl/t.py +0 -233
- yta_video_opengl/video.py +0 -277
- yta_video_opengl/writer.py +0 -278
- yta_video_opengl-0.0.21.dist-info/RECORD +0 -30
- {yta_video_opengl-0.0.21.dist-info → yta_video_opengl-0.0.23.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.21.dist-info → yta_video_opengl-0.0.23.dist-info}/WHEEL +0 -0
yta_video_opengl/complete/track/parts.py (deleted in 0.0.23)

```diff
@@ -1,267 +0,0 @@
-from yta_video_opengl.complete.track.media import AudioOnTrack, VideoOnTrack
-from yta_video_opengl.complete.track.utils import generate_silent_frames
-from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
-from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
-from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
-from yta_video_opengl.t import fps_to_time_base
-from yta_validation.parameter import ParameterValidator
-from quicktions import Fraction
-from typing import Union
-from abc import ABC
-
-
-NON_LIMITED_EMPTY_PART_END = 999
-"""
-A value to indicate that the empty part
-has no end because it is in the last
-position and there is no video after it.
-"""
-
-class _Part(ABC):
-    """
-    Abstract class to represent an element
-    that is on the track, that can be an
-    empty space or a video or audio. This
-    class must be inherited by our own
-    custom part classes.
-    """
-
-    @property
-    def is_empty_part(
-        self
-    ) -> bool:
-        """
-        Flag to indicate if the part is an empty part,
-        which means that there is no media associated
-        but an empty space.
-        """
-        return self.media is None
-
-    def __init__(
-        self,
-        track: Union['AudioTrack', 'VideoTrack'],
-        start: Union[int, float, Fraction],
-        end: Union[int, float, Fraction],
-        media: Union[AudioOnTrack, VideoOnTrack, None] = None
-    ):
-        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
-        ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
-        ParameterValidator.validate_instance_of('media', media, [AudioOnTrack, VideoOnTrack])
-
-        self._track: Union['AudioTrack', 'VideoTrack'] = track
-        """
-        The instance of the track this part belongs
-        to.
-        """
-        self.start: Fraction = Fraction(start)
-        """
-        The start 't' time moment of the part.
-        """
-        self.end: Fraction = Fraction(end)
-        """
-        The end 't' time moment of the part.
-        """
-        self.media: Union[AudioOnTrack, VideoOnTrack, None] = media
-        """
-        The media associated, if existing, or
-        None if it is an empty space that we need
-        to fulfill.
-        """
-
-class _PartWithAudio(_Part):
-    """
-    TODO: Explain
-    """
-
-    def __init__(
-        self,
-        track: Union['AudioTrack', 'VideoTrack'],
-        start: Union[int, float, Fraction],
-        end: Union[int, float, Fraction],
-        media: Union[AudioOnTrack, VideoOnTrack, None] = None
-    ):
-        ParameterValidator.validate_instance_of('media', media, [AudioOnTrack, VideoOnTrack])
-
-        super().__init__(
-            track = track,
-            start = start,
-            end = end,
-            media = media
-        )
-
-        # TODO: Can I refactor this below (?)
-        self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
-        """
-        Useful internal tool to generate silent
-        audio frames for the empty parts.
-        """
-
-    def get_audio_frames_at(
-        self,
-        t: Union[int, float, Fraction]
-    ):
-        """
-        Iterate over all the audio frames that
-        exist at the time moment 't' provided.
-        """
-        frames = []
-        if not self.is_empty_part:
-            # TODO: What do we do in this case (?)
-            frames = list(self.media.get_audio_frames_at(t))
-
-            if len(frames) == 0:
-                print(f' [ERROR] Audio frame {str(float(t))} was not obtained')
-            else:
-                frames = [
-                    AudioFrameWrapped(
-                        frame = frame,
-                        is_from_empty_part = False
-                    )
-                    for frame in frames
-                ]
-
-        # This could be because it is an empty part or
-        # because we couldn't obtain the frames
-        if len(frames) == 0:
-            frames = generate_silent_frames(
-                fps = self._track.fps,
-                audio_fps = self._track.audio_fps,
-                audio_samples_per_frame = self._track.audio_samples_per_frame,
-                # TODO: Where do these 2 formats come from (?)
-                layout = self._track.audio_layout,
-                format = self._track.audio_format
-            )
-
-        for frame in frames:
-            yield frame
-
-class _PartWithVideo(_Part):
-    """
-    TODO: Explain
-    """
-
-    def __init__(
-        self,
-        track: 'VideoTrack',
-        start: Union[int, float, Fraction],
-        end: Union[int, float, Fraction],
-        media: Union[VideoOnTrack, None] = None
-    ):
-        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
-
-        super().__init__(
-            track = track,
-            start = start,
-            end = end,
-            media = media
-        )
-
-        # TODO: Can I refactor this below (?)
-        self._video_frame_generator: VideoFrameGenerator = VideoFrameGenerator()
-        """
-        Useful internal tool to generate background
-        frames for the empty parts.
-        """
-
-    def get_frame_at(
-        self,
-        t: Union[int, float, Fraction]
-    ) -> 'VideoFrameWrapped':
-        """
-        Get the frame that must be displayed at
-        the given 't' time moment.
-        """
-        is_from_empty_part = False
-        if self.is_empty_part:
-            frame = self._video_frame_generator.background.full_black(
-                size = self._track.size,
-                time_base = fps_to_time_base(self._track.fps)
-            )
-
-            is_from_empty_part = True
-        else:
-            # TODO: This can be None, why? I don't know...
-            frame = self.media.get_frame_at(t)
-
-            if frame is None:
-                print(f' [ERROR] Frame {str(float(t))} was not obtained')
-
-            frame = (
-                # I'm using a full red frame to be able to quickly
-                # detect the frames that were not available, but
-                # I need to find the error and find a real solution
-                self._video_frame_generator.background.full_red(
-                    size = self._track.size,
-                    time_base = fps_to_time_base(self._track.fps)
-                )
-                if frame is None else
-                frame
-            )
-
-        # TODO: What about the 'format' (?)
-        # TODO: Maybe I shouldn't set the 'time_base'
-        # here and do it just in the Timeline 'render'
-        #return get_black_background_video_frame(self._track.size)
-        # TODO: This 'time_base' maybe has to be related
-        # to a Timeline general 'time_base' and not the fps
-        frame = VideoFrameWrapped(
-            frame = frame,
-            is_from_empty_part = is_from_empty_part
-        )
-
-        # TODO: This should not happen because of
-        # the way we handle the videos here but the
-        # video could send us a None frame here, so
-        # do we raise exception (?)
-        if frame._frame is None:
-            #frame = get_black_background_video_frame(self._track.size)
-            # TODO: By now I'm raising exception to check if
-            # this happens or not because I think it would
-            # be malfunctioning
-            raise Exception(f'Video is returning None video frame at t={str(t)}.')
-
-        return frame
-
-class _AudioPart(_PartWithAudio):
-    """
-    Class to represent an element that is on the
-    track, that can be an empty space or an audio.
-    """
-
-    def __init__(
-        self,
-        track: 'AudioTrack',
-        start: Union[int, float, Fraction],
-        end: Union[int, float, Fraction],
-        media: Union[AudioOnTrack, None] = None
-    ):
-        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
-
-        super().__init__(
-            track = track,
-            start = start,
-            end = end,
-            media = media
-        )
-
-class _VideoPart(_PartWithAudio, _PartWithVideo):
-    """
-    Class to represent an element that is on the
-    track, that can be an empty space or a video.
-    """
-
-    def __init__(
-        self,
-        track: 'VideoTrack',
-        start: Union[int, float, Fraction],
-        end: Union[int, float, Fraction],
-        media: Union[VideoOnTrack, None] = None
-    ):
-        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
-
-        super().__init__(
-            track = track,
-            start = start,
-            end = end,
-            media = media
-        )
```
yta_video_opengl/complete/track/utils.py (deleted in 0.0.23)

```diff
@@ -1,78 +0,0 @@
-from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
-from yta_video_opengl.complete.frame_generator import AudioFrameGenerator
-from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
-
-
-# TODO: Is this method here ok (?)
-def generate_silent_frames(
-    fps: int,
-    audio_fps: int,
-    audio_samples_per_frame: int,
-    layout: str = 'stereo',
-    format: str = 'fltp'
-) -> list[AudioFrameWrapped]:
-    """
-    Get the audio silent frames we need for
-    a video with the given 'fps', 'audio_fps'
-    and 'audio_samples_per_frame', using the
-    also provided 'layout' and 'format' for
-    the audio frames.
-
-    This method is used when we have empty
-    parts on our tracks and we need to
-    provide the frames, that are passed as
-    AudioFrameWrapped instances and tagged as
-    coming from empty parts.
-    """
-    audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
-
-    # Check how many full and partial silent
-    # audio frames we need
-    number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
-        video_fps = fps,
-        sample_rate = audio_fps,
-        number_of_samples_per_audio_frame = audio_samples_per_frame
-    )
-
-    # The complete silent frames we need
-    silent_frame = audio_frame_generator.silent(
-        sample_rate = audio_fps,
-        layout = layout,
-        number_of_samples = audio_samples_per_frame,
-        format = format,
-        pts = None,
-        time_base = None
-    )
-
-    frames = (
-        [
-            AudioFrameWrapped(
-                frame = silent_frame,
-                is_from_empty_part = True
-            )
-        ] * number_of_frames
-        if number_of_frames > 0 else
-        []
-    )
-
-    # The remaining partial silent frames we need
-    if number_of_remaining_samples > 0:
-        silent_frame = audio_frame_generator.silent(
-            sample_rate = audio_fps,
-            # TODO: Check where do we get this value from
-            layout = layout,
-            number_of_samples = number_of_remaining_samples,
-            # TODO: Check where do we get this value from
-            format = format,
-            pts = None,
-            time_base = None
-        )
-
-        frames.append(
-            AudioFrameWrapped(
-                frame = silent_frame,
-                is_from_empty_part = True
-            )
-        )
-
-    return frames
```
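The removed `generate_silent_frames` relies on `audio_frames_and_remainder_per_video_frame` (imported from `yta_video_opengl.utils`) to decide how many full silent audio frames, plus one shorter partial frame, are needed to cover a single video frame. Assuming that helper follows the usual samples-per-video-frame arithmetic, here is a sketch of the computation; it uses the standard-library `Fraction` in place of the package's `quicktions.Fraction` and is an illustration, not the package's implementation.

```python
from fractions import Fraction

def silent_frame_counts(
    video_fps: int,
    sample_rate: int,
    samples_per_audio_frame: int
) -> tuple[int, int]:
    """
    How many full audio frames, plus how many leftover samples,
    are needed to cover exactly one video frame of silence.
    """
    # Audio samples spanned by one video frame, e.g. 44100 / 30 = 1470.
    samples_per_video_frame = Fraction(sample_rate, video_fps)

    # Full audio frames of 'samples_per_audio_frame' samples each...
    full_frames = int(samples_per_video_frame // samples_per_audio_frame)

    # ...plus the remaining samples that go into one partial frame.
    remaining_samples = int(
        samples_per_video_frame - full_frames * samples_per_audio_frame
    )

    return full_frames, remaining_samples

# 60 fps video at 44100 Hz with 1024-sample audio frames:
# 44100 / 60 = 735 samples -> (0, 735), i.e. a single 735-sample frame.
print(silent_frame_counts(60, 44100, 1024))
```

In the removed helper above, the full frames reuse one silent frame of `audio_samples_per_frame` samples, and the remainder, if any, gets its own shorter silent frame, all wrapped as `AudioFrameWrapped` and tagged as coming from an empty part.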