yta-video-opengl 0.0.20-py3-none-any.whl → 0.0.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_video_opengl/audio.py +5 -0
- yta_video_opengl/complete/frame_combinator.py +1 -90
- yta_video_opengl/complete/frame_generator.py +42 -1
- yta_video_opengl/complete/frame_wrapper.py +13 -0
- yta_video_opengl/complete/timeline.py +199 -118
- yta_video_opengl/complete/track/__init__.py +14 -7
- yta_video_opengl/complete/track/parts.py +99 -62
- yta_video_opengl/media.py +347 -0
- yta_video_opengl/reader/cache/__init__.py +9 -5
- yta_video_opengl/tests.py +34 -1
- {yta_video_opengl-0.0.20.dist-info → yta_video_opengl-0.0.22.dist-info}/METADATA +2 -1
- {yta_video_opengl-0.0.20.dist-info → yta_video_opengl-0.0.22.dist-info}/RECORD +14 -13
- {yta_video_opengl-0.0.20.dist-info → yta_video_opengl-0.0.22.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.20.dist-info → yta_video_opengl-0.0.22.dist-info}/WHEEL +0 -0
yta_video_opengl/complete/track/__init__.py
CHANGED
@@ -43,15 +43,17 @@ class _Track(ABC):
         """
         The end of the last audio of this track,
         which is also the end of the track. This
-        is the last time moment that has to
-        rendered
+        is the last time moment that has not to
+        be rendered because its the end and it
+        is not included -> [start, end).
         """
         return Fraction(
-            0.0
-            if len(self.medias) == 0 else
             max(
-                media.end
-                for media in self.medias
+                (
+                    media.end
+                    for media in self.medias
+                ),
+                default = 0.0
             )
         )
 
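The `end` rewrite replaces the explicit `len(self.medias) == 0` guard with `max()`'s `default` argument, which handles the empty-track case in one expression; the docstring change also pins down that tracks use half-open `[start, end)` intervals, so `end` itself is never rendered. A minimal sketch of the before/after behavior:

```python
from quicktions import Fraction

medias = []  # a freshly created track owns no medias yet

# max() on an empty iterable raises ValueError, hence the old guard:
# end = Fraction(0.0 if len(medias) == 0 else max(m.end for m in medias))

# The new form lets max() supply the fallback itself:
end = Fraction(max((media.end for media in medias), default = 0.0))
assert end == 0
```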
@@ -185,7 +187,7 @@ class _Track(ABC):
             t = t.truncated
         else:
             t = self.end
-
+
         self._medias.append(self._make_media_on_track(
             media = media,
             start = t
@@ -344,10 +346,15 @@ class _TrackWithAudio(_Track):
                 format = self.audio_format
             )
             if self.is_muted else
+            # TODO: What if we find None here (?)
            self._get_part_at_t(t).get_audio_frames_at(t)
         )
 
         for frame in frames:
+            print(' HELLO ')
+            print(frame)
+            if frame == []:
+                print(f' [ERROR] Audio frame t:{str(float(t))} not obtained.')
             yield frame
 
 class _TrackWithVideo(_Track):
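The `print(' HELLO ')` / `print(frame)` lines are clearly temporary debug traces, and `frame == []` only matches when the upstream generator yielded a literal empty list. If these traces need to outlive debugging, a sketch of the same check routed through the standard `logging` module (illustrative names, not the package's API):

```python
import logging

logger = logging.getLogger('yta_video_opengl')

def iter_audio_frames(frames, t):
    """
    Yield the given audio frames, logging instead of
    printing when an empty result slips through.
    """
    for frame in frames:
        if frame == []:
            logger.error('Audio frame t:%s not obtained.', float(t))
        yield frame
```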
yta_video_opengl/complete/track/parts.py
CHANGED
@@ -68,20 +68,19 @@ class _Part(ABC):
     to fulfill.
     """
 
-class _AudioPart(_Part):
+class _PartWithAudio(_Part):
     """
-    Class to represent an element that is on the
-    track, that can be an empty space or an audio.
+    TODO: Explain
     """
 
     def __init__(
         self,
-        track: 'AudioTrack',
+        track: Union['AudioTrack', 'VideoTrack'],
         start: Union[int, float, Fraction],
         end: Union[int, float, Fraction],
-        media: Union[AudioOnTrack, None] = None
+        media: Union[AudioOnTrack, VideoOnTrack, None] = None
     ):
-        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
+        ParameterValidator.validate_instance_of('media', media, [AudioOnTrack, VideoOnTrack])
 
         super().__init__(
             track = track,
@@ -105,13 +104,25 @@ class _AudioPart(_Part):
         Iterate over all the audio frames that
         exist at the time moment 't' provided.
         """
+        frames = []
         if not self.is_empty_part:
-
-
-
-
-            )
-
+            # TODO: What do we do in this case (?)
+            frames = list(self.media.get_audio_frames_at(t))
+
+            if len(frames) == 0:
+                print(f' [ERROR] Audio frame {str(float(t))} was not obtained')
+            else:
+                frames = [
+                    AudioFrameWrapped(
+                        frame = frame,
+                        is_from_empty_part = False
+                    )
+                    for frame in frames
+                ]
+
+        # This could be because is empty part or
+        # because we couldn't obtain the frames
+        if len(frames) == 0:
             frames = generate_silent_frames(
                 fps = self._track.fps,
                 audio_fps = self._track.audio_fps,
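The rewritten method first materializes the decoded frames, so both "this part is empty" and "the decoder returned nothing" funnel into the same `generate_silent_frames` fallback. For reference, producing one silent frame with pyav looks roughly like this; a sketch only, the package's own helper may differ:

```python
import numpy as np
from av.audio.frame import AudioFrame

def silent_frame(samples: int = 1024, sample_rate: int = 44100) -> AudioFrame:
    """One silent stereo s16 planar frame, the kind used to pad empty parts."""
    silence = np.zeros((2, samples), dtype = np.int16)  # (channels, samples)
    frame = AudioFrame.from_ndarray(silence, format = 's16p', layout = 'stereo')
    frame.sample_rate = sample_rate
    return frame
```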
@@ -121,13 +132,12 @@ class _AudioPart(_Part):
                 format = self._track.audio_format
             )
 
-
-
+        for frame in frames:
+            yield frame
 
-class _VideoPart(_Part):
+class _PartWithVideo(_Part):
     """
-    Class to represent an element that is on the
-    track, that can be an empty space or a video.
+    TODO: Explain
     """
 
     def __init__(
@@ -152,12 +162,6 @@ class _VideoPart(_Part):
         Useful internal tool to generate background
         frames for the empty parts.
         """
-        # TODO: Can I refactor this below (?)
-        self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
-        """
-        Useful internal tool to generate silent
-        audio frames for the empty parts.
-        """
 
     def get_frame_at(
         self,
@@ -167,25 +171,42 @@ class _VideoPart(_Part):
         Get the frame that must be displayed at
         the given 't' time moment.
         """
-
-
-
-
-
-
-
-
-
+        is_from_empty_part = False
+        if self.is_empty_part:
+            frame = self._video_frame_generator.background.full_black(
+                size = self._track.size,
+                time_base = fps_to_time_base(self._track.fps)
+            )
+
+            is_from_empty_part = True
+        else:
+            # TODO: This can be None, why? I don't know...
+            frame = self.media.get_frame_at(t)
+
+            if frame is None:
+                print(f' [ERROR] Frame {str(float(t))} was not obtained')
+
+            frame = (
+                # I'm using a red full frame to be able to detect
+                # fast the frames that were not available, but
+                # I need to find the error and find a real solution
+                self._video_frame_generator.background.full_red(
                     size = self._track.size,
                     time_base = fps_to_time_base(self._track.fps)
-            )
-
-
-            if self.is_empty_part else
-            VideoFrameWrapped(
-                frame = self.media.get_frame_at(t),
-                is_from_empty_part = False
+                )
+                if frame is None else
+                frame
             )
+
+        # TODO: What about the 'format' (?)
+        # TODO: Maybe I shouldn't set the 'time_base'
+        # here and do it just in the Timeline 'render'
+        #return get_black_background_video_frame(self._track.size)
+        # TODO: This 'time_base' maybe has to be related
+        # to a Timeline general 'time_base' and not the fps
+        frame = VideoFrameWrapped(
+            frame = frame,
+            is_from_empty_part = is_from_empty_part
         )
 
         # TODO: This should not happen because of
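The full-red frame is a deliberate visual sentinel: when `media.get_frame_at(t)` unexpectedly returns `None`, the gap shows up as an unmistakable red flash instead of a silent glitch, which is exactly what the inline comment asks for while the root cause is unknown. A generic way to build such a solid-color sentinel with pyav (a sketch, not the package's `VideoFrameGenerator`):

```python
import numpy as np
from av.video.frame import VideoFrame

def solid_color_frame(size: tuple, rgb: tuple) -> VideoFrame:
    """Build a solid-color frame to act as a visual 'missing frame' sentinel."""
    width, height = size
    pixels = np.empty((height, width, 3), dtype = np.uint8)
    pixels[:] = rgb
    return VideoFrame.from_ndarray(pixels, format = 'rgb24')

red_sentinel = solid_color_frame((1920, 1080), (255, 0, 0))
```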
@@ -201,30 +222,46 @@ class _VideoPart(_Part):
 
         return frame
 
-
+class _AudioPart(_PartWithAudio):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or an audio.
+    """
+
+    def __init__(
         self,
-
+        track: 'AudioTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, None] = None
     ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            audio_samples_per_frame = self._track.audio_samples_per_frame,
-            # TODO: Where do this 2 formats come from (?)
-            layout = self._track.audio_layout,
-            format = self._track.audio_format
-        )
+        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+class _VideoPart(_PartWithAudio, _PartWithVideo):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or a video.
+    """
 
-
-
+    def __init__(
+        self,
+        track: 'VideoTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
 
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
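After this refactor `_VideoPart` inherits from both `_PartWithAudio` and `_PartWithVideo`, which share `_Part` as a common base, so a single cooperative `super().__init__()` chain initializes the whole diamond exactly once. A self-contained sketch of that shape (generic names, not the package's classes):

```python
class Part:
    def __init__(self, track, start, end, media = None):
        self.track, self.start, self.end, self.media = track, start, end, media

class PartWithAudio(Part):
    def get_audio_frames_at(self, t):
        return []  # the silence fallback would live here

class PartWithVideo(Part):
    def get_frame_at(self, t):
        return None  # the background fallback would live here

class VideoPart(PartWithAudio, PartWithVideo):
    pass

# MRO: VideoPart, PartWithAudio, PartWithVideo, Part, object
print([cls.__name__ for cls in VideoPart.__mro__])
```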
yta_video_opengl/media.py
ADDED
@@ -0,0 +1,347 @@
+
+"""
+The pyav uses Pillow to load an image as
+a numpy array but using not the alpha.
+"""
+from yta_video_opengl.complete.frame_generator import VideoFrameGenerator
+from yta_video_opengl.writer import VideoWriter
+from yta_video_opengl.t import get_ts
+from av.video.frame import VideoFrame
+from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
+from quicktions import Fraction
+from PIL import Image
+from abc import ABC, abstractmethod
+from typing import Union
+
+import numpy as np
+
+
+class _Media(ABC):
+    """
+    Class to represent an element that can be
+    used in our video editor, that can be a
+    video, an audio, an image, a color frame,
+    etc.
+    """
+
+    @property
+    def duration(
+        self
+    ) -> Fraction:
+        """
+        The duration of the media.
+        """
+        return self.end - self.start
+
+    def __init__(
+        self,
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction]
+    ):
+        self.start: Fraction = Fraction(start)
+        """
+        The time moment 't' in which the media
+        should start.
+        """
+        self.end: Fraction = Fraction(end)
+        """
+        The time moment 't' in which the media
+        should end.
+        """
+
+    def _get_t(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> Fraction:
+        """
+        Get the real 't' time moment based on the
+        media 'start' and 'end'. If they were
+        asking for the t=0.5s but our media was
+        subclipped to [1.0, 2.0), the 0.5s must be
+        actually the 1.5s of the media because of
+        the subclipped time range.
+
+        This method will raise an exception if the
+        't' time moment provided, when adjusted to
+        the internal media start and end time, is
+        not valid (is after the end).
+        """
+        t += self.start
+
+        if t >= self.end:
+            raise Exception(f'The "t" ({str(t)}) provided is out of range. This media lasts from [{str(self.start)}, {str(self.end)}).')
+
+        return t
+
+class _CanBeSavedAsVideo:
+    """
+    Class to implement the functionality of
+    being written into a video file, frame
+    by frame.
+    """
+
+    def save_as(
+        self,
+        output_filename: str,
+        fps: Union[int, float, Fraction] = 60.0,
+        video_codec: str = 'h264',
+        video_pixel_format: str = 'yuv420p'
+    ) -> '_CanBeSavedAsVideo':
+        """
+        Save the media as a video to the
+        given 'output_filename' file.
+        """
+        writer = VideoWriter(output_filename)
+
+        fps = int(fps)
+
+        # TODO: This has to be dynamic according to the
+        # video we are writing (?)
+        writer.set_video_stream(
+            codec_name = video_codec,
+            fps = fps,
+            size = self.size,
+            pixel_format = video_pixel_format
+        )
+
+        for index, t in enumerate(get_ts(self.start, self.end, fps)):
+            frame = self.get_frame_from_t(t)
+            frame.pts = index
+            frame.time_base = Fraction(1, fps)
+
+            writer.mux_video_frame(
+                frame = frame
+            )
+
+        writer.mux_video_frame(None)
+        writer.output.close()
+
+        return self
+
+class _HasVideoFrame(ABC):
+    """
+    Class to be inherited by all those classes
+    that have video frames.
+    """
+
+    # TODO: Maybe force
+    # size: tuple[int, int] = (1920, 1080),
+    # dtype: dtype = np.uint8,
+    # format: str = 'rgb24'
+    # on __init__ (?)
+
+    @abstractmethod
+    def get_frame_from_t(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> 'VideoFrame':
+        """
+        Get the video frame with the given 't' time
+        moment.
+        """
+        # TODO: Maybe
+        pass
+
+class _HasAudioFrame(ABC):
+    """
+    Class to be inherited by all those classes
+    that have audio frames.
+    """
+
+    @abstractmethod
+    def get_audio_frames_from_t(
+        self,
+        t: Union[int, float, Fraction]
+    ):
+        """
+        Get the sequence of audio frames for the
+        given video 't' time moment, using the
+        audio cache system.
+
+        This is useful when we want to write a
+        video frame with its audio, so we obtain
+        all the audio frames associated to it
+        (remember that a video frame is associated
+        with more than 1 audio frame).
+        """
+        pass
+
+class _StaticMedia(_Media, _HasVideoFrame, _CanBeSavedAsVideo):
+    """
+    Class to represent a media that doesn't
+    change during its whole duration and
+    the frame is the same, it remains static.
+
+    This class must be implemented by our
+    ImageMedia and ColorMedia as the frame
+    will be always the same, an image or
+    a color frame.
+    """
+
+    @property
+    @abstractmethod
+    def frame(
+        self
+    ) -> VideoFrame:
+        """
+        The frame that must be displayed.
+        """
+        pass
+
+    def __init__(
+        self,
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction]
+    ):
+        _Media.__init__(
+            self,
+            start = start,
+            end = end
+        )
+
+    def get_frame_from_t(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> VideoFrame:
+        """
+        Get the video frame with the given 't' time
+        moment.
+        """
+        # This will raise an exception if invalid 't'
+        # when adapted to its internal 'start' and 'end'
+        t = self._get_t(t)
+
+        # TODO: What if we need to resize the source
+        # before putting it into a video frame (?)
+
+        return self.frame
+
+class ColorMedia(_StaticMedia):
+    """
+    A media that will be a single color video
+    frame during its whole duration.
+    """
+
+    @property
+    def frame(
+        self
+    ) -> VideoFrame:
+        """
+        The frame that must be displayed.
+        """
+        # TODO: Make this use the 'color'
+        # TODO: I need to implement the alpha layer
+        return VideoFrameGenerator().background.full_white(
+            size = self.size
+        )
+
+    def __init__(
+        self,
+        # TODO: Apply format
+        color: any,
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        size: tuple[int, int] = (1920, 1080),
+        # TODO: Do I need this (?)
+        # dtype: dtype = np.uint8,
+        # format: str = 'rgb24',
+    ):
+        # TODO: Apply format
+        self._color: any = color
+        """
+        The color that will be used to make the
+        frame that will be played its whole
+        duration.
+        """
+        self.size: tuple[int, int] = size
+        """
+        The size of the media frame.
+        """
+
+        super().__init__(
+            start = start,
+            end = end
+        )
+
+class ImageMedia(_StaticMedia):
+    """
+    A media that will be a single image
+    during its whole duration.
+    """
+
+    @property
+    def frame(
+        self
+    ) -> VideoFrame:
+        """
+        The frame that must be displayed.
+        """
+        # By default we use it accepting transparency
+        # TODO: The image must be like this:
+        # arr = np.array(img) # shape (h, w, 4), dtype=uint8
+        # TODO: What value if no alpha (?)
+        return VideoFrame.from_ndarray(self._image, format = 'rgba')
+
+    def __init__(
+        self,
+        # TODO: It must be RGB
+        image: Union[np.ndarray, str],
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        size: tuple[int, int] = (1920, 1080),
+        do_include_alpha: bool = True,
+        # TODO: Do I need this (?)
+        # dtype: dtype = np.uint8,
+        # TODO: Maybe this 'format' need to be
+        # dynamic according to if we are reading
+        # the alpha channel or not...
+        # format: str = 'rgb24',
+    ):
+        ParameterValidator.validate_mandatory_instance_of('image', image, [str, np.ndarray])
+        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+        ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
+        ParameterValidator.validate_mandatory_bool('do_include_alpha', do_include_alpha)
+
+        image = (
+            image_to_numpy_pillow(image, do_include_alpha = do_include_alpha)
+            if PythonValidator.is_string(image) else
+            image
+        )
+
+        self._image: np.ndarray = image
+        """
+        The image that will be used to make the
+        frame that will be played its whole
+        duration.
+        """
+        self.size: tuple[int, int] = size
+        """
+        The size of the media frame.
+        """
+
+        super().__init__(
+            start = start,
+            end = end
+        )
+
+# TODO: Need to implement Video as the others
+# are implemented here
+
+# TODO: I think I have this util in another
+# library, so please check it...
+def image_to_numpy_pillow(
+    filename: str,
+    do_include_alpha: bool = True
+) -> 'np.ndarray':
+    """
+    Read the imagen file 'filename' and transform
+    it into a numpy, reading also the alpha channel.
+    """
+    mode = (
+        'RGBA'
+        if do_include_alpha else
+        'RGB'
+    )
+
+    return np.array(Image.open(filename).convert(mode))
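The module docstring above alludes to why Pillow is now a dependency: loading through `Image.open(...).convert('RGBA')` keeps the alpha channel that the frame then carries into pyav. A minimal round trip showing the expected array shape (an in-memory image is used so the snippet is self-contained):

```python
import numpy as np
from av.video.frame import VideoFrame
from PIL import Image

# Stand-in for Image.open(filename): a 64x32 half-transparent red image
image = Image.new('RGBA', (64, 32), (255, 0, 0, 128))

pixels = np.array(image.convert('RGBA'))  # shape (h, w, 4), dtype uint8
assert pixels.shape == (32, 64, 4)

# The same call ImageMedia.frame makes to hand the pixels to pyav
frame = VideoFrame.from_ndarray(pixels, format = 'rgba')
```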
yta_video_opengl/reader/cache/__init__.py
CHANGED
@@ -155,11 +155,15 @@ class FrameCache(ABC):
         seek and start decoding frames from that
         keyframe.
         """
-        return max(
-            key_frame_pts
-            for key_frame_pts in self.key_frames_pts
-            if key_frame_pts <= pts
-        )
+        return max(
+            (
+                key_frame_pts
+                for key_frame_pts in self.key_frames_pts
+                if key_frame_pts <= pts
+            ),
+            # If no key frames, just 0
+            default = 0
+        )
 
     def _store_frame_in_cache(
         self,
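This mirrors the `max(..., default=...)` fix in `_Track.end`: with no keyframe at or before `pts`, the old bare `max()` raised `ValueError`, while the new form seeks from `0`. Since keyframe pts come in ascending order, an equivalent lookup can also be done in O(log n) with `bisect`; a sketch under that ordering assumption, not the package's code:

```python
from bisect import bisect_right

def nearest_key_frame_pts(key_frames_pts: list, pts: int) -> int:
    """Last keyframe pts <= pts, or 0 if there is none."""
    index = bisect_right(key_frames_pts, pts)
    return key_frames_pts[index - 1] if index > 0 else 0

assert nearest_key_frame_pts([0, 250, 500], 300) == 250
assert nearest_key_frame_pts([], 300) == 0
```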
yta_video_opengl/tests.py
CHANGED
@@ -597,6 +597,39 @@ def video_modified_stored():
     # # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
     # timeline.render(OUTPUT_PATH)
 
+    # # Testing concatenating
+    # timeline = Timeline()
+    # # When you concat like this, some of the
+    # # videos have frames that cannot be accessed
+    # # and I don't know why...
+    # timeline.add_video(Video('test_files/glitch_rgb_frame.mp4'))
+    # timeline.add_video(Video('test_files/output.mp4'))
+    # timeline.add_video(Video('test_files/output_render.mp4'))
+    # timeline.add_video(Video('test_files/strange_tv_frame.mp4'))
+    # timeline.add_video(Video('test_files/test_1.mp4'))
+    # timeline.add_video(Video('test_files/test_1_short_2.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_1st_track_solo_v0_0_15.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_2nd_track_solo_v0_0_15.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_combined_tracks_v0_0_015.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_combined_v0_0_15.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_add_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_difference_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_multiply_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_overlay_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_screen_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_combine_skipping_empty_using_priority_v0_0_18.mp4'))
+    # timeline.add_video(Video('test_files/test_ok_v0_0_13.mp4'))
+
+    # timeline.render('test_files/concatenated.mp4')
+
+    from yta_video_opengl.media import ImageMedia, ColorMedia
+
+    image_media = ImageMedia('C:/Users/dania/Desktop/PROYECTOS/RECURSOS/mobile_alpha.png', 0, 1).save_as('test_files/test_image.mp4')
+
+    color_media = ColorMedia('random', 0, 1).save_as('test_files/test_color.mp4')
+
+    return
+
     # TODO: This test will add videos that
     # must be played at the same time
     video = Video(VIDEO_PATH, 0.25, 0.75)
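This block exercises the new `media.py` end to end: `save_as` assigns each frame `pts = index` with `time_base = Fraction(1, fps)`, so frame `i` is presented at `i / fps` seconds; at the default 60 fps, frame 30 lands at t = 0.5 s:

```python
from quicktions import Fraction

fps = 60
time_base = Fraction(1, fps)

# Presentation time of frame 30 under save_as's numbering
assert float(30 * time_base) == 0.5
```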
@@ -610,7 +643,7 @@ def video_modified_stored():
     timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
 
-    timeline.tracks[0].mute()
+    #timeline.tracks[0].mute()
 
     # Track 2
     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
{yta_video_opengl-0.0.20.dist-info → yta_video_opengl-0.0.22.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.20
+Version: 0.0.22
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.9
 Requires-Dist: av (>=0.0.1,<19.0.0)
 Requires-Dist: moderngl (>=0.0.1,<9.0.0)
 Requires-Dist: numpy (>=0.0.1,<9.0.0)
+Requires-Dist: pillow (>=0.0.1,<99.0.0)
 Requires-Dist: quicktions (>=0.0.1,<9.0.0)
 Requires-Dist: yta_timer (>0.0.1,<1.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)