yta-video-opengl 0.0.18__py3-none-any.whl → 0.0.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_video_opengl/audio.py +214 -0
- yta_video_opengl/complete/timeline.py +65 -38
- yta_video_opengl/complete/track/__init__.py +493 -0
- yta_video_opengl/complete/{video_on_track.py → track/media/__init__.py} +112 -47
- yta_video_opengl/complete/track/parts.py +230 -0
- yta_video_opengl/complete/track/utils.py +78 -0
- yta_video_opengl/reader/__init__.py +0 -19
- yta_video_opengl/reader/cache/utils.py +1 -1
- yta_video_opengl/tests.py +15 -5
- yta_video_opengl/video.py +9 -13
- {yta_video_opengl-0.0.18.dist-info → yta_video_opengl-0.0.20.dist-info}/METADATA +1 -1
- {yta_video_opengl-0.0.18.dist-info → yta_video_opengl-0.0.20.dist-info}/RECORD +14 -11
- yta_video_opengl/complete/track.py +0 -467
- {yta_video_opengl-0.0.18.dist-info → yta_video_opengl-0.0.20.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.18.dist-info → yta_video_opengl-0.0.20.dist-info}/WHEEL +0 -0
yta_video_opengl/complete/{video_on_track.py → track/media/__init__.py}
RENAMED
@@ -13,17 +13,21 @@ at `t=2` and it has been playing during 1s
 it started at `t=2`, lasting 2s, so it
 finished at `t=4`
 """
+from yta_video_opengl.audio import Audio
 from yta_video_opengl.video import Video
 from yta_validation.parameter import ParameterValidator
-from av.video.frame import VideoFrame
 from av.audio.frame import AudioFrame
+from av.video.frame import VideoFrame
 from quicktions import Fraction
 from typing import Union
+from abc import ABC


-class 
+class _MediaOnTrack(ABC):
     """
-    
+    Class to be inherited by any media class
+    that will be placed on a track and should
+    manage this condition.
     """

     @property
@@ -31,50 +35,49 @@ class VideoOnTrack:
         self
     ) -> Fraction:
         """
-        The end time moment 't' of the
+        The end time moment 't' of the audio once
         once its been placed on the track, which
-        is affected by the
+        is affected by the audio duration and its
         start time moment on the track.

-        This end is different from the
+        This end is different from the audio end.
         """
-        return self.start + self.
+        return self.start + self.media.duration

     def __init__(
         self,
-        
+        media: Union[Audio, Video],
         start: Union[int, float, Fraction] = 0.0
     ):
-        ParameterValidator.validate_mandatory_instance_of('
+        ParameterValidator.validate_mandatory_instance_of('media', media, [Audio, Video])
         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)

-        self.
+        self.media: Union[Audio, Video] = media
         """
-        The
+        The media source, with all its properties,
         that is placed in the timeline.
         """
         self.start: Fraction = Fraction(start)
         """
-        The time moment in which the
+        The time moment in which the media should
         start playing, within the timeline.

         This is the time respect to the timeline
-        and its different from the
+        and its different from the media `start`
         time, which is related to the file.
         """

-    def 
+    def _get_t(
         self,
         t: Union[int, float, Fraction]
     ) -> float:
         """
-        The
+        The media 't' time moment for the given
         global 't' time moment. This 't' is the one
-        to use inside the
+        to use inside the media content to display
         its frame.
         """
-        # TODO:
-        # argument itself must be precise (?)
+        # TODO: Should we make sure 't' is truncated (?)
         return t - self.start

     def is_playing(
@@ -82,48 +85,48 @@ class VideoOnTrack:
         t: Union[int, float, Fraction]
     ) -> bool:
         """
-        Check if this
+        Check if this media is playing at the general
         't' time moment, which is a global time moment
         for the whole project.
         """
-        # TODO:
-        # argument itself must be precise (?)
+        # TODO: Should we make sure 't' is truncated (?)
         return self.start <= t < self.end
+
+class _MediaOnTrackWithAudio(_MediaOnTrack):
+    """
+    Class that implements the ability of
+    getting audio frames. This class must
+    be inherited by any other class that
+    has this same ability.
+    """

-    def 
+    def __init__(
         self,
-        
-        
-        
-        
-        
-        
-        """
-        # TODO: Use 'T' here to be precise or the
-        # argument itself must be precise (?)
-        return (
-            self.video.get_frame_from_t(self._get_video_t(t))
-            if self.is_playing(t) else
-            None
+        media: Union[Audio, Video],
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        super().__init__(
+            media = media,
+            start = start
         )
-
+
     def get_audio_frame_at(
         self,
         t: Union[int, float, Fraction]
     ) -> Union[AudioFrame, None]:
         """
         Get the audio frame for the 't' time moment
-        provided, that could be None if the
+        provided, that could be None if the media
         is not playing in that moment.
         """
         # TODO: Use 'T' here to be precise or the
         # argument itself must be precise (?)
         return (
-            self.
+            self.media.get_audio_frame_from_t(self._get_t(t))
             if self.is_playing(t) else
             None
         )
-
+
     def get_audio_frames_at(
         self,
         t: Union[int, float, Fraction]
@@ -131,7 +134,7 @@ class VideoOnTrack:
         """
         Get the audio frames that must be played at
         the 't' time moment provided, that could be
-        None if the
+        None if the audio is not playing at that
         moment.

         This method will return None if no audio
@@ -141,7 +144,7 @@ class VideoOnTrack:
         # TODO: Use 'T' here to be precise or the
         # argument itself must be precise (?)
         frames = (
-            self.
+            self.media.get_audio_frames_from_t(self._get_t(t))
            if self.is_playing(t) else
            []
        )
@@ -149,9 +152,71 @@ class VideoOnTrack:
         for frame in frames:
             yield frame

-
-
-
-
-
-
+class _MediaOnTrackWithVideo(_MediaOnTrack):
+    """
+    Class that implements the ability of
+    getting video frames. This class must
+    be inherited by any other class that
+    has this same ability.
+    """
+
+    def __init__(
+        self,
+        media: Video,
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        super().__init__(
+            media = media,
+            start = start
+        )
+
+    def get_frame_at(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> Union[VideoFrame, None]:
+        """
+        Get the frame for the 't' time moment provided,
+        that could be None if the video is not playing
+        in that moment.
+        """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
+        return (
+            self.media.get_frame_from_t(self._get_t(t))
+            if self.is_playing(t) else
+            None
+        )
+
+class AudioOnTrack(_MediaOnTrackWithAudio):
+    """
+    A video in the timeline.
+    """
+
+    def __init__(
+        self,
+        media: Audio,
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        ParameterValidator.validate_mandatory_instance_of('media', media, Audio)
+
+        super().__init__(
+            media = media,
+            start = start
+        )
+
+class VideoOnTrack(_MediaOnTrackWithAudio, _MediaOnTrackWithVideo):
+    """
+    A video in the timeline.
+    """
+
+    def __init__(
+        self,
+        media: Video,
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        ParameterValidator.validate_mandatory_instance_of('media', media, Video)
+
+        super().__init__(
+            media = media,
+            start = start
+        )
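For orientation, the sketch below is not part of the diff: it shows how the refactored hierarchy is meant to be driven, with a `Video` source wrapped by `VideoOnTrack` together with its start offset on the timeline, and frame lookups going through the shared `_MediaOnTrack` helpers. The file path and the times used are illustrative only.

```python
from quicktions import Fraction

from yta_video_opengl.video import Video
from yta_video_opengl.complete.track.media import VideoOnTrack

# Wrap a subclip of a source file (path and times are made up)
# and place it on the timeline starting at t=2.
video = Video('input.mp4', 0.25, 0.75)
on_track = VideoOnTrack(media = video, start = 2)

t = Fraction(5, 2)  # a global timeline time moment
if on_track.is_playing(t):
    # 'end' is start + media.duration, so this holds while
    # start <= t < end; the media-local time is t - start.
    frame = on_track.get_frame_at(t)              # VideoFrame or None
    audio = list(on_track.get_audio_frames_at(t)) # associated audio frames
```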
yta_video_opengl/complete/track/parts.py
ADDED
@@ -0,0 +1,230 @@
+from yta_video_opengl.complete.track.media import AudioOnTrack, VideoOnTrack
+from yta_video_opengl.complete.track.utils import generate_silent_frames
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
+from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
+from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
+from yta_video_opengl.t import fps_to_time_base
+from yta_validation.parameter import ParameterValidator
+from quicktions import Fraction
+from typing import Union
+from abc import ABC
+
+
+NON_LIMITED_EMPTY_PART_END = 999
+"""
+A value to indicate that the empty part
+has no end because it is in the last
+position and there is no video after it.
+"""
+
+class _Part(ABC):
+    """
+    Abstract class to represent an element
+    that is on the track, that can be an
+    empty space or a vide or audio. This
+    class must be inherited by our own
+    custom part classes.
+    """
+
+    @property
+    def is_empty_part(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the part is an empty part,
+        which means that there is no media associated
+        but an empty space.
+        """
+        return self.media is None
+
+    def __init__(
+        self,
+        track: Union['AudioTrack', 'VideoTrack'],
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+        ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
+        ParameterValidator.validate_instance_of('audimediao', media, [AudioOnTrack, VideoOnTrack])
+
+        self._track: Union['AudioTrack', 'VideoTrack'] = track
+        """
+        The instance of the track this part belongs
+        to.
+        """
+        self.start: Fraction = Fraction(start)
+        """
+        The start 't' time moment of the part.
+        """
+        self.end: Fraction = Fraction(end)
+        """
+        The end 't' time moment of the part.
+        """
+        self.media: Union[AudioOnTrack, VideoOnTrack, None] = media
+        """
+        The media associated, if existing, or
+        None if it is an empty space that we need
+        to fulfill.
+        """
+
+class _AudioPart(_Part):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or an audio.
+    """
+
+    def __init__(
+        self,
+        track: 'AudioTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+        # TODO: Can I refactor this below (?)
+        self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+        """
+        Useful internal tool to generate silent
+        audio frames for the empty parts.
+        """
+
+    def get_audio_frames_at(
+        self,
+        t: Union[int, float, Fraction]
+    ):
+        """
+        Iterate over all the audio frames that
+        exist at the time moment 't' provided.
+        """
+        if not self.is_empty_part:
+            for frame in self.media.get_audio_frames_at(t):
+                yield AudioFrameWrapped(
+                    frame = frame,
+                    is_from_empty_part = False
+                )
+        else:
+            frames = generate_silent_frames(
+                fps = self._track.fps,
+                audio_fps = self._track.audio_fps,
+                audio_samples_per_frame = self._track.audio_samples_per_frame,
+                # TODO: Where do this 2 formats come from (?)
+                layout = self._track.audio_layout,
+                format = self._track.audio_format
+            )
+
+            for frame in frames:
+                yield frame
+
+class _VideoPart(_Part):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or a video.
+    """
+
+    def __init__(
+        self,
+        track: 'VideoTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+        # TODO: Can I refactor this below (?)
+        self._video_frame_generator: VideoFrameGenerator = VideoFrameGenerator()
+        """
+        Useful internal tool to generate background
+        frames for the empty parts.
+        """
+        # TODO: Can I refactor this below (?)
+        self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+        """
+        Useful internal tool to generate silent
+        audio frames for the empty parts.
+        """
+
+    def get_frame_at(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> 'VideoFrameWrapped':
+        """
+        Get the frame that must be displayed at
+        the given 't' time moment.
+        """
+        frame = (
+            # TODO: What about the 'format' (?)
+            # TODO: Maybe I shouldn't set the 'time_base'
+            # here and do it just in the Timeline 'render'
+            #return get_black_background_video_frame(self._track.size)
+            # TODO: This 'time_base' maybe has to be related
+            # to a Timeline general 'time_base' and not the fps
+            VideoFrameWrapped(
+                frame = self._video_frame_generator.background.full_black(
+                    size = self._track.size,
+                    time_base = fps_to_time_base(self._track.fps)
+                ),
+                is_from_empty_part = True
+            )
+            if self.is_empty_part else
+            VideoFrameWrapped(
+                frame = self.media.get_frame_at(t),
+                is_from_empty_part = False
+            )
+        )
+
+        # TODO: This should not happen because of
+        # the way we handle the videos here but the
+        # video could send us a None frame here, so
+        # do we raise exception (?)
+        if frame._frame is None:
+            #frame = get_black_background_video_frame(self._track.size)
+            # TODO: By now I'm raising exception to check if
+            # this happens or not because I think it would
+            # be malfunctioning
+            raise Exception(f'Video is returning None video frame at t={str(t)}.')
+
+        return frame
+
+    def get_audio_frames_at(
+        self,
+        t: Union[int, float, Fraction]
+    ):
+        """
+        Iterate over all the audio frames that
+        exist at the time moment 't' provided.
+        """
+        if not self.is_empty_part:
+            for frame in self.media.get_audio_frames_at(t):
+                yield AudioFrameWrapped(
+                    frame = frame,
+                    is_from_empty_part = False
+                )
+        else:
+            frames = generate_silent_frames(
+                fps = self._track.fps,
+                audio_fps = self._track.audio_fps,
+                audio_samples_per_frame = self._track.audio_samples_per_frame,
+                # TODO: Where do this 2 formats come from (?)
+                layout = self._track.audio_layout,
+                format = self._track.audio_format
+            )
+
+            for frame in frames:
+                yield frame
+
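Both part classes hand every frame back wrapped and tagged with its origin, so downstream code (the timeline mixer) can tell real media apart from gap filler. A minimal consumer sketch, assuming the `AudioFrameWrapped` wrapper exposes the `is_from_empty_part` flag it is constructed with (that attribute name is an assumption, not confirmed by this diff):

```python
def mixable_frames(part, t):
    # Yield only the frames that come from real media on the track,
    # skipping the silence generated to fill empty parts.
    for wrapped in part.get_audio_frames_at(t):
        if wrapped.is_from_empty_part:
            continue  # filler silence for a gap; a mixer could weight it at zero instead
        yield wrapped
```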
yta_video_opengl/complete/track/utils.py
ADDED
@@ -0,0 +1,78 @@
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
+from yta_video_opengl.complete.frame_generator import AudioFrameGenerator
+from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
+
+
+# TODO: Is this method here ok (?)
+def generate_silent_frames(
+    fps: int,
+    audio_fps: int,
+    audio_samples_per_frame: int,
+    layout: str = 'stereo',
+    format: str = 'fltp'
+) -> list[AudioFrameWrapped]:
+    """
+    Get the audio silent frames we need for
+    a video with the given 'fps', 'audio_fps'
+    and 'audio_samples_per_frame', using the
+    also provided 'layout' and 'format' for
+    the audio frames.
+
+    This method is used when we have empty
+    parts on our tracks and we need to
+    provide the frames, that are passed as
+    AudioFrameWrapped instances and tagged as
+    coming from empty parts.
+    """
+    audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+
+    # Check how many full and partial silent
+    # audio frames we need
+    number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
+        video_fps = fps,
+        sample_rate = audio_fps,
+        number_of_samples_per_audio_frame = audio_samples_per_frame
+    )
+
+    # The complete silent frames we need
+    silent_frame = audio_frame_generator.silent(
+        sample_rate = audio_fps,
+        layout = layout,
+        number_of_samples = audio_samples_per_frame,
+        format = format,
+        pts = None,
+        time_base = None
+    )
+
+    frames = (
+        [
+            AudioFrameWrapped(
+                frame = silent_frame,
+                is_from_empty_part = True
+            )
+        ] * number_of_frames
+        if number_of_frames > 0 else
+        []
+    )
+
+    # The remaining partial silent frames we need
+    if number_of_remaining_samples > 0:
+        silent_frame = audio_frame_generator.silent(
+            sample_rate = audio_fps,
+            # TODO: Check where do we get this value from
+            layout = layout,
+            number_of_samples = number_of_remaining_samples,
+            # TODO: Check where do we get this value from
+            format = format,
+            pts = None,
+            time_base = None
+        )
+
+        frames.append(
+            AudioFrameWrapped(
+                frame = silent_frame,
+                is_from_empty_part = True
+            )
+        )
+
+    return frames
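The split computed by `audio_frames_and_remainder_per_video_frame` is plain integer arithmetic: one video frame spans `audio_fps / fps` samples, which is chopped into as many full audio frames of `audio_samples_per_frame` samples as fit, plus one shorter remainder frame. A worked example with assumed, typical values (the helper's exact rounding may differ):

```python
# Assumed example values: 30 fps video, 44.1 kHz audio, 1024 samples per audio frame.
fps = 30
audio_fps = 44_100
audio_samples_per_frame = 1_024

samples_per_video_frame = audio_fps // fps                                        # 1470 samples per 1/30 s
number_of_frames = samples_per_video_frame // audio_samples_per_frame             # 1 full silent frame
number_of_remaining_samples = samples_per_video_frame % audio_samples_per_frame   # 446 samples left over

print(number_of_frames, number_of_remaining_samples)  # -> 1 446
```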
yta_video_opengl/reader/__init__.py
CHANGED
@@ -696,25 +696,6 @@ class VideoReader:
         self.container.close()


-# TODO: I think I'm not using this...
-# Remove it please
-def audio_ts_for_video_t(
-    t: float,
-    video_fps: float,
-    audio_fps: float
-):
-    # Remember, from [t_start, t_end), the last one
-    # is not included
-    audio_t_start = int(t * audio_fps)
-    audio_t_end = int((t + 1.0 / video_fps) * audio_fps)
-
-    return [
-        i / audio_fps
-        for i in range(audio_t_start, audio_t_end)
-    ]
-
-
-
 """
 When reading packets directly from the stream
 we can receive packets with size=0, but we need
yta_video_opengl/tests.py
CHANGED
@@ -601,15 +601,25 @@ def video_modified_stored():
     # must be played at the same time
     video = Video(VIDEO_PATH, 0.25, 0.75)
     timeline = Timeline()
-
-
-
-
+
+    transitions_30fps = 'C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4'
+    simpsons_60fps = 'C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4'
+
+    # Track 1
+    timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75, track_index = 0)
+    timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
+    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
+
+    timeline.tracks[0].mute()
+
+    # Track 2
+    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
+    timeline.add_video(Video(simpsons_60fps, 5.8, 7.8), 0.6, track_index = 1)
     # 30fps
     # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
     # 29.97fps
     # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
-
+
     timeline.render(OUTPUT_PATH)

     return
yta_video_opengl/video.py
CHANGED
@@ -9,7 +9,8 @@ from typing import Union
 # TODO: Where can I obtain this dynamically (?)
 PIXEL_FORMAT = 'yuv420p'

-# TODO: Maybe
+# TODO: Maybe create a _Media(ABC) to put
+# some code shared with the Audio class
 class Video:
     """
     Class to wrap the functionality related to
@@ -107,11 +108,6 @@ class Video:
         The iterator will iterate first over the
         video frames, and once finished over the
         audio frames.
-
-        This method returns a tuple of 3 elements:
-        - `frame` as a `VideoFrame` instance
-        - `t` as the frame time moment
-        - `index` as the frame index
         """
         for frame in self.reader.get_frames(self.start, self.end):
             yield frame
@@ -155,7 +151,7 @@ class Video:
     # methods because this Video can be subclipped
     # and have a 'start' and' end' that are
     # different from [0, end)
-    def 
+    def _get_t(
         self,
         t: Union[int, float, Fraction]
     ) -> Fraction:
@@ -183,9 +179,9 @@ class Video:
         Get the video frame with the given 't' time
         moment, using the video cache system.
         """
-        print(f'Getting frame from {str(float(t))} that is actually {str(float(self.
-        return self.reader.get_frame(self.
-        #return self.reader.video_cache.get_frame(self.
+        print(f'Getting frame from {str(float(t))} that is actually {str(float(self._get_t(t)))}')
+        return self.reader.get_frame(self._get_t(t))
+        #return self.reader.video_cache.get_frame(self._get_t(t))

     def get_audio_frame_from_t(
         self,
@@ -200,7 +196,7 @@ class Video:

         TODO: Is this actually necessary (?)
         """
-        return self.reader.get_audio_frame_from_t(self.
+        return self.reader.get_audio_frame_from_t(self._get_t(t))

     def get_audio_frames_from_t(
         self,
@@ -217,8 +213,8 @@ class Video:
         (remember that a video frame is associated
         with more than 1 audio frame).
         """
-        print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self.
-        for frame in self.reader.get_audio_frames_from_t(self.
+        print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_t(t)))}')
+        for frame in self.reader.get_audio_frames_from_t(self._get_t(t)):
             yield frame

     def save_as(