yta-video-opengl 0.0.19-py3-none-any.whl → 0.0.21-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- yta_video_opengl/audio.py +219 -0
- yta_video_opengl/complete/frame_combinator.py +1 -90
- yta_video_opengl/complete/frame_generator.py +40 -0
- yta_video_opengl/complete/frame_wrapper.py +13 -0
- yta_video_opengl/complete/timeline.py +200 -116
- yta_video_opengl/complete/track/__init__.py +500 -0
- yta_video_opengl/complete/{video_on_track.py → track/media/__init__.py} +112 -47
- yta_video_opengl/complete/track/parts.py +267 -0
- yta_video_opengl/complete/track/utils.py +78 -0
- yta_video_opengl/reader/__init__.py +0 -19
- yta_video_opengl/reader/cache/__init__.py +9 -5
- yta_video_opengl/reader/cache/utils.py +1 -1
- yta_video_opengl/tests.py +29 -1
- yta_video_opengl/video.py +9 -13
- {yta_video_opengl-0.0.19.dist-info → yta_video_opengl-0.0.21.dist-info}/METADATA +1 -1
- yta_video_opengl-0.0.21.dist-info/RECORD +30 -0
- yta_video_opengl/complete/track.py +0 -562
- yta_video_opengl-0.0.19.dist-info/RECORD +0 -27
- {yta_video_opengl-0.0.19.dist-info → yta_video_opengl-0.0.21.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.19.dist-info → yta_video_opengl-0.0.21.dist-info}/WHEEL +0 -0

yta_video_opengl/complete/{video_on_track.py → track/media/__init__.py}

@@ -13,17 +13,21 @@ at `t=2` and it has been playing during 1s
 it started at `t=2`, lasting 2s, so it
 finished at `t=4`
 """
+from yta_video_opengl.audio import Audio
 from yta_video_opengl.video import Video
 from yta_validation.parameter import ParameterValidator
-from av.video.frame import VideoFrame
 from av.audio.frame import AudioFrame
+from av.video.frame import VideoFrame
 from quicktions import Fraction
 from typing import Union
+from abc import ABC
 
 
-class
+class _MediaOnTrack(ABC):
     """
-
+    Class to be inherited by any media class
+    that will be placed on a track and should
+    manage this condition.
     """
 
     @property
@@ -31,50 +35,49 @@ class VideoOnTrack:
         self
     ) -> Fraction:
         """
-        The end time moment 't' of the
+        The end time moment 't' of the audio once
         once its been placed on the track, which
-        is affected by the
+        is affected by the audio duration and its
         start time moment on the track.
 
-        This end is different from the
+        This end is different from the audio end.
         """
-        return self.start + self.
+        return self.start + self.media.duration
 
     def __init__(
         self,
-
+        media: Union[Audio, Video],
         start: Union[int, float, Fraction] = 0.0
     ):
-        ParameterValidator.validate_mandatory_instance_of('
+        ParameterValidator.validate_mandatory_instance_of('media', media, [Audio, Video])
         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
 
-        self.
+        self.media: Union[Audio, Video] = media
         """
-        The
+        The media source, with all its properties,
         that is placed in the timeline.
         """
         self.start: Fraction = Fraction(start)
         """
-        The time moment in which the
+        The time moment in which the media should
        start playing, within the timeline.
 
        This is the time respect to the timeline
-        and its different from the
+        and its different from the media `start`
        time, which is related to the file.
        """
 
-    def
+    def _get_t(
        self,
        t: Union[int, float, Fraction]
    ) -> float:
        """
-        The
+        The media 't' time moment for the given
        global 't' time moment. This 't' is the one
-        to use inside the
+        to use inside the media content to display
        its frame.
        """
-        # TODO:
-        # argument itself must be precise (?)
+        # TODO: Should we make sure 't' is truncated (?)
        return t - self.start
 
    def is_playing(
@@ -82,48 +85,48 @@ class VideoOnTrack:
         t: Union[int, float, Fraction]
     ) -> bool:
         """
-        Check if this
+        Check if this media is playing at the general
         't' time moment, which is a global time moment
         for the whole project.
         """
-        # TODO:
-        # argument itself must be precise (?)
+        # TODO: Should we make sure 't' is truncated (?)
         return self.start <= t < self.end
+
+class _MediaOnTrackWithAudio(_MediaOnTrack):
+    """
+    Class that implements the ability of
+    getting audio frames. This class must
+    be inherited by any other class that
+    has this same ability.
+    """
 
-    def
+    def __init__(
         self,
-
-
-
-
-
-
-        """
-        # TODO: Use 'T' here to be precise or the
-        # argument itself must be precise (?)
-        return (
-            self.video.get_frame_from_t(self._get_video_t(t))
-            if self.is_playing(t) else
-            None
+        media: Union[Audio, Video],
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        super().__init__(
+            media = media,
+            start = start
         )
-
+
     def get_audio_frame_at(
         self,
         t: Union[int, float, Fraction]
     ) -> Union[AudioFrame, None]:
         """
         Get the audio frame for the 't' time moment
-        provided, that could be None if the
+        provided, that could be None if the media
         is not playing in that moment.
         """
         # TODO: Use 'T' here to be precise or the
         # argument itself must be precise (?)
         return (
-            self.
+            self.media.get_audio_frame_from_t(self._get_t(t))
             if self.is_playing(t) else
             None
         )
-
+
     def get_audio_frames_at(
         self,
         t: Union[int, float, Fraction]
@@ -131,7 +134,7 @@ class VideoOnTrack:
         """
         Get the audio frames that must be played at
         the 't' time moment provided, that could be
-        None if the
+        None if the audio is not playing at that
         moment.
 
         This method will return None if no audio
@@ -141,7 +144,7 @@ class VideoOnTrack:
         # TODO: Use 'T' here to be precise or the
         # argument itself must be precise (?)
         frames = (
-            self.
+            self.media.get_audio_frames_from_t(self._get_t(t))
            if self.is_playing(t) else
            []
        )
@@ -149,9 +152,71 @@ class VideoOnTrack:
         for frame in frames:
             yield frame
 
-
-
-
-
-
-
+class _MediaOnTrackWithVideo(_MediaOnTrack):
+    """
+    Class that implements the ability of
+    getting video frames. This class must
+    be inherited by any other class that
+    has this same ability.
+    """
+
+    def __init__(
+        self,
+        media: Video,
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        super().__init__(
+            media = media,
+            start = start
+        )
+
+    def get_frame_at(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> Union[VideoFrame, None]:
+        """
+        Get the frame for the 't' time moment provided,
+        that could be None if the video is not playing
+        in that moment.
+        """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
+        return (
+            self.media.get_frame_from_t(self._get_t(t))
+            if self.is_playing(t) else
+            None
+        )
+
+class AudioOnTrack(_MediaOnTrackWithAudio):
+    """
+    A video in the timeline.
+    """
+
+    def __init__(
+        self,
+        media: Audio,
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        ParameterValidator.validate_mandatory_instance_of('media', media, Audio)
+
+        super().__init__(
+            media = media,
+            start = start
+        )
+
+class VideoOnTrack(_MediaOnTrackWithAudio, _MediaOnTrackWithVideo):
+    """
+    A video in the timeline.
+    """
+
+    def __init__(
+        self,
+        media: Video,
+        start: Union[int, float, Fraction] = 0.0
+    ):
+        ParameterValidator.validate_mandatory_instance_of('media', media, Video)
+
+        super().__init__(
+            media = media,
+            start = start
+        )
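
The renamed module generalises the old `VideoOnTrack` into a small `_MediaOnTrack` hierarchy whose whole job is placement arithmetic: a clip placed at `start` ends at `start + media.duration`, counts as playing on the half-open interval `[start, end)`, and maps a timeline `t` to a local media `t` by subtracting `start`. Below is a minimal sketch of that arithmetic, matching the module docstring's example (placed at t=2, lasting 2s, finished at t=4); `FakeMedia` and `MediaOnTrackSketch` are illustrative stand-ins, not classes from the package, and the stdlib `Fraction` is used in place of the API-compatible `quicktions.Fraction`.

```python
# Sketch only: FakeMedia and MediaOnTrackSketch are illustrative stand-ins,
# not part of yta_video_opengl.
from fractions import Fraction  # the package uses the API-compatible quicktions.Fraction


class FakeMedia:
    """Stand-in for Audio/Video: only exposes a duration."""

    def __init__(self, duration):
        self.duration = Fraction(duration)


class MediaOnTrackSketch:
    """The placement arithmetic implemented by the _MediaOnTrack hierarchy."""

    def __init__(self, media, start = 0):
        self.media = media
        self.start = Fraction(start)

    @property
    def end(self):
        # Placement end = placement start + media duration.
        return self.start + self.media.duration

    def is_playing(self, t):
        # Playing on the half-open interval [start, end).
        return self.start <= t < self.end

    def _get_t(self, t):
        # Timeline 't' -> local 't' inside the media content.
        return t - self.start


clip = MediaOnTrackSketch(FakeMedia(2), start = 2)
print(clip.end)            # 4
print(clip.is_playing(3))  # True
print(clip.is_playing(4))  # False, the end is excluded
print(clip._get_t(3))      # 1, i.e. one second into the media
```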

yta_video_opengl/complete/track/parts.py (new file)

@@ -0,0 +1,267 @@
+from yta_video_opengl.complete.track.media import AudioOnTrack, VideoOnTrack
+from yta_video_opengl.complete.track.utils import generate_silent_frames
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
+from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
+from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
+from yta_video_opengl.t import fps_to_time_base
+from yta_validation.parameter import ParameterValidator
+from quicktions import Fraction
+from typing import Union
+from abc import ABC
+
+
+NON_LIMITED_EMPTY_PART_END = 999
+"""
+A value to indicate that the empty part
+has no end because it is in the last
+position and there is no video after it.
+"""
+
+class _Part(ABC):
+    """
+    Abstract class to represent an element
+    that is on the track, that can be an
+    empty space or a vide or audio. This
+    class must be inherited by our own
+    custom part classes.
+    """
+
+    @property
+    def is_empty_part(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the part is an empty part,
+        which means that there is no media associated
+        but an empty space.
+        """
+        return self.media is None
+
+    def __init__(
+        self,
+        track: Union['AudioTrack', 'VideoTrack'],
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+        ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
+        ParameterValidator.validate_instance_of('audimediao', media, [AudioOnTrack, VideoOnTrack])
+
+        self._track: Union['AudioTrack', 'VideoTrack'] = track
+        """
+        The instance of the track this part belongs
+        to.
+        """
+        self.start: Fraction = Fraction(start)
+        """
+        The start 't' time moment of the part.
+        """
+        self.end: Fraction = Fraction(end)
+        """
+        The end 't' time moment of the part.
+        """
+        self.media: Union[AudioOnTrack, VideoOnTrack, None] = media
+        """
+        The media associated, if existing, or
+        None if it is an empty space that we need
+        to fulfill.
+        """
+
+class _PartWithAudio(_Part):
+    """
+    TODO: Explain
+    """
+
+    def __init__(
+        self,
+        track: Union['AudioTrack', 'VideoTrack'],
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, [AudioOnTrack, VideoOnTrack])
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+        # TODO: Can I refactor this below (?)
+        self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+        """
+        Useful internal tool to generate silent
+        audio frames for the empty parts.
+        """
+
+    def get_audio_frames_at(
+        self,
+        t: Union[int, float, Fraction]
+    ):
+        """
+        Iterate over all the audio frames that
+        exist at the time moment 't' provided.
+        """
+        frames = []
+        if not self.is_empty_part:
+            # TODO: What do we do in this case (?)
+            frames = list(self.media.get_audio_frames_at(t))
+
+            if len(frames) == 0:
+                print(f' [ERROR] Audio frame {str(float(t))} was not obtained')
+            else:
+                frames = [
+                    AudioFrameWrapped(
+                        frame = frame,
+                        is_from_empty_part = False
+                    )
+                    for frame in frames
+                ]
+
+        # This could be because is empty part or
+        # because we couldn't obtain the frames
+        if len(frames) == 0:
+            frames = generate_silent_frames(
+                fps = self._track.fps,
+                audio_fps = self._track.audio_fps,
+                audio_samples_per_frame = self._track.audio_samples_per_frame,
+                # TODO: Where do this 2 formats come from (?)
+                layout = self._track.audio_layout,
+                format = self._track.audio_format
+            )
+
+        for frame in frames:
+            yield frame
+
+class _PartWithVideo(_Part):
+    """
+    TODO: Explain
+    """
+
+    def __init__(
+        self,
+        track: 'VideoTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+        # TODO: Can I refactor this below (?)
+        self._video_frame_generator: VideoFrameGenerator = VideoFrameGenerator()
+        """
+        Useful internal tool to generate background
+        frames for the empty parts.
+        """
+
+    def get_frame_at(
+        self,
+        t: Union[int, float, Fraction]
+    ) -> 'VideoFrameWrapped':
+        """
+        Get the frame that must be displayed at
+        the given 't' time moment.
+        """
+        is_from_empty_part = False
+        if self.is_empty_part:
+            frame = self._video_frame_generator.background.full_black(
+                size = self._track.size,
+                time_base = fps_to_time_base(self._track.fps)
+            )
+
+            is_from_empty_part = True
+        else:
+            # TODO: This can be None, why? I don't know...
+            frame = self.media.get_frame_at(t)
+
+            if frame is None:
+                print(f' [ERROR] Frame {str(float(t))} was not obtained')
+
+            frame = (
+                # I'm using a red full frame to be able to detect
+                # fast the frames that were not available, but
+                # I need to find the error and find a real solution
+                self._video_frame_generator.background.full_red(
+                    size = self._track.size,
+                    time_base = fps_to_time_base(self._track.fps)
+                )
+                if frame is None else
+                frame
+            )
+
+        # TODO: What about the 'format' (?)
+        # TODO: Maybe I shouldn't set the 'time_base'
+        # here and do it just in the Timeline 'render'
+        #return get_black_background_video_frame(self._track.size)
+        # TODO: This 'time_base' maybe has to be related
+        # to a Timeline general 'time_base' and not the fps
+        frame = VideoFrameWrapped(
+            frame = frame,
+            is_from_empty_part = is_from_empty_part
+        )
+
+        # TODO: This should not happen because of
+        # the way we handle the videos here but the
+        # video could send us a None frame here, so
+        # do we raise exception (?)
+        if frame._frame is None:
+            #frame = get_black_background_video_frame(self._track.size)
+            # TODO: By now I'm raising exception to check if
+            # this happens or not because I think it would
+            # be malfunctioning
+            raise Exception(f'Video is returning None video frame at t={str(t)}.')
+
+        return frame
+
+class _AudioPart(_PartWithAudio):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or an audio.
+    """
+
+    def __init__(
+        self,
+        track: 'AudioTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+class _VideoPart(_PartWithAudio, _PartWithVideo):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or a video.
+    """
+
+    def __init__(
+        self,
+        track: 'VideoTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
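
`_PartWithVideo.get_frame_at` above is essentially a fallback chain: an empty part yields a generated black background frame, a media part whose source returns `None` yields a deliberately loud red background, and the chosen frame is wrapped together with an `is_from_empty_part` flag. A reduced sketch of that decision flow, with plain strings standing in for the real `VideoFrameGenerator` backgrounds and `VideoFrameWrapped` objects:

```python
# Reduced sketch of the fallback chain in _PartWithVideo.get_frame_at;
# the strings stand in for real frame objects.
def choose_frame(is_empty_part: bool, media_frame):
    if is_empty_part:
        # No media on this stretch of the track: quiet black background.
        return ('black_background', True)
    if media_frame is None:
        # The media failed to produce a frame: loud red background so the
        # missing frame is easy to spot while the cause is investigated.
        return ('red_background', False)
    # Normal case: the media frame itself, not from an empty part.
    return (media_frame, False)


print(choose_frame(True, None))          # ('black_background', True)
print(choose_frame(False, None))         # ('red_background', False)
print(choose_frame(False, 'frame@t=3'))  # ('frame@t=3', False)
```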

yta_video_opengl/complete/track/utils.py (new file)

@@ -0,0 +1,78 @@
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
+from yta_video_opengl.complete.frame_generator import AudioFrameGenerator
+from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
+
+
+# TODO: Is this method here ok (?)
+def generate_silent_frames(
+    fps: int,
+    audio_fps: int,
+    audio_samples_per_frame: int,
+    layout: str = 'stereo',
+    format: str = 'fltp'
+) -> list[AudioFrameWrapped]:
+    """
+    Get the audio silent frames we need for
+    a video with the given 'fps', 'audio_fps'
+    and 'audio_samples_per_frame', using the
+    also provided 'layout' and 'format' for
+    the audio frames.
+
+    This method is used when we have empty
+    parts on our tracks and we need to
+    provide the frames, that are passed as
+    AudioFrameWrapped instances and tagged as
+    coming from empty parts.
+    """
+    audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+
+    # Check how many full and partial silent
+    # audio frames we need
+    number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
+        video_fps = fps,
+        sample_rate = audio_fps,
+        number_of_samples_per_audio_frame = audio_samples_per_frame
+    )
+
+    # The complete silent frames we need
+    silent_frame = audio_frame_generator.silent(
+        sample_rate = audio_fps,
+        layout = layout,
+        number_of_samples = audio_samples_per_frame,
+        format = format,
+        pts = None,
+        time_base = None
+    )
+
+    frames = (
+        [
+            AudioFrameWrapped(
+                frame = silent_frame,
+                is_from_empty_part = True
+            )
+        ] * number_of_frames
+        if number_of_frames > 0 else
+        []
+    )
+
+    # The remaining partial silent frames we need
+    if number_of_remaining_samples > 0:
+        silent_frame = audio_frame_generator.silent(
+            sample_rate = audio_fps,
+            # TODO: Check where do we get this value from
+            layout = layout,
+            number_of_samples = number_of_remaining_samples,
+            # TODO: Check where do we get this value from
+            format = format,
+            pts = None,
+            time_base = None
+        )
+
+        frames.append(
+            AudioFrameWrapped(
+                frame = silent_frame,
+                is_from_empty_part = True
+            )
+        )
+
+    return frames
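
`generate_silent_frames` relies on `audio_frames_and_remainder_per_video_frame` to decide how many silent frames to emit for one video frame. That helper is not shown in this diff; presumably it splits the `audio_fps / fps` samples covered by a single video frame into full audio frames of `audio_samples_per_frame` samples plus a remainder, which is the arithmetic sketched below (the real helper's signature and rounding may differ). The full frames are emitted first and the remainder becomes the final, shorter silent frame, mirroring the two `audio_frame_generator.silent(...)` calls above.

```python
# Hedged sketch of the split generate_silent_frames depends on; the real
# audio_frames_and_remainder_per_video_frame helper may round differently.
def frames_and_remainder(video_fps: int, sample_rate: int, samples_per_frame: int):
    # Samples covered by one video frame, e.g. 44100 // 30 = 1470.
    samples_per_video_frame = sample_rate // video_fps
    full_frames = samples_per_video_frame // samples_per_frame
    remainder = samples_per_video_frame % samples_per_frame
    return full_frames, remainder


print(frames_and_remainder(30, 44100, 1024))  # (1, 446): one full frame plus 446 samples
```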

yta_video_opengl/reader/__init__.py

@@ -696,25 +696,6 @@ class VideoReader:
         self.container.close()
 
 
-# TODO: I think I'm not using this...
-# Remove it please
-def audio_ts_for_video_t(
-    t: float,
-    video_fps: float,
-    audio_fps: float
-):
-    # Remember, from [t_start, t_end), the last one
-    # is not included
-    audio_t_start = int(t * audio_fps)
-    audio_t_end = int((t + 1.0 / video_fps) * audio_fps)
-
-    return [
-        i / audio_fps
-        for i in range(audio_t_start, audio_t_end)
-    ]
-
-
-
 """
 When reading packets directly from the stream
 we can receive packets with size=0, but we need
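
For reference, the removed `audio_ts_for_video_t` helper (flagged in its own comments as unused) computed the audio sample timestamps covered by a single video frame as a half-open range. A compact, runnable restatement of what it returned:

```python
# The removed helper, restated only to show what it computed.
def audio_ts_for_video_t(t: float, video_fps: float, audio_fps: float):
    # Sample indices covered by one video frame, [t_start, t_end) with the
    # end excluded, converted back to seconds.
    audio_t_start = int(t * audio_fps)
    audio_t_end = int((t + 1.0 / video_fps) * audio_fps)
    return [i / audio_fps for i in range(audio_t_start, audio_t_end)]


print(audio_ts_for_video_t(0, 25, 100))  # [0.0, 0.01, 0.02, 0.03]
```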

yta_video_opengl/reader/cache/__init__.py

@@ -155,11 +155,15 @@ class FrameCache(ABC):
         seek and start decoding frames from that
         keyframe.
         """
-        return max(
-
-
-
-
+        return max(
+            (
+                key_frame_pts
+                for key_frame_pts in self.key_frames_pts
+                if key_frame_pts <= pts
+            ),
+            # If no key frames, just 0
+            default = 0
+        )
 
     def _store_frame_in_cache(
         self,
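
The cache change rewrites the keyframe lookup so that `max()` receives `default = 0`: when no keyframe `pts` precedes the requested `pts`, the lookup now falls back to the start of the stream instead of raising the `ValueError` that `max()` throws on an empty iterable. A standalone illustration of the behaviour the new code relies on (the example `key_frames_pts` values are made up):

```python
# Standalone illustration; the key_frames_pts values are made up.
def nearest_keyframe_pts(pts, key_frames_pts):
    return max(
        (
            key_frame_pts
            for key_frame_pts in key_frames_pts
            if key_frame_pts <= pts
        ),
        # If no key frame precedes 'pts', fall back to the stream start.
        default = 0
    )


print(nearest_keyframe_pts(300, [0, 256, 512]))  # 256
print(nearest_keyframe_pts(300, []))             # 0, instead of a ValueError
```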