yta-video-opengl 0.0.17__tar.gz → 0.0.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/PKG-INFO +1 -1
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/pyproject.toml +2 -2
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/frame_generator.py +9 -2
- yta_video_opengl-0.0.19/src/yta_video_opengl/complete/frame_wrapper.py +122 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/timeline.py +201 -70
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/track.py +181 -58
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/video_on_track.py +1 -1
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/video.py +3 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/tests.py +19 -4
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/video.py +6 -2
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/LICENSE +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/README.md +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/classes.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/frame_combinator.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/__init__.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/audio.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/utils.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/t.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/utils.py +0 -0
- {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/writer.py +0 -0
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/pyproject.toml
RENAMED
@@ -1,6 +1,6 @@
 [project]
 name = "yta-video-opengl"
-version = "0.0.17"
+version = "0.0.19"
 description = "Youtube Autonomous Video OpenGL Module"
 authors = [
     {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -14,7 +14,7 @@ dependencies = [
     "av (>=0.0.1,<19.0.0)",
     "moderngl (>=0.0.1,<9.0.0)",
     "numpy (>=0.0.1,<9.0.0)",
-    "quicktions (>=0.0.1,<9.0.0)"
+    "quicktions (>=0.0.1,<9.0.0)",
 ]
 
 [tool.poetry]
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/frame_generator.py
RENAMED
@@ -5,6 +5,13 @@ array that will be used for it. We will
 receive the values as (width, height) but
 we will invert them when needed.
 
+The frames that come from an empty part
+are flagged with the .metadata attribute
+'is_from_empty_part' so we can recognize
+them and ignore them when combining on the
+timeline. We have that metadata in the
+wrapper class we created.
+
 TODO: Check because we have a similar
 module in other project or projects.
 """
@@ -170,7 +177,7 @@ class AudioFrameGenerator:
         number_of_samples: int = 1024,
         format = 's16',
         pts: Union[int, None] = None,
-        time_base: Union['Fraction', None] = None
+        time_base: Union['Fraction', None] = None
     ) -> AudioFrame:
         """
         Get an audio frame that is completely silent.
@@ -241,7 +248,7 @@ def numpy_to_audio_frame(
 
     if time_base is not None:
         frame.time_base = time_base
-
+
     return frame
 
 # TODO: Maybe transform into a Enum (?)
yta_video_opengl-0.0.19/src/yta_video_opengl/complete/frame_wrapper.py
ADDED
@@ -0,0 +1,122 @@
+from yta_validation.parameter import ParameterValidator
+from av.video.frame import VideoFrame
+from av.audio.frame import AudioFrame
+from typing import Union
+
+
+IS_FROM_EMPTY_PART_METADATA = 'is_from_empty_part'
+
+class _FrameWrappedBase:
+    """
+    Class to wrap video and audio frames from
+    the pyav library but to support a metadata
+    field to inject some information we need
+    when processing and combining them.
+    """
+
+    @property
+    def is_from_empty_part(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the frame comes from
+        an empty part or not, that will be done
+        by checking the 'is_from_empty_part'
+        attribute in the metadata.
+        """
+        return IS_FROM_EMPTY_PART_METADATA in self.metadata
+
+    def __init__(
+        self,
+        frame,
+        metadata: dict = {}
+    ):
+        ParameterValidator.validate_mandatory_instance_of('frame', frame, [VideoFrame, AudioFrame])
+        ParameterValidator.validate_mandatory_dict('metadata', metadata)
+
+        self._frame: Union[VideoFrame, AudioFrame] = frame
+        self.metadata: dict = metadata or {}
+
+    def __getattr__(
+        self,
+        name
+    ):
+        return getattr(self._frame, name)
+
+    def __setattr__(
+        self,
+        name,
+        value
+    ):
+        super().__setattr__(name, value)
+        #setattr(self._frame, name, value)
+        # if name in ('_frame', 'metadata'):
+        #     super().__setattr__(name, value)
+        # else:
+        #     setattr(self._frame, name, value)
+
+    def __repr__(
+        self
+    ):
+        cname = self.__class__.__name__
+        return f'<{cname} metadata={self.metadata} frame={self._frame!r}>'
+
+    def set_as_from_empty_part(
+        self
+    ) -> None:
+        """
+        Add the metadata information to indicate
+        that this is a frame that comes from an
+        empty part.
+        """
+        self.metadata[IS_FROM_EMPTY_PART_METADATA] = 'True'
+
+    def unwrap(
+        self
+    ):
+        """
+        Get the original frame instance.
+        """
+        return self._frame
+
+class VideoFrameWrapped(_FrameWrappedBase):
+    """
+    Class to wrap video frames from the pyav
+    library but to support a metadata field
+    to inject some information we need when
+    processing and combining them.
+    """
+
+    def __init__(
+        self,
+        frame: VideoFrame,
+        metadata: dict = {},
+        is_from_empty_part: bool = False
+    ):
+        ParameterValidator.validate_mandatory_instance_of('frame', frame, VideoFrame)
+
+        super().__init__(frame, metadata)
+
+        if is_from_empty_part:
+            self.set_as_from_empty_part()
+
+class AudioFrameWrapped(_FrameWrappedBase):
+    """
+    Class to wrap audio frames from the pyav
+    library but to support a metadata field
+    to inject some information we need when
+    processing and combining them.
+    """
+
+    def __init__(
+        self,
+        frame: AudioFrame,
+        metadata: dict = {},
+        is_from_empty_part: bool = False
+    ):
+        ParameterValidator.validate_mandatory_instance_of('frame', frame, AudioFrame)
+
+        super().__init__(frame, metadata)
+
+        if is_from_empty_part:
+            self.set_as_from_empty_part()
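For reference, a minimal usage sketch of the new wrapper module (not taken from the package; the 16x16 frame is just an illustrative value, and attribute access is delegated to the inner pyav frame through __getattr__):

from av.video.frame import VideoFrame
from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped

# Hypothetical example frame, illustrative only
frame = VideoFrame(16, 16, format = 'rgb24')
wrapped = VideoFrameWrapped(frame, is_from_empty_part = True)

print(wrapped.is_from_empty_part)    # True, read from the metadata dict
print(wrapped.width, wrapped.height) # 16 16, delegated to the inner frame
print(wrapped.unwrap() is frame)     # True, the original pyav frame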
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/timeline.py
RENAMED
@@ -13,11 +13,14 @@ video written).
 from yta_video_opengl.complete.track import Track
 from yta_video_opengl.video import Video
 from yta_video_opengl.t import get_ts, fps_to_time_base, T
+from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped, AudioFrameWrapped
 from yta_video_opengl.complete.frame_combinator import AudioFrameCombinator
 from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
 from av.video.frame import VideoFrame
 from av.audio.frame import AudioFrame
 from quicktions import Fraction
+from functools import reduce
 from typing import Union
 
 import numpy as np
@@ -51,48 +54,103 @@ class Timeline:
         audio_fps: Union[int, Fraction] = 44_100.0, # 48_000.0 for aac
         # TODO: I don't like this name
         # TODO: Where does this come from (?)
-        audio_samples_per_frame: int = 1024
+        audio_samples_per_frame: int = 1024,
+        video_codec: str = 'h264',
+        video_pixel_format: str = 'yuv420p',
+        audio_codec: str = 'aac'
     ):
-        # TODO: By now we are using just two video
-        # tracks to test the composition
         # TODO: We need to be careful with the
         # priority, by now its defined by its
         # position in the array
-        self.tracks: list[Track] = [
-            Track(
-                size = size,
-                fps = fps,
-                audio_fps = audio_fps,
-                # TODO: I need more info about the audio
-                # I think
-                audio_samples_per_frame = audio_samples_per_frame
-            ),
-            Track(
-                size = size,
-                fps = fps,
-                audio_fps = audio_fps,
-                # TODO: I need more info about the audio
-                # I think
-                audio_samples_per_frame = audio_samples_per_frame
-            )
-        ]
+        self.tracks: list[Track] = []
         """
         All the video tracks we are handling.
         """
-        # TODO: Handle the other properties
-        self.size = size
-        self.fps = fps
-        self.audio_fps = audio_fps
 
-
-
+        self.size: tuple[int, int] = size
+        """
+        The size that the final video must have.
+        """
+        self.fps: Union[int, float, Fraction] = fps
+        """
+        The fps of the output video.
+        """
+        self.audio_fps: Union[int, Fraction] = audio_fps
+        """
+        The fps of the output audio.
+        """
+        self.audio_samples_per_frame: int = audio_samples_per_frame
+        """
+        The audio samples each audio frame must
+        have.
+        """
+        self.video_codec: str = video_codec
+        """
+        The video codec for the video exported.
+        """
+        self.video_pixel_format: str = video_pixel_format
+        """
+        The pixel format for the video exported.
+        """
+        self.audio_codec: str = audio_codec
+        """
+        The audio codec for the audio exported.
+        """
+
+        # We will have 2 tracks by now
+        self.add_track().add_track()
+
+    def add_track(
+        self,
+        index: Union[int, None] = None
+    ) -> 'Timeline':
+        """
+        Add a new track to the timeline, that will
+        be placed in the last position (last
+        priority).
+
+        It will be a video track unless you provide
+        'is_audio_track' parameter as True.
+        """
+        index = (
+            index
+            if (
+                index is not None and
+                index <= len(self.tracks)
+            ) else
+            len(self.tracks)
+        )
+
+        # We need to change the index of the
+        # affected tracks (the ones that are
+        # in that index and after it)
+        if index < len(self.tracks):
+            for track in self.tracks:
+                if track.index >= index:
+                    track.index += 1
+
+        self.tracks.append(Track(
+            size = self.size,
+            index = index,
+            fps = self.fps,
+            audio_fps = self.audio_fps,
+            # TODO: I need more info about the audio
+            # I think
+            audio_samples_per_frame = self.audio_samples_per_frame,
+            # TODO: Where do we obtain this from (?)
+            audio_layout = 'stereo',
+            audio_format = 'fltp'
+        ))
+
+        return self
+
+    # TODO: Create a 'remove_track'
+
     def add_video(
         self,
         video: Video,
         t: Union[int, float, Fraction],
-
-        # disappear
-        do_use_second_track: bool = False
+        track_index: int = 0
     ) -> 'Timeline':
         """
         Add the provided 'video' to the timeline,
@@ -101,16 +159,14 @@ class Timeline:
         TODO: The 'do_use_second_track' parameter
         is temporary.
         """
-
-        # just to be able to test mixing frames
-        # from 2 different tracks at the same
-        # time
-        index = 1 * do_use_second_track
+        ParameterValidator.validate_mandatory_number_between('track_index', track_index, 0, len(self.tracks))
 
-        self.tracks[
+        self.tracks[track_index].add_video(video, t)
 
         return self
 
+    # TODO: Create a 'remove_video'
+
     # TODO: This method is not for the Track but
     # for the timeline, as one track can only
     # have consecutive elements
@@ -122,7 +178,7 @@ class Timeline:
         Get all the frames that are played at the
         't' time provided, but combined in one.
         """
-        frames = (
+        frames = list(
            track.get_frame_at(t)
            for track in self.tracks
        )
@@ -134,21 +190,45 @@ class Timeline:
         # other frame in other track, or to know if
         # I want them as transparent or something
 
-        # TODO: This is just a test function
-        from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
-
         # TODO: Combinate frames, we force them to
         # rgb24 to obtain them with the same shape,
         # but maybe we have to change this because
         # we also need to handle alphas
-
+
+        # TODO: We need to ignore the ones that are
+        # tagged with
+        # .metadata['is_from_empty_part'] = 'True'
+
+        """
+        1. Only empty frames
+           -> Black background, keep one
+        2. Empty frames but other frames:
+           -> Skip all empty frames and apply
+              track orders
+        """
+
+        output_frame = frames[0]._frame.to_ndarray(format = 'rgb24')
+
         for frame in frames:
-            #
-            #
-            #
-            #
-            #
-
+            # We just need the first non-empty frame,
+            # that must be from the track with the
+            # bigger priority
+            # TODO: I assume, by now, that the frames
+            # come in order (bigger priority first)
+            if not frame.is_from_empty_part:
+                # TODO: By now I'm just returning the first
+                # one but we will need to check the alpha
+                # layer to combine if possible
+                output_frame = frame._frame.to_ndarray(format = 'rgb24')
+                break
+
+            # # TODO: This code below is to combine the
+            # # frames but merging all of them, that is
+            # # unexpected in a video editor but we have
+            # # the way to do it
+            # from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
+            # # TODO: What about the 'format' (?)
+            # output_frame = VideoFrameCombinator.blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))
 
         # TODO: How to build this VideoFrame correctly
         # and what about the 'format' (?)
@@ -159,7 +239,7 @@ class Timeline:
         self,
         t: float
     ):
-        audio_frames = []
+        audio_frames: list[AudioFrameWrapped] = []
         """
         Matrix in which the rows are the different
         tracks we have, and the column includes all
@@ -179,18 +259,36 @@ class Timeline:
         # not in the combination process
 
         # We need only 1 single audio frame per column
-
+        collapsed_frames = [
            concatenate_audio_frames(frames)
            for frames in audio_frames
        ]
 
+        # TODO: What about the lengths and those
+        # things? They should be ok because they are
+        # based on our output but I'm not completely
+        # sure here..
+
+        # We keep only the non-silent frames because
+        # we will sum them after and keeping them
+        # will change the results.
+        non_empty_collapsed_frames = [
+            frame._frame
+            for frame in collapsed_frames
+            if not frame.is_from_empty_part
+        ]
+
+        if len(non_empty_collapsed_frames) == 0:
+            # If they were all silent, just keep one
+            non_empty_collapsed_frames = [collapsed_frames[0]._frame]
+
         # Now, mix column by column (track by track)
         # TODO: I do this to have an iterator, but
         # maybe we need more than one single audio
         # frame because of the size at the original
         # video or something...
         frames = [
-            AudioFrameCombinator.sum_tracks_frames(
+            AudioFrameCombinator.sum_tracks_frames(non_empty_collapsed_frames, self.audio_fps)
         ]
 
         for audio_frame in frames:
@@ -308,18 +406,36 @@ class Timeline:
         writer.mux_audio_frame(None)
         writer.output.close()
 
+def _is_empty_part_frame(
+    frame: Union['VideoFrameWrapped', 'AudioFrameWrapped']
+) -> bool:
+    """
+    Flag to indicate if the frame comes from
+    an empty part or not.
+
+    TODO: The 'metadata' is included in our
+    wrapper class, not in VideoFrame or
+    AudioFrame classes. I should be sending
+    the wrapper in all the code, but by now
+    I'm doing it just in specific cases.
+    """
+    return (
+        hasattr(frame, 'metadata') and
+        frame.is_from_empty_part
+    )
 
-# TODO: I don't know where to put this
-# method because if a bit special
 # TODO: Refactor and move please
+# TODO: This has to work for AudioFrame
+# also, but I need it working for Wrapped
 def concatenate_audio_frames(
-    frames: list[
-) ->
+    frames: list[AudioFrameWrapped]
+) -> AudioFrameWrapped:
     """
-
-
-
-
+    Concatenate all the given 'frames' in one
+    single audio frame and return it.
+
+    The audio frames must have the same layout
+    and sample rate.
     """
     if not frames:
         # TODO: This should not happen
@@ -328,29 +444,44 @@ def concatenate_audio_frames(
     if len(frames) == 1:
         return frames[0]
 
-    #
-
-
-
+    # We need to preserve the metadata
+    is_from_empty_part = all(
+        frame.is_from_empty_part
+        for frame in frames
+    )
+    metadata = reduce(lambda key_values, frame: {**key_values, **frame.metadata}, frames, {})
+
+    sample_rate = frames[0]._frame.sample_rate
+    layout = frames[0]._frame.layout.name
 
     arrays = []
-
-
+    # TODO: What about 'metadata' (?)
+    for frame in frames:
+        if (
+            frame._frame.sample_rate != sample_rate or
+            frame._frame.layout.name != layout
+        ):
            raise ValueError('The frames must have the same sample_rate and layout')
 
-        # arr =
+        # arr = frame.to_ndarray()  # (channels, samples)
        # if arr.dtype == np.int16:
        #     arr = arr.astype(np.float32) / 32768.0
        # elif arr.dtype != np.float32:
        #     arr = arr.astype(np.float32)
 
-        arrays.append(
+        arrays.append(frame._frame.to_ndarray())
 
-    # We concatenate along the samples axis
    combined = np.concatenate(arrays, axis = 1)
 
-
-
+    out = AudioFrame.from_ndarray(
+        array = combined,
+        format = frames[0].format,
+        layout = layout
+    )
    out.sample_rate = sample_rate
 
-    return
+    return AudioFrameWrapped(
+        frame = out,
+        metadata = metadata,
+        is_from_empty_part = is_from_empty_part
+    )
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/track.py
RENAMED
@@ -1,8 +1,10 @@
 from yta_video_opengl.complete.video_on_track import VideoOnTrack
 from yta_video_opengl.video import Video
 from yta_video_opengl.t import T
+from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
 from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
 from yta_video_opengl.t import fps_to_time_base
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
 from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
 from yta_validation.parameter import ParameterValidator
 from quicktions import Fraction
@@ -82,30 +84,38 @@ class _Part:
     def get_frame_at(
         self,
         t: Union[int, float, Fraction]
-    ) -> '
+    ) -> 'VideoFrameWrapped':
         """
         Get the frame that must be displayed at
         the given 't' time moment.
         """
-
+        frame = (
            # TODO: What about the 'format' (?)
            # TODO: Maybe I shouldn't set the 'time_base'
            # here and do it just in the Timeline 'render'
            #return get_black_background_video_frame(self._track.size)
            # TODO: This 'time_base' maybe has to be related
            # to a Timeline general 'time_base' and not the fps
-
-
-
+            VideoFrameWrapped(
+                frame = self._video_frame_generator.background.full_black(
+                    size = self._track.size,
+                    time_base = fps_to_time_base(self._track.fps)
+                ),
+                is_from_empty_part = True
            )
-
-
+            if self.is_empty_part else
+            VideoFrameWrapped(
+                frame = self.video.get_frame_at(t),
+                is_from_empty_part = False
+            )
+
+        )
 
         # TODO: This should not happen because of
         # the way we handle the videos here but the
         # video could send us a None frame here, so
         # do we raise exception (?)
-        if frame is None:
+        if frame._frame is None:
             #frame = get_black_background_video_frame(self._track.size)
             # TODO: By now I'm raising exception to check if
             # this happens or not because I think it would
@@ -118,54 +128,29 @@ class _Part:
         self,
         t: Union[int, float, Fraction]
     ):
+        """
+        Iterate over all the audio frames that
+        exist at the time moment 't' provided.
+        """
         if not self.is_empty_part:
-
+            for frame in self.video.get_audio_frames_at(t):
+                yield AudioFrameWrapped(
+                    frame = frame,
+                    is_from_empty_part = False
+                )
         else:
-
-
-
-
-
-
-
+            frames = generate_silent_frames(
+                fps = self._track.fps,
+                audio_fps = self._track.audio_fps,
+                audio_samples_per_frame = self._track.audio_samples_per_frame,
+                # TODO: Where do these 2 formats come from (?)
+                layout = self._track.audio_layout,
+                format = self._track.audio_format
            )
 
-
-
-        frames = (
-            [
-                self._audio_frame_generator.silent(
-                    sample_rate = self._track.audio_fps,
-                    # TODO: Check where do we get this value from
-                    layout = 'stereo',
-                    number_of_samples = self._track.audio_samples_per_frame,
-                    # TODO: Check where do we get this value from
-                    format = 'fltp',
-                    pts = None,
-                    time_base = None
-                )
-            ] * number_of_frames
-            if number_of_frames > 0 else
-            []
-        )
+            for frame in frames:
+                yield frame
 
-        # The remaining partial silent frames we need
-        if number_of_remaining_samples > 0:
-            frames.append(
-                self._audio_frame_generator.silent(
-                    sample_rate = self._track.audio_fps,
-                    # TODO: Check where do we get this value from
-                    layout = 'stereo',
-                    number_of_samples = number_of_remaining_samples,
-                    # TODO: Check where do we get this value from
-                    format = 'fltp',
-                    pts = None,
-                    time_base = None
-                )
-            )
-
-        for frame in frames:
-            yield frame
 
     # TODO: I don't like using t as float,
     # we need to implement fractions.Fraction
@@ -228,27 +213,50 @@ class Track:
         from first to last.
         """
         return sorted(self._videos, key = lambda video: video.start)
+
+    @property
+    def is_muted(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the track is muted or
+        not. Being muted means that no audio frames
+        will be returned from this track.
+        """
+        return self._is_muted
 
     def __init__(
         self,
         # TODO: I need the general settings of the
         # project to be able to make audio also, not
         # only the empty frames
+        index: int,
         size: tuple[int, int],
         fps: float,
         audio_fps: float,
         # TODO: Where does it come from (?)
-        audio_samples_per_frame: int
+        audio_samples_per_frame: int,
+        audio_layout: str = 'stereo',
+        audio_format: str = 'fltp'
     ):
         self._videos: list[VideoOnTrack] = []
         """
         The list of 'VideoOnTrack' instances that
         must play on this track.
         """
+        self._is_muted: bool = False
+        """
+        Internal flag to indicate if the track is
+        muted or not.
+        """
         self.size: tuple[int, int] = size
         """
         The size of the videos of this track.
         """
+        self.index: int = index
+        """
+        The index of the track within the timeline.
+        """
         self.fps: float = float(fps)
         """
         The fps of the track, needed to calculate
@@ -264,6 +272,16 @@ class Track:
         """
         The number of samples per audio frame.
         """
+        self.audio_layout: str = audio_layout
+        """
+        The layout of the audio, that can be 'mono'
+        or 'stereo'.
+        """
+        self.audio_format: str = audio_format
+        """
+        The format of the audio, that can be 's16',
+        'flt', 'fltp', etc.
+        """
 
     def _is_free(
         self,
@@ -277,8 +295,8 @@ class Track:
         """
         return not any(
             (
-                video.
-                video.
+                video.start < end and
+                video.end > start
             )
             for video in self.videos
         )
@@ -304,10 +322,28 @@ class Track:
            raise Exception('NON_LIMITED_EMPTY_PART_END exceeded.')
        return None
 
+    def mute(
+        self
+    ) -> 'Track':
+        """
+        Set the track as muted so no audio frame will
+        be played from this track.
+        """
+        self._is_muted = True
+
+    def unmute(
+        self
+    ) -> 'Track':
+        """
+        Set the track as unmuted so the audio frames
+        will be played as normal.
+        """
+        self._is_muted = False
+
     def get_frame_at(
         self,
         t: Union[int, float, Fraction]
-    ) -> '
+    ) -> 'VideoFrameWrapped':
         """
         Get the frame that must be displayed at
         the 't' time moment provided, which is
@@ -347,7 +383,19 @@ class Track:
         (remember that a video frame is associated
         with more than 1 audio frame).
         """
-
+        frames = (
+            generate_silent_frames(
+                fps = self.fps,
+                audio_fps = self.audio_fps,
+                audio_samples_per_frame = self.audio_samples_per_frame,
+                layout = self.audio_layout,
+                format = self.audio_format
+            )
+            if self.is_muted else
+            self._get_part_at_t(t).get_audio_frames_at(t)
+        )
+
+        for frame in frames:
            yield frame
 
     def add_video(
@@ -376,7 +424,8 @@ class Track:
         # TODO: We can have many different strategies
         # that we could define in the '__init__' maybe
         t: T = T.from_fps(t, self.fps)
-        if not self._is_free(t.truncated, t.next(1).truncated):
+        #if not self._is_free(t.truncated, t.next(1).truncated):
+        if not self._is_free(t.truncated, t.truncated + video.duration):
            raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
            t = t.truncated
        else:
@@ -436,4 +485,78 @@ class Track:
 
         self._parts = parts
 
-        return self
+        return self
+
+# TODO: Is this method here ok (?)
+def generate_silent_frames(
+    fps: int,
+    audio_fps: int,
+    audio_samples_per_frame: int,
+    layout: str = 'stereo',
+    format: str = 'fltp'
+) -> list[AudioFrameWrapped]:
+    """
+    Get the audio silent frames we need for
+    a video with the given 'fps', 'audio_fps'
+    and 'audio_samples_per_frame', using the
+    also provided 'layout' and 'format' for
+    the audio frames.
+
+    This method is used when we have empty
+    parts on our tracks and we need to
+    provide the frames, that are passed as
+    AudioFrameWrapped instances and tagged as
+    coming from empty parts.
+    """
+    audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+
+    # Check how many full and partial silent
+    # audio frames we need
+    number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
+        video_fps = fps,
+        sample_rate = audio_fps,
+        number_of_samples_per_audio_frame = audio_samples_per_frame
+    )
+
+    # The complete silent frames we need
+    silent_frame = audio_frame_generator.silent(
+        sample_rate = audio_fps,
+        layout = layout,
+        number_of_samples = audio_samples_per_frame,
+        format = format,
+        pts = None,
+        time_base = None
+    )
+
+    frames = (
+        [
+            AudioFrameWrapped(
+                frame = silent_frame,
+                is_from_empty_part = True
+            )
+        ] * number_of_frames
+        if number_of_frames > 0 else
+        []
+    )
+
+    # The remaining partial silent frames we need
+    if number_of_remaining_samples > 0:
+        silent_frame = audio_frame_generator.silent(
+            sample_rate = audio_fps,
+            # TODO: Check where do we get this value from
+            layout = layout,
+            number_of_samples = number_of_remaining_samples,
+            # TODO: Check where do we get this value from
+            format = format,
+            pts = None,
+            time_base = None
+        )
+
+        frames.append(
+            AudioFrameWrapped(
+                frame = silent_frame,
+                is_from_empty_part = True
+            )
+        )
+
+    return frames
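For intuition, 'audio_frames_and_remainder_per_video_frame' has to split the samples that cover one video frame into full audio frames plus a remainder; a sketch of the presumed arithmetic (the real implementation lives in yta_video_opengl.utils and may differ):

from quicktions import Fraction

def frames_and_remainder(video_fps, sample_rate, samples_per_audio_frame: int):
    # Samples needed while a single video frame is on screen
    samples_per_video_frame = int(Fraction(sample_rate) / Fraction(video_fps))

    # Full silent frames plus the partial frame left over
    return divmod(samples_per_video_frame, samples_per_audio_frame)

# 44_100 Hz at 60 fps -> 735 samples per video frame:
# 0 full frames of 1024 samples and a 735-sample remainder
print(frames_and_remainder(60, 44_100, 1024))  # (0, 735)
# At 30 fps -> 1470 samples: 1 full frame and a 446-sample remainder
print(frames_and_remainder(30, 44_100, 1024))  # (1, 446)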
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/video.py
RENAMED
@@ -51,7 +51,10 @@ class VideoFrameCache(FrameCache):
         Get the video frame that is in the 't'
         time moment provided.
         """
+        print(f'Getting frame from {str(float(t))}')
+        print(f'FPS: {str(self.fps)}')
         t: T = T.from_fps(t, self.fps)
+        print(f'So the actual frame is from {str(float(t.truncated))} to {str(float(t.next(1).truncated))}')
         for frame in self.get_frames(t.truncated, t.next(1).truncated):
             return frame
 
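The debug prints rely on T.from_fps snapping 't' onto the frame grid so the cache can look up the [truncated, next) interval; a rough equivalent of that snapping, with the semantics of 'T' assumed from the prints above:

from quicktions import Fraction

def frame_interval(t, fps):
    # Snap 't' to the frame it falls in and return its [start, end) range
    time_base = Fraction(1, 1) / Fraction(fps)
    frame_index = int(Fraction(t) / time_base)

    return frame_index * time_base, (frame_index + 1) * time_base

# t = 0.33s at 30 fps falls in frame index 9: [0.3, 0.3333...)
start, end = frame_interval(Fraction(33, 100), 30)
print(float(start), float(end))  # 0.3 0.3333333333333333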
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/tests.py
RENAMED
@@ -601,10 +601,25 @@ def video_modified_stored():
     # must be played at the same time
     video = Video(VIDEO_PATH, 0.25, 0.75)
     timeline = Timeline()
-
-
-
-
+
+    transitions_30fps = 'C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4'
+    simpsons_60fps = 'C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4'
+
+    # Track 1
+    timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75, track_index = 0)
+    timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
+    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
+
+    timeline.tracks[0].mute()
+
+    # Track 2
+    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
+    timeline.add_video(Video(simpsons_60fps, 5.8, 7.8), 0.6, track_index = 1)
+    # 30fps
+    # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
+    # 29.97fps
+    # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
+
     timeline.render(OUTPUT_PATH)
 
     return
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/video.py
RENAMED
@@ -167,8 +167,9 @@ class Video:
         actually the 1.5s of the video because of
         the subclipped time range.
         """
-        t
+        t += self.start
 
+        print(f'Video real t is {str(float(t))}')
         if t >= self.end:
             raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
 
@@ -182,7 +183,9 @@ class Video:
         Get the video frame with the given 't' time
         moment, using the video cache system.
         """
-
+        print(f'Getting frame from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
+        return self.reader.get_frame(self._get_real_t(t))
+        #return self.reader.video_cache.get_frame(self._get_real_t(t))
 
     def get_audio_frame_from_t(
         self,
@@ -214,6 +217,7 @@ class Video:
         (remember that a video frame is associated
         with more than 1 audio frame).
         """
+        print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
         for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
             yield frame
 
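The subclip mapping here is just a shift by the subclip start plus a bounds check; a worked example with the values used in the tests (a hypothetical standalone function mirroring the diff's logic):

from quicktions import Fraction

def get_real_t(t, start, end):
    # Timeline-local 't' shifted into source-file time
    t += start
    if t >= end:
        raise Exception(f'The "t" ({t}) provided is out of range.')
    return t

# Video(VIDEO_PATH, 0.25, 0.75): local t = 0.1s reads the source at 0.35s
print(get_real_t(Fraction(1, 10), Fraction(1, 4), Fraction(3, 4)))  # 7/20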
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/LICENSE
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/README.md
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/classes.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/complete/frame_combinator.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/audio/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/video/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/nodes/video/opengl.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/__init__.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/audio.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/reader/cache/utils.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/t.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/utils.py
RENAMED
File without changes
{yta_video_opengl-0.0.17 → yta_video_opengl-0.0.19}/src/yta_video_opengl/writer.py
RENAMED
File without changes