yta-video-opengl 0.0.20__tar.gz → 0.0.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/PKG-INFO +1 -1
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/pyproject.toml +1 -1
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/audio.py +5 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/frame_combinator.py +1 -90
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/frame_generator.py +40 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/frame_wrapper.py +13 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/timeline.py +199 -118
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/__init__.py +14 -7
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/parts.py +99 -62
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/__init__.py +9 -5
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/tests.py +29 -1
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/LICENSE +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/README.md +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/classes.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/media/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/utils.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/__init__.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/audio.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/utils.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/video.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/t.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/utils.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/video.py +0 -0
- {yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/writer.py +0 -0
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/audio.py RENAMED
@@ -1,3 +1,8 @@
+"""
+TODO: This class has not been refactored nor
+tested. I need to put some love on it to make
+it work and test that it is working properly.
+"""
 from yta_video_opengl.reader import VideoReader
 from yta_video_opengl.writer import VideoWriter
 from yta_video_opengl.t import T
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/frame_combinator.py RENAMED
@@ -201,93 +201,4 @@ class AudioFrameCombinator:
         )
         out.sample_rate = sample_rate
 
-        return out
-
-    # TODO: This method below has been refactored
-    # to the 'sum_tracks_frames', so delete it
-    # when the one above is working well
-    def mix_audio_frames_by_index(
-        tracks_frames,
-        sample_rate: int,
-        layout = 'stereo',
-    ):
-        """
-        Combine all the columns of the given
-        matrix of audio frames 'tracks_frames'.
-        The rows are the different tracks and
-        the columns are the frame at that 't'
-        moment of each of those tracks.
-
-        The 'tracks_frames' matrix needs to be
-        pre-processed to have only 1 single
-        frame to combine, so we concatenate
-        all the frames if more than 1 per
-        column.
-        """
-        # TODO: Please, improve and clean all this
-        # code is so sh*tty, and make utils to
-        # combine and those things, not here...
-        # Also the formats, make them dynamic and
-        # based on the output that is defined here
-        # in the Timeline class.
-        mixed_frames = []
-
-        # Iterate by columns (each row is a track)
-        for frames_at_index in zip(*tracks_frames):
-            arrays = []
-            for f in frames_at_index:
-                # Resample to output expected values
-                # TODO: This must be dynamic depending
-                # on the track values
-                resampler = AudioResampler(format = 'fltp', layout = 'stereo', rate = sample_rate)
-                arr = resampler.resample(f)
-
-                arr = f.to_ndarray()
-
-                # TODO: This below must change depending
-                # on the expected output, for us and now
-                # it is float32, fltp, stereo, 44_100
-                # Same format
-                if arr.dtype == np.int16:
-                    arr = arr.astype(np.float32) / 32768.0
-
-                # Same layout (number of channels)
-                if arr.shape[0] == 1:
-                    return np.repeat(arr, 2, axis = 0)
-                # elif arr.dtype == np.float32:
-                #     # Already in [-1, 1], do not touch it
-                #     pass
-
-                arrays.append(arr)
-
-            # Align the lengths
-            max_len = max(a.shape[1] for a in arrays)
-            stacked = []
-            for a in arrays:
-                buf = np.zeros((a.shape[0], max_len), dtype = np.float32)
-                buf[:, :a.shape[1]] = a
-                stacked.append(buf)
-
-            # Mix
-            mix = np.sum(stacked, axis = 0) / len(stacked)
-            #mix = np.sum(stacked, axis = 0)
-
-            # Limit to the [-1, 1] range
-            mix = np.clip(mix, -1.0, 1.0)
-
-            # Create the output frame
-            # TODO: What about the 'format' if they
-            # are all different (?)
-            out = AudioFrame.from_ndarray(mix, format = 'fltp', layout = layout)
-            out.sample_rate = sample_rate
-            # TODO: This will be written later when
-            # encoding
-            # out.pts = frames_at_index[0].pts
-            # out.time_base = frames_at_index[0].time_base
-
-            print(mix.min(), mix.max())
-
-            mixed_frames.append(out)
-
-        return mixed_frames
-
+        return out
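For reference, the removed method boils down to a column-wise mix: normalize every frame to float32, match layouts, zero-pad to a common length, average, then clip. A minimal numpy sketch of that idea, detached from the pyav machinery (the function name and shapes are illustrative, not the package's API):

import numpy as np

def mix_columns(tracks_frames):
    """
    Average the i-th frame of every track into one frame
    per column. Frames are (channels, samples) arrays and
    int16 input is normalized to float32 in [-1, 1].
    """
    mixed = []
    for frames_at_index in zip(*tracks_frames):
        arrays = []
        for arr in frames_at_index:
            if arr.dtype == np.int16:
                arr = arr.astype(np.float32) / 32768.0
            if arr.shape[0] == 1:
                # Mono to stereo by duplicating the channel
                arr = np.repeat(arr, 2, axis = 0)
            arrays.append(arr)

        # Zero-pad every array to the longest one
        max_len = max(a.shape[1] for a in arrays)
        stacked = [
            np.pad(a, ((0, 0), (0, max_len - a.shape[1])))
            for a in arrays
        ]

        # Average and clip to the valid [-1, 1] range
        mixed.append(np.clip(np.mean(stacked, axis = 0), -1.0, 1.0))

    return mixed

Unlike the removed code, the sketch keeps mixing after upmixing mono instead of returning from inside the loop, which looks like one of the reasons the method was replaced by 'sum_tracks_frames'.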
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/frame_generator.py RENAMED
@@ -28,6 +28,9 @@ class _FrameGenerator:
     Class to generate frames as numpy arrays.
     """
 
+    # TODO: I have some library doing this with
+    # colors and numpy frames, so please refactor
+
     def full_black(
         self,
         size: tuple[int, int] = (1920, 1080),
@@ -62,6 +65,24 @@
             dtype = dtype
         )
 
+    def full_red(
+        self,
+        size: tuple[int, int] = (1920, 1080),
+        dtype: np.dtype = np.uint8
+    ):
+        """
+        Get a numpy array that represents a full
+        red frame of the given 'size' and with
+        the given 'dtype'.
+        """
+        # TODO: I think 'ones' only work if dtype
+        # is int
+        return np.full(
+            shape = (size[1], size[0], 3),
+            fill_value = (255, 0, 0),
+            dtype = dtype
+        )
+
 class _BackgroundFrameGenerator:
     """
     Internal class to simplify the way we
@@ -115,6 +136,25 @@ class _BackgroundFrameGenerator:
             time_base = time_base
         )
 
+    def full_red(
+        self,
+        size: tuple[int, int] = (1920, 1080),
+        dtype: np.dtype = np.uint8,
+        format: str = 'rgb24',
+        pts: Union[int, None] = None,
+        time_base: Union['Fraction', None] = None
+    ) -> VideoFrame:
+        """
+        Get a video frame that is completely red
+        and of the given 'size'.
+        """
+        return numpy_to_video_frame(
+            frame = self._frame_generator.full_red(size, dtype),
+            format = format,
+            pts = pts,
+            time_base = time_base
+        )
+
 class VideoFrameGenerator:
     """
     Class to wrap the functionality related to
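As a quick sanity check of what the new 'full_red' helpers produce: np.full broadcasts the (255, 0, 0) fill value across the last axis, so the result is an H x W x 3 array with every pixel pure red (standalone snippet, uint8 assumed):

import numpy as np

frame = np.full(shape = (1080, 1920, 3), fill_value = (255, 0, 0), dtype = np.uint8)

assert frame.shape == (1080, 1920, 3)
assert (frame[0, 0] == (255, 0, 0)).all()   # every pixel is pure red

This also sidesteps the TODO in the diff: np.full works the same for integer and float dtypes, whereas scaling np.ones would need care with the value range.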
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/frame_wrapper.py RENAMED
@@ -5,6 +5,12 @@ from typing import Union
 
 
 IS_FROM_EMPTY_PART_METADATA = 'is_from_empty_part'
+"""
+Metadata key to indicate if the frame
+has been generated by an empty part
+and should be ignored when trying to
+combine with others.
+"""
 
 class _FrameWrappedBase:
     """
@@ -35,7 +41,14 @@ class _FrameWrappedBase:
         ParameterValidator.validate_mandatory_dict('metadata', metadata)
 
         self._frame: Union[VideoFrame, AudioFrame] = frame
+        """
+        The VideoFrame or AudioFrame pyav instance.
+        """
         self.metadata: dict = metadata or {}
+        """
+        The metadata we want to include with the
+        frame.
+        """
 
     def __getattr__(
         self,
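The new docstrings make the design explicit: the wrapper, not the pyav frame, carries the metadata. A consumer can therefore detect empty-part filler frames the way the '_is_empty_part_frame' helper (which timeline.py drops in this release) did; a minimal sketch of that check, with a hypothetical helper name:

IS_FROM_EMPTY_PART_METADATA = 'is_from_empty_part'

def is_empty_part_frame(frame) -> bool:
    # Bare pyav VideoFrame/AudioFrame instances have no 'metadata'
    # attribute, which is exactly why the wrapper classes exist
    return (
        hasattr(frame, 'metadata') and
        frame.metadata.get(IS_FROM_EMPTY_PART_METADATA, False)
    )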
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/timeline.py RENAMED
@@ -10,12 +10,14 @@ an important property or will make ffmpeg
 become crazy and deny packets (that means no
 video written).
 """
-from yta_video_opengl.complete.track import VideoTrack
+from yta_video_opengl.complete.track import VideoTrack, AudioTrack
 from yta_video_opengl.video import Video
 from yta_video_opengl.t import get_ts, fps_to_time_base, T
-from yta_video_opengl.complete.frame_wrapper import
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
 from yta_video_opengl.complete.frame_combinator import AudioFrameCombinator
+from yta_video_opengl.writer import VideoWriter
 from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
 from av.video.frame import VideoFrame
 from av.audio.frame import AudioFrame
 from quicktions import Fraction
@@ -45,6 +47,80 @@ class Timeline:
             track.end
             for track in self.tracks
         )
+
+    @property
+    def tracks(
+        self
+    ) -> list[Union['AudioTrack', 'VideoTrack']]:
+        """
+        All the tracks we have but ordered by
+        their indexes, from lower index (highest
+        priority) to highest index (lowest
+        priority).
+        """
+        return sorted(self._tracks, key = lambda track: track.index)
+
+    @property
+    def video_tracks(
+        self
+    ) -> list['VideoTrack']:
+        """
+        All the video tracks we have but ordered
+        by their indexes, from lower index
+        (highest priority) to highest index
+        (lowest priority).
+        """
+        return [
+            track
+            for track in self.tracks
+            if PythonValidator.is_instance_of(track, 'VideoTrack')
+        ]
+
+    @property
+    def audio_tracks(
+        self
+    ) -> list['AudioTrack']:
+        """
+        All the audio tracks we have but ordered
+        by their indexes, from lower index
+        (highest priority) to highest index
+        (lowest priority).
+        """
+        return [
+            track
+            for track in self.tracks
+            if PythonValidator.is_instance_of(track, 'AudioTrack')
+        ]
+
+    @property
+    def number_of_tracks(
+        self
+    ) -> int:
+        """
+        The number of tracks we have in the
+        timeline.
+        """
+        return len(self.tracks)
+
+    @property
+    def number_of_video_tracks(
+        self
+    ) -> int:
+        """
+        The number of video tracks we have in the
+        timeline.
+        """
+        return len(self.video_tracks)
+
+    @property
+    def number_of_audio_tracks(
+        self
+    ) -> int:
+        """
+        The number of audio tracks we have in the
+        timeline.
+        """
+        return len(self.audio_tracks)
 
     def __init__(
         self,
@@ -56,14 +132,14 @@ class Timeline:
         audio_samples_per_frame: int = 1024,
         video_codec: str = 'h264',
         video_pixel_format: str = 'yuv420p',
-        audio_codec: str = 'aac'
+        audio_codec: str = 'aac',
+        # TODO: What about this below (?)
+        # audio_layout = 'stereo',
+        # audio_format = 'fltp'
     ):
-        # TODO: We need to be careful with the
-        # priority, by now its defined by its
-        # position in the array
         # TODO: By now I'm having only video
         # tracks
-        self.
+        self._tracks: list[VideoTrack] = []
         """
         All the video tracks we are handling.
         """
@@ -98,61 +174,112 @@ class Timeline:
         The audio codec for the audio exported.
         """
 
-        # We will have 2 tracks by now
-        self.
+        # We will have 2 video tracks by now
+        self.add_video_track().add_video_track()
 
-
-    # adding an AudioTrack
-    def add_track(
+    def _add_track(
         self,
-        index: Union[int, None] = None
+        index: Union[int, None] = None,
+        is_audio: bool = False
     ) -> 'Timeline':
         """
-        Add a new track to the timeline
-        be placed in the last position (
-        priority).
+        Add a new track to the timeline that will
+        be placed in the last position (highest
+        index, lowest priority).
 
-        It will be a video track unless you
-        '
+        It will be a video track unless you send
+        the 'is_audio' parameter as True.
         """
+        number_of_tracks = (
+            self.number_of_audio_tracks
+            if is_audio else
+            self.number_of_video_tracks
+        )
+
+        tracks = (
+            self.audio_tracks
+            if is_audio else
+            self.video_tracks
+        )
+
         index = (
             index
             if (
                 index is not None and
-                index <=
+                index <= number_of_tracks
            ) else
-
+            number_of_tracks
        )
 
         # We need to change the index of the
         # affected tracks (the ones that are
         # in that index and after it)
-        if index <
-        for track in
+        if index < number_of_tracks:
+            for track in tracks:
                 if track.index >= index:
                     track.index += 1
 
-
-
-
-
-
-
-
-
-
-
-
-
+        track = (
+            AudioTrack(
+                index = index,
+                fps = self.fps,
+                audio_fps = self.audio_fps,
+                audio_samples_per_frame = self.audio_samples_per_frame,
+                # TODO: Where do we obtain this from (?)
+                audio_layout = 'stereo',
+                audio_format = 'fltp'
+            )
+            if is_audio else
+            VideoTrack(
+                index = index,
+                size = self.size,
+                fps = self.fps,
+                audio_fps = self.audio_fps,
+                audio_samples_per_frame = self.audio_samples_per_frame,
+                # TODO: Where do we obtain this from (?)
+                audio_layout = 'stereo',
+                audio_format = 'fltp'
+            )
+        )
+
+        self._tracks.append(track)
 
         return self
+
+    def add_video_track(
+        self,
+        index: Union[int, None] = None
+    ) -> 'Timeline':
+        """
+        Add a new video track to the timeline, that
+        will be placed in the last position (highest
+        index, lowest priority).
+        """
+        return self._add_track(
+            index = index,
+            is_audio = False
+        )
+
+    def add_audio_track(
+        self,
+        index: Union[int, None] = None
+    ) -> 'Timeline':
+        """
+        Add a new audio track to the timeline, that
+        will be placed in the last position (highest
+        index, lowest priority).
+        """
+        return self._add_track(
+            index = index,
+            is_audio = True
+        )
 
     # TODO: Create a 'remove_track'
 
     def add_video(
         self,
         video: Video,
-        t: Union[int, float, Fraction],
+        t: Union[int, float, Fraction, None] = None,
         track_index: int = 0
     ) -> 'Timeline':
         """
@@ -162,17 +289,23 @@ class Timeline:
         TODO: The 'do_use_second_track' parameter
         is temporary.
         """
-        ParameterValidator.validate_mandatory_number_between('track_index', track_index, 0,
+        ParameterValidator.validate_mandatory_number_between('track_index', track_index, 0, self.number_of_tracks)
+
+        if track_index >= self.number_of_video_tracks:
+            raise Exception(f'The "track_index" {str(track_index)} provided does not exist in this timeline.')
 
-
+        # TODO: This should be, maybe, looking for
+        # tracks by using the index property, not
+        # as array index, but by now it is like
+        # this as it is not very robust yet
+        self.video_tracks[track_index].add_media(video, t)
 
         return self
 
-    # TODO: Create a 'remove_video'
+    # TODO: Create a 'remove_video'
+    # TODO: Create a 'add_audio'
+    # TODO: Create a 'remove_audio'
 
-    # TODO: This method is not for the Track but
-    # for the timeline, as one track can only
-    # have consecutive elements
     def get_frame_at(
         self,
         t: Union[int, float, Fraction]
@@ -183,26 +316,17 @@ class Timeline:
         """
         frames = list(
             track.get_frame_at(t)
-            for track in self.
+            for track in self.video_tracks
         )
-        # TODO: Here I receive black frames because
-        # it was empty, but I don't have a way to
-        # detect those black empty frames because
-        # they are just VideoFrame instances... I
-        # need a way to know so I can skip them if
-        # other frame in other track, or to know if
-        # I want them as transparent or something
-
         # TODO: Combinate frames, we force them to
         # rgb24 to obtain them with the same shape,
         # but maybe we have to change this because
         # we also need to handle alphas
 
-        # TODO: We need to ignore the ones that are
-        # tagged with
-        # .metadata['is_from_empty_part'] = 'True'
-
         """
+        We need to ignore the frames that are tagged
+        as coming from an empty part, so we can have:
+
         1. Only empty frames
            -> Black background, keep one
         2. Empty frames but other frames:
@@ -254,12 +378,16 @@ class Timeline:
         """
         # TODO: What if the different audio streams
         # have also different fps (?)
+        # We use both tracks because videos and
+        # audio tracks have both audios
         for track in self.tracks:
             # TODO: Make this work properly
             audio_frames.append(list(track.get_audio_frames_at(t)))
-
-
-
+
+        # TODO: I am receiving empty array here []
+        # that doesn't include any frame in a specific
+        # track that contains a video, why (?)
+        print(audio_frames)
 
         # We need only 1 single audio frame per column
         collapsed_frames = [
@@ -271,6 +399,7 @@ class Timeline:
         # things? They should be ok because they are
         # based on our output but I'm not completely
         # sure here..
+        print(collapsed_frames)
 
         # We keep only the non-silent frames because
         # we will sum them after and keeping them
@@ -299,9 +428,9 @@ class Timeline:
 
     def render(
         self,
-
+        output_filename: str = 'test_files/output_render.mp4',
         start: Union[int, float, Fraction] = 0.0,
-        end: Union[int, float, Fraction, None] = None
+        end: Union[int, float, Fraction, None] = None,
     ) -> 'Timeline':
         """
         Render the time range in between the given
@@ -311,122 +440,74 @@ class Timeline:
         If no 'start' and 'end' provided, the whole
         project will be rendered.
         """
-        ParameterValidator.validate_mandatory_string('
+        ParameterValidator.validate_mandatory_string('output_filename', output_filename, do_accept_empty = False)
         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
         ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
 
-        # TODO: Limitate 'end' a bit...
         end = (
             self.end
             if end is None else
             end
        )
 
+        # Limit 'end' a bit...
+        if end >= 300:
+            raise Exception('More than 5 minutes not supported yet.')
+
         if start >= end:
             raise Exception('The provided "start" cannot be greater or equal to the "end" provided.')
 
-
+        writer = VideoWriter(output_filename)
 
-        writer = VideoWriter('test_files/output_render.mp4')
         # TODO: This has to be dynamic according to the
-        # video we are writing
+        # video we are writing (?)
         writer.set_video_stream(
-            codec_name =
+            codec_name = self.video_codec,
             fps = self.fps,
             size = self.size,
-            pixel_format =
+            pixel_format = self.video_pixel_format
        )
 
         writer.set_audio_stream(
-            codec_name =
+            codec_name = self.audio_codec,
             fps = self.audio_fps
        )
 
         time_base = fps_to_time_base(self.fps)
         audio_time_base = fps_to_time_base(self.audio_fps)
 
-        """
-        We are trying to render this:
-        -----------------------------
-        [0 to 0.5) => Black frames
-        [0.5 to 1.25) => [0.25 to 1.0) of Video1
-        [1.25 to 1.75) => Black frames
-        [1.75 to 2.25) => [0.25 to 0.75) of Video1
-        [2.25 to 3.0) => Black frames
-        [3.0 to 3.75) => [2.25 to 3.0) of Video2
-        """
-
         audio_pts = 0
         for t in get_ts(start, end, self.fps):
             frame = self.get_frame_at(t)
 
             print(f'Getting t:{str(float(t))}')
-            #print(frame)
 
             # We need to adjust our output elements to be
             # consecutive and with the right values
             # TODO: We are using int() for fps but its float...
             frame.time_base = time_base
-            #frame.pts = int(video_frame_index / frame.time_base)
             frame.pts = T(t, time_base).truncated_pts
 
-            # TODO: We need to handle the audio
             writer.mux_video_frame(
                 frame = frame
            )
 
-            #print(f' [VIDEO] Here in t:{str(t)} -> pts:{str(frame.pts)} - dts:{str(frame.dts)}')
-
-            # TODO: Uncomment all this below for the audio
-            num_of_audio_frames = 0
             for audio_frame in self.get_audio_frames_at(t):
-                # TODO: The track gives us empty (black)
-                # frames by default but maybe we need a
-                # @dataclass in the middle to handle if
-                # we want transparent frames or not and/or
-                # to detect them here because, if not,
-                # they are just simple VideoFrames and we
-                # don't know they are 'empty' frames
-
                 # We need to adjust our output elements to be
                 # consecutive and with the right values
                 # TODO: We are using int() for fps but its float...
                 audio_frame.time_base = audio_time_base
-                #audio_frame.pts = int(audio_frame_index / audio_frame.time_base)
                 audio_frame.pts = audio_pts
+
                 # We increment for the next iteration
                 audio_pts += audio_frame.samples
-                #audio_frame.pts = int(t + (audio_frame_index * audio_frame.time_base) / audio_frame.time_base)
-
-                #print(f'[AUDIO] Here in t:{str(t)} -> pts:{str(audio_frame.pts)} - dts:{str(audio_frame.dts)}')
 
-                #num_of_audio_frames += 1
-                #print(audio_frame)
                 writer.mux_audio_frame(audio_frame)
-            #print(f'Num of audio frames: {str(num_of_audio_frames)}')
 
         writer.mux_video_frame(None)
         writer.mux_audio_frame(None)
         writer.output.close()
 
-    def _is_empty_part_frame(
-        frame: Union['VideoFrameWrapped', 'AudioFrameWrapped']
-    ) -> bool:
-        """
-        Flag to indicate if the frame comes from
-        an empty part or not.
-
-        TODO: The 'metadata' is included in our
-        wrapper class, not in VideoFrame or
-        AudioFrame classes. I should be sending
-        the wrapper in all the code, but by now
-        I'm doing it just in specific cases.
-        """
-        return (
-            hasattr(frame, 'metadata') and
-            frame.is_from_empty_part
-        )
-
     # TODO: Refactor and move please
     # TODO: This has to work for AudioFrame
     # also, but I need it working for Wrapped
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/__init__.py RENAMED
@@ -43,15 +43,17 @@ class _Track(ABC):
         """
         The end of the last audio of this track,
         which is also the end of the track. This
-        is the last time moment that has to
-        rendered
+        is the last time moment that has not to
+        be rendered because its the end and it
+        is not included -> [start, end).
         """
         return Fraction(
-            0.0
-            if len(self.medias) == 0 else
             max(
-
-
+                (
+                    media.end
+                    for media in self.medias
+                ),
+                default = 0.0
            )
        )
 
@@ -185,7 +187,7 @@ class _Track(ABC):
             t = t.truncated
         else:
             t = self.end
-
+
         self._medias.append(self._make_media_on_track(
             media = media,
             start = t
@@ -344,10 +346,15 @@ class _TrackWithAudio(_Track):
                 format = self.audio_format
            )
             if self.is_muted else
+            # TODO: What if we find None here (?)
             self._get_part_at_t(t).get_audio_frames_at(t)
        )
 
        for frame in frames:
+            print(' HELLO ')
+            print(frame)
+            if frame == []:
+                print(f'   [ERROR] Audio frame t:{str(float(t))} not obtained.')
             yield frame
 
 class _TrackWithVideo(_Track):
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/parts.py RENAMED
@@ -68,20 +68,19 @@ class _Part(ABC):
     to fulfill.
     """
 
-class
+class _PartWithAudio(_Part):
     """
-
-    track, that can be an empty space or an audio.
+    TODO: Explain
     """
 
     def __init__(
         self,
-        track: 'AudioTrack',
+        track: Union['AudioTrack', 'VideoTrack'],
         start: Union[int, float, Fraction],
         end: Union[int, float, Fraction],
-        media: Union[AudioOnTrack, None] = None
+        media: Union[AudioOnTrack, VideoOnTrack, None] = None
     ):
-        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
+        ParameterValidator.validate_instance_of('media', media, [AudioOnTrack, VideoOnTrack])
 
         super().__init__(
             track = track,
@@ -105,13 +104,25 @@ class _AudioPart(_Part):
         Iterate over all the audio frames that
         exist at the time moment 't' provided.
         """
+        frames = []
         if not self.is_empty_part:
-
-
-
-
-            )
-
+            # TODO: What do we do in this case (?)
+            frames = list(self.media.get_audio_frames_at(t))
+
+            if len(frames) == 0:
+                print(f'   [ERROR] Audio frame {str(float(t))} was not obtained')
+            else:
+                frames = [
+                    AudioFrameWrapped(
+                        frame = frame,
+                        is_from_empty_part = False
+                    )
+                    for frame in frames
+                ]
+
+        # This could be because is empty part or
+        # because we couldn't obtain the frames
+        if len(frames) == 0:
             frames = generate_silent_frames(
                 fps = self._track.fps,
                 audio_fps = self._track.audio_fps,
@@ -121,13 +132,12 @@ class _AudioPart(_Part):
                 format = self._track.audio_format
            )
 
-
-
+        for frame in frames:
+            yield frame
 
-class
+class _PartWithVideo(_Part):
     """
-
-    track, that can be an empty space or a video.
+    TODO: Explain
     """
 
     def __init__(
@@ -152,12 +162,6 @@ class _VideoPart(_Part):
         Useful internal tool to generate background
         frames for the empty parts.
         """
-        # TODO: Can I refactor this below (?)
-        self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
-        """
-        Useful internal tool to generate silent
-        audio frames for the empty parts.
-        """
 
     def get_frame_at(
         self,
@@ -167,25 +171,42 @@ class _VideoPart(_Part):
         Get the frame that must be displayed at
         the given 't' time moment.
         """
-
-
-
-
-
-
-
-
-
+        is_from_empty_part = False
+        if self.is_empty_part:
+            frame = self._video_frame_generator.background.full_black(
+                size = self._track.size,
+                time_base = fps_to_time_base(self._track.fps)
+            )
+
+            is_from_empty_part = True
+        else:
+            # TODO: This can be None, why? I don't know...
+            frame = self.media.get_frame_at(t)
+
+            if frame is None:
+                print(f'   [ERROR] Frame {str(float(t))} was not obtained')
+
+            frame = (
+                # I'm using a red full frame to be able to detect
+                # fast the frames that were not available, but
+                # I need to find the error and find a real solution
+                self._video_frame_generator.background.full_red(
                     size = self._track.size,
                     time_base = fps_to_time_base(self._track.fps)
-        )
-
-
-        if self.is_empty_part else
-        VideoFrameWrapped(
-            frame = self.media.get_frame_at(t),
-            is_from_empty_part = False
+                )
+                if frame is None else
+                frame
            )
+
+        # TODO: What about the 'format' (?)
+        # TODO: Maybe I shouldn't set the 'time_base'
+        # here and do it just in the Timeline 'render'
+        #return get_black_background_video_frame(self._track.size)
+        # TODO: This 'time_base' maybe has to be related
+        # to a Timeline general 'time_base' and not the fps
+        frame = VideoFrameWrapped(
+            frame = frame,
+            is_from_empty_part = is_from_empty_part
        )
 
         # TODO: This should not happen because of
@@ -201,30 +222,46 @@ class _VideoPart(_Part):
 
         return frame
 
-
+class _AudioPart(_PartWithAudio):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or an audio.
+    """
+
+    def __init__(
         self,
-
+        track: 'AudioTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[AudioOnTrack, None] = None
    ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            audio_samples_per_frame = self._track.audio_samples_per_frame,
-            # TODO: Where do this 2 formats come from (?)
-            layout = self._track.audio_layout,
-            format = self._track.audio_format
-        )
+        ParameterValidator.validate_instance_of('media', media, AudioOnTrack)
+
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
+
+class _VideoPart(_PartWithAudio, _PartWithVideo):
+    """
+    Class to represent an element that is on the
+    track, that can be an empty space or a video.
+    """
 
-
-
+    def __init__(
+        self,
+        track: 'VideoTrack',
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
+        media: Union[VideoOnTrack, None] = None
+    ):
+        ParameterValidator.validate_instance_of('media', media, VideoOnTrack)
 
+        super().__init__(
+            track = track,
+            start = start,
+            end = end,
+            media = media
+        )
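The reshuffle above is diamond-shaped: _PartWithAudio and _PartWithVideo both extend _Part, and the new _VideoPart inherits from both so a video part can also serve audio frames. A stripped-down sketch (simplified names, no library imports) of how Python linearizes that hierarchy:

class Part:
    def __init__(self, track, start, end, media = None):
        self.track, self.start, self.end, self.media = track, start, end, media

class PartWithAudio(Part):
    def get_audio_frames_at(self, t):
        ...   # yields audio frames (or silent filler)

class PartWithVideo(Part):
    def get_frame_at(self, t):
        ...   # returns a wrapped video frame (or a filler)

class VideoPart(PartWithAudio, PartWithVideo):
    """Videos carry audio, so this part needs both capabilities."""

print([c.__name__ for c in VideoPart.__mro__])
# ['VideoPart', 'PartWithAudio', 'PartWithVideo', 'Part', 'object']

With that order, super().__init__ from _VideoPart resolves to _PartWithAudio.__init__ first, which is why the diff widens that initializer's 'track' and 'media' annotations to accept the video types as well.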
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/__init__.py RENAMED
@@ -155,11 +155,15 @@ class FrameCache(ABC):
         seek and start decoding frames from that
         keyframe.
         """
-        return max(
-
-
-
-
+        return max(
+            (
+                key_frame_pts
+                for key_frame_pts in self.key_frames_pts
+                if key_frame_pts <= pts
+            ),
+            # If no key frames, just 0
+            default = 0
+        )
 
     def _store_frame_in_cache(
         self,
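The fix in this hunk is the 'default=' keyword: max() over an empty iterable raises ValueError unless a default is supplied, which is what used to happen when no keyframe pts preceded the requested one. A quick illustration with made-up pts values:

key_frames_pts = [100, 250, 500]

def nearest_keyframe_pts(pts: int) -> int:
    # Greatest keyframe pts that is <= the requested pts,
    # falling back to 0 when none qualifies
    return max(
        (k for k in key_frames_pts if k <= pts),
        default = 0
    )

assert nearest_keyframe_pts(300) == 250
assert nearest_keyframe_pts(50) == 0   # empty generator: ValueError without 'default'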
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/tests.py RENAMED
@@ -597,6 +597,34 @@ def video_modified_stored():
     # # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
     # timeline.render(OUTPUT_PATH)
 
+    # # Testing concatenating
+    # timeline = Timeline()
+    # # When you concat like this, some of the
+    # # videos have frames that cannot be accessed
+    # # and I don't know why...
+    # timeline.add_video(Video('test_files/glitch_rgb_frame.mp4'))
+    # timeline.add_video(Video('test_files/output.mp4'))
+    # timeline.add_video(Video('test_files/output_render.mp4'))
+    # timeline.add_video(Video('test_files/strange_tv_frame.mp4'))
+    # timeline.add_video(Video('test_files/test_1.mp4'))
+    # timeline.add_video(Video('test_files/test_1_short_2.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_1st_track_solo_v0_0_15.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_2nd_track_solo_v0_0_15.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_combined_tracks_v0_0_015.mp4'))
+    # timeline.add_video(Video('test_files/test_audio_combined_v0_0_15.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_add_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_difference_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_multiply_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_overlay_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_blend_screen_v0_0_16.mp4'))
+    # timeline.add_video(Video('test_files/test_combine_skipping_empty_using_priority_v0_0_18.mp4'))
+    # timeline.add_video(Video('test_files/test_ok_v0_0_13.mp4'))
+
+    # timeline.render('test_files/concatenated.mp4')
+
+
+    # return
+
     # TODO: This test will add videos that
     # must be played at the same time
     video = Video(VIDEO_PATH, 0.25, 0.75)
@@ -610,7 +638,7 @@ def video_modified_stored():
     timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
 
-    timeline.tracks[0].mute()
+    #timeline.tracks[0].mute()
 
     # Track 2
     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/LICENSE RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/README.md RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/classes.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/media/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/complete/track/utils.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/audio/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/video/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/nodes/video/opengl.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/__init__.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/audio.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/utils.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/reader/cache/video.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/t.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/utils.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/video.py RENAMED
File without changes
{yta_video_opengl-0.0.20 → yta_video_opengl-0.0.21}/src/yta_video_opengl/writer.py RENAMED
File without changes