yta-video-opengl 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_video_opengl/complete/blend.py +83 -0
- yta_video_opengl/complete/timeline.py +164 -11
- yta_video_opengl/complete/track.py +13 -2
- yta_video_opengl/tests.py +19 -6
- {yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/METADATA +1 -1
- {yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/RECORD +8 -7
- {yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/WHEEL +0 -0
yta_video_opengl/complete/blend.py
ADDED
@@ -0,0 +1,83 @@
+"""
+TODO: I don't like the name nor the
+location of this file, but it is here
+to encapsulate some functionality
+related to combining video frames.
+"""
+import numpy as np
+
+
+def blend_alpha(
+    bottom,
+    top,
+    alpha = 0.5
+):
+    return (alpha * top + (1 - alpha) * bottom).astype(np.uint8)
+
+def blend_add(
+    bottom,
+    top
+):
+    """
+    Brightens the combined image, as if you were overlapping two light projectors.
+    """
+    return np.clip(bottom.astype(np.int16) + top.astype(np.int16), 0, 255).astype(np.uint8)
+
+def blend_multiply(
+    bottom,
+    top
+):
+    """
+    Darkens, like projecting two transparencies together.
+    """
+    return ((bottom.astype(np.float32) * top.astype(np.float32)) / 255).astype(np.uint8)
+
+def blend_screen(
+    bottom,
+    top
+):
+    """
+    Does the opposite of Multiply: it brightens the image.
+    """
+    return (255 - ((255 - bottom.astype(np.float32)) * (255 - top.astype(np.float32)) / 255)).astype(np.uint8)
+
+def blend_overlay(
+    bottom,
+    top
+):
+    """
+    A mix between Multiply and Screen depending on the brightness of each pixel.
+    """
+    b = bottom.astype(np.float32) / 255
+    t = top.astype(np.float32) / 255
+    mask = b < 0.5
+    result = np.zeros_like(b)
+    result[mask] = 2 * b[mask] * t[mask]
+    result[~mask] = 1 - 2 * (1 - b[~mask]) * (1 - t[~mask])
+    return (result * 255).astype(np.uint8)
+
+def blend_difference(
+    bottom,
+    top
+):
+    """
+    Highlights the differences between the two frames.
+    """
+    return np.abs(bottom.astype(np.int16) - top.astype(np.int16)).astype(np.uint8)
+
+# TODO: This one needs a mask, that's why
+# it is commented
+# def blend_mask(
+#     bottom,
+#     top,
+#     mask
+# ):
+#     """
+#     Instead of a fixed alpha you can pass a mask (for example, a gradient or a real alpha channel)
+
+#     mask: float32 array between 0 and 1, same size as the frame.
+#     """
+#     return (mask * top + (1 - mask) * bottom).astype(np.uint8)
+
+
+
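The new blend helpers operate on plain numpy arrays of shape (height, width, 3) and dtype uint8. As a quick illustration, here is a minimal sketch exercising blend_add and blend_overlay; the frame sizes and fill values are invented for the example:

import numpy as np
from yta_video_opengl.complete.blend import blend_add, blend_overlay

# Hypothetical stand-ins for two decoded rgb24 frames
bottom = np.full((360, 640, 3), 100, dtype = np.uint8)  # mid-dark frame
top = np.full((360, 640, 3), 200, dtype = np.uint8)     # bright frame

# blend_add saturates at 255 instead of wrapping around, because it
# sums in int16 and clips back to [0, 255] before casting to uint8
added = blend_add(bottom, top)
assert added.dtype == np.uint8 and added.max() == 255

# blend_overlay applies the Multiply branch where the base is dark
# (< 0.5 after normalization) and the Screen branch where it is bright
overlaid = blend_overlay(bottom, top)
assert overlaid.shape == bottom.shape

The widened int16 intermediate matters: adding two uint8 arrays directly would overflow and wrap (200 + 100 becomes 44), which is why these helpers widen before clipping.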
yta_video_opengl/complete/timeline.py
CHANGED
@@ -14,9 +14,14 @@ from yta_video_opengl.complete.track import Track
 from yta_video_opengl.video import Video
 from yta_video_opengl.t import get_ts, fps_to_time_base, T
 from yta_validation.parameter import ParameterValidator
+from av.video.frame import VideoFrame
+from av.audio.frame import AudioFrame
+from av.audio.resampler import AudioResampler
 from quicktions import Fraction
 from typing import Union
 
+import numpy as np
+
 
 class Timeline:
     """
@@ -129,8 +134,25 @@ class Timeline:
         # other frame in other track, or to know if
         # I want them as transparent or something
 
-        # TODO:
-
+        # TODO: This is just a test function
+        from yta_video_opengl.complete.blend import blend_add
+
+        # TODO: Combine the frames. We force them to
+        # rgb24 to obtain them with the same shape,
+        # but maybe we have to change this because
+        # we also need to handle alphas
+        output_frame = next(frames).to_ndarray(format = 'rgb24')
+        for frame in frames:
+            # Combine them
+            # TODO: We need to ignore the frames that
+            # are just empty black frames and not use
+            # them in the combination process
+            output_frame = blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))
+
+        # TODO: How to build this VideoFrame correctly,
+        # and what about the 'format' (?)
+        # We don't handle pts here, just the image
+        return VideoFrame.from_ndarray(output_frame, format = 'rgb24')
 
     def get_audio_frames_at(
         self,
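The loop above is a left fold over the per-track frames: the first frame seeds output_frame and each remaining track is blended on top, then the raw array is wrapped back into a PyAV VideoFrame. A self-contained sketch of that round trip, using throwaway solid-color frames in place of real track output (sizes and colors are invented for illustration):

import numpy as np
from av.video.frame import VideoFrame
from yta_video_opengl.complete.blend import blend_add

# Throwaway frames standing in for the per-track frames iterator
frames = iter([
    VideoFrame.from_ndarray(np.full((360, 640, 3), 30, dtype = np.uint8), format = 'rgb24'),
    VideoFrame.from_ndarray(np.full((360, 640, 3), 60, dtype = np.uint8), format = 'rgb24'),
])

# Seed with the first frame, then blend the rest on top of it
output_frame = next(frames).to_ndarray(format = 'rgb24')
for frame in frames:
    output_frame = blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))

# Wrap the mixed array again; pts and time_base are intentionally
# left unset here, as the diff defers them to encoding time
combined = VideoFrame.from_ndarray(output_frame, format = 'rgb24')
assert combined.width == 640 and combined.height == 360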
@@ -138,20 +160,151 @@
     ):
         # TODO: What if the different audio streams
         # also have different fps (?)
-
+        audio_frames = []
         for track in self.tracks:
             # TODO: Make this work properly
-            audio_frames
+            audio_frames.append(list(track.get_audio_frames_at(t)))
 
         # TODO: Combine them
-
-
-
+        # TODO: We need to ignore the frames that
+        # are just empty black frames and not use
+        # them in the combination process
+
+        def mix_audio_frames_by_index(
+            tracks_frames,
+            layout = 'stereo'
+        ):
+            """
+            Combine all the columns of the given
+            matrix of audio frames 'tracks_frames'.
+            The rows are the different tracks and
+            the columns are the frames at that 't'
+            moment of each of those tracks.
+
+            The 'tracks_frames' matrix needs to be
+            pre-processed to have only 1 single
+            frame to combine, so we concatenate
+            all the frames if there is more than 1
+            per column.
+            """
+            # TODO: Please improve and clean all this,
+            # the code is so sh*tty; make utils to
+            # combine frames and those things, not here...
+            # Also make the formats dynamic and
+            # based on the output that is defined here
+            # in the Timeline class.
+            mixed_frames = []
+
+            # Iterate by columns (each row is a track)
+            for frames_at_index in zip(*tracks_frames):
+                arrays = []
+                for f in frames_at_index:
+                    # Resample to the expected output values
+                    # TODO: This must be dynamic depending
+                    # on the track values
+                    resampler = AudioResampler(format = 'fltp', layout = 'stereo', rate = self.audio_fps)
+                    arr = resampler.resample(f)
+
+                    arr = f.to_ndarray()
+
+                    # TODO: This below must change depending
+                    # on the expected output; for us, for now,
+                    # it is float32, fltp, stereo, 44_100
+                    # Same format
+                    if arr.dtype == np.int16:
+                        arr = arr.astype(np.float32) / 32768.0
+
+                    # Same layout (number of channels)
+                    if arr.shape[0] == 1:
+                        arr = np.repeat(arr, 2, axis = 0)
+                    # elif arr.dtype == np.float32:
+                    #     # Already in [-1, 1], don't touch it
+                    #     pass
+
+                    arrays.append(arr)
+
+                # Align the lengths
+                max_len = max(a.shape[1] for a in arrays)
+                stacked = []
+                for a in arrays:
+                    buf = np.zeros((a.shape[0], max_len), dtype = np.float32)
+                    buf[:, :a.shape[1]] = a
+                    stacked.append(buf)
+
+                # Mix
+                mix = np.sum(stacked, axis = 0) / len(stacked)
+                #mix = np.sum(stacked, axis = 0)
+
+                # Limit to the [-1, 1] range
+                mix = np.clip(mix, -1.0, 1.0)
+
+                # Create the output frame
+                # TODO: What about the 'format' if they
+                # are all different (?)
+                out = AudioFrame.from_ndarray(mix, format = 'fltp', layout = layout)
+                out.sample_rate = self.audio_fps
+                # TODO: This will be written later when
+                # encoding
+                # out.pts = frames_at_index[0].pts
+                # out.time_base = frames_at_index[0].time_base
+
+                print(mix.min(), mix.max())
+
+                mixed_frames.append(out)
+
+            return mixed_frames
+
+        def combine_audio_frames(frames):
+            """
+            Combine several consecutive AudioFrames into a single one.
+            - Converts to float32
+            - Concatenates the samples along the time axis
+            - Returns a new AudioFrame
+            """
+            if not frames:
+                # TODO: This should not happen
+                return None
 
-
-
-
-
+            if len(frames) == 1:
+                return frames
+
+            # Check basic consistency
+            sample_rate = frames[0].sample_rate
+            layout = frames[0].layout.name
+            channels = frames[0].layout.channels
+
+            arrays = []
+            for f in frames:
+                if f.sample_rate != sample_rate or f.layout.name != layout:
+                    raise ValueError('The frames must have the same sample_rate and layout')
+
+                # arr = f.to_ndarray() # (channels, samples)
+                # if arr.dtype == np.int16:
+                #     arr = arr.astype(np.float32) / 32768.0
+                # elif arr.dtype != np.float32:
+                #     arr = arr.astype(np.float32)
+
+                arrays.append(f.to_ndarray())
+
+            # Concatenate along the samples axis
+            combined = np.concatenate(arrays, axis = 1)
+
+            # Create a new frame
+            out = AudioFrame.from_ndarray(combined, format = frames[0].format, layout = layout)
+            out.sample_rate = sample_rate
+
+            return [out]
+
+        # We need only 1 single audio frame per column
+        collapsed = []
+        for frames in audio_frames:
+            collapsed.append(combine_audio_frames(frames))
+
+        # Now, mix column by column (track by track)
+        frames = mix_audio_frames_by_index(collapsed)
+
+        for audio_frame in frames:
+            yield audio_frame
 
     def render(
         self,
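Stripped of the PyAV plumbing, the audio path does two things: per track it concatenates consecutive frames along the sample axis, and across tracks it zero-pads every (channels, samples) array to a common length, averages, and clips. A numpy-only sketch of that mixing core, on invented arrays of unequal length:

import numpy as np

# One float32 (channels, samples) array per track; lengths differ on purpose
track_a = np.random.uniform(-1.0, 1.0, (2, 1024)).astype(np.float32)
track_b = np.random.uniform(-1.0, 1.0, (2, 960)).astype(np.float32)
arrays = [track_a, track_b]

# Zero-pad every array to the longest one so they can be summed
max_len = max(a.shape[1] for a in arrays)
stacked = []
for a in arrays:
    buf = np.zeros((a.shape[0], max_len), dtype = np.float32)
    buf[:, :a.shape[1]] = a
    stacked.append(buf)

# Average rather than sum, so N tracks do not get N times louder,
# then clamp to the valid [-1, 1] float sample range
mix = np.clip(np.sum(stacked, axis = 0) / len(stacked), -1.0, 1.0)
assert mix.shape == (2, max_len)

Note that averaging also halves the level of a track playing against silence; a production mixer would more likely sum and limit, which may be why the plain-sum line is kept around commented out.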
yta_video_opengl/complete/track.py
CHANGED
@@ -198,6 +198,17 @@ class Track:
             for video in self.videos
         )
     )
+
+    @property
+    def videos(
+        self
+    ) -> list[VideoOnTrack]:
+        """
+        The list of videos we have in the track,
+        ordered by the 'start' attribute from
+        first to last.
+        """
+        return sorted(self._videos, key = lambda video: video.start)
 
     def __init__(
         self,
@@ -210,7 +221,7 @@ class Track:
         # TODO: Where does it come from (?)
         audio_samples_per_frame: int
     ):
-        self.
+        self._videos: list[VideoOnTrack] = []
         """
         The list of 'VideoOnTrack' instances that
         must play on this track.
@@ -352,7 +363,7 @@ class Track:
         else:
             t = self.end
 
-        self.
+        self._videos.append(VideoOnTrack(
             video,
             t
         ))
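The track.py change is a small encapsulation: appends go to the private _videos list in O(1), while the public videos property always returns a start-ordered copy, so callers never depend on insertion order. A minimal sketch of the same pattern in isolation (Clip and TrackSketch are hypothetical stand-ins for VideoOnTrack and Track):

from dataclasses import dataclass

@dataclass
class Clip:
    # Hypothetical stand-in for VideoOnTrack
    name: str
    start: float

class TrackSketch:
    def __init__(self):
        self._clips: list[Clip] = []

    def add(self, clip: Clip) -> None:
        # O(1) append; ordering is deferred to the read side
        self._clips.append(clip)

    @property
    def clips(self) -> list[Clip]:
        # Sort on every read so insertion order never leaks out
        return sorted(self._clips, key = lambda clip: clip.start)

track = TrackSketch()
track.add(Clip('late', 2.0))
track.add(Clip('early', 0.5))
assert [clip.name for clip in track.clips] == ['early', 'late']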
yta_video_opengl/tests.py
CHANGED
@@ -584,14 +584,27 @@ def video_modified_stored():
     from yta_video_opengl.video import Video
     from yta_video_opengl.complete.timeline import Timeline
 
+    # TODO: This test below is just to validate
+    # that it is cropping and placing correctly,
+    # but the videos are only in one track
+    # video = Video(VIDEO_PATH, 0.25, 0.75)
+    # timeline = Timeline()
+    # timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.5)
+    # # This is successfully raising an exception
+    # #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
+    # timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.75)
+    # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
+    # # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
+    # timeline.render(OUTPUT_PATH)
+
+    # TODO: This test will add videos that
+    # must be played at the same time
     video = Video(VIDEO_PATH, 0.25, 0.75)
     timeline = Timeline()
-    timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.
-
-
-    timeline.add_video(Video(
-    timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
-    # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
+    timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75)
+    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0)
+    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.1, do_use_second_track = True)
+    timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
     timeline.render(OUTPUT_PATH)
 
     return
{yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/RECORD
CHANGED
@@ -1,8 +1,9 @@
 yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
 yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
 yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-yta_video_opengl/complete/
-yta_video_opengl/complete/
+yta_video_opengl/complete/blend.py,sha256=7qtMlgXM8n1Yfqqdl9SDaDWUk7ZTF4Ui1YuMqce2QlE,2001
+yta_video_opengl/complete/timeline.py,sha256=JWEGgEcO7YkZSNAxnz0JdMQh0ck0AffpCHi1IPhcngc,15429
+yta_video_opengl/complete/track.py,sha256=g6xPAfYl29Ruw1P6ifOZG4j8v_N8kzbFwVaVk25mx6w,13930
 yta_video_opengl/complete/video_on_track.py,sha256=KROAI0bndnfcvKlHGsSEyWg9o1xozW0PI_Rhqp0r9kw,4844
 yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
 yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
@@ -14,11 +15,11 @@ yta_video_opengl/reader/cache/audio.py,sha256=cm_1D5f5RnmJgaidA1pnEhTPF8DE0mU2Mo
 yta_video_opengl/reader/cache/utils.py,sha256=9aJ6qyUFRvoh2jRbIvtF_-1MOm_sgQtPiy0WXLCZYcA,1402
 yta_video_opengl/reader/cache/video.py,sha256=CSVgb3Sjqzk22sQkukoakVzms-wwZpXOT61Y6tirhjg,3292
 yta_video_opengl/t.py,sha256=xOhT1xBEwChlXf-Tuy-WxA_08iRJWVlnL_Hyzr-9-sk,6633
-yta_video_opengl/tests.py,sha256=
+yta_video_opengl/tests.py,sha256=6QvJx9y4kCiq7b9-AKMetzGuJjd__pTBK5r4tJp3aso,27321
 yta_video_opengl/utils.py,sha256=yUi17EjNR4SVpvdDUwUaKl4mBCb1uyFCSGoIX3Zr2F0,15586
 yta_video_opengl/video.py,sha256=JPIWDQcYlLi8eT2LOFQtS1jVu5xVmW4bz1VMtP0gMeA,8626
 yta_video_opengl/writer.py,sha256=QwvjQcEkzn1WAVqVTFiI6tYIXJO67LKKUTJGO_eflFM,8893
-yta_video_opengl-0.0.
-yta_video_opengl-0.0.
-yta_video_opengl-0.0.
-yta_video_opengl-0.0.
+yta_video_opengl-0.0.16.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+yta_video_opengl-0.0.16.dist-info/METADATA,sha256=VYswuzvZl8rQv0OHZuqp5BHjquuh-NhLrRV-mNN1_Xo,714
+yta_video_opengl-0.0.16.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+yta_video_opengl-0.0.16.dist-info/RECORD,,
{yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/LICENSE
File without changes
{yta_video_opengl-0.0.15.dist-info → yta_video_opengl-0.0.16.dist-info}/WHEEL
File without changes