yta-video-opengl 0.0.25__tar.gz → 0.0.27__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/PKG-INFO +1 -1
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/pyproject.toml +1 -1
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/effects.py +44 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/nodes/__init__.py +2 -2
- yta_video_opengl-0.0.27/src/yta_video_opengl/nodes/audio/__init__.py +133 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/nodes/video/opengl/__init__.py +3 -3
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/nodes/video/opengl/experimental.py +6 -6
- yta_video_opengl-0.0.25/src/yta_video_opengl/nodes/audio/__init__.py +0 -223
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/LICENSE +0 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/README.md +0 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/__init__.py +0 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/tests.py +0 -0
- {yta_video_opengl-0.0.25 → yta_video_opengl-0.0.27}/src/yta_video_opengl/utils.py +0 -0
src/yta_video_opengl/effects.py

@@ -1,7 +1,9 @@
 from yta_video_opengl.nodes.video.opengl import WavingNode
 from yta_video_opengl.nodes.audio import ChorusNode
 from yta_video_opengl.nodes import TimedNode
+from yta_video_opengl.utils import texture_to_frame
 from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
 from yta_programming.decorators import singleton_old
 from typing import Union
 
@@ -379,4 +381,46 @@ class EffectsStack:
 
         return self
 
+    def apply_video_effects(
+        self,
+        frame: 'np.ndarray',
+        t: Union[int, float, 'Fraction']
+    ) -> 'np.ndarray':
+        """
+        Apply all the video effects that must be
+        applied for the given 't' time moment to
+        the provided 'frame' (that must be the
+        video frame of that time moment).
+        """
+        for effect in self.get_video_effects_for_t(t):
+            frame = effect.process(frame, t)
+
+        # TODO: Check when the frame comes as a
+        # Texture and when as a numpy array. I
+        # think when we apply an opengl node it
+        # is a texture, but we need to return it
+        # as a numpy array, always
+        return (
+            texture_to_frame(frame)
+            if PythonValidator.is_instance_of(frame, moderngl.Texture) else
+            frame
+        )
+
+    def apply_audio_effects(
+        self,
+        frame: 'np.ndarray',
+        t: Union[int, float, 'Fraction']
+    ) -> 'np.ndarray':
+        """
+        Apply all the audio effects that must be
+        applied for the given 't' time moment to
+        the provided 'frame' (that must be the
+        audio frame of that time moment).
+        """
+        for effect in self.get_audio_effects_for_t(t):
+            frame = effect.process(frame, t)
+
+        # The frame can only be a numpy array here
+        return frame
+
     # TODO: Create 'remove_effect'
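These two methods are the new per-frame entry points of EffectsStack: each one collects the effects active at the given 't', pipes the frame through them in order and, on the video side, converts a resulting moderngl.Texture back to a numpy array. An illustrative sketch of how a render loop could drive them; the 'stack', 'video_frames' and 'fps' names are not from the package:

from fractions import Fraction

fps = 30
for index, video_frame in enumerate(video_frames):
    t = Fraction(index, fps)
    # Always yields a numpy array, even when an OpenGL
    # node produced a moderngl.Texture internally
    video_frame = stack.apply_video_effects(video_frame, t)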
src/yta_video_opengl/nodes/__init__.py

@@ -131,11 +131,11 @@ class TimedNode:
 
     def process(
         self,
-        frame: Union[
+        frame: Union[moderngl.Texture, 'np.ndarray'],
         t: float
         # TODO: Maybe we need 'fps' and 'number_of_frames'
         # to calculate progressions or similar...
-    ) -> Union[
+    ) -> Union[moderngl.Texture, 'np.ndarray']:
         """
         Process the frame if the provided 't' time
         moment is in the range of this TimedNode
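The widened annotation lets a TimedNode sit in a chain where frames alternate between GPU textures and numpy arrays. The docstring describes a time-range gate before delegating; a hedged sketch of that behaviour, borrowing the 'node', 'start' and 'end' names from the commented-out VolumeAudioNode example in the removed audio module (the real attributes may differ):

def process(self, frame, t):
    # Only apply the wrapped node while 't' is inside
    # this TimedNode's [start, end) range
    if self.start <= t and (self.end is None or t < self.end):
        return self.node.process(frame, t)
    return frame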
yta_video_opengl-0.0.27/src/yta_video_opengl/nodes/audio/__init__.py

@@ -0,0 +1,133 @@
+"""
+When working with audio frames, we don't need
+to use the GPU because audios are 1D and the
+information can be processed perfectly with
+a library like numpy.
+
+If we need a very intense calculation for an
+audio frame (FFT, convolution, etc.) we can
+use CuPy or some DSP-specific libraries, but
+90% of it is perfectly done with numpy.
+
+If you want to modify huge amounts of audio
+(some seconds at the same time), you can use
+CuPy, which has the same API as numpy but
+runs on the GPU. With the change below, most
+of the code keeps working:
+- `import numpy as np` → `import cupy as np`
+"""
+from abc import abstractmethod
+from typing import Union
+
+import numpy as np
+
+
+class _AudioNode:
+    """
+    Base audio node class to implement a
+    change in an audio frame by using the
+    numpy library.
+    """
+
+    @abstractmethod
+    def process(
+        self,
+        input: np.ndarray,
+        t: float
+    ) -> np.ndarray:
+        """
+        Process the provided audio 'input' that
+        is played on the given 't' time moment.
+        """
+        pass
+
+class VolumeNode(_AudioNode):
+    """
+    Set the volume.
+
+    TODO: Explain properly.
+    """
+
+    def __init__(
+        self,
+        factor_fn
+    ):
+        """
+        factor_fn: function (t, index) -> volume factor
+        """
+        self.factor_fn: callable = factor_fn
+
+    def process(
+        self,
+        input: np.ndarray,
+        t: float
+    ) -> np.ndarray:
+        """
+        Process the provided audio 'input' that
+        is played on the given 't' time moment.
+        """
+        factor = self.factor_fn(t, 0)
+
+        samples = input
+        samples *= factor
+
+        # Determine dtype according to format
+        # samples = (
+        #     samples.astype(np.int16)
+        #     # 'fltp', 's16', 's16p'
+        #     if 's16' in input.format.name else
+        #     samples.astype(np.float32)
+        # )
+
+        return samples
+
+class ChorusNode(_AudioNode):
+    """
+    Apply a chorus effect, also called flanger
+    effect.
+
+    TODO: Explain properly
+    """
+
+    def __init__(
+        self,
+        sample_rate: int,
+        depth: int = 0,
+        frequency: float = 0.25
+    ):
+        """
+        The 'sample_rate' must be the sample rate
+        of the audio frame.
+        """
+        self.sample_rate: int = sample_rate
+        self.depth: int = depth
+        self.frequency: float = frequency
+
+    def process(
+        self,
+        input: np.ndarray,
+        t: float
+    ) -> np.ndarray:
+        """
+        Process the provided audio 'input' that
+        is played on the given 't' time moment.
+        """
+        n_samples = input.shape[0]
+        t = np.arange(n_samples) / self.sample_rate
+
+        # Sinusoidal LFO that controls the delay
+        delay = (self.depth / 1000.0) * self.sample_rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * t)))
+        delay = delay.astype(np.int32)
+
+        output = np.zeros_like(input, dtype=np.float32)
+
+        for i in range(n_samples):
+            d = delay[i]
+
+            output[i] = (
+                0.7 * input[i] + 0.7 * input[i - d]
+                if (i - d) >= 0 else
+                input[i]
+            )
+
+        return output
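ChorusNode applies its LFO-modulated delay sample by sample, which is O(n) Python interpreter work per audio frame. Since the delayed index i - delay[i] can be computed for all samples at once, the same effect vectorizes cleanly; a sketch under the same assumption the loop makes (a 1D mono input), not the package's own code:

import numpy as np

def chorus_vectorized(
    samples: np.ndarray,
    sample_rate: int,
    depth: int,
    frequency: float
) -> np.ndarray:
    n_samples = samples.shape[0]
    times = np.arange(n_samples) / sample_rate
    # Same sinusoidal LFO as ChorusNode.process
    delay = ((depth / 1000.0) * sample_rate * 0.5 * (1 + np.sin(2 * np.pi * frequency * times))).astype(np.int32)
    indices = np.arange(n_samples) - delay
    valid = indices >= 0
    output = samples.astype(np.float32).copy()
    # Mix dry and delayed signal wherever the delayed sample exists
    output[valid] = 0.7 * samples[valid] + 0.7 * samples[indices[valid]]
    return output

As the module docstring notes, swapping 'import numpy as np' for 'import cupy as np' would run the same vectorized code on the GPU for long buffers.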
src/yta_video_opengl/nodes/video/opengl/__init__.py

@@ -199,7 +199,7 @@ class OpenglNodeBase(_VideoNode):
 
     def process(
         self,
-        input: Union[moderngl.Texture,
+        input: Union[moderngl.Texture, np.ndarray]
     ) -> moderngl.Texture:
         """
         Apply the shader to the 'input', that
@@ -209,7 +209,7 @@ class OpenglNodeBase(_VideoNode):
         We use and return textures to maintain
         the process in GPU and optimize it.
         """
-        if PythonValidator.is_instance_of(input,
+        if PythonValidator.is_instance_of(input, np.ndarray):
             # TODO: What about the numpy format (?)
             input = frame_to_texture(input, self.context)
 
@@ -293,7 +293,7 @@ class WavingNode(OpenglNodeBase):
     # processed by the code
     def process(
         self,
-        input: Union[moderngl.Texture, '
+        input: Union[moderngl.Texture, 'np.ndarray'],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
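These changes complete the pattern the docstring describes: a node accepts either a numpy frame or an existing texture, uploads the former once through frame_to_texture, and returns a moderngl.Texture so chained nodes never leave the GPU. An illustrative chain (node construction omitted; texture_to_frame is the package's helper from yta_video_opengl.utils):

from yta_video_opengl.utils import texture_to_frame

texture = waving_node.process(video_frame, t)   # numpy array in, Texture out
texture = glitch_node.process(texture, t)       # stays on the GPU
video_frame = texture_to_frame(texture)         # single readback at the end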
src/yta_video_opengl/nodes/video/opengl/experimental.py

@@ -65,7 +65,7 @@ class BreathingFrame(OpenglNodeBase):
 
     def process(
         self,
-        input: Union[moderngl.Texture, '
+        input: Union[moderngl.Texture, 'np.ndarray'],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
@@ -165,7 +165,7 @@ class HandheldFrame(OpenglNodeBase):
 
     def process(
         self,
-        input: Union[moderngl.Texture,
+        input: Union[moderngl.Texture, np.ndarray],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
@@ -351,7 +351,7 @@ class OrbitingFrame(OpenglNodeBase):
 
     def process(
         self,
-        input: Union[moderngl.Texture,
+        input: Union[moderngl.Texture, np.ndarray],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
@@ -446,7 +446,7 @@ class RotatingInCenterFrame(OpenglNodeBase):
 
     def process(
         self,
-        input: Union[moderngl.Texture,
+        input: Union[moderngl.Texture, np.ndarray],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
@@ -636,7 +636,7 @@ class StrangeTvFrame(OpenglNodeBase):
 
     def process(
         self,
-        input: Union[moderngl.Texture,
+        input: Union[moderngl.Texture, np.ndarray],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
@@ -744,7 +744,7 @@ class GlitchRgbFrame(OpenglNodeBase):
 
     def process(
         self,
-        input: Union[moderngl.Texture,
+        input: Union[moderngl.Texture, np.ndarray],
         t: float = 0.0,
     ) -> moderngl.Texture:
         """
yta_video_opengl-0.0.25/src/yta_video_opengl/nodes/audio/__init__.py

@@ -1,223 +0,0 @@
-"""
-When working with audio frames, we don't need
-to use the GPU because audios are 1D and the
-information can be processed perfectly with
-a library like numpy.
-
-If we need a very intense calculation for an
-audio frame (FFT, convolution, etc.) we can
-use CuPy or some DSP-specific libraries, but
-90% of it is perfectly done with numpy.
-
-If you want to modify huge amounts of audio
-(some seconds at the same time), you can use
-CuPy, which has the same API as numpy but
-runs on the GPU. With the change below, most
-of the code keeps working:
-- `import numpy as np` → `import cupy as np`
-"""
-from abc import abstractmethod
-from typing import Union
-
-import numpy as np
-
-
-class _AudioNode:
-    """
-    Base audio node class to implement a
-    change in an audio frame by using the
-    numpy library.
-    """
-
-    @abstractmethod
-    def process(
-        self,
-        input: Union['AudioFrame', 'np.ndarray'],
-        t: float
-    ) -> Union['AudioFrame', 'np.ndarray']:
-        """
-        Process the provided audio 'input' that
-        is played on the given 't' time moment.
-        """
-        pass
-
-class VolumeNode(_AudioNode):
-    """
-    Set the volume.
-
-    TODO: Explain properly.
-    """
-
-    def __init__(
-        self,
-        factor_fn
-    ):
-        """
-        factor_fn: function (t, index) -> volume factor
-        """
-        self.factor_f: callable = factor_fn
-
-    def process(
-        self,
-        input: Union['AudioFrame', 'np.ndarray'],
-        t: float
-    ) -> Union['AudioFrame', 'np.ndarray']:
-        """
-        Process the provided audio 'input' that
-        is played on the given 't' time moment.
-        """
-        # if PythonValidator.is_instance_of(input, 'AudioFrame'):
-        #     input = input.to_ndarray().astype(np.float32)
-
-        # TODO: I think we should receive only
-        # numpy arrays and the AudioFrame should
-        # be handled outside, but I'm not sure
-
-        factor = self.factor_fn(t, 0)
-
-        samples = input
-        samples *= factor
-
-        # Determine dtype according to format
-        # samples = (
-        #     samples.astype(np.int16)
-        #     # 'fltp', 's16', 's16p'
-        #     if 's16' in input.format.name else
-        #     samples.astype(np.float32)
-        # )
-
-        # new_frame = AudioFrame.from_ndarray(
-        #     samples,
-        #     format = input.format.name,
-        #     layout = input.layout.name
-        # )
-        # new_frame.sample_rate = input.sample_rate
-        # new_frame.pts = input.pts
-        # new_frame.time_base = input.time_base
-
-        return samples
-
-class ChorusNode(_AudioNode):
-    """
-    Apply a chorus effect, also called flanger
-    effect.
-
-    TODO: Explain properly
-    """
-
-    def __init__(
-        self,
-        sample_rate: int,
-        depth: int = 0,
-        frequency: float = 0.25
-    ):
-        """
-        The 'sample_rate' must be the sample rate
-        of the audio frame.
-        """
-        self.sample_rate: int = sample_rate
-        self.depth: int = depth
-        self.frequency: float = frequency
-
-    def process(
-        self,
-        input: Union['AudioFrame', 'np.ndarray'],
-        t: float
-    ) -> Union['AudioFrame', 'np.ndarray']:
-        """
-        Process the provided audio 'input' that
-        is played on the given 't' time moment.
-        """
-        # TODO: I think we should receive only
-        # numpy arrays and the AudioFrame should
-        # be handled outside, but I'm not sure
-
-        n_samples = input.shape[0]
-        t = np.arange(n_samples) / self.rate
-
-        # Sinusoidal LFO that controls the delay
-        delay = (self.depth / 1000.0) * self.rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * t)))
-        delay = delay.astype(np.int32)
-
-        output = np.zeros_like(input, dtype=np.float32)
-
-        for i in range(n_samples):
-            d = delay[i]
-            if i - d >= 0:
-                output[i] = 0.7 * input[i] + 0.7 * input[i - d]
-            else:
-                output[i] = input[i]
-
-        return output
-
-# TODO: Remove this below
-"""
-Here you have an example. The 'private'
-node class is the modifier, that we don't
-want to expose, and the 'public' class is
-the one that inherits from TimedNode and
-wraps the 'private' class to build the
-functionality.
-"""
-# class VolumeAudioNode(TimedNode):
-#     """
-#     TimedNode to set the audio volume of a video
-#     in a specific frame.
-#     """
-
-#     def __init__(
-#         self,
-#         factor_fn,
-#         start: float = 0.0,
-#         end: Union[float, None] = None
-#     ):
-#         super().__init__(
-#             node = _SetVolumeAudioNode(factor_fn),
-#             start = start,
-#             end = end
-#         )
-
-# class _SetVolumeAudioNode(AudioNode):
-#     """
-#     Audio node to change the volume of an
-#     audio frame.
-#     """
-
-#     def __init__(
-#         self,
-#         factor_fn
-#     ):
-#         """
-#         factor_fn: function (t, index) -> volume factor
-#         """
-#         self.factor_fn = factor_fn
-
-#     def process(
-#         self,
-#         frame: av.AudioFrame,
-#         t: float,
-#     ) -> av.AudioFrame:
-#         # TODO: Why index (?) Maybe 'total_frames'
-#         factor = self.factor_fn(t, 0)
-
-#         samples = frame.to_ndarray().astype(np.float32)
-#         samples *= factor
-
-#         # Determine dtype according to format
-#         samples = (
-#             samples.astype(np.int16)
-#             # 'fltp', 's16', 's16p'
-#             if 's16' in frame.format.name else
-#             samples.astype(np.float32)
-#         )
-
-#         new_frame = av.AudioFrame.from_ndarray(
-#             samples,
-#             format = frame.format.name,
-#             layout = frame.layout.name
-#         )
-#         new_frame.sample_rate = frame.sample_rate
-#         new_frame.pts = frame.pts
-#         new_frame.time_base = frame.time_base
-
-#         return new_frame