yta-video-opengl 0.0.24__tar.gz → 0.0.26__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/PKG-INFO +1 -1
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/pyproject.toml +1 -1
- yta_video_opengl-0.0.24/src/yta_video_opengl/editor.py → yta_video_opengl-0.0.26/src/yta_video_opengl/effects.py +133 -82
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/nodes/__init__.py +26 -1
- yta_video_opengl-0.0.26/src/yta_video_opengl/nodes/audio/__init__.py +133 -0
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/nodes/video/__init__.py +4 -0
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/nodes/video/opengl/__init__.py +3 -3
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/nodes/video/opengl/experimental.py +6 -6
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/tests.py +2 -2
- yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/audio/__init__.py +0 -224
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/LICENSE +0 -0
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/README.md +0 -0
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/__init__.py +0 -0
- {yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/utils.py +0 -0
yta_video_opengl-0.0.24/src/yta_video_opengl/editor.py → yta_video_opengl-0.0.26/src/yta_video_opengl/effects.py

```diff
@@ -1,6 +1,6 @@
 from yta_video_opengl.nodes.video.opengl import WavingNode
 from yta_video_opengl.nodes.audio import ChorusNode
-from yta_video_opengl.nodes import
+from yta_video_opengl.nodes import TimedNode
 from yta_validation.parameter import ParameterValidator
 from yta_programming.decorators import singleton_old
 from typing import Union
```

```diff
@@ -20,9 +20,9 @@ class _AudioEffects:
 
     def __init__(
         self,
-        effects: '_Effects'
+        effects: 'Effects'
     ):
-        self._effects: _Effects = effects
+        self._effects: Effects = effects
        """
        The parent instance that includes this
        class instance as a property.
```

```diff
@@ -66,9 +66,9 @@ class _VideoEffects:
 
     def __init__(
         self,
-        effects: '_Effects'
+        effects: 'Effects'
     ):
-        self._effects: _Effects = effects
+        self._effects: Effects = effects
        """
        The parent instance that includes this
        class instance as a property.
```

```diff
@@ -101,7 +101,7 @@ class _VideoEffects:
         """
         return _create_node(
             WavingNode(
-                context = self._effects.
+                context = self._effects.opengl_context,
                 size = size,
                 amplitude = amplitude,
                 frequency = frequency,
```

```diff
@@ -114,45 +114,8 @@ class _VideoEffects:
     # TODO: Include definitive and tested video
     # effects here below
 
-class _Effects:
-    """
-    *For internal use only*
-
-    Class to be used within the OpenglEditor
-    as a property to simplify the access to
-    the effect nodes and also to have the
-    single context always available through
-    the OpenglEditor instance that is a
-    singleton one.
-
-    Even though we can have more effects,
-    this class is also the way we expose only
-    the ones we actually want to expose to
-    the user.
-    """
-
-    def __init__(
-        self,
-        opengl_editor: 'OpenglEditor'
-    ):
-        self._opengl_editor: OpenglEditor = opengl_editor
-        """
-        The parent instance that includes this
-        class instance as a property.
-        """
-        self.audio: _AudioEffects = _AudioEffects(self)
-        """
-        Shortcut to the audio effects that are
-        available.
-        """
-        self.video: _VideoEffects = _VideoEffects(self)
-        """
-        Shortcut to the video effects that are
-        available.
-        """
-
 @singleton_old
-class OpenglEditor:
+class Effects:
     """
     Singleton instance.
 
```

```diff
@@ -163,6 +126,15 @@ class OpenglEditor:
     the nodes we have available for the
     user.
 
+    This class is to simplify the access to
+    the effect nodes and also to have the
+    single context always available.
+
+    Even though we can have more effects,
+    this class is also the way we expose only
+    the ones we actually want to expose to
+    the user.
+
     The GPU will make the calculations in
     parallel by itself, so we can handle a
     single context to make the nodes share
```
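
The rename folds the removed `_Effects` wrapper into the singleton itself: `Effects` now owns the shared standalone context plus the `audio`/`video` shortcut namespaces directly. A minimal sketch of that singleton-facade pattern, assuming `@singleton_old` simply caches one instance (its real implementation lives in `yta_programming` and is not shown in this diff; `EffectsSketch` is a hypothetical name):

```python
# Hypothetical reduction of the 'Effects' singleton-facade pattern:
# one cached instance, so every effect node shares one GL context.
def singleton(cls):
    instances = {}
    def get_instance(*args, **kwargs):
        # Lazily create the single instance, then always reuse it.
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance

@singleton
class EffectsSketch:
    def __init__(self):
        # Stand-in for moderngl.create_context(standalone = True).
        self.opengl_context = object()

a = EffectsSketch()
b = EffectsSketch()
assert a is b                                  # one instance...
assert a.opengl_context is b.opengl_context    # ...one shared context
```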

```diff
@@ -172,32 +144,33 @@ class OpenglEditor:
     def __init__(
         self
     ):
-        self.
+        self.opengl_context = moderngl.create_context(standalone = True)
         """
-        The context that will be shared
-        the nodes.
+        The opengl context that will be shared
+        by all the opengl nodes.
         """
-        self.
+        self.audio: _AudioEffects = _AudioEffects(self)
         """
-        Shortcut to the effects
+        Shortcut to the audio effects that are
+        available.
+        """
+        self.video: _VideoEffects = _VideoEffects(self)
+        """
+        Shortcut to the video effects that are
+        available.
         """
-        # TODO: I should do something like
-        # editor.effects.waving_node() to create
-        # an instance of that effect node
-
 
 def _create_node(
     node: Union['_AudioNode', '_VideoNode'],
     start: Union[int, float, 'Fraction'],
     end: Union[int, float, 'Fraction', None]
 ):
-    #
-    #
-    #
-
-    ParameterValidator.validate_mandatory_subclass_of('node', node, ['_AudioNode', '_VideoNode'])
+    # We could be other classes in the middle,
+    # because an OpenglNode inherits from
+    # other class
+    ParameterValidator.validate_mandatory_subclass_of('node', node, ['_AudioNode', '_VideoNode', 'OpenglNodeBase'])
 
-    # We have to create a
+    # We have to create a node wrapper with the
     # time range in which it has to be applied
     # to all the frames.
     return TimedNode(
```
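
`_create_node` is the single funnel every effect factory goes through: it validates the node's class and then wraps it in a `TimedNode` that carries the time range. A toy version of that wrapper's contract (the real `TimedNode` lives in `yta_video_opengl.nodes`; `TimedNodeSketch` and its details here are illustrative):

```python
from fractions import Fraction

class TimedNodeSketch:
    # Simplified stand-in for TimedNode: a node plus its [start, end) range.
    def __init__(self, node, start = 0, end = None):
        if end is not None and end < start:
            raise Exception('The "end" parameter provided must be greater or equal to the "start" parameter.')
        self.node = node
        self.start = start
        self.end = end

    def is_within_time(self, t) -> bool:
        # 'end = None' is read as "active until the video ends".
        return self.start <= t and (self.end is None or t < self.end)

wrapped = TimedNodeSketch(node = object(), start = Fraction(1, 2), end = Fraction(5, 2))
print(wrapped.is_within_time(1))     # True
print(wrapped.is_within_time(2.5))   # False: 'end' is exclusive
```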

```diff
@@ -215,6 +188,26 @@ class _EffectStacked:
     and lower when higher value.
     """
 
+    @property
+    def is_audio_effect(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if it is an audio effect
+        or not.
+        """
+        return self.effect.is_audio_node
+
+    @property
+    def is_video_effect(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if it is a video effect
+        or not.
+        """
+        return self.effect.is_video_node
+
     def __init__(
         self,
         effect: TimedNode,
```

```diff
@@ -243,34 +236,66 @@ class EffectsStack:
     """
 
     @property
-    def
+    def video_effects(
         self
     ) -> list[_EffectStacked]:
         """
-        The effects but ordered
-        time moment.
+        The video effects but ordered by 'priority'
+        and 'start' time moment.
         """
-        return sorted(
+        return sorted(
+            [
+                effect
+                for effect in self._effects
+                if effect.is_video_effect
+            ],
+            key = lambda effect: (effect.priority, effect.effect.start)
+        )
 
     @property
-    def
+    def audio_effects(
         self
-    ) -> _EffectStacked:
+    ) -> list[_EffectStacked]:
         """
-        The
-
+        The audio effects but ordered by 'priority'
+        and 'start' time moment.
         """
-        return
+        return sorted(
+            [
+                effect
+                for effect in self._effects
+                if effect.is_audio_effect
+            ],
+            key = lambda effect: (effect.priority, effect.effect.start)
+        )
+
+    @property
+    def lowest_audio_priority(
+        self
+    ) -> int:
+        """
+        The priority of the audio effect with the
+        lowest one, or 0 if no audio effects.
+        """
+        return min(
+            self.audio_effects,
+            key = lambda effect: effect.priority,
+            default = 0
+        )
 
     @property
-    def
+    def lowest_video_priority(
         self
-    ) ->
+    ) -> int:
         """
-        The effect with the
-
+        The priority of the video effect with the
+        lowest one, or 0 if no video effects.
         """
-        return
+        return min(
+            self.video_effects,
+            key = lambda effect: effect.priority,
+            default = 0
+        )
 
     def __init__(
         self
```
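
Both list properties share one composite sort key, `(priority, start)`: a lower `priority` value comes first and ties fall back to the earlier `start`. A self-contained illustration of that ordering (plain dicts stand in for `_EffectStacked`; the names are only borrowed from node classes elsewhere in this package):

```python
effects = [
    {'priority': 2, 'start': 0.0, 'name': 'glitch_rgb'},
    {'priority': 1, 'start': 3.0, 'name': 'waving'},
    {'priority': 1, 'start': 1.0, 'name': 'breathing'},
]
# Same composite key as video_effects/audio_effects above.
ordered = sorted(effects, key = lambda e: (e['priority'], e['start']))
print([e['name'] for e in ordered])
# ['breathing', 'waving', 'glitch_rgb']
```

One detail worth flagging in the new `lowest_*_priority` properties: `min(..., key = ..., default = 0)` returns the matching `_EffectStacked` element itself (not its integer priority) whenever the list is non-empty, so the `-> int` annotation only holds for the empty `default = 0` case.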

```diff
@@ -281,20 +306,35 @@ class EffectsStack:
     have been added to this stack, unordered.
     """
 
-    def
+    def get_audio_effects_for_t(
+        self,
+        t: Union[int, float, 'Fraction']
+    ) -> list[TimedNode]:
+        """
+        Get the audio effects, ordered by priority
+        and the 'start' field, that must be applied
+        within the 't' time moment provided because
+        it is within the [start, end) time range.
+        """
+        return [
+            effect.effect
+            for effect in self.audio_effects
+            if effect.effect.is_within_time(t)
+        ]
+
+    def get_video_effects_for_t(
         self,
         t: Union[int, float, 'Fraction']
     ) -> list[TimedNode]:
         """
-        Get the effects, ordered by priority
-        and the 'start' field, that must be
-
-
-        [start, end) time range.
+        Get the video effects, ordered by priority
+        and the 'start' field, that must be applied
+        within the 't' time moment provided because
+        it is within the [start, end) time range.
         """
         return [
             effect.effect
-            for effect in self.
+            for effect in self.video_effects
             if effect.effect.is_within_time(t)
         ]
 
```
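
The two getters differ only in which sorted stream they scan; each keeps the effects whose half-open `[start, end)` window contains `t`. A minimal, hypothetical rendering of that filter:

```python
def effects_for_t(effects, t):
    # Keep only the effects whose [start, end) window contains 't';
    # end = None is treated as open-ended.
    return [
        e for e in effects
        if e['start'] <= t and (e['end'] is None or t < e['end'])
    ]

stack = [
    {'name': 'chorus', 'start': 0.0, 'end': 2.0},
    {'name': 'volume', 'start': 1.5, 'end': None},
]
print([e['name'] for e in effects_for_t(stack, 1.75)])  # ['chorus', 'volume']
print([e['name'] for e in effects_for_t(stack, 2.0)])   # ['volume']: 'end' is exclusive
```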

```diff
@@ -307,6 +347,7 @@ class EffectsStack:
         Add the provided 'effect' to the stack.
         """
         ParameterValidator.validate_mandatory_instance_of('effect', effect, TimedNode)
+        ParameterValidator.validate_positive_int('priority', priority, do_include_zero = True)
 
         # TODO: What about the same effect added
         # twice during the same time range? Can we
@@ -318,8 +359,16 @@ class EffectsStack:
         # Should we let some effects have the same
         # priority (?)
         priority = (
-            self.
-            if
+            self.lowest_audio_priority + 1
+            if (
+                priority is None and
+                effect.is_audio_node
+            ) else
+            self.lowest_video_priority + 1
+            if (
+                priority is None and
+                effect.is_video_node
+            ) else
             priority
         )
 
```
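
When no explicit `priority` is given, the chained conditional appends the new effect after the current lowest priority of its own stream, leaving the other stream untouched. The same rule as a plain function, assuming the `lowest_*` inputs are the integer priorities the docstrings describe (see the `min(...)` caveat noted above; all names here are illustrative):

```python
def resolve_priority(priority, is_audio, lowest_audio, lowest_video):
    # Mirrors the chained conditional expression in add_effect.
    return (
        lowest_audio + 1
        if priority is None and is_audio else
        lowest_video + 1
        if priority is None and not is_audio else
        priority
    )

print(resolve_priority(None, True, lowest_audio = 3, lowest_video = 0))  # 4
print(resolve_priority(7, False, lowest_audio = 3, lowest_video = 0))    # 7
```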

```diff
@@ -330,4 +379,6 @@ class EffectsStack:
 
         return self
 
+    # TODO: Maybe create an 'apply_video_effects' (?)
+    # TODO: Maybe create an 'apply_audio_effects' (?)
     # TODO: Create 'remove_effect'
```
{yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/nodes/__init__.py

```diff
@@ -1,6 +1,7 @@
 from yta_video_opengl.nodes.video import _VideoNode
 from yta_video_opengl.nodes.audio import _AudioNode
 from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
 from typing import Union
 
 import moderngl
```

```diff
@@ -32,6 +33,30 @@ class TimedNode:
     The 'start' and 'end' values by default
     """
 
+    @property
+    def is_audio_node(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the node (or effect)
+        is an audio effect or not.
+        """
+        # TODO: Is this checking not only the
+        # first level (?)
+        return PythonValidator.is_subclass_of(self.node, _AudioNode)
+
+    @property
+    def is_video_node(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the node (or effect)
+        is a video effect or not.
+        """
+        # TODO: Is this checking not only the
+        # first level (?)
+        return PythonValidator.is_subclass_of(self.node, _VideoNode)
+
     def __init__(
         self,
         node: Union[_VideoNode, _AudioNode],
```
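
`TimedNode` now classifies its wrapped node by class, and the TODO asks whether `is_subclass_of` stops at the first inheritance level. If `PythonValidator.is_subclass_of` behaves like Python's built-in `isinstance`/`issubclass` (an assumption; its implementation lives in `yta_validation` and is not shown in this diff), the whole chain is followed:

```python
# Hypothetical stand-ins for the package's node classes.
class _AudioNodeSketch: pass
class ChorusSketch(_AudioNodeSketch): pass    # one level down
class FancyChorusSketch(ChorusSketch): pass   # two levels down

# The built-ins walk the full inheritance chain, not just the first level.
print(isinstance(FancyChorusSketch(), _AudioNodeSketch))  # True
print(issubclass(FancyChorusSketch, _AudioNodeSketch))    # True
```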

```diff
@@ -47,7 +72,7 @@ class TimedNode:
         ):
             raise Exception('The "end" parameter provided must be greater or equal to the "start" parameter.')
 
-        self.node:
+        self.node: Union[_VideoNode, _AudioNode] = node
         """
         The node we are wrapping and we want to
         apply as a modification of the frame in
```
yta_video_opengl-0.0.26/src/yta_video_opengl/nodes/audio/__init__.py (new file)

```diff
@@ -0,0 +1,133 @@
+"""
+When working with audio frames, we don't need
+to use the GPU because audios are 1D and the
+information can be processed perfectly with
+a library like numpy.
+
+If we need a very intense calculation for an
+audio frame (FFT, convolution, etc.) we can
+use CuPy or some DPS specific libraries, but
+90% is perfectly done with numpy.
+
+If you want to modify huge amounts of audio
+(some seconds at the same time), you can use
+CuPy, that has the same API as numpy but
+working in GPU. Doing this below most of the
+changes would work:
+- `import numpy as np` → `import cupy as np`
+"""
+from abc import abstractmethod
+from typing import Union
+
+import numpy as np
+
+
+class _AudioNode:
+    """
+    Base audio node class to implement a
+    change in an audio frame by using the
+    numpy library.
+    """
+
+    @abstractmethod
+    def process(
+        self,
+        input: np.ndarray,
+        t: float
+    ) -> np.ndarray:
+        """
+        Process the provided audio 'input' that
+        is played on the given 't' time moment.
+        """
+        pass
+
+class VolumeNode(_AudioNode):
+    """
+    Set the volume.
+
+    TODO: Explain properly.
+    """
+
+    def __init__(
+        self,
+        factor_fn
+    ):
+        """
+        factor_fn: function (t, index) -> volume factor
+        """
+        self.factor_f: callable = factor_fn
+
+    def process(
+        self,
+        input: np.ndarray,
+        t: float
+    ) -> np.ndarray:
+        """
+        Process the provided audio 'input' that
+        is played on the given 't' time moment.
+        """
+        factor = self.factor_fn(t, 0)
+
+        samples = input
+        samples *= factor
+
+        # Determine dtype according to format
+        # samples = (
+        #     samples.astype(np.int16)
+        #     # 'fltp', 's16', 's16p'
+        #     if 's16' in input.format.name else
+        #     samples.astype(np.float32)
+        # )
+
+        return samples
+
+class ChorusNode(_AudioNode):
+    """
+    Apply a chorus effect, also called flanger
+    effect.
+
+    TODO: Explain properly
+    """
+
+    def __init__(
+        self,
+        sample_rate: int,
+        depth: int = 0,
+        frequency: float = 0.25
+    ):
+        """
+        The 'sample_rate' must be the sample rate
+        of the audio frame.
+        """
+        self.sample_rate: int = sample_rate
+        self.depth: int = depth
+        self.frequency: float = frequency
+
+    def process(
+        self,
+        input: Union[np.ndarray],
+        t: float
+    ) -> np.ndarray:
+        """
+        Process the provided audio 'input' that
+        is played on the given 't' time moment.
+        """
+        n_samples = input.shape[0]
+        t = np.arange(n_samples) / self.rate
+
+        # Sinusoidal LFO that controls the delay
+        delay = (self.depth / 1000.0) * self.rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * t)))
+        delay = delay.astype(np.int32)
+
+        output = np.zeros_like(input, dtype=np.float32)
+
+        for i in range(n_samples):
+            d = delay[i]
+
+            output[i]= (
+                0.7 * input[i] + 0.7 * input[i - d]
+                if (i - d) >= 0 else
+                input[i]
+            )
+
+        return output
```
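
A few discrepancies are visible in the published file: `ChorusNode.process` reads `self.rate` although `__init__` only sets `self.sample_rate`, `VolumeNode` stores `self.factor_f` but calls `self.factor_fn`, and `process` shadows its `t` argument with the per-sample time axis. A corrected, self-contained sketch of the same LFO-delay chorus under those assumptions (`depth_ms = 15.0` is an illustrative value; the class default `depth = 0` would produce no delay at all):

```python
import numpy as np

def chorus(samples: np.ndarray, sample_rate: int, depth_ms: float = 15.0, frequency: float = 0.25) -> np.ndarray:
    n_samples = samples.shape[0]
    # Per-sample time axis (the published process() overwrites its own 't'
    # argument with this array in the same way).
    t = np.arange(n_samples) / sample_rate
    # Sinusoidal LFO that controls the delay, converted to whole samples.
    delay = ((depth_ms / 1000.0) * sample_rate * 0.5 * (1 + np.sin(2 * np.pi * frequency * t))).astype(np.int32)
    output = np.zeros_like(samples, dtype = np.float32)
    for i in range(n_samples):
        d = delay[i]
        # Mix the dry sample with its delayed copy once enough history exists.
        output[i] = 0.7 * samples[i] + 0.7 * samples[i - d] if i - d >= 0 else samples[i]
    return output

# A 100 ms, 440 Hz test tone at 44.1 kHz:
sr = 44100
tone = np.sin(2 * np.pi * 440 * np.arange(sr // 10) / sr).astype(np.float32)
print(chorus(tone, sr).shape)  # (4410,)
```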
@@ -199,7 +199,7 @@ class OpenglNodeBase(_VideoNode):
|
|
199
199
|
|
200
200
|
def process(
|
201
201
|
self,
|
202
|
-
input: Union[moderngl.Texture,
|
202
|
+
input: Union[moderngl.Texture, np.ndarray]
|
203
203
|
) -> moderngl.Texture:
|
204
204
|
"""
|
205
205
|
Apply the shader to the 'input', that
|
@@ -209,7 +209,7 @@ class OpenglNodeBase(_VideoNode):
|
|
209
209
|
We use and return textures to maintain
|
210
210
|
the process in GPU and optimize it.
|
211
211
|
"""
|
212
|
-
if PythonValidator.is_instance_of(input,
|
212
|
+
if PythonValidator.is_instance_of(input, np.ndarray):
|
213
213
|
# TODO: What about the numpy format (?)
|
214
214
|
input = frame_to_texture(input, self.context)
|
215
215
|
|
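
`frame_to_texture` comes from this package's `yta_video_opengl.utils`; the only signature visible in this diff is `frame_to_texture(input, self.context)`. A minimal moderngl-only sketch of what such an upload involves, so the rest of the chain can stay on the GPU (the frame shape and `components = 3` are assumptions, and running this requires a GPU/EGL-capable environment):

```python
import moderngl
import numpy as np

# One standalone (headless) context, as Effects.__init__ creates.
ctx = moderngl.create_context(standalone = True)

# A dummy 1920x1080 RGB frame; real frames come from the video decoder.
frame = np.zeros((1080, 1920, 3), dtype = np.uint8)

# moderngl expects (width, height) and raw bytes.
texture = ctx.texture(
    (frame.shape[1], frame.shape[0]),
    components = 3,
    data = frame.tobytes()
)
print(texture.size)  # (1920, 1080)
```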
@@ -293,7 +293,7 @@ class WavingNode(OpenglNodeBase):
|
|
293
293
|
# processed by the code
|
294
294
|
def process(
|
295
295
|
self,
|
296
|
-
input: Union[moderngl.Texture, '
|
296
|
+
input: Union[moderngl.Texture, 'np.ndarray'],
|
297
297
|
t: float = 0.0,
|
298
298
|
) -> moderngl.Texture:
|
299
299
|
"""
|
@@ -65,7 +65,7 @@ class BreathingFrame(OpenglNodeBase):
|
|
65
65
|
|
66
66
|
def process(
|
67
67
|
self,
|
68
|
-
input: Union[moderngl.Texture, '
|
68
|
+
input: Union[moderngl.Texture, 'np.ndarray'],
|
69
69
|
t: float = 0.0,
|
70
70
|
) -> moderngl.Texture:
|
71
71
|
"""
|
@@ -165,7 +165,7 @@ class HandheldFrame(OpenglNodeBase):
|
|
165
165
|
|
166
166
|
def process(
|
167
167
|
self,
|
168
|
-
input: Union[moderngl.Texture,
|
168
|
+
input: Union[moderngl.Texture, np.ndarray],
|
169
169
|
t: float = 0.0,
|
170
170
|
) -> moderngl.Texture:
|
171
171
|
"""
|
@@ -351,7 +351,7 @@ class OrbitingFrame(OpenglNodeBase):
|
|
351
351
|
|
352
352
|
def process(
|
353
353
|
self,
|
354
|
-
input: Union[moderngl.Texture,
|
354
|
+
input: Union[moderngl.Texture, np.ndarray],
|
355
355
|
t: float = 0.0,
|
356
356
|
) -> moderngl.Texture:
|
357
357
|
"""
|
@@ -446,7 +446,7 @@ class RotatingInCenterFrame(OpenglNodeBase):
|
|
446
446
|
|
447
447
|
def process(
|
448
448
|
self,
|
449
|
-
input: Union[moderngl.Texture,
|
449
|
+
input: Union[moderngl.Texture, np.ndarray],
|
450
450
|
t: float = 0.0,
|
451
451
|
) -> moderngl.Texture:
|
452
452
|
"""
|
@@ -636,7 +636,7 @@ class StrangeTvFrame(OpenglNodeBase):
|
|
636
636
|
|
637
637
|
def process(
|
638
638
|
self,
|
639
|
-
input: Union[moderngl.Texture,
|
639
|
+
input: Union[moderngl.Texture, np.ndarray],
|
640
640
|
t: float = 0.0,
|
641
641
|
) -> moderngl.Texture:
|
642
642
|
"""
|
@@ -744,7 +744,7 @@ class GlitchRgbFrame(OpenglNodeBase):
|
|
744
744
|
|
745
745
|
def process(
|
746
746
|
self,
|
747
|
-
input: Union[moderngl.Texture,
|
747
|
+
input: Union[moderngl.Texture, np.ndarray],
|
748
748
|
t: float = 0.0,
|
749
749
|
) -> moderngl.Texture:
|
750
750
|
"""
|
{yta_video_opengl-0.0.24 → yta_video_opengl-0.0.26}/src/yta_video_opengl/tests.py

```diff
@@ -606,9 +606,9 @@ def video_modified_stored():
     # size = video.size,
     # first_frame = video.next_frame
     # )
-    from yta_video_opengl.editor import OpenglEditor
+    from yta_video_opengl.effects import Effects
 
-    editor = OpenglEditor()
+    editor = Effects()
     # waving_node_effect = editor.effects.video.waving_node(video.size, amplitude = 0.2, frequency = 9, speed = 3)
     # chorus_effect = editor.effects.audio.chorus(audio.sample_rate)
     # print(waving_node_effect)
```
yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/audio/__init__.py (deleted)

```diff
@@ -1,224 +0,0 @@
-"""
-When working with audio frames, we don't need
-to use the GPU because audios are 1D and the
-information can be processed perfectly with
-a library like numpy.
-
-If we need a very intense calculation for an
-audio frame (FFT, convolution, etc.) we can
-use CuPy or some DPS specific libraries, but
-90% is perfectly done with numpy.
-
-If you want to modify huge amounts of audio
-(some seconds at the same time), you can use
-CuPy, that has the same API as numpy but
-working in GPU. Doing this below most of the
-changes would work:
-- `import numpy as np` → `import cupy as np`
-"""
-from yta_video_opengl.nodes import TimedNode
-from abc import abstractmethod
-from typing import Union
-
-import numpy as np
-
-
-class _AudioNode:
-    """
-    Base audio node class to implement a
-    change in an audio frame by using the
-    numpy library.
-    """
-
-    @abstractmethod
-    def process(
-        self,
-        input: Union['AudioFrame', 'np.ndarray'],
-        t: float
-    ) -> Union['AudioFrame', 'np.ndarray']:
-        """
-        Process the provided audio 'input' that
-        is played on the given 't' time moment.
-        """
-        pass
-
-class VolumeNode(_AudioNode):
-    """
-    Set the volume.
-
-    TODO: Explain properly.
-    """
-
-    def __init__(
-        self,
-        factor_fn
-    ):
-        """
-        factor_fn: function (t, index) -> volume factor
-        """
-        self.factor_f: callable = factor_fn
-
-    def process(
-        self,
-        input: Union['AudioFrame', 'np.ndarray'],
-        t: float
-    ) -> Union['AudioFrame', 'np.ndarray']:
-        """
-        Process the provided audio 'input' that
-        is played on the given 't' time moment.
-        """
-        # if PythonValidator.is_instance_of(input, 'AudioFrame'):
-        #     input = input.to_ndarray().astype(np.float32)
-
-        # TODO: I think we should receive only
-        # numpy arrays and the AudioFrame should
-        # be handled outside, but I'm not sure
-
-        factor = self.factor_fn(t, 0)
-
-        samples = input
-        samples *= factor
-
-        # Determine dtype according to format
-        # samples = (
-        #     samples.astype(np.int16)
-        #     # 'fltp', 's16', 's16p'
-        #     if 's16' in input.format.name else
-        #     samples.astype(np.float32)
-        # )
-
-        # new_frame = AudioFrame.from_ndarray(
-        #     samples,
-        #     format = input.format.name,
-        #     layout = input.layout.name
-        # )
-        # new_frame.sample_rate = input.sample_rate
-        # new_frame.pts = input.pts
-        # new_frame.time_base = input.time_base
-
-        return samples
-
-class ChorusNode(_AudioNode):
-    """
-    Apply a chorus effect, also called flanger
-    effect.
-
-    TODO: Explain properly
-    """
-
-    def __init__(
-        self,
-        sample_rate: int,
-        depth: int = 0,
-        frequency: float = 0.25
-    ):
-        """
-        The 'sample_rate' must be the sample rate
-        of the audio frame.
-        """
-        self.sample_rate: int = sample_rate
-        self.depth: int = depth
-        self.frequency: float = frequency
-
-    def process(
-        self,
-        input: Union['AudioFrame', 'np.ndarray'],
-        t: float
-    ) -> Union['AudioFrame', 'np.ndarray']:
-        """
-        Process the provided audio 'input' that
-        is played on the given 't' time moment.
-        """
-        # TODO: I think we should receive only
-        # numpy arrays and the AudioFrame should
-        # be handled outside, but I'm not sure
-
-        n_samples = input.shape[0]
-        t = np.arange(n_samples) / self.rate
-
-        # Sinusoidal LFO that controls the delay
-        delay = (self.depth / 1000.0) * self.rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * t)))
-        delay = delay.astype(np.int32)
-
-        output = np.zeros_like(input, dtype=np.float32)
-
-        for i in range(n_samples):
-            d = delay[i]
-            if i - d >= 0:
-                output[i] = 0.7 * input[i] + 0.7 * input[i - d]
-            else:
-                output[i] = input[i]
-
-        return output
-
-# TODO: Remove this below
-"""
-Here you have an example. The 'private'
-node class is the modifier, that we don't
-want to expose, and the 'public' class is
-the one that inherits from TimedNode and
-wraps the 'private' class to build the
-functionality.
-"""
-# class VolumeAudioNode(TimedNode):
-#     """
-#     TimedNode to set the audio volume of a video
-#     in a specific frame.
-#     """
-
-#     def __init__(
-#         self,
-#         factor_fn,
-#         start: float = 0.0,
-#         end: Union[float, None] = None
-#     ):
-#         super().__init__(
-#             node = _SetVolumeAudioNode(factor_fn),
-#             start = start,
-#             end = end
-#         )
-
-# class _SetVolumeAudioNode(AudioNode):
-#     """
-#     Audio node to change the volume of an
-#     audio frame.
-#     """
-
-#     def __init__(
-#         self,
-#         factor_fn
-#     ):
-#         """
-#         factor_fn: function (t, index) -> volume factor
-#         """
-#         self.factor_fn = factor_fn
-
-#     def process(
-#         self,
-#         frame: av.AudioFrame,
-#         t: float,
-#     ) -> av.AudioFrame:
-#         # TODO: Why index (?) Maybe 'total_frames'
-#         factor = self.factor_fn(t, 0)
-
-#         samples = frame.to_ndarray().astype(np.float32)
-#         samples *= factor
-
-#         # Determine dtype according to format
-#         # samples = (
-#         #     samples.astype(np.int16)
-#         #     # 'fltp', 's16', 's16p'
-#         #     if 's16' in frame.format.name else
-#         #     samples.astype(np.float32)
-#         # )
-
-#         # new_frame = av.AudioFrame.from_ndarray(
-#         #     samples,
-#         #     format = frame.format.name,
-#         #     layout = frame.layout.name
-#         # )
-#         # new_frame.sample_rate = frame.sample_rate
-#         # new_frame.pts = frame.pts
-#         # new_frame.time_base = frame.time_base
-
-#         return new_frame
```
LICENSE, README.md, src/yta_video_opengl/__init__.py and src/yta_video_opengl/utils.py: files without changes.