yta-video-opengl 0.0.22__tar.gz → 0.0.24__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/PKG-INFO +2 -6
- {yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/pyproject.toml +4 -9
- yta_video_opengl-0.0.24/src/yta_video_opengl/editor.py +333 -0
- {yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/src/yta_video_opengl/nodes/__init__.py +32 -28
- yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/audio/__init__.py +224 -0
- yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/video/__init__.py +31 -0
- yta_video_opengl-0.0.22/src/yta_video_opengl/nodes/video/opengl.py → yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/video/opengl/__init__.py +8 -4
- yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/video/opengl/experimental.py +760 -0
- yta_video_opengl-0.0.24/src/yta_video_opengl/tests.py +772 -0
- yta_video_opengl-0.0.24/src/yta_video_opengl/utils.py +103 -0
- yta_video_opengl-0.0.22/src/yta_video_opengl/audio.py +0 -219
- yta_video_opengl-0.0.22/src/yta_video_opengl/classes.py +0 -1276
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/__init__.py +0 -0
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/frame_combinator.py +0 -204
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/frame_generator.py +0 -319
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/frame_wrapper.py +0 -135
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/timeline.py +0 -571
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/track/__init__.py +0 -500
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/track/media/__init__.py +0 -222
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/track/parts.py +0 -267
- yta_video_opengl-0.0.22/src/yta_video_opengl/complete/track/utils.py +0 -78
- yta_video_opengl-0.0.22/src/yta_video_opengl/media.py +0 -347
- yta_video_opengl-0.0.22/src/yta_video_opengl/nodes/audio/__init__.py +0 -115
- yta_video_opengl-0.0.22/src/yta_video_opengl/nodes/video/__init__.py +0 -5
- yta_video_opengl-0.0.22/src/yta_video_opengl/reader/__init__.py +0 -710
- yta_video_opengl-0.0.22/src/yta_video_opengl/reader/cache/__init__.py +0 -253
- yta_video_opengl-0.0.22/src/yta_video_opengl/reader/cache/audio.py +0 -195
- yta_video_opengl-0.0.22/src/yta_video_opengl/reader/cache/utils.py +0 -48
- yta_video_opengl-0.0.22/src/yta_video_opengl/reader/cache/video.py +0 -113
- yta_video_opengl-0.0.22/src/yta_video_opengl/t.py +0 -233
- yta_video_opengl-0.0.22/src/yta_video_opengl/tests.py +0 -894
- yta_video_opengl-0.0.22/src/yta_video_opengl/utils.py +0 -515
- yta_video_opengl-0.0.22/src/yta_video_opengl/video.py +0 -277
- yta_video_opengl-0.0.22/src/yta_video_opengl/writer.py +0 -278
- {yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/LICENSE +0 -0
- {yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/README.md +0 -0
- {yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/src/yta_video_opengl/__init__.py +0 -0
{yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/PKG-INFO

```diff
@@ -1,20 +1,16 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.22
+Version: 0.0.24
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
 Requires-Python: ==3.9
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
-Requires-Dist: av (>=0.0.1,<19.0.0)
 Requires-Dist: moderngl (>=0.0.1,<9.0.0)
 Requires-Dist: numpy (>=0.0.1,<9.0.0)
-Requires-Dist: pillow (>=0.0.1,<99.0.0)
-Requires-Dist: quicktions (>=0.0.1,<9.0.0)
-Requires-Dist: yta_timer (>0.0.1,<1.0.0)
+Requires-Dist: yta_programming (>=0.0.1,<1.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
-Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
 Description-Content-Type: text/markdown
 
 # Youtube Autonomous Video OpenGL Module
```
{yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "yta-video-opengl"
-version = "0.0.22"
+version = "0.0.24"
 description = "Youtube Autonomous Video OpenGL Module"
 authors = [
     {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -9,16 +9,11 @@ readme = "README.md"
 requires-python = "==3.9"
 dependencies = [
     "yta_validation (>=0.0.1,<1.0.0)",
-    "
-    "yta_timer (>0.0.1,<1.0.0)",
-    "av (>=0.0.1,<19.0.0)",
+    "yta_programming (>=0.0.1,<1.0.0)",
     "moderngl (>=0.0.1,<9.0.0)",
     "numpy (>=0.0.1,<9.0.0)",
-
-    #
-    # and transform into a numpy array to be able
-    # to handle the ImageMedia
-    "pillow (>=0.0.1,<99.0.0)"
+    # TODO: This below is just for testing
+    #"yta_video_pyav (>=0.0.1,<1.0.0)",
 ]
 
 [tool.poetry]
```
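The dependency set shrank to moderngl, numpy, yta_programming and yta_validation (av, pillow, quicktions and the yta timing helpers are gone). A hedged way to verify the published metadata after installing, using only the standard library (`requires` returns the Requires-Dist entries shown in the PKG-INFO diff above):

```python
# Hypothetical sanity check of the new dependency set.
# e.g. after: pip install yta-video-opengl==0.0.24
from importlib.metadata import requires

for dep in requires('yta-video-opengl') or []:
    print(dep)  # expect moderngl, numpy, yta_programming, yta_validation
```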
yta_video_opengl-0.0.24/src/yta_video_opengl/editor.py

```diff
@@ -0,0 +1,333 @@
+from yta_video_opengl.nodes.video.opengl import WavingNode
+from yta_video_opengl.nodes.audio import ChorusNode
+from yta_video_opengl.nodes import Node, TimedNode
+from yta_validation.parameter import ParameterValidator
+from yta_programming.decorators import singleton_old
+from typing import Union
+
+import moderngl
+
+
+class _AudioEffects:
+    """
+    *For internal use only*
+
+    The audio effects that will be available
+    through our internal _Effects class to
+    wrap and make available all the audio
+    effects we want to be available.
+    """
+
+    def __init__(
+        self,
+        effects: '_Effects'
+    ):
+        self._effects: _Effects = effects
+        """
+        The parent instance that includes this
+        class instance as a property.
+        """
+
+    """
+    Here below we expose all the effects
+    we want the users to have available to
+    be used.
+    """
+    def chorus(
+        self,
+        sample_rate: int,
+        depth: int = 0,
+        frequency: float = 0.25,
+        start: Union[int, float, 'Fraction'] = 0,
+        end: Union[int, float, 'Fraction', None] = None
+    ):
+        return _create_node(
+            ChorusNode(
+                sample_rate = sample_rate,
+                depth = depth,
+                frequency = frequency
+            ),
+            start = start,
+            end = end
+        )
+
+    # TODO: Include definitive and tested audio
+    # effects here below
+
+class _VideoEffects:
+    """
+    *For internal use only*
+
+    The video effects that will be available
+    through our internal _Effects class to
+    wrap and make available all the video
+    effects we want to be available.
+    """
+
+    def __init__(
+        self,
+        effects: '_Effects'
+    ):
+        self._effects: _Effects = effects
+        """
+        The parent instance that includes this
+        class instance as a property.
+        """
+
+    """
+    Here below we expose all the effects
+    we want the users to have available to
+    be used.
+    """
+    def waving_node(
+        self,
+        # TODO: Maybe 'frame_size' (?)
+        size: tuple[int, int],
+        amplitude: float = 0.05,
+        frequency: float = 10.0,
+        speed: float = 2.0,
+        start: Union[int, float, 'Fraction'] = 0.0,
+        end: Union[int, float, 'Fraction', None] = None
+    ) -> 'TimedNode':
+        """
+        TODO: Explain this better.
+
+        The 'start' and 'end' time moments are the
+        limits of the time range in which the effect
+        has to be applied to the frames inside that
+        time range. Providing start=0 and end=None
+        will make the effect be applied to every
+        frame.
+        """
+        return _create_node(
+            WavingNode(
+                context = self._effects._opengl_editor.context,
+                size = size,
+                amplitude = amplitude,
+                frequency = frequency,
+                speed = speed
+            ),
+            start = start,
+            end = end
+        )
+
+    # TODO: Include definitive and tested video
+    # effects here below
+
+class _Effects:
+    """
+    *For internal use only*
+
+    Class to be used within the OpenglEditor
+    as a property to simplify the access to
+    the effect nodes and also to have the
+    single context always available through
+    the OpenglEditor instance that is a
+    singleton one.
+
+    Even though we can have more effects,
+    this class is also the way we expose only
+    the ones we actually want to expose to
+    the user.
+    """
+
+    def __init__(
+        self,
+        opengl_editor: 'OpenglEditor'
+    ):
+        self._opengl_editor: OpenglEditor = opengl_editor
+        """
+        The parent instance that includes this
+        class instance as a property.
+        """
+        self.audio: _AudioEffects = _AudioEffects(self)
+        """
+        Shortcut to the audio effects that are
+        available.
+        """
+        self.video: _VideoEffects = _VideoEffects(self)
+        """
+        Shortcut to the video effects that are
+        available.
+        """
+
+@singleton_old
+class OpenglEditor:
+    """
+    Singleton instance.
+
+    It is a singleton instance to have a
+    unique context for all the instances
+    that need it and instantiate this
+    class to obtain it. Here we group all
+    the nodes we have available for the
+    user.
+
+    The GPU will make the calculations in
+    parallel by itself, so we can handle a
+    single context to make the nodes share
+    textures and buffers.
+    """
+
+    def __init__(
+        self
+    ):
+        self.context = moderngl.create_context(standalone = True)
+        """
+        The context that will be shared by all
+        the nodes.
+        """
+        self.effects: _Effects = _Effects(self)
+        """
+        Shortcut to the effects.
+        """
+        # TODO: I should do something like
+        # editor.effects.waving_node() to create
+        # an instance of that effect node
+
+
+def _create_node(
+    node: Union['_AudioNode', '_VideoNode'],
+    start: Union[int, float, 'Fraction'],
+    end: Union[int, float, 'Fraction', None]
+):
+    # The class we pass has to inherit from this
+    # 'Node' class, but could be other classes
+    # in the middle, because an OpenglNode
+    # inherits from other class
+    ParameterValidator.validate_mandatory_subclass_of('node', node, ['_AudioNode', '_VideoNode'])
+
+    # We have to create a Node wrapper with the
+    # time range in which it has to be applied
+    # to all the frames.
+    return TimedNode(
+        node = node,
+        start = start,
+        end = end
+    )
+
+class _EffectStacked:
+    """
+    Class to wrap an effect that will be
+    stacked with a specific priority.
+
+    A lower priority value means a higher
+    priority.
+    """
+
+    def __init__(
+        self,
+        effect: TimedNode,
+        priority: int
+    ):
+        self.effect: TimedNode = effect
+        """
+        The effect to be applied.
+        """
+        self.priority: int = priority
+        """
+        The priority this stacked effect has versus
+        the other stacked effects.
+        """
+
+# TODO: Move to another py file (?)
+class EffectsStack:
+    """
+    Class to include a collection of effects
+    we want to apply on some entity, and that
+    makes it easier to apply them.
+
+    You can use this stack to keep the effects
+    you want to apply on a Media or on the
+    Timeline of your video editor.
+    """
+
+    @property
+    def effects(
+        self
+    ) -> list[_EffectStacked]:
+        """
+        The effects, ordered by priority and by
+        their 'start' time moment.
+        """
+        return sorted(self._effects, key = lambda effect: (effect.priority, effect.effect.start))
+
+    @property
+    def most_priority_effect(
+        self
+    ) -> _EffectStacked:
+        """
+        The effect with the highest priority,
+        that is, the lowest priority value.
+        """
+        return min(self._effects, key = lambda effect: effect.priority)
+
+    @property
+    def less_priority_effect(
+        self
+    ) -> _EffectStacked:
+        """
+        The effect with the lowest priority,
+        that is, the highest priority value.
+        """
+        return max(self._effects, key = lambda effect: effect.priority)
+
+    def __init__(
+        self
+    ):
+        self._effects: list[_EffectStacked] = []
+        """
+        A list containing all the effects that
+        have been added to this stack, unordered.
+        """
+
+    def get_effects_for_t(
+        self,
+        t: Union[int, float, 'Fraction']
+    ) -> list[TimedNode]:
+        """
+        Get the effects, ordered by priority and
+        by the 'start' field, that must be
+        applied at the 't' time moment provided
+        because it is within their [start, end)
+        time range.
+        """
+        return [
+            effect.effect
+            for effect in self.effects
+            if effect.effect.is_within_time(t)
+        ]
+
+    def add_effect(
+        self,
+        effect: TimedNode,
+        priority: Union[int, None] = None
+    ) -> 'EffectsStack':
+        """
+        Add the provided 'effect' to the stack.
+        """
+        ParameterValidator.validate_mandatory_instance_of('effect', effect, TimedNode)
+
+        # TODO: What about the same effect added
+        # twice during the same time range? Can we
+        # allow it? It will be applied twice for
+        # specific 't' time moments but with
+        # different attributes. Is it ok (?)
+
+        # TODO: What if priority is already taken?
+        # Should we let some effects have the same
+        # priority (?)
+        priority = (
+            (self.less_priority_effect.priority + 1 if self._effects else 0)
+            if priority is None else
+            priority
+        )
+
+        self._effects.append(_EffectStacked(
+            effect = effect,
+            priority = priority
+        ))
+
+        return self
+
+# TODO: Create 'remove_effect'
```
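Taken together, `OpenglEditor`, `_Effects` and `EffectsStack` form the new public surface of `editor.py`. A minimal usage sketch, assuming the diff above as-is; the commented frame loop and its `frames` iterable are hypothetical:

```python
# Hedged sketch of the editor.py API introduced above.
from yta_video_opengl.editor import OpenglEditor, EffectsStack

# Singleton: repeated instantiations share one standalone moderngl context
editor = OpenglEditor()

# A TimedNode that applies WavingNode only while 1 <= t < 3
waving = editor.effects.video.waving_node(
    size = (1920, 1080),
    amplitude = 0.05,
    start = 1,
    end = 3
)

stack = EffectsStack()
stack.add_effect(waving)  # priority auto-assigned (0 on an empty stack)

# Hypothetical frame loop: apply only the effects active at each 't'
# for t, frame in frames:
#     for effect in stack.get_effects_for_t(t):
#         frame = effect.process(frame, t)
```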
{yta_video_opengl-0.0.22 → yta_video_opengl-0.0.24}/src/yta_video_opengl/nodes/__init__.py

```diff
@@ -1,31 +1,11 @@
+from yta_video_opengl.nodes.video import _VideoNode
+from yta_video_opengl.nodes.audio import _AudioNode
 from yta_validation.parameter import ParameterValidator
 from typing import Union
-from abc import ABC, abstractmethod
 
-import av
 import moderngl
 
 
-class Node(ABC):
-    """
-    Base class to represent a node, which
-    is an entity that processes frames
-    individually.
-
-    This class must be inherited by any
-    video or audio node class.
-    """
-
-    # TODO: What about the types?
-    @abstractmethod
-    def process(
-        frame: Union[av.VideoFrame, av.AudioFrame, moderngl.Texture],
-        t: float
-        # TODO: Maybe we need 'fps' and 'number_of_frames'
-        # to calculate progressions or similar...
-    ) -> Union[av.VideoFrame, av.AudioFrame, moderngl.Texture]:
-        pass
-
 class TimedNode:
     """
     Class to represent a Node wrapper to
@@ -54,9 +34,9 @@ class TimedNode:
 
     def __init__(
         self,
-        node: Node,
-        start: float = 0.0,
-        end: Union[float, None] = None
+        node: Union[_VideoNode, _AudioNode],
+        start: Union[int, float, 'Fraction'] = 0.0,
+        end: Union[int, float, 'Fraction', None] = None
     ):
         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
         ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
@@ -84,6 +64,28 @@ class TimedNode:
         stop being applied (excluding it).
         """
 
+    def _get_t(
+        self,
+        t: Union[int, float, 'Fraction']
+    ) -> float:
+        """
+        Obtain the 't' time moment relative to the
+        effect duration.
+
+        Imagine `start=3` and `end=5`, and we receive
+        a `t=4`. It is inside the range, so we have
+        to apply the effect, but as the effect
+        lasts from second 3 to second 5 (`duration=2`),
+        the `t=4` is actually a `t=1` for the effect
+        because it is the time elapsed since the
+        effect started being applied, which was at
+        second 3.
+
+        The formula:
+        - `t - self.start`
+        """
+        return t - self.start
+
     def is_within_time(
         self,
         t: float
@@ -91,7 +93,9 @@ class TimedNode:
         """
         Flag to indicate if the 't' time moment provided
        is in the range of this TimedNode instance,
-        which means
+        which means between the 'start' and the 'end'.
+
+        The formula:
         - `start <= t < end`
         """
         return (
@@ -102,7 +106,7 @@ class TimedNode:
 
     def process(
         self,
-        frame: Union[av.VideoFrame, av.AudioFrame, moderngl.Texture],
+        frame: Union['VideoFrame', 'AudioFrame', moderngl.Texture],
         t: float
         # TODO: Maybe we need 'fps' and 'number_of_frames'
         # to calculate progressions or similar...
@@ -113,7 +117,7 @@ class TimedNode:
         instance.
         """
         return (
-            self.node.process(frame, t)
+            self.node.process(frame, self._get_t(t))
             if self.is_within_time(t) else
             frame
         )
```
yta_video_opengl-0.0.24/src/yta_video_opengl/nodes/audio/__init__.py

```diff
@@ -0,0 +1,224 @@
+"""
+When working with audio frames, we don't need
+to use the GPU because audio is 1D and the
+information can be processed perfectly with
+a library like numpy.
+
+If we need a very intense calculation for an
+audio frame (FFT, convolution, etc.) we can
+use CuPy or some DSP-specific libraries, but
+90% is perfectly done with numpy.
+
+If you want to modify huge amounts of audio
+(some seconds at the same time), you can use
+CuPy, which has the same API as numpy but
+works on the GPU. With the change below most
+of the code would keep working:
+- `import numpy as np` → `import cupy as np`
+"""
+from yta_video_opengl.nodes import TimedNode
+from abc import abstractmethod
+from typing import Union
+
+import numpy as np
+
+
+class _AudioNode:
+    """
+    Base audio node class to implement a
+    change in an audio frame by using the
+    numpy library.
+    """
+
+    @abstractmethod
+    def process(
+        self,
+        input: Union['AudioFrame', 'np.ndarray'],
+        t: float
+    ) -> Union['AudioFrame', 'np.ndarray']:
+        """
+        Process the provided audio 'input' that
+        is played at the given 't' time moment.
+        """
+        pass
+
+class VolumeNode(_AudioNode):
+    """
+    Set the volume.
+
+    TODO: Explain properly.
+    """
+
+    def __init__(
+        self,
+        factor_fn
+    ):
+        """
+        factor_fn: function (t, index) -> volume factor
+        """
+        self.factor_fn: callable = factor_fn
+
+    def process(
+        self,
+        input: Union['AudioFrame', 'np.ndarray'],
+        t: float
+    ) -> Union['AudioFrame', 'np.ndarray']:
+        """
+        Process the provided audio 'input' that
+        is played at the given 't' time moment.
+        """
+        # if PythonValidator.is_instance_of(input, 'AudioFrame'):
+        #     input = input.to_ndarray().astype(np.float32)
+
+        # TODO: I think we should receive only
+        # numpy arrays and the AudioFrame should
+        # be handled outside, but I'm not sure
+
+        factor = self.factor_fn(t, 0)
+
+        # Note: this multiplies the provided buffer in place
+        samples = input
+        samples *= factor
+
+        # Determine dtype according to format
+        # samples = (
+        #     samples.astype(np.int16)
+        #     # 'fltp', 's16', 's16p'
+        #     if 's16' in input.format.name else
+        #     samples.astype(np.float32)
+        # )
+
+        # new_frame = AudioFrame.from_ndarray(
+        #     samples,
+        #     format = input.format.name,
+        #     layout = input.layout.name
+        # )
+        # new_frame.sample_rate = input.sample_rate
+        # new_frame.pts = input.pts
+        # new_frame.time_base = input.time_base
+
+        return samples
+
+class ChorusNode(_AudioNode):
+    """
+    Apply a chorus effect, also called flanger
+    effect.
+
+    TODO: Explain properly
+    """
+
+    def __init__(
+        self,
+        sample_rate: int,
+        depth: int = 0,
+        frequency: float = 0.25
+    ):
+        """
+        The 'sample_rate' must be the sample rate
+        of the audio frame.
+        """
+        self.sample_rate: int = sample_rate
+        self.depth: int = depth
+        self.frequency: float = frequency
+
+    def process(
+        self,
+        input: Union['AudioFrame', 'np.ndarray'],
+        t: float
+    ) -> Union['AudioFrame', 'np.ndarray']:
+        """
+        Process the provided audio 'input' that
+        is played at the given 't' time moment.
+        """
+        # TODO: I think we should receive only
+        # numpy arrays and the AudioFrame should
+        # be handled outside, but I'm not sure
+
+        n_samples = input.shape[0]
+        sample_times = np.arange(n_samples) / self.sample_rate
+
+        # Sinusoidal LFO that controls the delay
+        delay = (self.depth / 1000.0) * self.sample_rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * sample_times)))
+        delay = delay.astype(np.int32)
+
+        output = np.zeros_like(input, dtype = np.float32)
+
+        for i in range(n_samples):
+            d = delay[i]
+            if i - d >= 0:
+                output[i] = 0.7 * input[i] + 0.7 * input[i - d]
+            else:
+                output[i] = input[i]
+
+        return output
+
+# TODO: Remove this below
+"""
+Here you have an example. The 'private'
+node class is the modifier, that we don't
+want to expose, and the 'public' class is
+the one that inherits from TimedNode and
+wraps the 'private' class to build the
+functionality.
+"""
+# class VolumeAudioNode(TimedNode):
+#     """
+#     TimedNode to set the audio volume of a video
+#     in a specific frame.
+#     """
+
+#     def __init__(
+#         self,
+#         factor_fn,
+#         start: float = 0.0,
+#         end: Union[float, None] = None
+#     ):
+#         super().__init__(
+#             node = _SetVolumeAudioNode(factor_fn),
+#             start = start,
+#             end = end
+#         )
+
+# class _SetVolumeAudioNode(AudioNode):
+#     """
+#     Audio node to change the volume of an
+#     audio frame.
+#     """
+
+#     def __init__(
+#         self,
+#         factor_fn
+#     ):
+#         """
+#         factor_fn: function (t, index) -> volume factor
+#         """
+#         self.factor_fn = factor_fn
+
+#     def process(
+#         self,
+#         frame: av.AudioFrame,
+#         t: float,
+#     ) -> av.AudioFrame:
+#         # TODO: Why index (?) Maybe 'total_frames'
+#         factor = self.factor_fn(t, 0)
+
+#         samples = frame.to_ndarray().astype(np.float32)
+#         samples *= factor
+
+#         # Determine dtype according to format
+#         samples = (
+#             samples.astype(np.int16)
+#             # 'fltp', 's16', 's16p'
+#             if 's16' in frame.format.name else
+#             samples.astype(np.float32)
+#         )
+
+#         new_frame = av.AudioFrame.from_ndarray(
+#             samples,
+#             format = frame.format.name,
+#             layout = frame.layout.name
+#         )
+#         new_frame.sample_rate = frame.sample_rate
+#         new_frame.pts = frame.pts
+#         new_frame.time_base = frame.time_base
+
+#         return new_frame
```