yta-video-opengl 0.0.23__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,15 +16,13 @@ working in GPU. Doing this below most of the
  changes would work:
  - `import numpy as np` → `import cupy as np`
  """
- from yta_video_opengl.nodes import TimedNode
  from abc import abstractmethod
  from typing import Union

  import numpy as np
- import av


- class AudioNode:
+ class _AudioNode:
      """
      Base audio node class to implement a
      change in an audio frame by using the
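
The module docstring in the hunk above notes that most of this audio code would also run on the GPU by swapping the NumPy import for CuPy. A minimal sketch of that kind of drop-in alias, assuming CuPy is installed separately (the try/except fallback is illustrative and not part of the package):

    # Hypothetical alias swap: use CuPy when available, NumPy otherwise.
    try:
        import cupy as np   # GPU-backed, largely NumPy-compatible API
    except ImportError:
        import numpy as np  # CPU fallback

    # The same array code then runs on either backend.
    samples = np.zeros(1024, dtype = np.float32)
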
@@ -34,82 +32,192 @@ class AudioNode:
      @abstractmethod
      def process(
          self,
-         frame: av.AudioFrame,
+         input: Union['AudioFrame', 'np.ndarray'],
          t: float
-     ):
+     ) -> Union['AudioFrame', 'np.ndarray']:
          """
-         Process the provided audio 'frame' that
+         Process the provided audio 'input' that
          is played on the given 't' time moment.
          """
          pass

- """
- Here you have an example. The 'private'
- node class is the modifier, that we don't
- want to expose, and the 'public' class is
- the one that inherits from TimedNode and
- wraps the 'private' class to build the
- functionality.
- """
- class VolumeAudioNode(TimedNode):
+ class VolumeNode(_AudioNode):
      """
-     TimedNode to set the audio volume of a video
-     in a specific frame.
+     Set the volume.
+
+     TODO: Explain properly.
      """

      def __init__(
          self,
-         factor_fn,
-         start: float = 0.0,
-         end: Union[float, None] = None
+         factor_fn
      ):
-         super().__init__(
-             node = _SetVolumeAudioNode(factor_fn),
-             start = start,
-             end = end
-         )
+         """
+         factor_fn: function (t, index) -> volume factor
+         """
+         self.factor_fn: callable = factor_fn
+
+     def process(
+         self,
+         input: Union['AudioFrame', 'np.ndarray'],
+         t: float
+     ) -> Union['AudioFrame', 'np.ndarray']:
+         """
+         Process the provided audio 'input' that
+         is played on the given 't' time moment.
+         """
+         # if PythonValidator.is_instance_of(input, 'AudioFrame'):
+         #     input = input.to_ndarray().astype(np.float32)

- class _SetVolumeAudioNode(AudioNode):
+         # TODO: I think we should receive only
+         # numpy arrays and the AudioFrame should
+         # be handled outside, but I'm not sure
+
+         factor = self.factor_fn(t, 0)
+
+         samples = input
+         samples *= factor
+
+         # Determine dtype according to format
+         # samples = (
+         #     samples.astype(np.int16)
+         #     # 'fltp', 's16', 's16p'
+         #     if 's16' in input.format.name else
+         #     samples.astype(np.float32)
+         # )
+
+         # new_frame = AudioFrame.from_ndarray(
+         #     samples,
+         #     format = input.format.name,
+         #     layout = input.layout.name
+         # )
+         # new_frame.sample_rate = input.sample_rate
+         # new_frame.pts = input.pts
+         # new_frame.time_base = input.time_base
+
+         return samples
+
+ class ChorusNode(_AudioNode):
      """
-     Audio node to change the volume of an
-     audio frame.
+     Apply a chorus effect, also called flanger
+     effect.
+
+     TODO: Explain properly
      """

      def __init__(
          self,
-         factor_fn
+         sample_rate: int,
+         depth: int = 0,
+         frequency: float = 0.25
      ):
          """
-         factor_fn: function (t, index) -> volume factor
+         The 'sample_rate' must be the sample rate
+         of the audio frame.
          """
-         self.factor_fn = factor_fn
+         self.sample_rate: int = sample_rate
+         self.depth: int = depth
+         self.frequency: float = frequency

      def process(
          self,
-         frame: av.AudioFrame,
-         t: float,
-     ) -> av.AudioFrame:
-         # TODO: Why index (?) Maybe 'total_frames'
-         factor = self.factor_fn(t, 0)
+         input: Union['AudioFrame', 'np.ndarray'],
+         t: float
+     ) -> Union['AudioFrame', 'np.ndarray']:
+         """
+         Process the provided audio 'input' that
+         is played on the given 't' time moment.
+         """
+         # TODO: I think we should receive only
+         # numpy arrays and the AudioFrame should
+         # be handled outside, but I'm not sure

-         samples = frame.to_ndarray().astype(np.float32)
-         samples *= factor
+         n_samples = input.shape[0]
+         lfo_t = np.arange(n_samples) / self.sample_rate

-         # Determine dtype according to format
-         samples = (
-             samples.astype(np.int16)
-             # 'fltp', 's16', 's16p'
-             if 's16' in frame.format.name else
-             samples.astype(np.float32)
-         )
-
-         new_frame = av.AudioFrame.from_ndarray(
-             samples,
-             format = frame.format.name,
-             layout = frame.layout.name
-         )
-         new_frame.sample_rate = frame.sample_rate
-         new_frame.pts = frame.pts
-         new_frame.time_base = frame.time_base
-
-         return new_frame
+         # Sinusoidal LFO that controls the delay
+         delay = (self.depth / 1000.0) * self.sample_rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * lfo_t)))
+         delay = delay.astype(np.int32)
+
+         output = np.zeros_like(input, dtype = np.float32)
+
+         for i in range(n_samples):
+             d = delay[i]
+             if i - d >= 0:
+                 output[i] = 0.7 * input[i] + 0.7 * input[i - d]
+             else:
+                 output[i] = input[i]
+
+         return output
+
+ # TODO: Remove this below
+ """
+ Here you have an example. The 'private'
+ node class is the modifier, that we don't
+ want to expose, and the 'public' class is
+ the one that inherits from TimedNode and
+ wraps the 'private' class to build the
+ functionality.
+ """
+ # class VolumeAudioNode(TimedNode):
+ #     """
+ #     TimedNode to set the audio volume of a video
+ #     in a specific frame.
+ #     """
+
+ #     def __init__(
+ #         self,
+ #         factor_fn,
+ #         start: float = 0.0,
+ #         end: Union[float, None] = None
+ #     ):
+ #         super().__init__(
+ #             node = _SetVolumeAudioNode(factor_fn),
+ #             start = start,
+ #             end = end
+ #         )
+
+ # class _SetVolumeAudioNode(AudioNode):
+ #     """
+ #     Audio node to change the volume of an
+ #     audio frame.
+ #     """
+
+ #     def __init__(
+ #         self,
+ #         factor_fn
+ #     ):
+ #         """
+ #         factor_fn: function (t, index) -> volume factor
+ #         """
+ #         self.factor_fn = factor_fn
+
+ #     def process(
+ #         self,
+ #         frame: av.AudioFrame,
+ #         t: float,
+ #     ) -> av.AudioFrame:
+ #         # TODO: Why index (?) Maybe 'total_frames'
+ #         factor = self.factor_fn(t, 0)
+
+ #         samples = frame.to_ndarray().astype(np.float32)
+ #         samples *= factor
+
+ #         # Determine dtype according to format
+ #         samples = (
+ #             samples.astype(np.int16)
+ #             # 'fltp', 's16', 's16p'
+ #             if 's16' in frame.format.name else
+ #             samples.astype(np.float32)
+ #         )
+
+ #         new_frame = av.AudioFrame.from_ndarray(
+ #             samples,
+ #             format = frame.format.name,
+ #             layout = frame.layout.name
+ #         )
+ #         new_frame.sample_rate = frame.sample_rate
+ #         new_frame.pts = frame.pts
+ #         new_frame.time_base = frame.time_base
+
+ #         return new_frame
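
A short usage sketch of the VolumeNode and ChorusNode classes added above, assuming the numpy-only path that the TODO comments lean towards (a mono float32 buffer; the constant-factor lambda and the 44100 Hz sample rate are example values, not package defaults):

    import numpy as np

    # Halve the volume regardless of time or frame index
    volume = VolumeNode(factor_fn = lambda t, index: 0.5)
    # 20 ms of delay modulated by a 0.25 Hz LFO
    chorus = ChorusNode(sample_rate = 44100, depth = 20, frequency = 0.25)

    samples = np.random.uniform(-1.0, 1.0, 44100).astype(np.float32)
    samples = volume.process(samples, t = 0.0)
    samples = chorus.process(samples, t = 0.0)

Note that VolumeNode.process scales the buffer in place before returning it, so callers that still need the unmodified samples should pass in a copy.
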
@@ -2,4 +2,34 @@
  Working with video frames has to be done
  with nodes that use OpenGL because this
  way, by using the GPU, is the best way.
- """
+ """
+ from typing import Union
+ from abc import ABC, abstractmethod
+
+ import moderngl
+
+
+ # TODO: Expose all the nodes here and move
+ # this abstract to an abstract file (?)
+
+ class _VideoNode(ABC):
+     """
+     Base abstract class to represent a video
+     node, which is an entity that processes
+     video frames individually.
+
+     This class must be inherited by any video
+     node class.
+     """
+
+     # TODO: What about the types?
+     # TODO: Should we expect pyav frames (?)
+     @abstractmethod
+     def process(
+         self,
+         frame: Union['VideoFrame', moderngl.Texture, 'np.ndarray'],
+         t: float
+         # TODO: Maybe we need 'fps' and 'number_of_frames'
+         # to calculate progressions or similar...
+     ) -> Union['VideoFrame', moderngl.Texture, 'np.ndarray']:
+         pass
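
The new _VideoNode class only fixes the process(frame, t) contract, so a concrete node decides which member of the Union it actually handles. A minimal sketch of a subclass taking the ndarray branch (the class name and the brightness logic are hypothetical, not part of the package):

    import numpy as np

    class _BrightnessNode(_VideoNode):
        """
        Scale the brightness of an RGB uint8 frame.
        """

        def __init__(self, factor: float = 1.2):
            self.factor: float = factor

        def process(self, frame: 'np.ndarray', t: float) -> 'np.ndarray':
            # Work in float to avoid uint8 overflow, then clip back to [0, 255]
            return np.clip(frame.astype(np.float32) * self.factor, 0, 255).astype(np.uint8)
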
@@ -1,5 +1,5 @@
  from yta_video_opengl.utils import frame_to_texture, get_fullscreen_quad_vao
- from yta_video_opengl.nodes import Node
+ from yta_video_opengl.nodes.video import _VideoNode
  from yta_validation.parameter import ParameterValidator
  from yta_validation import PythonValidator
  from abc import abstractmethod
@@ -123,7 +123,7 @@ class _Uniforms:
          for key, value in self.uniforms.items():
              print(f'"{key}": {str(value)}')

- class OpenglNode(Node):
+ class OpenglNodeBase(_VideoNode):
      """
      The basic class of a node to manipulate frames
      as opengl textures. This node will process the
@@ -225,7 +225,7 @@ class OpenglNode(Node):
 
          return self.output_tex

- class WavingNode(OpenglNode):
+ class WavingNode(OpenglNodeBase):
      """
      Just an example, without the shaders code
      actually, to indicate that we can use
@@ -306,4 +306,8 @@ class WavingNode(OpenglNode):
          """
          self.uniforms.set('time', t)

-         return super().process(input)
+         return super().process(input)
+
+ # TODO: Add more definitive effects
+
+
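
The remaining hunks only rebase the OpenGL nodes (Node → _VideoNode, OpenglNode → OpenglNodeBase, WavingNode on OpenglNodeBase), so new effect nodes would presumably keep the pattern WavingNode shows above: push their uniforms and delegate to the base class. A rough sketch of that shape (the class name is hypothetical; the 'time' uniform and the delegation call mirror the WavingNode hunk, while the constructor and shader setup are omitted because they are not visible in this diff):

    class PulsingNode(OpenglNodeBase):
        """
        Hypothetical effect node that only forwards the
        current time to the shader as a 'time' uniform.
        """

        def process(self, input, t: float):
            self.uniforms.set('time', t)

            return super().process(input)
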