yta-video-opengl 0.0.28__tar.gz → 0.0.30__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/PKG-INFO +1 -1
  2. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/pyproject.toml +1 -3
  3. yta_video_opengl-0.0.30/src/yta_video_opengl/__init__.py +3 -0
  4. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/src/yta_video_opengl/effects.py +33 -2
  5. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/src/yta_video_opengl/nodes/__init__.py +17 -2
  6. yta_video_opengl-0.0.30/src/yta_video_opengl/nodes/audio/__init__.py +68 -0
  7. yta_video_opengl-0.0.30/src/yta_video_opengl/nodes/audio/abstract.py +27 -0
  8. yta_video_opengl-0.0.30/src/yta_video_opengl/nodes/audio/experimental.py +60 -0
  9. yta_video_opengl-0.0.30/src/yta_video_opengl/nodes/video/__init__.py +14 -0
  10. yta_video_opengl-0.0.28/src/yta_video_opengl/nodes/video/__init__.py → yta_video_opengl-0.0.30/src/yta_video_opengl/nodes/video/abstract.py +2 -6
  11. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/src/yta_video_opengl/nodes/video/opengl/__init__.py +13 -1
  12. yta_video_opengl-0.0.28/src/yta_video_opengl/__init__.py +0 -11
  13. yta_video_opengl-0.0.28/src/yta_video_opengl/nodes/audio/__init__.py +0 -133
  14. yta_video_opengl-0.0.28/src/yta_video_opengl/tests.py +0 -772
  15. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/LICENSE +0 -0
  16. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/README.md +0 -0
  17. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/src/yta_video_opengl/nodes/video/opengl/experimental.py +0 -0
  18. {yta_video_opengl-0.0.28 → yta_video_opengl-0.0.30}/src/yta_video_opengl/utils.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.28
+ Version: 0.0.30
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com
@@ -1,6 +1,6 @@
  [project]
  name = "yta-video-opengl"
- version = "0.0.28"
+ version = "0.0.30"
  description = "Youtube Autonomous Video OpenGL Module"
  authors = [
      {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -12,8 +12,6 @@ dependencies = [
      "yta_programming (>=0.0.1,<1.0.0)",
      "moderngl (>=0.0.1,<9.0.0)",
      "numpy (>=0.0.1,<9.0.0)",
-     # TODO: This below is just for testing
-     #"yta_video_pyav (>=0.0.1,<1.0.0)",
  ]
 
  [tool.poetry]
@@ -0,0 +1,3 @@
+ """
+ Module to include video handling with OpenGL.
+ """
@@ -1,5 +1,5 @@
- from yta_video_opengl.nodes.video.opengl import WavingNode
- from yta_video_opengl.nodes.audio import ChorusNode
+ from yta_video_opengl.nodes.video import WavingNode
+ from yta_video_opengl.nodes.audio import ChorusNode, VolumeNode
  from yta_video_opengl.nodes import TimedNode
  from yta_video_opengl.utils import texture_to_frame
  from yta_validation.parameter import ParameterValidator
@@ -52,6 +52,20 @@ class _AudioEffects:
              start = start,
              end = end
          )
+
+     def volume(
+         self,
+         factor: callable,
+         start: Union[int, float, 'Fraction'] = 0,
+         end: Union[int, float, 'Fraction', None] = None
+     ):
+         return _create_node(
+             VolumeNode(
+                 factor_fn = factor
+             ),
+             start = start,
+             end = end
+         )
 
      # TODO: Include definitive and tested audio
      # effects here below
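
A hedged usage sketch for the new `volume` effect: the `Effects()` entry point and the `effects.audio` accessor follow the pattern visible in the removed tests.py further below, so treat the exact wiring as an assumption.

from yta_video_opengl.effects import Effects

editor = Effects()

# Halve the volume between t = 0 and t = 2; the (t, index)
# signature matches the 'factor_fn' docstring in VolumeNode
volume_effect = editor.effects.audio.volume(
    factor = lambda t, index: 0.5,
    start = 0,
    end = 2
)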
@@ -237,6 +251,23 @@ class EffectsStack:
      Timeline of your video editor.
      """
 
+     @property
+     def copy(
+         self
+     ) -> 'EffectsStack':
+         """
+         Get a copy of this instance.
+         """
+         effects_stack = EffectsStack()
+
+         for effect_stacked in self._effects:
+             effects_stack.add_effect(
+                 effect = effect_stacked.effect.copy,
+                 priority = effect_stacked.priority
+             )
+
+         return effects_stack
+
      @property
      def video_effects(
          self
@@ -1,5 +1,5 @@
- from yta_video_opengl.nodes.video import _VideoNode
- from yta_video_opengl.nodes.audio import _AudioNode
+ from yta_video_opengl.nodes.video.abstract import _VideoNode
+ from yta_video_opengl.nodes.audio.abstract import _AudioNode
  from yta_validation.parameter import ParameterValidator
  from yta_validation import PythonValidator
  from typing import Union
@@ -33,6 +33,21 @@ class TimedNode:
      The 'start' and 'end' values by default
      """
 
+     @property
+     def copy(
+         self
+     ) -> 'TimedNode':
+         """
+         Get a copy of this instance.
+         """
+         return TimedNode(
+             # TODO: If we need a copy here, how do
+             # I do it as it is based on subclasses (?)
+             node = self.node,
+             start = self.start,
+             end = self.end
+         )
+
      @property
      def is_audio_node(
          self
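
The TODO above asks how to copy `node` when it can be any `_VideoNode` or `_AudioNode` subclass. A minimal sketch of one answer (an illustration, not package code): `copy.deepcopy` preserves the concrete subclass without `TimedNode` having to know it, provided the node holds no un-copyable GPU state.

import copy

class TimedNodeSketch:
    # Hypothetical stand-in for TimedNode, showing a
    # subclass-agnostic copy
    def __init__(self, node, start, end):
        self.node = node
        self.start = start
        self.end = end

    @property
    def copy(self):
        # deepcopy clones whatever subclass 'node' actually is;
        # nodes holding a moderngl context would instead need a
        # custom __deepcopy__ or their own 'copy' property
        return type(self)(copy.deepcopy(self.node), self.start, self.end)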
@@ -0,0 +1,68 @@
+ """
+ When working with audio frames, we don't need
+ to use the GPU because audio is 1D and the
+ information can be processed perfectly with
+ a library like numpy.
+
+ If we need a very intense calculation for an
+ audio frame (FFT, convolution, etc.) we can
+ use CuPy or some DSP-specific libraries, but
+ 90% of it can be done perfectly with numpy.
+
+ If you want to modify huge amounts of audio
+ (several seconds at a time), you can use
+ CuPy, which has the same API as numpy but
+ runs on the GPU. With the change below, most
+ of the code would keep working:
+ - `import numpy as np` → `import cupy as np`
+ """
+ from yta_video_opengl.nodes.audio.abstract import _AudioNode
+ from yta_video_opengl.nodes.audio.experimental import ChorusNode
+
+ import numpy as np
+
+
+ class VolumeNode(_AudioNode):
+     """
+     Set the volume.
+
+     TODO: Explain properly.
+     """
+
+     def __init__(
+         self,
+         factor_fn
+     ):
+         """
+         factor_fn: function (t, index) -> volume factor
+         """
+         self.factor_fn: callable = factor_fn
+
+     def process(
+         self,
+         input: np.ndarray,
+         t: float
+     ) -> np.ndarray:
+         """
+         Process the provided audio 'input' that
+         is played on the given 't' time moment.
+         """
+         factor = self.factor_fn(t, 0)
+
+         samples = input
+         samples *= factor
+
+         # Determine dtype according to format
+         # samples = (
+         #     samples.astype(np.int16)
+         #     # 'fltp', 's16', 's16p'
+         #     if 's16' in input.format.name else
+         #     samples.astype(np.float32)
+         # )
+
+         return samples
+
+ __all__ = [
+     'VolumeNode',
+     'ChorusNode'
+ ]
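
A minimal usage sketch for the new `VolumeNode`, assuming a mono float32 frame; the `fade` factor function here is hypothetical:

import numpy as np

from yta_video_opengl.nodes.audio import VolumeNode

# Hypothetical factor function: linear fade-in over the first second
def fade(t, index):
    return min(t, 1.0)

node = VolumeNode(factor_fn = fade)

# One float32 audio frame played at t = 0.5s; note that
# 'samples *= factor' scales the input buffer in place
frame = np.random.uniform(-1.0, 1.0, 1024).astype(np.float32)
scaled = node.process(frame, t = 0.5)  # scaled by fade(0.5, 0) == 0.5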
@@ -0,0 +1,27 @@
+ """
+ Module to include the abstract classes
+ and avoid import loops.
+ """
+ from abc import ABC, abstractmethod
+
+ import numpy as np
+
+
+ class _AudioNode(ABC):
+     """
+     Base audio node class to implement a
+     change in an audio frame by using the
+     numpy library.
+     """
+
+     @abstractmethod
+     def process(
+         self,
+         input: np.ndarray,
+         t: float
+     ) -> np.ndarray:
+         """
+         Process the provided audio 'input' that
+         is played on the given 't' time moment.
+         """
+         pass
@@ -0,0 +1,60 @@
+ from yta_video_opengl.nodes.audio.abstract import _AudioNode
+ from typing import Union
+
+ import numpy as np
+
+
+ class ChorusNode(_AudioNode):
+     """
+     Apply a chorus effect, also called a flanger
+     effect.
+
+     TODO: This method is being applied but the
+     effect is not noticeable, so it is
+     experimental for now.
+
+     TODO: Explain properly
+     """
+
+     def __init__(
+         self,
+         sample_rate: int,
+         depth: int = 0,
+         frequency: float = 0.25
+     ):
+         """
+         The 'sample_rate' must be the sample rate
+         of the audio frame.
+         """
+         self.sample_rate: int = sample_rate
+         self.depth: int = depth
+         self.frequency: float = frequency
+
+     def process(
+         self,
+         input: Union[np.ndarray],
+         t: float
+     ) -> np.ndarray:
+         """
+         Process the provided audio 'input' that
+         is played on the given 't' time moment.
+         """
+         n_samples = input.shape[0]
+         t = np.arange(n_samples) / self.sample_rate
+
+         # Sinusoidal LFO that controls the delay
+         delay = (self.depth / 1000.0) * self.sample_rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * t)))
+         delay = delay.astype(np.int32)
+
+         output = np.zeros_like(input, dtype=np.float32)
+
+         for i in range(n_samples):
+             d = delay[i]
+
+             output[i] = (
+                 0.7 * input[i] + 0.7 * input[i - d]
+                 if (i - d) >= 0 else
+                 input[i]
+             )
+
+         return output
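
The per-sample Python loop above is the likely bottleneck; a vectorized sketch of the same dry/wet mix (my assumption, not package code) stays entirely in numpy:

import numpy as np

def chorus_vectorized(input, sample_rate, depth = 5, frequency = 0.25):
    n_samples = input.shape[0]
    t = np.arange(n_samples) / sample_rate

    # Same sinusoidal LFO as ChorusNode.process, in samples
    delay = ((depth / 1000.0) * sample_rate
             * 0.5 * (1 + np.sin(2 * np.pi * frequency * t))).astype(np.int32)

    indices = np.arange(n_samples) - delay
    valid = indices >= 0

    # Start from the dry signal, mix in the delayed copy where valid
    output = input.astype(np.float32)
    output[valid] = 0.7 * input[valid] + 0.7 * input[indices[valid]]
    return output

Note that with the class default depth = 0 the delayed tap collapses onto the dry sample, so the node only applies a flat 1.4x gain, which would explain the "not noticeable" TODO above.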
@@ -0,0 +1,14 @@
+ """
+ Video frames have to be processed with
+ nodes that use OpenGL, since running on
+ the GPU is the fastest way.
+ """
+ from yta_video_opengl.nodes.video.opengl import WavingNode
+ from yta_video_opengl.nodes.video.opengl.experimental import RotatingInCenterFrame
+
+ # Expose all the nodes we have
+
+ __all__ = [
+     'WavingNode',
+     'RotatingInCenterFrame'
+ ]
@@ -1,7 +1,6 @@
  """
- Working with video frames has to be done
- with nodes that use OpenGL because this
- way, by using the GPU, is the best way.
+ Module to include the abstract classes
+ and avoid import loops.
  """
  from typing import Union
  from abc import ABC, abstractmethod
@@ -9,9 +8,6 @@ from abc import ABC, abstractmethod
  import moderngl
 
 
- # TODO: Expose all the nodes here and move
- # this abstract to an abstract file (?)
-
  class _VideoNode(ABC):
      """
      Base abstract class to represent a video
@@ -1,5 +1,17 @@
+ """
+ Interesting information:
+ | Abbrev. | Full name                  | Main use                               |
+ | ------- | -------------------------- | -------------------------------------- |
+ | VAO     | Vertex Array Object        | Layout of vertex data                  |
+ | VBO     | Vertex Buffer Object       | Raw vertex data on the GPU             |
+ | FBO     | Frame Buffer Object        | Off-screen rendering                   |
+ | UBO     | Uniform Buffer Object      | Shared `uniform` variables             |
+ | EBO/IBO | Element / Index Buffer Obj | Indices to reuse vertices              |
+ | PBO     | Pixel Buffer Object        | Fast image transfers                   |
+ | RBO     | Render Buffer Object       | Intermediate storage (depth, etc.)     |
+ """
  from yta_video_opengl.utils import frame_to_texture, get_fullscreen_quad_vao
- from yta_video_opengl.nodes.video import _VideoNode
+ from yta_video_opengl.nodes.video.abstract import _VideoNode
  from yta_validation.parameter import ParameterValidator
  from yta_validation import PythonValidator
  from abc import abstractmethod
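
For readers new to the objects in the table above, a minimal headless moderngl sketch (a generic illustration, not code from this package) that wires a VBO into a VAO and renders into an FBO:

import moderngl
import numpy as np

ctx = moderngl.create_standalone_context()

program = ctx.program(
    vertex_shader = '''
        #version 330
        in vec2 in_pos;
        void main() { gl_Position = vec4(in_pos, 0.0, 1.0); }
    ''',
    fragment_shader = '''
        #version 330
        out vec4 f_color;
        void main() { f_color = vec4(1.0, 0.0, 0.0, 1.0); }
    '''
)

# VBO: raw vertex data on the GPU (a fullscreen triangle strip)
vbo = ctx.buffer(np.array([-1, -1, 1, -1, -1, 1, 1, 1], dtype = 'f4').tobytes())

# VAO: how the VBO bytes map to the 'in_pos' shader input
vao = ctx.vertex_array(program, vbo, 'in_pos')

# FBO: render off-screen, then read the pixels back
fbo = ctx.simple_framebuffer((4, 4))
fbo.use()
vao.render(moderngl.TRIANGLE_STRIP)
pixels = np.frombuffer(fbo.read(components = 3), dtype = np.uint8)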
@@ -1,11 +0,0 @@
- """
- Module to include video handling with OpenGL.
- """
- def main():
-     from yta_video_opengl.tests import video_modified_stored
-
-     video_modified_stored()
-
-
- if __name__ == '__main__':
-     main()
@@ -1,133 +0,0 @@
- """
- When working with audio frames, we don't need
- to use the GPU because audios are 1D and the
- information can be processed perfectly with
- a library like numpy.
-
- If we need a very intense calculation for an
- audio frame (FFT, convolution, etc.) we can
- use CuPy or some DPS specific libraries, but
- 90% is perfectly done with numpy.
-
- If you want to modify huge amounts of audio
- (some seconds at the same time), you can use
- CuPy, that has the same API as numpy but
- working in GPU. Doing this below most of the
- changes would work:
- - `import numpy as np` → `import cupy as np`
- """
- from abc import abstractmethod
- from typing import Union
-
- import numpy as np
-
-
- class _AudioNode:
-     """
-     Base audio node class to implement a
-     change in an audio frame by using the
-     numpy library.
-     """
-
-     @abstractmethod
-     def process(
-         self,
-         input: np.ndarray,
-         t: float
-     ) -> np.ndarray:
-         """
-         Process the provided audio 'input' that
-         is played on the given 't' time moment.
-         """
-         pass
-
- class VolumeNode(_AudioNode):
-     """
-     Set the volume.
-
-     TODO: Explain properly.
-     """
-
-     def __init__(
-         self,
-         factor_fn
-     ):
-         """
-         factor_fn: function (t, index) -> volume factor
-         """
-         self.factor_f: callable = factor_fn
-
-     def process(
-         self,
-         input: np.ndarray,
-         t: float
-     ) -> np.ndarray:
-         """
-         Process the provided audio 'input' that
-         is played on the given 't' time moment.
-         """
-         factor = self.factor_fn(t, 0)
-
-         samples = input
-         samples *= factor
-
-         # Determine dtype according to format
-         # samples = (
-         #     samples.astype(np.int16)
-         #     # 'fltp', 's16', 's16p'
-         #     if 's16' in input.format.name else
-         #     samples.astype(np.float32)
-         # )
-
-         return samples
-
- class ChorusNode(_AudioNode):
-     """
-     Apply a chorus effect, also called flanger
-     effect.
-
-     TODO: Explain properly
-     """
-
-     def __init__(
-         self,
-         sample_rate: int,
-         depth: int = 0,
-         frequency: float = 0.25
-     ):
-         """
-         The 'sample_rate' must be the sample rate
-         of the audio frame.
-         """
-         self.sample_rate: int = sample_rate
-         self.depth: int = depth
-         self.frequency: float = frequency
-
-     def process(
-         self,
-         input: Union[np.ndarray],
-         t: float
-     ) -> np.ndarray:
-         """
-         Process the provided audio 'input' that
-         is played on the given 't' time moment.
-         """
-         n_samples = input.shape[0]
-         t = np.arange(n_samples) / self.rate
-
-         # Sinusoidal LFO that controls the delay
-         delay = (self.depth / 1000.0) * self.rate * (0.5 * (1 + np.sin(2 * np.pi * self.frequency * t)))
-         delay = delay.astype(np.int32)
-
-         output = np.zeros_like(input, dtype=np.float32)
-
-         for i in range(n_samples):
-             d = delay[i]
-
-             output[i] = (
-                 0.7 * input[i] + 0.7 * input[i - d]
-                 if (i - d) >= 0 else
-                 input[i]
-             )
-
-         return output
@@ -1,772 +0,0 @@
- """
- Manual tests that are working and are interesting
- to learn about the code, refactor and build
- classes.
- """
- """
- Interesting information:
- | Abbrev. | Full name                  | Main use                               |
- | ------- | -------------------------- | -------------------------------------- |
- | VAO     | Vertex Array Object        | Layout of vertex data                  |
- | VBO     | Vertex Buffer Object       | Raw vertex data on the GPU             |
- | FBO     | Frame Buffer Object        | Off-screen rendering                   |
- | UBO     | Uniform Buffer Object      | Shared `uniform` variables             |
- | EBO/IBO | Element / Index Buffer Obj | Indices to reuse vertices              |
- | PBO     | Pixel Buffer Object        | Fast image transfers                   |
- | RBO     | Render Buffer Object       | Intermediate storage (depth, etc.)     |
-
- """
- from yta_validation.parameter import ParameterValidator
- # from yta_video_opengl.reader import VideoReader
- # from yta_video_opengl.writer import VideoWriter
- from abc import abstractmethod
- from typing import Union
-
- import moderngl
- import numpy as np
-
-
- class OpenglVertexShader:
-     """
-     The vertex shader, for opengl renders.
-     """
-
-     def __init__(
-         self,
-         context: moderngl.Context,
-         code: str
-     ):
-         self.context: moderngl.Context = context
-         """
-         The context of the vertex.
-         """
-         self.code: str = code
-         """
-         The code that builds the vertex shader.
-         """
-
- class OpenglFragmentShader:
-     """
-     The fragment shader, for opengl renders.
-     """
-
-     def __init__(
-         self,
-         context: moderngl.Context,
-         code: str
-     ):
-         self.context: moderngl.Context = context
-         """
-         The context of the vertex.
-         """
-         self.code: str = code
-         """
-         The code that builds the fragment shader.
-         """
-
- class OpenglProgram:
-     """
-     The program, for opengl renders.
-     """
-
-     @property
-     def program(
-         self
-     ) -> moderngl.Program:
-         """
-         The opengl program to load variables and use to
-         modify the frames.
-         """
-         if not hasattr(self, '_program'):
-             self._program = self.context.program(
-                 vertex_shader = self.vertex_shader,
-                 fragment_shader = self.fragment_shader
-             )
-
-         return self._program
-
-     @property
-     def vbo(
-         self
-     ) -> moderngl.Buffer:
-         """
-         The vertex buffer object.
-
-         Block of memory in the GPU in which we
-         store the information about the vertices
-         (positions, colors, texture coordinates,
-         etc.). The VAO points to one or more VBOs
-         to obtain the information.
-         """
-         if not hasattr(self, '_vbo'):
-             self._vbo = self.create_vbo(self.vertices)
-
-         return self._vbo
-
-     @property
-     def vao(
-         self
-     ):
-         """
-         The vertex array object.
-
-         Store the state of how the vertices
-         information is organized.
-         """
-         if not hasattr(self, '_vao'):
-             self._vao = self.create_vao(self.vbo)
-
-         return self._vao
-
-     def __init__(
-         self,
-         context: moderngl.Context,
-         vertex_shader: OpenglVertexShader,
-         fragment_shader: OpenglFragmentShader,
-         vertices: 'np.ndarray'
-     ):
-         self.context: moderngl.Context = context
-         """
-         The opengl context.
-         """
-         self.vertex_shader: OpenglVertexShader = vertex_shader
-         """
-         The vertex shader.
-         """
-         self.fragment_shader: OpenglFragmentShader = fragment_shader
-         """
-         The fragment shader.
-         """
-         """
-         TODO: A program will include the context, one
-         vertex shader, one fragment shader and one or
-         more vertices. So, this needs to be refactored to
-         accept more than one vertices, and also to
-         provide one vbo for each vertices item.
-         """
-         self.vertices: Union[list['np.ndarray'], 'np.ndarray'] = vertices
-         """
-         The vertices we need to use.
-         """
-
-     def set_value(
-         self,
-         name: str,
-         value: any
-     ) -> 'OpenglProgram':
-         """
-         Set the provided 'value' as the value of the
-         program property (uniform) with the name given
-         as 'name' parameter.
-         """
-         self.program[name].value = value
-
-         return self
-
-     def create_vao(
-         self,
-         vbo: moderngl.Buffer
-     ) -> moderngl.VertexArray:
-         """
-         Create a vertex array with the given 'vbo'
-         (vertex buffer object) parameter.
-         """
-         ParameterValidator.validate_mandatory_instance_of('vbo', vbo, moderngl.Buffer)
-
-         return self.context.vertex_array(self.program, vbo, 'in_pos', 'in_uv')
-
-     def create_vbo(
-         self,
-         vertices: np.ndarray
-     ) -> moderngl.Buffer:
-         """
-         Create a buffer with the given 'vertices'
-         parameter.
-         """
-         ParameterValidator.validate_mandatory_numpy_array('vertices', vertices)
-
-         return self.context.buffer(vertices.tobytes())
-
- class OpenglContext:
-     """
-     Class to wrap an opengl context to handle
-     it properly and modify videos.
-
-     TODO: This is ready to apply only one
-     change per context because the shaders are
-     limited to one.
-     """
-
-     def __init__(
-         self,
-         vertex_shader: OpenglVertexShader,
-         fragment_shader: OpenglFragmentShader,
-         vertices: 'np.ndarray'
-     ):
-         self.context: moderngl.Context = moderngl.create_standalone_context()
-         """
-         The headless context.
-         """
-         self.program: OpenglProgram = OpenglProgram(
-             context = self.context,
-             vertex_shader = vertex_shader,
-             fragment_shader = fragment_shader,
-             vertices = vertices
-         )
-         """
-         The program custom class instance that is
-         able to use the vao, vbo, etc.
-         """
-
-     def fbo(
-         self,
-         frame_size: tuple[int, int]
-     ) -> moderngl.Framebuffer:
-         """
-         Get a frame buffer object (fbo) for the
-         given 'frame_size'.
-
-         A frame buffer object is a virtual screen
-         in which you can render out of the screen
-         to later do this:
-         - Save as a texture
-         - Apply post-processing methods
-         - Store to a file
-         """
-         return self.context.simple_framebuffer(frame_size)
-
- from dataclasses import dataclass
- from abc import ABC
- @dataclass
- class OpenglEffectProgram(ABC):
-     """
-     The abstract class to be inherited by any
-     of our opengl frame effect methods, that
-     are actually programs.
-     """
-
-     @property
-     @abstractmethod
-     def vertex_shader(
-         self
-     ) -> str:
-         pass
-
-     @property
-     @abstractmethod
-     def fragment_shader(
-         self
-     ) -> str:
-         pass
-
-     @property
-     @abstractmethod
-     def vertices(
-         self
-     ) -> 'np.ndarray':
-         pass
-
-     def __init__(
-         self
-     ):
-         pass
-
- @dataclass
- class WavingEffectProgram(OpenglEffectProgram):
-
-     # TODO: I think this has to be a different
-     # thing...
-     @property
-     def vertex_shader(
-         self
-     ) -> str:
-         return (
-             '''
-             #version 330
-             in vec2 in_pos;
-             in vec2 in_uv;
-             out vec2 v_uv;
-             void main() {
-                 v_uv = in_uv;
-                 gl_Position = vec4(in_pos, 0.0, 1.0);
-             }
-             '''
-         )
-
-     @property
-     def fragment_shader(
-         self
-     ) -> str:
-         return (
-             '''
-             #version 330
-             uniform sampler2D tex;
-             uniform float time;
-             uniform float amp;
-             uniform float freq;
-             uniform float speed;
-             in vec2 v_uv;
-             out vec4 f_color;
-             void main() {
-                 float wave = sin(v_uv.x * freq + time * speed) * amp;
-                 vec2 uv = vec2(v_uv.x, v_uv.y + wave);
-                 f_color = texture(tex, uv);
-             }
-             '''
-         )
-
-     @property
-     def vertices(
-         self
-     ) -> 'np.ndarray':
-         # TODO: This should be an array of them because
-         # we can have more than one, but by now I leave
-         # it being only one
-         return np.array([
-             -1, -1, 0.0, 0.0,
-              1, -1, 1.0, 0.0,
-             -1,  1, 0.0, 1.0,
-              1,  1, 1.0, 1.0,
-         ], dtype = 'f4')
-
-
-
-
- # TODO: This code below was using the pyav
- # reading library that has been moved, that
- # is why it is commented by now
-
- NUMPY_FORMAT = 'rgb24'
-
- # # TODO: Maybe rename as ContextHandler (?)
- # class VideoProcessor:
- #     """
- #     Class to read a video, process it (maybe
- #     applying some effects) and writing the
- #     results in a new video.
- #     """
-
- #     @property
- #     def fbo(
- #         self
- #     ) -> moderngl.Framebuffer:
- #         """
- #         The frame buffer object for the video frame
- #         size.
- #         """
- #         if not hasattr(self, '_fbo'):
- #             self._fbo = self.context.fbo(self.reader.size)
-
- #         return self._fbo
-
- #     @property
- #     def vao(
- #         self
- #     ) -> moderngl.VertexArray:
- #         """
- #         Shortcut to the program vao.
- #         """
- #         return self.program.vao
-
- #     @property
- #     def first_frame(
- #         self
- #     ) -> Union['VideoFrame', None]:
- #         """
- #         The first frame of the video as a VideoFrame.
- #         """
- #         if not hasattr(self, '_first_frame'):
- #             # Framebuffer to render
- #             self.fbo.use()
- #             self._first_frame = self.reader.next_frame
- #             # Reset the reader
- #             self.reader.reset()
-
- #         return self._first_frame
-
- #     @property
- #     def first_frame_as_texture(
- #         self
- #     ) -> moderngl.Texture:
- #         """
- #         The first frame of the video as a texture.
- #         This is needed to start the process.
- #         """
- #         if not hasattr(self, '_first_frame_as_texture'):
- #             self._first_frame_as_texture = self.frame_to_texture(self.first_frame, NUMPY_FORMAT)
- #             self._first_frame_as_texture.build_mipmaps()
-
- #         return self._first_frame_as_texture
-
- #     @property
- #     def program(
- #         self
- #     ) -> OpenglProgram:
- #         """
- #         Shortcut to the context program custom class
- #         instance.
- #         """
- #         return self.context.program
-
- #     def __init__(
- #         self,
- #         filename: str,
- #         output_filename: str
- #     ):
- #         self.filename: str = filename
- #         """
- #         The filename of the video we want to read and
- #         process.
- #         """
- #         self.output_filename: str = output_filename
- #         """
- #         The filename of the video we want to generate
- #         and store once the original one has been
- #         processed.
- #         """
- #         # TODO: Hardcoded by now
- #         effect = WavingEffectProgram()
- #         self.context: OpenglContext = OpenglContext(
- #             vertex_shader = effect.vertex_shader,
- #             fragment_shader = effect.fragment_shader,
- #             vertices = effect.vertices
- #         )
- #         """
- #         The headless context as a custom class instance.
- #         """
- #         self.reader: VideoReader = VideoReader(self.filename)
- #         """
- #         The video reader instance.
- #         """
- #         # TODO: This has to be dynamic, but
- #         # according to what (?)
-
- #         # TODO: Where do we obtain this from (?)
- #         VIDEO_CODEC_NAME = 'libx264'
- #         # TODO: Where do we obtain this from (?)
- #         PIXEL_FORMAT = 'yuv420p'
- #         self.writer: VideoWriter = (
- #             VideoWriter(output_filename)
- #             .set_video_stream(VIDEO_CODEC_NAME, self.reader.fps, self.reader.size, PIXEL_FORMAT)
- #             .set_audio_stream_from_template(self.reader.audio_stream)
- #         )
- #         """
- #         The video writer instance.
- #         """
-
- #     # TODO: This should be a utils
- #     def frame_to_texture(
- #         self,
- #         frame: 'VideoFrame',
- #         numpy_format: str = 'rgb24'
- #     ):
- #         """
- #         Transform the given 'frame' to an opengl
- #         texture.
- #         """
- #         # To numpy RGB inverted for OpenGL
- #         # TODO: Maybe we can receive normal frames
- #         # here, as np.ndarray, from other libraries
- #         frame: np.ndarray = np.flipud(frame.to_ndarray(format = numpy_format))
-
- #         return self.context.context.texture((frame.shape[1], frame.shape[0]), 3, frame.tobytes())
-
- #     def process(
- #         self
- #     ):
- #         """
- #         Process the video and generate the new one.
-
- #         TODO: Should I pass some effects to apply (?)
- #         """
- #         # [ 1 ] Initialize fbo and texture mipmaps
- #         self.first_frame_as_texture # This forces it in the code
-
- #         # [ 2 ] Set general program uniforms
- #         AMP = 0.05
- #         FREQ = 10.0
- #         SPEED = 2.0
- #         (
- #             self.context.program
- #             .set_value('amp', AMP)
- #             .set_value('freq', FREQ)
- #             .set_value('speed', SPEED)
- #         )
-
- #         # [ 3 ] Process the frames
- #         frame_index = 0
- #         for frame_or_packet in self.reader.iterate_with_audio(
- #             do_decode_video = True,
- #             do_decode_audio = False
- #         ):
- #             # This below is because of the parameters we
- #             # passed to the method
- #             is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
- #             is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
- #             # To simplify the process
- #             if frame_or_packet is not None:
- #                 frame_or_packet = frame_or_packet.data
- #             if is_audio_packet:
- #                 self.writer.mux(frame_or_packet)
- #             elif is_video_frame:
- #                 with Timer(is_silent_as_context = True) as timer:
- #                     # Check this link:
- #                     # https://stackoverflow.com/a/63153755
-
- #                     def process_frame(
- #                         frame: 'VideoFrame'
- #                     ):
- #                         # [ 4 ] Add specific program uniforms
- #                         # TODO: T moved to 'yta_video_pyav'
- #                         self.program.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(self.reader.fps)))
-
- #                         # Create texture
- #                         texture = self.frame_to_texture(frame)
- #                         texture.use()
-
- #                         # Activate frame buffer
- #                         self.fbo.use()
-
- #                         # Render, captured by the fbo
- #                         self.vao.render(moderngl.TRIANGLE_STRIP)
-
- #                         # Processed GPU result (from fbo) to numpy
- #                         processed_data = np.frombuffer(
- #                             self.fbo.read(components = 3, alignment = 1), dtype = np.uint8
- #                         )
-
- #                         # Invert numpy to normal frame
- #                         processed_data = np.flipud(
- #                             processed_data.reshape((texture.size[1], texture.size[0], 3))
- #                         )
-
- #                         # To VideoFrame and to buffer
- #                         frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
- #                         # TODO: What is this for (?)
- #                         #out_frame.pict_type = 'NONE'
-
- #                         return frame
-
- #                     self.writer.mux_video_frame(process_frame(frame_or_packet))
-
- #             print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
- #             frame_index += 1
-
- #         # While this code can be finished, the work in
- #         # the muxer could be not finished and have some
- #         # packets waiting to be written. Here we tell
- #         # the muxer to process all those packets.
- #         self.writer.mux_video_frame(None)
-
- #         # TODO: Maybe move this to the '__del__' (?)
- #         self.writer.output.close()
- #         self.reader.container.close()
- #         print(f'Saved as "{self.output_filename}".')
-
-
- def video_modified_stored():
-     # This path below was trimmed in an online platform
-     # and seems to be bad codified and generates error
-     # when processing it, but it is readable in the
-     # file explorer...
-     #VIDEO_PATH = 'test_files/test_1_short_broken.mp4'
-     # This is short but is working well
-     VIDEO_PATH = "test_files/test_1_short_2.mp4"
-     # Long version below, comment to test faster
-     #VIDEO_PATH = "test_files/test_1.mp4"
-     OUTPUT_PATH = "test_files/output.mp4"
-     # TODO: This has to be dynamic, but
-     # according to what (?)
-     NUMPY_FORMAT = 'rgb24'
-     # TODO: Where do we obtain this from (?)
-     VIDEO_CODEC_NAME = 'libx264'
-     # TODO: Where do we obtain this from (?)
-     PIXEL_FORMAT = 'yuv420p'
-
-     from yta_video_opengl.utils import texture_to_frame, frame_to_texture
-
-     #from yta_video_pyav.video import Video
-
-     #video = Video(VIDEO_PATH)
-
-     #effect = WavingFrame(size = video.size)
-     #effect = BreathingFrame(size = video.size)
-     #effect = HandheldFrame(size = video.size)
-     # effect = OrbitingFrame(
-     #     size = video.size,
-     #     first_frame = video.next_frame
-     # )
-     # effect = RotatingInCenterFrame(
-     #     size = video.size,
-     #     first_frame = video.next_frame
-     # )
-
-     # effect = GlitchRgbFrame(
-     #     size = video.size,
-     #     first_frame = video.next_frame
-     # )
-     from yta_video_opengl.effects import Effects
-
-     editor = Effects()
-     # waving_node_effect = editor.effects.video.waving_node(video.size, amplitude = 0.2, frequency = 9, speed = 3)
-     # chorus_effect = editor.effects.audio.chorus(audio.sample_rate)
-     # print(waving_node_effect)
-
-     # New way, with nodes
-     # context = moderngl.create_context(standalone = True)
-     # node = WavingNode(context, video.size, amplitude = 0.2, frequency = 9, speed = 3)
-     # print(node.process(video.get_frame_from_t(0)))
-     # We need to reset it to being again pointing
-     # to the first frame...
-     # TODO: Improve this by, maybe, storing the first
-     # frame in memory so we can append it later, or
-     # using the '.seek(0)' even when it could be not
-     # accurate
-     #video.reset()
-
-     # # TODO: By now this is applying an effect
-     # # by default
-     # VideoProcessor(
-     #     filename = VIDEO_PATH,
-     #     output_filename = OUTPUT_PATH
-     # ).process()
-
-     # return
-
-     return
-
-     AMP = 0.05
-     FREQ = 10.0
-     SPEED = 2.0
-
-     # Get the information about the video
-     video = VideoReader(VIDEO_PATH)
-
-     # ModernGL context without window
-     context = moderngl.create_standalone_context()
-
-     waving_frame_effect = WavingFrame(
-         context = context,
-         frame_size = video.size
-     )
-
-     vao = waving_frame_effect.vao
-
-     # TODO: This has to be dynamic, but
-     # according to what (?)
-     NUMPY_FORMAT = 'rgb24'
-     # TODO: Where do we obtain this from (?)
-     VIDEO_CODEC_NAME = 'libx264'
-     # TODO: Where do we obtain this from (?)
-     PIXEL_FORMAT = 'yuv420p'
-
-     # Framebuffer to render
-     fbo = waving_frame_effect.fbo
-     fbo.use()
-
-     # Decode first frame and use as texture
-     first_frame = video.next_frame
-     # We need to reset it to being again pointing
-     # to the first frame...
-     # TODO: Improve this by, maybe, storing the first
-     # frame in memory so we can append it later, or
-     # using the '.seek(0)' even when it could be not
-     # accurate
-     video = VideoReader(VIDEO_PATH)
-
-     # Most of OpenGL textures expect origin in lower
-     # left corner
-     # TODO: What if alpha (?)
-     # TODO: Move this to the OpenglFrameEffect maybe (?)
-
-     texture: moderngl.Texture = frame_to_texture(first_frame, waving_frame_effect.context)
-     texture.build_mipmaps()
-
-     # These properties can be set before
-     # iterating the frames or maybe for
-     # each iteration... depending on the
-     # effect.
-     # Uniforms (properties)
-     (
-         waving_frame_effect
-         .set_value('amp', AMP)
-         .set_value('freq', FREQ)
-         .set_value('speed', SPEED)
-     )
-
-     # Writer with H.264 codec
-     video_writer = (
-         VideoWriter(OUTPUT_PATH)
-         .set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
-         .set_audio_stream_from_template(video.audio_stream)
-     )
-
-     frame_index = 0
-     for frame_or_packet in video.iterate_with_audio(
-         do_decode_video = True,
-         do_decode_audio = False
-     ):
-         # This below is because of the parameters we
-         # passed to the method
-         is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
-         is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
-         # To simplify the process
-         if frame_or_packet is not None:
-             frame_or_packet = frame_or_packet.data
-
-         if is_audio_packet:
-             video_writer.mux(frame_or_packet)
-         elif is_video_frame:
-             with Timer(is_silent_as_context = True) as timer:
-
-                 def process_frame(
-                     frame: 'VideoFrame'
-                 ):
-                     # Add some variables if we need, for the
-                     # opengl change we are applying (check the
-                     # program code)
-                     waving_frame_effect.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(video.fps)))
-
-                     # Create texture
-                     texture = frame_to_texture(frame, waving_frame_effect.context)
-                     texture.use()
-
-                     # Render with shader to frame buffer
-                     fbo.use()
-                     vao.render(moderngl.TRIANGLE_STRIP)
-
-                     # Processed GPU result to numpy
-                     processed_data = np.frombuffer(
-                         fbo.read(components = 3, alignment = 1), dtype = np.uint8
-                     )
-
-                     # Invert numpy to normal frame
-                     # TODO: Can I use the texture.size to fill
-                     # these 'img_array.shape[0]' (?)
-                     processed_data = np.flipud(
-                         processed_data.reshape((texture.size[1], texture.size[0], 3))
-                     )
-
-                     # To VideoFrame and to buffer
-                     frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
-                     # TODO: What is this for (?)
-                     #out_frame.pict_type = 'NONE'
-                     return frame
-
-                 video_writer.mux_video_frame(process_frame(frame_or_packet))
-
-         print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
-         frame_index += 1
-
-     # While this code can be finished, the work in
-     # the muxer could be not finished and have some
-     # packets waiting to be written. Here we tell
-     # the muxer to process all those packets.
-     video_writer.mux_video_frame(None)
-
-     # TODO: Maybe move this to the '__del__' (?)
-     video_writer.output.close()
-     video.container.close()
-     print(f'Saved as "{OUTPUT_PATH}".')