yta-video-opengl 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/tests.py CHANGED
@@ -2,68 +2,702 @@
2
2
  Manual tests that are working and are interesting
3
3
  to learn about the code, refactor and build
4
4
  classes.
5
+ """
6
+ """
7
+ Interesting information:
8
+ | Abrev. | Nombre completo | Uso principal |
9
+ | ------- | -------------------------- | -------------------------------------- |
10
+ | VAO | Vertex Array Object | Esquema de datos de vértices |
11
+ | VBO | Vertex Buffer Object | Datos crudos de vértices en GPU |
12
+ | FBO | Frame Buffer Object | Renderizar fuera de pantalla |
13
+ | UBO | Uniform Buffer Object | Variables `uniform` compartidas |
14
+ | EBO/IBO | Element / Index Buffer Obj | Índices para reutilizar vértices |
15
+ | PBO | Pixel Buffer Object | Transferencia rápida de imágenes |
16
+ | RBO | Render Buffer Object | Almacén intermedio (profundidad, etc.) |
17
+
5
18
  """
6
19
  from yta_validation import PythonValidator
20
+ from yta_validation.parameter import ParameterValidator
7
21
  from yta_video_opengl.reader import VideoReader
8
22
  from yta_video_opengl.writer import VideoWriter
9
23
  from yta_timer import Timer
10
24
  from yta_video_frame_time import T
25
+ from abc import abstractmethod
26
+ from typing import Union
11
27
 
12
28
  import av
13
29
  import moderngl
14
30
  import numpy as np
15
31
 
16
32
 
33
+ class OpenglVertexShader:
34
+ """
35
+ The vertex shader, for opengl renders.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ context: moderngl.Context,
41
+ code: str
42
+ ):
43
+ self.context: moderngl.Context = context
44
+ """
45
+ The context of the vertex.
46
+ """
47
+ self.code: str = code
48
+ """
49
+ The code that builds the vertex shader.
50
+ """
51
+
52
+ class OpenglFragmentShader:
53
+ """
54
+ The fragment shader, for opengl renders.
55
+ """
56
+
57
+ def __init__(
58
+ self,
59
+ context: moderngl.Context,
60
+ code: str
61
+ ):
62
+ self.context: moderngl.Context = context
63
+ """
64
+ The context of the fragment shader.
65
+ """
66
+ self.code: str = code
67
+ """
68
+ The code that builds the fragment shader.
69
+ """
70
+
71
+ class OpenglProgram:
72
+ """
73
+ The program, for opengl renders.
74
+ """
75
+
76
+ @property
77
+ def program(
78
+ self
79
+ ) -> moderngl.Program:
80
+ """
81
+ The opengl program to load variables and use to
82
+ modify the frames.
83
+ """
84
+ if not hasattr(self, '_program'):
85
+ self._program = self.context.program(
86
+ vertex_shader = self.vertex_shader,
87
+ fragment_shader = self.fragment_shader
88
+ )
89
+
90
+ return self._program
91
+
92
+ @property
93
+ def vbo(
94
+ self
95
+ ) -> moderngl.Buffer:
96
+ """
97
+ The vertex buffer object.
98
+
99
+ Block of memory in the GPU in which we
100
+ store the information about the vertices
101
+ (positions, colors, texture coordinates,
102
+ etc.). The VAO points to one or more VBOs
103
+ to obtain the information.
104
+ """
105
+ if not hasattr(self, '_vbo'):
106
+ self._vbo = self.create_vbo(self.vertices)
107
+
108
+ return self._vbo
109
+
110
+ @property
111
+ def vao(
112
+ self
113
+ ):
114
+ """
115
+ The vertex array object.
116
+
117
+ Store the state of how the vertices
118
+ information is organized.
119
+ """
120
+ if not hasattr(self, '_vao'):
121
+ self._vao = self.create_vao(self.vbo)
122
+
123
+ return self._vao
124
+
125
+ def __init__(
126
+ self,
127
+ context: moderngl.Context,
128
+ vertex_shader: OpenglVertexShader,
129
+ fragment_shader: OpenglFragmentShader,
130
+ vertices: 'np.ndarray'
131
+ ):
132
+ self.context: moderngl.Context = context
133
+ """
134
+ The opengl context.
135
+ """
136
+ self.vertex_shader: OpenglVertexShader = vertex_shader
137
+ """
138
+ The vertex shader.
139
+ """
140
+ self.fragment_shader: OpenglFragmentShader = fragment_shader
141
+ """
142
+ The fragment shader.
143
+ """
144
+ """
145
+ TODO: A program will include the context, one
146
+ vertex shader, one fragment shader and one or
147
+ more vertices. So, this need to be refactor to
148
+ accept more than one vertices, and also to
149
+ provide one vbo for each vertices item.
150
+ """
151
+ self.vertices: Union[list['np.ndarray'], 'np.ndarray'] = vertices
152
+ """
153
+ The vertices we need to use.
154
+ """
155
+
156
+ def set_value(
157
+ self,
158
+ name: str,
159
+ value: any
160
+ ) -> 'OpenglProgram':
161
+ """
162
+ Set the provided 'value' as the value of the
163
+ program property (uniform) with the name given
164
+ as 'name' parameter.
165
+ """
166
+ self.program[name].value = value
167
+
168
+ return self
169
+
170
+ def create_vao(
171
+ self,
172
+ vbo: moderngl.Buffer
173
+ ) -> moderngl.VertexArray:
174
+ """
175
+ Create a vertex array with the given 'vbo'
176
+ (vertex array object) parameter.
177
+ """
178
+ ParameterValidator.validate_mandatory_instance_of('vbo', vbo, moderngl.Buffer)
179
+
180
+ return self.context.vertex_array(self.program, vbo, 'in_pos', 'in_uv')
181
+
182
+ def create_vbo(
183
+ self,
184
+ vertices: np.ndarray
185
+ ) -> moderngl.Buffer:
186
+ """
187
+ Create a buffer with the given 'vertices'
188
+ parameter.
189
+ """
190
+ ParameterValidator.validate_mandatory_numpy_array('vertices', vertices)
191
+
192
+ return self.context.buffer(vertices.tobytes())
193
+
194
+ class OpenglContext:
195
+ """
196
+ Class to wrap an opengl context to handle
197
+ it properly and modify videos.
198
+
199
+ TODO: This is ready to apply only one
200
+ change per context because the shaders are
201
+ limited to one.
202
+ """
203
+
204
+ def __init__(
205
+ self,
206
+ vertex_shader: OpenglVertexShader,
207
+ fragment_shader: OpenglFragmentShader,
208
+ vertices: 'np.ndarray'
209
+ ):
210
+ self.context: moderngl.Context = moderngl.create_standalone_context()
211
+ """
212
+ The headless context.
213
+ """
214
+ self.program: OpenglProgram = OpenglProgram(
215
+ context = self.context,
216
+ vertex_shader = vertex_shader,
217
+ fragment_shader = fragment_shader,
218
+ vertices = vertices
219
+ )
220
+ """
221
+ The program custom class instance that is
222
+ able to use the vao, vbo, etc.
223
+ """
224
+
225
+ def fbo(
226
+ self,
227
+ frame_size: tuple[int, int]
228
+ ) -> moderngl.Framebuffer:
229
+ """
230
+ Get a frame buffer object (fbo) for the
231
+ given 'frame_size'.
232
+
233
+ A frame buffer object is a virtual screen
234
+ in which you can render out of the screen
235
+ to later do the following:
236
+ - Save as a texture
237
+ - Apply post-processing methods
238
+ - Store to a file
239
+ """
240
+ return self.context.simple_framebuffer(frame_size)
241
+
242
+ from dataclasses import dataclass
243
+ from abc import ABC
244
+ @dataclass
245
+ class OpenglEffectProgram(ABC):
246
+ """
247
+ The abstract class to be inherited by any
248
+ of our opengl frame effect methods, that
249
+ are actually programs.
250
+ """
251
+
252
+ @property
253
+ @abstractmethod
254
+ def vertex_shader(
255
+ self
256
+ ) -> str:
257
+ pass
258
+
259
+ @property
260
+ @abstractmethod
261
+ def fragment_shader(
262
+ self
263
+ ) -> str:
264
+ pass
265
+
266
+ @property
267
+ @abstractmethod
268
+ def vertices(
269
+ self
270
+ ) -> 'np.ndarray':
271
+ pass
272
+
273
+ def __init__(
274
+ self
275
+ ):
276
+ pass
277
+
278
+ @dataclass
279
+ class WavingEffectProgram(OpenglEffectProgram):
280
+
281
+ # TODO: I think this has to be a different
282
+ # thing...
283
+ @property
284
+ def vertex_shader(
285
+ self
286
+ ) -> str:
287
+ return (
288
+ '''
289
+ #version 330
290
+ in vec2 in_pos;
291
+ in vec2 in_uv;
292
+ out vec2 v_uv;
293
+ void main() {
294
+ v_uv = in_uv;
295
+ gl_Position = vec4(in_pos, 0.0, 1.0);
296
+ }
297
+ '''
298
+ )
299
+
300
+ @property
301
+ def fragment_shader(
302
+ self
303
+ ) -> str:
304
+ return (
305
+ '''
306
+ #version 330
307
+ uniform sampler2D tex;
308
+ uniform float time;
309
+ uniform float amp;
310
+ uniform float freq;
311
+ uniform float speed;
312
+ in vec2 v_uv;
313
+ out vec4 f_color;
314
+ void main() {
315
+ float wave = sin(v_uv.x * freq + time * speed) * amp;
316
+ vec2 uv = vec2(v_uv.x, v_uv.y + wave);
317
+ f_color = texture(tex, uv);
318
+ }
319
+ '''
320
+ )
321
+
322
+ @property
323
+ def vertices(
324
+ self
325
+ ) -> 'np.ndarray':
326
+ # TODO: This should be an array of them because
327
+ # we can have more than one, but by now I leave
328
+ # it being only one
329
+ return np.array([
330
+ -1, -1, 0.0, 0.0,
331
+ 1, -1, 1.0, 0.0,
332
+ -1, 1, 0.0, 1.0,
333
+ 1, 1, 1.0, 1.0,
334
+ ], dtype = 'f4')
335
+
336
+ NUMPY_FORMAT = 'rgb24'
337
+
338
+ # TODO: Maybe rename as ContextHandler (?)
339
+ class VideoProcessor:
340
+ """
341
+ Class to read a video, process it (maybe
342
+ applying some effects) and writing the
343
+ results in a new video.
344
+ """
345
+
346
+ @property
347
+ def fbo(
348
+ self
349
+ ) -> moderngl.Framebuffer:
350
+ """
351
+ The frame buffer object for the video frame
352
+ size.
353
+ """
354
+ if not hasattr(self, '_fbo'):
355
+ self._fbo = self.context.fbo(self.reader.size)
356
+
357
+ return self._fbo
358
+
359
+ @property
360
+ def vao(
361
+ self
362
+ ) -> moderngl.VertexArray:
363
+ """
364
+ Shortcut to the program vao.
365
+ """
366
+ return self.program.vao
367
+
368
+ @property
369
+ def first_frame(
370
+ self
371
+ ) -> Union['VideoFrame', None]:
372
+ """
373
+ The first frame of the video as a VideoFrame.
374
+ """
375
+ if not hasattr(self, '_first_frame'):
376
+ # Framebuffer to render
377
+ self.fbo.use()
378
+ self._first_frame = self.reader.next_frame
379
+ # Reset the reader
380
+ self.reader.reset()
381
+
382
+ return self._first_frame
383
+
384
+ @property
385
+ def first_frame_as_texture(
386
+ self
387
+ ) -> moderngl.Texture:
388
+ """
389
+ The first frame of the video as a texture.
390
+ This is needed to start the process.
391
+ """
392
+ if not hasattr(self, '_first_frame_as_texture'):
393
+ self._first_frame_as_texture = self.frame_to_texture(self.first_frame, NUMPY_FORMAT)
394
+ self._first_frame_as_texture.build_mipmaps()
395
+
396
+ return self._first_frame_as_texture
397
+
398
+ @property
399
+ def program(
400
+ self
401
+ ) -> OpenglProgram:
402
+ """
403
+ Shortcut to the context program custom class
404
+ instance.
405
+ """
406
+ return self.context.program
407
+
408
+ def __init__(
409
+ self,
410
+ filename: str,
411
+ output_filename: str
412
+ ):
413
+ self.filename: str = filename
414
+ """
415
+ The filename of the video we want to read and
416
+ process.
417
+ """
418
+ self.output_filename: str = output_filename
419
+ """
420
+ The filename of the video we want to generate
421
+ and store once the original one has been
422
+ processed.
423
+ """
424
+ # TODO: Hardcoded by now
425
+ effect = WavingEffectProgram()
426
+ self.context: OpenglContext = OpenglContext(
427
+ vertex_shader = effect.vertex_shader,
428
+ fragment_shader = effect.fragment_shader,
429
+ vertices = effect.vertices
430
+ )
431
+ """
432
+ The headless context as a custom class instance.
433
+ """
434
+ self.reader: VideoReader = VideoReader(self.filename)
435
+ """
436
+ The video reader instance.
437
+ """
438
+ # TODO: This has to be dynamic, but
439
+ # according to what (?)
440
+
441
+ # TODO: Where do we obtain this from (?)
442
+ VIDEO_CODEC_NAME = 'libx264'
443
+ # TODO: Where do we obtain this from (?)
444
+ PIXEL_FORMAT = 'yuv420p'
445
+ self.writer: VideoWriter = (
446
+ VideoWriter(output_filename)
447
+ .set_video_stream(VIDEO_CODEC_NAME, self.reader.fps, self.reader.size, PIXEL_FORMAT)
448
+ .set_audio_stream_from_template(self.reader.audio_stream)
449
+ )
450
+ """
451
+ The video writer instance.
452
+ """
453
+
454
+ # TODO: This should be a utils
455
+ def frame_to_texture(
456
+ self,
457
+ frame: 'VideoFrame',
458
+ numpy_format: str = 'rgb24'
459
+ ):
460
+ """
461
+ Transform the given 'frame' to an opengl
462
+ texture.
463
+ """
464
+ # To numpy RGB inverted for OpenGL
465
+ # TODO: Maybe we can receive normal frames
466
+ # here, as np.ndarray, from other libraries
467
+ frame: np.ndarray = np.flipud(frame.to_ndarray(format = numpy_format))
468
+
469
+ return self.context.context.texture((frame.shape[1], frame.shape[0]), 3, frame.tobytes())
470
+
471
+ def process(
472
+ self
473
+ ):
474
+ """
475
+ Process the video and generate the new one.
476
+
477
+ TODO: Should I pass some effects to apply (?)
478
+ """
479
+ # [ 1 ] Initialize fbo and texture mipmaps
480
+ self.first_frame_as_texture # This forces it in the code
481
+
482
+ # [ 2 ] Set general program uniforms
483
+ AMP = 0.05
484
+ FREQ = 10.0
485
+ SPEED = 2.0
486
+ (
487
+ self.context.program
488
+ .set_value('amp', AMP)
489
+ .set_value('freq', FREQ)
490
+ .set_value('speed', SPEED)
491
+ )
492
+
493
+ # [ 3 ] Process the frames
494
+ frame_index = 0
495
+ for frame_or_packet in self.reader.iterate_with_audio(
496
+ do_decode_video = True,
497
+ do_decode_audio = False
498
+ ):
499
+ # This below is because of the parameters we
500
+ # passed to the method
501
+ is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
502
+ is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
503
+
504
+ # To simplify the process
505
+ if frame_or_packet is not None:
506
+ frame_or_packet = frame_or_packet.data
507
+ if is_audio_packet:
508
+ self.writer.mux(frame_or_packet)
509
+ elif is_video_frame:
510
+ with Timer(is_silent_as_context = True) as timer:
511
+ # Check this link:
512
+ # https://stackoverflow.com/a/63153755
513
+
514
+ def process_frame(
515
+ frame: 'VideoFrame'
516
+ ):
517
+ # [ 4 ] Add specific program uniforms
518
+ self.program.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(self.reader.fps)))
519
+
520
+ # Create texture
521
+ texture = self.frame_to_texture(frame)
522
+ texture.use()
523
+
524
+ # Activate frame buffer
525
+ self.fbo.use()
526
+
527
+ # Render, captured by the fbo
528
+ self.vao.render(moderngl.TRIANGLE_STRIP)
529
+
530
+ # Processed GPU result (from fbo) to numpy
531
+ processed_data = np.frombuffer(
532
+ self.fbo.read(components = 3, alignment = 1), dtype = np.uint8
533
+ )
534
+
535
+ # Invert numpy to normal frame
536
+ processed_data = np.flipud(
537
+ processed_data.reshape((texture.size[1], texture.size[0], 3))
538
+ )
539
+
540
+ # To VideoFrame and to buffer
541
+ frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
542
+ # TODO: What is this for (?)
543
+ #out_frame.pict_type = 'NONE'
544
+
545
+ return frame
546
+
547
+ self.writer.mux_video_frame(process_frame(frame_or_packet))
548
+
549
+ print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
550
+ frame_index += 1
551
+
552
+ # While this code can be finished, the work in
553
+ # the muxer could be not finished and have some
554
+ # packets waiting to be written. Here we tell
555
+ # the muxer to process all those packets.
556
+ self.writer.mux_video_frame(None)
557
+
558
+ # TODO: Maybe move this to the '__del__' (?)
559
+ self.writer.output.close()
560
+ self.reader.container.close()
561
+ print(f'Saved as "{self.output_filename}".')
562
+
17
563
  def video_modified_stored():
18
- VIDEO_PATH = "test_files/test_1.mp4"
564
+ # This path below was trimmed in an online platform
565
+ # and seems to be bad codified and generates error
566
+ # when processing it, but it is readable in the
567
+ # file explorer...
568
+ #VIDEO_PATH = 'test_files/test_1_short_broken.mp4'
569
+ # This is short but is working well
570
+ VIDEO_PATH = "test_files/test_1_short_2.mp4"
571
+ # Long version below, comment to test faster
572
+ #VIDEO_PATH = "test_files/test_1.mp4"
19
573
  OUTPUT_PATH = "test_files/output.mp4"
574
+ # TODO: This has to be dynamic, but
575
+ # according to what (?)
576
+ NUMPY_FORMAT = 'rgb24'
577
+ # TODO: Where do we obtain this from (?)
578
+ VIDEO_CODEC_NAME = 'libx264'
579
+ # TODO: Where do we obtain this from (?)
580
+ PIXEL_FORMAT = 'yuv420p'
581
+
582
+ from yta_video_opengl.classes import WavingFrame, BreathingFrame, HandheldFrame, OrbitingFrame, RotatingInCenterFrame, StrangeTvFrame, GlitchRgbFrame, WavingNode
583
+ from yta_video_opengl.utils import texture_to_frame, frame_to_texture
584
+
585
+ video = VideoReader(VIDEO_PATH)
586
+ video_writer = (
587
+ VideoWriter(OUTPUT_PATH)
588
+ #.set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
589
+ .set_video_stream_from_template(video.video_stream)
590
+ .set_audio_stream_from_template(video.audio_stream)
591
+ )
592
+
593
+ #effect = WavingFrame(size = video.size)
594
+ #effect = BreathingFrame(size = video.size)
595
+ #effect = HandheldFrame(size = video.size)
596
+ # effect = OrbitingFrame(
597
+ # size = video.size,
598
+ # first_frame = video.next_frame
599
+ # )
600
+ # effect = RotatingInCenterFrame(
601
+ # size = video.size,
602
+ # first_frame = video.next_frame
603
+ # )
604
+ effect = GlitchRgbFrame(
605
+ size = video.size,
606
+ first_frame = video.next_frame
607
+ )
608
+ context = moderngl.create_context(standalone = True)
609
+
610
+ # New way, with nodes
611
+ node = WavingNode(context, video.size, amplitude = 0.2, frequency = 9, speed = 3)
612
+ # We need to reset it to being again pointing
613
+ # to the first frame...
614
+ # TODO: Improve this by, maybe, storing the first
615
+ # frame in memory so we can append it later, or
616
+ # using the '.seek(0)' even when it could be not
617
+ # accurate
618
+ video.reset()
619
+
620
+ frame_index = 0
621
+ for frame_or_packet in video.iterate_with_audio(
622
+ do_decode_video = True,
623
+ do_decode_audio = False
624
+ ):
625
+ # This below is because of the parameters we
626
+ # passed to the method
627
+ is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
628
+ is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
629
+
630
+ # To simplify the process
631
+ if frame_or_packet is not None:
632
+ frame_or_packet = frame_or_packet.value
633
+
634
+ if is_audio_packet:
635
+ video_writer.mux(frame_or_packet)
636
+ elif is_video_frame:
637
+ with Timer(is_silent_as_context = True) as timer:
638
+ t = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
639
+
640
+ # This is another way of getting 't'
641
+ #t = float(frame_or_packet.pts * video.time_base)
642
+
643
+ # TODO: Pass the frame as a texture
644
+
645
+ video_writer.mux_video_frame(
646
+ frame = texture_to_frame(
647
+ texture = node.process(
648
+ input = frame_or_packet,
649
+ t = t
650
+ )
651
+ )
652
+ )
653
+
654
+ # video_writer.mux_video_frame(
655
+ # effect.process_frame(
656
+ # frame = frame_or_packet,
657
+ # t = t,
658
+ # numpy_format = NUMPY_FORMAT
659
+ # )
660
+ # )
661
+
662
+ frame_index += 1
663
+
664
+ print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
665
+
666
+ video_writer.mux_video_frame(None)
667
+
668
+ # TODO: Maybe move this to the '__del__' (?)
669
+ video_writer.output.close()
670
+ video.container.close()
671
+ print(f'Saved as "{OUTPUT_PATH}".')
672
+
673
+ return
674
+
675
+ # # TODO: By now this is applying an effect
676
+ # # by default
677
+ # VideoProcessor(
678
+ # filename = VIDEO_PATH,
679
+ # output_filename = OUTPUT_PATH
680
+ # ).process()
681
+
682
+ # return
683
+
684
+
20
685
  AMP = 0.05
21
686
  FREQ = 10.0
22
687
  SPEED = 2.0
23
688
 
689
+ # Get the information about the video
690
+ video = VideoReader(VIDEO_PATH)
691
+
24
692
  # ModernGL context without window
25
693
  context = moderngl.create_standalone_context()
26
694
 
27
- # Wave shader vertex and fragment
28
- program = context.program(
29
- vertex_shader = '''
30
- #version 330
31
- in vec2 in_pos;
32
- in vec2 in_uv;
33
- out vec2 v_uv;
34
- void main() {
35
- v_uv = in_uv;
36
- gl_Position = vec4(in_pos, 0.0, 1.0);
37
- }
38
- ''',
39
- fragment_shader = '''
40
- #version 330
41
- uniform sampler2D tex;
42
- uniform float time;
43
- uniform float amp;
44
- uniform float freq;
45
- uniform float speed;
46
- in vec2 v_uv;
47
- out vec4 f_color;
48
- void main() {
49
- float wave = sin(v_uv.x * freq + time * speed) * amp;
50
- vec2 uv = vec2(v_uv.x, v_uv.y + wave);
51
- f_color = texture(tex, uv);
52
- }
53
- '''
695
+ waving_frame_effect = WavingFrame(
696
+ context = context,
697
+ frame_size = video.size
54
698
  )
55
699
 
56
- # Quad
57
- vertices = np.array([
58
- -1, -1, 0.0, 0.0,
59
- 1, -1, 1.0, 0.0,
60
- -1, 1, 0.0, 1.0,
61
- 1, 1, 1.0, 1.0,
62
- ], dtype = 'f4')
63
- vbo = context.buffer(vertices.tobytes())
64
- vao = context.simple_vertex_array(program, vbo, 'in_pos', 'in_uv')
65
-
66
- video = VideoReader(VIDEO_PATH)
700
+ vao = waving_frame_effect.vao
67
701
 
68
702
  # TODO: This has to be dynamic, but
69
703
  # according to what (?)
@@ -74,7 +708,7 @@ def video_modified_stored():
74
708
  PIXEL_FORMAT = 'yuv420p'
75
709
 
76
710
  # Framebuffer to render
77
- fbo = context.simple_framebuffer(video.size)
711
+ fbo = waving_frame_effect.fbo
78
712
  fbo.use()
79
713
 
80
714
  # Decode first frame and use as texture
@@ -90,15 +724,22 @@ def video_modified_stored():
90
724
  # Most of OpenGL textures expect origin in lower
91
725
  # left corner
92
726
  # TODO: What if alpha (?)
93
- image = np.flipud(first_frame.to_ndarray(format = NUMPY_FORMAT))
94
- texture = context.texture((image.shape[1], image.shape[0]), 3, image.tobytes())
95
-
727
+ # TODO: Move this to the OpenglFrameEffect maybe (?)
728
+
729
+ texture: moderngl.Texture = frame_to_texture(first_frame, waving_frame_effect.context)
96
730
  texture.build_mipmaps()
97
731
 
98
- # Uniforms
99
- program['amp'].value = AMP
100
- program['freq'].value = FREQ
101
- program['speed'].value = SPEED
732
+ # These properties can be set before
733
+ # iterating the frames or maybe for
734
+ # each iteration... depending on the
735
+ # effect.
736
+ # Uniforms (properties)
737
+ (
738
+ waving_frame_effect
739
+ .set_value('amp', AMP)
740
+ .set_value('freq', FREQ)
741
+ .set_value('speed', SPEED)
742
+ )
102
743
 
103
744
  # Writer with H.264 codec
104
745
  video_writer = (
@@ -132,15 +773,10 @@ def video_modified_stored():
132
773
  # Add some variables if we need, for the
133
774
  # opengl change we are applying (check the
134
775
  # program code)
135
- program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
776
+ waving_frame_effect.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(video.fps)))
136
777
 
137
- # To numpy RGB inverted for OpenGL
138
- img_array = np.flipud(
139
- frame.to_ndarray(format = NUMPY_FORMAT)
140
- )
141
-
142
778
  # Create texture
143
- texture = context.texture((img_array.shape[1], img_array.shape[0]), 3, img_array.tobytes())
779
+ texture = frame_to_texture(frame, waving_frame_effect.context)
144
780
  texture.use()
145
781
 
146
782
  # Render with shader to frame buffer
@@ -151,9 +787,12 @@ def video_modified_stored():
151
787
  processed_data = np.frombuffer(
152
788
  fbo.read(components = 3, alignment = 1), dtype = np.uint8
153
789
  )
790
+
154
791
  # Invert numpy to normal frame
792
+ # TODO: Can I use the texture.size to fill
793
+ # these 'img_array.shape[0]' (?)
155
794
  processed_data = np.flipud(
156
- processed_data.reshape((img_array.shape[0], img_array.shape[1], 3))
795
+ processed_data.reshape((texture.size[1], texture.size[0], 3))
157
796
  )
158
797
 
159
798
  # To VideoFrame and to buffer