yta-video-opengl 0.0.21__py3-none-any.whl → 0.0.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/tests.py CHANGED
@@ -16,16 +16,12 @@ Interesting information:
  | RBO | Render Buffer Object | Intermediate storage (depth, etc.) |

  """
- from yta_validation import PythonValidator
  from yta_validation.parameter import ParameterValidator
- from yta_video_opengl.reader import VideoReader
- from yta_video_opengl.writer import VideoWriter
- from yta_timer import Timer
- from yta_video_frame_time import T
+ # from yta_video_opengl.reader import VideoReader
+ # from yta_video_opengl.writer import VideoWriter

  from abc import abstractmethod
  from typing import Union

- import av
  import moderngl
  import numpy as np
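
Note on the import change: 0.0.23 strips every PyAV-related import from tests.py (PythonValidator, VideoReader, VideoWriter, Timer, T and av itself), keeping only moderngl and numpy, and a TODO later in this diff records that T now lives in 'yta_video_pyav'. The frame-to-texture step those imports fed is small enough to sketch standalone; the helper below is an approximation built only on public PyAV and moderngl calls, not this package's API:

    import av
    import moderngl
    import numpy as np

    def frame_to_texture(
        frame: av.VideoFrame,
        context: moderngl.Context
    ) -> moderngl.Texture:
        # OpenGL samples textures with the origin in the
        # lower-left corner, so flip the decoded frame first
        data = np.flipud(frame.to_ndarray(format = 'rgb24'))

        # 3 components, because 'rgb24' decodes to an (h, w, 3) uint8 array
        return context.texture((data.shape[1], data.shape[0]), 3, data.tobytes())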
 
@@ -333,557 +329,440 @@ class WavingEffectProgram(OpenglEffectProgram):
          1, 1, 1.0, 1.0,
      ], dtype = 'f4')

- NUMPY_FORMAT = 'rgb24'

- # TODO: Maybe rename as ContextHandler (?)
- class VideoProcessor:
-     """
-     Class to read a video, process it (maybe
-     applying some effects) and writing the
-     results in a new video.
-     """

-     @property
-     def fbo(
-         self
-     ) -> moderngl.Framebuffer:
-         """
-         The frame buffer object for the video frame
-         size.
-         """
-         if not hasattr(self, '_fbo'):
-             self._fbo = self.context.fbo(self.reader.size)

-         return self._fbo
-
-     @property
-     def vao(
-         self
-     ) -> moderngl.VertexArray:
-         """
-         Shortcut to the program vao.
-         """
-         return self.program.vao
+ # TODO: This code below was using the pyav
+ # reading library that has been moved, that
+ # is why it is commented by now

-     @property
-     def first_frame(
-         self
-     ) -> Union['VideoFrame', None]:
-         """
-         The first frame of the video as a VideoFrame.
-         """
-         if not hasattr(self, '_first_frame'):
-             # Framebuffer to render
-             self.fbo.use()
-             self._first_frame = self.reader.next_frame
-             # Reset the reader
-             self.reader.reset()
+ NUMPY_FORMAT = 'rgb24'

-         return self._first_frame
+ # # TODO: Maybe rename as ContextHandler (?)
+ # class VideoProcessor:
+ #     """
+ #     Class to read a video, process it (maybe
+ #     applying some effects) and writing the
+ #     results in a new video.
+ #     """
+
+ #     @property
+ #     def fbo(
+ #         self
+ #     ) -> moderngl.Framebuffer:
+ #         """
+ #         The frame buffer object for the video frame
+ #         size.
+ #         """
+ #         if not hasattr(self, '_fbo'):
+ #             self._fbo = self.context.fbo(self.reader.size)
+
+ #         return self._fbo

-     @property
-     def first_frame_as_texture(
-         self
-     ) -> moderngl.Texture:
-         """
-         The first frame of the video as a texture.
-         This is needed to start the process.
-         """
-         if not hasattr(self, '_first_frame_as_texture'):
-             self._first_frame_as_texture = self.frame_to_texture(self.first_frame, NUMPY_FORMAT)
-             self._first_frame_as_texture.build_mipmaps()
-
-         return self._first_frame_as_texture
+ #     @property
+ #     def vao(
+ #         self
+ #     ) -> moderngl.VertexArray:
+ #         """
+ #         Shortcut to the program vao.
+ #         """
+ #         return self.program.vao
+
+ #     @property
+ #     def first_frame(
+ #         self
+ #     ) -> Union['VideoFrame', None]:
+ #         """
+ #         The first frame of the video as a VideoFrame.
+ #         """
+ #         if not hasattr(self, '_first_frame'):
+ #             # Framebuffer to render
+ #             self.fbo.use()
+ #             self._first_frame = self.reader.next_frame
+ #             # Reset the reader
+ #             self.reader.reset()
+
+ #         return self._first_frame

-     @property
-     def program(
-         self
-     ) -> OpenglProgram:
-         """
-         Shortcut to the context program custom class
-         instance.
-         """
-         return self.context.program
-
-     def __init__(
-         self,
-         filename: str,
-         output_filename: str
-     ):
-         self.filename: str = filename
-         """
-         The filename of the video we want to read and
-         process.
-         """
-         self.output_filename: str = output_filename
-         """
-         The filename of the video we want to generate
-         and store once the original one has been
-         processed.
-         """
-         # TODO: Hardcoded by now
-         effect = WavingEffectProgram()
-         self.context: OpenglContext = OpenglContext(
-             vertex_shader = effect.vertex_shader,
-             fragment_shader = effect.fragment_shader,
-             vertices = effect.vertices
-         )
-         """
-         The headless context as a custom class instance.
-         """
-         self.reader: VideoReader = VideoReader(self.filename)
-         """
-         The video reader instance.
-         """
-         # TODO: This has to be dynamic, but
-         # according to what (?)
+ #     @property
+ #     def first_frame_as_texture(
+ #         self
+ #     ) -> moderngl.Texture:
+ #         """
+ #         The first frame of the video as a texture.
+ #         This is needed to start the process.
+ #         """
+ #         if not hasattr(self, '_first_frame_as_texture'):
+ #             self._first_frame_as_texture = self.frame_to_texture(self.first_frame, NUMPY_FORMAT)
+ #             self._first_frame_as_texture.build_mipmaps()
+
+ #         return self._first_frame_as_texture
+
+ #     @property
+ #     def program(
+ #         self
+ #     ) -> OpenglProgram:
+ #         """
+ #         Shortcut to the context program custom class
+ #         instance.
+ #         """
+ #         return self.context.program
+
+ #     def __init__(
+ #         self,
+ #         filename: str,
+ #         output_filename: str
+ #     ):
+ #         self.filename: str = filename
+ #         """
+ #         The filename of the video we want to read and
+ #         process.
+ #         """
+ #         self.output_filename: str = output_filename
+ #         """
+ #         The filename of the video we want to generate
+ #         and store once the original one has been
+ #         processed.
+ #         """
+ #         # TODO: Hardcoded by now
+ #         effect = WavingEffectProgram()
+ #         self.context: OpenglContext = OpenglContext(
+ #             vertex_shader = effect.vertex_shader,
+ #             fragment_shader = effect.fragment_shader,
+ #             vertices = effect.vertices
+ #         )
+ #         """
+ #         The headless context as a custom class instance.
+ #         """
+ #         self.reader: VideoReader = VideoReader(self.filename)
+ #         """
+ #         The video reader instance.
+ #         """
+ #         # TODO: This has to be dynamic, but
+ #         # according to what (?)

-         # TODO: Where do we obtain this from (?)
-         VIDEO_CODEC_NAME = 'libx264'
-         # TODO: Where do we obtain this from (?)
-         PIXEL_FORMAT = 'yuv420p'
-         self.writer: VideoWriter = (
-             VideoWriter(output_filename)
-             .set_video_stream(VIDEO_CODEC_NAME, self.reader.fps, self.reader.size, PIXEL_FORMAT)
-             .set_audio_stream_from_template(self.reader.audio_stream)
-         )
-         """
-         The video writer instance.
-         """
-
-     # TODO: This should be a utils
-     def frame_to_texture(
-         self,
-         frame: 'VideoFrame',
-         numpy_format: str = 'rgb24'
-     ):
-         """
-         Transform the given 'frame' to an opengl
-         texture.
-         """
-         # To numpy RGB inverted for OpenGL
-         # TODO: Maybe we can receive normal frames
-         # here, as np.ndarray, from other libraries
-         frame: np.ndarray = np.flipud(frame.to_ndarray(format = numpy_format))
-
-         return self.context.context.texture((frame.shape[1], frame.shape[0]), 3, frame.tobytes())
-
-     def process(
-         self
-     ):
-         """
-         Process the video and generate the new one.
-
-         TODO: Should I pass some effects to apply (?)
-         """
-         # [ 1 ] Initialize fbo and texture mipmaps
-         self.first_frame_as_texture # This forces it in the code
-
-         # [ 2 ] Set general program uniforms
-         AMP = 0.05
-         FREQ = 10.0
-         SPEED = 2.0
-         (
-             self.context.program
-             .set_value('amp', AMP)
-             .set_value('freq', FREQ)
-             .set_value('speed', SPEED)
-         )
-
-         # [ 3 ] Process the frames
-         frame_index = 0
-         for frame_or_packet in self.reader.iterate_with_audio(
-             do_decode_video = True,
-             do_decode_audio = False
-         ):
-             # This below is because of the parameters we
-             # passed to the method
-             is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
-             is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
-             # To simplify the process
-             if frame_or_packet is not None:
-                 frame_or_packet = frame_or_packet.data
-             if is_audio_packet:
-                 self.writer.mux(frame_or_packet)
-             elif is_video_frame:
-                 with Timer(is_silent_as_context = True) as timer:
-                     # Check this link:
-                     # https://stackoverflow.com/a/63153755
-
-                     def process_frame(
-                         frame: 'VideoFrame'
-                     ):
-                         # [ 4 ] Add specific program uniforms
-                         self.program.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(self.reader.fps)))
+ #         # TODO: Where do we obtain this from (?)
+ #         VIDEO_CODEC_NAME = 'libx264'
+ #         # TODO: Where do we obtain this from (?)
+ #         PIXEL_FORMAT = 'yuv420p'
+ #         self.writer: VideoWriter = (
+ #             VideoWriter(output_filename)
+ #             .set_video_stream(VIDEO_CODEC_NAME, self.reader.fps, self.reader.size, PIXEL_FORMAT)
+ #             .set_audio_stream_from_template(self.reader.audio_stream)
+ #         )
+ #         """
+ #         The video writer instance.
+ #         """
+
+ #     # TODO: This should be a utils
+ #     def frame_to_texture(
+ #         self,
+ #         frame: 'VideoFrame',
+ #         numpy_format: str = 'rgb24'
+ #     ):
+ #         """
+ #         Transform the given 'frame' to an opengl
+ #         texture.
+ #         """
+ #         # To numpy RGB inverted for OpenGL
+ #         # TODO: Maybe we can receive normal frames
+ #         # here, as np.ndarray, from other libraries
+ #         frame: np.ndarray = np.flipud(frame.to_ndarray(format = numpy_format))
+
+ #         return self.context.context.texture((frame.shape[1], frame.shape[0]), 3, frame.tobytes())
+
+ #     def process(
+ #         self
+ #     ):
+ #         """
+ #         Process the video and generate the new one.
+
+ #         TODO: Should I pass some effects to apply (?)
+ #         """
+ #         # [ 1 ] Initialize fbo and texture mipmaps
+ #         self.first_frame_as_texture # This forces it in the code
+
+ #         # [ 2 ] Set general program uniforms
+ #         AMP = 0.05
+ #         FREQ = 10.0
+ #         SPEED = 2.0
+ #         (
+ #             self.context.program
+ #             .set_value('amp', AMP)
+ #             .set_value('freq', FREQ)
+ #             .set_value('speed', SPEED)
+ #         )
+
+ #         # [ 3 ] Process the frames
+ #         frame_index = 0
+ #         for frame_or_packet in self.reader.iterate_with_audio(
+ #             do_decode_video = True,
+ #             do_decode_audio = False
+ #         ):
+ #             # This below is because of the parameters we
+ #             # passed to the method
+ #             is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
+ #             is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
+
+ #             # To simplify the process
+ #             if frame_or_packet is not None:
+ #                 frame_or_packet = frame_or_packet.data
+ #             if is_audio_packet:
+ #                 self.writer.mux(frame_or_packet)
+ #             elif is_video_frame:
+ #                 with Timer(is_silent_as_context = True) as timer:
+ #                     # Check this link:
+ #                     # https://stackoverflow.com/a/63153755
+
+ #                     def process_frame(
+ #                         frame: 'VideoFrame'
+ #                     ):
+ #                         # [ 4 ] Add specific program uniforms
+ #                         # TODO: T moved to 'yta_video_pyav'
+ #                         self.program.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(self.reader.fps)))

-                         # Create texture
-                         texture = self.frame_to_texture(frame)
-                         texture.use()
-
-                         # Activate frame buffer
-                         self.fbo.use()
-
-                         # Render, captured by the fbo
-                         self.vao.render(moderngl.TRIANGLE_STRIP)
-
-                         # Processed GPU result (from fbo) to numpy
-                         processed_data = np.frombuffer(
-                             self.fbo.read(components = 3, alignment = 1), dtype = np.uint8
-                         )
-
-                         # Invert numpy to normal frame
-                         processed_data = np.flipud(
-                             processed_data.reshape((texture.size[1], texture.size[0], 3))
-                         )
-
-                         # To VideoFrame and to buffer
-                         frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
-                         # TODO: What is this for (?)
-                         #out_frame.pict_type = 'NONE'
-
-                         return frame
-
-                     self.writer.mux_video_frame(process_frame(frame_or_packet))
-
-                 print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
-                 frame_index += 1
-
-         # While this code can be finished, the work in
-         # the muxer could be not finished and have some
-         # packets waiting to be written. Here we tell
-         # the muxer to process all those packets.
-         self.writer.mux_video_frame(None)
-
-         # TODO: Maybe move this to the '__del__' (?)
-         self.writer.output.close()
-         self.reader.container.close()
-         print(f'Saved as "{self.output_filename}".')
-
- def video_modified_stored():
-     # This path below was trimmed in an online platform
-     # and seems to be bad codified and generates error
-     # when processing it, but it is readable in the
-     # file explorer...
-     #VIDEO_PATH = 'test_files/test_1_short_broken.mp4'
-     # This is short but is working well
-     VIDEO_PATH = "test_files/test_1_short_2.mp4"
-     # Long version below, comment to test faster
-     #VIDEO_PATH = "test_files/test_1.mp4"
-     OUTPUT_PATH = "test_files/output.mp4"
-     # TODO: This has to be dynamic, but
-     # according to what (?)
-     NUMPY_FORMAT = 'rgb24'
-     # TODO: Where do we obtain this from (?)
-     VIDEO_CODEC_NAME = 'libx264'
-     # TODO: Where do we obtain this from (?)
-     PIXEL_FORMAT = 'yuv420p'
-
-     from yta_video_opengl.classes import WavingFrame, BreathingFrame, HandheldFrame, OrbitingFrame, RotatingInCenterFrame, StrangeTvFrame, GlitchRgbFrame, WavingNode
-     from yta_video_opengl.utils import texture_to_frame, frame_to_texture
-     from yta_video_opengl.video import Video
-     from yta_video_opengl.complete.timeline import Timeline
-
-     # TODO: This test below is just to validate
-     # that it is cropping and placing correctly
-     # but the videos are only in one track
-     # video = Video(VIDEO_PATH, 0.25, 0.75)
-     # timeline = Timeline()
-     # timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.5)
-     # # This is successfully raising an exception
-     # #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
-     # timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.75)
-     # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
-     # # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
-     # timeline.render(OUTPUT_PATH)
-
-     # # Testing concatenating
-     # timeline = Timeline()
-     # # When you concat like this, some of the
-     # # videos have frames that cannot be accessed
-     # # and I don't know why...
-     # timeline.add_video(Video('test_files/glitch_rgb_frame.mp4'))
-     # timeline.add_video(Video('test_files/output.mp4'))
-     # timeline.add_video(Video('test_files/output_render.mp4'))
-     # timeline.add_video(Video('test_files/strange_tv_frame.mp4'))
-     # timeline.add_video(Video('test_files/test_1.mp4'))
-     # timeline.add_video(Video('test_files/test_1_short_2.mp4'))
-     # timeline.add_video(Video('test_files/test_audio_1st_track_solo_v0_0_15.mp4'))
-     # timeline.add_video(Video('test_files/test_audio_2nd_track_solo_v0_0_15.mp4'))
-     # timeline.add_video(Video('test_files/test_audio_combined_tracks_v0_0_015.mp4'))
-     # timeline.add_video(Video('test_files/test_audio_combined_v0_0_15.mp4'))
-     # timeline.add_video(Video('test_files/test_blend_add_v0_0_16.mp4'))
-     # timeline.add_video(Video('test_files/test_blend_difference_v0_0_16.mp4'))
-     # timeline.add_video(Video('test_files/test_blend_multiply_v0_0_16.mp4'))
-     # timeline.add_video(Video('test_files/test_blend_overlay_v0_0_16.mp4'))
-     # timeline.add_video(Video('test_files/test_blend_screen_v0_0_16.mp4'))
-     # timeline.add_video(Video('test_files/test_combine_skipping_empty_using_priority_v0_0_18.mp4'))
-     # timeline.add_video(Video('test_files/test_ok_v0_0_13.mp4'))
-
-     # timeline.render('test_files/concatenated.mp4')
-
-
-     # return
-
-     # TODO: This test will add videos that
-     # must be played at the same time
-     video = Video(VIDEO_PATH, 0.25, 0.75)
-     timeline = Timeline()
-
-     transitions_30fps = 'C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4'
-     simpsons_60fps = 'C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4'
-
-     # Track 1
-     timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75, track_index = 0)
-     timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
-     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
-
-     #timeline.tracks[0].mute()
-
-     # Track 2
-     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
-     timeline.add_video(Video(simpsons_60fps, 5.8, 7.8), 0.6, track_index = 1)
-     # 30fps
-     # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
-     # 29.97fps
-     # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
-
-     timeline.render(OUTPUT_PATH)
-
-     return
-
-     Video(VIDEO_PATH, 0.25, 0.75).save_as(OUTPUT_PATH)
-
-     return
-
-     video = VideoReader(VIDEO_PATH)
-     video_writer = (
-         VideoWriter(OUTPUT_PATH)
-         #.set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
-         .set_video_stream_from_template(video.video_stream)
-         .set_audio_stream_from_template(video.audio_stream)
-     )
+ #                         # Create texture
+ #                         texture = self.frame_to_texture(frame)
+ #                         texture.use()
+
+ #                         # Activate frame buffer
+ #                         self.fbo.use()
+
+ #                         # Render, captured by the fbo
+ #                         self.vao.render(moderngl.TRIANGLE_STRIP)
+
+ #                         # Processed GPU result (from fbo) to numpy
+ #                         processed_data = np.frombuffer(
+ #                             self.fbo.read(components = 3, alignment = 1), dtype = np.uint8
+ #                         )
+
+ #                         # Invert numpy to normal frame
+ #                         processed_data = np.flipud(
+ #                             processed_data.reshape((texture.size[1], texture.size[0], 3))
+ #                         )
+
+ #                         # To VideoFrame and to buffer
+ #                         frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
+ #                         # TODO: What is this for (?)
+ #                         #out_frame.pict_type = 'NONE'
+
+ #                         return frame
+
+ #                     self.writer.mux_video_frame(process_frame(frame_or_packet))
+
+ #                 print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+ #                 frame_index += 1
+
+ #         # While this code can be finished, the work in
+ #         # the muxer could be not finished and have some
+ #         # packets waiting to be written. Here we tell
+ #         # the muxer to process all those packets.
+ #         self.writer.mux_video_frame(None)
+
+ #         # TODO: Maybe move this to the '__del__' (?)
+ #         self.writer.output.close()
+ #         self.reader.container.close()
+ #         print(f'Saved as "{self.output_filename}".')
+
+
+ # def video_modified_stored():
+ #     # This path below was trimmed in an online platform
+ #     # and seems to be bad codified and generates error
+ #     # when processing it, but it is readable in the
+ #     # file explorer...
+ #     #VIDEO_PATH = 'test_files/test_1_short_broken.mp4'
+ #     # This is short but is working well
+ #     VIDEO_PATH = "test_files/test_1_short_2.mp4"
+ #     # Long version below, comment to test faster
+ #     #VIDEO_PATH = "test_files/test_1.mp4"
+ #     OUTPUT_PATH = "test_files/output.mp4"
+ #     # TODO: This has to be dynamic, but
+ #     # according to what (?)
+ #     NUMPY_FORMAT = 'rgb24'
+ #     # TODO: Where do we obtain this from (?)
+ #     VIDEO_CODEC_NAME = 'libx264'
+ #     # TODO: Where do we obtain this from (?)
+ #     PIXEL_FORMAT = 'yuv420p'
+
+ #     from yta_video_opengl.classes import WavingFrame, BreathingFrame, HandheldFrame, OrbitingFrame, RotatingInCenterFrame, StrangeTvFrame, GlitchRgbFrame, WavingNode
+ #     from yta_video_opengl.utils import texture_to_frame, frame_to_texture
+
+ #     video = VideoReader(VIDEO_PATH)
+ #     video_writer = (
+ #         VideoWriter(OUTPUT_PATH)
+ #         #.set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
+ #         .set_video_stream_from_template(video.video_stream)
+ #         .set_audio_stream_from_template(video.audio_stream)
+ #     )

-     #effect = WavingFrame(size = video.size)
-     #effect = BreathingFrame(size = video.size)
-     #effect = HandheldFrame(size = video.size)
-     # effect = OrbitingFrame(
-     #     size = video.size,
-     #     first_frame = video.next_frame
-     # )
-     # effect = RotatingInCenterFrame(
-     #     size = video.size,
-     #     first_frame = video.next_frame
-     # )
-     effect = GlitchRgbFrame(
-         size = video.size,
-         first_frame = video.next_frame
-     )
-     context = moderngl.create_context(standalone = True)
+ #     #effect = WavingFrame(size = video.size)
+ #     #effect = BreathingFrame(size = video.size)
+ #     #effect = HandheldFrame(size = video.size)
+ #     # effect = OrbitingFrame(
+ #     #     size = video.size,
+ #     #     first_frame = video.next_frame
+ #     # )
+ #     # effect = RotatingInCenterFrame(
+ #     #     size = video.size,
+ #     #     first_frame = video.next_frame
+ #     # )
+ #     effect = GlitchRgbFrame(
+ #         size = video.size,
+ #         first_frame = video.next_frame
+ #     )
+ #     context = moderngl.create_context(standalone = True)

-     # New way, with nodes
-     node = WavingNode(context, video.size, amplitude = 0.2, frequency = 9, speed = 3)
-     # We need to reset it to being again pointing
-     # to the first frame...
-     # TODO: Improve this by, maybe, storing the first
-     # frame in memory so we can append it later, or
-     # using the '.seek(0)' even when it could be not
-     # accurate
-     video.reset()
-
-     frame_index = 0
-     for frame_or_packet in video.iterate_with_audio(
-         do_decode_video = True,
-         do_decode_audio = False
-     ):
-         # This below is because of the parameters we
-         # passed to the method
-         is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
-         is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
-         # To simplify the process
-         if frame_or_packet is not None:
-             frame_or_packet = frame_or_packet.value
-
-         if is_audio_packet:
-             video_writer.mux(frame_or_packet)
-         elif is_video_frame:
-             with Timer(is_silent_as_context = True) as timer:
-                 t = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
-                 # This is another way of getting 't'
-                 #t = float(frame_or_packet.pts * video.time_base)
-
-                 video_writer.mux_video_frame(
-                     frame = texture_to_frame(
-                         texture = node.process(
-                             input = frame_or_packet,
-                             t = t
-                         )
-                     )
-                 )
-
-                 # video_writer.mux_video_frame(
-                 #     effect.process_frame(
-                 #         frame = frame_or_packet,
-                 #         t = t,
-                 #         numpy_format = NUMPY_FORMAT
-                 #     )
-                 # )
-
-                 frame_index += 1
-
-                 print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
-
-     video_writer.mux_video_frame(None)
-
-     # TODO: Maybe move this to the '__del__' (?)
-     video_writer.output.close()
-     video.container.close()
-     print(f'Saved as "{OUTPUT_PATH}".')
-
-     return
-
-     # # TODO: By now this is applying an effect
-     # # by default
-     # VideoProcessor(
-     #     filename = VIDEO_PATH,
-     #     output_filename = OUTPUT_PATH
-     # ).process()
-
-     # return
+ #     # New way, with nodes
+ #     node = WavingNode(context, video.size, amplitude = 0.2, frequency = 9, speed = 3)
+ #     # We need to reset it to being again pointing
+ #     # to the first frame...
+ #     # TODO: Improve this by, maybe, storing the first
+ #     # frame in memory so we can append it later, or
+ #     # using the '.seek(0)' even when it could be not
+ #     # accurate
+ #     video.reset()
+
+ #     # # TODO: By now this is applying an effect
+ #     # # by default
+ #     # VideoProcessor(
+ #     #     filename = VIDEO_PATH,
+ #     #     output_filename = OUTPUT_PATH
+ #     # ).process()
+
+ #     # return


-     AMP = 0.05
-     FREQ = 10.0
-     SPEED = 2.0
-
-     # Get the information about the video
-     video = VideoReader(VIDEO_PATH)
-
-     # ModernGL context without window
-     context = moderngl.create_standalone_context()
-
-     waving_frame_effect = WavingFrame(
-         context = context,
-         frame_size = video.size
-     )
-
-     vao = waving_frame_effect.vao
-
-     # TODO: This has to be dynamic, but
-     # according to what (?)
-     NUMPY_FORMAT = 'rgb24'
-     # TODO: Where do we obtain this from (?)
-     VIDEO_CODEC_NAME = 'libx264'
-     # TODO: Where do we obtain this from (?)
-     PIXEL_FORMAT = 'yuv420p'
-
-     # Framebuffer to render
-     fbo = waving_frame_effect.fbo
-     fbo.use()
-
-     # Decode first frame and use as texture
-     first_frame = video.next_frame
-     # We need to reset it to being again pointing
-     # to the first frame...
-     # TODO: Improve this by, maybe, storing the first
-     # frame in memory so we can append it later, or
-     # using the '.seek(0)' even when it could be not
-     # accurate
-     video = VideoReader(VIDEO_PATH)
-
-     # Most of OpenGL textures expect origin in lower
-     # left corner
-     # TODO: What if alpha (?)
-     # TODO: Move this to the OpenglFrameEffect maybe (?)
+ #     AMP = 0.05
+ #     FREQ = 10.0
+ #     SPEED = 2.0
+
+ #     # Get the information about the video
+ #     video = VideoReader(VIDEO_PATH)
+
+ #     # ModernGL context without window
+ #     context = moderngl.create_standalone_context()
+
+ #     waving_frame_effect = WavingFrame(
+ #         context = context,
+ #         frame_size = video.size
+ #     )
+
+ #     vao = waving_frame_effect.vao
+
+ #     # TODO: This has to be dynamic, but
+ #     # according to what (?)
+ #     NUMPY_FORMAT = 'rgb24'
+ #     # TODO: Where do we obtain this from (?)
+ #     VIDEO_CODEC_NAME = 'libx264'
+ #     # TODO: Where do we obtain this from (?)
+ #     PIXEL_FORMAT = 'yuv420p'
+
+ #     # Framebuffer to render
+ #     fbo = waving_frame_effect.fbo
+ #     fbo.use()
+
+ #     # Decode first frame and use as texture
+ #     first_frame = video.next_frame
+ #     # We need to reset it to being again pointing
+ #     # to the first frame...
+ #     # TODO: Improve this by, maybe, storing the first
+ #     # frame in memory so we can append it later, or
+ #     # using the '.seek(0)' even when it could be not
+ #     # accurate
+ #     video = VideoReader(VIDEO_PATH)
+
+ #     # Most of OpenGL textures expect origin in lower
+ #     # left corner
+ #     # TODO: What if alpha (?)
+ #     # TODO: Move this to the OpenglFrameEffect maybe (?)

-     texture: moderngl.Texture = frame_to_texture(first_frame, waving_frame_effect.context)
-     texture.build_mipmaps()
-
-     # These properties can be set before
-     # iterating the frames or maybe for
-     # each iteration... depending on the
-     # effect.
-     # Uniforms (properties)
-     (
-         waving_frame_effect
-         .set_value('amp', AMP)
-         .set_value('freq', FREQ)
-         .set_value('speed', SPEED)
-     )
-
-     # Writer with H.264 codec
-     video_writer = (
-         VideoWriter(OUTPUT_PATH)
-         .set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
-         .set_audio_stream_from_template(video.audio_stream)
-     )
-
-     frame_index = 0
-     for frame_or_packet in video.iterate_with_audio(
-         do_decode_video = True,
-         do_decode_audio = False
-     ):
-         # This below is because of the parameters we
-         # passed to the method
-         is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
-         is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
-         # To simplify the process
-         if frame_or_packet is not None:
-             frame_or_packet = frame_or_packet.data
-
-         if is_audio_packet:
-             video_writer.mux(frame_or_packet)
-         elif is_video_frame:
-             with Timer(is_silent_as_context = True) as timer:
-
-                 def process_frame(
-                     frame: 'VideoFrame'
-                 ):
-                     # Add some variables if we need, for the
-                     # opengl change we are applying (check the
-                     # program code)
-                     waving_frame_effect.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(video.fps)))
+ #     texture: moderngl.Texture = frame_to_texture(first_frame, waving_frame_effect.context)
+ #     texture.build_mipmaps()
+
+ #     # These properties can be set before
+ #     # iterating the frames or maybe for
+ #     # each iteration... depending on the
+ #     # effect.
+ #     # Uniforms (properties)
+ #     (
+ #         waving_frame_effect
+ #         .set_value('amp', AMP)
+ #         .set_value('freq', FREQ)
+ #         .set_value('speed', SPEED)
+ #     )
+
+ #     # Writer with H.264 codec
+ #     video_writer = (
+ #         VideoWriter(OUTPUT_PATH)
+ #         .set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
+ #         .set_audio_stream_from_template(video.audio_stream)
+ #     )
+
+ #     frame_index = 0
+ #     for frame_or_packet in video.iterate_with_audio(
+ #         do_decode_video = True,
+ #         do_decode_audio = False
+ #     ):
+ #         # This below is because of the parameters we
+ #         # passed to the method
+ #         is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
+ #         is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
+
+ #         # To simplify the process
+ #         if frame_or_packet is not None:
+ #             frame_or_packet = frame_or_packet.data
+
+ #         if is_audio_packet:
+ #             video_writer.mux(frame_or_packet)
+ #         elif is_video_frame:
+ #             with Timer(is_silent_as_context = True) as timer:
+
+ #                 def process_frame(
+ #                     frame: 'VideoFrame'
+ #                 ):
+ #                     # Add some variables if we need, for the
+ #                     # opengl change we are applying (check the
+ #                     # program code)
+ #                     waving_frame_effect.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(video.fps)))

-                     # Create texture
-                     texture = frame_to_texture(frame, waving_frame_effect.context)
-                     texture.use()
-
-                     # Render with shader to frame buffer
-                     fbo.use()
-                     vao.render(moderngl.TRIANGLE_STRIP)
-
-                     # Processed GPU result to numpy
-                     processed_data = np.frombuffer(
-                         fbo.read(components = 3, alignment = 1), dtype = np.uint8
-                     )
-
-                     # Invert numpy to normal frame
-                     # TODO: Can I use the texture.size to fill
-                     # these 'img_array.shape[0]' (?)
-                     processed_data = np.flipud(
-                         processed_data.reshape((texture.size[1], texture.size[0], 3))
-                     )
-
-                     # To VideoFrame and to buffer
-                     frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
-                     # TODO: What is this for (?)
-                     #out_frame.pict_type = 'NONE'
-                     return frame
-
-                 video_writer.mux_video_frame(process_frame(frame_or_packet))
-
-             print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
-             frame_index += 1
-
-     # While this code can be finished, the work in
-     # the muxer could be not finished and have some
-     # packets waiting to be written. Here we tell
-     # the muxer to process all those packets.
-     video_writer.mux_video_frame(None)
-
-     # TODO: Maybe move this to the '__del__' (?)
-     video_writer.output.close()
-     video.container.close()
-     print(f'Saved as "{OUTPUT_PATH}".')
+ #                     # Create texture
+ #                     texture = frame_to_texture(frame, waving_frame_effect.context)
+ #                     texture.use()
+
+ #                     # Render with shader to frame buffer
+ #                     fbo.use()
+ #                     vao.render(moderngl.TRIANGLE_STRIP)
+
+ #                     # Processed GPU result to numpy
+ #                     processed_data = np.frombuffer(
+ #                         fbo.read(components = 3, alignment = 1), dtype = np.uint8
+ #                     )
+
+ #                     # Invert numpy to normal frame
+ #                     # TODO: Can I use the texture.size to fill
+ #                     # these 'img_array.shape[0]' (?)
+ #                     processed_data = np.flipud(
+ #                         processed_data.reshape((texture.size[1], texture.size[0], 3))
+ #                     )
+
+ #                     # To VideoFrame and to buffer
+ #                     frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
+ #                     # TODO: What is this for (?)
+ #                     #out_frame.pict_type = 'NONE'
+ #                     return frame
+
+ #                 video_writer.mux_video_frame(process_frame(frame_or_packet))
+
+ #             print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+ #             frame_index += 1
+
+ #     # While this code can be finished, the work in
+ #     # the muxer could be not finished and have some
+ #     # packets waiting to be written. Here we tell
+ #     # the muxer to process all those packets.
+ #     video_writer.mux_video_frame(None)
+
+ #     # TODO: Maybe move this to the '__del__' (?)
+ #     video_writer.output.close()
+ #     video.container.close()
+ #     print(f'Saved as "{OUTPUT_PATH}".')
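
The commented-out code above is a render-to-texture loop: upload each decoded frame as a texture, draw a full-screen quad through the shader program into a framebuffer, read the pixels back, and mux the result, flushing the encoder at the end. Below is a self-contained sketch of that loop with plain PyAV and moderngl standing in for the removed VideoReader/VideoWriter wrappers; the pass-through shader is a placeholder, the paths, 'libx264' and 'yuv420p' mirror the constants above, the audio remux is omitted, and none of it is this package's API:

    import av
    import moderngl
    import numpy as np

    ctx = moderngl.create_standalone_context()
    program = ctx.program(
        vertex_shader = '''
            #version 330
            in vec2 in_vert;
            in vec2 in_texcoord;
            out vec2 v_uv;
            void main() {
                v_uv = in_texcoord;
                gl_Position = vec4(in_vert, 0.0, 1.0);
            }
        ''',
        fragment_shader = '''
            #version 330
            uniform sampler2D tex;
            in vec2 v_uv;
            out vec4 f_color;
            void main() { f_color = texture(tex, v_uv); }
        '''
    )

    # Full-screen quad drawn as a triangle strip, matching the
    # vao.render(moderngl.TRIANGLE_STRIP) calls in the diff
    vertices = np.array([
        -1, -1, 0.0, 0.0,
         1, -1, 1.0, 0.0,
        -1,  1, 0.0, 1.0,
         1,  1, 1.0, 1.0,
    ], dtype = 'f4')
    vao = ctx.vertex_array(program, [(ctx.buffer(vertices), '2f 2f', 'in_vert', 'in_texcoord')])

    with av.open('test_files/test_1_short_2.mp4') as reader, av.open('test_files/output.mp4', 'w') as writer:
        video_stream = reader.streams.video[0]
        output_stream = writer.add_stream('libx264', rate = video_stream.average_rate)
        output_stream.width = video_stream.width
        output_stream.height = video_stream.height
        output_stream.pix_fmt = 'yuv420p'

        fbo = ctx.simple_framebuffer((video_stream.width, video_stream.height), components = 3)
        fbo.use()

        for frame in reader.decode(video_stream):
            # Upload the frame flipped, because OpenGL's origin is bottom-left
            rgb = np.flipud(frame.to_ndarray(format = 'rgb24'))
            texture = ctx.texture((rgb.shape[1], rgb.shape[0]), 3, rgb.tobytes())
            texture.use()
            vao.render(moderngl.TRIANGLE_STRIP)

            # Read the render back from the framebuffer and undo the flip
            data = np.frombuffer(fbo.read(components = 3, alignment = 1), dtype = np.uint8)
            data = np.flipud(data.reshape((rgb.shape[0], rgb.shape[1], 3)))

            for packet in output_stream.encode(av.VideoFrame.from_ndarray(data, format = 'rgb24')):
                writer.mux(packet)
            texture.release()

        # Drain the packets still buffered in the encoder, the same role
        # the writer.mux_video_frame(None) calls play in the diff
        for packet in output_stream.encode():
            writer.mux(packet)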