yta-video-opengl 0.0.22__py3-none-any.whl → 0.0.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_video_opengl/editor.py +333 -0
- yta_video_opengl/nodes/__init__.py +32 -28
- yta_video_opengl/nodes/audio/__init__.py +164 -55
- yta_video_opengl/nodes/video/__init__.py +27 -1
- yta_video_opengl/nodes/video/{opengl.py → opengl/__init__.py} +8 -4
- yta_video_opengl/nodes/video/opengl/experimental.py +760 -0
- yta_video_opengl/tests.py +236 -358
- yta_video_opengl/utils.py +9 -421
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/METADATA +2 -6
- yta_video_opengl-0.0.24.dist-info/RECORD +13 -0
- yta_video_opengl/audio.py +0 -219
- yta_video_opengl/classes.py +0 -1276
- yta_video_opengl/complete/__init__.py +0 -0
- yta_video_opengl/complete/frame_combinator.py +0 -204
- yta_video_opengl/complete/frame_generator.py +0 -319
- yta_video_opengl/complete/frame_wrapper.py +0 -135
- yta_video_opengl/complete/timeline.py +0 -571
- yta_video_opengl/complete/track/__init__.py +0 -500
- yta_video_opengl/complete/track/media/__init__.py +0 -222
- yta_video_opengl/complete/track/parts.py +0 -267
- yta_video_opengl/complete/track/utils.py +0 -78
- yta_video_opengl/media.py +0 -347
- yta_video_opengl/reader/__init__.py +0 -710
- yta_video_opengl/reader/cache/__init__.py +0 -253
- yta_video_opengl/reader/cache/audio.py +0 -195
- yta_video_opengl/reader/cache/utils.py +0 -48
- yta_video_opengl/reader/cache/video.py +0 -113
- yta_video_opengl/t.py +0 -233
- yta_video_opengl/video.py +0 -277
- yta_video_opengl/writer.py +0 -278
- yta_video_opengl-0.0.22.dist-info/RECORD +0 -31
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/LICENSE +0 -0
- {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/WHEEL +0 -0
yta_video_opengl/tests.py
CHANGED
@@ -16,16 +16,12 @@ Interesting information:
 | RBO | Render Buffer Object | Almacén intermedio (profundidad, etc.) |
 
 """
-from yta_validation import PythonValidator
 from yta_validation.parameter import ParameterValidator
-from yta_video_opengl.reader import VideoReader
-from yta_video_opengl.writer import VideoWriter
-from yta_timer import Timer
-from yta_video_frame_time import T
+# from yta_video_opengl.reader import VideoReader
+# from yta_video_opengl.writer import VideoWriter
 from abc import abstractmethod
 from typing import Union
 
-import av
 import moderngl
 import numpy as np
 
@@ -333,232 +329,241 @@ class WavingEffectProgram(OpenglEffectProgram):
             1, 1, 1.0, 1.0,
         ], dtype = 'f4')
 
-NUMPY_FORMAT = 'rgb24'
 
-# TODO: Maybe rename as ContextHandler (?)
-class VideoProcessor:
-    """
-    Class to read a video, process it (maybe
-    applying some effects) and writing the
-    results in a new video.
-    """
 
-    @property
-    def fbo(
-        self
-    ) -> moderngl.Framebuffer:
-        """
-        The frame buffer object for the video frame
-        size.
-        """
-        if not hasattr(self, '_fbo'):
-            self._fbo = self.context.fbo(self.reader.size)
 
-        return self._fbo
-
-    @property
-    def vao(
-        self
-    ) -> moderngl.VertexArray:
-        """
-        Shortcut to the program vao.
-        """
-        return self.program.vao
+# TODO: This code below was using the pyav
+# reading library that has been moved, that
+# is why it is commented by now
 
-    @property
-    def first_frame(
-        self
-    ) -> Union['VideoFrame', None]:
-        """
-        The first frame of the video as a VideoFrame.
-        """
-        if not hasattr(self, '_first_frame'):
-            # Framebuffer to render
-            self.fbo.use()
-            self._first_frame = self.reader.next_frame
-            # Reset the reader
-            self.reader.reset()
+NUMPY_FORMAT = 'rgb24'
 
-        return self._first_frame
+# # TODO: Maybe rename as ContextHandler (?)
+# class VideoProcessor:
+#     """
+#     Class to read a video, process it (maybe
+#     applying some effects) and writing the
+#     results in a new video.
+#     """
+
+#     @property
+#     def fbo(
+#         self
+#     ) -> moderngl.Framebuffer:
+#         """
+#         The frame buffer object for the video frame
+#         size.
+#         """
+#         if not hasattr(self, '_fbo'):
+#             self._fbo = self.context.fbo(self.reader.size)
+
+#         return self._fbo
 
-    @property
-    def first_frame_as_texture(
-        self
-    ) -> moderngl.Texture:
-        """
-        The first frame of the video as a texture.
-        This is needed to start the process.
-        """
-        if not hasattr(self, '_first_frame_as_texture'):
-            self._first_frame_as_texture = self.frame_to_texture(self.first_frame, NUMPY_FORMAT)
-            self._first_frame_as_texture.build_mipmaps()
-
-        return self._first_frame_as_texture
+#     @property
+#     def vao(
+#         self
+#     ) -> moderngl.VertexArray:
+#         """
+#         Shortcut to the program vao.
+#         """
+#         return self.program.vao
+
+#     @property
+#     def first_frame(
+#         self
+#     ) -> Union['VideoFrame', None]:
+#         """
+#         The first frame of the video as a VideoFrame.
+#         """
+#         if not hasattr(self, '_first_frame'):
+#             # Framebuffer to render
+#             self.fbo.use()
+#             self._first_frame = self.reader.next_frame
+#             # Reset the reader
+#             self.reader.reset()
+
+#         return self._first_frame
 
-    @property
-    def program(
-        self
-    ) -> OpenglProgram:
-        """
-        Shortcut to the context program custom class
-        instance.
-        """
-        return self.context.program
-
-    def __init__(
-        self,
-        filename: str,
-        output_filename: str
-    ):
-        self.filename: str = filename
-        """
-        The filename of the video we want to read and
-        process.
-        """
-        self.output_filename: str = output_filename
-        """
-        The filename of the video we want to generate
-        and store once the original one has been
-        processed.
-        """
-        # TODO: Hardcoded by now
-        effect = WavingEffectProgram()
-        self.context: OpenglContext = OpenglContext(
-            vertex_shader = effect.vertex_shader,
-            fragment_shader = effect.fragment_shader,
-            vertices = effect.vertices
-        )
-        """
-        The headless context as a custom class instance.
-        """
-        self.reader: VideoReader = VideoReader(self.filename)
-        """
-        The video reader instance.
-        """
-        # TODO: This has to be dynamic, but
-        # according to what (?)
+#     @property
+#     def first_frame_as_texture(
+#         self
+#     ) -> moderngl.Texture:
+#         """
+#         The first frame of the video as a texture.
+#         This is needed to start the process.
+#         """
+#         if not hasattr(self, '_first_frame_as_texture'):
+#             self._first_frame_as_texture = self.frame_to_texture(self.first_frame, NUMPY_FORMAT)
+#             self._first_frame_as_texture.build_mipmaps()
+
+#         return self._first_frame_as_texture
+
+#     @property
+#     def program(
+#         self
+#     ) -> OpenglProgram:
+#         """
+#         Shortcut to the context program custom class
+#         instance.
+#         """
+#         return self.context.program
+
+#     def __init__(
+#         self,
+#         filename: str,
+#         output_filename: str
+#     ):
+#         self.filename: str = filename
+#         """
+#         The filename of the video we want to read and
+#         process.
+#         """
+#         self.output_filename: str = output_filename
+#         """
+#         The filename of the video we want to generate
+#         and store once the original one has been
+#         processed.
+#         """
+#         # TODO: Hardcoded by now
+#         effect = WavingEffectProgram()
+#         self.context: OpenglContext = OpenglContext(
+#             vertex_shader = effect.vertex_shader,
+#             fragment_shader = effect.fragment_shader,
+#             vertices = effect.vertices
+#         )
+#         """
+#         The headless context as a custom class instance.
+#         """
+#         self.reader: VideoReader = VideoReader(self.filename)
+#         """
+#         The video reader instance.
+#         """
+#         # TODO: This has to be dynamic, but
+#         # according to what (?)
 
-        # TODO: Where do we obtain this from (?)
-        VIDEO_CODEC_NAME = 'libx264'
-        # TODO: Where do we obtain this from (?)
-        PIXEL_FORMAT = 'yuv420p'
-        self.writer: VideoWriter = (
-            VideoWriter(output_filename)
-            .set_video_stream(VIDEO_CODEC_NAME, self.reader.fps, self.reader.size, PIXEL_FORMAT)
-            .set_audio_stream_from_template(self.reader.audio_stream)
-        )
-        """
-        The video writer instance.
-        """
-
-    # TODO: This should be a utils
-    def frame_to_texture(
-        self,
-        frame: 'VideoFrame',
-        numpy_format: str = 'rgb24'
-    ):
-        """
-        Transform the given 'frame' to an opengl
-        texture.
-        """
-        # To numpy RGB inverted for OpenGL
-        # TODO: Maybe we can receive normal frames
-        # here, as np.ndarray, from other libraries
-        frame: np.ndarray = np.flipud(frame.to_ndarray(format = numpy_format))
-
-        return self.context.context.texture((frame.shape[1], frame.shape[0]), 3, frame.tobytes())
-
-    def process(
-        self
-    ):
-        """
-        Process the video and generate the new one.
-
-        TODO: Should I pass some effects to apply (?)
-        """
-        # [ 1 ] Initialize fbo and texture mipmaps
-        self.first_frame_as_texture # This forces it in the code
-
-        # [ 2 ] Set general program uniforms
-        AMP = 0.05
-        FREQ = 10.0
-        SPEED = 2.0
-        (
-            self.context.program
-            .set_value('amp', AMP)
-            .set_value('freq', FREQ)
-            .set_value('speed', SPEED)
-        )
-
-        # [ 3 ] Process the frames
-        frame_index = 0
-        for frame_or_packet in self.reader.iterate_with_audio(
-            do_decode_video = True,
-            do_decode_audio = False
-        ):
-            # This below is because of the parameters we
-            # passed to the method
-            is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
-            is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
-            # To simplify the process
-            if frame_or_packet is not None:
-                frame_or_packet = frame_or_packet.data
-            if is_audio_packet:
-                self.writer.mux(frame_or_packet)
-            elif is_video_frame:
-                with Timer(is_silent_as_context = True) as timer:
-                    # Check this link:
-                    # https://stackoverflow.com/a/63153755
-                    def process_frame(
-                        frame: 'VideoFrame'
-                    ):
-                        # [ 4 ] Add specific program uniforms
-                        # TODO: T moved to 'yta_video_pyav'
-                        self.program.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(self.reader.fps)))
+#         # TODO: Where do we obtain this from (?)
+#         VIDEO_CODEC_NAME = 'libx264'
+#         # TODO: Where do we obtain this from (?)
+#         PIXEL_FORMAT = 'yuv420p'
+#         self.writer: VideoWriter = (
+#             VideoWriter(output_filename)
+#             .set_video_stream(VIDEO_CODEC_NAME, self.reader.fps, self.reader.size, PIXEL_FORMAT)
+#             .set_audio_stream_from_template(self.reader.audio_stream)
+#         )
+#         """
+#         The video writer instance.
+#         """
+
+#     # TODO: This should be a utils
+#     def frame_to_texture(
+#         self,
+#         frame: 'VideoFrame',
+#         numpy_format: str = 'rgb24'
+#     ):
+#         """
+#         Transform the given 'frame' to an opengl
+#         texture.
+#         """
+#         # To numpy RGB inverted for OpenGL
+#         # TODO: Maybe we can receive normal frames
+#         # here, as np.ndarray, from other libraries
+#         frame: np.ndarray = np.flipud(frame.to_ndarray(format = numpy_format))
+
+#         return self.context.context.texture((frame.shape[1], frame.shape[0]), 3, frame.tobytes())
+
+#     def process(
+#         self
+#     ):
+#         """
+#         Process the video and generate the new one.
+
+#         TODO: Should I pass some effects to apply (?)
+#         """
+#         # [ 1 ] Initialize fbo and texture mipmaps
+#         self.first_frame_as_texture # This forces it in the code
+
+#         # [ 2 ] Set general program uniforms
+#         AMP = 0.05
+#         FREQ = 10.0
+#         SPEED = 2.0
+#         (
+#             self.context.program
+#             .set_value('amp', AMP)
+#             .set_value('freq', FREQ)
+#             .set_value('speed', SPEED)
+#         )
+
+#         # [ 3 ] Process the frames
+#         frame_index = 0
+#         for frame_or_packet in self.reader.iterate_with_audio(
+#             do_decode_video = True,
+#             do_decode_audio = False
+#         ):
+#             # This below is because of the parameters we
+#             # passed to the method
+#             is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
+#             is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
+
+#             # To simplify the process
+#             if frame_or_packet is not None:
+#                 frame_or_packet = frame_or_packet.data
+#             if is_audio_packet:
+#                 self.writer.mux(frame_or_packet)
+#             elif is_video_frame:
+#                 with Timer(is_silent_as_context = True) as timer:
+#                     # Check this link:
+#                     # https://stackoverflow.com/a/63153755
+
+#                     def process_frame(
+#                         frame: 'VideoFrame'
+#                     ):
+#                         # [ 4 ] Add specific program uniforms
+#                         # TODO: T moved to 'yta_video_pyav'
+#                         self.program.set_value('time', T.video_frame_index_to_video_frame_time(frame_index, float(self.reader.fps)))
 
-                        # Create texture
-                        texture = self.frame_to_texture(frame)
-                        texture.use()
+#                         # Create texture
+#                         texture = self.frame_to_texture(frame)
+#                         texture.use()
 
-                        # Activate frame buffer
-                        self.fbo.use()
+#                         # Activate frame buffer
+#                         self.fbo.use()
 
-                        # Render, captured by the fbo
-                        self.vao.render(moderngl.TRIANGLE_STRIP)
+#                         # Render, captured by the fbo
+#                         self.vao.render(moderngl.TRIANGLE_STRIP)
 
-                        # Processed GPU result (from fbo) to numpy
-                        processed_data = np.frombuffer(
-                            self.fbo.read(components = 3, alignment = 1), dtype = np.uint8
-                        )
+#                         # Processed GPU result (from fbo) to numpy
+#                         processed_data = np.frombuffer(
+#                             self.fbo.read(components = 3, alignment = 1), dtype = np.uint8
+#                         )
 
-                        # Invert numpy to normal frame
-                        processed_data = np.flipud(
-                            processed_data.reshape((texture.size[1], texture.size[0], 3))
-                        )
+#                         # Invert numpy to normal frame
+#                         processed_data = np.flipud(
+#                             processed_data.reshape((texture.size[1], texture.size[0], 3))
+#                         )
 
-                        # To VideoFrame and to buffer
-                        frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
-                        # TODO: What is this for (?)
-                        #out_frame.pict_type = 'NONE'
+#                         # To VideoFrame and to buffer
+#                         frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
+#                         # TODO: What is this for (?)
+#                         #out_frame.pict_type = 'NONE'
 
-                        return frame
+#                         return frame
 
-                self.writer.mux_video_frame(process_frame(frame_or_packet))
+#                 self.writer.mux_video_frame(process_frame(frame_or_packet))
 
-            print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
-            frame_index += 1
+#             print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+#             frame_index += 1
 
-        # While this code can be finished, the work in
-        # the muxer could be not finished and have some
-        # packets waiting to be written. Here we tell
-        # the muxer to process all those packets.
-        self.writer.mux_video_frame(None)
+#         # While this code can be finished, the work in
+#         # the muxer could be not finished and have some
+#         # packets waiting to be written. Here we tell
+#         # the muxer to process all those packets.
+#         self.writer.mux_video_frame(None)
+
+#         # TODO: Maybe move this to the '__del__' (?)
+#         self.writer.output.close()
+#         self.reader.container.close()
+#         print(f'Saved as "{self.output_filename}".')
 
-        # TODO: Maybe move this to the '__del__' (?)
-        self.writer.output.close()
-        self.reader.container.close()
-        print(f'Saved as "{self.output_filename}".')
 
 def video_modified_stored():
     # This path below was trimmed in an online platform
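The commented-out VideoProcessor above is the clearest record of the GPU roundtrip this package performs: upload an 'rgb24' frame as a texture (vertically flipped, because OpenGL's origin is the bottom-left corner), render a fullscreen quad into a framebuffer, then read the pixels back and flip again. Below is a minimal standalone sketch of that same roundtrip, using only moderngl and numpy; the passthrough shader, the 640x360 size and the random input frame are placeholders, not part of the package.

import moderngl
import numpy as np

ctx = moderngl.create_context(standalone = True)

# Passthrough program: position + UV in, sampled texture color out
program = ctx.program(
    vertex_shader = '''
        #version 330
        in vec2 in_vert;
        in vec2 in_uv;
        out vec2 uv;
        void main() {
            gl_Position = vec4(in_vert, 0.0, 1.0);
            uv = in_uv;
        }
    ''',
    fragment_shader = '''
        #version 330
        uniform sampler2D tex;
        in vec2 uv;
        out vec4 color;
        void main() { color = texture(tex, uv); }
    '''
)

# Fullscreen quad as a triangle strip: x, y, u, v per vertex
vertices = np.array([
    -1, -1, 0.0, 0.0,
     1, -1, 1.0, 0.0,
    -1,  1, 0.0, 1.0,
     1,  1, 1.0, 1.0,
], dtype = 'f4')
vao = ctx.vertex_array(program, [(ctx.buffer(vertices), '2f 2f', 'in_vert', 'in_uv')])

size = (640, 360)
fbo = ctx.framebuffer(color_attachments = [ctx.texture(size, 3)])

# A fake 'rgb24' frame standing in for the reader output
frame = np.random.randint(0, 256, (size[1], size[0], 3), dtype = np.uint8)
texture = ctx.texture(size, 3, np.flipud(frame).tobytes())
texture.use()

# Render into the fbo, then read the processed pixels back
fbo.use()
vao.render(moderngl.TRIANGLE_STRIP)
processed = np.frombuffer(fbo.read(components = 3, alignment = 1), dtype = np.uint8)
processed = np.flipud(processed.reshape((size[1], size[0], 3)))

The double flipud is the design point: frame.to_ndarray() yields rows top-to-bottom while OpenGL samples bottom-up, so the frame is flipped on upload and flipped back after fbo.read().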
@@ -579,96 +584,12 @@ def video_modified_stored():
     # TODO: Where do we obtain this from (?)
     PIXEL_FORMAT = 'yuv420p'
 
-    from yta_video_opengl.classes import WavingFrame, BreathingFrame, HandheldFrame, OrbitingFrame, RotatingInCenterFrame, StrangeTvFrame, GlitchRgbFrame, WavingNode
     from yta_video_opengl.utils import texture_to_frame, frame_to_texture
-    from yta_video_opengl.video import Video
-    from yta_video_opengl.complete.timeline import Timeline
-
-    # TODO: This test below is just to validate
-    # that it is cropping and placing correctly
-    # but the videos are only in one track
-    # video = Video(VIDEO_PATH, 0.25, 0.75)
-    # timeline = Timeline()
-    # timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.5)
-    # # This is successfully raising an exception
-    # #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
-    # timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.75)
-    # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
-    # # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
-    # timeline.render(OUTPUT_PATH)
-
-    # # Testing concatenating
-    # timeline = Timeline()
-    # # When you concat like this, some of the
-    # # videos have frames that cannot be accessed
-    # # and I don't know why...
-    # timeline.add_video(Video('test_files/glitch_rgb_frame.mp4'))
-    # timeline.add_video(Video('test_files/output.mp4'))
-    # timeline.add_video(Video('test_files/output_render.mp4'))
-    # timeline.add_video(Video('test_files/strange_tv_frame.mp4'))
-    # timeline.add_video(Video('test_files/test_1.mp4'))
-    # timeline.add_video(Video('test_files/test_1_short_2.mp4'))
-    # timeline.add_video(Video('test_files/test_audio_1st_track_solo_v0_0_15.mp4'))
-    # timeline.add_video(Video('test_files/test_audio_2nd_track_solo_v0_0_15.mp4'))
-    # timeline.add_video(Video('test_files/test_audio_combined_tracks_v0_0_015.mp4'))
-    # timeline.add_video(Video('test_files/test_audio_combined_v0_0_15.mp4'))
-    # timeline.add_video(Video('test_files/test_blend_add_v0_0_16.mp4'))
-    # timeline.add_video(Video('test_files/test_blend_difference_v0_0_16.mp4'))
-    # timeline.add_video(Video('test_files/test_blend_multiply_v0_0_16.mp4'))
-    # timeline.add_video(Video('test_files/test_blend_overlay_v0_0_16.mp4'))
-    # timeline.add_video(Video('test_files/test_blend_screen_v0_0_16.mp4'))
-    # timeline.add_video(Video('test_files/test_combine_skipping_empty_using_priority_v0_0_18.mp4'))
-    # timeline.add_video(Video('test_files/test_ok_v0_0_13.mp4'))
-
-    # timeline.render('test_files/concatenated.mp4')
-
-    from yta_video_opengl.media import ImageMedia, ColorMedia
-
-    image_media = ImageMedia('C:/Users/dania/Desktop/PROYECTOS/RECURSOS/mobile_alpha.png', 0, 1).save_as('test_files/test_image.mp4')
-
-    color_media = ColorMedia('random', 0, 1).save_as('test_files/test_color.mp4')
-
-    return
-
-    # TODO: This test will add videos that
-    # must be played at the same time
-    video = Video(VIDEO_PATH, 0.25, 0.75)
-    timeline = Timeline()
 
-
-    simpsons_60fps = 'C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4'
+    #from yta_video_pyav.video import Video
 
-    #
-    timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75, track_index = 0)
-    timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
-    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
+    #video = Video(VIDEO_PATH)
 
-    #timeline.tracks[0].mute()
-
-    # Track 2
-    timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
-    timeline.add_video(Video(simpsons_60fps, 5.8, 7.8), 0.6, track_index = 1)
-    # 30fps
-    # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
-    # 29.97fps
-    # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
-
-    timeline.render(OUTPUT_PATH)
-
-    return
-
-    Video(VIDEO_PATH, 0.25, 0.75).save_as(OUTPUT_PATH)
-
-    return
-
-    video = VideoReader(VIDEO_PATH)
-    video_writer = (
-        VideoWriter(OUTPUT_PATH)
-        #.set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
-        .set_video_stream_from_template(video.video_stream)
-        .set_audio_stream_from_template(video.audio_stream)
-    )
-
     #effect = WavingFrame(size = video.size)
     #effect = BreathingFrame(size = video.size)
     #effect = HandheldFrame(size = video.size)
@@ -680,73 +601,29 @@ def video_modified_stored():
     #     size = video.size,
     #     first_frame = video.next_frame
     # )
-
-
-
-
-
+
+    # effect = GlitchRgbFrame(
+    #     size = video.size,
+    #     first_frame = video.next_frame
+    # )
+    from yta_video_opengl.editor import OpenglEditor
+
+    editor = OpenglEditor()
+    # waving_node_effect = editor.effects.video.waving_node(video.size, amplitude = 0.2, frequency = 9, speed = 3)
+    # chorus_effect = editor.effects.audio.chorus(audio.sample_rate)
+    # print(waving_node_effect)
 
     # New way, with nodes
-
+    # context = moderngl.create_context(standalone = True)
+    # node = WavingNode(context, video.size, amplitude = 0.2, frequency = 9, speed = 3)
+    # print(node.process(video.get_frame_from_t(0)))
     # We need to reset it to being again pointing
     # to the first frame...
     # TODO: Improve this by, maybe, storing the first
     # frame in memory so we can append it later, or
    # using the '.seek(0)' even when it could be not
     # accurate
-    video.reset()
-
-    frame_index = 0
-    for frame_or_packet in video.iterate_with_audio(
-        do_decode_video = True,
-        do_decode_audio = False
-    ):
-        # This below is because of the parameters we
-        # passed to the method
-        is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
-        is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
-
-        # To simplify the process
-        if frame_or_packet is not None:
-            frame_or_packet = frame_or_packet.value
-
-        if is_audio_packet:
-            video_writer.mux(frame_or_packet)
-        elif is_video_frame:
-            with Timer(is_silent_as_context = True) as timer:
-                t = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
-                # This is another way of getting 't'
-                #t = float(frame_or_packet.pts * video.time_base)
-
-                video_writer.mux_video_frame(
-                    frame = texture_to_frame(
-                        texture = node.process(
-                            input = frame_or_packet,
-                            t = t
-                        )
-                    )
-                )
-
-                # video_writer.mux_video_frame(
-                #     effect.process_frame(
-                #         frame = frame_or_packet,
-                #         t = t,
-                #         numpy_format = NUMPY_FORMAT
-                #     )
-                # )
-
-                frame_index += 1
-
-                print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
-
-    video_writer.mux_video_frame(None)
-
-    # TODO: Maybe move this to the '__del__' (?)
-    video_writer.output.close()
-    video.container.close()
-    print(f'Saved as "{OUTPUT_PATH}".')
-
-    return
+    #video.reset()
 
     # # TODO: By now this is applying an effect
     # # by default
@@ -757,6 +634,7 @@ def video_modified_stored():
 
     # return
 
+    return
 
     AMP = 0.05
     FREQ = 10.0