yta_video_opengl-0.0.2-py3-none-any.whl → yta_video_opengl-0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/__init__.py CHANGED
@@ -2,9 +2,8 @@
 Module to include video handling with OpenGL.
 """
 def main():
-    from yta_video_opengl.tests import video_modified_displayed_on_window, video_modified_stored
+    from yta_video_opengl.tests import video_modified_stored
 
-    #video_modified_displayed_on_window()
     video_modified_stored()
 
 
yta_video_opengl/reader.py CHANGED
@@ -2,10 +2,106 @@
 A video reader using the PyAv (av) library
 that, using ffmpeg, detects the video.
 """
+from yta_validation import PythonValidator
+from av.video.frame import VideoFrame
+from av.audio.frame import AudioFrame
+from av.packet import Packet
+from av.video.stream import VideoStream
+from av.audio.stream import AudioStream
+from av.container.input import InputContainer
 from fractions import Fraction
+from av import open as av_open
+from typing import Union
 
-import av
 
+class VideoReaderFrame:
+    """
+    Class to wrap a frame of a video that is
+    being read, which can be a video or an
+    audio frame.
+    """
+
+    @property
+    def is_packet(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the frame is actually
+        a packet of frames.
+        """
+        return PythonValidator.is_instance_of(self.frame, Packet)
+
+    @property
+    def is_video_frame(
+        self
+    ):
+        """
+        Flag to indicate if the instance is a video
+        frame.
+        """
+        return PythonValidator.is_instance_of(self.frame, VideoFrame)
+
+    @property
+    def is_audio_frame(
+        self
+    ):
+        """
+        Flag to indicate if the instance is an audio
+        frame.
+        """
+        return PythonValidator.is_instance_of(self.frame, AudioFrame)
+
+    @property
+    def is_video_packet(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the instance is a packet
+        containing video frames.
+        """
+        return (
+            self.is_packet and
+            self.frame.stream.type == 'video'
+        )
+
+    @property
+    def is_audio_packet(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the instance is a packet
+        containing audio frames.
+        """
+        return (
+            self.is_packet and
+            self.frame.stream.type == 'audio'
+        )
+
+    def __init__(
+        self,
+        # TODO: Add the type, please
+        frame: any
+    ):
+        self.frame: Union[AudioFrame, VideoFrame, Packet] = frame
+        """
+        The frame content, which can be an audio or
+        video frame, or a packet of more than one
+        frame.
+        """
+
+    def test(self):
+        # If it is a packet, we need to decode it
+        # (if we want the frames):
+        #for frame in packet.decode():
+
+        # In the output we need to 'mux' packets,
+        # always
+        pass
+
+    # '.decode()' turns a Packet into a list of frames
+    # '.encode()' turns frames into a list of Packet
+    # '.demux()' extracts packets from a single input
+    # '.mux()' muxes packets into a single output
+
 
 class VideoReader:
     """
@@ -16,22 +112,61 @@ class VideoReader:
     @property
     def frame_iterator(
         self
-    ):
+    ) -> 'Iterator[VideoFrame]':
         """
         Iterator to iterate over all the decoded
         video frames.
         """
-        return self._container.decode(self._video_stream)
+        return self.container.decode(self.video_stream)
 
     @property
     def next_frame(
         self
-    ):
+    ) -> Union[VideoFrame, None]:
         """
         Get the next frame of the iterator.
         """
         return next(self.frame_iterator)
 
+    # TODO: Maybe rename (?)
+    @property
+    def frame_with_audio_iterator(
+        self
+    ) -> 'Iterator[Packet]':
+        """
+        Iterator to iterate over all the decoded
+        video frames, also including the audio.
+        """
+        return self.container.demux((self.video_stream, self.audio_stream))
+
+    @property
+    def next_frame_with_audio(
+        self
+    ) -> Union[Packet, None]:
+        """
+        Get the next frame of the iterator that
+        includes the audio.
+        """
+        return next(self.frame_with_audio_iterator)
+
+    @property
+    def codec_name(
+        self
+    ) -> str:
+        """
+        Get the name of the video codec.
+        """
+        return self.video_stream.codec_context.name
+
+    @property
+    def audio_codec_name(
+        self
+    ) -> str:
+        """
+        Get the name of the audio codec.
+        """
+        return self.audio_stream.codec_context.name
+
     @property
     def fps(
         self
@@ -40,7 +175,17 @@
         The fps of the video.
         """
         # They return it as a Fraction but...
-        return self._video_stream.average_rate
+        return self.video_stream.average_rate
+
+    @property
+    def audio_fps(
+        self
+    ) -> Fraction:
+        """
+        The fps of the audio.
+        """
+        # TODO: What if no audio (?)
+        return self.audio_stream.average_rate
 
     @property
     def size(
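The "What if no audio (?)" TODOs could be handled by checking the container's stream list before indexing it. A possible guard, assuming a PyAV 'container' as used in this class (this is a suggestion, not part of the package):

# 'streams.audio' is an indexable collection that may be empty
audio_streams = container.streams.audio
audio_stream = audio_streams[0] if len(audio_streams) > 0 else None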
@@ -50,8 +195,8 @@
         The size of the video in a (width, height) format.
         """
         return (
-            self._video_stream.width,
-            self._video_stream.height
+            self.video_stream.width,
+            self.video_stream.height
         )
 
     @property
@@ -83,22 +228,35 @@
         """
         The filename of the video source.
        """
-        self._container: av.InputContainer = av.open(filename)
+        self.container: InputContainer = av_open(filename)
         """
         The av input general container of the
         video (that also includes the audio) we
         are reading.
         """
-        self._video_stream = self._container.streams.video[0]
+        self.video_stream: VideoStream = self.container.streams.video[0]
         """
         The stream that includes the video.
         """
-        self._video_stream.thread_type = "AUTO"
-        self._audio_stream = self._container.streams.audio[0]
+        self.video_stream.thread_type = 'AUTO'
+        # TODO: What if no audio (?)
+        self.audio_stream: AudioStream = self.container.streams.audio[0]
         """
         The stream that includes the audio.
         """
-        self._audio_stream.thread_type = "AUTO"
+        self.audio_stream.thread_type = 'AUTO'
+
+    def iterate(
+        self
+    ):
+        for frame in self.frame_iterator:
+            yield VideoReaderFrame(frame)
+
+    def iterate_with_audio(
+        self
+    ):
+        for frame_or_packet in self.frame_with_audio_iterator:
+            yield VideoReaderFrame(frame_or_packet)
 
 
 
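Put together, the new reader API can be driven as in this sketch, under the assumption that 'test_files/test_1.mp4' (the path the tests use) exists:

from yta_video_opengl.reader import VideoReader

video = VideoReader('test_files/test_1.mp4')
print(video.fps, video.size, video.codec_name)

# 'iterate_with_audio()' wraps each demuxed packet in a
# VideoReaderFrame, whose flags tell us which stream the
# packet belongs to
for wrapped in video.iterate_with_audio():
    if wrapped.is_video_packet:
        for frame in wrapped.frame.decode():
            pass  # process the decoded VideoFrame here
    elif wrapped.is_audio_packet:
        pass  # audio packets can be copied as they are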
yta_video_opengl/tests.py CHANGED
@@ -3,125 +3,15 @@ Manual tests that are working and are interesting
 to learn about the code, refactor and build
 classes.
 """
+from yta_video_opengl.reader import VideoReader
+from yta_video_opengl.writer import VideoWriter
+from yta_timer import Timer
 from yta_video_frame_time import T
-from PIL import Image
 
 import av
-# This 'glfw' is only needed to show in a window
-import glfw
 import moderngl
 import numpy as np
-import time
 
-def video_modified_displayed_on_window():
-    # -------- CONFIG --------
-    VIDEO_PATH = "test_files/test_1.mp4" # Change this to your video
-    AMP = 0.05
-    FREQ = 10.0
-    SPEED = 2.0
-    # ------------------------
-
-    # Initialize the GLFW window
-    if not glfw.init():
-        raise RuntimeError("Could not initialize GLFW")
-
-    window = glfw.create_window(1280, 720, "Wave Shader Python", None, None)
-    if not window:
-        glfw.terminate()
-        raise RuntimeError("Could not create the GLFW window")
-
-    glfw.make_context_current(window)
-    ctx = moderngl.create_context()
-
-    # GLSL shader
-    prog = ctx.program(
-        vertex_shader='''
-            #version 330
-            in vec2 in_pos;
-            in vec2 in_uv;
-            out vec2 v_uv;
-            void main() {
-                v_uv = in_uv;
-                gl_Position = vec4(in_pos, 0.0, 1.0);
-            }
-        ''',
-        fragment_shader='''
-            #version 330
-            uniform sampler2D tex;
-            uniform float time;
-            uniform float amp;
-            uniform float freq;
-            uniform float speed;
-            in vec2 v_uv;
-            out vec4 f_color;
-            void main() {
-                float wave = sin(v_uv.x * freq + time * speed) * amp;
-                vec2 uv = vec2(v_uv.x, v_uv.y + wave);
-                f_color = texture(tex, uv);
-            }
-        '''
-    )
-
-    # Full-screen quad
-    vertices = np.array([
-        -1, -1, 0.0, 0.0,
-         1, -1, 1.0, 0.0,
-        -1,  1, 0.0, 1.0,
-         1,  1, 1.0, 1.0,
-    ], dtype='f4')
-
-    vbo = ctx.buffer(vertices.tobytes())
-    vao = ctx.simple_vertex_array(prog, vbo, 'in_pos', 'in_uv')
-
-    # Open the video with PyAV
-    container = av.open(VIDEO_PATH)
-    stream = container.streams.video[0]
-    fps = stream.average_rate
-    stream.thread_type = "AUTO"
-
-    # Decode the first frame to create the texture
-    first_frame = next(container.decode(stream))
-    img = first_frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-    tex = ctx.texture(img.size, 3, img.tobytes())
-    tex.build_mipmaps()
-
-    # Fixed uniforms
-    prog['amp'].value = AMP
-    prog['freq'].value = FREQ
-    prog['speed'].value = SPEED
-
-    # Render loop
-    frame_index = 0
-    start_time = time.time()
-    frame_iter = container.decode(stream)
-    for frame in frame_iter:
-        if glfw.window_should_close(window):
-            break
-
-        # Time
-        """
-        When showing in the window the frames are very
-        slow if using the T, that's why I'm using the
-        time, but still not very fast... I think it
-        depends on my GPU.
-        """
-        prog['time'].value = time.time() - start_time
-        #prog['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(fps))
-
-        # Convert the frame to a texture
-        img = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-        tex.write(img.tobytes())
-
-        # Draw
-        ctx.clear(0.1, 0.1, 0.1)
-        tex.use()
-        vao.render(moderngl.TRIANGLE_STRIP)
-        glfw.swap_buffers(window)
-        glfw.poll_events()
-
-        frame_index += 1
-
-    glfw.terminate()
 
 def video_modified_stored():
     VIDEO_PATH = "test_files/test_1.mp4"
@@ -130,12 +20,12 @@ def video_modified_stored():
     FREQ = 10.0
     SPEED = 2.0
 
-    # Create the ModernGL context without a window
-    ctx = moderngl.create_standalone_context()
+    # ModernGL context without window
+    context = moderngl.create_standalone_context()
 
-    # Wave shader
-    prog = ctx.program(
-        vertex_shader='''
+    # Wave shader vertex and fragment
+    program = context.program(
+        vertex_shader = '''
         #version 330
         in vec2 in_pos;
         in vec2 in_uv;
@@ -145,7 +35,7 @@ def video_modified_stored():
             gl_Position = vec4(in_pos, 0.0, 1.0);
         }
     ''',
-    fragment_shader='''
+    fragment_shader = '''
         #version 330
         uniform sampler2D tex;
        uniform float time;
@@ -168,95 +58,210 @@ def video_modified_stored():
         1, -1, 1.0, 0.0,
        -1,  1, 0.0, 1.0,
         1,  1, 1.0, 1.0,
-    ], dtype='f4')
-    vbo = ctx.buffer(vertices.tobytes())
-    vao = ctx.simple_vertex_array(prog, vbo, 'in_pos', 'in_uv')
+    ], dtype = 'f4')
+    vbo = context.buffer(vertices.tobytes())
+    vao = context.simple_vertex_array(program, vbo, 'in_pos', 'in_uv')
 
-    from yta_video_opengl.reader import VideoReader
-    from yta_video_opengl.writer import VideoWriter
     video = VideoReader(VIDEO_PATH)
 
-    # Framebuffer to render into
-    fbo = ctx.simple_framebuffer(video.size)
+    # TODO: This has to be dynamic, but
+    # according to what (?)
+    NUMPY_FORMAT = 'rgb24'
+    # TODO: Where do we obtain this from (?)
+    VIDEO_CODEC_NAME = 'libx264'
+    # TODO: Where do we obtain this from (?)
+    PIXEL_FORMAT = 'yuv420p'
+
+    # Framebuffer to render
+    fbo = context.simple_framebuffer(video.size)
     fbo.use()
 
-    # Decode the first frame and create the texture
+    # Decode first frame and use as texture
     first_frame = video.next_frame
 
-    # This below is with numpy
     # Most of OpenGL textures expect origin in lower
     # left corner
     # TODO: What if alpha (?)
-    image = np.flipud(first_frame.to_ndarray(format = "rgb24"))
-    tex = ctx.texture((image.shape[1], image.shape[0]), 3, image.tobytes())
-
-    # # This below is with Pillow
-    # # TODO: Why not to ndarray? It's faster
-    # img = first_frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-    # tex = ctx.texture(img.size, 3, img.tobytes())
+    image = np.flipud(first_frame.to_ndarray(format = NUMPY_FORMAT))
+    texture = context.texture((image.shape[1], image.shape[0]), 3, image.tobytes())
 
-    tex.build_mipmaps()
+    texture.build_mipmaps()
 
     # Uniforms
-    prog['amp'].value = AMP
-    prog['freq'].value = FREQ
-    prog['speed'].value = SPEED
-
-    # Open the output with PyAV (H.264 encoder)
-    video_writer = VideoWriter(OUTPUT_PATH, video.fps, video.size, 'yuv420p')
-    output_stream = video_writer._stream
+    program['amp'].value = AMP
+    program['freq'].value = FREQ
+    program['speed'].value = SPEED
+
+    # Writer with H.264 codec
+    video_writer = (
+        VideoWriter(OUTPUT_PATH)
+        .set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
+        .set_audio_stream_from_template(video.audio_stream)
+    )
 
     frame_index = 0
+    # for frame in video.frame_iterator:
+    #     with Timer(is_silent_as_context = True) as timer:
+    #         program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
+
+    #         # To numpy array and flip to OpenGL coordinates
+    #         image_array = np.flipud(
+    #             frame.to_ndarray(format = 'rgb24')
+    #         )
+
+    #         # Create texture
+    #         texture = context.texture((image_array.shape[1], image_array.shape[0]), 3, image_array.tobytes())
+    #         # Add frame to texture
+    #         image = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
+    #         texture.write(image.tobytes())
+
+    #         # Render to framebuffer with shader
+    #         fbo.clear(0.0, 0.0, 0.0)
+    #         texture.use()
+    #         vao.render(moderngl.TRIANGLE_STRIP)
+
+    #         # Read pixels from framebuffer
+    #         data = fbo.read(components = 3, alignment = 1)
+
+    #         # To numpy array and flip back from OpenGL coordinates
+    #         image_output = np.flipud(
+    #             np.frombuffer(data, dtype = np.uint8).reshape((video.height, video.width, 3))
+    #         )
+    #         # Turn into a frame
+    #         video_frame = av.VideoFrame.from_ndarray(image_output, format = 'rgb24')
+
+    #         # Write
+    #         packet = video_writer.stream.encode(video_frame)
+    #         if packet:
+    #             video_writer.output.mux(packet)
+
+    #     print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+
+    #     frame_index += 1
+
+    # # Empty the encoding buffers
+    # packet = video_writer.stream.encode(None)
+    # if packet:
+    #     video_writer.output.mux(packet)
+
+    # # This below is the main workflow with frames
+    # # and packets when processing a video
+    # do_process_video: bool = False
+    # do_process_audio: bool = False
+    # for frame_or_packet in video.iterate_with_audio():
+    #     if frame_or_packet.is_video_frame:
+    #         if do_process_video:
+    #             # If we are processing it, we need to decode
+    #             frame_decoded = frame_or_packet.decode()
+    #         else:
+    #             # If we are not processing it, just copy
+    #             # video_writer.output.mux(frame_or_packet)
+    #             pass
+    #     elif frame_or_packet.is_audio_frame:
+    #         if do_process_audio:
+    #             # If we are processing it, we need to decode
+    #             frame_decoded = frame_or_packet.decode()
+    #         else:
+    #             # If we are not processing it, just copy
+    #             # video_writer.output.mux(frame_or_packet)
+    #             pass
+    #     elif frame_or_packet.is_video_packet:
+    #         if do_process_video:
+    #             # If we are processing it, we need to decode
+    #             # the packet, which will give us an array of
+    #             # frames
+    #             for frame_decoded in frame_or_packet.decode():
+    #                 pass
+    #         else:
+    #             # If we are not processing it, just copy
+    #             # video_writer.output.mux(packet)
+    #             pass
+    #     elif frame_or_packet.is_audio_packet:
+    #         if do_process_audio:
+    #             # If we are processing it, we need to decode
+    #             # the packet, which will give us an array of
+    #             # frames
+    #             for frame_decoded in frame_or_packet.decode():
+    #                 pass
+    #         else:
+    #             # If we are not processing it, just copy
+    #             # video_writer.output.mux(packet)
+    #             pass
 
-    for frame in video.frame_iterator:
-        prog['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
-
-        # This below is with numpy
-        # To numpy array and flip to OpenGL coordinates
-        img_array = np.flipud(
-            frame.to_ndarray(format = "rgb24")
-        )
-
-        # Upload to the texture
-        tex = ctx.texture((img_array.shape[1], img_array.shape[0]), 3, img_array.tobytes())
-
-        # This below is with Pillow
-        # Upload the frame to the texture
-        img = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-
-        tex.write(img.tobytes())
-
-        # Render with the shader to the framebuffer
-        fbo.clear(0.0, 0.0, 0.0)
-        tex.use()
-        vao.render(moderngl.TRIANGLE_STRIP)
-
-        # Read the pixels from the framebuffer
-        data = fbo.read(components = 3, alignment = 1)
-
-        # To numpy array and flip back from OpenGL coordinates
-        img_out = np.flipud(
-            np.frombuffer(data, dtype = np.uint8).reshape((video.height, video.width, 3))
-        )
-        # Turn into a frame
-        video_frame = av.VideoFrame.from_ndarray(img_out)
-
-        # # This below is with Pillow
-        # img_out = Image.frombytes("RGB", video.size, data).transpose(Image.FLIP_TOP_BOTTOM)
-        # # Turn into a frame
-        # video_frame = av.VideoFrame.from_image(img_out)
-
-        # Write
-        packet = output_stream.encode(video_frame)
-        if packet:
-            video_writer._output.mux(packet)
-
-        frame_index += 1
-
-    # Empty the encoding buffers
-    packet = output_stream.encode(None)
-    if packet:
-        video_writer._output.mux(packet)
-
-    video_writer._output.close()
-    print(f"Video saved in {OUTPUT_PATH}")
+    for packet in video.frame_with_audio_iterator:
+        # Video and audio packets come interleaved, not
+        # in a fixed pattern. In this specific case we
+        # receive 1 or 2 video packets per audio packet,
+        # but each stream's packets always arrive in order.
+        if packet.stream.type == 'video':
+            for frame in packet.decode():
+                with Timer(is_silent_as_context = True) as timer:
+                    # Set the variables we need for the OpenGL
+                    # effect we are applying (check the shader
+                    # program code)
+                    program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
+
+                    # To numpy RGB, flipped for OpenGL
+                    img_array = np.flipud(
+                        frame.to_ndarray(format = NUMPY_FORMAT)
+                    )
+
+                    # Create texture
+                    texture = context.texture((img_array.shape[1], img_array.shape[0]), 3, img_array.tobytes())
+                    texture.use()
+
+                    # Render with shader to frame buffer
+                    fbo.use()
+                    vao.render(moderngl.TRIANGLE_STRIP)
+
+                    # Processed GPU result to numpy
+                    processed_data = np.frombuffer(
+                        fbo.read(components = 3, alignment = 1), dtype = np.uint8
+                    )
+                    # Flip back to the normal frame orientation
+                    processed_data = np.flipud(
+                        processed_data.reshape((img_array.shape[0], img_array.shape[1], 3))
+                    )
+
+                    # To VideoFrame and to buffer
+                    out_frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
+                    # TODO: What is this for (?)
+                    #out_frame.pict_type = 'NONE'
+
+                    for v_packet in video_writer.video_stream.encode(out_frame):
+                        # Check that the packet received is not the
+                        # one that indicates the end (that must not
+                        # be passed to the mux)
+                        if v_packet.size > 0:
+                            video_writer.output.mux(v_packet)
+
+                frame_index += 1
+                print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+
+        elif packet.stream.type == 'audio':
+            print('-- AUDIO --')
+            # Check that the packet received is not the
+            # one that indicates the end (that must not
+            # be passed to the mux)
+            if packet.size > 0:
+                # Copy the audio as it is
+                video_writer.output.mux(packet)
+
+    # Empty the buffers. Calling '.encode()' with no
+    # frame tells the encoder that no more frames are
+    # coming, so it can emit the packets it is still
+    # holding. Even when our loop has finished, the
+    # muxer can still have pending packets, so here we
+    # drain them all into the output.
+    for packet in video_writer.video_stream.encode():
+        video_writer.output.mux(packet)
+
+    # TODO: Maybe move this to the '__del__' (?)
+    video_writer.output.close()
+    video.container.close()
+    print(f'Saved as "{OUTPUT_PATH}".')
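The flush at the end of this test matters: PyAV encoders buffer frames internally, so skipping the final parameterless encode() produces a truncated file. The pattern in isolation, as a sketch where 'video_writer' is the writer built above and 'frames' stands for any iterable of VideoFrame:

for frame in frames:
    for packet in video_writer.video_stream.encode(frame):
        video_writer.output.mux(packet)

# End of stream: encode() with no frame drains the encoder
for packet in video_writer.video_stream.encode():
    video_writer.output.mux(packet)

video_writer.output.close()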
yta_video_opengl/writer.py CHANGED
@@ -1,5 +1,10 @@
 from av.stream import Stream
+from av.video.stream import VideoStream
+from av.audio.stream import AudioStream
+from av.container.output import OutputContainer
 from av import open as av_open
+from fractions import Fraction
+from typing import Union
 
 
 class VideoWriter:
@@ -11,9 +16,6 @@ class VideoWriter:
     def __init__(
         self,
         filename: str,
-        fps: float,
-        size: tuple[int, int],
-        pixel_format: str = 'yuv420p'
     ):
         self.filename: str = filename
         """
@@ -21,8 +23,85 @@ class VideoWriter:
         file.
         """
         # TODO: What about this 'libx264' (?)
-        self._output = av_open(filename, mode = 'w')
-        self._stream: Stream = self._output.add_stream("libx264", rate = fps)
-        self._stream.width = size[0]
-        self._stream.height = size[1]
-        self._stream.pix_fmt = pixel_format
+        self.output: OutputContainer = av_open(filename, mode = 'w')
+        """
+        An OutputContainer to control the writing process.
+        """
+        self.video_stream: VideoStream = None
+        """
+        The video stream.
+        """
+        self.audio_stream: AudioStream = None
+        """
+        The audio stream.
+        """
+
+    def set_video_stream(
+        self,
+        codec_name: Union[str, None],
+        fps: Union[Fraction, int, float, None],
+        size: Union[tuple[int, int], None] = None,
+        pixel_format: Union[str, None] = None,
+        options: Union[dict[str, str], None] = None
+    ) -> 'VideoWriter':
+        """
+        Set the video stream, which will overwrite any
+        video stream previously set.
+        """
+        self.video_stream: VideoStream = self.output.add_stream(
+            # TODO: Maybe 'libx264' as default 'codec_name' (?)
+            codec_name = codec_name,
+            rate = fps,
+            options = options
+        )
+
+        if size is not None:
+            self.video_stream.width = size[0]
+            self.video_stream.height = size[1]
+
+        if pixel_format is not None:
+            # TODO: Maybe 'yuv420p' as default 'pixel_format' (?)
+            self.video_stream.pix_fmt = pixel_format
+
+        return self
+
+    # TODO: Maybe 'add_video_stream_from_template' (?)
+
+    def set_audio_stream(
+        self,
+        codec_name: Union[str, None]
+        # TODO: Add more if needed
+    ) -> 'VideoWriter':
+        """
+        Set the audio stream, which will overwrite any
+        audio stream previously set.
+        """
+        self.audio_stream: AudioStream = self.output.add_stream(
+            codec_name = codec_name
+        )
+
+        # TODO: Add more if needed
+
+        return self
+
+    def set_audio_stream_from_template(
+        self,
+        template: Stream
+    ) -> 'VideoWriter':
+        """
+        Set the audio stream, which will overwrite any
+        audio stream previously set.
+
+        You can pass the audio stream as it was
+        obtained from the reader.
+        """
+        self.audio_stream: AudioStream = self.output.add_stream_from_template(
+            template
+        )
+
+        return self
+
+    """
+    # TODO: Check 'https://www.youtube.com/watch?v=OlNWCpFdVMA'
+    # for ffmpeg with mp3 access
+    """
{yta_video_opengl-0.0.2.dist-info → yta_video_opengl-0.0.3.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.2
+Version: 0.0.3
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -8,10 +8,9 @@ Requires-Python: ==3.9
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
 Requires-Dist: av (>=0.0.1,<19.0.0)
-Requires-Dist: glfw (>=0.0.1,<9.0.0)
 Requires-Dist: moderngl (>=0.0.1,<9.0.0)
 Requires-Dist: numpy (>=0.0.1,<9.0.0)
-Requires-Dist: pillow (>=0.0.1,<19.0.0)
+Requires-Dist: yta_timer (>0.0.1,<1.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
 Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
 Description-Content-Type: text/markdown
yta_video_opengl-0.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
+yta_video_opengl/reader.py,sha256=ffq_mliXUjz8N7_ZOoX97P2wTDeMjW2rhGceqItCvtM,7394
+yta_video_opengl/tests.py,sha256=MLLpIVtdAA1L7ZpaZE6AH5gUJpiPxmvg-pktUcubOmw,10015
+yta_video_opengl/writer.py,sha256=dSlzUvyuQsZvghs8-7QASVvejBtm1ysv4V84quNB9Sw,3033
+yta_video_opengl-0.0.3.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+yta_video_opengl-0.0.3.dist-info/METADATA,sha256=Om_IJ8JtdlnUATEkxVPTfO8Fs1eiDqVszPHsEnqZfj4,670
+yta_video_opengl-0.0.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+yta_video_opengl-0.0.3.dist-info/RECORD,,

yta_video_opengl-0.0.2.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
-yta_video_opengl/__init__.py,sha256=Xb1dwLlYPh_sttMNyKsuP81ie5RXhuQf6AM_XvsUuWE,284
-yta_video_opengl/reader.py,sha256=PcbQiQLrPDDrcRLw_UyuMBVmjFvbH7XF-V-LNF9fUAU,3276
-yta_video_opengl/tests.py,sha256=Jxg5ysCkVsxYOsJBsutsnnfB9F0mquMswQ9UWxHpsxQ,7741
-yta_video_opengl/writer.py,sha256=_GrJnqyejQXiSN7iAG_MN85tQllcidZa3iwR4PYNhNg,799
-yta_video_opengl-0.0.2.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
-yta_video_opengl-0.0.2.dist-info/METADATA,sha256=R-a5c_sSfovpHSXyA0eVbM6q9dx2QVdh0i02nqYSsfU,706
-yta_video_opengl-0.0.2.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-yta_video_opengl-0.0.2.dist-info/RECORD,,