yta-video-opengl 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/__init__.py CHANGED
@@ -2,9 +2,8 @@
  Module to include video handling with OpenGL.
  """
  def main():
-     from yta_video_opengl.tests import video_modified_displayed_on_window, video_modified_stored
+     from yta_video_opengl.tests import video_modified_stored
 
-     #video_modified_displayed_on_window()
      video_modified_stored()
 
 
yta_video_opengl/reader.py ADDED
@@ -0,0 +1,295 @@
+ """
+ A video reader using the PyAv (av) library
+ that relies on ffmpeg to decode the video.
+ """
+ from yta_validation import PythonValidator
+ from av.video.frame import VideoFrame
+ from av.audio.frame import AudioFrame
+ from av.packet import Packet
+ from av.video.stream import VideoStream
+ from av.audio.stream import AudioStream
+ from av.container.input import InputContainer
+ from fractions import Fraction
+ from av import open as av_open
+ from typing import Iterator, Union
+
+
+ class VideoReaderFrame:
+     """
+     Class to wrap a frame of a video that is
+     being read, which can be a video or audio
+     frame.
+     """
+
+     @property
+     def is_packet(
+         self
+     ) -> bool:
+         """
+         Flag to indicate if the frame is actually
+         a packet of frames.
+         """
+         return PythonValidator.is_instance_of(self.frame, Packet)
+
+     @property
+     def is_video_frame(
+         self
+     ) -> bool:
+         """
+         Flag to indicate if the instance is a video
+         frame.
+         """
+         return PythonValidator.is_instance_of(self.frame, VideoFrame)
+
+     @property
+     def is_audio_frame(
+         self
+     ) -> bool:
+         """
+         Flag to indicate if the instance is an audio
+         frame.
+         """
+         return PythonValidator.is_instance_of(self.frame, AudioFrame)
+
+     @property
+     def is_video_packet(
+         self
+     ) -> bool:
+         """
+         Flag to indicate if the instance is a packet
+         containing video frames.
+         """
+         return (
+             self.is_packet and
+             self.frame.stream.type == 'video'
+         )
+
+     @property
+     def is_audio_packet(
+         self
+     ) -> bool:
+         """
+         Flag to indicate if the instance is a packet
+         containing audio frames.
+         """
+         return (
+             self.is_packet and
+             self.frame.stream.type == 'audio'
+         )
+
+     def __init__(
+         self,
+         # TODO: Add the type, please
+         frame: any
+     ):
+         self.frame: Union[AudioFrame, VideoFrame, Packet] = frame
+         """
+         The frame content, which can be an audio or
+         video frame, or a packet of more than one
+         frame.
+         """
+
+     def test():
+         # If it is a packet we need to do this if we
+         # want to decode it:
+         #for frame in packet.decode():
+
+         # In the output we always need to 'mux'
+         # packets
+         pass
+
+ # The '.decode()' turns a Packet into a list of frames
+ # The '.encode()' turns a frame into a list of Packet
+ # The '.demux()' extracts packets from a single input
+ # The '.mux()' mixes packets into a single output
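For context, a plain stream copy (remux) with the demux/mux pair described above looks roughly like this in PyAV. A minimal sketch with placeholder file names, following the usual PyAV remuxing pattern:

PYTHON_CODE:
import av

with av.open('input.mp4') as input_container, \
     av.open('output.mp4', mode = 'w') as output_container:
    # Copy the video stream parameters without re-encoding
    input_stream = input_container.streams.video[0]
    output_stream = output_container.add_stream_from_template(input_stream)

    for packet in input_container.demux(input_stream):
        # Demux emits a flush packet (dts is None) at the
        # end that must not be muxed
        if packet.dts is None:
            continue
        packet.stream = output_stream
        output_container.mux(packet)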
+
+
+ class VideoReader:
+     """
+     Class to read video files with the PyAv (av)
+     library, which uses ffmpeg in the background.
+     """
+
+     @property
+     def frame_iterator(
+         self
+     ) -> 'Iterator[VideoFrame]':
+         """
+         Iterator to iterate over all the decoded
+         video frames.
+         """
+         return self.container.decode(self.video_stream)
+
+     @property
+     def next_frame(
+         self
+     ) -> Union[VideoFrame, None]:
+         """
+         Get the next frame of the iterator.
+         """
+         return next(self.frame_iterator)
+
+     # TODO: Maybe rename (?)
+     @property
+     def frame_with_audio_iterator(
+         self
+     ) -> 'Iterator[Packet]':
+         """
+         Iterator to iterate over all the decoded
+         video frames, also including the audio.
+         """
+         return self.container.demux((self.video_stream, self.audio_stream))
+
+     @property
+     def next_frame_with_audio(
+         self
+     ) -> Union[Packet, None]:
+         """
+         Get the next frame of the iterator that
+         includes the audio.
+         """
+         return next(self.frame_with_audio_iterator)
+
+     @property
+     def codec_name(
+         self
+     ) -> str:
+         """
+         Get the name of the video codec.
+         """
+         return self.video_stream.codec_context.name
+
+     @property
+     def audio_codec_name(
+         self
+     ) -> str:
+         """
+         Get the name of the audio codec.
+         """
+         return self.audio_stream.codec_context.name
+
+     @property
+     def fps(
+         self
+     ) -> Fraction:
+         """
+         The fps of the video.
+         """
+         # PyAV returns this as a Fraction
+         return self.video_stream.average_rate
+
+     @property
+     def audio_fps(
+         self
+     ) -> Fraction:
+         """
+         The fps of the audio.
+         """
+         # TODO: What if no audio (?)
+         return self.audio_stream.average_rate
+
+     @property
+     def size(
+         self
+     ) -> tuple[int, int]:
+         """
+         The size of the video in a (width, height) format.
+         """
+         return (
+             self.video_stream.width,
+             self.video_stream.height
+         )
+
+     @property
+     def width(
+         self
+     ) -> int:
+         """
+         The width of the video, in pixels.
+         """
+         return self.size[0]
+
+     @property
+     def height(
+         self
+     ) -> int:
+         """
+         The height of the video, in pixels.
+         """
+         return self.size[1]
+
+     # Any audio-related property has to be named
+     # with an 'audio_' prefix
+
+     def __init__(
+         self,
+         filename: str
+     ):
+         self.filename: str = filename
+         """
+         The filename of the video source.
+         """
+         self.container: InputContainer = av_open(filename)
+         """
+         The av input general container of the
+         video (that also includes the audio) we
+         are reading.
+         """
+         self.video_stream: VideoStream = self.container.streams.video[0]
+         """
+         The stream that includes the video.
+         """
+         self.video_stream.thread_type = 'AUTO'
+         # TODO: What if no audio (?)
+         self.audio_stream: AudioStream = self.container.streams.audio[0]
+         """
+         The stream that includes the audio.
+         """
+         self.audio_stream.thread_type = 'AUTO'
+
+     def iterate(
+         self
+     ):
+         for frame in self.frame_iterator:
+             yield VideoReaderFrame(frame)
+
+     def iterate_with_audio(
+         self
+     ):
+         for frame_or_packet in self.frame_with_audio_iterator:
+             yield VideoReaderFrame(frame_or_packet)
+
+
+
+
+ """
+ Read this below if you want to combine videos
+ that have not been written to disk yet (maybe
+ a composition in moviepy or I don't know).
+
+ Use a pipe (without writing the whole file).
+ You can launch an FFmpeg process that sends the
+ video to PyAV through stdin as an unencoded
+ stream (rawvideo, for example), so you don't
+ have to write the final file.
+ Example:
+
+ PYTHON_CODE:
+ import subprocess
+ import av
+
+ # FFmpeg produces raw frames on stdout
+ ffmpeg_proc = subprocess.Popen(
+     [
+         "ffmpeg",
+         "-i", "-",          # Read from stdin
+         "-f", "rawvideo",
+         "-pix_fmt", "rgba",
+         "-"
+     ],
+     stdin = subprocess.PIPE,
+     stdout = subprocess.PIPE
+ )
+
+ # Here you would send the combined data from your
+ # program to ffmpeg_proc.stdin, and you could read
+ # with PyAV or directly process the pixel arrays
+
+ This is the most common approach for real-time
+ video pipelines.
+ """
yta_video_opengl/tests.py CHANGED
@@ -3,114 +3,15 @@ Manual tests that are working and are interesting
  to learn about the code, refactor and build
  classes.
  """
+ from yta_video_opengl.reader import VideoReader
+ from yta_video_opengl.writer import VideoWriter
+ from yta_timer import Timer
+ from yta_video_frame_time import T
+
  import av
- import glfw
  import moderngl
  import numpy as np
- from PIL import Image
- import time
-
-
- def video_modified_displayed_on_window():
-     # -------- CONFIG --------
-     VIDEO_PATH = "test_files/test_1.mp4" # Change this to your video
-     AMP = 0.05
-     FREQ = 10.0
-     SPEED = 2.0
-     # ------------------------
-
-     # Initialize the GLFW window
-     if not glfw.init():
-         raise RuntimeError("Could not initialize GLFW")
 
-     window = glfw.create_window(1280, 720, "Wave Shader Python", None, None)
-     if not window:
-         glfw.terminate()
-         raise RuntimeError("Could not create the GLFW window")
-
-     glfw.make_context_current(window)
-     ctx = moderngl.create_context()
-
-     # GLSL shader
-     prog = ctx.program(
-         vertex_shader='''
-             #version 330
-             in vec2 in_pos;
-             in vec2 in_uv;
-             out vec2 v_uv;
-             void main() {
-                 v_uv = in_uv;
-                 gl_Position = vec4(in_pos, 0.0, 1.0);
-             }
-         ''',
-         fragment_shader='''
-             #version 330
-             uniform sampler2D tex;
-             uniform float time;
-             uniform float amp;
-             uniform float freq;
-             uniform float speed;
-             in vec2 v_uv;
-             out vec4 f_color;
-             void main() {
-                 float wave = sin(v_uv.x * freq + time * speed) * amp;
-                 vec2 uv = vec2(v_uv.x, v_uv.y + wave);
-                 f_color = texture(tex, uv);
-             }
-         '''
-     )
-
-     # Full-screen quad
-     vertices = np.array([
-         -1, -1, 0.0, 0.0,
-          1, -1, 1.0, 0.0,
-         -1,  1, 0.0, 1.0,
-          1,  1, 1.0, 1.0,
-     ], dtype='f4')
-
-     vbo = ctx.buffer(vertices.tobytes())
-     vao = ctx.simple_vertex_array(prog, vbo, 'in_pos', 'in_uv')
-
-     # Open the video with PyAV
-     container = av.open(VIDEO_PATH)
-     stream = container.streams.video[0]
-     stream.thread_type = "AUTO"
-
-     # Decode the first frame to create the texture
-     first_frame = next(container.decode(stream))
-     img = first_frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-     tex = ctx.texture(img.size, 3, img.tobytes())
-     tex.build_mipmaps()
-
-     # Fixed uniforms
-     prog['amp'].value = AMP
-     prog['freq'].value = FREQ
-     prog['speed'].value = SPEED
-
-     start_time = time.time()
-
-     # Render loop
-     frame_iter = container.decode(stream)
-     for frame in frame_iter:
-         if glfw.window_should_close(window):
-             break
-
-         # Time
-         t = time.time() - start_time
-         prog['time'].value = t
-
-         # Convert the frame to a texture
-         img = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-         tex.write(img.tobytes())
-
-         # Draw
-         ctx.clear(0.1, 0.1, 0.1)
-         tex.use()
-         vao.render(moderngl.TRIANGLE_STRIP)
-         glfw.swap_buffers(window)
-         glfw.poll_events()
-
-     glfw.terminate()
 
  def video_modified_stored():
      VIDEO_PATH = "test_files/test_1.mp4"
@@ -119,12 +20,12 @@ def video_modified_stored():
      FREQ = 10.0
      SPEED = 2.0
 
-     # Create a ModernGL context without a window
-     ctx = moderngl.create_standalone_context()
+     # ModernGL context without window
+     context = moderngl.create_standalone_context()
 
-     # Wave shader
-     prog = ctx.program(
-         vertex_shader='''
+     # Wave shader vertex and fragment
+     program = context.program(
+         vertex_shader = '''
          #version 330
          in vec2 in_pos;
          in vec2 in_uv;
@@ -134,7 +35,7 @@ def video_modified_stored():
          gl_Position = vec4(in_pos, 0.0, 1.0);
      }
      ''',
-         fragment_shader='''
+         fragment_shader = '''
          #version 330
          uniform sampler2D tex;
          uniform float time;
@@ -157,68 +58,210 @@ def video_modified_stored():
          1, -1, 1.0, 0.0,
         -1,  1, 0.0, 1.0,
          1,  1, 1.0, 1.0,
-     ], dtype='f4')
-     vbo = ctx.buffer(vertices.tobytes())
-     vao = ctx.simple_vertex_array(prog, vbo, 'in_pos', 'in_uv')
-
-     # Open the input video
-     container = av.open(VIDEO_PATH)
-     stream = container.streams.video[0]
-     fps = stream.average_rate
-     width = stream.width
-     height = stream.height
-
-     # Framebuffer to render into
-     fbo = ctx.simple_framebuffer((width, height))
+     ], dtype = 'f4')
+     vbo = context.buffer(vertices.tobytes())
+     vao = context.simple_vertex_array(program, vbo, 'in_pos', 'in_uv')
+
+     video = VideoReader(VIDEO_PATH)
+
+     # TODO: This has to be dynamic, but
+     # according to what (?)
+     NUMPY_FORMAT = 'rgb24'
+     # TODO: Where do we obtain this from (?)
+     VIDEO_CODEC_NAME = 'libx264'
+     # TODO: Where do we obtain this from (?)
+     PIXEL_FORMAT = 'yuv420p'
+
+     # Framebuffer to render into
+     fbo = context.simple_framebuffer(video.size)
      fbo.use()
 
-     # Decode the first frame and create the texture
-     first_frame = next(container.decode(stream))
-     img = first_frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-     tex = ctx.texture(img.size, 3, img.tobytes())
-     tex.build_mipmaps()
+     # Decode the first frame and use it as texture
+     first_frame = video.next_frame
+
+     # Most OpenGL textures expect the origin in the
+     # lower left corner
+     # TODO: What if alpha (?)
+     image = np.flipud(first_frame.to_ndarray(format = NUMPY_FORMAT))
+     texture = context.texture((image.shape[1], image.shape[0]), 3, image.tobytes())
+
+     texture.build_mipmaps()
 
      # Uniforms
-     prog['amp'].value = AMP
-     prog['freq'].value = FREQ
-     prog['speed'].value = SPEED
-
-     # Open the output with PyAV (H.264 encoder)
-     output = av.open(OUTPUT_PATH, mode='w')
-     out_stream = output.add_stream("libx264", rate=fps)
-     out_stream.width = width
-     out_stream.height = height
-     out_stream.pix_fmt = "yuv420p"
-
-     start_time = time.time()
-
-     for frame in container.decode(stream):
-         t = time.time() - start_time
-         prog['time'].value = t
-
-         # Upload the frame to the texture
-         img = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
-         tex.write(img.tobytes())
-
-         # Render to the framebuffer with the shader
-         fbo.clear(0.0, 0.0, 0.0)
-         tex.use()
-         vao.render(moderngl.TRIANGLE_STRIP)
-
-         # Read the pixels from the framebuffer
-         data = fbo.read(components=3, alignment=1)
-         img_out = Image.frombytes("RGB", (width, height), data).transpose(Image.FLIP_TOP_BOTTOM)
-
-         # Convert to a PyAV frame and write it
-         video_frame = av.VideoFrame.from_image(img_out)
-         packet = out_stream.encode(video_frame)
-         if packet:
-             output.mux(packet)
-
-     # Flush the encoding buffers
-     packet = out_stream.encode(None)
-     if packet:
-         output.mux(packet)
-
-     output.close()
-     print(f"Video saved to {OUTPUT_PATH}")
+     program['amp'].value = AMP
+     program['freq'].value = FREQ
+     program['speed'].value = SPEED
+
+     # Writer with H.264 codec
+     video_writer = (
+         VideoWriter(OUTPUT_PATH)
+         .set_video_stream(VIDEO_CODEC_NAME, video.fps, video.size, PIXEL_FORMAT)
+         .set_audio_stream_from_template(video.audio_stream)
+     )
+
+     frame_index = 0
+     # for frame in video.frame_iterator:
+     #     with Timer(is_silent_as_context = True) as timer:
+     #         program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
+
+     #         # To numpy array and flip to OpenGL coordinates
+     #         image_array = np.flipud(
+     #             frame.to_ndarray(format = 'rgb24')
+     #         )
+
+     #         # Create texture
+     #         texture = context.texture((image_array.shape[1], image_array.shape[0]), 3, image_array.tobytes())
+     #         # Add frame to texture
+     #         image = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
+     #         texture.write(image.tobytes())
+
+     #         # Render to framebuffer with shader
+     #         fbo.clear(0.0, 0.0, 0.0)
+     #         texture.use()
+     #         vao.render(moderngl.TRIANGLE_STRIP)
+
+     #         # Read pixels from framebuffer
+     #         data = fbo.read(components = 3, alignment = 1)
+
+     #         # To numpy array and flip from OpenGL coordinates
+     #         image_output = np.flipud(
+     #             np.frombuffer(data, dtype = np.uint8).reshape((video.height, video.width, 3))
+     #         )
+     #         # Turn into a frame
+     #         video_frame = av.VideoFrame.from_ndarray(image_output, format = 'rgb24')
+
+     #         # Write
+     #         packet = video_writer.stream.encode(video_frame)
+     #         if packet:
+     #             video_writer.output.mux(packet)
+
+     #         print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+
+     #         frame_index += 1
+
+     # # Flush the encoding buffers
+     # packet = video_writer.stream.encode(None)
+     # if packet:
+     #     video_writer.output.mux(packet)
+
+     # # This below is the main workflow with frames
+     # # and packets when processing a video
+     # do_process_video: bool = False
+     # do_process_audio: bool = False
+     # for frame_or_packet in video.iterate_with_audio():
+     #     if frame_or_packet.is_video_frame:
+     #         if do_process_video:
+     #             # If we are processing it, we need to decode
+     #             frame_decoded = frame_or_packet.decode()
+     #         else:
+     #             # If we are not processing it, just copy
+     #             # video_writer.output.mux(frame_or_packet)
+     #             pass
+     #     elif frame_or_packet.is_audio_frame:
+     #         if do_process_audio:
+     #             # If we are processing it, we need to decode
+     #             frame_decoded = frame_or_packet.decode()
+     #         else:
+     #             # If we are not processing it, just copy
+     #             # video_writer.output.mux(frame_or_packet)
+     #             pass
+     #     elif frame_or_packet.is_video_packet:
+     #         if do_process_video:
+     #             # If we are processing it, we need to decode
+     #             # the packet, which will give us an array of
+     #             # frames
+     #             for frame_decoded in frame_or_packet.decode():
+     #                 pass
+     #         else:
+     #             # If we are not processing it, just copy
+     #             # video_writer.output.mux(packet)
+     #             pass
+     #     elif frame_or_packet.is_audio_packet:
+     #         if do_process_audio:
+     #             # If we are processing it, we need to decode
+     #             # the packet, which will give us an array of
+     #             # frames
+     #             for frame_decoded in frame_or_packet.decode():
+     #                 pass
+     #         else:
+     #             # If we are not processing it, just copy
+     #             # video_writer.output.mux(packet)
+     #             pass
+
+
+     for packet in video.frame_with_audio_iterator:
+         # Video and audio packets come interleaved. In
+         # this specific case we receive 1 or 2 video
+         # packets per audio packet, but each stream's
+         # packets always arrive in order.
+         if packet.stream.type == 'video':
+             for frame in packet.decode():
+                 with Timer(is_silent_as_context = True) as timer:
+                     # Add some variables if we need them for
+                     # the opengl change we are applying (check
+                     # the program code)
+                     program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
+
+                     # To numpy RGB, inverted for OpenGL
+                     img_array = np.flipud(
+                         frame.to_ndarray(format = NUMPY_FORMAT)
+                     )
+
+                     # Create texture
+                     texture = context.texture((img_array.shape[1], img_array.shape[0]), 3, img_array.tobytes())
+                     texture.use()
+
+                     # Render with shader to frame buffer
+                     fbo.use()
+                     vao.render(moderngl.TRIANGLE_STRIP)
+
+                     # Processed GPU result to numpy
+                     processed_data = np.frombuffer(
+                         fbo.read(components = 3, alignment = 1), dtype = np.uint8
+                     )
+                     # Invert numpy back to a normal frame
+                     processed_data = np.flipud(
+                         processed_data.reshape((img_array.shape[0], img_array.shape[1], 3))
+                     )
+
+                     # To VideoFrame and to the buffer
+                     out_frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
+                     # TODO: What is this for (?)
+                     #out_frame.pict_type = 'NONE'
+
+                     for v_packet in video_writer.video_stream.encode(out_frame):
+                         # Check that the packet received is not the
+                         # one that indicates the end (that must not
+                         # be passed to the mux)
+                         if v_packet.size > 0:
+                             video_writer.output.mux(v_packet)
+
+                 frame_index += 1
+                 print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
+
+         elif packet.stream.type == "audio":
+             print('-- AUDIO --')
+             # Check that the packet received is not the
+             # one that indicates the end (that must not
+             # be passed to the mux)
+             if packet.size > 0:
+                 # Copy the audio as it is
+                 video_writer.output.mux(packet)
+
+     # Flush the buffers.
+     # Calling '.encode()' with no arguments tells the
+     # encoder that no more frames will be sent, so it
+     # can process the remaining ones, which are the
+     # packets we obtain here. Even when this loop is
+     # done, the muxer could still have packets waiting
+     # to be written, and this makes it process them all.
+     for packet in video_writer.video_stream.encode():
+         video_writer.output.mux(packet)
+
+     # TODO: Maybe move this to the '__del__' (?)
+     video_writer.output.close()
+     video.container.close()
+     print(f'Saved as "{OUTPUT_PATH}".')
yta_video_opengl/writer.py ADDED
@@ -0,0 +1,107 @@
+ from av.stream import Stream
+ from av.video.stream import VideoStream
+ from av.audio.stream import AudioStream
+ from av.container.output import OutputContainer
+ from av import open as av_open
+ from fractions import Fraction
+ from typing import Union
+
+
+ class VideoWriter:
+     """
+     Class to write video files with the PyAv (av)
+     library, which uses ffmpeg in the background.
+     """
+
+     def __init__(
+         self,
+         filename: str,
+     ):
+         self.filename: str = filename
+         """
+         The filename we want to use to save the video
+         file.
+         """
+         # TODO: What about this 'libx264' (?)
+         self.output: OutputContainer = av_open(filename, mode = 'w')
+         """
+         An OutputContainer to control the writing process.
+         """
+         self.video_stream: VideoStream = None
+         """
+         The video stream.
+         """
+         self.audio_stream: AudioStream = None
+         """
+         The audio stream.
+         """
+
+     def set_video_stream(
+         self,
+         codec_name: Union[str, None],
+         fps: Union[Fraction, int, float, None],
+         size: Union[tuple[int, int], None] = None,
+         pixel_format: Union[str, None] = None,
+         options: Union[dict[str, str], None] = None
+     ) -> 'VideoWriter':
+         """
+         Set the video stream, overwriting any video
+         stream that was set previously.
+         """
+         self.video_stream: VideoStream = self.output.add_stream(
+             # TODO: Maybe 'libx264' as default 'codec_name' (?)
+             codec_name = codec_name,
+             rate = fps,
+             options = options
+         )
+
+         if size is not None:
+             self.video_stream.width = size[0]
+             self.video_stream.height = size[1]
+
+         if pixel_format is not None:
+             # TODO: Maybe 'yuv420p' as default 'pixel_format' (?)
+             self.video_stream.pix_fmt = pixel_format
+
+         return self
+
+     # TODO: Maybe 'add_video_stream_from_template' (?)
+
+     def set_audio_stream(
+         self,
+         codec_name: Union[str, None]
+         # TODO: Add more if needed
+     ) -> 'VideoWriter':
+         """
+         Set the audio stream, overwriting any audio
+         stream that was set previously.
+         """
+         self.audio_stream: AudioStream = self.output.add_stream(
+             codec_name = codec_name
+         )
+
+         # TODO: Add more if needed
+
+         return self
+
+     def set_audio_stream_from_template(
+         self,
+         template: Stream
+     ) -> 'VideoWriter':
+         """
+         Set the audio stream, overwriting any audio
+         stream that was set previously.
+
+         You can pass the audio stream as it was
+         obtained from the reader.
+         """
+         self.audio_stream: AudioStream = self.output.add_stream_from_template(
+             template
+         )
+
+         return self
+
+ """
+ # TODO: Check 'https://www.youtube.com/watch?v=OlNWCpFdVMA'
+ # for ffmpeg with mp3 access
+ """
{yta_video_opengl-0.0.1.dist-info → yta_video_opengl-0.0.3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.1
+ Version: 0.0.3
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com
@@ -8,11 +8,11 @@ Requires-Python: ==3.9
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
  Requires-Dist: av (>=0.0.1,<19.0.0)
- Requires-Dist: glfw (>=0.0.1,<9.0.0)
  Requires-Dist: moderngl (>=0.0.1,<9.0.0)
  Requires-Dist: numpy (>=0.0.1,<9.0.0)
- Requires-Dist: pillow (>=0.0.1,<19.0.0)
+ Requires-Dist: yta_timer (>0.0.1,<1.0.0)
  Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
+ Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
  Description-Content-Type: text/markdown
 
  # Youtube Autonomous Video OpenGL Module
yta_video_opengl-0.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
+ yta_video_opengl/reader.py,sha256=ffq_mliXUjz8N7_ZOoX97P2wTDeMjW2rhGceqItCvtM,7394
+ yta_video_opengl/tests.py,sha256=MLLpIVtdAA1L7ZpaZE6AH5gUJpiPxmvg-pktUcubOmw,10015
+ yta_video_opengl/writer.py,sha256=dSlzUvyuQsZvghs8-7QASVvejBtm1ysv4V84quNB9Sw,3033
+ yta_video_opengl-0.0.3.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+ yta_video_opengl-0.0.3.dist-info/METADATA,sha256=Om_IJ8JtdlnUATEkxVPTfO8Fs1eiDqVszPHsEnqZfj4,670
+ yta_video_opengl-0.0.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ yta_video_opengl-0.0.3.dist-info/RECORD,,
yta_video_opengl-0.0.1.dist-info/RECORD REMOVED
@@ -1,6 +0,0 @@
- yta_video_opengl/__init__.py,sha256=Xb1dwLlYPh_sttMNyKsuP81ie5RXhuQf6AM_XvsUuWE,284
- yta_video_opengl/tests.py,sha256=INSGzF7EcYqtUdN4SN39KOmXsAME4Sg7eJFbMmfmXx4,6292
- yta_video_opengl-0.0.1.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
- yta_video_opengl-0.0.1.dist-info/METADATA,sha256=W4DX2yB9bfprIBGH-wgd2B0CM8N6dZlupkkkkKtIirY,653
- yta_video_opengl-0.0.1.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
- yta_video_opengl-0.0.1.dist-info/RECORD,,