yta-video-opengl 0.0.1__tar.gz → 0.0.2__tar.gz
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- {yta_video_opengl-0.0.1 → yta_video_opengl-0.0.2}/PKG-INFO +2 -1
- {yta_video_opengl-0.0.1 → yta_video_opengl-0.0.2}/pyproject.toml +2 -1
- yta_video_opengl-0.0.2/src/yta_video_opengl/reader.py +137 -0
- {yta_video_opengl-0.0.1 → yta_video_opengl-0.0.2}/src/yta_video_opengl/tests.py +74 -36
- yta_video_opengl-0.0.2/src/yta_video_opengl/writer.py +28 -0
- {yta_video_opengl-0.0.1 → yta_video_opengl-0.0.2}/LICENSE +0 -0
- {yta_video_opengl-0.0.1 → yta_video_opengl-0.0.2}/README.md +0 -0
- {yta_video_opengl-0.0.1 → yta_video_opengl-0.0.2}/src/yta_video_opengl/__init__.py +0 -0
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.1
+Version: 0.0.2
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -13,6 +13,7 @@ Requires-Dist: moderngl (>=0.0.1,<9.0.0)
 Requires-Dist: numpy (>=0.0.1,<9.0.0)
 Requires-Dist: pillow (>=0.0.1,<19.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
+Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
 Description-Content-Type: text/markdown
 
 # Youtube Autonomous Video OpenGL Module
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "yta-video-opengl"
-version = "0.0.1"
+version = "0.0.2"
 description = "Youtube Autonomous Video OpenGL Module"
 authors = [
     {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -9,6 +9,7 @@ readme = "README.md"
 requires-python = "==3.9"
 dependencies = [
     "yta_validation (>=0.0.1,<1.0.0)",
+    "yta_video_frame_time (>=0.0.1,<1.0.0)",
     "av (>=0.0.1,<19.0.0)",
     "moderngl (>=0.0.1,<9.0.0)",
     "glfw (>=0.0.1,<9.0.0)",
--- /dev/null
+++ b/src/yta_video_opengl/reader.py
@@ -0,0 +1,137 @@
+"""
+A video reader using the PyAv (av) library
+that, using ffmpeg, decodes the video.
+"""
+from fractions import Fraction
+
+import av
+
+
+class VideoReader:
+    """
+    Class to read video files with the PyAv (av)
+    library, which uses ffmpeg in the background.
+    """
+
+    @property
+    def frame_iterator(
+        self
+    ):
+        """
+        Iterator to iterate over all the decoded
+        video frames.
+        """
+        return self._container.decode(self._video_stream)
+
+    @property
+    def next_frame(
+        self
+    ):
+        """
+        Get the next frame of the iterator.
+        """
+        return next(self.frame_iterator)
+
+    @property
+    def fps(
+        self
+    ) -> Fraction:
+        """
+        The fps of the video.
+        """
+        # They return it as a Fraction but...
+        return self._video_stream.average_rate
+
+    @property
+    def size(
+        self
+    ) -> tuple[int, int]:
+        """
+        The size of the video in a (width, height) format.
+        """
+        return (
+            self._video_stream.width,
+            self._video_stream.height
+        )
+
+    @property
+    def width(
+        self
+    ) -> int:
+        """
+        The width of the video, in pixels.
+        """
+        return self.size[0]
+
+    @property
+    def height(
+        self
+    ) -> int:
+        """
+        The height of the video, in pixels.
+        """
+        return self.size[1]
+
+    # Any property related to audio has to
+    # start with 'audio_property_name'
+
+    def __init__(
+        self,
+        filename: str
+    ):
+        self.filename: str = filename
+        """
+        The filename of the video source.
+        """
+        self._container: av.InputContainer = av.open(filename)
+        """
+        The av input general container of the
+        video (that also includes the audio) we
+        are reading.
+        """
+        self._video_stream = self._container.streams.video[0]
+        """
+        The stream that includes the video.
+        """
+        self._video_stream.thread_type = "AUTO"
+        self._audio_stream = self._container.streams.audio[0]
+        """
+        The stream that includes the audio.
+        """
+        self._audio_stream.thread_type = "AUTO"
+
+
+
+
+"""
+Read this below if you want to combine videos
+that have not been written to the disk yet
+(maybe a composition in moviepy or I don't
+know).
+
+Use a pipe (without writing the whole file):
+You can launch an FFmpeg process that sends the video to PyAV through stdin as an unencoded stream (rawvideo, for example), so you don't have to write the final file.
+Example:
+
+PYTHON_CODE:
+import subprocess
+import av
+
+# FFmpeg produces raw frames on stdout
+ffmpeg_proc = subprocess.Popen(
+    [
+        "ffmpeg",
+        "-i", "-", # Read from stdin
+        "-f", "rawvideo",
+        "-pix_fmt", "rgba",
+        "-"
+    ],
+    stdin=subprocess.PIPE,
+    stdout=subprocess.PIPE
+)
+
+# Here you would send the combined data from your program to ffmpeg_proc.stdin
+# and you could read with PyAV or process pixel arrays directly
+
+This is the most common approach for real-time video pipelines.
+"""
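The note at the end of reader.py only shows the producer half of the pipe. A minimal sketch of the consuming side, assuming the `rgba` raw format requested above and known frame dimensions (the input path and the WIDTH/HEIGHT values are hypothetical and must match the real stream):

    import subprocess

    import numpy as np

    WIDTH, HEIGHT = 1920, 1080  # hypothetical; must match the decoded stream

    # FFmpeg decodes the input and emits raw rgba frames on stdout
    ffmpeg_proc = subprocess.Popen(
        [
            "ffmpeg",
            "-i", "input.mp4",  # hypothetical input; "-" would read from stdin
            "-f", "rawvideo",
            "-pix_fmt", "rgba",
            "-"
        ],
        stdout=subprocess.PIPE
    )

    # Each frame is exactly width * height * 4 bytes (rgba)
    frame_size = WIDTH * HEIGHT * 4
    while True:
        raw = ffmpeg_proc.stdout.read(frame_size)
        if len(raw) < frame_size:
            break  # end of stream
        # One frame as a (height, width, 4) uint8 array
        frame = np.frombuffer(raw, dtype=np.uint8).reshape((HEIGHT, WIDTH, 4))
        # ... process the frame here ...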
--- a/src/yta_video_opengl/tests.py
+++ b/src/yta_video_opengl/tests.py
@@ -3,14 +3,16 @@ Manual tests that are working and are interesting
 to learn about the code, refactor and build
 classes.
 """
+from yta_video_frame_time import T
+from PIL import Image
+
 import av
+# This 'glfw' is only needed to show in a window
 import glfw
 import moderngl
 import numpy as np
-from PIL import Image
 import time
 
-
 def video_modified_displayed_on_window():
     # -------- CONFIG --------
     VIDEO_PATH = "test_files/test_1.mp4" # Change this to your video
@@ -74,6 +76,7 @@ def video_modified_displayed_on_window():
     # Open the video with PyAV
     container = av.open(VIDEO_PATH)
     stream = container.streams.video[0]
+    fps = stream.average_rate
     stream.thread_type = "AUTO"
 
     # Decode the first frame to create the texture
@@ -87,17 +90,23 @@ def video_modified_displayed_on_window():
     prog['freq'].value = FREQ
     prog['speed'].value = SPEED
 
-    start_time = time.time()
-
     # Render loop
+    frame_index = 0
+    start_time = time.time()
     frame_iter = container.decode(stream)
     for frame in frame_iter:
         if glfw.window_should_close(window):
             break
 
-        #
-
-
+        # Time
+        """
+        When showing in the window the frames are very
+        slow if using the T; that's why I'm using the
+        time, but still not very fast... I think it
+        depends on my GPU.
+        """
+        prog['time'].value = time.time() - start_time
+        #prog['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(fps))
 
         # Convert the frame to a texture
         img = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
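The commented-out alternative above relies on `T.video_frame_index_to_video_frame_time` from the new `yta_video_frame_time` dependency. Judging by its arguments, it maps a frame index at a given fps to a timestamp in seconds. A minimal stand-in sketch of that mapping (only the name and signature come from the diff; the body is an assumption):

    # Hypothetical stand-in for T.video_frame_index_to_video_frame_time
    def video_frame_index_to_video_frame_time(
        frame_index: int,
        fps: float
    ) -> float:
        # Assumption: frame i starts at t = i / fps seconds
        return frame_index / fps

    # e.g. frame 30 of a 30 fps video starts at t = 1.0 s
    assert video_frame_index_to_video_frame_time(30, 30.0) == 1.0

Unlike `time.time() - start_time`, this keeps the shader's `time` uniform locked to the frame index, so the rendered output does not depend on how fast the machine decodes and displays.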
@@ -110,6 +119,8 @@ def video_modified_displayed_on_window():
         glfw.swap_buffers(window)
         glfw.poll_events()
 
+        frame_index += 1
+
     glfw.terminate()
 
 def video_modified_stored():
@@ -161,21 +172,29 @@ def video_modified_stored():
     vbo = ctx.buffer(vertices.tobytes())
     vao = ctx.simple_vertex_array(prog, vbo, 'in_pos', 'in_uv')
 
-
-
-
-    fps = stream.average_rate
-    width = stream.width
-    height = stream.height
+    from yta_video_opengl.reader import VideoReader
+    from yta_video_opengl.writer import VideoWriter
+    video = VideoReader(VIDEO_PATH)
 
     # Framebuffer to render into
-    fbo = ctx.simple_framebuffer(
+    fbo = ctx.simple_framebuffer(video.size)
     fbo.use()
 
     # Decode the first frame and create the texture
-    first_frame =
-
-
+    first_frame = video.next_frame
+
+    # This below is with numpy
+    # Most OpenGL textures expect the origin in the
+    # lower left corner
+    # TODO: What if alpha (?)
+    image = np.flipud(first_frame.to_ndarray(format = "rgb24"))
+    tex = ctx.texture((image.shape[1], image.shape[0]), 3, image.tobytes())
+
+    # # This below is with Pillow
+    # # TODO: Why not to ndarray? It's faster
+    # img = first_frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
+    # tex = ctx.texture(img.size, 3, img.tobytes())
 
     tex.build_mipmaps()
 
     # Uniforms
@@ -184,20 +203,27 @@ def video_modified_stored():
     prog['speed'].value = SPEED
 
     # Open the output with PyAV (H.264 encoder)
-
-
-    out_stream.width = width
-    out_stream.height = height
-    out_stream.pix_fmt = "yuv420p"
+    video_writer = VideoWriter(OUTPUT_PATH, video.fps, video.size, 'yuv420p')
+    output_stream = video_writer._stream
 
-
+    frame_index = 0
+
+    for frame in video.frame_iterator:
+        prog['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
+
+        # This below is with numpy
+        # To numpy array, flipped to OpenGL coordinates
+        img_array = np.flipud(
+            frame.to_ndarray(format = "rgb24")
+        )
 
-
-
-        prog['time'].value = t
+        # Upload to a texture
+        tex = ctx.texture((img_array.shape[1], img_array.shape[0]), 3, img_array.tobytes())
 
+        # This below is with Pillow
         # Upload the frame to a texture
         img = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert("RGB")
+
         tex.write(img.tobytes())
 
         # Render with the shader to the framebuffer
@@ -206,19 +232,31 @@ def video_modified_stored():
         vao.render(moderngl.TRIANGLE_STRIP)
 
         # Read the pixels from the framebuffer
-        data = fbo.read(components=3, alignment=1)
-
-
-
-
-
+        data = fbo.read(components = 3, alignment = 1)
+
+        # To numpy array, flipped to OpenGL coordinates
+        img_out = np.flipud(
+            np.frombuffer(data, dtype = np.uint8).reshape((video.height, video.width, 3))
+        )
+        # Turn into a frame
+        video_frame = av.VideoFrame.from_ndarray(img_out)
+
+        # # This below is with Pillow
+        # img_out = Image.frombytes("RGB", video.size, data).transpose(Image.FLIP_TOP_BOTTOM)
+        # # Turn into a frame
+        # video_frame = av.VideoFrame.from_image(img_out)
+
+        # Write
+        packet = output_stream.encode(video_frame)
         if packet:
-
+            video_writer._output.mux(packet)
+
+        frame_index += 1
 
     # Flush the encoding buffers
-    packet =
+    packet = output_stream.encode(None)
     if packet:
-
+        video_writer._output.mux(packet)
 
-
+    video_writer._output.close()
     print(f"Video saved to {OUTPUT_PATH}")
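The encode/mux/flush pattern that `video_modified_stored` now delegates to the writer is PyAV's standard encoding loop. Stripped of the OpenGL parts, it looks roughly like this (a sketch with placeholder names and dummy gray frames; note that in recent PyAV versions `Stream.encode` returns a list of packets, which is why the `if packet:` checks above still work, an empty list being falsy):

    import av
    import numpy as np

    output = av.open("out_hypothetical.mp4", mode="w")
    stream = output.add_stream("libx264", rate=30)
    stream.width = 640
    stream.height = 360
    stream.pix_fmt = "yuv420p"

    for i in range(30):
        # Dummy content: one second of solid gray; PyAV reformats
        # the rgb24 frame to the stream's yuv420p on encode
        img = np.full((360, 640, 3), 128, dtype=np.uint8)
        frame = av.VideoFrame.from_ndarray(img, format="rgb24")
        for packet in stream.encode(frame):
            output.mux(packet)

    # Flush: encode(None) drains the encoder's buffered packets
    for packet in stream.encode(None):
        output.mux(packet)

    output.close()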
--- /dev/null
+++ b/src/yta_video_opengl/writer.py
@@ -0,0 +1,28 @@
+from av.stream import Stream
+from av import open as av_open
+
+
+class VideoWriter:
+    """
+    Class to write video files with the PyAv (av)
+    library, which uses ffmpeg in the background.
+    """
+
+    def __init__(
+        self,
+        filename: str,
+        fps: float,
+        size: tuple[int, int],
+        pixel_format: str = 'yuv420p'
+    ):
+        self.filename: str = filename
+        """
+        The filename we want to use to save the video
+        file.
+        """
+        # TODO: What about this 'libx264' (?)
+        self._output = av_open(filename, mode = 'w')
+        self._stream: Stream = self._output.add_stream("libx264", rate = fps)
+        self._stream.width = size[0]
+        self._stream.height = size[1]
+        self._stream.pix_fmt = pixel_format
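Taken together, the two new classes reduce the read-process-write skeleton in tests.py to roughly the following (a sketch with hypothetical file names; it still reaches into `_stream` and `_output` exactly as tests.py does, because VideoWriter does not yet expose a public write/flush API):

    from yta_video_opengl.reader import VideoReader
    from yta_video_opengl.writer import VideoWriter

    video = VideoReader("input_hypothetical.mp4")
    writer = VideoWriter("output_hypothetical.mp4", video.fps, video.size, 'yuv420p')

    for frame in video.frame_iterator:
        # ... modify the frame here ...
        packet = writer._stream.encode(frame)
        if packet:
            writer._output.mux(packet)

    # Flush the encoder and close the container
    packet = writer._stream.encode(None)
    if packet:
        writer._output.mux(packet)
    writer._output.close()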