yta-video-opengl 0.0.9__py3-none-any.whl → 0.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,309 @@
+ from yta_video_opengl.utils import frame_to_texture, get_fullscreen_quad_vao
+ from yta_video_opengl.nodes import Node
+ from yta_validation.parameter import ParameterValidator
+ from yta_validation import PythonValidator
+ from abc import abstractmethod
+ from typing import Any, Union
+
+ import numpy as np
+ import moderngl
+
+
+ class _Uniforms:
+     """
+     Class to wrap the functionality related to
+     handling the opengl program uniforms.
+     """
+
+     @property
+     def uniforms(
+         self
+     ) -> dict:
+         """
+         The uniforms in the program, as a dict, in
+         the format `{key: value}`.
+         """
+         return {
+             key: self.program[key].value
+             for key in self.program
+             if PythonValidator.is_instance_of(self.program[key], moderngl.Uniform)
+         }
+
+     def __init__(
+         self,
+         program: moderngl.Program
+     ):
+         self.program: moderngl.Program = program
+         """
+         The program instance this handler class
+         belongs to.
+         """
+
+     def get(
+         self,
+         name: str
+     ) -> Union[Any, None]:
+         """
+         Get the value of the uniform with the
+         given 'name'.
+         """
+         return self.uniforms.get(name, None)
+
+     # TODO: Refactor this method to accept **kwargs
+     # maybe, or to auto-detect the value type and
+     # set the uniform as it must be done
+     def set(
+         self,
+         name: str,
+         value
+     ) -> '_Uniforms':
+         """
+         Set the provided 'value' on the normal type
+         uniform with the given 'name'. Here you have
+         some examples of defined uniforms we can set
+         with this method:
+         - `uniform float name;`
+
+         TODO: Add more examples
+         """
+         if name in self.program:
+             self.program[name].value = value
+
+         return self
+
+     def set_vec(
+         self,
+         name: str,
+         values
+     ) -> '_Uniforms':
+         """
+         Set the provided 'values' on the vector type
+         uniform with the given 'name'. Here you have
+         some examples of defined uniforms we can set
+         with this method:
+         - `uniform vec2 name;`
+
+         TODO: Is this example ok? I haven't used it yet
+         """
+         if name in self.program:
+             self.program[name].write(np.array(values, dtype = 'f4').tobytes())
+
+         return self
+
+     def set_mat(
+         self,
+         name: str,
+         value
+     ) -> '_Uniforms':
+         """
+         Set the provided 'value' on a `matN` type
+         uniform with the given 'name'. The 'value'
+         must be a NxN matrix (maybe a numpy array)
+         transformed to bytes ('.tobytes()').
+
+         This uniform must be defined in the vertex
+         shader like this:
+         - `uniform matN name;`
+
+         TODO: Maybe we can accept a NxN numpy
+         array and do the '.tobytes()' ourselves...
+         """
+         if name in self.program:
+             self.program[name].write(value)
+
+         return self
+
+     def print(
+         self
+     ) -> '_Uniforms':
+         """
+         Print the defined uniforms in console.
+         """
+         for key, value in self.uniforms.items():
+             print(f'"{key}": {str(value)}')
+
+         return self
+
+ class OpenglNode(Node):
+     """
+     The basic class of a node to manipulate frames
+     as opengl textures. This node will process the
+     frame as an input texture and will also generate
+     a texture as the output.
+
+     Nodes can be chained, so the result from one
+     node can be applied on another node.
+     """
+
+     @property
+     @abstractmethod
+     def vertex_shader(
+         self
+     ) -> str:
+         """
+         The code of the vertex shader.
+         """
+         pass
+
+     @property
+     @abstractmethod
+     def fragment_shader(
+         self
+     ) -> str:
+         """
+         The code of the fragment shader.
+         """
+         pass
+
+     def __init__(
+         self,
+         context: moderngl.Context,
+         size: tuple[int, int],
+         **kwargs
+     ):
+         ParameterValidator.validate_mandatory_instance_of('context', context, moderngl.Context)
+         # TODO: Validate size
+
+         self.context: moderngl.Context = context
+         """
+         The context of the program.
+         """
+         self.size: tuple[int, int] = size
+         """
+         The size we want to use for the frame buffer,
+         in a (width, height) format.
+         """
+         # Compile the shaders within the program
+         self.program: moderngl.Program = self.context.program(
+             vertex_shader = self.vertex_shader,
+             fragment_shader = self.fragment_shader
+         )
+
+         # Create the fullscreen quad
+         self.quad = get_fullscreen_quad_vao(
+             context = self.context,
+             program = self.program
+         )
+
+         # Create the output fbo
+         self.output_tex = self.context.texture(self.size, 4)
+         self.output_tex.filter = (moderngl.LINEAR, moderngl.LINEAR)
+         self.fbo = self.context.framebuffer(color_attachments = [self.output_tex])
+
+         self.uniforms: _Uniforms = _Uniforms(self.program)
+         """
+         Shortcut to the uniforms functionality.
+         """
+         # Auto set the uniforms dynamically if they exist
+         for key, value in kwargs.items():
+             self.uniforms.set(key, value)
+
+     def process(
+         self,
+         input: Union[moderngl.Texture, 'VideoFrame', 'np.ndarray']
+     ) -> moderngl.Texture:
+         """
+         Apply the shader to the 'input', that
+         must be a frame or a texture, and return
+         the new resulting texture.
+
+         We use and return textures to keep the
+         process in the GPU and optimize it.
+         """
+         if PythonValidator.is_instance_of(input, ['VideoFrame', 'ndarray']):
+             # TODO: What about the numpy format (?)
+             input = frame_to_texture(input, self.context)
+
+         self.fbo.use()
+         self.context.clear(0.0, 0.0, 0.0, 0.0)
+
+         input.use(location = 0)
+
+         if 'texture' in self.program:
+             self.program['texture'] = 0
+
+         self.quad.render()
+
+         return self.output_tex
+
+ class WavingNode(OpenglNode):
+     """
+     Just an example node, to indicate that we
+     can use custom parameters to make it work.
+     """
+
+     @property
+     def vertex_shader(
+         self
+     ) -> str:
+         return (
+             '''
+             #version 330
+             in vec2 in_vert;
+             in vec2 in_texcoord;
+             out vec2 v_uv;
+             void main() {
+                 v_uv = in_texcoord;
+                 gl_Position = vec4(in_vert, 0.0, 1.0);
+             }
+             '''
+         )
+
+     @property
+     def fragment_shader(
+         self
+     ) -> str:
+         return (
+             '''
+             #version 330
+             uniform sampler2D tex;
+             uniform float time;
+             uniform float amplitude;
+             uniform float frequency;
+             uniform float speed;
+             in vec2 v_uv;
+             out vec4 f_color;
+             void main() {
+                 float wave = sin(v_uv.x * frequency + time * speed) * amplitude;
+                 vec2 uv = vec2(v_uv.x, v_uv.y + wave);
+                 f_color = texture(tex, uv);
+             }
+             '''
+         )
+
+     def __init__(
+         self,
+         context: moderngl.Context,
+         size: tuple[int, int],
+         amplitude: float = 0.05,
+         frequency: float = 10.0,
+         speed: float = 2.0
+     ):
+         super().__init__(
+             context = context,
+             size = size,
+             amplitude = amplitude,
+             frequency = frequency,
+             speed = speed
+         )
+
+     # This is just an example; we are not actually
+     # using the parameters here, but we could set
+     # those specific uniforms to be processed by
+     # the shader code
+     def process(
+         self,
+         input: Union[moderngl.Texture, 'VideoFrame', 'np.ndarray'],
+         t: float = 0.0
+     ) -> moderngl.Texture:
+         """
+         Apply the shader to the 'input', that
+         must be a frame or a texture, and return
+         the new resulting texture.
+
+         We use and return textures to keep the
+         process in the GPU and optimize it.
+         """
+         self.uniforms.set('time', t)
+
+         return super().process(input)
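
For reference, a minimal sketch of how such a node could be driven (the context, size and frame values here are made up, and it assumes `frame_to_texture` accepts a raw RGB ndarray):

    import moderngl
    import numpy as np

    # Headless context; 'size' is the output resolution.
    context = moderngl.create_standalone_context()
    node = WavingNode(context, size = (1920, 1080), amplitude = 0.05)

    # A dummy RGB frame; the result stays on the GPU as a texture,
    # so chained nodes avoid CPU round trips.
    frame = np.zeros((1080, 1920, 3), dtype = np.uint8)
    texture = node.process(frame, t = 0.5)
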
@@ -575,6 +575,16 @@ class VideoReader:
          """
          return self.video_cache.get_frame(index)
 
+     def get_frame_from_t(
+         self,
+         t: float
+     ) -> 'VideoFrame':
+         """
+         Get the video frame at the given 't' time
+         moment, using the video cache system.
+         """
+         return self.video_cache.get_frame_from_t(t)
+
      # TODO: Will we use this (?)
      def get_audio_frame(
          self,
@@ -16,17 +16,22 @@ memory all those frames to be handled fast. It
  will remove the old frames if needed to use only
  the 'size' we set when creating it.
  """
- from yta_video_opengl.utils import t_to_pts, pts_to_t, pts_to_index
+ from yta_video_opengl.utils import t_to_pts, pts_to_t, pts_to_index, index_to_pts
+ from yta_video_frame_time import T
  from av.container import InputContainer
  from av.video.stream import VideoStream
  from av.audio.stream import AudioStream
  from av.video.frame import VideoFrame
  from av.audio.frame import AudioFrame
  from yta_validation.parameter import ParameterValidator
+ from yta_validation import PythonValidator
  from fractions import Fraction
  from collections import OrderedDict
  from typing import Union
 
+ import numpy as np
+ import math
+
 
  class VideoFrameCache:
      """
@@ -60,11 +65,11 @@ class VideoFrameCache:
          self,
          container: InputContainer,
          stream: Union[VideoStream, AudioStream],
-         size: int = 50
+         size: Union[int, None] = None
      ):
          ParameterValidator.validate_mandatory_instance_of('container', container, InputContainer)
          ParameterValidator.validate_mandatory_instance_of('stream', stream, [VideoStream, AudioStream])
-         ParameterValidator.validate_mandatory_positive_int('size', size)
+         ParameterValidator.validate_positive_int('size', size)
 
          self.container: InputContainer = container
          """
@@ -78,7 +83,7 @@ class VideoFrameCache:
          """
          The cache ordered dictionary.
          """
-         self.size = size
+         self.size: Union[int, None] = size
          """
          The size (in number of frames) of the cache.
          """
@@ -99,6 +104,31 @@ class VideoFrameCache:
              if packet.is_keyframe:
                  self.key_frames_pts.append(packet.pts)
 
+         # The cache size will be auto-calculated as the
+         # number of frames in the biggest interval
+         # between consecutive key frames, or a default
+         # value
+         fps = (
+             float(self.stream.average_rate)
+             if PythonValidator.is_instance_of(self.stream, VideoStream) else
+             float(self.stream.rate)
+         )
+         # Intervals, but in number of frames
+         intervals = np.diff(
+             # Intervals of time between keyframes
+             np.array(self.key_frames_pts) * self.stream.time_base
+         ) * fps
+
+         self.size = (
+             math.ceil(np.max(intervals))
+             if intervals.size > 0 else
+             (
+                 self.size or
+                 # TODO: Make this 'default_size' a setting or something
+                 60
+             )
+         )
+
          self.container.seek(0)
 
      def _get_nearest_keyframe_fps(
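
As a worked example of the auto-sizing above (all stream numbers hypothetical):

    import math
    from fractions import Fraction

    import numpy as np

    # Keyframes at pts 0, 250 and 512, time_base = 1/25, fps = 25.
    key_frames_pts = [0, 250, 512]
    time_base = Fraction(1, 25)
    fps = 25.0

    # Seconds between keyframes -> frames per interval.
    intervals = np.diff(np.array(key_frames_pts) * time_base) * fps
    # intervals == [250.0, 262.0], so the cache will hold 262 frames.
    size = math.ceil(np.max(intervals))
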
@@ -117,10 +147,29 @@ class VideoFrameCache:
              if key_frame_pts <= pts
          ])
 
+     def _store_frame_in_cache(
+         self,
+         frame: Union[VideoFrame, AudioFrame]
+     ) -> Union[VideoFrame, AudioFrame]:
+         """
+         Store the provided 'frame' in the cache if it
+         is not already there, removing the first item
+         of the cache if full.
+         """
+         if frame.pts not in self.cache:
+             # TODO: The 'format' must be dynamic
+             self.cache[frame.pts] = frame
+
+         # Clean the cache if full
+         if len(self.cache) > self.size:
+             self.cache.popitem(last = False)
+
+         return frame
+
      def _get_frame_by_pts(
          self,
          pts: int
-     ):
+     ) -> Union[VideoFrame, AudioFrame, None]:
          """
          Get the frame that has the provided 'pts'.
 
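
The eviction works because an OrderedDict remembers insertion order, so `popitem(last = False)` always drops the oldest cached frame (FIFO). A quick sketch:

    from collections import OrderedDict

    cache = OrderedDict()
    for pts in (0, 512, 1024):
        cache[pts] = f'frame@{pts}'

    # Drop the oldest entry, like the 'clean cache if full' branch.
    cache.popitem(last = False)
    # cache now only holds pts 512 and 1024.
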
@@ -145,18 +194,14 @@ class VideoFrameCache:
                  continue
 
              # Store in cache if needed
-             if frame.pts not in self.cache:
-                 # TODO: The 'format' must be dynamic
-                 self.cache[frame.pts] = frame.to_ndarray(format = "rgb24")
-
-                 # Clean cache if full
-                 if len(self.cache) > self.size:
-                     self.cache.popitem(last = False)
+             self._store_frame_in_cache(frame)
 
              if frame.pts >= pts:
                  decoded = self.cache[frame.pts]
                  break
 
+         # TODO: Is this working? We need previous
+         # frames to be able to decode...
          return decoded
 
      def get_frame(
@@ -167,14 +212,24 @@ class VideoFrameCache:
          Get the frame with the given 'index' from
          the cache.
          """
-         # TODO: Maybe we can accept 't' and 'pts' also
-         target_pts = int(index / self.fps / self.time_base)
+         # TODO: Maybe we can accept 'pts' also
+         pts = index_to_pts(index, self.time_base, self.fps)
 
          return (
-             self.cache[target_pts]
-             if target_pts in self.cache else
-             self._get_frame_by_pts(target_pts)
+             self.cache[pts]
+             if pts in self.cache else
+             self._get_frame_by_pts(pts)
          )
+
+     def get_frame_from_t(
+         self,
+         t: float
+     ) -> Union[VideoFrame, AudioFrame]:
+         """
+         Get the frame at the given 't' time moment
+         from the cache.
+         """
+         return self.get_frame(T.video_frame_time_to_video_frame_index(t, self.fps))
 
      def get_frames(
          self,
@@ -186,10 +241,22 @@ class VideoFrameCache:
          the provided 'start' and 'end' time in
          seconds.
          """
-         # TODO: I create this method by default using
-         # the cache. Think about how to implement it
-         # and apply it here, please.
-         # Go to the nearest key frame
+         # We use the cache as the iterator if all the
+         # frames requested are stored there
+         pts_list = [
+             t_to_pts(t, self.time_base)
+             for t in T.get_frame_indexes(self.stream.duration, self.fps, start, end)
+         ]
+
+         if all(
+             pts in self.cache
+             for pts in pts_list
+         ):
+             for pts in pts_list:
+                 yield self.cache[pts]
+
+             return
+
+         # If not all of them are cached we ignore the
+         # cache, because we need to decode and they
+         # are all consecutive
          start = t_to_pts(start, self.time_base)
          end = (
@@ -206,6 +273,9 @@ class VideoFrameCache:
              if frame.pts is None:
                  continue
 
+             # We store all the frames in the cache
+             self._store_frame_in_cache(frame)
+
              if frame.pts < start:
                  continue
 
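
For intuition, these are the pts values that get_frames would check against the cache for a short range (hypothetical stream numbers, and assuming `t_to_pts` is essentially `int(t / time_base)`):

    from fractions import Fraction

    # A 25 fps stream with time_base = 1/12800.
    time_base = Fraction(1, 12800)
    frame_times = (0.0, 0.04, 0.08, 0.12, 0.16)

    pts_list = [int(t / time_base) for t in frame_times]
    # -> [0, 512, 1024, 1536, 2048]; if every pts is already cached
    # the frames are yielded straight from the dict, otherwise the
    # whole range is decoded (and cached along the way).
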
yta_video_opengl/tests.py CHANGED
@@ -582,6 +582,17 @@ def video_modified_stored():
      from yta_video_opengl.classes import WavingFrame, BreathingFrame, HandheldFrame, OrbitingFrame, RotatingInCenterFrame, StrangeTvFrame, GlitchRgbFrame, WavingNode
      from yta_video_opengl.utils import texture_to_frame, frame_to_texture
      from yta_video_opengl.video import Video
+     from yta_video_opengl.complete.timeline import Timeline
+
+     video = Video(VIDEO_PATH, 0.25, 0.75)
+     timeline = Timeline()
+     timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.5)
+     # This is successfully raising an exception
+     #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
+     timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.5)
+     timeline.render(OUTPUT_PATH)
+
+     return
 
      Video(VIDEO_PATH, 0.25, 0.75).save_as(OUTPUT_PATH)
 
yta_video_opengl/utils.py CHANGED
@@ -332,6 +332,17 @@ def pts_to_index(
      """
      return int(round(pts_to_t(pts, stream_time_base) * fps))
 
+ def index_to_pts(
+     index: int,
+     stream_time_base: 'Fraction',
+     fps: float
+ ) -> int:
+     """
+     Transform a frame index into a 'pts' packet
+     timestamp.
+     """
+     return int(index / fps / stream_time_base)
+
  def pts_to_t(
      pts: int,
      stream_time_base: 'Fraction'
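
A quick round trip with made-up stream values (and assuming `pts_to_t` simply multiplies by the time base) shows the two helpers are inverses of each other:

    from fractions import Fraction

    # A 25 fps stream with time_base = 1/12800.
    time_base = Fraction(1, 12800)
    fps = 25.0

    pts = index_to_pts(50, time_base, fps)     # int(50 / 25 / (1/12800)) = 25600
    index = pts_to_index(pts, time_base, fps)  # round(25600 * (1/12800) * 25) = 50
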
yta_video_opengl/video.py CHANGED
@@ -22,6 +22,9 @@ class Video:
          """
          The start packet time stamp (pts), needed
          to optimize the packet iteration process.
+
+         This timestamp is used to read the video
+         file source.
          """
          return int(self.start / self.reader.time_base)
 
@@ -32,6 +35,9 @@ class Video:
          """
          The end packet time stamp (pts), needed to
          optimize the packet iteration process.
+
+         This timestamp is used to read the video
+         file source.
          """
          return (
              int(self.end / self.reader.time_base)
@@ -73,6 +79,15 @@ class Video:
          The duration of the video.
          """
          return self.end - self.start
+
+     @property
+     def number_of_frames(
+         self
+     ):
+         """
+         The number of frames of the video.
+         """
+         return self.reader.number_of_frames
 
      @property
      def frames(
@@ -84,6 +99,10 @@ class Video:
          'start' and 'end' parameters provided when
          instantiating it.
 
+         The iterator will iterate first over the
+         video frames and, once finished, over the
+         audio frames.
+
          This method returns a tuple of 3 elements:
          - `frame` as a `VideoFrame` instance
          - `t` as the frame time moment
@@ -95,17 +114,6 @@ class Video:
          for frame in self.reader.get_audio_frames(self.start, self.end):
              yield frame
 
-         # for frame in iterate_stream_frames_demuxing(
-         #     container = self.reader.container,
-         #     video_stream = self.reader.video_stream,
-         #     audio_stream = self.reader.audio_stream,
-         #     video_start_pts = self.start_pts,
-         #     video_end_pts = self.end_pts,
-         #     audio_start_pts = self.audio_start_pts,
-         #     audio_end_pts = self.audio_end_pts
-         # ):
-         #     yield frame
-
      def __init__(
          self,
          filename: str,
@@ -143,25 +151,46 @@ class Video:
          filename: str
      ) -> 'Video':
          writer = VideoWriter(filename)
-         #writer.set_video_stream(self.reader.video_stream.codec.name, self.reader.fps, self.reader.size, PIXEL_FORMAT)
          writer.set_video_stream_from_template(self.reader.video_stream)
          writer.set_audio_stream_from_template(self.reader.audio_stream)
 
-         # TODO: I need to process the audio also, so
-         # build a method that do the same but for
-         # both streams at the same time
+         from yta_video_opengl.nodes.audio import VolumeAudioNode
+         # Audio from 0 to 1
+         # TODO: This effect 'fn' is shitty
+         def fade_in_fn(t, index, start = 0.5, end = 1.0):
+             if t < start or t > end:
+                 # Outside the range: touch nothing → original volume (1.0)
+                 progress = 1.0
+             else:
+                 # Inside the range: interpolate linearly between 0 → 1
+                 progress = (t - start) / (end - start)
+
+             return progress
+
+         #fade_in = SetVolumeAudioNode(lambda t, i: min(1, t / self.duration))
+         fade_in = VolumeAudioNode(lambda t, i: fade_in_fn(t, i, 0.5, 1.0))
+
          for frame, t, index in self.frames:
              if PythonValidator.is_instance_of(frame, 'VideoFrame'):
                  print(f'Saving video frame {str(index)}, with t = {str(t)}')
+
+                 # TODO: Process any video frame change
+
                  writer.mux_video_frame(
                      frame = frame
                  )
              else:
                  print(f'Saving audio frame {str(index)} ({str(round(float(t * self.reader.fps), 2))}), with t = {str(t)}')
+
+                 # TODO: Process any audio frame change
+                 # Test setting the audio
+                 frame = fade_in.process(frame, t)
+
                  writer.mux_audio_frame(
                      frame = frame
                  )
 
+         # Flush the remaining frames to write
          writer.mux_audio_frame(None)
          writer.mux_video_frame(None)
 
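
Evaluating that fade function at a few time moments makes its behavior concrete, including the quirk that the volume is left untouched before the window even starts:

    for t in (0.25, 0.5, 0.75, 1.0, 1.2):
        print(t, fade_in_fn(t, 0))
    # -> 1.0, 0.0, 0.5, 1.0, 1.0: full volume outside the
    #    0.5-1.0 window, a linear 0 → 1 ramp inside it.
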
@@ -59,6 +59,9 @@ class VideoWriter:
              options = options
          )
 
+         # We need to force this or it will not work
+         self.video_stream.time_base = Fraction(1, int(fps))
+
          if size is not None:
              self.video_stream.width = size[0]
              self.video_stream.height = size[1]
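
A short sketch of why forcing that time base is convenient (fps value hypothetical): with `time_base = 1/fps`, frame `i` simply gets `pts = i`:

    from fractions import Fraction

    time_base = Fraction(1, 30)  # fps = 30

    # t = pts * time_base, so the pts increments by 1 per frame.
    for pts in range(3):
        print(pts, float(pts * time_base))  # 0.0, 0.0333..., 0.0666...
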
@@ -190,6 +193,7 @@ class VideoWriter:
              # TODO: What strategy should we adopt with
              # the packets that cannot be handled
              # properly (?)
+             print('Invalid packet')
              print(packet)
              pass
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.9
+ Version: 0.0.11
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com