yta-video-opengl 0.0.9__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -132,6 +132,7 @@ class _Uniforms:
132
132
  for key, value in self.uniforms.items():
133
133
  print(f'"{key}": {str(value)}')
134
134
 
135
+ # TODO: Moved to 'nodes.opengl.py'
135
136
  class BaseNode:
136
137
  """
137
138
  The basic class of a node to manipulate frames
@@ -0,0 +1,119 @@
1
+ from yta_validation.parameter import ParameterValidator
2
+ from typing import Union
3
+ from abc import ABC, abstractmethod
4
+
5
+ import av
6
+ import moderngl
7
+
8
+
9
class Node(ABC):
    """
    Base class to represent a node, which
    is an entity that processes frames
    individually.

    This class must be inherited by any
    video or audio node class.
    """

    # TODO: What about the types?
    @abstractmethod
    def process(
        # Fix: the abstract method was declared without
        # 'self', which did not match the instance-method
        # signature the subclasses implement and callers
        # (like TimedNode) invoke.
        self,
        # Annotations are quoted forward references so the
        # class can be defined even if the heavy third-party
        # modules are imported lazily.
        frame: Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture'],
        t: float
        # TODO: Maybe we need 'fps' and 'number_of_frames'
        # to calculate progressions or similar...
    ) -> Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture']:
        """
        Process the given 'frame', that is being
        played at the 't' time moment, and return
        the resulting frame (or texture).

        Must be implemented by the subclasses.
        """
        pass
28
+
29
class TimedNode:
    """
    Class to represent a Node wrapper to
    be able to specify the time range in
    which we want the node to be applied.

    If the 't' time moment is not inside
    this range, the frame will be returned
    as it is, with no change.

    A 't' time moment inside the range has
    this condition:
    - `start <= t < end`

    We are not including the end because
    the next TimedNode could start on that
    specific value, and remember that the
    first time moment is 0.

    This is the class that has to be applied
    when working with videos and not a Node
    directly.

    The 'start' and 'end' values are, by
    default, 0.0 and None, which means the
    node is applied during the whole duration.
    """

    def __init__(
        self,
        node: 'Node',
        start: float = 0.0,
        end: Union[float, None] = None
    ):
        """
        Wrap the given 'node' so it is only applied
        in the `start <= t < end` range. Raises an
        Exception if 'end' is lower than 'start'.
        """
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
        ParameterValidator.validate_positive_number('end', end, do_include_zero = False)

        if (
            end is not None and
            end < start
        ):
            raise Exception('The "end" parameter provided must be greater or equal to the "start" parameter.')

        self.node: 'Node' = node
        """
        The node we are wrapping and we want to
        apply as a modification of the frame in
        which we are in a 't' time moment.
        """
        self.start: float = start
        """
        The 't' time moment in which the Node must
        start being applied (including it).
        """
        self.end: Union[float, None] = end
        """
        The 't' time moment in which the Node must
        stop being applied (excluding it).
        """

    def is_within_time(
        self,
        t: float
    ) -> bool:
        """
        Flag to indicate if the 't' time moment provided
        is in the range of this TimedNode instance,
        which means that it fits this condition:
        - `start <= t < end`

        An 'end' that is None behaves as an open range,
        so any `t >= start` is within time.
        """
        return (
            self.start <= t < self.end
            if self.end is not None else
            self.start <= t
        )

    def process(
        self,
        # Fix: the return annotation used undefined bare
        # names ('VideoFrame', 'AudioFrame', 'Texture');
        # now consistent with the parameter annotation.
        frame: Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture'],
        t: float
        # TODO: Maybe we need 'fps' and 'number_of_frames'
        # to calculate progressions or similar...
    ) -> Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture']:
        """
        Process the frame if the provided 't' time
        moment is in the range of this TimedNode
        instance, or return it unchanged if it is
        not.
        """
        return (
            self.node.process(frame, t)
            if self.is_within_time(t) else
            frame
        )
@@ -0,0 +1,115 @@
1
+ """
2
+ When working with audio frames, we don't need
3
+ to use the GPU because audios are 1D and the
4
+ information can be processed perfectly with
5
+ a library like numpy.
6
+
7
+ If we need a very intense calculation for an
8
+ audio frame (FFT, convolution, etc.) we can
9
+ use CuPy or some DPS specific libraries, but
10
+ 90% is perfectly done with numpy.
11
+
12
+ If you want to modify huge amounts of audio
13
+ (some seconds at the same time), you can use
14
+ CuPy, that has the same API as numpy but
15
+ working in GPU. Doing this below most of the
16
+ changes would work:
17
+ - `import numpy as np` → `import cupy as np`
18
+ """
19
+ from yta_video_opengl.nodes import TimedNode
20
+ from abc import abstractmethod
21
+ from typing import Union
22
+
23
+ import numpy as np
24
+ import av
25
+
26
+
27
class AudioNode:
    """
    Base audio node class to implement a
    change in an audio frame by using the
    numpy library.

    NOTE(review): this class does not inherit
    from 'ABC', so the '@abstractmethod'
    decorator is not enforced at instantiation
    time. A runtime NotImplementedError guard
    is raised instead of silently returning
    None when a subclass forgets to implement
    the 'process' method.
    """

    @abstractmethod
    def process(
        self,
        # Quoted forward reference so the class can be
        # defined without the 'av' module being resolved
        # at definition time.
        frame: 'av.AudioFrame',
        t: float
    ):
        """
        Process the provided audio 'frame' that
        is played on the given 't' time moment.

        Must be implemented by the subclasses.
        """
        raise NotImplementedError('The "process" method must be implemented by the subclass.')
45
+
46
+ """
47
+ Here you have an example. The 'private'
48
+ node class is the modifier, that we don't
49
+ want to expose, and the 'public' class is
50
+ the one that inherits from TimedNode and
51
+ wraps the 'private' class to build the
52
+ functionality.
53
+ """
54
class VolumeAudioNode(TimedNode):
    """
    TimedNode to set the audio volume of a video
    in a specific frame.
    """

    def __init__(
        self,
        factor_fn,
        start: float = 0.0,
        end: Union[float, None] = None
    ):
        # Wrap the private modifier node so the volume
        # change is only applied inside the time range
        volume_node = _SetVolumeAudioNode(factor_fn)

        super().__init__(
            node = volume_node,
            start = start,
            end = end
        )
71
+
72
class _SetVolumeAudioNode(AudioNode):
    """
    Audio node to change the volume of an
    audio frame.
    """

    def __init__(
        self,
        factor_fn
    ):
        """
        Initialize with 'factor_fn', a function
        `(t, index) -> factor` that returns the
        volume factor (a multiplier) to apply to
        the audio samples.
        """
        self.factor_fn = factor_fn

    def process(
        self,
        frame: 'av.AudioFrame',
        t: float
    ) -> 'av.AudioFrame':
        """
        Apply the volume factor to the provided
        audio 'frame' that is played on the given
        't' time moment, and return a new frame
        with the same timing metadata.
        """
        # TODO: Why index (?) Maybe 'total_frames'
        factor = self.factor_fn(t, 0)

        # Work in float to keep precision while scaling
        samples = frame.to_ndarray().astype(np.float32)
        samples *= factor

        # Cast back according to the source format
        # ('fltp', 's16', 's16p'). Fix: the integer
        # formats must be clipped first, because a
        # plain astype wraps around on overflow and
        # produces loud artifacts.
        samples = (
            np.clip(samples, -32768, 32767).astype(np.int16)
            if 's16' in frame.format.name else
            samples.astype(np.float32)
        )

        new_frame = av.AudioFrame.from_ndarray(
            samples,
            format = frame.format.name,
            layout = frame.layout.name
        )
        # Preserve the timing metadata so the stream
        # keeps muxing and playing in sync
        new_frame.sample_rate = frame.sample_rate
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base

        return new_frame
@@ -0,0 +1,5 @@
1
+ """
2
+ Working with video frames has to be done
3
+ with nodes that use OpenGL because this
4
+ way, by using the GPU, is the best way.
5
+ """
@@ -0,0 +1,309 @@
1
from yta_video_opengl.utils import frame_to_texture, get_fullscreen_quad_vao
from yta_video_opengl.nodes import Node
from yta_validation.parameter import ParameterValidator
from yta_validation import PythonValidator
from abc import abstractmethod
from typing import Any, Union

import numpy as np
import moderngl
10
+
11
+
12
+ class _Uniforms:
13
+ """
14
+ Class to wrap the functionality related to
15
+ handling the opengl program uniforms.
16
+ """
17
+
18
+ @property
19
+ def uniforms(
20
+ self
21
+ ) -> dict:
22
+ """
23
+ The uniforms in the program, as a dict, in
24
+ the format `{key, value}`.
25
+ """
26
+ return {
27
+ key: self.program[key].value
28
+ for key in self.program
29
+ if PythonValidator.is_instance_of(self.program[key], moderngl.Uniform)
30
+ }
31
+
32
+ def __init__(
33
+ self,
34
+ program: moderngl.Program
35
+ ):
36
+ self.program: moderngl.Program = program
37
+ """
38
+ The program instance this handler class
39
+ belongs to.
40
+ """
41
+
42
+ def get(
43
+ self,
44
+ name: str
45
+ ) -> Union[any, None]:
46
+ """
47
+ Get the value of the uniform with the
48
+ given 'name'.
49
+ """
50
+ return self.uniforms.get(name, None)
51
+
52
+ # TODO: I need to refactor these method to
53
+ # accept a **kwargs maybe, or to auto-detect
54
+ # the type and add the uniform as it must be
55
+ # done
56
+ def set(
57
+ self,
58
+ name: str,
59
+ value
60
+ ) -> '_Uniforms':
61
+ """
62
+ Set the provided 'value' to the normal type
63
+ uniform with the given 'name'. Here you have
64
+ some examples of defined uniforms we can set
65
+ with this method:
66
+ - `uniform float name;`
67
+
68
+ TODO: Add more examples
69
+ """
70
+ if name in self.program:
71
+ self.program[name].value = value
72
+
73
+ return self
74
+
75
+ def set_vec(
76
+ self,
77
+ name: str,
78
+ values
79
+ ) -> '_Uniforms':
80
+ """
81
+ Set the provided 'value' to the normal type
82
+ uniform with the given 'name'. Here you have
83
+ some examples of defined uniforms we can set
84
+ with this method:
85
+ - `uniform vec2 name;`
86
+
87
+ TODO: Is this example ok? I didn't use it yet
88
+ """
89
+ if name in self.program:
90
+ self.program[name].write(np.array(values, dtype = 'f4').tobytes())
91
+
92
+ return self
93
+
94
+ def set_mat(
95
+ self,
96
+ name: str,
97
+ value
98
+ ) -> '_Uniforms':
99
+ """
100
+ Set the provided 'value' to a `matN` type
101
+ uniform with the given 'name'. The 'value'
102
+ must be a NxN matrix (maybe numpy array)
103
+ transformed to bytes ('.tobytes()').
104
+
105
+ This uniform must be defined in the vertex
106
+ like this:
107
+ - `uniform matN name;`
108
+
109
+ TODO: Maybe we can accept a NxN numpy
110
+ array and do the .tobytes() by ourselves...
111
+ """
112
+ if name in self.program:
113
+ self.program[name].write(value)
114
+
115
+ return self
116
+
117
+ def print(
118
+ self
119
+ ) -> '_Uniforms':
120
+ """
121
+ Print the defined uniforms in console.
122
+ """
123
+ for key, value in self.uniforms.items():
124
+ print(f'"{key}": {str(value)}')
125
+
126
class OpenglNode(Node):
    """
    The basic class of a node to manipulate frames
    as opengl textures. This node will process the
    frame as an input texture and will generate
    also a texture as the output.

    Nodes can be chained and the result from one
    node can be applied on another node.
    """

    @property
    @abstractmethod
    def vertex_shader(
        self
    ) -> str:
        """
        The code of the vertex shader.

        Must be implemented by the subclasses.
        """
        pass

    @property
    @abstractmethod
    def fragment_shader(
        self
    ) -> str:
        """
        The code of the fragment shader.

        Must be implemented by the subclasses.
        """
        pass

    def __init__(
        self,
        context: moderngl.Context,
        size: tuple[int, int],
        **kwargs
    ):
        """
        Compile the shaders into a program, build
        the fullscreen quad and the output
        framebuffer, and auto-set any extra
        'kwargs' as uniforms (when they are
        defined in the program).
        """
        ParameterValidator.validate_mandatory_instance_of('context', context, moderngl.Context)
        # TODO: Validate size

        self.context: moderngl.Context = context
        """
        The context of the program.
        """
        self.size: tuple[int, int] = size
        """
        The size we want to use for the frame buffer
        in a (width, height) format.
        """
        # Compile shaders within the program
        self.program: moderngl.Program = self.context.program(
            vertex_shader = self.vertex_shader,
            fragment_shader = self.fragment_shader
        )

        # Create the fullscreen quad
        self.quad = get_fullscreen_quad_vao(
            context = self.context,
            program = self.program
        )

        # Create the output fbo
        # (4 components per pixel — presumably RGBA)
        self.output_tex = self.context.texture(self.size, 4)
        self.output_tex.filter = (moderngl.LINEAR, moderngl.LINEAR)
        self.fbo = self.context.framebuffer(color_attachments = [self.output_tex])

        self.uniforms: _Uniforms = _Uniforms(self.program)
        """
        Shortcut to the uniforms functionality.
        """
        # Auto set uniforms dynamically if existing
        for key, value in kwargs.items():
            self.uniforms.set(key, value)

    def process(
        self,
        input: Union[moderngl.Texture, 'VideoFrame', 'np.ndarray']
    ) -> moderngl.Texture:
        """
        Apply the shader to the 'input', that
        must be a frame or a texture, and return
        the new resulting texture.

        We use and return textures to maintain
        the process in GPU and optimize it.

        NOTE(review): this signature differs from
        the abstract 'Node.process' (no 't' param
        here) — subclasses like WavingNode add it
        back; confirm the intended contract.
        """
        # Frames and numpy arrays must be uploaded
        # to the GPU as textures first
        if PythonValidator.is_instance_of(input, ['VideoFrame', 'ndarray']):
            # TODO: What about the numpy format (?)
            input = frame_to_texture(input, self.context)

        # Render into our offscreen framebuffer,
        # cleared to transparent black
        self.fbo.use()
        self.context.clear(0.0, 0.0, 0.0, 0.0)

        # Bind the input texture to texture unit 0
        input.use(location = 0)

        # NOTE(review): this only binds a sampler
        # uniform literally named 'texture'. Shaders
        # that name their sampler differently (e.g.
        # 'tex' in WavingNode) rely on the default
        # uniform value 0 instead — confirm intended.
        if 'texture' in self.program:
            self.program['texture'] = 0

        self.quad.render()

        return self.output_tex
227
+
228
class WavingNode(OpenglNode):
    """
    Just an example, without the shaders code
    actually, to indicate that we can use
    custom parameters to make it work.
    """

    @property
    def vertex_shader(
        self
    ) -> str:
        """
        The code of the vertex shader: a simple
        pass-through that forwards the texture
        coordinates to the fragment shader.
        """
        return (
            '''
            #version 330
            in vec2 in_vert;
            in vec2 in_texcoord;
            out vec2 v_uv;
            void main() {
                v_uv = in_texcoord;
                gl_Position = vec4(in_vert, 0.0, 1.0);
            }
            '''
        )

    @property
    def fragment_shader(
        self
    ) -> str:
        """
        The code of the fragment shader: displaces
        the vertical texture coordinate with a sine
        wave driven by the 'time', 'amplitude',
        'frequency' and 'speed' uniforms.
        """
        return (
            '''
            #version 330
            uniform sampler2D tex;
            uniform float time;
            uniform float amplitude;
            uniform float frequency;
            uniform float speed;
            in vec2 v_uv;
            out vec4 f_color;
            void main() {
                float wave = sin(v_uv.x * frequency + time * speed) * amplitude;
                vec2 uv = vec2(v_uv.x, v_uv.y + wave);
                f_color = texture(tex, uv);
            }
            '''
        )

    def __init__(
        self,
        context: moderngl.Context,
        size: tuple[int, int],
        amplitude: float = 0.05,
        frequency: float = 10.0,
        speed: float = 2.0
    ):
        # The extra kwargs are auto-set by the parent
        # as uniforms with the same names, so they
        # reach the fragment shader above
        super().__init__(
            context = context,
            size = size,
            amplitude = amplitude,
            frequency = frequency,
            speed = speed
        )

    # This is just an example and we are not
    # using the parameters actually, but we
    # could set those specific uniforms to be
    # processed by the code
    def process(
        self,
        input: Union[moderngl.Texture, 'VideoFrame', 'np.ndarray'],
        t: float = 0.0,
    ) -> moderngl.Texture:
        """
        Apply the shader to the 'input', that
        must be a frame or a texture, and return
        the new resulting texture.

        We use and return textures to maintain
        the process in GPU and optimize it.
        """
        # The 'time' uniform drives the wave motion
        self.uniforms.set('time', t)

        return super().process(input)
yta_video_opengl/video.py CHANGED
@@ -22,6 +22,9 @@ class Video:
22
22
  """
23
23
  The start packet time stamp (pts), needed
24
24
  to optimize the packet iteration process.
25
+
26
+ This timestamp is used to read the video
27
+ file source.
25
28
  """
26
29
  return int(self.start / self.reader.time_base)
27
30
 
@@ -32,6 +35,9 @@ class Video:
32
35
  """
33
36
  The end packet time stamp (pts), needed to
34
37
  optimize the packet iteration process.
38
+
39
+ This timestamp is used to read the video
40
+ file source.
35
41
  """
36
42
  return (
37
43
  int(self.end / self.reader.time_base)
@@ -73,6 +79,15 @@ class Video:
73
79
  The duration of the video.
74
80
  """
75
81
  return self.end - self.start
82
+
83
+ @property
84
+ def number_of_frames(
85
+ self
86
+ ):
87
+ """
88
+ The number of frames of the video.
89
+ """
90
+ return self.reader.number_of_frames
76
91
 
77
92
  @property
78
93
  def frames(
@@ -84,6 +99,10 @@ class Video:
84
99
  'start' and 'end' parameters provided when
85
100
  instantiating it.
86
101
 
102
+ The iterator will iterate first over the
103
+ video frames, and once finished over the
104
+ audio frames.
105
+
87
106
  This method returns a tuple of 3 elements:
88
107
  - `frame` as a `VideoFrame` instance
89
108
  - `t` as the frame time moment
@@ -95,17 +114,6 @@ class Video:
95
114
  for frame in self.reader.get_audio_frames(self.start, self.end):
96
115
  yield frame
97
116
 
98
- # for frame in iterate_stream_frames_demuxing(
99
- # container = self.reader.container,
100
- # video_stream = self.reader.video_stream,
101
- # audio_stream = self.reader.audio_stream,
102
- # video_start_pts = self.start_pts,
103
- # video_end_pts = self.end_pts,
104
- # audio_start_pts = self.audio_start_pts,
105
- # audio_end_pts = self.audio_end_pts
106
- # ):
107
- # yield frame
108
-
109
117
  def __init__(
110
118
  self,
111
119
  filename: str,
@@ -143,25 +151,46 @@ class Video:
143
151
  filename: str
144
152
  ) -> 'Video':
145
153
  writer = VideoWriter(filename)
146
- #writer.set_video_stream(self.reader.video_stream.codec.name, self.reader.fps, self.reader.size, PIXEL_FORMAT)
147
154
  writer.set_video_stream_from_template(self.reader.video_stream)
148
155
  writer.set_audio_stream_from_template(self.reader.audio_stream)
149
156
 
150
- # TODO: I need to process the audio also, so
151
- # build a method that do the same but for
152
- # both streams at the same time
157
+ from yta_video_opengl.nodes.audio import VolumeAudioNode
158
+ # Audio from 0 to 1
159
+ # TODO: This effect 'fn' is a rough placeholder and should be improved
160
+ def fade_in_fn(t, index, start=0.5, end=1.0):
161
+ if t < start or t > end:
162
+ # outside the range: don't touch anything → original volume (1.0)
163
+ progress = 1.0
164
+ else:
165
+ # inside the range: interpolate linearly from 0 → 1
166
+ progress = (t - start) / (end - start)
167
+
168
+ return progress
169
+
170
+ #fade_in = SetVolumeAudioNode(lambda t, i: min(1, t / self.duration))
171
+ fade_in = VolumeAudioNode(lambda t, i: fade_in_fn(t, i, 0.5, 1.0))
172
+
153
173
  for frame, t, index in self.frames:
154
174
  if PythonValidator.is_instance_of(frame, 'VideoFrame'):
155
175
  print(f'Saving video frame {str(index)}, with t = {str(t)}')
176
+
177
+ # TODO: Process any video frame change
178
+
156
179
  writer.mux_video_frame(
157
180
  frame = frame
158
181
  )
159
182
  else:
160
183
  print(f'Saving audio frame {str(index)} ({str(round(float(t * self.reader.fps), 2))}), with t = {str(t)}')
184
+
185
+ # TODO: Process any audio frame change
186
+ # Test setting audio
187
+ frame = fade_in.process(frame, t)
188
+
161
189
  writer.mux_audio_frame(
162
190
  frame = frame
163
191
  )
164
192
 
193
+ # Flush the remaining frames to write
165
194
  writer.mux_audio_frame(None)
166
195
  writer.mux_video_frame(None)
167
196
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.9
3
+ Version: 0.0.10
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -0,0 +1,16 @@
1
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
+ yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
3
+ yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
4
+ yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
5
+ yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
6
+ yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
7
+ yta_video_opengl/reader/__init__.py,sha256=rAWISZ7OzDnzar0At-LCfDA-MmWzax2jT2l5gySv4aw,16911
8
+ yta_video_opengl/reader/cache.py,sha256=UKhZvgY80ySuOYH52ikco6affsm8bjP656EroVR9Utg,6960
9
+ yta_video_opengl/tests.py,sha256=NZ-W1ak-ygwL9wATzEXtlCeCZX74ij_TZhktetMnOD4,25810
10
+ yta_video_opengl/utils.py,sha256=y0N1mS9FjpB4nFnx00K7sIs5EsqMkTe8C0bzLXZe9YM,10479
11
+ yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
12
+ yta_video_opengl/writer.py,sha256=7xglz8xHOXMtWkctzuB21Y-e9xWFXYcklt3jVUN4svQ,8198
13
+ yta_video_opengl-0.0.10.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
14
+ yta_video_opengl-0.0.10.dist-info/METADATA,sha256=YZ81GUO5J78iri9e_GdjUXafQqjEyohcVjwAuknXGhU,671
15
+ yta_video_opengl-0.0.10.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
16
+ yta_video_opengl-0.0.10.dist-info/RECORD,,
@@ -1,12 +0,0 @@
1
- yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
- yta_video_opengl/classes.py,sha256=VUw73kfz8kxYLE0x0LxNHqFekF3CklcyofCNN-z57Lg,37706
3
- yta_video_opengl/reader/__init__.py,sha256=rAWISZ7OzDnzar0At-LCfDA-MmWzax2jT2l5gySv4aw,16911
4
- yta_video_opengl/reader/cache.py,sha256=UKhZvgY80ySuOYH52ikco6affsm8bjP656EroVR9Utg,6960
5
- yta_video_opengl/tests.py,sha256=NZ-W1ak-ygwL9wATzEXtlCeCZX74ij_TZhktetMnOD4,25810
6
- yta_video_opengl/utils.py,sha256=y0N1mS9FjpB4nFnx00K7sIs5EsqMkTe8C0bzLXZe9YM,10479
7
- yta_video_opengl/video.py,sha256=Y14-Bsq7AH0GenwbPk61giD9eLHZDmWeZvP_iZn0e7w,5182
8
- yta_video_opengl/writer.py,sha256=7xglz8xHOXMtWkctzuB21Y-e9xWFXYcklt3jVUN4svQ,8198
9
- yta_video_opengl-0.0.9.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
10
- yta_video_opengl-0.0.9.dist-info/METADATA,sha256=iYodm7r8DJD5lpPAs92lAClPcJ4dFrS94Dv5WjHcx5Q,670
11
- yta_video_opengl-0.0.9.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
12
- yta_video_opengl-0.0.9.dist-info/RECORD,,