yta-video-opengl 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,95 +12,94 @@ from av.container.input import InputContainer
12
12
  from fractions import Fraction
13
13
  from av import open as av_open
14
14
  from typing import Union
15
+ from dataclasses import dataclass
15
16
 
16
17
 
18
+ @dataclass
17
19
  class VideoReaderFrame:
18
20
  """
19
21
  Class to wrap a frame of a video that is
20
22
  being read, that can be a video or audio
21
- frame.
23
+ frame, and has been decoded.
22
24
  """
23
25
 
24
26
  @property
25
- def is_packet(
26
- self
27
- ) -> bool:
28
- """
29
- Flag to indicate if the frame is actually
30
- a packet of frames.
31
- """
32
- return PythonValidator.is_instance_of(self.frame, Packet)
33
-
34
- @property
35
- def is_video_frame(
27
+ def is_video(
36
28
  self
37
29
  ):
38
30
  """
39
31
  Flag to indicate if the instance is a video
40
32
  frame.
41
33
  """
42
- return PythonValidator.is_instance_of(self.frame, VideoFrame)
34
+ return PythonValidator.is_instance_of(self.data, VideoFrame)
43
35
 
44
36
  @property
45
- def is_audio_frame(
37
+ def is_audio(
46
38
  self
47
39
  ):
48
40
  """
49
41
  Flag to indicate if the instance is an audio
50
42
  frame.
51
43
  """
52
- return PythonValidator.is_instance_of(self.frame, AudioFrame)
44
+ return PythonValidator.is_instance_of(self.data, AudioFrame)
53
45
 
46
+ def __init__(
47
+ self,
48
+ # TODO: Add the type, please
49
+ data: any
50
+ ):
51
+ self.data: Union[AudioFrame, VideoFrame] = data
52
+ """
53
+ The frame content, that can be audio or video
54
+ frame.
55
+ """
56
+
57
+ @dataclass
58
+ class VideoReaderPacket:
59
+ """
60
+ Class to wrap a packet of a video that is
61
+ being read, that can contain video or audio
62
+ frames.
63
+ """
64
+
54
65
  @property
55
- def is_video_packet(
66
+ def is_video(
56
67
  self
57
68
  ) -> bool:
58
69
  """
59
- Flag to indicate if the instance is a packet
60
- containing video frames.
70
+ Flag to indicate if the packet includes video
71
+ frames or not.
61
72
  """
62
- return (
63
- self.is_packet and
64
- self.frame.stream.type == 'video'
65
- )
66
-
73
+ return self.data.stream.type == 'video'
74
+
67
75
  @property
68
- def is_audio_packet(
76
+ def is_audio(
69
77
  self
70
78
  ) -> bool:
71
79
  """
72
- Flag to indicate if the instance is a packet
73
- containing audio frames.
80
+ Flag to indicate if the packet includes audio
81
+ frames or not.
74
82
  """
75
- return (
76
- self.is_packet and
77
- self.frame.stream.type == 'audio'
78
- )
79
-
83
+ return self.data.stream.type == 'audio'
84
+
80
85
  def __init__(
81
86
  self,
82
- # TODO: Add the type, please
83
- frame: any
87
+ data: Packet
84
88
  ):
85
- self.frame: Union[AudioFrame, VideoFrame, Packet] = frame
89
+ self.data: Packet = data
86
90
  """
87
- The frame content, that can be audio or video
88
- frame, or a packet of more than one frame.
91
+ The packet, that can include video or audio
92
+ frames and can be decoded.
89
93
  """
90
94
 
91
- def test():
92
- # If packet we need to do this (if we want)
93
- # to decode
94
- #for frame in packet.decode():
95
-
96
- # In the output we need to 'mux' packets,
97
- # always
98
- pass
99
-
100
- # The '.decode()' generates a list of Packet
101
- # The '.encode()' receives a list of Packet
102
- # The '.demux()' extract packets from a single input
103
- # The '.mux()' mix packets into a single output
95
+ def decode(
96
+ self
97
+ ) -> list['SubtitleSet']:
98
+ """
99
+ Get the frames but decoded, perfect to make
100
+ modifications and encode to save them again.
101
+ """
102
+ return self.data.decode()
104
103
 
105
104
 
106
105
  class VideoReader:
@@ -124,30 +123,93 @@ class VideoReader:
124
123
  self
125
124
  ) -> Union[VideoFrame, None]:
126
125
  """
127
- Get the next frame of the iterator.
126
+ Get the next video frame (decoded) from the
127
+ iterator.
128
128
  """
129
129
  return next(self.frame_iterator)
130
+
131
+ @property
132
+ def audio_frame_iterator(
133
+ self
134
+ ) -> 'Iterator[AudioFrame]':
135
+ """
136
+ Iterator to iterate over all the audio frames
137
 + decoded.
138
+ """
139
+ return self.container.decode(self.audio_stream)
140
+
141
+ @property
142
+ def next_audio_frame(
143
+ self
144
+ ) -> Union[AudioFrame, None]:
145
+ """
146
+ Get the next audio frame (decoded) from the
147
+ iterator.
148
+ """
149
+ return next(self.audio_frame_iterator)
150
+
151
+ @property
152
+ def packet_iterator(
153
+ self
154
+ ) -> 'Iterator[Packet]':
155
+ """
156
+ Iterator to iterate over all the video frames
157
 + as packets (not decoded).
158
+ """
159
+ return self.container.demux(self.video_stream)
160
+
161
+ @property
162
+ def next_packet(
163
+ self
164
+ ) -> Union[Packet, None]:
165
+ """
166
+ Get the next video packet (not decoded) from
167
+ the iterator.
168
+ """
169
+ return next(self.packet_iterator)
170
+
171
+ @property
172
+ def audio_packet_iterator(
173
+ self
174
+ ) -> 'Iterator[Packet]':
175
+ """
176
+ Iterator to iterate over all the audio frames
177
 + as packets (not decoded).
178
+ """
179
+ return self.container.demux(self.audio_stream)
180
+
181
+ @property
182
+ def next_audio_packet(
183
+ self
184
+ ) -> Union[Packet, None]:
185
+ """
186
+ Get the next audio packet (not decoded) from
187
+ the iterator.
188
+ """
189
+ return next(self.packet_iterator)
130
190
 
131
- # TODO: Maybe rename (?)
132
191
  @property
133
- def frame_with_audio_iterator(
192
+ def packet_with_audio_iterator(
134
193
  self
135
194
  ) -> 'Iterator[Packet]':
136
195
  """
137
196
  Iterator to iterate over all the video frames
138
- decodified, including also the audio.
197
+ as packets (not decodified) including also the
198
+ audio as packets.
139
199
  """
140
200
  return self.container.demux((self.video_stream, self.audio_stream))
141
201
 
142
202
  @property
143
- def next_frame_with_audio(
203
+ def next_packet_with_audio(
144
204
  self
145
205
  ) -> Union[Packet, None]:
146
206
  """
147
- Get the next frame of the iterator that includes
148
- the audio.
207
+ Get the next video frames packet (or audio
208
+ frames packet) from the iterator. Depending
209
+ on the position, the packet can be video or
210
+ audio.
149
211
  """
150
- return next(self.frame_with_audio_iterator)
212
+ return next(self.packet_with_audio_iterator)
151
213
 
152
214
  @property
153
215
  def codec_name(
@@ -166,6 +228,24 @@ class VideoReader:
166
228
  Get the name of the audio codec.
167
229
  """
168
230
  return self.audio_stream.codec_context.name
231
+
232
+ @property
233
+ def number_of_frames(
234
+ self
235
+ ) -> int:
236
+ """
237
+ The number of frames in the video.
238
+ """
239
+ return self.video_stream.frames
240
+
241
+ @property
242
+ def number_of_audio_frames(
243
+ self
244
+ ) -> int:
245
+ """
246
+ The number of frames in the audio.
247
+ """
248
+ return self.audio_stream.frames
169
249
 
170
250
  @property
171
251
  def fps(
@@ -248,48 +328,63 @@ class VideoReader:
248
328
 
249
329
  def iterate(
250
330
  self
251
- ):
331
+ ) -> 'Iterator[Union[VideoFrame, AudioFrame]]':
332
+ """
333
+ Iterator to iterate over the video frames
334
+ (already decoded).
335
+ """
252
336
  for frame in self.frame_iterator:
253
337
  yield VideoReaderFrame(frame)
254
338
 
255
339
  def iterate_with_audio(
256
- self
257
- ):
258
- for frame_or_packet in self.frame_with_audio_iterator:
259
- yield VideoReaderFrame(frame_or_packet)
260
-
340
+ self,
341
+ do_decode_video: bool = True,
342
+ do_decode_audio: bool = False
343
+ ) -> 'Iterator[Union[VideoReaderFrame, VideoReaderPacket, None]]':
344
+ """
345
+ Iterator to iterate over the video and audio
346
+ packets, decoded only if the parameters are
347
+ set as True.
261
348
 
349
+ If the packet is decoded, it will return each
350
+ frame individually as a VideoReaderFrame
351
+ instance. If not, the whole packet as a
352
+ VideoReaderPacket instance.
353
+ """
354
+ for packet in self.packet_with_audio_iterator:
355
+ is_video = packet.stream.type == 'video'
262
356
 
357
+ do_decode = (
358
+ (
359
+ is_video and
360
+ do_decode_video
361
+ ) or
362
+ (
363
+ not is_video and
364
+ do_decode_audio
365
+ )
366
+ )
263
367
 
264
- """
265
- Read this below if you can to combine videos
266
- that have not been written yet to the disk
267
- (maybe a composition in moviepy or I don't
268
- know).
368
+ if do_decode:
369
+ for frame in packet.decode():
370
+ # Return each frame decoded
371
+ yield VideoReaderFrame(frame)
372
+ else:
373
+ # Return the packet as it is
374
+ yield VideoReaderPacket(packet)
269
375
 
270
- Usar un pipe (sin escribir archivo completo)
271
- Puedes lanzar un proceso FFmpeg que envíe el vídeo a PyAV por stdin como flujo sin codificar (por ejemplo en rawvideo), así no tienes que escribir el archivo final.
272
- Ejemplo:
273
376
 
274
- PYTHON_CODE:
275
- import subprocess
276
- import av
277
377
 
278
- # FFmpeg produce frames en crudo por stdout
279
- ffmpeg_proc = subprocess.Popen(
280
- [
281
- "ffmpeg",
282
- "-i", "-", # Lee de stdin
283
- "-f", "rawvideo",
284
- "-pix_fmt", "rgba",
285
- "-"
286
- ],
287
- stdin=subprocess.PIPE,
288
- stdout=subprocess.PIPE
289
- )
290
378
 
291
- # Aquí enviarías los datos combinados desde tu programa al ffmpeg_proc.stdin
292
- # y podrías leer con PyAV o directamente procesar arrays de píxeles
379
+ """
380
+ When reading packets directly from the stream
381
+ we can receive packets with size=0, but we need
382
+ to process them and decode (or yield them). It
383
+ is only when we are passing packets to the mux
384
 + when we need to ignore the ones that are empty
385
+ (size=0).
293
386
 
294
- Esto es lo más usado para pipeline de vídeo en tiempo real.
387
+ TODO: Do we need to ignore all? By now, ignoring
388
+ not is causing exceptions, and ignoring them is
389
+ making it work perfectly.
295
390
  """
yta_video_opengl/tests.py CHANGED
@@ -3,6 +3,7 @@ Manual tests that are working and are interesting
3
3
  to learn about the code, refactor and build
4
4
  classes.
5
5
  """
6
+ from yta_validation import PythonValidator
6
7
  from yta_video_opengl.reader import VideoReader
7
8
  from yta_video_opengl.writer import VideoWriter
8
9
  from yta_timer import Timer
@@ -78,6 +79,13 @@ def video_modified_stored():
78
79
 
79
80
  # Decode first frame and use as texture
80
81
  first_frame = video.next_frame
82
 + # We need to reset it to begin pointing again
83
+ # to the first frame...
84
+ # TODO: Improve this by, maybe, storing the first
85
+ # frame in memory so we can append it later, or
86
+ # using the '.seek(0)' even when it could be not
87
+ # accurate
88
+ video = VideoReader(VIDEO_PATH)
81
89
 
82
90
  # Most of OpenGL textures expect origin in lower
83
91
  # left corner
@@ -100,109 +108,32 @@ def video_modified_stored():
100
108
  )
101
109
 
102
110
  frame_index = 0
103
- # for frame in video.frame_iterator:
104
- # with Timer(is_silent_as_context = True) as timer:
105
- # program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
106
-
107
- # # To numpy array and flip to OpenGL coordinates
108
- # image_array = np.flipud(
109
- # frame.to_ndarray(format = 'rgb24')
110
- # )
111
-
112
- # # Create texture
113
- # texture = context.texture((image_array.shape[1], image_array.shape[0]), 3, image_array.tobytes())
114
- # # Add frame to texture
115
- # image = frame.to_image().transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
116
- # texture.write(image.tobytes())
117
-
118
- # # Render to framebuffer with shader
119
- # fbo.clear(0.0, 0.0, 0.0)
120
- # texture.use()
121
- # vao.render(moderngl.TRIANGLE_STRIP)
122
-
123
- # # Read pixels from framebuffer
124
- # data = fbo.read(components = 3, alignment = 1)
125
-
126
- # # To numpy array and flip from OpenGL coordinates
127
- # image_output = np.flipud(
128
- # np.frombuffer(data, dtype = np.uint8).reshape((video.height, video.width, 3))
129
- # )
130
- # # Turn into a frame
131
- # video_frame = av.VideoFrame.from_ndarray(image_output, format = 'rgb24')
132
-
133
- # # Write
134
- # packet = video_writer.stream.encode(video_frame)
135
- # if packet:
136
- # video_writer.output.mux(packet)
137
-
138
- # print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
139
-
140
- # frame_index += 1
141
-
142
- # # Empty codification buffers
143
- # packet = video_writer.stream.encode(None)
144
- # if packet:
145
- # video_writer.output.mux(packet)
146
-
147
- # # This below is the main workflow with frames
148
- # # and packets when processing a video
149
- # do_process_video: bool = False
150
- # do_process_audio: bool = False
151
- # for frame_or_packet in video.iterate_with_audio():
152
- # if frame_or_packet.is_video_frame:
153
- # if do_process_video:
154
- # # If we are processing it, we need to decode
155
- # frame_decoded = frame_or_packet.decode()
156
- # else:
157
- # # if we are not processing it, just copy
158
- # # video_writer.output.mux(frame_or_packet)
159
- # pass
160
- # elif frame_or_packet.is_audio_frame:
161
- # if do_process_audio:
162
- # # If we are processing it, we need to decode
163
- # frame_decoded = frame_or_packet.decode()
164
- # else:
165
- # # if we are not processing it, just copy
166
- # # video_writer.output.mux(frame_or_packet)
167
- # pass
168
- # elif frame_or_packet.is_video_packet:
169
- # if do_process_video:
170
- # # If we are processing it, we need to decode
171
- # # the packet, that will give us an array of
172
- # # frames
173
- # for frame_decoded in frame_or_packet.decode():
174
- # pass
175
- # else:
176
- # # If we are not processing it, just copy
177
- # # video_writer.output.mux(packet)
178
- # pass
179
- # elif frame_or_packet.is_audio_packet:
180
- # if do_process_audio:
181
- # # If we are processing it, we need to decode
182
- # # the packet, that will give us an array of
183
- # # frames
184
- # for frame_decoded in frame_or_packet.decode():
185
- # pass
186
- # else:
187
- # # If we are not processing it, just copy
188
- # # video_writer.output.mux(packet)
189
- # pass
190
-
191
-
192
- for packet in video.frame_with_audio_iterator:
193
- # The frames don't come in a specific order.
194
- # In this specific case we receive 1 or 2
195
- # video frames per each audio frame, but
196
- # they come always ordered.
197
- if packet.stream.type == 'video':
198
- for frame in packet.decode():
199
- with Timer(is_silent_as_context = True) as timer:
111
+ for frame_or_packet in video.iterate_with_audio(
112
+ do_decode_video = True,
113
+ do_decode_audio = False
114
+ ):
115
+ # This below is because of the parameters we
116
+ # passed to the method
117
+ is_video_frame = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderFrame')
118
+ is_audio_packet = PythonValidator.is_instance_of(frame_or_packet, 'VideoReaderPacket')
119
+
120
+ # To simplify the process
121
+ if frame_or_packet is not None:
122
+ frame_or_packet = frame_or_packet.data
123
+
124
+ if is_audio_packet:
125
+ video_writer.mux(frame_or_packet)
126
+ elif is_video_frame:
127
+ with Timer(is_silent_as_context = True) as timer:
128
+
129
+ def process_frame(
130
+ frame: 'VideoFrame'
131
+ ):
200
132
  # Add some variables if we need, for the
201
133
  # opengl change we are applying (check the
202
134
  # program code)
203
135
  program['time'].value = T.video_frame_index_to_video_frame_time(frame_index, float(video.fps))
204
136
 
205
-
206
137
  # To numpy RGB inverted for OpenGL
207
138
  img_array = np.flipud(
208
139
  frame.to_ndarray(format = NUMPY_FORMAT)
@@ -226,42 +157,23 @@ def video_modified_stored():
226
157
  )
227
158
 
228
159
  # To VideoFrame and to buffer
229
- out_frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
160
+ frame = av.VideoFrame.from_ndarray(processed_data, format = NUMPY_FORMAT)
230
161
  # TODO: What is this for (?)
231
162
  #out_frame.pict_type = 'NONE'
163
+ return frame
232
164
 
233
- for v_packet in video_writer.video_stream.encode(out_frame):
234
- # Check that the packet received is not the
235
- # one that indicates the end (that must not
236
- # be passed to the mux)
237
- if v_packet.size > 0:
238
- video_writer.output.mux(v_packet)
239
-
240
- frame_index += 1
241
- print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
165
+ video_writer.mux_video_frame(process_frame(frame_or_packet))
242
166
 
243
- elif packet.stream.type == "audio":
244
- print('-- AUDIO --')
245
- # Check that the packet received is not the
246
- # one that indicates the end (that must not
247
- # be passed to the mux)
248
- if packet.size > 0:
249
- # Copy audio as it is
250
- video_writer.output.mux(packet)
167
+ print(f'Frame {str(frame_index)}: {timer.time_elapsed_str}s')
168
+ frame_index += 1
251
169
 
252
- # Empty buffers
253
- # This '.encode()' with no params will tell the
254
- # encoder that you will not send more packets,
255
- # so it can process the remaining ones, that
256
- # are the ones obtained in the 'packet' var.
257
170
  # While this code can be finished, the work in
258
171
  # the muxer could be not finished and have some
259
172
  # packets waiting to be written. Here we tell
260
173
  # the muxer to process all those packets.
261
- for packet in video_writer.video_stream.encode():
262
- video_writer.output.mux(packet)
174
+ video_writer.mux_video_frame(None)
263
175
 
264
176
  # TODO: Maybe move this to the '__del__' (?)
265
177
  video_writer.output.close()
266
178
  video.container.close()
267
- print(f'Saved as "{OUTPUT_PATH}".')
179
+ print(f'Saved as "{OUTPUT_PATH}".')
@@ -1,4 +1,9 @@
1
+ from yta_validation import PythonValidator
2
+ from yta_validation.parameter import ParameterValidator
1
3
  from av.stream import Stream
4
+ from av.packet import Packet
5
+ from av.video.frame import VideoFrame
6
+ from av.audio.frame import AudioFrame
2
7
  from av.video.stream import VideoStream
3
8
  from av.audio.stream import AudioStream
4
9
  from av.container.output import OutputContainer
@@ -101,6 +106,107 @@ class VideoWriter:
101
106
 
102
107
  return self
103
108
 
109
+ def encode_video_frame(
110
+ self,
111
+ frame: Union[VideoFrame, None] = None
112
+ ) -> list[Packet]:
113
+ """
114
+ Get the provided 'frame' but encoded for the
115
+ video stream, or the remaining packets if the
116
+ 'frame' parameter given is None.
117
+
118
+ The `.encode()` method with a `None` parameter
119
+ will tell the encoder that we will not send
120
+ more frames to encode so the remaining ones can
121
+ be processed, emptying the buffers.
122
+ """
123
+ ParameterValidator.validate_instance_of('frame', frame, VideoFrame)
124
+
125
+ return self.video_stream.encode(frame)
126
+
127
+ def encode_audio_frame(
128
+ self,
129
+ frame: Union[AudioFrame, None] = None
130
+ ) -> list[Packet]:
131
+ """
132
+ Get the provided 'frame' but encoded for the
133
+ audio stream, or the remaining packets if the
134
+ 'frame' parameter given is None.
135
+
136
+ The `.encode()` method with a `None` parameter
137
+ will tell the encoder that we will not send
138
+ more frames to encode so the remaining ones can
139
+ be processed, emptying the buffers.
140
+ """
141
+ ParameterValidator.validate_instance_of('frame', frame, AudioFrame)
142
+
143
+ return self.audio_stream.encode(frame)
144
+
145
+ def mux(
146
+ self,
147
+ packet: Packet
148
+ ) -> 'VideoWriter':
149
+ """
150
+ Add the provided video or audio 'packet'
151
+ to the mux.
152
+
153
+ Packets with a size of 0 will be discarded,
154
+ as they are indicators of the end.
155
+ """
156
+ ParameterValidator.validate_mandatory_instance_of('packet', packet, Packet)
157
+
158
+ if packet.size > 0:
159
+ self.output.mux(packet)
160
+
161
+ return self
162
+
163
+ def mux_video_frame(
164
+ self,
165
+ frame: Union[VideoFrame, None] = None
166
+ ) -> 'VideoWriter':
167
+ """
168
+ Encode the provided 'frame' and add the
169
+ obtained packets to the multiplexing (mux)
170
+ process.
171
+
172
+ If `None` provided, it will obtain the
173
+ remaining packets and add those ones to
174
+ the multiplexing (mux) process.
175
+
176
+ Packets with a size of 0 will be discarded,
177
+ as they are indicators of the end.
178
+ """
179
+ ParameterValidator.validate_instance_of('frame', frame, VideoFrame)
180
+
181
+ for packet in self.encode_video_frame(frame):
182
+ self.mux(packet)
183
+
184
+ return self
185
+
186
+ def mux_audio_frame(
187
+ self,
188
+ frame: Union[AudioFrame, None] = None
189
+ ) -> 'VideoWriter':
190
+ """
191
+ Encode the provided 'frame' and add the
192
+ obtained packets to the multiplexing (mux)
193
+ process.
194
+
195
+ If `None` provided, it will obtain the
196
+ remaining packets and add those ones to
197
+ the multiplexing (mux) process.
198
+
199
+ Packets with a size of 0 will be discarded,
200
+ as they are indicators of the end.
201
+ """
202
+ ParameterValidator.validate_instance_of('frame', frame, AudioFrame)
203
+
204
+ for packet in self.encode_audio_frame(frame):
205
+ self.mux(packet)
206
+
207
+ return self
208
+
209
+
104
210
  """
105
211
  # TODO: Check 'https://www.youtube.com/watch?v=OlNWCpFdVMA'
106
212
  # for ffmpeg with mp3 access
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.3
3
+ Version: 0.0.5
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -0,0 +1,8 @@
1
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
+ yta_video_opengl/reader.py,sha256=ROjx5jSi_M2KWcp1Fksi4y-apb1IvItPhpLF32u5DpU,9869
3
+ yta_video_opengl/tests.py,sha256=7a_n-xW2xBVYR1sj-IuGm8c8F6It2LHZ8GqCH_SeY-U,6081
4
+ yta_video_opengl/writer.py,sha256=9i1SSaOxWF-uBTWvpToP8juIqSWhbIh_Nfyi69adWIw,6356
5
+ yta_video_opengl-0.0.5.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
6
+ yta_video_opengl-0.0.5.dist-info/METADATA,sha256=hH7uBbRBkNQ44kIEi1oKB_s3fGLtwcnd0rdZB_M3bVw,670
7
+ yta_video_opengl-0.0.5.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
8
+ yta_video_opengl-0.0.5.dist-info/RECORD,,
@@ -1,8 +0,0 @@
1
- yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
- yta_video_opengl/reader.py,sha256=ffq_mliXUjz8N7_ZOoX97P2wTDeMjW2rhGceqItCvtM,7394
3
- yta_video_opengl/tests.py,sha256=MLLpIVtdAA1L7ZpaZE6AH5gUJpiPxmvg-pktUcubOmw,10015
4
- yta_video_opengl/writer.py,sha256=dSlzUvyuQsZvghs8-7QASVvejBtm1ysv4V84quNB9Sw,3033
5
- yta_video_opengl-0.0.3.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
6
- yta_video_opengl-0.0.3.dist-info/METADATA,sha256=Om_IJ8JtdlnUATEkxVPTfO8Fs1eiDqVszPHsEnqZfj4,670
7
- yta_video_opengl-0.0.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
8
- yta_video_opengl-0.0.3.dist-info/RECORD,,