yta-video-opengl 0.0.11__py3-none-any.whl → 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -166,7 +166,7 @@ class VideoFrameCache:
 
         return frame
 
-    def _get_frame_by_pts(
+    def get_frame_from_pts(
         self,
         pts: int
     ) -> Union[VideoFrame, AudioFrame, None]:
@@ -181,6 +181,9 @@ class VideoFrameCache:
         This method must be called when the frame
         requested is not stored in the cache.
         """
+        if pts in self.cache:
+            return self.cache[pts]
+
         # Look for the nearest key frame
         key_frame_pts = self._get_nearest_keyframe_fps(pts)
 
@@ -218,7 +221,7 @@ class VideoFrameCache:
         return (
             self.cache[pts]
             if pts in self.cache else
-            self._get_frame_by_pts(pts)
+            self.get_frame_from_pts(pts)
         )
 
     def get_frame_from_t(
@@ -229,7 +232,13 @@ class VideoFrameCache:
         Get the frame with the given 't' time moment
         from the cache.
         """
-        return self.get_frame(T.video_frame_time_to_video_frame_index(t, self.fps))
+        pts = t_to_pts(t, self.time_base)
+
+        return (
+            self.cache[pts]
+            if pts in self.cache else
+            self.get_frame_from_pts(pts)
+        )
 
     def get_frames(
         self,
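
A note on the hunk above: get_frame_from_t now maps the 't' time moment to a pts with t_to_pts() and uses that pts as the cache key. A minimal sketch of that conversion and of its inverse, pts_to_t() (shown later in the utils.py diff), using Fraction to keep the arithmetic exact; the sample rate and the time moment are illustrative only:

    from fractions import Fraction

    sample_rate = 44100
    time_base = Fraction(1, sample_rate)    # illustrative audio time base
    t = 1.5                                 # seconds

    # Presumably what t_to_pts() does: a time moment becomes an integer tick count
    pts = int(Fraction(t) / time_base)      # 66150

    # pts_to_t() goes the other way (pts * time_base)
    t_again = float(pts * time_base)        # 1.5
    print(pts, t_again)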
@@ -240,20 +249,46 @@ class VideoFrameCache:
         Get all the frames in the range between
         the provided 'start' and 'end' time in
         seconds.
+
+        This method is an iterator that yields
+        the frame, its t and its index.
         """
         # We use the cache as iterator if all the frames
         # requested are stored there
-        pts_list = [
-            t_to_pts(t, self.time_base)
-            for t in T.get_frame_indexes(self.stream.duration, self.fps, start, end)
-        ]
-
-        if all(
-            pts in self.cache
-            for pts in pts_list
-        ):
-            for pts in pts_list:
-                yield self.cache[pts]
+        # TODO: I think this is not ok... I will never
+        # have all the pts from here stored, as they come
+        # from 't' that is different...
+
+        """
+        Feel free to move this explanation to another
+        place, it is about the duration.
+
+        The stream 'duration' parameter is measured
+        in ticks, the amount of ticks that the
+        stream lasts. Below is an example:
+
+        - Duration raw: 529200
+        - Time base: 1/44100
+        - Duration (seconds): 12.0
+        """
+
+        # The 'duration' is in pts ticks
+        duration = float(self.stream.duration * self.stream.time_base)
+        print(f'duration of the whole stream: {str(duration)}s, asking for [{str(start)}, {str(end)})')
+        # TODO: I think it would be better to
+        # receive and work with pts instead of
+        # 't' time moments...
+        # pts_list = [
+        #     t_to_pts(t, self.time_base)
+        #     for t in T.get_frame_indexes(duration, self.fps, start, end)
+        # ]
+
+        # if all(
+        #     pts in self.cache
+        #     for pts in pts_list
+        # ):
+        #     for pts in pts_list:
+        #         yield self.cache[pts]
 
         # If not all, we ignore the cache because we
         # need to decode and they are all consecutive
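
The docstring added in the hunk above describes the stream 'duration' in time-base ticks. A standalone check of that conversion with the exact example values quoted there (Fraction keeps the arithmetic exact):

    from fractions import Fraction

    duration_ticks = 529200            # stream.duration, in time-base ticks
    time_base = Fraction(1, 44100)     # stream.time_base

    print(float(duration_ticks * time_base))   # 12.0 seconds

    # And the inverse direction, seconds back to ticks
    print(int(Fraction(12) / time_base))       # 529200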
@@ -276,16 +311,82 @@ class VideoFrameCache:
             # We store all the frames in cache
             self._store_frame_in_cache(frame)
 
-            if frame.pts < start:
+            print(frame)
+            frame_end_pts = frame.pts + int(frame.samples * (1 / self.stream.sample_rate) / self.time_base)
+            #frame_end_pts = frame.pts + int(frame.samples)
+            #frame_end_pts = frame.pts + int(frame.samples / (self.stream.sample_rate * self.time_base))
+            print(f' Frame from [{str(frame.pts)}, {str(frame_end_pts)}] and looking for [{str(start)}, {str(end)}]')
+
+            # For the next comments imagine we are looking
+            # for the [1.0, 2.0) audio time range
+            # Previous frame and nothing is inside
+            if frame_end_pts <= start:
+                # From 0.25 to 1.0
                 continue
 
+            # We finished, nothing is inside and it's after
             if (
                 end is not None and
-                frame.pts > end
+                frame.pts >= end
             ):
+                # From 2.0 to 2.75
                 return
+
+            # We need: from 1 to 2
+            # Audio is:
+            # - from 0 to 0.75 (Not included, omit)
+            # - from 0.5 to 1.5 (Included, take 1.0 to 1.5)
+            # - from 0.5 to 2.5 (Included, take 1.0 to 2.0)
+            # - from 1.25 to 1.5 (Included, take 1.25 to 1.5)
+            # - from 1.25 to 2.5 (Included, take 1.25 to 2.0)
+            # - from 2.5 to 3.5 (Not included, omit)
+
+            # Below, at least a part is inside
+            if (
+                frame.pts < start and
+                frame_end_pts > start
+            ):
+                # A part at the end is included
+                end_time = (
+                    # From 0.5 to 1.5 => take 1.0 to 1.5
+                    frame_end_pts
+                    if frame_end_pts <= end else
+                    # From 0.5 to 2.5 => take 1.0 to 2.0
+                    end
+                )
+                print('A part at the end is included.')
+                # TODO: I'm using too much 'pts_to_t'
+                frame = trim_audio_frame_pts(
+                    frame = frame,
+                    start_pts = start,
+                    end_pts = end_time,
+                    time_base = self.time_base
+                )
+            elif (
+                frame.pts >= start and
+                frame.pts < end
+            ):
+                end_time = (
+                    # From 1.25 to 1.5 => take 1.25 to 1.5
+                    frame_end_pts
+                    if frame_end_pts <= end else
+                    # From 1.25 to 2.5 => take 1.25 to 2.0
+                    end
+                )
+                # A part at the beginning is included
+                print('A part at the beginning is included.')
+                # TODO: I'm using too much 'pts_to_t'
+                frame = trim_audio_frame_pts(
+                    frame = frame,
+                    start_pts = frame.pts,
+                    end_pts = end_time,
+                    time_base = self.time_base
+                )
+
+            # If the whole frame is in, pass it as it is
 
             # TODO: Maybe send a @dataclass instead (?)
+            # TODO: Do I really need these 't' and 'index' (?)
             yield (
                 frame,
                 pts_to_t(frame.pts, self.time_base),
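
The branching added above enumerates six overlap cases for the [1.0, 2.0) example, and they all reduce to clamping the frame's [pts, end_pts) range against the requested range, which is also what trim_audio_frame_pts does with max/min. A minimal standalone sketch of that idea (hypothetical helper, seconds used only for readability):

    def overlap_range(frame_start, frame_end, start, end):
        # Clamp the frame range against the requested range;
        # None means the frame falls completely outside of it
        cut_start = max(frame_start, start)
        cut_end = min(frame_end, end)
        return (cut_start, cut_end) if cut_start < cut_end else None

    # Using the [1.0, 2.0) example from the comments above:
    print(overlap_range(0.5, 1.5, 1.0, 2.0))   # (1.0, 1.5)
    print(overlap_range(2.5, 3.5, 1.0, 2.0))   # None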
@@ -300,4 +401,107 @@ class VideoFrameCache:
         """
         self.cache.clear()
 
-        return self
+        return self
+
+
+
+import av
+import numpy as np
+
+import av
+
+
+
+def trim_audio_frame_pts(
+    frame: av.AudioFrame,
+    start_pts: int,
+    end_pts: int,
+    time_base
+) -> av.AudioFrame:
+    """
+    Trim an AudioFrame to keep only the part between [start_pts, end_pts], in ticks (PTS).
+    """
+    samples = frame.to_ndarray()  # (channels, n_samples)
+    n_channels, n_samples = samples.shape
+    sr = frame.sample_rate
+
+    #frame_end_pts = frame.pts + int((n_samples / sr) / time_base)
+    # TODO: This could be wrong
+    frame_end_pts = frame.pts + int(frame.samples)
+
+    # Overlap in PTS
+    cut_start_pts = max(frame.pts, start_pts)
+    cut_end_pts = min(frame_end_pts, end_pts)
+
+    if cut_start_pts >= cut_end_pts:
+        raise Exception('Oops...')
+        return None  # No overlap
+
+    # Convert to sample indexes (ticks → seconds → samples)
+    cut_start_time = (cut_start_pts - frame.pts) * time_base
+    cut_end_time = (cut_end_pts - frame.pts) * time_base
+
+    start_idx = int(cut_start_time * sr)
+    end_idx = int(cut_end_time * sr)
+
+    print(
+        f"cutting [{frame.pts}, {frame_end_pts}] "
+        f"to [{cut_start_pts}, {cut_end_pts}] "
+        f"({start_idx}:{end_idx} / {frame.samples})"
+        #f"({start_idx}:{end_idx} / {n_samples})"
+    )
+
+    cut_samples = samples[:, start_idx:end_idx]
+
+    # Create the new AudioFrame
+    new_frame = av.AudioFrame.from_ndarray(cut_samples, format=frame.format, layout=frame.layout)
+    new_frame.sample_rate = sr
+
+    # Adjust the PTS → it corresponds to the real start of the cut
+    new_frame.pts = cut_start_pts
+    new_frame.time_base = time_base
+
+    return new_frame
+
+
+
+def trim_audio_frame_t(
+    frame: av.AudioFrame,
+    start_time: float,
+    end_time: float,
+    time_base
+) -> av.AudioFrame:
+    """
+    Trim an AudioFrame to keep only the part between [start_time, end_time], in seconds.
+    """
+    samples = frame.to_ndarray()  # (channels, n_samples)
+    n_channels, n_samples = samples.shape
+    sr = frame.sample_rate
+
+    frame_start = float(frame.pts * time_base)
+    frame_end = frame_start + (n_samples / sr)
+
+    # Compute the overlap in seconds
+    cut_start = max(frame_start, start_time)
+    cut_end = min(frame_end, end_time)
+
+    if cut_start >= cut_end:
+        return None  # No overlap
+
+    # Convert to sample indexes
+    start_idx = int((cut_start - frame_start) * sr)
+    end_idx = int((cut_end - frame_start) * sr)
+
+    print(f'cutting [{str(frame_start)}, {str(frame_end)}] to [{str(float(start_time))}, {str(float(end_time))}] from {str(start_idx)} to {str(end_idx)} of {str(int((frame_end - frame_start) * sr))}')
+    cut_samples = samples[:, start_idx:end_idx]
+
+    # Create the new AudioFrame
+    new_frame = av.AudioFrame.from_ndarray(cut_samples, format = frame.format, layout = frame.layout)
+    new_frame.sample_rate = sr
+
+    # Adjust the PTS → it corresponds to the real start of the cut
+    new_pts = int(cut_start / time_base)
+    new_frame.pts = new_pts
+    new_frame.time_base = time_base
+
+    return new_frame
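
The '# TODO: This could be wrong' inside trim_audio_frame_pts refers to the shortcut frame_end_pts = frame.pts + int(frame.samples), which only holds when the audio time base is 1/sample_rate (one tick per sample); get_audio_frame_pts_range in the utils.py diff below leans on the same assumption. A quick standalone check with illustrative values:

    from fractions import Fraction

    sample_rate = 44100
    nb_samples = 1024

    # One tick per sample: the frame spans exactly nb_samples ticks,
    # so pts + frame.samples is a valid end pts
    print(int(Fraction(nb_samples, sample_rate) / Fraction(1, sample_rate)))   # 1024

    # With any other time base (e.g. 1/90000) the shortcut breaks
    print(int(Fraction(nb_samples, sample_rate) / Fraction(1, 90000)))         # 2089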
yta_video_opengl/utils.py CHANGED
@@ -4,6 +4,7 @@ from av.video.stream import VideoStream
 from av.audio.stream import AudioStream
 from av.video.frame import VideoFrame
 from typing import Union
+from fractions import Fraction
 
 import av
 import numpy as np
@@ -351,4 +352,142 @@ def pts_to_t(
     Transform a 'pts' packet timestamp to a 't'
     time moment.
     """
-    return pts * stream_time_base
+    return pts * stream_time_base
+
+
+# TODO: Move this to another utils
+def get_silent_audio_frame(
+    sample_rate,
+    layout = "stereo",
+    nb_samples = 1024,
+    format = "s16"
+):
+    # Number of channels
+    channels = len(av.AudioLayout(layout).channels)
+
+    # We create an array of zeros (silence)
+    # The dtype depends on the format
+    # if format in ('s16', 's16p'):
+    #     dtype = np.int16
+    # elif format in ('flt', 'fltp'):
+    #     dtype = np.float32
+    # else:
+    #     raise ValueError(f"Unsupported format: {format}")
+
+    if format == "s16":
+        dtype = np.int16
+    elif format in ('flt', 'fltp'):
+        dtype = np.float32
+    else:
+        raise ValueError(f"Unsupported format: {format}")
+
+    # For packed formats → (1, samples * channels)
+
+    if layout == 'stereo':
+        silent_array = np.zeros((2, nb_samples), dtype = dtype)
+    else:
+        silent_array = np.zeros((1, nb_samples), dtype = dtype)
+
+    # # If planar: (channels, samples) | If packed: (samples, channels)
+    # if format.endswith("p"):  # planar
+    #     silent_array = np.zeros((channels, nb_samples), dtype=dtype)
+    # else:  # packed
+    #     silent_array = np.zeros((nb_samples, channels), dtype=dtype)
+
+    # Create the audio frame
+    frame = av.AudioFrame.from_ndarray(silent_array, format=format, layout=layout)
+    frame.sample_rate = sample_rate
+
+    return frame
+
+def get_black_background_video_frame(
+    size: tuple[int, int] = (1920, 1080),
+    format: str = 'rgb24'
+):
+    return av.VideoFrame.from_ndarray(
+        # TODO: What if we want alpha (?)
+        array = np.zeros((size[0], size[1], 3), dtype = np.uint8),
+        format = format
+    )
+
+def get_audio_frame_pts_range(
+    frame: av.AudioFrame,
+    do_in_seconds: bool = False
+):
+    """
+    Get the [start_pts, end_pts) range of the
+    pyav AudioFrame, or in seconds if the
+    'do_in_seconds' parameter is True.
+    """
+    if frame.pts is None:
+        raise Exception('No "pts" found.')
+
+    # First and last sample. Remember that
+    # the last one is not included
+    start_pts = frame.pts
+    end_pts = frame.pts + frame.samples
+
+    # Time base for the seconds conversion
+    time_base = (
+        frame.time_base
+        if frame.time_base else
+        Fraction(1, frame.sample_rate)
+    )
+
+    start_time = (
+        float(start_pts * time_base)
+        if do_in_seconds else
+        start_pts
+    )
+    end_time = (
+        float(end_pts * time_base)
+        if do_in_seconds else
+        end_pts
+    )
+
+    return (
+        start_time,
+        end_time
+    )
+
+def audio_frames_and_remainder_per_video_frame(
+    fps: float,
+    sample_rate: int,
+    nb_samples: int
+):
+    """
+    Calculate how many full audio frames and how many
+    remaining samples correspond to the duration of
+    1 video frame.
+
+    Args:
+        fps (float): Frames per second of the video
+        sample_rate (int): Audio sampling rate (Hz)
+        nb_samples (int): Number of samples per AudioFrame (PyAV)
+
+    Returns:
+        (int, int): (full_frames, remaining_samples)
+    """
+    # Duration of one video frame in seconds
+    video_frame_duration = 1.0 / fps
+
+    # Total audio samples needed
+    samples_needed = round(sample_rate * video_frame_duration)
+
+    # How many full audio frames of nb_samples fit
+    full_frames = samples_needed // nb_samples
+
+    # Remainder that does not fill a full audio frame
+    remainder = samples_needed % nb_samples
+
+    return full_frames, remainder
+
+# # Usage below:
+# fps = 30
+# sample_rate = 44100
+# nb_samples = 1024
+
+# full, rem = audio_frames_and_remainder_per_video_frame(fps, sample_rate, nb_samples)
+# # This will return (1, 446)
+
+
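
get_audio_frame_pts_range above returns a half-open [start_pts, end_pts) range ('the last one is not included'). A small illustration of why that convention is convenient, with illustrative numbers only:

    from fractions import Fraction

    sample_rate = 44100
    nb_samples = 1024
    time_base = Fraction(1, sample_rate)

    # Three consecutive frames of 1024 samples each, pts counted in samples
    ranges = [(i * nb_samples, (i + 1) * nb_samples) for i in range(3)]
    print(ranges)                            # [(0, 1024), (1024, 2048), (2048, 3072)]

    # The end of one frame equals the start of the next: half-open ranges
    # tile the timeline with no overlap and no gap
    print(float(ranges[0][1] * time_base))   # ~0.02322 seconds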
@@ -55,7 +55,7 @@ class VideoWriter:
         self.video_stream: VideoStream = self.output.add_stream(
             # TODO: Maybe 'libx264' as default 'codec_name' (?)
             codec_name = codec_name,
-            rate = fps,
+            rate = int(fps),
             options = options
         )
 
@@ -91,17 +91,27 @@ class VideoWriter:
 
     def set_audio_stream(
         self,
-        codec_name: Union[str, None]
+        codec_name: Union[str, None],
+        fps: float = 44_100.0
         # TODO: Add more if needed
     ) -> 'VideoWriter':
         """
        Set the audio stream, that will overwrite any other
        previous audio stream set.
        """
+        # TODO: Check what else we can set
         self.audio_stream: AudioStream = self.output.add_stream(
-            codec_name = codec_name
+            codec_name = codec_name,
+            rate = int(fps)
         )
 
+        # audio_stream = output.add_stream("aac", rate=48000)  # AAC codec, 48 kHz
+        # # Configure the stream
+        # audio_stream.channels = 2         # number of channels
+        # audio_stream.layout = "stereo"    # layout
+        # audio_stream.sample_rate = 48000  # sample rate
+        # audio_stream.format = "s16"       # sample format (PCM signed 16-bit)
+
         # TODO: Add more if needed
 
         return self
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.11
+Version: 0.0.12
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -0,0 +1,20 @@
+yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
+yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
+yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+yta_video_opengl/complete/timeline.py,sha256=mf5QH7pFHvQCfDFpZd4HhyHSinvU-RDtvRhXaKOTNuY,9096
+yta_video_opengl/complete/track.py,sha256=vDGjkMxYY9y_KHFZb-kvN0K1Yt3_foiR_eTB3fIJxVY,12923
+yta_video_opengl/complete/video_on_track.py,sha256=jbRXX6KMwtvshcW1ce2FRAVKlVRHYxyyY-IOzKSO8-I,4364
+yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
+yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
+yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
+yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
+yta_video_opengl/reader/__init__.py,sha256=da4Jqtqi7xjbjgfzwJpXIMkILz0SKLedt8KDFqmy0Is,20848
+yta_video_opengl/reader/cache.py,sha256=KOFU3BtymZeel3FIvgxrWm9siUlakfNZdJzQUbFxavg,16657
+yta_video_opengl/tests.py,sha256=p2Pq4o2H0DMZkV7HNNNAlebSjrDMHKTKk0d_weiiPHQ,26221
+yta_video_opengl/utils.py,sha256=znlkvL5xjQeeN37cqFHNILBgop4W1PQTfFFudyt8-60,14614
+yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
+yta_video_opengl/writer.py,sha256=wcDVL6Av-16kgx1X_LCAgKboa1eGnKvXaKuGPOsky-s,8880
+yta_video_opengl-0.0.12.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+yta_video_opengl-0.0.12.dist-info/METADATA,sha256=Q-stok3ZF1NXMjfNxzSA8GBjPs6-yb4BnrPg1cxjTds,671
+yta_video_opengl-0.0.12.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+yta_video_opengl-0.0.12.dist-info/RECORD,,
@@ -1,20 +0,0 @@
-yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
-yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
-yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-yta_video_opengl/complete/timeline.py,sha256=o6nAtpkdr6RzYlIEB9kLfNAZiu9eRXyA6QE3TXuNL_I,5465
-yta_video_opengl/complete/track.py,sha256=gPushEuLYIoMBjvS3wTDbOKSV8VEtVTqrEm666ABFUc,3937
-yta_video_opengl/complete/video_on_track.py,sha256=sTSd8gqNwK1wT1uzJEGkJqTbu5qM3wWqk361K7OAbl0,2851
-yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
-yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
-yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
-yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
-yta_video_opengl/reader/__init__.py,sha256=zqVMMUjWHCsOaWFrnkYdnbO4YEAx3zLIBTuMnzQJRug,17180
-yta_video_opengl/reader/cache.py,sha256=Gtwl2kRLSJn3JJOVI-45Wc2vMPYj5-_g3AR5wPbw8OY,9152
-yta_video_opengl/tests.py,sha256=p2Pq4o2H0DMZkV7HNNNAlebSjrDMHKTKk0d_weiiPHQ,26221
-yta_video_opengl/utils.py,sha256=_89-IrTDFVbY86qLlJzw42MxYAsNAnux_DOO89mul64,10710
-yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
-yta_video_opengl/writer.py,sha256=rzOfxPtlClLRIVLFyIOoBVcEgse3ISmpKRWHg8OwFHQ,8355
-yta_video_opengl-0.0.11.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
-yta_video_opengl-0.0.11.dist-info/METADATA,sha256=8JNEZ05Elie6lMQREjRjGPQQ-kLW-GCW6uZpePTiLgY,671
-yta_video_opengl-0.0.11.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-yta_video_opengl-0.0.11.dist-info/RECORD,,