yta-video-opengl 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/utils.py CHANGED
@@ -1,10 +1,11 @@
1
+ from yta_video_opengl.t import fps_to_time_base
1
2
  from yta_validation import PythonValidator
2
3
  from av.container import InputContainer
3
4
  from av.video.stream import VideoStream
4
5
  from av.audio.stream import AudioStream
5
6
  from av.video.frame import VideoFrame
7
+ from quicktions import Fraction
6
8
  from typing import Union
7
- from fractions import Fraction
8
9
 
9
10
  import av
10
11
  import numpy as np
@@ -311,9 +312,13 @@ def iterate_stream_frames_demuxing(
311
312
  pts_to_index(frame.pts, time_base, average_rate)
312
313
  )
313
314
 
315
+ # TODO: These methods below have to be
316
+ # removed because we created the new T
317
+ # class that is working with Fractions
318
+ # to be precise
314
319
  def t_to_pts(
315
- t: float,
316
- stream_time_base: 'Fraction'
320
+ t: Union[int, float, Fraction],
321
+ stream_time_base: Fraction
317
322
  ) -> int:
318
323
  """
319
324
  Transform a 't' time moment (in seconds) to
@@ -324,8 +329,8 @@ def t_to_pts(
324
329
 
325
330
  def pts_to_index(
326
331
  pts: int,
327
- stream_time_base: 'Fraction',
328
- fps: float
332
+ stream_time_base: Fraction,
333
+ fps: Union[float, Fraction]
329
334
  ) -> int:
330
335
  """
331
336
  Transform a 'pts' packet timestamp to a
@@ -335,8 +340,8 @@ def pts_to_index(
335
340
 
336
341
  def index_to_pts(
337
342
  index: int,
338
- stream_time_base: 'Fraction',
339
- fps: float
343
+ stream_time_base: Fraction,
344
+ fps: Union[float, Fraction]
340
345
  ) -> int:
341
346
  """
342
347
  Transform a frame index into a 'pts' packet
@@ -346,7 +351,7 @@ def index_to_pts(
346
351
 
347
352
  def pts_to_t(
348
353
  pts: int,
349
- stream_time_base: 'Fraction'
354
+ stream_time_base: Fraction
350
355
  ) -> float:
351
356
  """
352
357
  Transform a 'pts' packet timestamp to a 't'
@@ -357,59 +362,82 @@ def pts_to_t(
357
362
 
358
363
  # TODO: Move this to another utils
359
364
  def get_silent_audio_frame(
360
- sample_rate,
361
- layout="stereo",
362
- nb_samples=1024,
363
- format="s16"
365
+ sample_rate: int,
366
+ layout = 'stereo',
367
+ number_of_samples: int = 1024,
368
+ format = 's16'
364
369
  ):
365
- # Número de canales
366
- channels = len(av.AudioLayout(layout).channels)
367
-
368
- # Creamos un array de ceros (silencio)
369
- # dtype depende del formato
370
- # if format in ('s16', 's16p'):
371
- # dtype = np.int16
372
- # elif format in ('flt', 'fltp'):
373
- # dtype = np.float32
374
- # else:
375
- # raise ValueError(f"Formato no soportado: {format}")
376
-
377
- if format == "s16":
378
- dtype = np.int16
379
- elif format in ('flt', 'fltp'):
380
- dtype = np.float32
381
- else:
382
- raise ValueError(f"Formato no soportado: {format}")
383
-
384
- # Para formatos packed (1, samples * channels)
385
-
386
- if layout == 'stereo':
387
- silent_array = np.zeros((2, nb_samples), dtype = dtype)
388
- else:
389
- silent_array = np.zeros((1, nb_samples), dtype = dtype)
390
-
391
- # # Si es planar: (channels, samples) | Si es packed: (samples, channels)
392
- # if format.endswith("p"): # planar
393
- # silent_array = np.zeros((channels, nb_samples), dtype=dtype)
394
- # else: # packed
395
- # silent_array = np.zeros((nb_samples, channels), dtype=dtype)
396
-
397
- # Crear frame de audio
398
- frame = av.AudioFrame.from_ndarray(silent_array, format=format, layout=layout)
370
+ # TODO: This could be a utils or something to
371
+ # directly transform format into dtype
372
+ dtype = {
373
+ 's16': np.int16,
374
+ 'flt': np.float32,
375
+ 'fltp': np.float32
376
+ }.get(format, None)
377
+
378
+ if dtype is None:
379
+ raise Exception(f'The format "{format}" is not accepted.')
380
+
381
+ number_of_channels = len(av.AudioLayout(layout).channels)
382
+ # TODO: I think the option above is better
383
+ # number_of_channels = (
384
+ # 2
385
+ # if layout == 'stereo' else
386
+ # 1
387
+ # )
388
+
389
+ # For packed (or planar) formats we apply:
390
+ # (1, samples * channels). This is the same
391
+ # amount of data but planar, in 1D only
392
+ # TODO: This wasn't in the previous version
393
+ # and it was working, we were sending the
394
+ # same 'number_of_samples' even when 'fltp'
395
+ # that includes the 'p'
396
+ # TODO: This is making the audio last 2x
397
+ # if 'p' in format:
398
+ # number_of_samples *= number_of_channels
399
+
400
+ silent_array = np.zeros((number_of_channels, number_of_samples), dtype = dtype)
401
+ frame = av.AudioFrame.from_ndarray(silent_array, format = format, layout = layout)
399
402
  frame.sample_rate = sample_rate
400
403
 
401
404
  return frame
402
405
 
403
406
  def get_black_background_video_frame(
404
407
  size: tuple[int, int] = (1920, 1080),
405
- format: str = 'rgb24'
408
+ format: str = 'rgb24',
409
+ pts: Union[int, None] = None,
410
+ time_base: Union[Fraction, None] = None
406
411
  ):
407
- return av.VideoFrame.from_ndarray(
412
+ """
413
+ Get a pyav VideoFrame that is a completely black
414
+ frame. If the 'pts' and/or 'time_base' parameters
415
+ are provided, they will be set to the frame that
416
+ is returned with them. If not, remember to set
417
+ later because they are needed to be sent to the
418
+ pyav muxer.
419
+ """
420
+ frame = av.VideoFrame.from_ndarray(
408
421
  # TODO: What if we want alpha (?)
409
- array = np.zeros((size[0], size[1], 3), dtype = np.uint8),
422
+ # Size must be inverted
423
+ array = np.zeros((size[1], size[0], 3), dtype = np.uint8),
410
424
  format = format
411
425
  )
412
426
 
427
+ frame.pts = (
428
+ pts
429
+ if pts is not None else
430
+ frame.pts
431
+ )
432
+
433
+ frame.time_base = (
434
+ time_base
435
+ if time_base is not None else
436
+ frame.time_base
437
+ )
438
+
439
+ return frame
440
+
413
441
  def get_audio_frame_pts_range(
414
442
  frame: av.AudioFrame,
415
443
  do_in_seconds: bool = False
@@ -451,43 +479,37 @@ def get_audio_frame_pts_range(
451
479
  )
452
480
 
453
481
  def audio_frames_and_remainder_per_video_frame(
454
- fps: float,
455
- sample_rate: int,
456
- nb_samples: int
457
- ):
482
+ # TODO: Maybe force 'fps' as int (?)
483
+ video_fps: Union[float, Fraction],
484
+ sample_rate: int, # audio_fps
485
+ number_of_samples_per_audio_frame: int
486
+ ) -> tuple[int, int]:
458
487
  """
459
- Calcula cuántos audio frames completos y cuántas muestras sobrantes
460
- corresponden a la duración de 1 frame de vídeo.
461
-
462
- Args:
463
- fps (float): Frames por segundo del vídeo
464
- sample_rate (int): Frecuencia de muestreo del audio (Hz)
465
- nb_samples (int): Número de samples por AudioFrame (PyAV)
466
-
467
- Returns:
468
- (int, int): (frames_completos, muestras_restantes)
488
+ Get how many full silent audio frames we
489
+ need and the remainder for the last one
490
+ (that could be not complete), according
491
+ to the parameters provided.
492
+
493
+ This method returns a tuple containing
494
+ the number of full silent audio frames
495
+ we need and the number of samples we need
496
+ in the last non-full audio frame.
469
497
  """
470
- # Duración de un frame de vídeo en segundos
471
- video_frame_duration = 1.0 / fps
472
-
473
- # Total de samples de audio necesarios
474
- samples_needed = round(sample_rate * video_frame_duration)
475
-
476
- # Cuántos audio frames completos de nb_samples
477
- full_frames = samples_needed // nb_samples
498
+ # Video frame duration (in seconds)
499
+ time_base = fps_to_time_base(video_fps)
500
+ sample_rate = Fraction(int(sample_rate), 1)
501
+
502
+ # Example:
503
+ # 44_100 / 60 = 735 -> This means that we
504
+ # will have 735 samples of sound per each
505
+ # video frame
506
+ # The amount of samples per frame is actually
507
+ # the amount of samples we need, because we
508
+ # are generating it...
509
+ samples_per_frame = sample_rate * time_base
510
+ # The 'nb_samples' is the amount of samples
511
+ # we are including on each audio frame
512
+ full_audio_frames_needed = samples_per_frame // number_of_samples_per_audio_frame
513
+ remainder = samples_per_frame % number_of_samples_per_audio_frame
478
514
 
479
- # Restante que no completa un audio frame
480
- remainder = samples_needed % nb_samples
481
-
482
- return full_frames, remainder
483
-
484
- # # Usage below:
485
- # fps = 30
486
- # sample_rate = 44100
487
- # nb_samples = 1024
488
-
489
- # full, rem = audio_frames_and_remainder_per_video_frame(fps, sample_rate, nb_samples)
490
- # # This will return (1, 446)
491
-
492
-
493
-
515
+ return int(full_audio_frames_needed), int(remainder)
yta_video_opengl/video.py CHANGED
@@ -1,7 +1,8 @@
1
1
  from yta_video_opengl.reader import VideoReader
2
2
  from yta_video_opengl.writer import VideoWriter
3
- from yta_video_opengl.utils import iterate_stream_frames_demuxing
3
+ from yta_video_opengl.t import T
4
4
  from yta_validation import PythonValidator
5
+ from quicktions import Fraction
5
6
  from typing import Union
6
7
 
7
8
 
@@ -26,7 +27,8 @@ class Video:
26
27
  This timestamp is used to read the video
27
28
  file source.
28
29
  """
29
- return int(self.start / self.reader.time_base)
30
+ # TODO: What if 'time_base' is None (?)
31
+ return T(self.start, self.reader.time_base).truncated_pts
30
32
 
31
33
  @property
32
34
  def end_pts(
@@ -40,7 +42,8 @@ class Video:
40
42
  file source.
41
43
  """
42
44
  return (
43
- int(self.end / self.reader.time_base)
45
+ # TODO: What if 'time_base' is None (?)
46
+ T(self.end, self.reader.time_base).truncated_pts
44
47
  # TODO: What do we do if no duration (?)
45
48
  if self.duration is not None else
46
49
  None
@@ -54,7 +57,8 @@ class Video:
54
57
  The start packet time stamp (pts), needed
55
58
  to optimize the packet iteration process.
56
59
  """
57
- return int(self.start / self.reader.audio_time_base)
60
+ # TODO: What if 'audio_time_base' is None (?)
61
+ return T(self.start, self.reader.audio_time_base).truncated_pts
58
62
 
59
63
  @property
60
64
  def audio_end_pts(
@@ -65,7 +69,8 @@ class Video:
65
69
  optimize the packet iteration process.
66
70
  """
67
71
  return (
68
- int(self.end / self.reader.audio_time_base)
72
+ # TODO: What if 'audio_time_base' is None (?)
73
+ T(self.end, self.reader.audio_time_base).truncated_pts
69
74
  # TODO: What do we do if no duration (?)
70
75
  if self.duration is not None else
71
76
  None
@@ -74,7 +79,7 @@ class Video:
74
79
  @property
75
80
  def duration(
76
81
  self
77
- ):
82
+ ) -> Fraction:
78
83
  """
79
84
  The duration of the video.
80
85
  """
@@ -83,7 +88,7 @@ class Video:
83
88
  @property
84
89
  def number_of_frames(
85
90
  self
86
- ):
91
+ ) -> Union[int, None]:
87
92
  """
88
93
  The number of frames of the video.
89
94
  """
@@ -117,8 +122,8 @@ class Video:
117
122
  def __init__(
118
123
  self,
119
124
  filename: str,
120
- start: float = 0.0,
121
- end: Union[float, None] = None
125
+ start: Union[int, float, Fraction] = 0.0,
126
+ end: Union[int, float, Fraction, None] = None
122
127
  ):
123
128
  self.filename: str = filename
124
129
  """
@@ -130,12 +135,12 @@ class Video:
130
135
  """
131
136
  The pyav video reader.
132
137
  """
133
- self.start: float = start
138
+ self.start: Fraction = Fraction(start)
134
139
  """
135
140
  The time moment 't' in which the video
136
141
  should start.
137
142
  """
138
- self.end: Union[float, None] = (
143
+ self.end: Union[Fraction, None] = Fraction(
139
144
  # TODO: Is this 'end' ok (?)
140
145
  self.reader.duration
141
146
  if end is None else
@@ -145,12 +150,80 @@ class Video:
145
150
  The time moment 't' in which the video
146
151
  should end.
147
152
  """
153
+
154
+ # TODO: We need to implement the 'get_frame'
155
+ # methods because this Video can be subclipped
156
+ # and have a 'start' and 'end' that are
157
+ # different from [0, end)
158
+ def _get_real_t(
159
+ self,
160
+ t: Union[int, float, Fraction]
161
+ ) -> Fraction:
162
+ """
163
+ Get the real 't' time moment based on the
164
+ video 'start' and 'end'. If they were
165
+ asking for the t=0.5s but our video was
166
+ subclipped to [1.0, 2.0), the 0.5s must be
167
+ actually the 1.5s of the video because of
168
+ the subclipped time range.
169
+ """
170
+ t = self.start + t
148
171
 
172
+ if t >= self.end:
173
+ raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
174
+
175
+ return t
176
+
177
+ def get_frame_from_t(
178
+ self,
179
+ t: Union[int, float, Fraction]
180
+ ) -> 'VideoFrame':
181
+ """
182
+ Get the video frame with the given 't' time
183
+ moment, using the video cache system.
184
+ """
185
+ return self.reader.video_cache.get_frame_from_t(self._get_real_t(t))
186
+
187
+ def get_audio_frame_from_t(
188
+ self,
189
+ t: Union[int, float, Fraction]
190
+ ) -> 'AudioFrame':
191
+ """
192
+ Get the audio frame with the given 't' time
193
+ moment, using the audio cache system.
194
+ """
195
+ return self.reader.audio_cache.get_frame_from_t(self._get_real_t(t))
196
+
197
+ def get_audio_frames_from_t(
198
+ self,
199
+ t: Union[int, float, Fraction]
200
+ ):
201
+ """
202
+ Get the sequence of audio frames for the
203
+ given video 't' time moment, using the
204
+ audio cache system.
205
+
206
+ This is useful when we want to write a
207
+ video frame with its audio, so we obtain
208
+ all the audio frames associated to it
209
+ (remember that a video frame is associated
210
+ with more than 1 audio frame).
211
+ """
212
+ for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
213
+ yield frame
214
+
149
215
  def save_as(
150
216
  self,
151
217
  filename: str
152
218
  ) -> 'Video':
153
- writer = VideoWriter(filename)
219
+ """
220
+ Save the video locally as the given 'filename'.
221
+
222
+ TODO: By now we are doing tests inside so the
223
+ functionality is a manual test. Use it
224
+ carefully.
225
+ """
226
+ writer = VideoWriter(filename)
154
227
  writer.set_video_stream_from_template(self.reader.video_stream)
155
228
  writer.set_audio_stream_from_template(self.reader.audio_stream)
156
229
 
@@ -7,7 +7,7 @@ from av.video.stream import VideoStream
7
7
  from av.audio.stream import AudioStream
8
8
  from av.container.output import OutputContainer
9
9
  from av import open as av_open
10
- from fractions import Fraction
10
+ from quicktions import Fraction
11
11
  from typing import Union
12
12
 
13
13
 
@@ -43,6 +43,7 @@ class VideoWriter:
43
43
  def set_video_stream(
44
44
  self,
45
45
  codec_name: Union[str, None],
46
+ # TODO: Maybe force fps to 'int' (?)
46
47
  fps: Union[Fraction, int, float, None],
47
48
  size: Union[tuple[int, int], None] = None,
48
49
  pixel_format: Union[str, None] = None,
@@ -52,15 +53,19 @@ class VideoWriter:
52
53
  Set the video stream, that will overwrite any other
53
54
  previous video stream set.
54
55
  """
56
+ # TODO: They say that pyav accepts fps as
57
+ # Fraction...
58
+ fps = int(fps)
59
+
55
60
  self.video_stream: VideoStream = self.output.add_stream(
56
61
  # TODO: Maybe 'libx264' as default 'codec_name' (?)
57
62
  codec_name = codec_name,
58
- rate = int(fps),
63
+ rate = fps,
59
64
  options = options
60
65
  )
61
66
 
62
67
  # We need to force this or it will not work
63
- self.video_stream.time_base = Fraction(1, int(fps))
68
+ self.video_stream.time_base = Fraction(1, fps)
64
69
 
65
70
  if size is not None:
66
71
  self.video_stream.width = size[0]
@@ -92,7 +97,7 @@ class VideoWriter:
92
97
  def set_audio_stream(
93
98
  self,
94
99
  codec_name: Union[str, None],
95
- fps: float = 44_100.0
100
+ fps: Union[int, float, Fraction] = 44_100.0
96
101
  # TODO: Add more if needed
97
102
  ) -> 'VideoWriter':
98
103
  """
@@ -137,16 +142,6 @@ class VideoWriter:
137
142
 
138
143
  return self
139
144
 
140
- # This below is not working
141
- self.audio_stream: AudioStream = self.output.add_stream_from_template(
142
- template
143
- )
144
- # TODO: Is this actually needed (?)
145
- # Force this 'rate'
146
- self.audio_stream.time_base = Fraction(1, template.codec_context.rate)
147
-
148
- return self
149
-
150
145
  def encode_video_frame(
151
146
  self,
152
147
  frame: Union[VideoFrame, None] = None
@@ -196,6 +191,10 @@ class VideoWriter:
196
191
  """
197
192
  ParameterValidator.validate_mandatory_instance_of('packet', packet, Packet)
198
193
 
194
+ # We are ignoring empty packets because they
195
+ # are used to indicate the end or things like
196
+ # that, not actual data... But maybe we are
197
+ # wrong...
199
198
  if packet.size > 0:
200
199
  try:
201
200
  self.output.mux(packet)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.12
3
+ Version: 0.0.13
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.9
10
10
  Requires-Dist: av (>=0.0.1,<19.0.0)
11
11
  Requires-Dist: moderngl (>=0.0.1,<9.0.0)
12
12
  Requires-Dist: numpy (>=0.0.1,<9.0.0)
13
+ Requires-Dist: quicktions (>=0.0.1,<9.0.0)
13
14
  Requires-Dist: yta_timer (>0.0.1,<1.0.0)
14
15
  Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
15
16
  Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
@@ -0,0 +1,21 @@
1
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
+ yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
3
+ yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ yta_video_opengl/complete/timeline.py,sha256=drIeM1xYv-8OM-uwjUifwA5teT5X9LZOEK5Nfzux4FI,9384
5
+ yta_video_opengl/complete/track.py,sha256=MQJPFyZpl1XFRwYqfq32QzzEESCVtPcnz9yhwm7crng,13729
6
+ yta_video_opengl/complete/video_on_track.py,sha256=iDD3wyEHblbRxm1e-clMecx104wUfUwKdCq7ZXSq6CU,5127
7
+ yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
8
+ yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
9
+ yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
10
+ yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
11
+ yta_video_opengl/reader/__init__.py,sha256=Cxk838QMjIUOTiO2ZyPGkiPojHlX0c7x6iJkm81_UB4,20039
12
+ yta_video_opengl/reader/cache.py,sha256=YYx5AX0BkGj1KtiDePHKj-AZlET4ATTqPB0xbr98WOw,17742
13
+ yta_video_opengl/t.py,sha256=vK23hBQQt_KtBO3ceLQ-SwQbB2jfFsPFX2pYNjYaVwk,5488
14
+ yta_video_opengl/tests.py,sha256=EdTyYtTUd_mj6geWnrvnF-wZSHCKKvhYgiLclkV73O0,26576
15
+ yta_video_opengl/utils.py,sha256=yUi17EjNR4SVpvdDUwUaKl4mBCb1uyFCSGoIX3Zr2F0,15586
16
+ yta_video_opengl/video.py,sha256=ap3H-aA29Yj-SAsTKsNq2gotcBUatQVQ2jmplgKMm9Q,8465
17
+ yta_video_opengl/writer.py,sha256=QwvjQcEkzn1WAVqVTFiI6tYIXJO67LKKUTJGO_eflFM,8893
18
+ yta_video_opengl-0.0.13.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
19
+ yta_video_opengl-0.0.13.dist-info/METADATA,sha256=DvAvfeMY-o4WMVKdk5d8TiphrseGwMFadDrinfE0wh8,714
20
+ yta_video_opengl-0.0.13.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
21
+ yta_video_opengl-0.0.13.dist-info/RECORD,,
@@ -1,20 +0,0 @@
1
- yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
- yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
3
- yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- yta_video_opengl/complete/timeline.py,sha256=mf5QH7pFHvQCfDFpZd4HhyHSinvU-RDtvRhXaKOTNuY,9096
5
- yta_video_opengl/complete/track.py,sha256=vDGjkMxYY9y_KHFZb-kvN0K1Yt3_foiR_eTB3fIJxVY,12923
6
- yta_video_opengl/complete/video_on_track.py,sha256=jbRXX6KMwtvshcW1ce2FRAVKlVRHYxyyY-IOzKSO8-I,4364
7
- yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
8
- yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
9
- yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
10
- yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
11
- yta_video_opengl/reader/__init__.py,sha256=da4Jqtqi7xjbjgfzwJpXIMkILz0SKLedt8KDFqmy0Is,20848
12
- yta_video_opengl/reader/cache.py,sha256=KOFU3BtymZeel3FIvgxrWm9siUlakfNZdJzQUbFxavg,16657
13
- yta_video_opengl/tests.py,sha256=p2Pq4o2H0DMZkV7HNNNAlebSjrDMHKTKk0d_weiiPHQ,26221
14
- yta_video_opengl/utils.py,sha256=znlkvL5xjQeeN37cqFHNILBgop4W1PQTfFFudyt8-60,14614
15
- yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
16
- yta_video_opengl/writer.py,sha256=wcDVL6Av-16kgx1X_LCAgKboa1eGnKvXaKuGPOsky-s,8880
17
- yta_video_opengl-0.0.12.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
18
- yta_video_opengl-0.0.12.dist-info/METADATA,sha256=Q-stok3ZF1NXMjfNxzSA8GBjPs6-yb4BnrPg1cxjTds,671
19
- yta_video_opengl-0.0.12.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
20
- yta_video_opengl-0.0.12.dist-info/RECORD,,