yta-video-opengl 0.0.11__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/utils.py CHANGED
@@ -1,8 +1,10 @@
+ from yta_video_opengl.t import fps_to_time_base
  from yta_validation import PythonValidator
  from av.container import InputContainer
  from av.video.stream import VideoStream
  from av.audio.stream import AudioStream
  from av.video.frame import VideoFrame
+ from quicktions import Fraction
  from typing import Union

  import av
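
The quicktions dependency introduced here is a Cython implementation of the standard library's fractions.Fraction with the same API, so it can be swapped in wherever exact time arithmetic is needed. A minimal sketch (not code from the package) of the pattern these utilities rely on:

    from quicktions import Fraction

    time_base = Fraction(1, 30)   # illustrative stream time base (30 fps)
    t = Fraction(5, 2)            # 2.5 seconds, stored exactly
    pts = int(t / time_base)      # 75, with no float rounding error
    assert pts * time_base == Fraction(5, 2)
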
@@ -310,9 +312,13 @@ def iterate_stream_frames_demuxing(
          pts_to_index(frame.pts, time_base, average_rate)
      )

+ # TODO: These methods below have to be
+ # removed because we created the new T
+ # class that is working with Fractions
+ # to be precise
  def t_to_pts(
-     t: float,
-     stream_time_base: 'Fraction'
+     t: Union[int, float, Fraction],
+     stream_time_base: Fraction
  ) -> int:
      """
      Transform a 't' time moment (in seconds) to
@@ -323,8 +329,8 @@ def t_to_pts(

  def pts_to_index(
      pts: int,
-     stream_time_base: 'Fraction',
-     fps: float
+     stream_time_base: Fraction,
+     fps: Union[float, Fraction]
  ) -> int:
      """
      Transform a 'pts' packet timestamp to a
@@ -334,8 +340,8 @@ def pts_to_index(

  def index_to_pts(
      index: int,
-     stream_time_base: 'Fraction',
-     fps: float
+     stream_time_base: Fraction,
+     fps: Union[float, Fraction]
  ) -> int:
      """
      Transform a frame index into a 'pts' packet
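
These helpers implement the usual relations between a time moment, a packet timestamp and a frame index (t = pts * time_base, index = t * fps). A short worked sketch with illustrative values (the time base and fps below are not taken from the diff):

    from quicktions import Fraction

    time_base = Fraction(1, 12800)   # illustrative stream time base
    fps = Fraction(25, 1)            # illustrative frame rate

    t = Fraction(2)                       # 2 seconds
    pts = int(t / time_base)              # 25600  (t -> pts)
    index = int(pts * time_base * fps)    # 50     (pts -> frame index)
    t_back = pts * time_base              # Fraction(2, 1)  (pts -> t)
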
@@ -345,10 +351,165 @@ def index_to_pts(

  def pts_to_t(
      pts: int,
-     stream_time_base: 'Fraction'
+     stream_time_base: Fraction
  ) -> float:
      """
      Transform a 'pts' packet timestamp to a 't'
      time moment.
      """
-     return pts * stream_time_base
+     return pts * stream_time_base
+
+
+ # TODO: Move this to another utils
+ def get_silent_audio_frame(
+     sample_rate: int,
+     layout = 'stereo',
+     number_of_samples: int = 1024,
+     format = 's16'
+ ):
+     # TODO: This could be a utils or something to
+     # directly transform format into dtype
+     dtype = {
+         's16': np.int16,
+         'flt': np.float32,
+         'fltp': np.float32
+     }.get(format, None)
+
+     if dtype is None:
+         raise Exception(f'The format "{format}" is not accepted.')
+
+     number_of_channels = len(av.AudioLayout(layout).channels)
+     # TODO: I think the option above is better
+     # number_of_channels = (
+     #     2
+     #     if layout == 'stereo' else
+     #     1
+     # )
+
+     # For packed (or planar) formats we apply:
+     # (1, samples * channels). This is the same
+     # amount of data but planar, in 1D only
+     # TODO: This wasn't in the previous version
+     # and it was working, we were sending the
+     # same 'number_of_samples' even when 'fltp'
+     # that includes the 'p'
+     # TODO: This is making the audio last 2x
+     # if 'p' in format:
+     #     number_of_samples *= number_of_channels
+
+     silent_array = np.zeros((number_of_channels, number_of_samples), dtype = dtype)
+     frame = av.AudioFrame.from_ndarray(silent_array, format = format, layout = layout)
+     frame.sample_rate = sample_rate
+
+     return frame
+
+ def get_black_background_video_frame(
+     size: tuple[int, int] = (1920, 1080),
+     format: str = 'rgb24',
+     pts: Union[int, None] = None,
+     time_base: Union[Fraction, None] = None
+ ):
+     """
+     Get a pyav VideoFrame that is completely
+     black. If the 'pts' and/or 'time_base'
+     parameters are provided, they are set on
+     the returned frame. If not, remember to
+     set them later, because they are needed
+     when sending the frame to the pyav muxer.
+     """
+     frame = av.VideoFrame.from_ndarray(
+         # TODO: What if we want alpha (?)
+         # Size must be inverted
+         array = np.zeros((size[1], size[0], 3), dtype = np.uint8),
+         format = format
+     )
+
+     frame.pts = (
+         pts
+         if pts is not None else
+         frame.pts
+     )
+
+     frame.time_base = (
+         time_base
+         if time_base is not None else
+         frame.time_base
+     )
+
+     return frame
+
+ def get_audio_frame_pts_range(
+     frame: av.AudioFrame,
+     do_in_seconds: bool = False
+ ):
+     """
+     Get the [start_pts, end_pts) range of the
+     pyav AudioFrame, or in seconds if the
+     'do_in_seconds' parameter is True.
+     """
+     if frame.pts is None:
+         raise Exception('No "pts" found.')
+
+     # First and last sample. Remember that
+     # the last one is not included
+     start_pts = frame.pts
+     end_pts = frame.pts + frame.samples
+
+     # Time base for the seconds conversion
+     time_base = (
+         frame.time_base
+         if frame.time_base else
+         Fraction(1, frame.sample_rate)
+     )
+
+     start_time = (
+         float(start_pts * time_base)
+         if do_in_seconds else
+         start_pts
+     )
+     end_time = (
+         float(end_pts * time_base)
+         if do_in_seconds else
+         end_pts
+     )
+
+     return (
+         start_time,
+         end_time
+     )
+
+ def audio_frames_and_remainder_per_video_frame(
+     # TODO: Maybe force 'fps' as int (?)
+     video_fps: Union[float, Fraction],
+     sample_rate: int, # audio_fps
+     number_of_samples_per_audio_frame: int
+ ) -> tuple[int, int]:
+     """
+     Get how many full silent audio frames we
+     need and the remainder for the last one
+     (that could be not complete), according
+     to the parameters provided.
+
+     This method returns a tuple containing
+     the number of full silent audio frames
+     we need and the number of samples we need
+     in the last non-full audio frame.
+     """
+     # Video frame duration (in seconds)
+     time_base = fps_to_time_base(video_fps)
+     sample_rate = Fraction(int(sample_rate), 1)
+
+     # Example:
+     # 44_100 / 60 = 735 -> This means that we
+     # will have 735 samples of sound per each
+     # video frame
+     # The amount of samples per frame is actually
+     # the amount of samples we need, because we
+     # are generating it...
+     samples_per_frame = sample_rate * time_base
+     # The 'nb_samples' is the amount of samples
+     # we are including on each audio frame
+     full_audio_frames_needed = samples_per_frame // number_of_samples_per_audio_frame
+     remainder = samples_per_frame % number_of_samples_per_audio_frame
+
+     return int(full_audio_frames_needed), int(remainder)
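
audio_frames_and_remainder_per_video_frame pairs with get_silent_audio_frame when a video frame has to be muxed together with silence. A worked sketch with illustrative numbers (60 fps and 44.1 kHz are examples, and Fraction(1, video_fps) stands in for fps_to_time_base, which is not shown in this diff):

    from quicktions import Fraction

    video_fps = 60
    sample_rate = 44_100
    samples_per_audio_frame = 1024

    # Audio samples that cover exactly one video frame
    samples_per_video_frame = Fraction(sample_rate) * Fraction(1, video_fps)   # 735
    full_frames = samples_per_video_frame // samples_per_audio_frame           # 0
    remainder = samples_per_video_frame % samples_per_audio_frame              # 735

    # So one video frame of silence needs 0 full 1024-sample audio frames
    # plus a final frame holding 735 samples, e.g.:
    # get_silent_audio_frame(sample_rate, 'stereo', int(remainder), 's16')
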
yta_video_opengl/video.py CHANGED
@@ -1,7 +1,8 @@
  from yta_video_opengl.reader import VideoReader
  from yta_video_opengl.writer import VideoWriter
- from yta_video_opengl.utils import iterate_stream_frames_demuxing
+ from yta_video_opengl.t import T
  from yta_validation import PythonValidator
+ from quicktions import Fraction
  from typing import Union


@@ -26,7 +27,8 @@ class Video:
          This timestamp is used to read the video
          file source.
          """
-         return int(self.start / self.reader.time_base)
+         # TODO: What if 'time_base' is None (?)
+         return T(self.start, self.reader.time_base).truncated_pts

      @property
      def end_pts(
@@ -40,7 +42,8 @@ class Video:
          file source.
          """
          return (
-             int(self.end / self.reader.time_base)
+             # TODO: What if 'time_base' is None (?)
+             T(self.end, self.reader.time_base).truncated_pts
              # TODO: What do we do if no duration (?)
              if self.duration is not None else
              None
@@ -54,7 +57,8 @@ class Video:
          The start packet time stamp (pts), needed
          to optimize the packet iteration process.
          """
-         return int(self.start / self.reader.audio_time_base)
+         # TODO: What if 'audio_time_base' is None (?)
+         return T(self.start, self.reader.audio_time_base).truncated_pts

      @property
      def audio_end_pts(
@@ -65,7 +69,8 @@ class Video:
          optimize the packet iteration process.
          """
          return (
-             int(self.end / self.reader.audio_time_base)
+             # TODO: What if 'audio_time_base' is None (?)
+             T(self.end, self.reader.audio_time_base).truncated_pts
              # TODO: What do we do if no duration (?)
              if self.duration is not None else
              None
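
The T class itself is not part of this diff (it lives in the newly added yta_video_opengl/t.py), so the sketch below only assumes that truncated_pts reproduces the old int(t / time_base) result using exact Fraction arithmetic:

    from quicktions import Fraction

    start = Fraction(3, 2)           # t = 1.5 s
    time_base = Fraction(1, 12800)   # illustrative stream time base

    # Assumed equivalent of T(start, time_base).truncated_pts
    truncated_pts = int(start / time_base)   # 19200
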
@@ -74,7 +79,7 @@ class Video:
      @property
      def duration(
          self
-     ):
+     ) -> Fraction:
          """
          The duration of the video.
          """
@@ -83,7 +88,7 @@ class Video:
      @property
      def number_of_frames(
          self
-     ):
+     ) -> Union[int, None]:
          """
          The number of frames of the video.
          """
@@ -117,8 +122,8 @@ class Video:
      def __init__(
          self,
          filename: str,
-         start: float = 0.0,
-         end: Union[float, None] = None
+         start: Union[int, float, Fraction] = 0.0,
+         end: Union[int, float, Fraction, None] = None
      ):
          self.filename: str = filename
          """
@@ -130,12 +135,12 @@ class Video:
          """
          The pyav video reader.
          """
-         self.start: float = start
+         self.start: Fraction = Fraction(start)
          """
          The time moment 't' in which the video
          should start.
          """
-         self.end: Union[float, None] = (
+         self.end: Union[Fraction, None] = Fraction(
              # TODO: Is this 'end' ok (?)
              self.reader.duration
              if end is None else
@@ -145,12 +150,80 @@ class Video:
          The time moment 't' in which the video
          should end.
          """
+
+     # TODO: We need to implement the 'get_frame'
+     # methods because this Video can be subclipped
+     # and have a 'start' and 'end' that are
+     # different from [0, end)
+     def _get_real_t(
+         self,
+         t: Union[int, float, Fraction]
+     ) -> Fraction:
+         """
+         Get the real 't' time moment based on the
+         video 'start' and 'end'. If t=0.5s is
+         requested but the video was subclipped to
+         [1.0, 2.0), the real moment is actually
+         1.5s in the source because of the
+         subclipped time range.
+         """
+         t = self.start + t

+         if t >= self.end:
+             raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
+
+         return t
+
+     def get_frame_from_t(
+         self,
+         t: Union[int, float, Fraction]
+     ) -> 'VideoFrame':
+         """
+         Get the video frame with the given 't' time
+         moment, using the video cache system.
+         """
+         return self.reader.video_cache.get_frame_from_t(self._get_real_t(t))
+
+     def get_audio_frame_from_t(
+         self,
+         t: Union[int, float, Fraction]
+     ) -> 'AudioFrame':
+         """
+         Get the audio frame with the given 't' time
+         moment, using the audio cache system.
+         """
+         return self.reader.audio_cache.get_frame_from_t(self._get_real_t(t))
+
+     def get_audio_frames_from_t(
+         self,
+         t: Union[int, float, Fraction]
+     ):
+         """
+         Get the sequence of audio frames for the
+         given video 't' time moment, using the
+         audio cache system.
+
+         This is useful when we want to write a
+         video frame with its audio, so we obtain
+         all the audio frames associated to it
+         (remember that a video frame is associated
+         with more than 1 audio frame).
+         """
+         for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
+             yield frame
+
      def save_as(
          self,
          filename: str
      ) -> 'Video':
-         writer = VideoWriter(filename)
+         """
+         Save the video locally as the given 'filename'.
+
+         TODO: For now we are doing tests inside, so
+         this functionality is a manual test. Use it
+         carefully.
+         """
+         writer = VideoWriter(filename)
          writer.set_video_stream_from_template(self.reader.video_stream)
          writer.set_audio_stream_from_template(self.reader.audio_stream)
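
With the new accessors a Video behaves as a subclip of its source file: callers pass subclip-local times and _get_real_t maps them onto [start, end). A minimal usage sketch (the filename and times are illustrative):

    video = Video('input.mp4', start = 1.0, end = 2.0)

    frame = video.get_frame_from_t(0.5)                 # reads t = 1.5s of the source
    audio = list(video.get_audio_frames_from_t(0.5))    # audio frames for that video frame

    # Asking for t = 1.0 would raise, because start + t == end is out of range.
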

yta_video_opengl/writer.py CHANGED
@@ -7,7 +7,7 @@ from av.video.stream import VideoStream
  from av.audio.stream import AudioStream
  from av.container.output import OutputContainer
  from av import open as av_open
- from fractions import Fraction
+ from quicktions import Fraction
  from typing import Union


@@ -43,6 +43,7 @@ class VideoWriter:
      def set_video_stream(
          self,
          codec_name: Union[str, None],
+         # TODO: Maybe force fps to 'int' (?)
          fps: Union[Fraction, int, float, None],
          size: Union[tuple[int, int], None] = None,
          pixel_format: Union[str, None] = None,
@@ -52,6 +53,10 @@ class VideoWriter:
          Set the video stream, that will overwrite any other
          previous video stream set.
          """
+         # TODO: They say that pyav accepts fps as
+         # Fraction...
+         fps = int(fps)
+
          self.video_stream: VideoStream = self.output.add_stream(
              # TODO: Maybe 'libx264' as default 'codec_name' (?)
              codec_name = codec_name,
@@ -60,7 +65,7 @@ class VideoWriter:
          )

          # We need to force this or it will not work
-         self.video_stream.time_base = Fraction(1, int(fps))
+         self.video_stream.time_base = Fraction(1, fps)

          if size is not None:
              self.video_stream.width = size[0]
@@ -91,17 +96,27 @@ class VideoWriter:

      def set_audio_stream(
          self,
-         codec_name: Union[str, None]
+         codec_name: Union[str, None],
+         fps: Union[int, float, Fraction] = 44_100.0
          # TODO: Add more if needed
      ) -> 'VideoWriter':
          """
          Set the audio stream, that will overwrite any other
          previous audio stream set.
          """
+         # TODO: Check what else we can set
          self.audio_stream: AudioStream = self.output.add_stream(
-             codec_name = codec_name
+             codec_name = codec_name,
+             rate = int(fps)
          )

+         # audio_stream = output.add_stream("aac", rate=48000)  # AAC codec, 48 kHz
+         # # Configure the stream
+         # audio_stream.channels = 2             # number of channels
+         # audio_stream.layout = "stereo"        # layout
+         # audio_stream.sample_rate = 48000      # sample rate
+         # audio_stream.format = "s16"           # sample format (PCM signed 16-bit)
+
          # TODO: Add more if needed

          return self
@@ -127,16 +142,6 @@ class VideoWriter:

          return self

-         # This below is not working
-         self.audio_stream: AudioStream = self.output.add_stream_from_template(
-             template
-         )
-         # TODO: Is this actually needed (?)
-         # Force this 'rate'
-         self.audio_stream.time_base = Fraction(1, template.codec_context.rate)
-
-         return self
-
      def encode_video_frame(
          self,
          frame: Union[VideoFrame, None] = None
@@ -186,6 +191,10 @@ class VideoWriter:
          """
          ParameterValidator.validate_mandatory_instance_of('packet', packet, Packet)

+         # We are ignoring empty packets because they
+         # are used to indicate the end or things like
+         # that, not actual data... But maybe we are
+         # wrong...
          if packet.size > 0:
              try:
                  self.output.mux(packet)
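
Putting the writer changes together, a minimal sketch of the intended write path. This is not code from the package: it only uses PyAV calls the diff already relies on (add_stream, encode, mux) plus the utils helpers shown above, and the codec names, sizes and rates are illustrative.

    import av
    from quicktions import Fraction
    from yta_video_opengl.utils import (
        get_black_background_video_frame,
        get_silent_audio_frame
    )

    fps = 30
    output = av.open('output.mp4', 'w')

    video_stream = output.add_stream('libx264', rate = fps)
    video_stream.width, video_stream.height = 1920, 1080
    video_stream.pix_fmt = 'yuv420p'
    video_stream.time_base = Fraction(1, fps)    # as set_video_stream forces

    audio_stream = output.add_stream('aac', rate = 44_100)

    # One black video frame and one silent audio frame
    frame = get_black_background_video_frame((1920, 1080), pts = 0, time_base = Fraction(1, fps))
    for packet in video_stream.encode(frame):
        output.mux(packet)

    silence = get_silent_audio_frame(44_100, 'stereo', 1024, 'fltp')
    silence.pts = 0
    for packet in audio_stream.encode(silence):
        output.mux(packet)

    # Flush both encoders and close the container
    for packet in video_stream.encode():
        output.mux(packet)
    for packet in audio_stream.encode():
        output.mux(packet)
    output.close()
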
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.11
+ Version: 0.0.13
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.9
  Requires-Dist: av (>=0.0.1,<19.0.0)
  Requires-Dist: moderngl (>=0.0.1,<9.0.0)
  Requires-Dist: numpy (>=0.0.1,<9.0.0)
+ Requires-Dist: quicktions (>=0.0.1,<9.0.0)
  Requires-Dist: yta_timer (>0.0.1,<1.0.0)
  Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
  Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
@@ -0,0 +1,21 @@
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
+ yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
+ yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ yta_video_opengl/complete/timeline.py,sha256=drIeM1xYv-8OM-uwjUifwA5teT5X9LZOEK5Nfzux4FI,9384
+ yta_video_opengl/complete/track.py,sha256=MQJPFyZpl1XFRwYqfq32QzzEESCVtPcnz9yhwm7crng,13729
+ yta_video_opengl/complete/video_on_track.py,sha256=iDD3wyEHblbRxm1e-clMecx104wUfUwKdCq7ZXSq6CU,5127
+ yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
+ yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
+ yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
+ yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
+ yta_video_opengl/reader/__init__.py,sha256=Cxk838QMjIUOTiO2ZyPGkiPojHlX0c7x6iJkm81_UB4,20039
+ yta_video_opengl/reader/cache.py,sha256=YYx5AX0BkGj1KtiDePHKj-AZlET4ATTqPB0xbr98WOw,17742
+ yta_video_opengl/t.py,sha256=vK23hBQQt_KtBO3ceLQ-SwQbB2jfFsPFX2pYNjYaVwk,5488
+ yta_video_opengl/tests.py,sha256=EdTyYtTUd_mj6geWnrvnF-wZSHCKKvhYgiLclkV73O0,26576
+ yta_video_opengl/utils.py,sha256=yUi17EjNR4SVpvdDUwUaKl4mBCb1uyFCSGoIX3Zr2F0,15586
+ yta_video_opengl/video.py,sha256=ap3H-aA29Yj-SAsTKsNq2gotcBUatQVQ2jmplgKMm9Q,8465
+ yta_video_opengl/writer.py,sha256=QwvjQcEkzn1WAVqVTFiI6tYIXJO67LKKUTJGO_eflFM,8893
+ yta_video_opengl-0.0.13.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+ yta_video_opengl-0.0.13.dist-info/METADATA,sha256=DvAvfeMY-o4WMVKdk5d8TiphrseGwMFadDrinfE0wh8,714
+ yta_video_opengl-0.0.13.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ yta_video_opengl-0.0.13.dist-info/RECORD,,
@@ -1,20 +0,0 @@
1
- yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
- yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
3
- yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- yta_video_opengl/complete/timeline.py,sha256=o6nAtpkdr6RzYlIEB9kLfNAZiu9eRXyA6QE3TXuNL_I,5465
5
- yta_video_opengl/complete/track.py,sha256=gPushEuLYIoMBjvS3wTDbOKSV8VEtVTqrEm666ABFUc,3937
6
- yta_video_opengl/complete/video_on_track.py,sha256=sTSd8gqNwK1wT1uzJEGkJqTbu5qM3wWqk361K7OAbl0,2851
7
- yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
8
- yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
9
- yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
10
- yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
11
- yta_video_opengl/reader/__init__.py,sha256=zqVMMUjWHCsOaWFrnkYdnbO4YEAx3zLIBTuMnzQJRug,17180
12
- yta_video_opengl/reader/cache.py,sha256=Gtwl2kRLSJn3JJOVI-45Wc2vMPYj5-_g3AR5wPbw8OY,9152
13
- yta_video_opengl/tests.py,sha256=p2Pq4o2H0DMZkV7HNNNAlebSjrDMHKTKk0d_weiiPHQ,26221
14
- yta_video_opengl/utils.py,sha256=_89-IrTDFVbY86qLlJzw42MxYAsNAnux_DOO89mul64,10710
15
- yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
16
- yta_video_opengl/writer.py,sha256=rzOfxPtlClLRIVLFyIOoBVcEgse3ISmpKRWHg8OwFHQ,8355
17
- yta_video_opengl-0.0.11.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
18
- yta_video_opengl-0.0.11.dist-info/METADATA,sha256=8JNEZ05Elie6lMQREjRjGPQQ-kLW-GCW6uZpePTiLgY,671
19
- yta_video_opengl-0.0.11.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
20
- yta_video_opengl-0.0.11.dist-info/RECORD,,