yta-video-opengl 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,249 @@
1
+ """
2
+ The pyav container stores the information based
3
+ on the packets timestamps (called 'pts'). Some
4
+ of the packets are considered key_frames because
5
+ they include those key frames.
6
+
7
+ Also, this library uses those key frames to start
8
+ decoding from there to the next one, obtaining
9
+ all the frames in between able to be read and
10
+ modified.
11
+
12
+ This cache system will look for the range of
13
+ frames that belong to the key frame related to the
14
+ frame we are requesting in the moment, keeping in
15
+ memory all those frames to be handled fast. It
16
+ will remove the old frames if needed to use only
17
+ the 'size' we set when creating it.
18
+
19
+ A stream can have 'fps = 60' but use another
20
+ different time base that makes the pts values go 0,
21
+ 256, 512... for example. The 'time_base' is the
22
+ only accurate way to obtain the pts.
23
+
24
+ Feel free to move this explanation to other
25
+ place, it's about the duration.
26
+
27
+ The stream 'duration' parameter is measured
28
+ on ticks, the amount of ticks that the
29
+ stream lasts. Here below is an example:
30
+
31
+ - Duration raw: 529200
32
+ - Time base: 1/44100
33
+ - Duration (seconds): 12.0
34
+ """
35
+ from av.container import InputContainer
36
+ from av.video.stream import VideoStream
37
+ from av.audio.stream import AudioStream
38
+ from av.video.frame import VideoFrame
39
+ from av.audio.frame import AudioFrame
40
+ from av.packet import Packet
41
+ from yta_validation.parameter import ParameterValidator
42
+ from quicktions import Fraction
43
+ from collections import OrderedDict
44
+ from typing import Union
45
+ from abc import abstractmethod, ABC
46
+
47
+ import numpy as np
48
+ import math
49
+
50
+
51
class FrameCache(ABC):
    """
    Base class to manage the frames cache of a
    video or audio pyav stream.

    It indexes the pts of every key frame packet
    once, then keeps the decoded frames of the
    current key frame interval in memory, evicting
    the oldest entries when the 'size' limit is
    exceeded.
    """

    @property
    @abstractmethod
    def fps(
        self
    ) -> Union[int, Fraction, None]:
        """
        The frames per second.
        """
        pass

    @property
    def time_base(
        self
    ) -> Union[Fraction, None]:
        """
        The time base of the stream, the only
        accurate way to turn a pts into seconds
        (seconds = pts * time_base).
        """
        return self.stream.time_base

    def __init__(
        self,
        container: InputContainer,
        stream: Union[VideoStream, AudioStream],
        size: Union[int, None] = None
    ):
        ParameterValidator.validate_mandatory_instance_of('container', container, InputContainer)
        ParameterValidator.validate_mandatory_instance_of('stream', stream, [VideoStream, AudioStream])
        ParameterValidator.validate_number_between('size', size, 1, 120)

        self.container: InputContainer = container
        # The pyav input container
        self.stream: Union[VideoStream, AudioStream] = stream
        # The pyav stream this cache reads from
        self.cache: OrderedDict = OrderedDict()
        # Decoded frames indexed by pts, kept in
        # insertion order so the oldest can be evicted
        self.key_frames_pts: list[int] = []
        # Timestamps of the key frame packets, ordered
        # from beginning to end
        self.size: Union[int, None] = size
        # The size of the cache (may be recalculated
        # automatically in '_prepare')
        self._last_packet_accessed: Union[Packet, None] = None
        # The last packet that has been demuxed, used
        # to avoid seeking when reading consecutively

        self._prepare()

    def _prepare(
        self
    ):
        """
        Index the key frame timestamps of the whole
        stream and auto-calculate the cache size.
        """
        # Index key frames
        for packet in self.container.demux(self.stream):
            if packet.is_keyframe:
                self.key_frames_pts.append(packet.pts)

        # The cache size will be auto-calculated to
        # use the amount of frames of the biggest
        # interval of frames that belongs to a key
        # frame, or a value by default
        # TODO: Careful if this is too big
        # Intervals, but in number of frames
        intervals = np.diff(
            # Intervals of time between keyframes
            np.array(self.key_frames_pts) * self.time_base
        ) * self.fps

        self.size = (
            math.ceil(np.max(intervals))
            if intervals.size > 0 else
            (
                self.size
                if self.size is not None else
                # TODO: Make this a setting (?)
                60
            )
        )

        # Rewind after having consumed the container
        self.container.seek(0)

    def _get_nearest_keyframe_pts(
        self,
        pts: int
    ):
        """
        Get the pts of the key frame that is the
        nearest (at or before) the provided 'pts'.
        Useful to seek and start decoding frames
        from that key frame.
        """
        # Fall back to the first key frame when 'pts'
        # precedes all of them, instead of raising a
        # ValueError on an empty 'max'
        return max(
            (
                key_frame_pts
                for key_frame_pts in self.key_frames_pts
                if key_frame_pts <= pts
            ),
            default = self.key_frames_pts[0] if self.key_frames_pts else None
        )

    def _store_frame_in_cache(
        self,
        frame: Union[VideoFrame, AudioFrame]
    ) -> Union[VideoFrame, AudioFrame]:
        """
        Store the provided 'frame' in cache if it
        is not on it, removing the first (oldest)
        item of the cache if full.
        """
        if frame.pts not in self.cache:
            self.cache[frame.pts] = frame

            # Clean cache if full
            if len(self.cache) > self.size:
                self.cache.popitem(last = False)

        return frame

    def _seek(
        self,
        pts: int
    ):
        """
        Seek to the given 'pts'. This is useful
        when working with 'container.demux' and
        iterating over packets, not when using
        'stream.decode' and getting frames
        directly.
        """
        self.container.seek(
            offset = pts,
            stream = self.stream
        )

    def clear(
        self
    ) -> 'FrameCache':
        """
        Clear the cache by removing all the items.
        """
        self.cache.clear()

        return self

    def get_frame(
        self,
        t: Union[int, float, Fraction]
    ) -> Union[VideoFrame, AudioFrame, None]:
        """
        Get the single frame that is in the 't'
        time moment provided, or None when no
        frame exists at that moment.
        """
        for frame in self.get_frames(t):
            return frame

    @abstractmethod
    def get_frames(
        self,
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction]
    ):
        """
        Iterate over all the frames in the range
        between the provided 'start' and 'end'.
        """
        pass
226
+
227
+
228
+ """
229
+ There is a way of editing videos being
230
+ able to arbitrary access to frames, that
231
+ is transforming the source videos to
232
+ intra-frame videos. This is a ffmpeg
233
+ command that can do it:
234
+
235
+ - `ffmpeg -i input.mp4 -c:v libx264 -x264opts keyint=1 -preset fast -crf 18 -c:a copy output_intra.mp4`
236
+
237
+ Once you have the 'output_intra.mp4',
238
+ each packet can decode its frame
239
+ depending not on the previous one, being
240
+ able to seek and jump easily.
241
+
242
+ There are 3 types of video frame codings:
243
+ the I-frame (intra-coded), in which any
244
+ frame can be decoded by itself, P-frame
245
+ (predicted), that need one or more
246
+ previous frames to be decoded, and
247
+ B-frame (bidirectional predicted), that
248
+ needs previous and future frames.
249
+ """
@@ -0,0 +1,195 @@
1
+
2
+ from yta_video_opengl.reader.cache import FrameCache
3
+ from yta_video_opengl.reader.cache.utils import trim_audio_frame
4
+ from yta_video_opengl.t import T
5
+ from yta_validation.parameter import ParameterValidator
6
+ from av.container import InputContainer
7
+ from av.audio.stream import AudioStream
8
+ from av.audio.frame import AudioFrame
9
+ from quicktions import Fraction
10
+ from typing import Union
11
+
12
+
13
class AudioFrameCache(FrameCache):
    """
    Cache for the audio frames.
    """

    @property
    def fps(
        self
    ) -> Union[Fraction, int]:
        """
        The frames per second (the sample rate of
        the audio stream).
        """
        return self.stream.rate

    @property
    def frame_duration(
        self
    ) -> int:
        """
        The frame duration in ticks, which is the
        minimum amount of time, 1 / time_base.

        NOTE(review): 'stream.frames' is the total
        amount of frames in the stream, not a
        duration — confirm this returns the value
        the callers expect.
        """
        return self.stream.frames

    def __init__(
        self,
        container: InputContainer,
        stream: AudioStream,
        size: Union[int, None] = None
    ):
        ParameterValidator.validate_mandatory_instance_of('stream', stream, AudioStream)

        super().__init__(container, stream, size)

    def _seek(
        self,
        pts: int
    ):
        """
        Seek to the given 'pts', applying a small
        pad before it to avoid problems when
        reading audio frames.
        """
        # I found that it is recommended to
        # read ~100ms before the pts we want to
        # actually read so we obtain the frames
        # clean (this is important in audio).
        # This solves a problem I had related
        # to some artifacts on the audio when
        # trimming exactly without this pad.
        pts_pad = int(0.1 / self.time_base)
        self.container.seek(
            offset = max(0, pts - pts_pad),
            stream = self.stream
        )

    def get_frame(
        self,
        t: Union[int, float, Fraction]
    ) -> AudioFrame:
        """
        Get the audio frame that is in the 't'
        time moment provided.
        """
        t: T = T.from_fps(t, self.fps)
        for frame in self.get_frames(t.truncated, t.next(1).truncated):
            return frame

    def get_frames(
        self,
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction]
    ):
        """
        Get all the audio frames in the range
        between the provided 'start' and 'end'
        time (in seconds).

        This method is an iterator that yields
        the frames, trimmed when only a part of
        them is inside the requested range.
        """
        # TODO: Validate 'start' and 'end' are mandatory
        # positive numbers
        # Make sure the 'start' and 'end' time moments
        # provided are truncated values based on the
        # stream time base
        start = T(start, self.time_base).truncated
        end = T(end, self.time_base).truncated

        if end <= start:
            raise Exception(f'The time range start:{str(float(start))} - end:{str(float(end))}) is not valid.')

        key_frame_pts = self._get_nearest_keyframe_pts(start / self.time_base)

        # Seek only if we are not already positioned
        # on the key frame we need
        if (
            self._last_packet_accessed is None or
            self._last_packet_accessed.pts != key_frame_pts
        ):
            self._seek(key_frame_pts)

        for packet in self.container.demux(self.stream):
            if packet.pts is None:
                continue

            self._last_packet_accessed = packet

            for frame in packet.decode():
                if frame.pts is None:
                    continue

                # We store all the frames in cache
                self._store_frame_in_cache(frame)

                current_frame_time = frame.pts * self.time_base
                # End is not included, it is actually the
                # start of the next frame
                frame_end = current_frame_time + (frame.samples / self.stream.sample_rate)

                # For the next comments imagine we are looking
                # for the [1.0, 2.0) audio time range
                if frame_end <= start:
                    # Previous frame, nothing inside
                    # (from 0.25 to 1.0)
                    continue

                if current_frame_time >= end:
                    # After the range, we finished
                    # (from 2.0 to 2.75)
                    return

                """
                If we need audio from 1 to 2, audio is:
                - from 0 to 0.75 (Not included, omit)
                - from 0.5 to 1.5 (Included, take 1.0 to 1.5)
                - from 0.5 to 2.5 (Included, take 1.0 to 2.0)
                - from 1.25 to 1.5 (Included, take 1.25 to 1.5)
                - from 1.25 to 2.5 (Included, take 1.25 to 2.0)
                - from 2.5 to 3.5 (Not included, omit)
                """

                # Here below, at least a part is inside
                if (
                    current_frame_time < start and
                    frame_end > start
                ):
                    # A part at the end is included
                    end_time = (
                        # From 0.5 to 1.5 => take 1.0 to 1.5
                        frame_end
                        if frame_end <= end else
                        # From 0.5 to 2.5 => take 1.0 to 2.0
                        end
                    )
                    frame = trim_audio_frame(
                        frame = frame,
                        start = start,
                        end = end_time,
                        time_base = self.time_base
                    )
                elif (
                    current_frame_time >= start and
                    current_frame_time < end
                ):
                    # A part at the beginning is included
                    end_time = (
                        # From 1.25 to 1.5 => take 1.25 to 1.5
                        frame_end
                        if frame_end <= end else
                        # From 1.25 to 2.5 => take 1.25 to 2.0
                        end
                    )
                    frame = trim_audio_frame(
                        frame = frame,
                        start = current_frame_time,
                        end = end_time,
                        time_base = self.time_base
                    )

                # If the whole frame is inside, it is passed
                # as it is. 'trim_audio_frame' can return
                # None when rounding leaves nothing to keep,
                # so never yield None
                if frame is not None:
                    yield frame
@@ -0,0 +1,48 @@
1
+ from av.audio.frame import AudioFrame
2
+ from quicktions import Fraction
3
+ from typing import Union
4
+
5
+
6
def trim_audio_frame(
    frame: AudioFrame,
    start: Union[int, float, Fraction],
    end: Union[int, float, Fraction],
    time_base: Fraction
) -> Union[AudioFrame, None]:
    """
    Trim an audio frame to obtain the part between
    [start, end), that is provided in seconds.

    Returns None when the frame does not overlap
    the requested range at all, or when rounding
    leaves no samples to keep.

    NOTE(review): slicing 'samples[:, a:b]' assumes
    'to_ndarray' returns a (channels, n_samples)
    planar-style layout; for packed formats the
    samples are interleaved in one row — confirm
    the formats this is used with.
    """
    # (channels, n_samples)
    samples = frame.to_ndarray()
    n_samples = samples.shape[1]

    # Frame limits, in seconds
    frame_start = frame.pts * float(time_base)
    frame_end = frame_start + (n_samples / frame.sample_rate)

    # Overlapping region
    cut_start = max(frame_start, float(start))
    cut_end = min(frame_end, float(end))

    if cut_start >= cut_end:
        # No overlapping
        return None

    # To sample indexes, clamped to the valid range
    # because float rounding could push them
    # slightly out of it
    start_index = max(0, int(round((cut_start - frame_start) * frame.sample_rate)))
    end_index = min(n_samples, int(round((cut_end - frame_start) * frame.sample_rate)))

    if start_index >= end_index:
        # Rounding left no samples to keep
        return None

    new_frame = AudioFrame.from_ndarray(
        # end_index is not included: so [start, end)
        array = samples[:, start_index:end_index],
        format = frame.format,
        layout = frame.layout
    )

    # Keep the timing attributes consistent with
    # the part of the original frame we kept
    new_frame.sample_rate = frame.sample_rate
    new_frame.time_base = time_base
    new_frame.pts = int(round(cut_start / float(time_base)))

    return new_frame
@@ -0,0 +1,110 @@
1
+
2
+ from yta_video_opengl.reader.cache import FrameCache
3
+ from yta_video_opengl.t import T
4
+ from yta_validation.parameter import ParameterValidator
5
+ from av.container import InputContainer
6
+ from av.video.stream import VideoStream
7
+ from av.video.frame import VideoFrame
8
+ from quicktions import Fraction
9
+ from typing import Union
10
+
11
+
12
class VideoFrameCache(FrameCache):
    """
    Cache for the video frames.
    """

    @property
    def fps(
        self
    ) -> Union[Fraction, None]:
        """
        The frames per second.
        """
        return self.stream.average_rate

    @property
    def frame_duration(
        self
    ) -> float:
        """
        The frame duration in ticks, which is the
        minimum amount of time, 1 / time_base.
        """
        # NOTE(review): true division returns a
        # float, not an int
        return self.stream.duration / self.stream.frames

    def __init__(
        self,
        container: InputContainer,
        stream: VideoStream,
        size: Union[int, None] = None
    ):
        ParameterValidator.validate_mandatory_instance_of('stream', stream, VideoStream)

        super().__init__(container, stream, size)

    def get_frame(
        self,
        t: Union[int, float, Fraction]
    ) -> VideoFrame:
        """
        Get the video frame that is in the 't'
        time moment provided.
        """
        t: T = T.from_fps(t, self.fps)
        for frame in self.get_frames(t.truncated, t.next(1).truncated):
            return frame

    def get_frames(
        self,
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction]
    ):
        """
        Get all the frames in the range between
        the provided 'start' and 'end' time in
        seconds.

        This method is an iterator that yields
        the frames one by one.
        """
        # TODO: Validate 'start' and 'end' are mandatory
        # positive numbers
        # Make sure the 'start' and 'end' time moments
        # provided are truncated values based on the
        # stream time base
        start = T(start, self.time_base).truncated
        end = T(end, self.time_base).truncated

        if end <= start:
            raise Exception(f'The time range start:{str(float(start))} - end:{str(float(end))}) is not valid.')

        key_frame_pts = self._get_nearest_keyframe_pts(start / self.time_base)

        # Seek only if we are not already positioned
        # on the key frame we need
        if (
            self._last_packet_accessed is None or
            self._last_packet_accessed.pts != key_frame_pts
        ):
            self._seek(key_frame_pts)

        for packet in self.container.demux(self.stream):
            if packet.pts is None:
                continue

            self._last_packet_accessed = packet

            for frame in packet.decode():
                if frame.pts is None:
                    continue

                # We store all the frames in cache
                self._store_frame_in_cache(frame)

                current_frame_time = frame.pts * self.time_base

                # We want the range [start, end)
                if start <= current_frame_time < end:
                    yield frame

                if current_frame_time >= end:
                    # Stop demuxing completely: a plain
                    # 'break' would only exit the inner
                    # frame loop and keep demuxing the
                    # rest of the container for nothing
                    # (the audio cache already returns)
                    return
yta_video_opengl/tests.py CHANGED
@@ -584,14 +584,27 @@ def video_modified_stored():
584
584
  from yta_video_opengl.video import Video
585
585
  from yta_video_opengl.complete.timeline import Timeline
586
586
 
587
+ # TODO: This test below is just to validate
588
+ # that it is cropping and placing correctly
589
+ # but the videos are only in one track
590
+ # video = Video(VIDEO_PATH, 0.25, 0.75)
591
+ # timeline = Timeline()
592
+ # timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.5)
593
+ # # This is successfully raising an exception
594
+ # #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
595
+ # timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.75)
596
+ # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
597
+ # # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
598
+ # timeline.render(OUTPUT_PATH)
599
+
600
+ # TODO: This test will add videos that
601
+ # must be played at the same time
587
602
  video = Video(VIDEO_PATH, 0.25, 0.75)
588
603
  timeline = Timeline()
589
- timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.5)
590
- # This is successfully raising an exception
591
- #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
592
- timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.75)
593
- timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 4.0, 5.0), 3)
594
- # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 2.25, 3.0), 3)
604
+ timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75)
605
+ timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0)
606
+ timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.1, do_use_second_track = True)
607
+ timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
595
608
  timeline.render(OUTPUT_PATH)
596
609
 
597
610
  return
yta_video_opengl/video.py CHANGED
@@ -182,7 +182,7 @@ class Video:
182
182
  Get the video frame with the given 't' time
183
183
  moment, using the video cache system.
184
184
  """
185
- return self.reader.video_cache.get_video_frame(self._get_real_t(t))
185
+ return self.reader.video_cache.get_frame(self._get_real_t(t))
186
186
 
187
187
  def get_audio_frame_from_t(
188
188
  self,
@@ -197,7 +197,7 @@ class Video:
197
197
 
198
198
  TODO: Is this actually necessary (?)
199
199
  """
200
- return self.reader.audio_cache.get_frame_from_t(self._get_real_t(t))
200
+ return self.reader.get_audio_frame_from_t(self._get_real_t(t))
201
201
 
202
202
  def get_audio_frames_from_t(
203
203
  self,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.14
3
+ Version: 0.0.16
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -0,0 +1,25 @@
1
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
2
+ yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
3
+ yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ yta_video_opengl/complete/blend.py,sha256=7qtMlgXM8n1Yfqqdl9SDaDWUk7ZTF4Ui1YuMqce2QlE,2001
5
+ yta_video_opengl/complete/timeline.py,sha256=JWEGgEcO7YkZSNAxnz0JdMQh0ck0AffpCHi1IPhcngc,15429
6
+ yta_video_opengl/complete/track.py,sha256=g6xPAfYl29Ruw1P6ifOZG4j8v_N8kzbFwVaVk25mx6w,13930
7
+ yta_video_opengl/complete/video_on_track.py,sha256=KROAI0bndnfcvKlHGsSEyWg9o1xozW0PI_Rhqp0r9kw,4844
8
+ yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
9
+ yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
10
+ yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
11
+ yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
12
+ yta_video_opengl/reader/__init__.py,sha256=kKvOAEeDjIwAaWtpDEQHdAd_Gwk3Ssz2tv6gpNwVkQo,19644
13
+ yta_video_opengl/reader/cache/__init__.py,sha256=PAfGM2J-8Vv6p6Cd9aAUvyBcw3rjx2gy_2pJO22VtDM,7020
14
+ yta_video_opengl/reader/cache/audio.py,sha256=cm_1D5f5RnmJgaidA1pnEhTPF8DE0mU2MofmwjU_b5k,6781
15
+ yta_video_opengl/reader/cache/utils.py,sha256=9aJ6qyUFRvoh2jRbIvtF_-1MOm_sgQtPiy0WXLCZYcA,1402
16
+ yta_video_opengl/reader/cache/video.py,sha256=CSVgb3Sjqzk22sQkukoakVzms-wwZpXOT61Y6tirhjg,3292
17
+ yta_video_opengl/t.py,sha256=xOhT1xBEwChlXf-Tuy-WxA_08iRJWVlnL_Hyzr-9-sk,6633
18
+ yta_video_opengl/tests.py,sha256=6QvJx9y4kCiq7b9-AKMetzGuJjd__pTBK5r4tJp3aso,27321
19
+ yta_video_opengl/utils.py,sha256=yUi17EjNR4SVpvdDUwUaKl4mBCb1uyFCSGoIX3Zr2F0,15586
20
+ yta_video_opengl/video.py,sha256=JPIWDQcYlLi8eT2LOFQtS1jVu5xVmW4bz1VMtP0gMeA,8626
21
+ yta_video_opengl/writer.py,sha256=QwvjQcEkzn1WAVqVTFiI6tYIXJO67LKKUTJGO_eflFM,8893
22
+ yta_video_opengl-0.0.16.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
23
+ yta_video_opengl-0.0.16.dist-info/METADATA,sha256=VYswuzvZl8rQv0OHZuqp5BHjquuh-NhLrRV-mNN1_Xo,714
24
+ yta_video_opengl-0.0.16.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
25
+ yta_video_opengl-0.0.16.dist-info/RECORD,,