yta-video-opengl 0.0.10__py3-none-any.whl → 0.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yta_video_opengl/complete/timeline.py ADDED
@@ -0,0 +1,179 @@
+ from yta_video_opengl.complete.track import Track
+ from yta_video_opengl.video import Video
+ from yta_validation.parameter import ParameterValidator
+ from typing import Union
+ from fractions import Fraction
+
+ import numpy as np
+ import av
+
+
+ class Timeline:
+     """
+     Class to represent all the tracks that
+     exist on the project and to handle the
+     combination of all their frames.
+     """
+
+     @property
+     def end(
+         self
+     ) -> float:
+         """
+         The end of the last video of the track
+         that lasts the longest. This is the last
+         time moment that has to be rendered.
+         """
+         return max(track.end for track in self.tracks)
+
+     def __init__(
+         self,
+         size: tuple[int, int] = (1920, 1080),
+         fps: float = 60.0
+     ):
+         # TODO: By now we are using just two video
+         # tracks to test the composition
+         # TODO: We need to be careful with the
+         # priority, by now it is defined by its
+         # position in the array
+         self.tracks: list[Track] = [Track(), Track()]
+         """
+         All the video tracks we are handling.
+         """
+         # TODO: Handle size and fps
+         self.size = size
+         self.fps = fps
+
+     # TODO: Create 'add_track' method, but by now
+     # we are handling only one
+     def add_video(
+         self,
+         video: Video,
+         t: float,
+         # TODO: This is for testing, it has to
+         # disappear
+         do_use_second_track: bool = False
+     ) -> 'Timeline':
+         """
+         Add the provided 'video' to the timeline,
+         starting at the provided 't' time moment.
+
+         TODO: The 'do_use_second_track' parameter
+         is temporary.
+         """
+         index = 1 * do_use_second_track
+
+         self.tracks[index].add_video(video, t)
+
+         return self
+
+     # TODO: This method is not for the Track but
+     # for the timeline, as one track can only
+     # have consecutive elements
+     def get_frame_at(
+         self,
+         t: float
+     ) -> Union['VideoFrame', None]:
+         """
+         Get all the frames that are played at the
+         't' time provided, but combined into one.
+         """
+         frames = (
+             track.get_frame_at(t)
+             for track in self.tracks
+         )
+
+         frames = [
+             frame
+             for frame in frames
+             if frame is not None
+         ]
+
+         return (
+             # TODO: Combine them, I send the first one by now
+             frames[0]
+             if len(frames) > 0 else
+             # TODO: Should I send None or a full
+             # black (or transparent) frame? I think
+             # None is better because I don't know
+             # the size here (?)
+             None
+         )
+
+     def render(
+         self,
+         filename: str,
+         start: float = 0.0,
+         end: Union[float, None] = None
+     ) -> 'Timeline':
+         """
+         Render the time range between the given
+         'start' and 'end' and store the result with
+         the also provided 'filename'.
+
+         If no 'start' and 'end' are provided, the
+         whole project will be rendered.
+         """
+         ParameterValidator.validate_mandatory_string('filename', filename, do_accept_empty = False)
+         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+         ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
+
+         # TODO: Limit 'end' a bit...
+         end = (
+             self.end
+             if end is None else
+             end
+         )
+
+         if start >= end:
+             raise Exception('The provided "start" cannot be greater than or equal to the "end" provided.')
+         # TODO: Obtain all the 't', based on 'fps',
+         # that we need to render from 'start' to
+         # 'end'
+         # TODO: I don't want to have this here
+         def generate_times(start: float, end: float, fps: int):
+             dt = 1.0 / fps
+             times = []
+
+             t = start
+             while t <= end:
+                 times.append(t + 0.000001)
+                 t += dt
+
+             return times
+
+         from yta_video_opengl.writer import VideoWriter
+
+         writer = VideoWriter('test_files/output_render.mp4')
+         # TODO: This has to be dynamic according to the
+         # video we are writing
+         writer.set_video_stream(
+             codec_name = 'h264',
+             fps = 60,
+             size = (1920, 1080),
+             pixel_format = 'yuv420p'
+         )
+
+         for t in generate_times(start, end, self.fps):
+             frame = self.get_frame_at(t)
+
+             if frame is None:
+                 # Replace with a black (height, width, channels) background if no frame
+                 frame = av.VideoFrame.from_ndarray(
+                     array = np.zeros((1080, 1920, 3), dtype = np.uint8),
+                     format = 'rgb24'
+                 )
+
+             # We need to adjust our output elements to be
+             # consecutive and with the right values
+             # TODO: We are using int() for fps but it's a float...
+             frame.time_base = Fraction(1, int(self.fps))
+             frame.pts = int(t / frame.time_base)
+
+             # TODO: We need to handle the audio
+             writer.mux_video_frame(
+                 frame = frame
+             )
+
+         writer.mux_video_frame(None)
+         writer.output.close()
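The new Timeline class is driven exactly like the snippet added to tests.py further down: create a Timeline, place Video clips at timeline positions, and render to a file. A minimal usage sketch (VIDEO_PATH and OUTPUT_PATH are placeholder paths, not part of the package):

    from yta_video_opengl.video import Video
    from yta_video_opengl.complete.timeline import Timeline

    VIDEO_PATH = 'test_files/input.mp4'      # placeholder source clip
    OUTPUT_PATH = 'test_files/output.mp4'    # placeholder render target

    timeline = Timeline(size = (1920, 1080), fps = 60.0)
    # A 0.5s subclip (0.25s-0.75s of the source) placed at t=0.5 on the first track
    timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.5)
    # A second clip placed later on the same track
    timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.5)
    # Render the whole project, from t=0.0 up to Timeline.end
    timeline.render(OUTPUT_PATH)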
yta_video_opengl/complete/track.py ADDED
@@ -0,0 +1,136 @@
+ from yta_video_opengl.complete.video_on_track import VideoOnTrack
+ from yta_video_opengl.video import Video
+ from yta_validation.parameter import ParameterValidator
+ from typing import Union
+
+
+ # TODO: This is called Track but it is
+ # handling videos only. Should I have
+ # VideoTrack and AudioTrack (?)
+ class Track:
+     """
+     Class to represent a track in which we place
+     videos, images and audio to build a video
+     project.
+     """
+
+     @property
+     def end(
+         self
+     ) -> float:
+         """
+         The end of the last video of this track,
+         which is also the end of the track. This
+         is the last time moment that has to be
+         rendered.
+         """
+         return (
+             0.0
+             if len(self.videos) == 0 else
+             max(
+                 video.end
+                 for video in self.videos
+             )
+         )
+
+     def __init__(
+         self
+     ):
+         self.videos: list[VideoOnTrack] = []
+         """
+         The list of 'VideoOnTrack' instances that
+         must play on this track.
+         """
+
+     def _is_free(
+         self,
+         start: float,
+         end: float
+     ) -> bool:
+         """
+         Check if the time range between the given
+         'start' and 'end' is free, or if some video
+         is playing at any moment within it.
+         """
+         return not any(
+             (
+                 video.start < end and
+                 video.end > start
+             )
+             for video in self.videos
+         )
+
+     def _get_video_at_t(
+         self,
+         t: float
+     ) -> Union[VideoOnTrack, None]:
+         """
+         Get the video that is being played at
+         the 't' time moment provided.
+         """
+         for video in self.videos:
+             if video.start <= t < video.end:
+                 return video
+
+         return None
+
+     def get_frame_at(
+         self,
+         t: float
+     ) -> Union['VideoFrame', None]:
+         """
+         Get the frame that must be displayed at
+         the 't' time moment provided, which is
+         a frame from the video that is being
+         played at that time moment.
+
+         Remember, this 't' time moment provided
+         is about the track, and we make the
+         conversion to the actual video 't' to
+         get the frame.
+         """
+         video = self._get_video_at_t(t)
+
+         return (
+             video.get_frame_at(t)
+             if video is not None else
+             None
+         )
+
+     def add_video(
+         self,
+         video: Video,
+         t: Union[float, None] = None
+     ) -> 'Track':
+         """
+         Add the 'video' provided to the track. If
+         a 't' time moment is provided, the video
+         will be added at that time moment if
+         possible. If there is no other video
+         placed in the time gap between the given
+         't' and the provided 'video' duration, it
+         will be added successfully. Otherwise,
+         an exception will be raised.
+
+         If 't' is None, the first available 't'
+         time moment will be used, which is 0.0
+         if there is no video, or the end of the last video.
+         """
+         ParameterValidator.validate_mandatory_instance_of('video', video, Video)
+         ParameterValidator.validate_positive_float('t', t, do_include_zero = True)
+
+         if t is not None:
+             # TODO: We can have many different strategies
+             # that we could define in the '__init__' maybe
+             if not self._is_free(t, (t + video.duration)):
+                 raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
+         else:
+             t = self.end
+
+         self.videos.append(VideoOnTrack(
+             video,
+             t
+         ))
+
+         # TODO: Maybe return the VideoOnTrack instead (?)
+         return self
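Track._is_free is a standard half-open interval overlap test: the requested range [start, end) is free only if no placed video satisfies video.start < end and video.end > start. A standalone sketch of that predicate with made-up intervals (plain tuples instead of VideoOnTrack instances):

    def is_free(placed: list[tuple[float, float]], start: float, end: float) -> bool:
        # True when no (clip_start, clip_end) interval overlaps [start, end)
        return not any(
            clip_start < end and clip_end > start
            for clip_start, clip_end in placed
        )

    placed = [(0.5, 1.0), (1.5, 2.0)]
    print(is_free(placed, 1.0, 1.5))   # True: fits exactly in the gap
    print(is_free(placed, 0.6, 0.9))   # False: overlaps the first clip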
yta_video_opengl/complete/video_on_track.py ADDED
@@ -0,0 +1,100 @@
+ """
+ If we have a video placed in a timeline,
+ starting at t=2s, and the video lasts
+ 2 seconds, the `t` time range in which the
+ video is playing is `[2s, 4s]`, so here
+ you have some examples with global `t`
+ values:
+ - `t=1`, the video is not playing because
+ it starts at `t=2`
+ - `t=3`, the video is playing, it started
+ at `t=2` and it has been playing for 1s
+ - `t=5`, the video is not playing because
+ it started at `t=2`, lasting 2s, so it
+ finished at `t=4`
+ """
+ from yta_video_opengl.video import Video
+ from yta_validation.parameter import ParameterValidator
+ from av.video.frame import VideoFrame
+ from typing import Union
+
+
+ class VideoOnTrack:
+     """
+     A video in the timeline.
+     """
+
+     @property
+     def end(
+         self
+     ) -> float:
+         """
+         The end time moment 't' of the video once
+         it has been placed on the track, which
+         is affected by the video duration and its
+         start time moment on the track.
+
+         This end is different from the video end.
+         """
+         return self.start + self.video.duration
+
+     def __init__(
+         self,
+         video: Video,
+         start: float = 0.0
+     ):
+         ParameterValidator.validate_mandatory_instance_of('video', video, Video)
+         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+
+         self.video: Video = video
+         """
+         The video source, with all its properties,
+         that is placed in the timeline.
+         """
+         self.start: float = float(start)
+         """
+         The time moment at which the video should
+         start playing, within the timeline.
+
+         This is the time with respect to the timeline
+         and it is different from the video `start`
+         time, which is related to the file.
+         """
+
+     def _get_video_t(
+         self,
+         t: float
+     ) -> float:
+         """
+         The video 't' time moment for the given
+         global 't' time moment. This 't' is the one
+         to use inside the video content to display
+         its frame.
+         """
+         return t - self.start
+
+     def is_playing(
+         self,
+         t: float
+     ) -> bool:
+         """
+         Check if this video is playing at the general
+         't' time moment, which is a global time moment
+         for the whole project.
+         """
+         return self.start <= t < self.end
+
+     def get_frame_at(
+         self,
+         t: float
+     ) -> Union[VideoFrame, None]:
+         """
+         Get the frame for the 't' time moment provided,
+         which can be None if the video is not playing
+         at that moment.
+         """
+         return (
+             self.video.reader.get_frame_from_t(self._get_video_t(t))
+             if self.is_playing(t) else
+             None
+         )
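VideoOnTrack only shifts time: a global timeline t is valid while start <= t < end, and the source frame is looked up at t - start. Reproducing the docstring example (a clip placed at t=2s lasting 2s) with a small stand-in class, since the real one needs a Video instance:

    class ClipOnTrack:
        # Simplified stand-in for VideoOnTrack: only start and duration
        def __init__(self, start: float, duration: float):
            self.start = start
            self.end = start + duration

        def is_playing(self, t: float) -> bool:
            return self.start <= t < self.end

        def to_clip_t(self, t: float) -> float:
            # Same conversion as VideoOnTrack._get_video_t
            return t - self.start

    clip = ClipOnTrack(start = 2.0, duration = 2.0)
    print(clip.is_playing(1.0))   # False, it starts at t=2
    print(clip.is_playing(3.0))   # True, it has been playing for 1s
    print(clip.to_clip_t(3.0))    # 1.0, the source time to fetch the frame from
    print(clip.is_playing(5.0))   # False, it ended at t=4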
yta_video_opengl/reader/__init__.py CHANGED
@@ -575,6 +575,16 @@ class VideoReader:
          """
          return self.video_cache.get_frame(index)
  
+     def get_frame_from_t(
+         self,
+         t: float
+     ) -> 'VideoFrame':
+         """
+         Get the video frame at the given 't' time
+         moment, using the video cache system.
+         """
+         return self.video_cache.get_frame_from_t(t)
+
      # TODO: Will we use this (?)
      def get_audio_frame(
          self,
yta_video_opengl/reader/cache.py CHANGED
@@ -16,17 +16,22 @@ memory all those frames to be handled fast. It
  will remove the old frames if needed to use only
  the 'size' we set when creating it.
  """
- from yta_video_opengl.utils import t_to_pts, pts_to_t, pts_to_index
+ from yta_video_opengl.utils import t_to_pts, pts_to_t, pts_to_index, index_to_pts
+ from yta_video_frame_time import T
  from av.container import InputContainer
  from av.video.stream import VideoStream
  from av.audio.stream import AudioStream
  from av.video.frame import VideoFrame
  from av.audio.frame import AudioFrame
  from yta_validation.parameter import ParameterValidator
+ from yta_validation import PythonValidator
  from fractions import Fraction
  from collections import OrderedDict
  from typing import Union
  
+ import numpy as np
+ import math
+
  
  class VideoFrameCache:
      """
@@ -60,11 +65,11 @@ class VideoFrameCache:
          self,
          container: InputContainer,
          stream: Union[VideoStream, AudioStream],
-         size: int = 50
+         size: Union[int, None] = None
      ):
          ParameterValidator.validate_mandatory_instance_of('container', container, InputContainer)
          ParameterValidator.validate_mandatory_instance_of('stream', stream, [VideoStream, AudioStream])
-         ParameterValidator.validate_mandatory_positive_int('size', size)
+         ParameterValidator.validate_positive_int('size', size)
  
          self.container: InputContainer = container
          """
@@ -78,7 +83,7 @@ class VideoFrameCache:
          """
          The cache ordered dictionary.
          """
-         self.size = size
+         self.size: Union[int, None] = size
          """
          The size (in number of frames) of the cache.
          """
@@ -99,6 +104,31 @@ class VideoFrameCache:
              if packet.is_keyframe:
                  self.key_frames_pts.append(packet.pts)
  
+         # The cache size is auto-calculated to use
+         # the number of frames in the biggest interval
+         # between consecutive key frames, or a default
+         # value
+         fps = (
+             float(self.stream.average_rate)
+             if PythonValidator.is_instance_of(self.stream, VideoStream) else
+             float(self.stream.rate)
+         )
+         # Intervals, but in number of frames
+         intervals = np.diff(
+             # Intervals of time between keyframes
+             np.array(self.key_frames_pts) * self.stream.time_base
+         ) * fps
+
+         self.size = (
+             math.ceil(np.max(intervals))
+             if intervals.size > 0 else
+             (
+                 self.size or
+                 # TODO: Make this 'default_size' a setting or something
+                 60
+             )
+         )
+
          self.container.seek(0)
  
      def _get_nearest_keyframe_fps(
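The auto-sized cache takes the pts gaps between consecutive keyframes, converts them to seconds with the stream time_base, and multiplies by the fps to get frames per keyframe interval; the largest value (rounded up) becomes the cache size. A worked sketch with hypothetical stream values (a keyframe every 2 seconds, time_base 1/12800, 30 fps):

    import math
    from fractions import Fraction

    import numpy as np

    key_frames_pts = [0, 25600, 51200]   # hypothetical keyframe timestamps
    time_base = Fraction(1, 12800)       # seconds per pts tick
    fps = 30.0

    # Seconds between consecutive keyframes, then frames per interval
    intervals = np.diff(np.array(key_frames_pts, dtype = float) * float(time_base)) * fps
    size = math.ceil(np.max(intervals)) if intervals.size > 0 else 60

    print(size)   # 60: one full group of pictures fits in the cache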
@@ -117,10 +147,29 @@
              if key_frame_pts <= pts
          ])
  
+     def _store_frame_in_cache(
+         self,
+         frame: Union[VideoFrame, AudioFrame]
+     ) -> Union[VideoFrame, AudioFrame]:
+         """
+         Store the provided 'frame' in the cache if it
+         is not already there, removing the first item
+         of the cache if full.
+         """
+         if frame.pts not in self.cache:
+             # TODO: The 'format' must be dynamic
+             self.cache[frame.pts] = frame
+
+         # Clean cache if full
+         if len(self.cache) > self.size:
+             self.cache.popitem(last = False)
+
+         return frame
+
      def _get_frame_by_pts(
          self,
          pts: int
-     ):
+     ) -> Union[VideoFrame, AudioFrame, None]:
          """
          Get the frame that has the provided 'pts'.
  
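_store_frame_in_cache leans on OrderedDict insertion order: popitem(last = False) removes the oldest entry, so the cache behaves as a FIFO holding at most 'size' decoded frames. A tiny sketch of that eviction behaviour with hypothetical pts keys:

    from collections import OrderedDict

    cache = OrderedDict()
    size = 3

    for pts in (0, 256, 512, 768):
        if pts not in cache:
            cache[pts] = f'frame@{pts}'
        # Clean cache if full, dropping the oldest frame first
        if len(cache) > size:
            cache.popitem(last = False)

    print(list(cache))   # [256, 512, 768]: pts 0 has been evicted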
@@ -145,18 +194,14 @@
                  continue
  
              # Store in cache if needed
-             if frame.pts not in self.cache:
-                 # TODO: The 'format' must be dynamic
-                 self.cache[frame.pts] = frame.to_ndarray(format = "rgb24")
-
-                 # Clean cache if full
-                 if len(self.cache) > self.size:
-                     self.cache.popitem(last = False)
+             self._store_frame_in_cache(frame)
  
              if frame.pts >= pts:
                  decoded = self.cache[frame.pts]
                  break
  
+         # TODO: Is this working? We need previous
+         # frames to be able to decode...
          return decoded
  
      def get_frame(
@@ -167,14 +212,24 @@
          Get the frame with the given 'index' from
          the cache.
          """
-         # TODO: Maybe we can accept 't' and 'pts' also
-         target_pts = int(index / self.fps / self.time_base)
+         # TODO: Maybe we can accept 'pts' also
+         pts = index_to_pts(index, self.time_base, self.fps)
  
          return (
-             self.cache[target_pts]
-             if target_pts in self.cache else
-             self._get_frame_by_pts(target_pts)
+             self.cache[pts]
+             if pts in self.cache else
+             self._get_frame_by_pts(pts)
          )
+
+     def get_frame_from_t(
+         self,
+         t: float
+     ) -> Union[VideoFrame, AudioFrame]:
+         """
+         Get the frame at the given 't' time moment
+         from the cache.
+         """
+         return self.get_frame(T.video_frame_time_to_video_frame_index(t, self.fps))
  
      def get_frames(
          self,
@@ -186,10 +241,22 @@
          the provided 'start' and 'end' time in
          seconds.
          """
-         # TODO: I create this method by default using
-         # the cache. Think about how to implement it
-         # and apply it here, please.
-         # Go to the nearest key frame
+         # We use the cache as iterator if all the frames
+         # requested are stored there
+         pts_list = [
+             t_to_pts(t, self.time_base)
+             for t in T.get_frame_indexes(self.stream.duration, self.fps, start, end)
+         ]
+
+         if all(
+             pts in self.cache
+             for pts in pts_list
+         ):
+             for pts in pts_list:
+                 yield self.cache[pts]
+             return # All the frames were served from the cache
+
+         # If not all of them are cached, we ignore the cache
+         # because we need to decode, and they are all consecutive
          start = t_to_pts(start, self.time_base)
          end = (
              t_to_pts(end, self.time_base)
@@ -206,6 +273,9 @@
              if frame.pts is None:
                  continue
  
+             # We store all the frames in the cache
+             self._store_frame_in_cache(frame)
+
              if frame.pts < start:
                  continue
  
yta_video_opengl/tests.py CHANGED
@@ -582,6 +582,17 @@ def video_modified_stored():
      from yta_video_opengl.classes import WavingFrame, BreathingFrame, HandheldFrame, OrbitingFrame, RotatingInCenterFrame, StrangeTvFrame, GlitchRgbFrame, WavingNode
      from yta_video_opengl.utils import texture_to_frame, frame_to_texture
      from yta_video_opengl.video import Video
+     from yta_video_opengl.complete.timeline import Timeline
+
+     video = Video(VIDEO_PATH, 0.25, 0.75)
+     timeline = Timeline()
+     timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.5)
+     # This is successfully raising an exception
+     #timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 0.6)
+     timeline.add_video(Video(VIDEO_PATH, 0.25, 0.75), 1.5)
+     timeline.render(OUTPUT_PATH)
+
+     return
  
      Video(VIDEO_PATH, 0.25, 0.75).save_as(OUTPUT_PATH)
  
yta_video_opengl/utils.py CHANGED
@@ -332,6 +332,17 @@ def pts_to_index(
      """
      return int(round(pts_to_t(pts, stream_time_base) * fps))
  
+ def index_to_pts(
+     index: int,
+     stream_time_base: 'Fraction',
+     fps: float
+ ) -> int:
+     """
+     Transform a frame index into a 'pts' packet
+     timestamp.
+     """
+     return int(index / fps / stream_time_base)
+
  def pts_to_t(
      pts: int,
      stream_time_base: 'Fraction'
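index_to_pts mirrors pts_to_index: divide the frame index by fps to get seconds, then by the stream time_base to get pts ticks. With a hypothetical 60 fps stream whose time_base is 1/15360, consecutive frames are 256 pts apart (exact Fractions are used here just to keep the arithmetic clean):

    from fractions import Fraction

    fps = Fraction(60)
    time_base = Fraction(1, 15360)

    # pts ticks per frame: 1 / fps / time_base = 15360 / 60
    print(1 / fps / time_base)           # 256
    # Frame 150 sits at t = 2.5s, i.e. pts 2.5 * 15360
    print(int(150 / fps / time_base))    # 38400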
yta_video_opengl/writer.py CHANGED
@@ -59,6 +59,9 @@ class VideoWriter:
              options = options
          )
  
+         # We need to force this or it will not work
+         self.video_stream.time_base = Fraction(1, int(fps))
+
          if size is not None:
              self.video_stream.width = size[0]
              self.video_stream.height = size[1]
@@ -190,6 +193,7 @@ class VideoWriter:
              # TODO: What strategy should we adopt with
              # the packets that cannot be handled
              # properly (?)
+             print('Invalid packet')
              print(packet)
              pass
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.10
+ Version: 0.0.11
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com
yta_video_opengl-0.0.11.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
+ yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
+ yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ yta_video_opengl/complete/timeline.py,sha256=o6nAtpkdr6RzYlIEB9kLfNAZiu9eRXyA6QE3TXuNL_I,5465
+ yta_video_opengl/complete/track.py,sha256=gPushEuLYIoMBjvS3wTDbOKSV8VEtVTqrEm666ABFUc,3937
+ yta_video_opengl/complete/video_on_track.py,sha256=sTSd8gqNwK1wT1uzJEGkJqTbu5qM3wWqk361K7OAbl0,2851
+ yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
+ yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
+ yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
+ yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
+ yta_video_opengl/reader/__init__.py,sha256=zqVMMUjWHCsOaWFrnkYdnbO4YEAx3zLIBTuMnzQJRug,17180
+ yta_video_opengl/reader/cache.py,sha256=Gtwl2kRLSJn3JJOVI-45Wc2vMPYj5-_g3AR5wPbw8OY,9152
+ yta_video_opengl/tests.py,sha256=p2Pq4o2H0DMZkV7HNNNAlebSjrDMHKTKk0d_weiiPHQ,26221
+ yta_video_opengl/utils.py,sha256=_89-IrTDFVbY86qLlJzw42MxYAsNAnux_DOO89mul64,10710
+ yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
+ yta_video_opengl/writer.py,sha256=rzOfxPtlClLRIVLFyIOoBVcEgse3ISmpKRWHg8OwFHQ,8355
+ yta_video_opengl-0.0.11.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+ yta_video_opengl-0.0.11.dist-info/METADATA,sha256=8JNEZ05Elie6lMQREjRjGPQQ-kLW-GCW6uZpePTiLgY,671
+ yta_video_opengl-0.0.11.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ yta_video_opengl-0.0.11.dist-info/RECORD,,
yta_video_opengl-0.0.10.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,205
- yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
- yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
- yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
- yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
- yta_video_opengl/nodes/video/opengl.py,sha256=K2pyCJEd9z4gnZqJetKyGPbtHuBzFsx74ZYyzhSqYPo,8510
- yta_video_opengl/reader/__init__.py,sha256=rAWISZ7OzDnzar0At-LCfDA-MmWzax2jT2l5gySv4aw,16911
- yta_video_opengl/reader/cache.py,sha256=UKhZvgY80ySuOYH52ikco6affsm8bjP656EroVR9Utg,6960
- yta_video_opengl/tests.py,sha256=NZ-W1ak-ygwL9wATzEXtlCeCZX74ij_TZhktetMnOD4,25810
- yta_video_opengl/utils.py,sha256=y0N1mS9FjpB4nFnx00K7sIs5EsqMkTe8C0bzLXZe9YM,10479
- yta_video_opengl/video.py,sha256=3n7jgZab7PUSOpODoaH4iNg0sy7NMRo_OaJ4Zj8u0NM,5855
- yta_video_opengl/writer.py,sha256=7xglz8xHOXMtWkctzuB21Y-e9xWFXYcklt3jVUN4svQ,8198
- yta_video_opengl-0.0.10.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
- yta_video_opengl-0.0.10.dist-info/METADATA,sha256=YZ81GUO5J78iri9e_GdjUXafQqjEyohcVjwAuknXGhU,671
- yta_video_opengl-0.0.10.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
- yta_video_opengl-0.0.10.dist-info/RECORD,,