yta-video-opengl 0.0.21__tar.gz → 0.0.22__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/PKG-INFO +2 -1
  2. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/pyproject.toml +5 -1
  3. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/frame_generator.py +2 -1
  4. yta_video_opengl-0.0.22/src/yta_video_opengl/media.py +347 -0
  5. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/tests.py +6 -1
  6. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/LICENSE +0 -0
  7. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/README.md +0 -0
  8. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/__init__.py +0 -0
  9. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/audio.py +0 -0
  10. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/classes.py +0 -0
  11. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/__init__.py +0 -0
  12. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/frame_combinator.py +0 -0
  13. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/frame_wrapper.py +0 -0
  14. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/timeline.py +0 -0
  15. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/track/__init__.py +0 -0
  16. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/track/media/__init__.py +0 -0
  17. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/track/parts.py +0 -0
  18. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/complete/track/utils.py +0 -0
  19. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/nodes/__init__.py +0 -0
  20. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
  21. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
  22. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
  23. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/reader/__init__.py +0 -0
  24. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/reader/cache/__init__.py +0 -0
  25. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/reader/cache/audio.py +0 -0
  26. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/reader/cache/utils.py +0 -0
  27. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/reader/cache/video.py +0 -0
  28. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/t.py +0 -0
  29. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/utils.py +0 -0
  30. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/video.py +0 -0
  31. {yta_video_opengl-0.0.21 → yta_video_opengl-0.0.22}/src/yta_video_opengl/writer.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.21
3
+ Version: 0.0.22
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.9
10
10
  Requires-Dist: av (>=0.0.1,<19.0.0)
11
11
  Requires-Dist: moderngl (>=0.0.1,<9.0.0)
12
12
  Requires-Dist: numpy (>=0.0.1,<9.0.0)
13
+ Requires-Dist: pillow (>=0.0.1,<99.0.0)
13
14
  Requires-Dist: quicktions (>=0.0.1,<9.0.0)
14
15
  Requires-Dist: yta_timer (>0.0.1,<1.0.0)
15
16
  Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "yta-video-opengl"
3
- version = "0.0.21"
3
+ version = "0.0.22"
4
4
  description = "Youtube Autonomous Video OpenGL Module"
5
5
  authors = [
6
6
  {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -15,6 +15,10 @@ dependencies = [
15
15
  "moderngl (>=0.0.1,<9.0.0)",
16
16
  "numpy (>=0.0.1,<9.0.0)",
17
17
  "quicktions (>=0.0.1,<9.0.0)",
18
+ # We need 'pillow' just to read an image
19
+ # and transform it into a numpy array so
20
+ # we can handle the ImageMedia
21
+ "pillow (>=0.0.1,<99.0.0)"
18
22
  ]
19
23
 
20
24
  [tool.poetry]
@@ -60,8 +60,9 @@ class _FrameGenerator:
60
60
  """
61
61
  # TODO: I think 'ones' only work if dtype
62
62
  # is int
63
- return np.ones(
63
+ return np.full(
64
64
  shape = (size[1], size[0], 3),
65
+ fill_value = (255, 255, 255),
65
66
  dtype = dtype
66
67
  )
67
68
 
@@ -0,0 +1,347 @@
1
+
2
+ """
3
+ PyAV uses Pillow to load an image as a
4
+ numpy array, but it does not load the alpha channel.
5
+ """
6
+ from yta_video_opengl.complete.frame_generator import VideoFrameGenerator
7
+ from yta_video_opengl.writer import VideoWriter
8
+ from yta_video_opengl.t import get_ts
9
+ from av.video.frame import VideoFrame
10
+ from yta_validation.parameter import ParameterValidator
11
+ from yta_validation import PythonValidator
12
+ from quicktions import Fraction
13
+ from PIL import Image
14
+ from abc import ABC, abstractmethod
15
+ from typing import Union
16
+
17
+ import numpy as np
18
+
19
+
20
+ class _Media(ABC):
21
+ """
22
+ Class to represent an element that can be
23
+ used in our video editor, that can be a
24
+ video, an audio, an image, a color frame,
25
+ etc.
26
+ """
27
+
28
+ @property
29
+ def duration(
30
+ self
31
+ ) -> Fraction:
32
+ """
33
+ The duration of the media.
34
+ """
35
+ return self.end - self.start
36
+
37
+ def __init__(
38
+ self,
39
+ start: Union[int, float, Fraction],
40
+ end: Union[int, float, Fraction]
41
+ ):
42
+ self.start: Fraction = Fraction(start)
43
+ """
44
+ The time moment 't' in which the media
45
+ should start.
46
+ """
47
+ self.end: Fraction = Fraction(end)
48
+ """
49
+ The time moment 't' in which the media
50
+ should end.
51
+ """
52
+
53
+ def _get_t(
54
+ self,
55
+ t: Union[int, float, Fraction]
56
+ ) -> Fraction:
57
+ """
58
+ Get the real 't' time moment based on the
59
+ media 'start' and 'end'. If they were
60
+ asking for the t=0.5s but our media was
61
+ subclipped to [1.0, 2.0), the 0.5s must be
62
+ actually the 1.5s of the media because of
63
+ the subclipped time range.
64
+
65
+ This method will raise an exception if the
66
+ 't' time moment provided, when adjusted to
67
+ the internal media start and end time, is
68
+ not valid (is after the end).
69
+ """
70
+ t += self.start
71
+
72
+ if t >= self.end:
73
+ raise Exception(f'The "t" ({str(t)}) provided is out of range. This media lasts from [{str(self.start)}, {str(self.end)}).')
74
+
75
+ return t
76
+
77
class _CanBeSavedAsVideo:
    """
    Class to implement the functionality of being
    written into a video file, frame by frame.

    NOTE(review): this mixin reads 'self.size',
    'self.start', 'self.end' and calls
    'self.get_frame_from_t', so the consumer class
    must provide them (as _StaticMedia does) — confirm
    the full contract.
    """

    def save_as(
        self,
        output_filename: str,
        fps: Union[int, float, Fraction] = 60.0,
        video_codec: str = 'h264',
        video_pixel_format: str = 'yuv420p'
    ) -> '_CanBeSavedAsVideo':
        """
        Save the media as a video to the
        given 'output_filename' file.
        """
        writer = VideoWriter(output_filename)

        # NOTE(review): int() truncates non-integer
        # rates (e.g. 29.97 -> 29) — confirm intended
        fps = int(fps)

        # TODO: This has to be dynamic according to the
        # video we are writing (?)
        writer.set_video_stream(
            codec_name = video_codec,
            fps = fps,
            size = self.size,
            pixel_format = video_pixel_format
        )

        # One frame per 't' between 'start' and 'end',
        # with sequential pts over a 1/fps time base
        for index, t in enumerate(get_ts(self.start, self.end, fps)):
            frame = self.get_frame_from_t(t)
            frame.pts = index
            frame.time_base = Fraction(1, fps)

            writer.mux_video_frame(
                frame = frame
            )

        # Muxing None flushes the pending packets
        # before the output is closed
        writer.mux_video_frame(None)
        writer.output.close()

        return self
121
+
122
class _HasVideoFrame(ABC):
    """
    Class to be inherited by all those classes that
    have video frames.
    """

    # TODO: Maybe force
    # size: tuple[int, int] = (1920, 1080),
    # dtype: dtype = np.uint8,
    # format: str = 'rgb24'
    # on __init__ (?)

    @abstractmethod
    def get_frame_from_t(
        self,
        t: Union[int, float, Fraction]
    ) -> 'VideoFrame':
        """
        Get the video frame with the given 't' time
        moment.
        """
        # TODO: Maybe
        pass
145
+
146
class _HasAudioFrame(ABC):
    """
    Class to be inherited by all those classes that
    have audio frames.
    """

    @abstractmethod
    def get_audio_frames_from_t(
        self,
        t: Union[int, float, Fraction]
    ):
        """
        Get the sequence of audio frames for the given
        video 't' time moment, using the audio cache
        system.

        This is useful when we want to write a video
        frame with its audio, so we obtain all the
        audio frames associated to it (remember that a
        video frame is associated with more than 1
        audio frame).
        """
        pass
169
+
170
class _StaticMedia(_Media, _HasVideoFrame, _CanBeSavedAsVideo):
    """
    A media whose content doesn't change during its
    whole duration: the very same frame is displayed
    the entire time.

    Must be implemented by our ImageMedia and
    ColorMedia, whose single frame is an image or a
    color frame respectively.
    """

    @property
    @abstractmethod
    def frame(
        self
    ) -> VideoFrame:
        """
        The frame that must be displayed.
        """
        pass

    def __init__(
        self,
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction]
    ):
        _Media.__init__(
            self,
            start = start,
            end = end
        )

    def get_frame_from_t(
        self,
        t: Union[int, float, Fraction]
    ) -> VideoFrame:
        """
        Get the video frame with the given 't' time
        moment.
        """
        # Only validating here: this raises when 't',
        # adjusted to the internal 'start' and 'end',
        # is out of range. The returned frame is the
        # same whatever the 't' is.
        self._get_t(t)

        # TODO: What if we need to resize the source
        # before putting it into a video frame (?)
        return self.frame
219
+
220
class ColorMedia(_StaticMedia):
    """
    A media that will be a single color video frame
    during its whole duration.
    """

    @property
    def frame(
        self
    ) -> VideoFrame:
        """
        The frame that must be displayed.
        """
        # TODO: Make this use the 'color'
        # TODO: I need to implement the alpha layer
        return VideoFrameGenerator().background.full_white(
            size = self.size
        )

    def __init__(
        self,
        # TODO: Apply format
        color: any,
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction],
        size: tuple[int, int] = (1920, 1080),
        # TODO: Do I need this (?)
        # dtype: dtype = np.uint8,
        # format: str = 'rgb24',
    ):
        # Validate like ImageMedia does so both static
        # medias reject invalid time ranges consistently
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
        ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)

        # TODO: Apply format
        # The color used to build the single frame that
        # is displayed during the whole duration
        self._color: any = color
        # The size of the media frame
        self.size: tuple[int, int] = size

        super().__init__(
            start = start,
            end = end
        )
266
+
267
class ImageMedia(_StaticMedia):
    """
    A media that will be a single image during its
    whole duration.
    """

    @property
    def frame(
        self
    ) -> VideoFrame:
        """
        The frame that must be displayed.
        """
        # The stored array is (h, w, 4) when it was
        # loaded with alpha but (h, w, 3) when not, so
        # the format must match the channel count or
        # 'from_ndarray' would raise an exception
        format = (
            'rgba'
            if self._image.shape[2] == 4 else
            'rgb24'
        )

        return VideoFrame.from_ndarray(self._image, format = format)

    def __init__(
        self,
        # TODO: It must be RGB
        image: Union[np.ndarray, str],
        start: Union[int, float, Fraction],
        end: Union[int, float, Fraction],
        size: tuple[int, int] = (1920, 1080),
        do_include_alpha: bool = True,
        # TODO: Do I need this (?)
        # dtype: dtype = np.uint8,
    ):
        ParameterValidator.validate_mandatory_instance_of('image', image, [str, np.ndarray])
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
        ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
        ParameterValidator.validate_mandatory_bool('do_include_alpha', do_include_alpha)

        # Read from disk (honouring the alpha flag)
        # when a filename is given, or accept the
        # numpy array as it is
        image = (
            image_to_numpy_pillow(image, do_include_alpha = do_include_alpha)
            if PythonValidator.is_string(image) else
            image
        )

        # The image used to build the single frame that
        # is displayed during the whole duration
        self._image: np.ndarray = image
        # The size of the media frame
        # NOTE(review): 'size' is stored but the image
        # is never resized to it — confirm intended
        self.size: tuple[int, int] = size

        super().__init__(
            start = start,
            end = end
        )
327
+
328
+ # TODO: Need to implement Video as the others
329
+ # are implemented here
330
+
331
+ # TODO: I think I have this util in another
332
+ # library, so please check it...
333
def image_to_numpy_pillow(
    filename: str,
    do_include_alpha: bool = True
) -> 'np.ndarray':
    """
    Read the image file 'filename' and transform it
    into a numpy array, reading also the alpha
    channel when 'do_include_alpha' is True.
    """
    # 'RGBA' yields a (h, w, 4) array, 'RGB' a (h, w, 3)
    target_mode = 'RGBA' if do_include_alpha else 'RGB'

    return np.array(Image.open(filename).convert(target_mode))
@@ -622,8 +622,13 @@ def video_modified_stored():
622
622
 
623
623
  # timeline.render('test_files/concatenated.mp4')
624
624
 
625
+ from yta_video_opengl.media import ImageMedia, ColorMedia
625
626
 
626
- # return
627
+ image_media = ImageMedia('C:/Users/dania/Desktop/PROYECTOS/RECURSOS/mobile_alpha.png', 0, 1).save_as('test_files/test_image.mp4')
628
+
629
+ color_media = ColorMedia('random', 0, 1).save_as('test_files/test_color.mp4')
630
+
631
+ return
627
632
 
628
633
  # TODO: This test will add videos that
629
634
  # must be played at the same time