yta-video-opengl 0.0.9__tar.gz → 0.0.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20) hide show
  1. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/PKG-INFO +1 -1
  2. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/pyproject.toml +1 -1
  3. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/classes.py +1 -0
  4. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/__init__.py +0 -0
  5. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/timeline.py +179 -0
  6. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/track.py +136 -0
  7. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/video_on_track.py +100 -0
  8. yta_video_opengl-0.0.11/src/yta_video_opengl/nodes/__init__.py +119 -0
  9. yta_video_opengl-0.0.11/src/yta_video_opengl/nodes/audio/__init__.py +115 -0
  10. yta_video_opengl-0.0.11/src/yta_video_opengl/nodes/video/__init__.py +5 -0
  11. yta_video_opengl-0.0.11/src/yta_video_opengl/nodes/video/opengl.py +309 -0
  12. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/reader/__init__.py +10 -0
  13. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/reader/cache.py +91 -21
  14. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/tests.py +11 -0
  15. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/utils.py +11 -0
  16. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/video.py +44 -15
  17. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/writer.py +4 -0
  18. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/LICENSE +0 -0
  19. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/README.md +0 -0
  20. {yta_video_opengl-0.0.9 → yta_video_opengl-0.0.11}/src/yta_video_opengl/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.9
3
+ Version: 0.0.11
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "yta-video-opengl"
3
- version = "0.0.9"
3
+ version = "0.0.11"
4
4
  description = "Youtube Autonomous Video OpenGL Module"
5
5
  authors = [
6
6
  {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -132,6 +132,7 @@ class _Uniforms:
132
132
  for key, value in self.uniforms.items():
133
133
  print(f'"{key}": {str(value)}')
134
134
 
135
+ # TODO: Moved to 'nodes.opengl.py'
135
136
  class BaseNode:
136
137
  """
137
138
  The basic class of a node to manipulate frames
@@ -0,0 +1,179 @@
1
+ from yta_video_opengl.complete.track import Track
2
+ from yta_video_opengl.video import Video
3
+ from yta_validation.parameter import ParameterValidator
4
+ from typing import Union
5
+ from fractions import Fraction
6
+
7
+ import numpy as np
8
+ import av
9
+
10
+
11
class Timeline:
    """
    Class to represent all the tracks that
    exist on the project and to handle the
    combination of all their frames.
    """

    @property
    def end(
        self
    ) -> float:
        """
        The end of the last video of the track
        that lasts longer. This is the last time
        moment that has to be rendered.
        """
        return max(track.end for track in self.tracks)

    def __init__(
        self,
        size: tuple[int, int] = (1920, 1080),
        fps: float = 60.0
    ):
        # TODO: By now we are using just two video
        # tracks to test the composition
        # TODO: We need to be careful with the
        # priority, by now it is defined by the
        # position in the array
        self.tracks: list['Track'] = [Track(), Track()]
        """
        All the video tracks we are handling.
        """
        # TODO: Handle size and fps
        self.size = size
        self.fps = fps

    # TODO: Create 'add_track' method, but by now
    # we are handling only one
    def add_video(
        self,
        video: 'Video',
        t: float,
        # TODO: This is for testing, it has to
        # disappear
        do_use_second_track: bool = False
    ) -> 'Timeline':
        """
        Add the provided 'video' to the timeline,
        starting at the provided 't' time moment.

        TODO: The 'do_use_second_track' parameter
        is temporary.
        """
        # Track 0 is the default, track 1 is the
        # temporary secondary one
        index = 1 * do_use_second_track

        self.tracks[index].add_video(video, t)

        return self

    # TODO: This method is not for the Track but
    # for the timeline, as one track can only
    # have consecutive elements
    def get_frame_at(
        self,
        t: float
    ) -> Union['VideoFrame', None]:
        """
        Get all the frames that are played at the
        't' time provided, but combined in one.
        """
        frames = (
            track.get_frame_at(t)
            for track in self.tracks
        )

        frames = [
            frame
            for frame in frames
            if frame is not None
        ]

        return (
            # TODO: Combine them, I send first by now
            frames[0]
            if len(frames) > 0 else
            # TODO: Should I send None or a full
            # black (or transparent) frame? I think
            # None is better because I don't know
            # the size here (?)
            None
        )

    @staticmethod
    def _generate_times(
        start: float,
        end: float,
        fps: float
    ) -> list:
        """
        Get all the 't' time moments, based on the
        given 'fps', that have to be rendered from
        'start' to 'end' (both included).
        """
        dt = 1.0 / fps
        times = []

        t = start
        while t <= end:
            # Tiny epsilon to avoid floating point
            # issues right at the frame boundary
            times.append(t + 0.000001)
            t += dt

        return times

    def render(
        self,
        filename: str,
        start: float = 0.0,
        end: Union[float, None] = None
    ) -> 'Timeline':
        """
        Render the time range in between the given
        'start' and 'end' and store the result with
        the also provided 'filename'.

        If no 'start' and 'end' provided, the whole
        project will be rendered.
        """
        ParameterValidator.validate_mandatory_string('filename', filename, do_accept_empty = False)
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
        ParameterValidator.validate_positive_number('end', end, do_include_zero = False)

        # TODO: Limitate 'end' a bit...
        end = (
            self.end
            if end is None else
            end
        )

        if start >= end:
            raise Exception('The provided "start" cannot be greater or equal to the "end" provided.')

        # TODO: I don't want to have this import here
        from yta_video_opengl.writer import VideoWriter

        # Fix: write to the 'filename' the caller
        # asked for (it was previously hard-coded)
        writer = VideoWriter(filename)
        # TODO: The codec and pixel format have to
        # be dynamic according to the video we are
        # writing
        writer.set_video_stream(
            codec_name = 'h264',
            fps = self.fps,
            size = self.size,
            pixel_format = 'yuv420p'
        )

        # TODO: We are using int() for fps but its float...
        time_base = Fraction(1, int(self.fps))

        for t in self._generate_times(start, end, self.fps):
            frame = self.get_frame_at(t)

            if frame is None:
                # Replace with black background if no frame.
                # Note: ndarray shape is (height, width, 3)
                frame = av.VideoFrame.from_ndarray(
                    array = np.zeros((self.size[1], self.size[0], 3), dtype = np.uint8),
                    format = 'rgb24'
                )

            # We need to adjust our output elements to be
            # consecutive and with the right values
            frame.time_base = time_base
            frame.pts = int(t / frame.time_base)

            # TODO: We need to handle the audio
            writer.mux_video_frame(
                frame = frame
            )

        # Muxing None flushes the encoder
        writer.mux_video_frame(None)
        writer.output.close()

        return self
@@ -0,0 +1,136 @@
1
+ from yta_video_opengl.complete.video_on_track import VideoOnTrack
2
+ from yta_video_opengl.video import Video
3
+ from yta_validation.parameter import ParameterValidator
4
+ from typing import Union
5
+
6
+
7
# TODO: This is called Track but it is
# handling videos only. Should I have
# VideoTrack and AudioTrack (?)
class Track:
    """
    Class to represent a track in which we place
    videos, images and audio to build a video
    project.
    """

    @property
    def end(
        self
    ) -> float:
        """
        The end of the last video of this track,
        which is also the end of the track. This
        is the last time moment that has to be
        rendered.
        """
        return (
            0.0
            if len(self.videos) == 0 else
            max(
                video.end
                for video in self.videos
            )
        )

    def __init__(
        self
    ):
        self.videos: list['VideoOnTrack'] = []
        """
        The list of 'VideoOnTrack' instances that
        must play on this track.
        """

    def _is_free(
        self,
        start: float,
        end: float
    ) -> bool:
        """
        Check if the time range in between the
        'start' and 'end' time given is free or
        there is some video playing at any moment.
        """
        # Fix: compare against the on-track times
        # ('video.start' / 'video.end'), not the
        # source file times ('video.video.start'),
        # which are unrelated to track placement
        return not any(
            (
                video.start < end and
                video.end > start
            )
            for video in self.videos
        )

    def _get_video_at_t(
        self,
        t: float
    ) -> Union['VideoOnTrack', None]:
        """
        Get the video that is being played at
        the 't' time moment provided, or None
        if no video is playing at that moment.
        """
        for video in self.videos:
            if video.start <= t < video.end:
                return video

        return None

    def get_frame_at(
        self,
        t: float
    ) -> Union['VideoFrame', None]:
        """
        Get the frame that must be displayed at
        the 't' time moment provided, which is
        a frame from the video that is being
        played at that time moment.

        Remember, this 't' time moment provided
        is about the track, and we make the
        conversion to the actual video 't' to
        get the frame.
        """
        video = self._get_video_at_t(t)

        return (
            video.get_frame_at(t)
            if video is not None else
            None
        )

    def add_video(
        self,
        video: 'Video',
        t: Union[float, None] = None
    ) -> 'Track':
        """
        Add the 'video' provided to the track. If
        a 't' time moment is provided, the video
        will be added to that time moment if
        possible. If there is no other video
        placed in the time gap between the given
        't' and the provided 'video' duration, it
        will be added successfully. In the other
        case, an exception will be raised.

        If 't' is None, the first available 't'
        time moment will be used, that will be 0.0
        if no video, or the end of the last video.
        """
        ParameterValidator.validate_mandatory_instance_of('video', video, Video)
        ParameterValidator.validate_positive_float('t', t, do_include_zero = True)

        if t is not None:
            # TODO: We can have many different strategies
            # that we could define in the '__init__' maybe
            # Fix: the video occupies [t, t + duration),
            # not [t, t + video.end) ('end' is relative
            # to the source file, not to the track)
            if not self._is_free(t, (t + video.duration)):
                raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
        else:
            t = self.end

        self.videos.append(VideoOnTrack(
            video,
            t
        ))

        # TODO: Maybe return the VideoOnTrack instead (?)
        return self
@@ -0,0 +1,100 @@
1
+ """
2
+ If we have a video placed in a timeline,
3
+ starting at the t=2s and the video lasts
4
+ 2 seconds, the `t` time range in which the
5
+ video is playing is `[2s, 4s]`, so here
6
+ you have some examples with global `t`
7
+ values:
8
+ - `t=1`, the video is not playing because
9
+ it starts at `t=2`
10
+ - `t=3`, the video is playing, it started
11
+ at `t=2` and it has been playing during 1s
12
+ - `t=5`, the video is not playing because
13
+ it started at `t=2`, lasting 2s, so it
14
+ finished at `t=4`
15
+ """
16
+ from yta_video_opengl.video import Video
17
+ from yta_validation.parameter import ParameterValidator
18
+ from av.video.frame import VideoFrame
19
+ from typing import Union
20
+
21
+
22
class VideoOnTrack:
    """
    A video in the timeline.
    """

    @property
    def end(
        self
    ) -> float:
        """
        The end time moment 't' of the video once
        it has been placed on the track, which is
        determined by the video duration and its
        start time moment on the track.

        This end is different from the video end.
        """
        return self.start + self.video.duration

    def __init__(
        self,
        video: 'Video',
        start: float = 0.0
    ):
        ParameterValidator.validate_mandatory_instance_of('video', video, Video)
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)

        # The video source, with all its properties,
        # that is placed in the timeline
        self.video: 'Video' = video
        # The time moment in which the video should
        # start playing, within the timeline. This
        # is relative to the timeline and different
        # from the video 'start' time, which is
        # related to the file
        self.start: float = float(start)

    def _get_video_t(
        self,
        t: float
    ) -> float:
        """
        Map the given global 't' time moment to the
        video's own time. This is the 't' to use
        inside the video content to obtain its
        frame.
        """
        return t - self.start

    def is_playing(
        self,
        t: float
    ) -> bool:
        """
        Check if this video is playing at the general
        't' time moment, which is a global time moment
        for the whole project.
        """
        return self.start <= t < self.end

    def get_frame_at(
        self,
        t: float
    ) -> Union['VideoFrame', None]:
        """
        Get the frame for the 't' time moment provided,
        that could be None if the video is not playing
        in that moment.
        """
        if not self.is_playing(t):
            return None

        return self.video.reader.get_frame_from_t(self._get_video_t(t))
@@ -0,0 +1,119 @@
1
+ from yta_validation.parameter import ParameterValidator
2
+ from typing import Union
3
+ from abc import ABC, abstractmethod
4
+
5
+ import av
6
+ import moderngl
7
+
8
+
9
class Node(ABC):
    """
    Base class to represent a node, which
    is an entity that processes frames
    individually.

    This class must be inherited by any
    video or audio node class.
    """

    # TODO: What about the types?
    @abstractmethod
    def process(
        # Fix: 'self' was missing, so calling
        # 'instance.process(frame, t)' on a
        # subclass following this signature
        # would raise a TypeError
        self,
        frame: Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture'],
        t: float
        # TODO: Maybe we need 'fps' and 'number_of_frames'
        # to calculate progressions or similar...
    ) -> Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture']:
        """
        Process the given 'frame', that is being
        played at the 't' time moment, and return
        the resulting frame.
        """
        pass
28
+
29
class TimedNode:
    """
    Class to represent a Node wrapper to
    be able to specify the time range in
    which we want the node to be applied.

    If the 't' time moment is not inside
    this range, the frame will be returned
    as it is, with no change.

    A 't' time moment inside the range has
    this condition:
    - `start <= t < end`

    We are not including the end because
    the next TimedNode could start on that
    specific value, and remember that the
    first time moment is 0.

    This is the class that has to be applied
    when working with videos and not a Node
    directly.

    The 'start' and 'end' values are, by
    default, 0.0 and None, meaning the node
    is applied during the whole duration.
    """

    def __init__(
        self,
        node: 'Node',
        start: float = 0.0,
        end: Union[float, None] = None
    ):
        ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
        ParameterValidator.validate_positive_number('end', end, do_include_zero = False)

        if (
            end is not None and
            end < start
        ):
            raise Exception('The "end" parameter provided must be greater or equal to the "start" parameter.')

        self.node: 'Node' = node
        """
        The node we are wrapping and we want to
        apply as a modification of the frame in
        which we are in a 't' time moment.
        """
        self.start: float = start
        """
        The 't' time moment in which the Node must
        start being applied (including it).
        """
        self.end: Union[float, None] = end
        """
        The 't' time moment in which the Node must
        stop being applied (excluding it).
        """

    def is_within_time(
        self,
        t: float
    ) -> bool:
        """
        Flag to indicate if the 't' time moment provided
        is in the range of this TimedNode instance,
        which means that it fits this condition:
        - `start <= t < end`
        """
        return (
            self.start <= t < self.end
            if self.end is not None else
            # An 'end' of None means no upper bound
            self.start <= t
        )

    def process(
        self,
        frame: Union['av.VideoFrame', 'av.AudioFrame', 'moderngl.Texture'],
        t: float
        # TODO: Maybe we need 'fps' and 'number_of_frames'
        # to calculate progressions or similar...
    ) -> Union['VideoFrame', 'AudioFrame', 'Texture']:
        """
        Process the frame if the provided 't' time
        moment is in the range of this TimedNode
        instance, or return it unchanged otherwise.
        """
        return (
            self.node.process(frame, t)
            if self.is_within_time(t) else
            frame
        )
@@ -0,0 +1,115 @@
1
+ """
2
+ When working with audio frames, we don't need
3
+ to use the GPU because audios are 1D and the
4
+ information can be processed perfectly with
5
+ a library like numpy.
6
+
7
+ If we need a very intense calculation for an
8
+ audio frame (FFT, convolution, etc.) we can
9
+ use CuPy or some DSP-specific libraries, but
10
+ 90% is perfectly done with numpy.
11
+
12
+ If you want to modify huge amounts of audio
13
+ (some seconds at the same time), you can use
14
+ CuPy, that has the same API as numpy but
15
+ working in GPU. Doing this below most of the
16
+ changes would work:
17
+ - `import numpy as np` → `import cupy as np`
18
+ """
19
from yta_video_opengl.nodes import TimedNode
from abc import ABC, abstractmethod
from typing import Union

import numpy as np
import av
25
+
26
+
27
class AudioNode(ABC):
    """
    Base audio node class to implement a
    change in an audio frame by using the
    numpy library.
    """

    # Fix: without inheriting ABC the
    # '@abstractmethod' decorator is not
    # enforced (the sibling 'Node(ABC)'
    # shows the intended pattern)
    @abstractmethod
    def process(
        self,
        frame: 'av.AudioFrame',
        t: float
    ):
        """
        Process the provided audio 'frame' that
        is played on the given 't' time moment.
        """
        pass
45
+
46
+ """
47
+ Here you have an example. The 'private'
48
+ node class is the modifier, that we don't
49
+ want to expose, and the 'public' class is
50
+ the one that inherits from TimedNode and
51
+ wraps the 'private' class to build the
52
+ functionality.
53
+ """
54
+ class VolumeAudioNode(TimedNode):
55
+ """
56
+ TimedNode to set the audio volume of a video
57
+ in a specific frame.
58
+ """
59
+
60
+ def __init__(
61
+ self,
62
+ factor_fn,
63
+ start: float = 0.0,
64
+ end: Union[float, None] = None
65
+ ):
66
+ super().__init__(
67
+ node = _SetVolumeAudioNode(factor_fn),
68
+ start = start,
69
+ end = end
70
+ )
71
+
72
class _SetVolumeAudioNode(AudioNode):
    """
    Audio node to change the volume of an
    audio frame.
    """

    def __init__(
        self,
        factor_fn
    ):
        """
        Initialize with 'factor_fn', a function
        (t, index) -> volume factor, that gives
        the factor to apply to the samples of
        the frame played at the 't' time moment.
        """
        self.factor_fn = factor_fn

    def process(
        self,
        frame: 'av.AudioFrame',
        t: float,
    ) -> 'av.AudioFrame':
        """
        Apply the volume factor to the given
        'frame' (played at the 't' time moment)
        and return a new frame with the same
        format, layout, sample rate and timing.
        """
        # TODO: Why index (?) Maybe 'total_frames'
        factor = self.factor_fn(t, 0)

        samples = frame.to_ndarray().astype(np.float32)
        samples *= factor

        # Determine dtype according to format
        if 's16' in frame.format.name:
            # 's16', 's16p': fix - clip before the
            # int16 cast to avoid integer wrap-around
            # when the factor pushes samples out of
            # the representable range
            samples = np.clip(samples, -32768, 32767).astype(np.int16)
        else:
            # 'fltp' and other float formats
            samples = samples.astype(np.float32)

        new_frame = av.AudioFrame.from_ndarray(
            samples,
            format = frame.format.name,
            layout = frame.layout.name
        )
        # Preserve the original timing information
        new_frame.sample_rate = frame.sample_rate
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base

        return new_frame
@@ -0,0 +1,5 @@
1
+ """
2
2
+ Video frames have to be processed with
3
+ nodes that use OpenGL, because using the
4
+ GPU is the most efficient approach.
5
+ """