yta-video-opengl 0.0.19.tar.gz → 0.0.20.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/PKG-INFO +1 -1
  2. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/pyproject.toml +1 -1
  3. yta_video_opengl-0.0.20/src/yta_video_opengl/audio.py +214 -0
  4. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/complete/timeline.py +9 -6
  5. yta_video_opengl-0.0.20/src/yta_video_opengl/complete/track/__init__.py +493 -0
  6. yta_video_opengl-0.0.19/src/yta_video_opengl/complete/video_on_track.py → yta_video_opengl-0.0.20/src/yta_video_opengl/complete/track/media/__init__.py +112 -47
  7. yta_video_opengl-0.0.20/src/yta_video_opengl/complete/track/parts.py +230 -0
  8. yta_video_opengl-0.0.20/src/yta_video_opengl/complete/track/utils.py +78 -0
  9. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/reader/__init__.py +0 -19
  10. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/reader/cache/utils.py +1 -1
  11. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/video.py +9 -13
  12. yta_video_opengl-0.0.19/src/yta_video_opengl/complete/track.py +0 -562
  13. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/LICENSE +0 -0
  14. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/README.md +0 -0
  15. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/__init__.py +0 -0
  16. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/classes.py +0 -0
  17. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/complete/__init__.py +0 -0
  18. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/complete/frame_combinator.py +0 -0
  19. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/complete/frame_generator.py +0 -0
  20. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/complete/frame_wrapper.py +0 -0
  21. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/nodes/__init__.py +0 -0
  22. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
  23. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
  24. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
  25. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/reader/cache/__init__.py +0 -0
  26. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/reader/cache/audio.py +0 -0
  27. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/reader/cache/video.py +0 -0
  28. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/t.py +0 -0
  29. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/tests.py +0 -0
  30. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/utils.py +0 -0
  31. {yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/writer.py +0 -0
{yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.19
+ Version: 0.0.20
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com
{yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "yta-video-opengl"
- version = "0.0.19"
+ version = "0.0.20"
  description = "Youtube Autonomous Video OpenGL Module"
  authors = [
      {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
yta_video_opengl-0.0.20/src/yta_video_opengl/audio.py (new file)
@@ -0,0 +1,214 @@
+ from yta_video_opengl.reader import VideoReader
+ from yta_video_opengl.writer import VideoWriter
+ from yta_video_opengl.t import T
+ from yta_validation import PythonValidator
+ from quicktions import Fraction
+ from typing import Union
+
+
+ # TODO: Where can I obtain this dynamically (?)
+ PIXEL_FORMAT = 'yuv420p'
+
+ # TODO: Maybe create a _Media(ABC) to put
+ # some code shared with the Video class
+ class Audio:
+     """
+     Class to wrap the functionality related to
+     handling and modifying an audio.
+     """
+
+     @property
+     def audio_start_pts(
+         self
+     ) -> int:
+         """
+         The start packet time stamp (pts), needed
+         to optimize the packet iteration process.
+         """
+         # TODO: What if 'audio_time_base' is None (?)
+         return T(self.start, self.reader.audio_time_base).truncated_pts
+
+     @property
+     def audio_end_pts(
+         self
+     ) -> Union[int, None]:
+         """
+         The end packet time stamp (pts), needed to
+         optimize the packet iteration process.
+         """
+         return (
+             # TODO: What if 'audio_time_base' is None (?)
+             T(self.end, self.reader.audio_time_base).truncated_pts
+             # TODO: What do we do if no duration (?)
+             if self.duration is not None else
+             None
+         )
+
+     @property
+     def duration(
+         self
+     ) -> Fraction:
+         """
+         The duration of the audio.
+         """
+         return self.end - self.start
+
+     @property
+     def frames(
+         self
+     ):
+         """
+         Iterator to yield all the frames, one by
+         one, within the range defined by the
+         'start' and 'end' parameters provided when
+         instantiating it.
+
+         The iterator will iterate first over the
+         audio frames.
+         """
+         for frame in self.reader.get_audio_frames(self.start, self.end):
+             yield frame
+
+     def __init__(
+         self,
+         filename: str,
+         start: Union[int, float, Fraction] = 0.0,
+         end: Union[int, float, Fraction, None] = None
+     ):
+         self.filename: str = filename
+         """
+         The filename of the original audio.
+         """
+         # TODO: Detect the 'pixel_format' from the
+         # extension (?)
+         self.reader: VideoReader = VideoReader(self.filename)
+         """
+         The pyav audio reader.
+         """
+         self.start: Fraction = Fraction(start)
+         """
+         The time moment 't' in which the audio
+         should start.
+         """
+         self.end: Union[Fraction, None] = Fraction(
+             # TODO: Is this 'end' ok (?)
+             self.reader.duration
+             if end is None else
+             end
+         )
+         """
+         The time moment 't' in which the audio
+         should end.
+         """
+
+     def _get_t(
+         self,
+         t: Union[int, float, Fraction]
+     ) -> Fraction:
+         """
+         Get the real 't' time moment based on the
+         audio 'start' and 'end'. If they were
+         asking for the t=0.5s but our audio was
+         subclipped to [1.0, 2.0), the 0.5s must be
+         actually the 1.5s of the audio because of
+         the subclipped time range.
+         """
+         t += self.start
+
+         print(f'Audio real t is {str(float(t))}')
+         if t >= self.end:
+             raise Exception(f'The "t" ({str(t)}) provided is out of range. This audio lasts from [{str(self.start)}, {str(self.end)}).')
+
+         return t
+
+     def get_audio_frame_from_t(
+         self,
+         t: Union[int, float, Fraction]
+     ) -> 'AudioFrame':
+         """
+         Get the audio frame with the given 't' time
+         moment, using the audio cache system. This
+         method is useful when we need to combine
+         many different frames so we can obtain them
+         one by one.
+
+         TODO: Is this actually necessary (?)
+         """
+         return self.reader.get_audio_frame_from_t(self._get_t(t))
+
+     def get_audio_frames_from_t(
+         self,
+         t: Union[int, float, Fraction]
+     ):
+         """
+         Get the sequence of audio frames for a
+         given video 't' time moment, using the
+         audio cache system.
+
+         This is useful when we want to write a
+         video frame with its audio, so we obtain
+         all the audio frames associated to it
+         (remember that a video frame is associated
+         with more than 1 audio frame).
+         """
+         print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_t(t)))}')
+         for frame in self.reader.get_audio_frames_from_t(self._get_t(t)):
+             yield frame
+
+     def save_as(
+         self,
+         filename: str
+     ) -> 'Video':
+         """
+         Save the audio locally as the given 'filename'.
+
+         TODO: By now we are doing tests inside so the
+         functionality is a manual test. Use it
+         carefully.
+         """
+         writer = VideoWriter(filename)
+         writer.set_audio_stream_from_template(self.reader.audio_stream)
+
+         from yta_video_opengl.nodes.audio import VolumeAudioNode
+         # Audio from 0 to 1
+         # TODO: This effect 'fn' needs improvement
+         def fade_in_fn(t, index, start=0.5, end=1.0):
+             if t < start or t > end:
+                 # Outside the range: leave it untouched → original volume (1.0)
+                 progress = 1.0
+             else:
+                 # Inside the range: interpolate linearly from 0 → 1
+                 progress = (t - start) / (end - start)
+
+             return progress
+
+         #fade_in = SetVolumeAudioNode(lambda t, i: min(1, t / self.duration))
+         fade_in = VolumeAudioNode(lambda t, i: fade_in_fn(t, i, 0.5, 1.0))
+
+         for frame, t, index in self.frames:
+             if PythonValidator.is_instance_of(frame, 'VideoFrame'):
+                 print(f'Saving video frame {str(index)}, with t = {str(t)}')
+
+                 # TODO: Process any video frame change
+
+                 writer.mux_video_frame(
+                     frame = frame
+                 )
+             else:
+                 print(f'Saving audio frame {str(index)} ({str(round(float(t * self.reader.fps), 2))}), with t = {str(t)}')
+
+                 # TODO: Process any audio frame change
+                 # Test setting audio
+                 frame = fade_in.process(frame, t)
+
+                 writer.mux_audio_frame(
+                     frame = frame
+                 )
+
+         # Flush the remaining frames to write
+         writer.mux_audio_frame(None)
+         writer.mux_video_frame(None)
+
+         # TODO: Maybe move this to the '__del__' (?)
+         writer.output.close()
+         self.reader.container.close()
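Editor's note: for orientation, a minimal usage sketch of the new Audio class added in 0.0.20, based only on the constructor and public members visible in the hunk above. The media paths are hypothetical placeholders, and the exact items yielded by the frames iterator depend on VideoReader.get_audio_frames, which is not part of this diff.

    # Minimal usage sketch of yta_video_opengl.audio.Audio (0.0.20), based only
    # on the members shown in the diff above. 'input.mp4' and 'output.mp4' are
    # hypothetical placeholder paths.
    from quicktions import Fraction
    from yta_video_opengl.audio import Audio

    # Wrap a source file, subclipped to the [1.0, 2.5) time range
    audio = Audio('input.mp4', start = 1.0, end = 2.5)

    print(float(audio.duration))    # 1.5, because duration = end - start

    # 't' is relative to the subclip, so t = 0.25 maps to 1.25s of the source
    frame = audio.get_audio_frame_from_t(Fraction(1, 4))

    # Iterate the audio frames of the subclipped range; the shape of each
    # yielded item depends on VideoReader.get_audio_frames (not shown here)
    for item in audio.frames:
        pass

    # Re-mux the audio (the test fade-in inside save_as is applied on write)
    audio.save_as('output.mp4')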
{yta_video_opengl-0.0.19 → yta_video_opengl-0.0.20}/src/yta_video_opengl/complete/timeline.py
@@ -10,13 +10,12 @@ an important property or will make ffmpeg
  become crazy and deny packets (that means no
  video written).
  """
- from yta_video_opengl.complete.track import Track
+ from yta_video_opengl.complete.track import VideoTrack
  from yta_video_opengl.video import Video
  from yta_video_opengl.t import get_ts, fps_to_time_base, T
  from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped, AudioFrameWrapped
  from yta_video_opengl.complete.frame_combinator import AudioFrameCombinator
  from yta_validation.parameter import ParameterValidator
- from yta_validation import PythonValidator
  from av.video.frame import VideoFrame
  from av.audio.frame import AudioFrame
  from quicktions import Fraction
@@ -62,7 +61,9 @@ class Timeline:
          # TODO: We need to be careful with the
          # priority, by now its defined by its
          # position in the array
-         self.tracks: list[Track] = []
+         # TODO: By now I'm having only video
+         # tracks
+         self.tracks: list[VideoTrack] = []
          """
          All the video tracks we are handling.
          """
@@ -100,6 +101,8 @@ class Timeline:
          # We will have 2 tracks by now
          self.add_track().add_track()

+     # TODO: This has to be modified to accept
+     # adding an AudioTrack
      def add_track(
          self,
          index: Union[int, None] = None
@@ -129,9 +132,9 @@ class Timeline:
          if track.index >= index:
              track.index += 1

-         self.tracks.append(Track(
-             size = self.size,
+         self.tracks.append(VideoTrack(
              index = index,
+             size = self.size,
              fps = self.fps,
              audio_fps = self.audio_fps,
              # TODO: I need more info about the audio
@@ -161,7 +164,7 @@ class Timeline:
          """
          ParameterValidator.validate_mandatory_number_between('track_index', track_index, 0, len(self.tracks))

-         self.tracks[track_index].add_video(video, t)
+         self.tracks[track_index].add_media(video, t)
          return self
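Editor's note: a hedged sketch of the caller-side impact of the renames in the timeline hunks above: the track class is now VideoTrack and its add_video(...) call has become add_media(...). The constructor arguments of Timeline and Video are not shown in this diff, so their usage below is an assumption, and 'clip.mp4' is a placeholder.

    # Sketch of the Track → VideoTrack and add_video → add_media renames from
    # the caller's perspective. Timeline() and Video(...) construction details
    # are assumptions (not visible in this diff); 'clip.mp4' is a placeholder.
    from yta_video_opengl.complete.timeline import Timeline
    from yta_video_opengl.video import Video

    timeline = Timeline()    # assumed default construction
    timeline.add_track()     # appends a VideoTrack; chainable (returns self)

    video = Video('clip.mp4')    # constructor assumed to mirror the new Audio class

    # 0.0.19: timeline.tracks[0].add_video(video, 0.5)
    # 0.0.20:
    timeline.tracks[0].add_media(video, 0.5)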