yta-video-opengl 0.0.11__tar.gz → 0.0.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25) hide show
  1. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/PKG-INFO +2 -1
  2. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/pyproject.toml +2 -1
  3. yta_video_opengl-0.0.13/src/yta_video_opengl/complete/timeline.py +267 -0
  4. yta_video_opengl-0.0.13/src/yta_video_opengl/complete/track.py +411 -0
  5. yta_video_opengl-0.0.13/src/yta_video_opengl/complete/video_on_track.py +163 -0
  6. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/reader/__init__.py +190 -89
  7. yta_video_opengl-0.0.13/src/yta_video_opengl/reader/cache.py +529 -0
  8. yta_video_opengl-0.0.13/src/yta_video_opengl/t.py +185 -0
  9. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/tests.py +4 -2
  10. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/utils.py +169 -8
  11. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/video.py +85 -12
  12. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/writer.py +23 -14
  13. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/timeline.py +0 -179
  14. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/track.py +0 -136
  15. yta_video_opengl-0.0.11/src/yta_video_opengl/complete/video_on_track.py +0 -100
  16. yta_video_opengl-0.0.11/src/yta_video_opengl/reader/cache.py +0 -303
  17. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/LICENSE +0 -0
  18. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/README.md +0 -0
  19. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/__init__.py +0 -0
  20. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/classes.py +0 -0
  21. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/complete/__init__.py +0 -0
  22. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/nodes/__init__.py +0 -0
  23. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
  24. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
  25. {yta_video_opengl-0.0.11 → yta_video_opengl-0.0.13}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.11
3
+ Version: 0.0.13
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.9
10
10
  Requires-Dist: av (>=0.0.1,<19.0.0)
11
11
  Requires-Dist: moderngl (>=0.0.1,<9.0.0)
12
12
  Requires-Dist: numpy (>=0.0.1,<9.0.0)
13
+ Requires-Dist: quicktions (>=0.0.1,<9.0.0)
13
14
  Requires-Dist: yta_timer (>0.0.1,<1.0.0)
14
15
  Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
15
16
  Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "yta-video-opengl"
3
- version = "0.0.11"
3
+ version = "0.0.13"
4
4
  description = "Youtube Autonomous Video OpenGL Module"
5
5
  authors = [
6
6
  {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -14,6 +14,7 @@ dependencies = [
14
14
  "av (>=0.0.1,<19.0.0)",
15
15
  "moderngl (>=0.0.1,<9.0.0)",
16
16
  "numpy (>=0.0.1,<9.0.0)",
17
+ "quicktions (>=0.0.1,<9.0.0)"
17
18
  ]
18
19
 
19
20
  [tool.poetry]
@@ -0,0 +1,267 @@
1
+ """
2
+ When we are reading from a source, the reader
3
+ has its own time base and properties. When we
4
+ are writing, the writer has different time
5
+ base and properties. We need to adjust our
6
+ writer to be able to write, because the videos
7
+ we read can be different, and the video we are
8
+ writing is defined by us. The 'time_base' is
9
+ an important property or will make ffmpeg
10
+ become crazy and deny packets (that means no
11
+ video written).
12
+ """
13
+ from yta_video_opengl.complete.track import Track
14
+ from yta_video_opengl.video import Video
15
+ from yta_video_opengl.t import get_ts, fps_to_time_base, T
16
+ from yta_validation.parameter import ParameterValidator
17
+ from quicktions import Fraction
18
+ from typing import Union
19
+
20
+
21
+ class Timeline:
22
+ """
23
+ Class to represent all the tracks that
24
+ exist on the project and to handle the
25
+ combination of all their frames.
26
+ """
27
+
28
+ @property
29
+ def end(
30
+ self
31
+ ) -> Fraction:
32
+ """
33
+ The end of the last video of the track
34
+ that lasts longer. This is the last time
35
+ moment that has to be rendered.
36
+ """
37
+ return max(
38
+ track.end
39
+ for track in self.tracks
40
+ )
41
+
42
+ def __init__(
43
+ self,
44
+ size: tuple[int, int] = (1_920, 1_080),
45
+ fps: Union[int, float, Fraction] = 60.0,
46
+ audio_fps: Union[int, Fraction] = 44_100.0, # 48_000.0 for aac
47
+ # TODO: I don't like this name
48
+ # TODO: Where does this come from (?)
49
+ audio_samples_per_frame: int = 1024
50
+ ):
51
+ # TODO: By now we are using just two video
52
+ # tracks to test the composition
53
+ # TODO: We need to be careful with the
54
+ # priority, by now its defined by its
55
+ # position in the array
56
+ self.tracks: list[Track] = [
57
+ Track(
58
+ size = size,
59
+ fps = fps,
60
+ audio_fps = audio_fps,
61
+ # TODO: I need more info about the audio
62
+ # I think
63
+ audio_samples_per_frame = audio_samples_per_frame
64
+ ),
65
+ Track(
66
+ size = size,
67
+ fps = fps,
68
+ audio_fps = audio_fps,
69
+ # TODO: I need more info about the audio
70
+ # I think
71
+ audio_samples_per_frame = audio_samples_per_frame
72
+ )
73
+ ]
74
+ """
75
+ All the video tracks we are handling.
76
+ """
77
+ # TODO: Handle the other properties
78
+ self.size = size
79
+ self.fps = fps
80
+ self.audio_fps = audio_fps
81
+
82
+ # TODO: Create 'add_track' method, but by now
83
+ # we are handling only one
84
+ def add_video(
85
+ self,
86
+ video: Video,
87
+ t: Union[int, float, Fraction],
88
+ # TODO: This is for testing, it has to
89
+ # disappear
90
+ do_use_second_track: bool = False
91
+ ) -> 'Timeline':
92
+ """
93
+ Add the provided 'video' to the timeline,
94
+ starting at the provided 't' time moment.
95
+
96
+ TODO: The 'do_use_second_track' parameter
97
+ is temporary.
98
+ """
99
+ # TODO: This is temporary logic by now
100
+ # just to be able to test mixing frames
101
+ # from 2 different tracks at the same
102
+ # time
103
+ index = 1 * do_use_second_track
104
+
105
+ self.tracks[index].add_video(video, t)
106
+
107
+ return self
108
+
109
+ # TODO: This method is not for the Track but
110
+ # for the timeline, as one track can only
111
+ # have consecutive elements
112
+ def get_frame_at(
113
+ self,
114
+ t: Union[int, float, Fraction]
115
+ ) -> 'VideoFrame':
116
+ """
117
+ Get all the frames that are played at the
118
+ 't' time provided, but combined in one.
119
+ """
120
+ frames = (
121
+ track.get_frame_at(t)
122
+ for track in self.tracks
123
+ )
124
+ # TODO: Here I receive black frames because
125
+ # it was empty, but I don't have a way to
126
+ # detect those black empty frames because
127
+ # they are just VideoFrame instances... I
128
+ # need a way to know so I can skip them if
129
+ # other frame in other track, or to know if
130
+ # I want them as transparent or something
131
+
132
+ # TODO: Combine them, I send the first one by now
133
+ return next(frames)
134
+
135
+ def get_audio_frames_at(
136
+ self,
137
+ t: float
138
+ ):
139
+ # TODO: What if the different audio streams
140
+ # have also different fps (?)
141
+ frames = []
142
+ for track in self.tracks:
143
+ # TODO: Make this work properly
144
+ audio_frames = track.get_audio_frames_at(t)
145
+
146
+ # TODO: Combine them
147
+ if audio_frames is not None:
148
+ frames = audio_frames
149
+ break
150
+
151
+ #from yta_video_opengl.utils import get_silent_audio_frame
152
+ #make_silent_audio_frame()
153
+ for frame in frames:
154
+ yield frame
155
+
156
+ def render(
157
+ self,
158
+ filename: str,
159
+ start: Union[int, float, Fraction] = 0.0,
160
+ end: Union[int, float, Fraction, None] = None
161
+ ) -> 'Timeline':
162
+ """
163
+ Render the time range in between the given
164
+ 'start' and 'end' and store the result with
165
+ the also provided 'filename'.
166
+
167
+ If no 'start' and 'end' provided, the whole
168
+ project will be rendered.
169
+ """
170
+ ParameterValidator.validate_mandatory_string('filename', filename, do_accept_empty = False)
171
+ # TODO: We need to accept Fraction as number
172
+ #ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
173
+ # TODO: We need to accept Fraction as number
174
+ #ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
175
+
176
+ # TODO: Limit 'end' a bit...
177
+ end = (
178
+ self.end
179
+ if end is None else
180
+ end
181
+ )
182
+
183
+ if start >= end:
184
+ raise Exception('The provided "start" cannot be greater or equal to the "end" provided.')
185
+
186
+ from yta_video_opengl.writer import VideoWriter
187
+
188
+ writer = VideoWriter('test_files/output_render.mp4')
189
+ # TODO: This has to be dynamic according to the
190
+ # video we are writing
191
+ writer.set_video_stream(
192
+ codec_name = 'h264',
193
+ fps = self.fps,
194
+ size = self.size,
195
+ pixel_format = 'yuv420p'
196
+ )
197
+
198
+ writer.set_audio_stream(
199
+ codec_name = 'aac',
200
+ fps = self.audio_fps
201
+ )
202
+
203
+ time_base = fps_to_time_base(self.fps)
204
+ audio_time_base = fps_to_time_base(self.audio_fps)
205
+
206
+ """
207
+ We are trying to render this:
208
+ -----------------------------
209
+ [0 to 0.5) => Black frames
210
+ [0.5 to 1.25) => [0.25 to 1.0) of Video1
211
+ [1.25 to 1.75) => Black frames
212
+ [1.75 to 2.25) => [0.25 to 0.75) of Video1
213
+ [2.25 to 3.0) => Black frames
214
+ [3.0 to 3.75) => [2.25 to 3.0) of Video2
215
+ """
216
+
217
+ audio_pts = 0
218
+ for t in get_ts(start, end, self.fps):
219
+ frame = self.get_frame_at(t)
220
+
221
+ #print(frame)
222
+
223
+ # We need to adjust our output elements to be
224
+ # consecutive and with the right values
225
+ # TODO: We are using int() for fps but its float...
226
+ frame.time_base = time_base
227
+ #frame.pts = int(video_frame_index / frame.time_base)
228
+ frame.pts = T(t, time_base).truncated_pts
229
+
230
+ # TODO: We need to handle the audio
231
+ writer.mux_video_frame(
232
+ frame = frame
233
+ )
234
+
235
+ #print(f' [VIDEO] Here in t:{str(t)} -> pts:{str(frame.pts)} - dts:{str(frame.dts)}')
236
+
237
+ # TODO: Uncomment all this below for the audio
238
+ num_of_audio_frames = 0
239
+ for audio_frame in self.get_audio_frames_at(t):
240
+ # TODO: The track gives us empty (black)
241
+ # frames by default but maybe we need a
242
+ # @dataclass in the middle to handle if
243
+ # we want transparent frames or not and/or
244
+ # to detect them here because, if not,
245
+ # they are just simple VideoFrames and we
246
+ # don't know they are 'empty' frames
247
+
248
+ # We need to adjust our output elements to be
249
+ # consecutive and with the right values
250
+ # TODO: We are using int() for fps but its float...
251
+ audio_frame.time_base = audio_time_base
252
+ #audio_frame.pts = int(audio_frame_index / audio_frame.time_base)
253
+ audio_frame.pts = audio_pts
254
+ # We increment for the next iteration
255
+ audio_pts += audio_frame.samples
256
+ #audio_frame.pts = int(t + (audio_frame_index * audio_frame.time_base) / audio_frame.time_base)
257
+
258
+ #print(f'[AUDIO] Here in t:{str(t)} -> pts:{str(audio_frame.pts)} - dts:{str(audio_frame.dts)}')
259
+
260
+ #num_of_audio_frames += 1
261
+ #print(audio_frame)
262
+ writer.mux_audio_frame(audio_frame)
263
+ #print(f'Num of audio frames: {str(num_of_audio_frames)}')
264
+
265
+ writer.mux_video_frame(None)
266
+ writer.mux_audio_frame(None)
267
+ writer.output.close()
@@ -0,0 +1,411 @@
1
+ from yta_video_opengl.complete.video_on_track import VideoOnTrack
2
+ from yta_video_opengl.video import Video
3
+ from yta_video_opengl.t import T
4
+ from yta_video_opengl.utils import get_black_background_video_frame, get_silent_audio_frame, audio_frames_and_remainder_per_video_frame
5
+ from yta_video_opengl.t import fps_to_time_base
6
+ from yta_validation.parameter import ParameterValidator
7
+ from quicktions import Fraction
8
+ from typing import Union
9
+
10
+
11
+ NON_LIMITED_EMPTY_PART_END = 999
12
+ """
13
+ A value to indicate that the empty part
14
+ has no end because it is in the last
15
+ position and there is no video after it.
16
+ """
17
+ class _Part:
18
+ """
19
+ Class to represent an element that is on the
20
+ track, that can be an empty space or a video
21
+ (with audio).
22
+ """
23
+
24
+ @property
25
+ def is_empty_part(
26
+ self
27
+ ) -> bool:
28
+ """
29
+ Flag to indicate if the part is an empty part,
30
+ which means that there is no video associated
31
+ but an empty space.
32
+ """
33
+ return self.video is None
34
+
35
+ def __init__(
36
+ self,
37
+ track: 'Track',
38
+ start: Union[int, float, Fraction],
39
+ end: Union[int, float, Fraction],
40
+ video: Union[VideoOnTrack, None] = None
41
+ ):
42
+ # TODO: We need to accept Fraction as number
43
+ # ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
44
+ # TODO: We need to accept Fraction as number
45
+ # ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
46
+ ParameterValidator.validate_instance_of('video', video, VideoOnTrack)
47
+
48
+ self._track: Track = track
49
+ """
50
+ The instance of the track this part belongs
51
+ to.
52
+ """
53
+ self.start: Fraction = Fraction(start)
54
+ """
55
+ The start 't' time moment of the part.
56
+ """
57
+ self.end: Fraction = Fraction(end)
58
+ """
59
+ The end 't' time moment of the part.
60
+ """
61
+ self.video: Union[VideoOnTrack, None] = video
62
+ """
63
+ The video associated, if existing, or
64
+ None if it is an empty space that we need
65
+ to fulfill with a black background and
66
+ silent audio.
67
+ """
68
+
69
+ def get_frame_at(
70
+ self,
71
+ t: Union[int, float, Fraction]
72
+ ) -> 'VideoFrame':
73
+ """
74
+ Get the frame that must be displayed at
75
+ the given 't' time moment.
76
+ """
77
+ if self.is_empty_part:
78
+ # TODO: What about the 'format' (?)
79
+ # TODO: Maybe I shouldn't set the 'time_base'
80
+ # here and do it just in the Timeline 'render'
81
+ #return get_black_background_video_frame(self._track.size)
82
+ # TODO: This 'time_base' maybe has to be related
83
+ # to a Timeline general 'time_base' and not the fps
84
+ return get_black_background_video_frame(self._track.size, time_base = fps_to_time_base(self._track.fps))
85
+
86
+ frame = self.video.get_frame_at(t)
87
+
88
+ # TODO: This should not happen because of
89
+ # the way we handle the videos here but the
90
+ # video could send us a None frame here, so
91
+ # do we raise exception (?)
92
+ if frame is None:
93
+ #frame = get_black_background_video_frame(self._track.size)
94
+ # TODO: By now I'm raising exception to check if
95
+ # this happens or not because I think it would
96
+ # be malfunctioning
97
+ raise Exception(f'Video is returning None frame at t={str(t)}.')
98
+
99
+ return frame
100
+
101
+ # TODO: I'm not sure if we need this
102
+ def get_audio_frames_at(
103
+ self,
104
+ t: Union[int, float, Fraction]
105
+ ):
106
+ if not self.is_empty_part:
107
+ frames = self.video.get_audio_frames_at(t)
108
+ else:
109
+ # TODO: Transform this below to a utils in
110
+ # which I obtain the array directly
111
+ # Check many full and partial silent frames we need
112
+ number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
113
+ video_fps = self._track.fps,
114
+ sample_rate = self._track.audio_fps,
115
+ number_of_samples_per_audio_frame = self._track.audio_samples_per_frame
116
+ )
117
+
118
+ # TODO: I need to set the pts, but here (?)
119
+ # The complete silent frames we need
120
+ frames = (
121
+ [
122
+ get_silent_audio_frame(
123
+ sample_rate = self._track.audio_fps,
124
+ # TODO: Check where do we get this value from
125
+ layout = 'stereo',
126
+ number_of_samples = self._track.audio_samples_per_frame,
127
+ # TODO: Check where do we get this value from
128
+ format = 'fltp'
129
+ )
130
+ ] * number_of_frames
131
+ if number_of_frames > 0 else
132
+ []
133
+ )
134
+
135
+ # The remaining partial silent frames we need
136
+ if number_of_remaining_samples > 0:
137
+ frames.append(
138
+ get_silent_audio_frame(
139
+ sample_rate = self._track.audio_fps,
140
+ # TODO: Check where do we get this value from
141
+ layout = 'stereo',
142
+ number_of_samples = number_of_remaining_samples,
143
+ # TODO: Check where do we get this value from
144
+ format = 'fltp'
145
+ )
146
+ )
147
+
148
+ # TODO: Return or yield (?)
149
+ for frame in frames:
150
+ yield frame
151
+ #return frames
152
+
153
+ # TODO: I don't like using t as float,
154
+ # we need to implement fractions.Fraction
155
+ # TODO: This is called Track but it is
156
+ # handling videos only. Should I have
157
+ # VideoTrack and AudioTrack (?)
158
+ class Track:
159
+ """
160
+ Class to represent a track in which we place
161
+ videos, images and audio to build a video
162
+ project.
163
+ """
164
+
165
+ @property
166
+ def parts(
167
+ self
168
+ ) -> list[_Part]:
169
+ """
170
+ The list of parts that build this track,
171
+ but with the empty parts detected to
172
+ be fulfilled with black frames and silent
173
+ audios.
174
+
175
+ A part can be a video or an empty space.
176
+ """
177
+ if (
178
+ not hasattr(self, '_parts') or
179
+ self._parts is None
180
+ ):
181
+ self._recalculate_parts()
182
+
183
+ return self._parts
184
+
185
+ @property
186
+ def end(
187
+ self
188
+ ) -> Fraction:
189
+ """
190
+ The end of the last video of this track,
191
+ which is also the end of the track. This
192
+ is the last time moment that has to be
193
+ rendered.
194
+ """
195
+ return Fraction(
196
+ 0.0
197
+ if len(self.videos) == 0 else
198
+ max(
199
+ video.end
200
+ for video in self.videos
201
+ )
202
+ )
203
+
204
+ def __init__(
205
+ self,
206
+ # TODO: I need the general settings of the
207
+ # project to be able to make audio also, not
208
+ # only the empty frames
209
+ size: tuple[int, int],
210
+ fps: float,
211
+ audio_fps: float,
212
+ # TODO: Where does it come from (?)
213
+ audio_samples_per_frame: int
214
+ ):
215
+ self.videos: list[VideoOnTrack] = []
216
+ """
217
+ The list of 'VideoOnTrack' instances that
218
+ must play on this track.
219
+ """
220
+ self.size: tuple[int, int] = size
221
+ """
222
+ The size of the videos of this track.
223
+ """
224
+ self.fps: float = float(fps)
225
+ """
226
+ The fps of the track, needed to calculate
227
+ the base t time moments to be precise and
228
+ to obtain or generate the frames.
229
+ """
230
+ self.audio_fps: float = float(audio_fps)
231
+ """
232
+ The fps of the audio track, needed to
233
+ generate silent audios for the empty parts.
234
+ """
235
+ self.audio_samples_per_frame: int = audio_samples_per_frame
236
+ """
237
+ The number of samples per audio frame.
238
+ """
239
+
240
+ def _is_free(
241
+ self,
242
+ start: Union[int, float, Fraction],
243
+ end: Union[int, float, Fraction]
244
+ ) -> bool:
245
+ """
246
+ Check if the time range in between the
247
+ 'start' and 'end' time given is free or
248
+ there is some video playing at any moment.
249
+ """
250
+ return not any(
251
+ (
252
+ video.video.start < end and
253
+ video.video.end > start
254
+ )
255
+ for video in self.videos
256
+ )
257
+
258
+ def _get_part_at_t(
259
+ self,
260
+ t: Union[int, float, Fraction]
261
+ ) -> _Part:
262
+ """
263
+ Get the part at the given 't' time
264
+ moment, that will always exist because
265
+ we have a special non-ended last
266
+ empty part that would be returned if
267
+ accessing an empty 't'.
268
+ """
269
+ for part in self.parts:
270
+ if part.start <= t < part.end:
271
+ return part
272
+
273
+ # TODO: This will only happen if they are
274
+ # asking for a value greater than the
275
+ # NON_LIMITED_EMPTY_PART_END...
276
+ raise Exception('NON_LIMITED_EMPTY_PART_END exceeded.')
277
+ return None
278
+
279
+ def get_frame_at(
280
+ self,
281
+ t: Union[int, float, Fraction]
282
+ ) -> 'VideoFrame':
283
+ """
284
+ Get the frame that must be displayed at
285
+ the 't' time moment provided, which is
286
+ a frame from the video audio that is
287
+ being played at that time moment.
288
+
289
+ Remember, this 't' time moment provided
290
+ is about the track, and we make the
291
+ conversion to the actual video 't' to
292
+ get the frame.
293
+ """
294
+ # TODO: What if the frame, that comes from
295
+ # a video, doesn't have the expected size (?)
296
+ return self._get_part_at_t(t).get_frame_at(t)
297
+
298
+ # TODO: This is not working well...
299
+ def get_audio_frames_at(
300
+ self,
301
+ t: Union[int, float, Fraction]
302
+ ):
303
+ """
304
+ Get the sequence of audio frames that
305
+ must be displayed at the 't' time
306
+ moment provided, which is the collection
307
+ of audio frames corresponding to the
308
+ video frame that is being played at
309
+ that time moment.
310
+
311
+ Remember, this 't' time moment provided
312
+ is about the track, and we make the
313
+ conversion to the actual video 't' to
314
+ get the frame.
315
+
316
+ This is useful when we want to write a
317
+ video frame with its audio, so we obtain
318
+ all the audio frames associated to it
319
+ (remember that a video frame is associated
320
+ with more than 1 audio frame).
321
+ """
322
+ for frame in self._get_part_at_t(t).get_audio_frames_at(t):
323
+ yield frame
324
+
325
+ def add_video(
326
+ self,
327
+ video: Video,
328
+ t: Union[int, float, Fraction, None] = None
329
+ ) -> 'Track':
330
+ """
331
+ Add the 'video' provided to the track. If
332
+ a 't' time moment is provided, the video
333
+ will be added to that time moment if
334
+ possible. If there is no other video
335
+ placed in the time gap between the given
336
+ 't' and the provided 'video' duration, it
337
+ will be added successfully. In the other
338
+ case, an exception will be raised.
339
+
340
+ If 't' is None, the first available 't'
341
+ time moment will be used, that will be 0.0
342
+ if no video, or the end of the last video.
343
+ """
344
+ ParameterValidator.validate_mandatory_instance_of('video', video, Video)
345
+ ParameterValidator.validate_positive_number('t', t, do_include_zero = True)
346
+
347
+ if t is not None:
348
+ # TODO: We can have many different strategies
349
+ # that we could define in the '__init__' maybe
350
+ t: T = T.from_fps(t, self.fps)
351
+ if not self._is_free(t.truncated, t.next(1).truncated):
352
+ raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
353
+ t = t.truncated
354
+ else:
355
+ t = self.end
356
+
357
+ self.videos.append(VideoOnTrack(
358
+ video,
359
+ t
360
+ ))
361
+
362
+ self._recalculate_parts()
363
+
364
+ # TODO: Maybe return the VideoOnTrack instead (?)
365
+ return self
366
+
367
+ def _recalculate_parts(
368
+ self
369
+ ) -> 'Track':
370
+ """
371
+ Check the track and get all the parts. A
372
+ part can be empty (no video and no audio on
373
+ that time period, which means black
374
+ background and silence audio), or a video
375
+ with (or without) audio.
376
+ """
377
+ parts = []
378
+ cursor = 0.0
379
+
380
+ for video in self.videos:
381
+ # Empty space between cursor and start of
382
+ # the next clip
383
+ if video.start > cursor:
384
+ parts.append(_Part(
385
+ track = self,
386
+ start = cursor,
387
+ end = video.start,
388
+ video = None
389
+ ))
390
+
391
+ # The video itself
392
+ parts.append(_Part(
393
+ track = self,
394
+ start = video.start,
395
+ end = video.end,
396
+ video = video
397
+ ))
398
+
399
+ cursor = video.end
400
+
401
+ # Add the non limited last empty part
402
+ parts.append(_Part(
403
+ track = self,
404
+ start = cursor,
405
+ end = NON_LIMITED_EMPTY_PART_END,
406
+ video = None
407
+ ))
408
+
409
+ self._parts = parts
410
+
411
+ return self