yta-video-opengl 0.0.10__tar.gz → 0.0.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/PKG-INFO +1 -1
  2. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/pyproject.toml +1 -1
  3. yta_video_opengl-0.0.12/src/yta_video_opengl/complete/__init__.py +0 -0
  4. yta_video_opengl-0.0.12/src/yta_video_opengl/complete/timeline.py +271 -0
  5. yta_video_opengl-0.0.12/src/yta_video_opengl/complete/track.py +403 -0
  6. yta_video_opengl-0.0.12/src/yta_video_opengl/complete/video_on_track.py +149 -0
  7. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/reader/__init__.py +190 -42
  8. yta_video_opengl-0.0.12/src/yta_video_opengl/reader/cache.py +507 -0
  9. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/tests.py +11 -0
  10. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/utils.py +151 -1
  11. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/writer.py +17 -3
  12. yta_video_opengl-0.0.10/src/yta_video_opengl/reader/cache.py +0 -233
  13. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/LICENSE +0 -0
  14. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/README.md +0 -0
  15. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/__init__.py +0 -0
  16. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/classes.py +0 -0
  17. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/nodes/__init__.py +0 -0
  18. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
  19. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
  20. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
  21. {yta_video_opengl-0.0.10 → yta_video_opengl-0.0.12}/src/yta_video_opengl/video.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.10
+ Version: 0.0.12
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com
@@ -1,6 +1,6 @@
  [project]
  name = "yta-video-opengl"
- version = "0.0.10"
+ version = "0.0.12"
  description = "Youtube Autonomous Video OpenGL Module"
  authors = [
      {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -0,0 +1,271 @@
+ """
+ When we are reading from a source, the reader
+ has its own time base and properties. When we
+ are writing, the writer has a different time
+ base and properties. We need to adjust our
+ writer accordingly, because the videos we read
+ can be different from each other, while the
+ video we are writing is defined by us. The
+ 'time_base' is an important property: if it is
+ wrong, ffmpeg will reject the packets (which
+ means no video gets written).
+ """
+ from yta_video_opengl.complete.track import Track
+ from yta_video_opengl.video import Video
+ from yta_validation.parameter import ParameterValidator
+ from typing import Union
+ from fractions import Fraction
+
+
+ class Timeline:
+     """
+     Class to represent all the tracks that
+     exist on the project and to handle the
+     combination of all their frames.
+     """
+
+     @property
+     def end(
+         self
+     ) -> float:
+         """
+         The end of the track that lasts the
+         longest. This is the last time
+         moment that has to be rendered.
+         """
+         return max(
+             track.end
+             for track in self.tracks
+         )
+
+     def __init__(
+         self,
+         size: tuple[int, int] = (1_920, 1_080),
+         fps: float = 60.0,
+         audio_fps: float = 44_100.0, # 48_000.0 for aac
+         # TODO: I don't like this name
+         # TODO: Where does this come from (?)
+         audio_nb_samples: int = 1024
+     ):
+         # TODO: For now we are using just two video
+         # tracks to test the composition
+         # TODO: We need to be careful with the
+         # priority, for now it's defined by its
+         # position in the array
+         self.tracks: list[Track] = [
+             Track(
+                 size = size,
+                 fps = fps,
+                 audio_fps = audio_fps,
+                 # TODO: I need more info about the audio
+                 # I think
+                 audio_nb_samples = audio_nb_samples
+             ),
+             Track(
+                 size = size,
+                 fps = fps,
+                 audio_fps = audio_fps,
+                 # TODO: I need more info about the audio
+                 # I think
+                 audio_nb_samples = audio_nb_samples
+             )
+         ]
+         """
+         All the video tracks we are handling.
+         """
+         # TODO: Handle the other properties
+         self.size = size
+         self.fps = fps
+         self.audio_fps = audio_fps
+
+     # TODO: Create 'add_track' method, but for now
+     # we are handling only one
+     def add_video(
+         self,
+         video: Video,
+         t: float,
+         # TODO: This is for testing, it has to
+         # disappear
+         do_use_second_track: bool = False
+     ) -> 'Timeline':
+         """
+         Add the provided 'video' to the timeline,
+         starting at the provided 't' time moment.
+
+         TODO: The 'do_use_second_track' parameter
+         is temporary.
+         """
+         # TODO: This is temporary logic for now,
+         # just to be able to test mixing frames
+         # from 2 different tracks at the same
+         # time
+         index = 1 * do_use_second_track
+
+         self.tracks[index].add_video(video, t)
+
+         return self
+
+     # TODO: This method is not for the Track but
+     # for the timeline, as one track can only
+     # have consecutive elements
+     def get_frame_at(
+         self,
+         t: float
+     ) -> 'VideoFrame':
+         """
+         Get all the frames that are played at the
+         't' time provided, but combined in one.
+         """
+         frames = (
+             track.get_frame_at(t)
+             for track in self.tracks
+         )
+         # TODO: Here I receive black frames because
+         # the track was empty, but I don't have a way
+         # to detect those black empty frames because
+         # they are just VideoFrame instances... I
+         # need a way to know, so I can skip them if
+         # there is a frame in another track, or to
+         # know if I want them as transparent or something
+
+         # TODO: Combine them, I send the first one by now
+         return next(frames)
+
+     def get_audio_frames_at(
+         self,
+         t: float
+     ):
+         # TODO: What if the different audio streams
+         # also have different fps (?)
+         frames = []
+         for track in self.tracks:
+             # TODO: Make this work properly
+             audio_frames = track.get_audio_frames_at(t)
+
+             # TODO: Combine them
+             if audio_frames is not None:
+                 frames = audio_frames
+                 break
+
+         #from yta_video_opengl.utils import get_silent_audio_frame
+         #make_silent_audio_frame()
+         for frame in frames:
+             yield frame
+
+     def render(
+         self,
+         filename: str,
+         start: float = 0.0,
+         end: Union[float, None] = None
+     ) -> 'Timeline':
+         """
+         Render the time range in between the given
+         'start' and 'end' and store the result with
+         the also provided 'filename'.
+
+         If no 'start' and 'end' are provided, the whole
+         project will be rendered.
+         """
+         ParameterValidator.validate_mandatory_string('filename', filename, do_accept_empty = False)
+         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+         ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
+
+         # TODO: Limit 'end' a bit...
+         end = (
+             self.end
+             if end is None else
+             end
+         )
+
+         if start >= end:
+             raise Exception('The provided "start" cannot be greater than or equal to the "end" provided.')
+
+         from yta_video_opengl.writer import VideoWriter
+         from yta_video_opengl.utils import get_black_background_video_frame, get_silent_audio_frame
+
+         writer = VideoWriter('test_files/output_render.mp4')
+         # TODO: This has to be dynamic according to the
+         # video we are writing
+         writer.set_video_stream(
+             codec_name = 'h264',
+             fps = self.fps,
+             size = self.size,
+             pixel_format = 'yuv420p'
+         )
+
+         writer.set_audio_stream(
+             codec_name = 'aac',
+             fps = self.audio_fps
+         )
+
+         audio_pts = 0
+         for t in get_ts(start, end, self.fps):
+             frame = self.get_frame_at(t)
+
+             # We need to adjust our output elements to be
+             # consecutive and with the right values
+             # TODO: We are using int() for fps but it's a float...
+             frame.time_base = Fraction(1, int(self.fps))
+             #frame.pts = int(video_frame_index / frame.time_base)
+             frame.pts = int(t / frame.time_base)
+
+             # TODO: We need to handle the audio
+             writer.mux_video_frame(
+                 frame = frame
+             )
+
+             #print(f' [VIDEO] Here in t:{str(t)} -> pts:{str(frame.pts)} - dts:{str(frame.dts)}')
+
+             num_of_audio_frames = 0
+             for audio_frame in self.get_audio_frames_at(t):
+                 # TODO: The track gives us empty (black)
+                 # frames by default but maybe we need a
+                 # @dataclass in the middle to handle if
+                 # we want transparent frames or not and/or
+                 # to detect them here because, if not,
+                 # they are just simple VideoFrames and we
+                 # don't know they are 'empty' frames
+
+                 # We need to adjust our output elements to be
+                 # consecutive and with the right values
+                 # TODO: We are using int() for fps but it's a float...
+                 audio_frame.time_base = Fraction(1, int(self.audio_fps))
+                 #audio_frame.pts = int(audio_frame_index / audio_frame.time_base)
+                 audio_frame.pts = audio_pts
+                 # We increment for the next iteration
+                 audio_pts += audio_frame.samples
+                 #audio_frame.pts = int(t + (audio_frame_index * audio_frame.time_base) / audio_frame.time_base)
+
+                 #print(f'[AUDIO] Here in t:{str(t)} -> pts:{str(audio_frame.pts)} - dts:{str(audio_frame.dts)}')
+
+                 num_of_audio_frames += 1
+                 print(audio_frame)
+                 writer.mux_audio_frame(audio_frame)
+             print(f'Num of audio frames: {str(num_of_audio_frames)}')
+
+         writer.mux_video_frame(None)
+         writer.mux_audio_frame(None)
+         writer.output.close()
+
+
+ # TODO: I don't want to have this here
+ def get_ts(
+     start: float,
+     end: float,
+     fps: int
+ ):
+     """
+     Obtain, without using a Progression class or
+     importing that library, a list of 't' time
+     moments from the provided 'start' to the also
+     given 'end', with the 'fps' given as parameter.
+     """
+     dt = 1.0 / fps
+     times = []
+
+     t = start
+     while t <= end:
+         times.append(t + 0.000001)
+         t += dt
+
+     return times
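
Note on the pts arithmetic used in `Timeline.render` above: the video pts is the timeline `t` expressed in `time_base` units (1/fps), while the audio pts is accumulated by the number of samples already written, so consecutive (possibly partial) audio frames line up. A minimal, standard-library-only sketch of that arithmetic; the numbers mirror the defaults above and are illustrative, not taken from the package:

```python
# Sketch of the pts arithmetic in Timeline.render, assuming
# fps = 60 video and 44_100 Hz audio with 1_024 samples per frame.
from fractions import Fraction

fps = 60
audio_fps = 44_100

video_time_base = Fraction(1, fps)        # 1/60 s per pts unit
audio_time_base = Fraction(1, audio_fps)  # 1/44100 s per pts unit

# Video: pts is simply t expressed in time_base units.
t = 0.5                                   # half a second into the timeline
video_pts = int(t / video_time_base)      # -> 30

# Audio: pts advances by the samples already written, so frames
# stay consecutive even when the last frame is a partial one.
audio_pts = 0
for samples in (1_024, 1_024, 1_024):     # three full audio frames
    # frame.pts = audio_pts; frame.time_base = audio_time_base
    audio_pts += samples                  # next frame starts right after

print(video_pts, audio_pts)               # 30 3072
```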
@@ -0,0 +1,403 @@
+ from yta_video_opengl.complete.video_on_track import VideoOnTrack
+ from yta_video_opengl.video import Video
+ from yta_video_frame_time import T
+ from yta_video_opengl.utils import get_black_background_video_frame, get_silent_audio_frame, audio_frames_and_remainder_per_video_frame
+ from yta_validation.parameter import ParameterValidator
+ from typing import Union
+
+
+ NON_LIMITED_EMPTY_PART_END = 999
+ """
+ A value to indicate that the empty part
+ has no end because it is in the last
+ position and there is no video after it.
+ """
+ class _Part:
+     """
+     Class to represent an element that is on the
+     track, which can be an empty space or a video
+     (with audio).
+     """
+
+     @property
+     def is_empty_part(
+         self
+     ) -> bool:
+         """
+         Flag to indicate if the part is an empty part,
+         which means that there is no video associated
+         but an empty space.
+         """
+         return self.video is None
+
+     def __init__(
+         self,
+         track: 'Track',
+         start: float,
+         end: float,
+         video: Union[VideoOnTrack, None] = None
+     ):
+         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+         ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
+         ParameterValidator.validate_instance_of('video', video, VideoOnTrack)
+
+         self._track: Track = track
+         """
+         The instance of the track this part belongs
+         to.
+         """
+         self.start: float = float(start)
+         """
+         The start 't' time moment of the part.
+         """
+         self.end: float = float(end)
+         """
+         The end 't' time moment of the part.
+         """
+         self.video: Union[VideoOnTrack, None] = video
+         """
+         The video associated, if existing, or
+         None if it is an empty space that we need
+         to fill with a black background and
+         silent audio.
+         """
+
+     def get_frame_at(
+         self,
+         t: float
+     ) -> 'VideoFrame':
+         """
+         Get the frame that must be displayed at
+         the given 't' time moment.
+         """
+         if self.is_empty_part:
+             # TODO: What about the 'format' (?)
+             return get_black_background_video_frame(self._track.size)
+
+         frame = self.video.get_frame_at(t)
+
+         # TODO: This should not happen because of
+         # the way we handle the videos here, but the
+         # video could send us a None frame here, so
+         # do we raise an exception (?)
+         if frame is None:
+             #frame = get_black_background_video_frame(self._track.size)
+             # TODO: For now I'm raising an exception to check if
+             # this happens or not because I think it would
+             # be malfunctioning
+             raise Exception(f'Video is returning None frame at t={str(t)}.')
+
+         return frame
+
+     # TODO: I'm not sure if we need this
+     def get_audio_frames_at(
+         self,
+         t: float
+     ):
+         if not self.is_empty_part:
+             frames = self.video.get_audio_frames_at(t)
+         else:
+             # TODO: Transform this below into a util in
+             # which I obtain the array directly
+             # Check how many full and partial silent frames we need
+             number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
+                 fps = self._track.fps,
+                 sample_rate = self._track.audio_fps,
+                 nb_samples = self._track.audio_nb_samples
+             )
+
+             # TODO: I need to set the pts, but here (?)
+             # The complete silent frames we need
+             frames = (
+                 [
+                     get_silent_audio_frame(
+                         sample_rate = self._track.audio_fps,
+                         # TODO: Check where we get this value from
+                         layout = 'stereo',
+                         nb_samples = self._track.audio_nb_samples,
+                         # TODO: Check where we get this value from
+                         format = 'fltp'
+                     )
+                 ] * number_of_frames
+                 if number_of_frames > 0 else
+                 []
+             )
+
+             # The partial silent frame with the remaining samples
+             if number_of_remaining_samples > 0:
+                 frames.append(
+                     get_silent_audio_frame(
+                         sample_rate = self._track.audio_fps,
+                         # TODO: Check where we get this value from
+                         layout = 'stereo',
+                         nb_samples = number_of_remaining_samples,
+                         # TODO: Check where we get this value from
+                         format = 'fltp'
+                     )
+                 )
+
+         # TODO: Return or yield (?)
+         for frame in frames:
+             yield frame
+         #return frames
+
+ # TODO: I don't like using t as float,
+ # we need to implement fractions.Fraction
+ # TODO: This is called Track but it is
+ # handling videos only. Should I have
+ # VideoTrack and AudioTrack (?)
+ class Track:
+     """
+     Class to represent a track in which we place
+     videos, images and audio to build a video
+     project.
+     """
+
+     @property
+     def parts(
+         self
+     ) -> list[_Part]:
+         """
+         The list of parts that build this track,
+         but with the empty parts detected, to
+         be filled with black frames and silent
+         audio.
+
+         A part can be a video or an empty space.
+         """
+         if (
+             not hasattr(self, '_parts') or
+             self._parts is None
+         ):
+             self._recalculate_parts()
+
+         return self._parts
+
+     @property
+     def end(
+         self
+     ) -> float:
+         """
+         The end of the last video of this track,
+         which is also the end of the track. This
+         is the last time moment that has to be
+         rendered.
+         """
+         return (
+             0.0
+             if len(self.videos) == 0 else
+             max(
+                 video.end
+                 for video in self.videos
+             )
+         )
+
+     def __init__(
+         self,
+         # TODO: I need the general settings of the
+         # project to be able to make audio also, not
+         # only the empty frames
+         size: tuple[int, int],
+         fps: float,
+         audio_fps: float,
+         # TODO: Change the name
+         audio_nb_samples: int
+     ):
+         self.videos: list[VideoOnTrack] = []
+         """
+         The list of 'VideoOnTrack' instances that
+         must play on this track.
+         """
+         self.size: tuple[int, int] = size
+         """
+         The size of the videos of this track.
+         """
+         self.fps: float = fps
+         """
+         The fps of the track, needed to calculate
+         the base t time moments to be precise and
+         to obtain or generate the frames.
+         """
+         self.audio_fps: float = audio_fps
+         """
+         The fps of the audio track, needed to
+         generate silent audios for the empty parts.
+         """
+         self.audio_nb_samples: int = audio_nb_samples
+         """
+         The number of samples per audio frame.
+         """
+
+     def _is_free(
+         self,
+         start: float,
+         end: float
+     ) -> bool:
+         """
+         Check if the time range in between the
+         'start' and 'end' time given is free or
+         there is some video playing at any moment.
+         """
+         return not any(
+             (
+                 video.video.start < end and
+                 video.video.end > start
+             )
+             for video in self.videos
+         )
+
+     def _get_part_at_t(
+         self,
+         t: float
+     ) -> _Part:
+         """
+         Get the part at the given 't' time
+         moment, that will always exist because
+         we have a special non-ended last
+         empty part that will be returned when
+         accessing an empty 't'.
+         """
+         for part in self.parts:
+             if part.start <= t < part.end:
+                 return part
+
+         # TODO: This will only happen if they are
+         # asking for a value greater than the
+         # NON_LIMITED_EMPTY_PART_END...
+         raise Exception('NON_LIMITED_EMPTY_PART_END exceeded.')
+         return None
+
+     def get_frame_at(
+         self,
+         t: float
+     ) -> 'VideoFrame':
+         """
+         Get the frame that must be displayed at
+         the 't' time moment provided, which is
+         a frame from the video that is
+         being played at that time moment.
+
+         Remember, this 't' time moment provided
+         is about the track, and we make the
+         conversion to the actual video 't' to
+         get the frame.
+         """
+         # TODO: What if the frame, that comes from
+         # a video, doesn't have the expected size (?)
+         return self._get_part_at_t(t).get_frame_at(t)
+
+     # TODO: This is not working well...
+     def get_audio_frames_at(
+         self,
+         t: float
+     ):
+         """
+         Get the sequence of audio frames that
+         must be played at the 't' time
+         moment provided, which is the collection
+         of audio frames corresponding to the
+         video frame that is being played at
+         that time moment.
+
+         Remember, this 't' time moment provided
+         is about the track, and we make the
+         conversion to the actual video 't' to
+         get the frame.
+
+         This is useful when we want to write a
+         video frame with its audio, so we obtain
+         all the audio frames associated with it
+         (remember that a video frame is associated
+         with more than 1 audio frame).
+         """
+         for frame in self._get_part_at_t(t).get_audio_frames_at(t):
+             yield frame
+
+     def add_video(
+         self,
+         video: Video,
+         t: Union[float, None] = None
+     ) -> 'Track':
+         """
+         Add the 'video' provided to the track. If
+         a 't' time moment is provided, the video
+         will be added at that time moment if
+         possible. If there is no other video
+         placed in the time gap between the given
+         't' and the provided 'video' duration, it
+         will be added successfully. Otherwise,
+         an exception will be raised.
+
+         If 't' is None, the first available 't'
+         time moment will be used, which will be 0.0
+         if there is no video, or the end of the last video.
+         """
+         ParameterValidator.validate_mandatory_instance_of('video', video, Video)
+         ParameterValidator.validate_positive_number('t', t, do_include_zero = True)
+
+         if t is not None:
+             # TODO: We can have many different strategies
+             # that we could define in the '__init__' maybe
+             # TODO: I don't like using float 't', but we
+             # need to make sure it is a multiple of 1 / fps
+             t = T.get_frame_time_base(float(t), self.fps)
+             if not self._is_free(t, (t + video.end)):
+                 raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
+         else:
+             t = self.end
+
+         self.videos.append(VideoOnTrack(
+             video,
+             t
+         ))
+
+         self._recalculate_parts()
+
+         # TODO: Maybe return the VideoOnTrack instead (?)
+         return self
+
+     def _recalculate_parts(
+         self
+     ) -> 'Track':
+         """
+         Check the track and get all the parts. A
+         part can be empty (no video nor audio in
+         that time period, which means black
+         background and silent audio), or a video
+         with (or without) audio.
+         """
+         parts = []
+         cursor = 0.0
+
+         for video in self.videos:
+             # Empty space between cursor and start of
+             # the next clip
+             if video.start > cursor:
+                 parts.append(_Part(
+                     track = self,
+                     start = cursor,
+                     end = video.start,
+                     video = None
+                 ))
+
+             # The video itself
+             parts.append(_Part(
+                 track = self,
+                 start = video.start,
+                 end = video.end,
+                 video = video
+             ))
+
+             cursor = video.end
+
+         # Add the non-limited last empty part
+         parts.append(_Part(
+             track = self,
+             start = cursor,
+             end = NON_LIMITED_EMPTY_PART_END,
+             video = None
+         ))
+
+         self._parts = parts
+
+         return self
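
Note on the empty-part audio fill in `_Part.get_audio_frames_at` above: the helper `audio_frames_and_remainder_per_video_frame` (defined in `yta_video_opengl.utils`, not shown in this diff) is expected to split the samples that cover exactly one video frame into full frames of `nb_samples` plus a remainder. A hedged sketch of that split, with an illustrative `frames_and_remainder` stand-in whose body is assumed, not copied from the package:

```python
# Illustrative stand-in for audio_frames_and_remainder_per_video_frame:
# how many full audio frames of 'nb_samples', plus how many leftover
# samples, are needed to cover exactly one video frame.
def frames_and_remainder(fps: float, sample_rate: float, nb_samples: int):
    samples_per_video_frame = int(sample_rate / fps)      # e.g. 44_100 / 60 = 735
    full_frames = samples_per_video_frame // nb_samples   # full silent frames
    remainder = samples_per_video_frame % nb_samples      # leftover samples
    return full_frames, remainder

# With the defaults shown above, every video frame needs 0 full silent
# frames plus one partial frame of 735 samples; with nb_samples = 512
# it would be 1 full frame plus 223 remaining samples.
print(frames_and_remainder(60, 44_100, 1_024))  # (0, 735)
print(frames_and_remainder(60, 44_100, 512))    # (1, 223)
```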