yta-video-opengl 0.0.17__py3-none-any.whl → 0.0.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,6 +5,13 @@ array that will be used for it. We will
5
5
  receive the values as (width, height) but
6
6
  we will invert them when needed.
7
7
 
8
+ The frames that come from an empty part
9
+ are flagged with the .metadata attribute
10
+ 'is_from_empty_part' so we can recognize
11
+ them and ignore when combining on the
12
+ timeline. We have that metadata in the
13
+ wrapper class we created.
14
+
8
15
  TODO: Check because we have a similar
9
16
  module in other project or projects.
10
17
  """
@@ -170,7 +177,7 @@ class AudioFrameGenerator:
170
177
  number_of_samples: int = 1024,
171
178
  format = 's16',
172
179
  pts: Union[int, None] = None,
173
- time_base: Union['Fraction', None] = None
180
+ time_base: Union['Fraction', None] = None
174
181
  ) -> AudioFrame:
175
182
  """
176
183
  Get an audio frame that is completely silent.
@@ -241,7 +248,7 @@ def numpy_to_audio_frame(
241
248
 
242
249
  if time_base is not None:
243
250
  frame.time_base = time_base
244
-
251
+
245
252
  return frame
246
253
 
247
254
  # TODO: Maybe transform into a Enum (?)
@@ -0,0 +1,122 @@
1
+ from yta_validation.parameter import ParameterValidator
2
+ from av.video.frame import VideoFrame
3
+ from av.audio.frame import AudioFrame
4
+ from typing import Union
5
+
6
+
7
+ IS_FROM_EMPTY_PART_METADATA = 'is_from_empty_part'
8
+
9
+ class _FrameWrappedBase:
10
+ """
11
+ Class to wrap video and audio frames from
12
+ the pyav library but to support a metadata
13
+ field to inject some information we need
14
+ when processing and combining them.
15
+ """
16
+
17
+ @property
18
+ def is_from_empty_part(
19
+ self
20
+ ) -> bool:
21
+ """
22
+ Flag to indicate if the frame comes from
23
+ an empty part or not, that will be done
24
+ by checking the 'is_from_empty_part'
25
+ attribute in the metadata.
26
+ """
27
+ return IS_FROM_EMPTY_PART_METADATA in self.metadata
28
+
29
+ def __init__(
30
+ self,
31
+ frame,
32
+ metadata: dict = {}
33
+ ):
34
+ ParameterValidator.validate_mandatory_instance_of('frame', frame, [VideoFrame, AudioFrame])
35
+ ParameterValidator.validate_mandatory_dict('metadata', metadata)
36
+
37
+ self._frame: Union[VideoFrame, AudioFrame] = frame
38
+ self.metadata: dict = metadata or {}
39
+
40
+ def __getattr__(
41
+ self,
42
+ name
43
+ ):
44
+ return getattr(self._frame, name)
45
+
46
+ def __setattr__(
47
+ self,
48
+ name,
49
+ value
50
+ ):
51
+ super().__setattr__(name, value)
52
+ #setattr(self._frame, name, value)
53
+ # if name in ('_frame', 'metadata'):
54
+ # super().__setattr__(name, value)
55
+ # else:
56
+ # setattr(self._frame, name, value)
57
+
58
+ def __repr__(
59
+ self
60
+ ):
61
+ cname = self.__class__.__name__
62
+ return f'<{cname} metadata={self.metadata} frame={self._frame!r}>'
63
+
64
+ def set_as_from_empty_part(
65
+ self
66
+ ) -> None:
67
+ """
68
+ Add the metadata information to indicate
69
+ that this is a frame that comes from an
70
+ empty part.
71
+ """
72
+ self.metadata[IS_FROM_EMPTY_PART_METADATA] = 'True'
73
+
74
+ def unwrap(
75
+ self
76
+ ):
77
+ """
78
+ Get the original frame instance.
79
+ """
80
+ return self._frame
81
+
82
+ class VideoFrameWrapped(_FrameWrappedBase):
83
+ """
84
+ Class to wrap video frames from the pyav
85
+ library but to support a metadata field
86
+ to inject some information we need when
87
+ processing and combining them.
88
+ """
89
+
90
+ def __init__(
91
+ self,
92
+ frame: VideoFrame,
93
+ metadata: dict = {},
94
+ is_from_empty_part: bool = False
95
+ ):
96
+ ParameterValidator.validate_mandatory_instance_of('frame', frame, VideoFrame)
97
+
98
+ super().__init__(frame, metadata)
99
+
100
+ if is_from_empty_part:
101
+ self.set_as_from_empty_part()
102
+
103
+ class AudioFrameWrapped(_FrameWrappedBase):
104
+ """
105
+ Class to wrap audio frames from the pyav
106
+ library but to support a metadata field
107
+ to inject some information we need when
108
+ processing and combining them.
109
+ """
110
+
111
+ def __init__(
112
+ self,
113
+ frame: AudioFrame,
114
+ metadata: dict = {},
115
+ is_from_empty_part: bool = False
116
+ ):
117
+ ParameterValidator.validate_mandatory_instance_of('frame', frame, AudioFrame)
118
+
119
+ super().__init__(frame, metadata)
120
+
121
+ if is_from_empty_part:
122
+ self.set_as_from_empty_part()
@@ -13,11 +13,14 @@ video written).
13
13
  from yta_video_opengl.complete.track import Track
14
14
  from yta_video_opengl.video import Video
15
15
  from yta_video_opengl.t import get_ts, fps_to_time_base, T
16
+ from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped, AudioFrameWrapped
16
17
  from yta_video_opengl.complete.frame_combinator import AudioFrameCombinator
17
18
  from yta_validation.parameter import ParameterValidator
19
+ from yta_validation import PythonValidator
18
20
  from av.video.frame import VideoFrame
19
21
  from av.audio.frame import AudioFrame
20
22
  from quicktions import Fraction
23
+ from functools import reduce
21
24
  from typing import Union
22
25
 
23
26
  import numpy as np
@@ -51,48 +54,103 @@ class Timeline:
51
54
  audio_fps: Union[int, Fraction] = 44_100.0, # 48_000.0 for aac
52
55
  # TODO: I don't like this name
53
56
  # TODO: Where does this come from (?)
54
- audio_samples_per_frame: int = 1024
57
+ audio_samples_per_frame: int = 1024,
58
+ video_codec: str = 'h264',
59
+ video_pixel_format: str = 'yuv420p',
60
+ audio_codec: str = 'aac'
55
61
  ):
56
- # TODO: By now we are using just two video
57
- # tracks to test the composition
58
62
  # TODO: We need to be careful with the
59
63
  # priority, by now its defined by its
60
64
  # position in the array
61
- self.tracks: list[Track] = [
62
- Track(
63
- size = size,
64
- fps = fps,
65
- audio_fps = audio_fps,
66
- # TODO: I need more info about the audio
67
- # I think
68
- audio_samples_per_frame = audio_samples_per_frame
69
- ),
70
- Track(
71
- size = size,
72
- fps = fps,
73
- audio_fps = audio_fps,
74
- # TODO: I need more info about the audio
75
- # I think
76
- audio_samples_per_frame = audio_samples_per_frame
77
- )
78
- ]
65
+ self.tracks: list[Track] = []
79
66
  """
80
67
  All the video tracks we are handling.
81
68
  """
82
- # TODO: Handle the other properties
83
- self.size = size
84
- self.fps = fps
85
- self.audio_fps = audio_fps
86
69
 
87
- # TODO: Create 'add_track' method, but by now
88
- # we hare handling only one
70
+ self.size: tuple[int, int] = size
71
+ """
72
+ The size that the final video must have.
73
+ """
74
+ self.fps: Union[int, float, Fraction] = fps
75
+ """
76
+ The fps of the output video.
77
+ """
78
+ self.audio_fps: Union[int, Fraction] = audio_fps
79
+ """
80
+ The fps of the output audio.
81
+ """
82
+ self.audio_samples_per_frame: int = audio_samples_per_frame
83
+ """
84
+ The audio samples each audio frame must
85
+ have.
86
+ """
87
+ self.video_codec: str = video_codec
88
+ """
89
+ The video codec for the video exported.
90
+ """
91
+ self.video_pixel_format: str = video_pixel_format
92
+ """
93
+ The pixel format for the video exported.
94
+ """
95
+ self.audio_codec: str = audio_codec
96
+ """
97
+ The audio codec for the audio exported.
98
+ """
99
+
100
+ # We will have 2 tracks by now
101
+ self.add_track().add_track()
102
+
103
+ def add_track(
104
+ self,
105
+ index: Union[int, None] = None
106
+ ) -> 'Timeline':
107
+ """
108
+ Add a new track to the timeline, that will
109
+ be placed in the last position (last
110
+ priority).
111
+
112
+ The new track is appended at the end unless
113
+ a valid 'index' position is provided.
114
+ """
115
+ index = (
116
+ index
117
+ if (
118
+ index is not None and
119
+ index <= len(self.tracks)
120
+ ) else
121
+ len(self.tracks)
122
+ )
123
+
124
+ # We need to change the index of the
125
+ # affected tracks (the ones that are
126
+ # in that index and after it)
127
+ if index < len(self.tracks):
128
+ for track in self.tracks:
129
+ if track.index >= index:
130
+ track.index += 1
131
+
132
+ self.tracks.append(Track(
133
+ size = self.size,
134
+ index = index,
135
+ fps = self.fps,
136
+ audio_fps = self.audio_fps,
137
+ # TODO: I need more info about the audio
138
+ # I think
139
+ audio_samples_per_frame = self.audio_samples_per_frame,
140
+ # TODO: Where do we obtain this from (?)
141
+ audio_layout = 'stereo',
142
+ audio_format = 'fltp'
143
+ ))
144
+
145
+ return self
146
+
147
+ # TODO: Create a 'remove_track'
148
+
89
149
  def add_video(
90
150
  self,
91
151
  video: Video,
92
152
  t: Union[int, float, Fraction],
93
- # TODO: This is for testing, it has to
94
- # disappear
95
- do_use_second_track: bool = False
153
+ track_index: int = 0
96
154
  ) -> 'Timeline':
97
155
  """
98
156
  Add the provided 'video' to the timeline,
@@ -101,16 +159,14 @@ class Timeline:
101
159
  TODO: The 'do_use_second_track' parameter
102
160
  is temporary.
103
161
  """
104
- # TODO: This is temporary logic by now
105
- # just to be able to test mixing frames
106
- # from 2 different tracks at the same
107
- # time
108
- index = 1 * do_use_second_track
162
+ ParameterValidator.validate_mandatory_number_between('track_index', track_index, 0, len(self.tracks))
109
163
 
110
- self.tracks[index].add_video(video, t)
164
+ self.tracks[track_index].add_video(video, t)
111
165
 
112
166
  return self
113
167
 
168
+ # TODO: Create a 'remove_video'
169
+
114
170
  # TODO: This method is not for the Track but
115
171
  # for the timeline, as one track can only
116
172
  # have consecutive elements
@@ -122,7 +178,7 @@ class Timeline:
122
178
  Get all the frames that are played at the
123
179
  't' time provided, but combined in one.
124
180
  """
125
- frames = (
181
+ frames = list(
126
182
  track.get_frame_at(t)
127
183
  for track in self.tracks
128
184
  )
@@ -134,21 +190,45 @@ class Timeline:
134
190
  # other frame in other track, or to know if
135
191
  # I want them as transparent or something
136
192
 
137
- # TODO: This is just a test function
138
- from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
139
-
140
193
  # TODO: Combinate frames, we force them to
141
194
  # rgb24 to obtain them with the same shape,
142
195
  # but maybe we have to change this because
143
196
  # we also need to handle alphas
144
- output_frame = next(frames).to_ndarray(format = 'rgb24')
197
+
198
+ # TODO: We need to ignore the ones that are
199
+ # tagged with
200
+ # .metadata['is_from_empty_part'] = 'True'
201
+
202
+ """
203
+ 1. Only empty frames
204
+ -> Black background, keep one
205
+ 2. Empty frames but other frames:
206
+ -> Skip all empty frames and apply
207
+ track orders
208
+ """
209
+
210
+ output_frame = frames[0]._frame.to_ndarray(format = 'rgb24')
211
+
145
212
  for frame in frames:
146
- # Combine them
147
- # TODO: We need to ignore the frames that
148
- # are just empty black frames and use them
149
- # not in the combination process
150
- # TODO: What about the 'format' (?)
151
- output_frame = VideoFrameCombinator.blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))
213
+ # We just need the first non-empty frame,
214
+ # that must be from the track with the
215
+ # bigger priority
216
+ # TODO: I assume, by now, that the frames
217
+ # come in order (bigger priority first)
218
+ if not frame.is_from_empty_part:
219
+ # TODO: By now I'm just returning the first
220
+ # one but we will need to check the alpha
221
+ # layer to combine if possible
222
+ output_frame = frame._frame.to_ndarray(format = 'rgb24')
223
+ break
224
+
225
+ # # TODO: This code below is to combine the
226
+ # # frames but merging all of them, that is
227
+ # # unexpected in a video editor but we have
228
+ # # the way to do it
229
+ # from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
230
+ # # TODO: What about the 'format' (?)
231
+ # output_frame = VideoFrameCombinator.blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))
152
232
 
153
233
  # TODO: How to build this VideoFrame correctly
154
234
  # and what about the 'format' (?)
@@ -159,7 +239,7 @@ class Timeline:
159
239
  self,
160
240
  t: float
161
241
  ):
162
- audio_frames = []
242
+ audio_frames: list[AudioFrameWrapped] = []
163
243
  """
164
244
  Matrix in which the rows are the different
165
245
  tracks we have, and the column includes all
@@ -179,18 +259,36 @@ class Timeline:
179
259
  # not in the combination process
180
260
 
181
261
  # We need only 1 single audio frame per column
182
- collapsed = [
262
+ collapsed_frames = [
183
263
  concatenate_audio_frames(frames)
184
264
  for frames in audio_frames
185
265
  ]
186
266
 
267
+ # TODO: What about the lengths and those
268
+ # things? They should be ok because they are
269
+ # based on our output but I'm not completely
270
+ # sure here..
271
+
272
+ # We keep only the non-silent frames because
273
+ # we will sum them after and keeping them
274
+ # will change the results.
275
+ non_empty_collapsed_frames = [
276
+ frame._frame
277
+ for frame in collapsed_frames
278
+ if not frame.is_from_empty_part
279
+ ]
280
+
281
+ if len(non_empty_collapsed_frames) == 0:
282
+ # If they were all silent, just keep one
283
+ non_empty_collapsed_frames = [collapsed_frames[0]._frame]
284
+
187
285
  # Now, mix column by column (track by track)
188
286
  # TODO: I do this to have an iterator, but
189
287
  # maybe we need more than one single audio
190
288
  # frame because of the size at the original
191
289
  # video or something...
192
290
  frames = [
193
- AudioFrameCombinator.sum_tracks_frames(collapsed, self.audio_fps)
291
+ AudioFrameCombinator.sum_tracks_frames(non_empty_collapsed_frames, self.audio_fps)
194
292
  ]
195
293
 
196
294
  for audio_frame in frames:
@@ -308,18 +406,36 @@ class Timeline:
308
406
  writer.mux_audio_frame(None)
309
407
  writer.output.close()
310
408
 
409
+ def _is_empty_part_frame(
410
+ frame: Union['VideoFrameWrapped', 'AudioFrameWrapped']
411
+ ) -> bool:
412
+ """
413
+ Flag to indicate if the frame comes from
414
+ an empty part or not.
415
+
416
+ TODO: The 'metadata' is included in our
417
+ wrapper class, not in VideoFrame or
418
+ AudioFrame classes. I should be sending
419
+ the wrapper in all the code, but by now
420
+ I'm doing it just in specific cases.
421
+ """
422
+ return (
423
+ hasattr(frame, 'metadata') and
424
+ frame.is_from_empty_part
425
+ )
311
426
 
312
- # TODO: I don't know where to put this
313
- # method because if a bit special
314
427
  # TODO: Refactor and move please
428
+ # TODO: This has to work for AudioFrame
429
+ # also, but I need it working for Wrapped
315
430
  def concatenate_audio_frames(
316
- frames: list[AudioFrame]
317
- ) -> AudioFrame:
431
+ frames: list[AudioFrameWrapped]
432
+ ) -> AudioFrameWrapped:
318
433
  """
319
- Combina varios AudioFrames consecutivos en uno solo.
320
- - Convierte a float32
321
- - Concatena muestras a lo largo del tiempo
322
- - Devuelve un AudioFrame nuevo
434
+ Concatenate all the given 'frames' in one
435
+ single audio frame and return it.
436
+
437
+ The audio frames must have the same layout
438
+ and sample rate.
323
439
  """
324
440
  if not frames:
325
441
  # TODO: This should not happen
@@ -328,29 +444,44 @@ def concatenate_audio_frames(
328
444
  if len(frames) == 1:
329
445
  return frames[0]
330
446
 
331
- # Verificamos consistencia básica
332
- sample_rate = frames[0].sample_rate
333
- layout = frames[0].layout.name
334
- channels = frames[0].layout.channels
447
+ # We need to preserve the metadata
448
+ is_from_empty_part = all(
449
+ frame.is_from_empty_part
450
+ for frame in frames
451
+ )
452
+ metadata = reduce(lambda key_values, frame: {**key_values, **frame.metadata}, frames, {})
453
+
454
+ sample_rate = frames[0]._frame.sample_rate
455
+ layout = frames[0]._frame.layout.name
335
456
 
336
457
  arrays = []
337
- for f in frames:
338
- if f.sample_rate != sample_rate or f.layout.name != layout:
458
+ # TODO: What about 'metadata' (?)
459
+ for frame in frames:
460
+ if (
461
+ frame._frame.sample_rate != sample_rate or
462
+ frame._frame.layout.name != layout
463
+ ):
339
464
  raise ValueError("Los frames deben tener mismo sample_rate y layout")
340
465
 
341
- # arr = f.to_ndarray() # (channels, samples)
466
+ # arr = frame.to_ndarray() # (channels, samples)
342
467
  # if arr.dtype == np.int16:
343
468
  # arr = arr.astype(np.float32) / 32768.0
344
469
  # elif arr.dtype != np.float32:
345
470
  # arr = arr.astype(np.float32)
346
471
 
347
- arrays.append(f.to_ndarray())
472
+ arrays.append(frame._frame.to_ndarray())
348
473
 
349
- # Concatenamos por eje de samples
350
474
  combined = np.concatenate(arrays, axis = 1)
351
475
 
352
- # Creamos un frame nuevo
353
- out = AudioFrame.from_ndarray(combined, format = frames[0].format, layout = layout)
476
+ out = AudioFrame.from_ndarray(
477
+ array = combined,
478
+ format = frames[0].format,
479
+ layout = layout
480
+ )
354
481
  out.sample_rate = sample_rate
355
482
 
356
- return out
483
+ return AudioFrameWrapped(
484
+ frame = out,
485
+ metadata = metadata,
486
+ is_from_empty_part = is_from_empty_part
487
+ )
@@ -1,8 +1,10 @@
1
1
  from yta_video_opengl.complete.video_on_track import VideoOnTrack
2
2
  from yta_video_opengl.video import Video
3
3
  from yta_video_opengl.t import T
4
+ from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
4
5
  from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
5
6
  from yta_video_opengl.t import fps_to_time_base
7
+ from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
6
8
  from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
7
9
  from yta_validation.parameter import ParameterValidator
8
10
  from quicktions import Fraction
@@ -82,30 +84,38 @@ class _Part:
82
84
  def get_frame_at(
83
85
  self,
84
86
  t: Union[int, float, Fraction]
85
- ) -> 'VideoFrame':
87
+ ) -> 'VideoFrameWrapped':
86
88
  """
87
89
  Get the frame that must be displayed at
88
90
  the given 't' time moment.
89
91
  """
90
- if self.is_empty_part:
92
+ frame = (
91
93
  # TODO: What about the 'format' (?)
92
94
  # TODO: Maybe I shouldn't set the 'time_base'
93
95
  # here and do it just in the Timeline 'render'
94
96
  #return get_black_background_video_frame(self._track.size)
95
97
  # TODO: This 'time_base' maybe has to be related
96
98
  # to a Timeline general 'time_base' and not the fps
97
- return self._video_frame_generator.background.full_black(
98
- size = self._track.size,
99
- time_base = fps_to_time_base(self._track.fps)
99
+ VideoFrameWrapped(
100
+ frame = self._video_frame_generator.background.full_black(
101
+ size = self._track.size,
102
+ time_base = fps_to_time_base(self._track.fps)
103
+ ),
104
+ is_from_empty_part = True
100
105
  )
101
-
102
- frame = self.video.get_frame_at(t)
106
+ if self.is_empty_part else
107
+ VideoFrameWrapped(
108
+ frame = self.video.get_frame_at(t),
109
+ is_from_empty_part = False
110
+ )
111
+
112
+ )
103
113
 
104
114
  # TODO: This should not happen because of
105
115
  # the way we handle the videos here but the
106
116
  # video could send us a None frame here, so
107
117
  # do we raise exception (?)
108
- if frame is None:
118
+ if frame._frame is None:
109
119
  #frame = get_black_background_video_frame(self._track.size)
110
120
  # TODO: By now I'm raising exception to check if
111
121
  # this happens or not because I think it would
@@ -118,54 +128,29 @@ class _Part:
118
128
  self,
119
129
  t: Union[int, float, Fraction]
120
130
  ):
131
+ """
132
+ Iterate over all the audio frames that
133
+ exist at the time moment 't' provided.
134
+ """
121
135
  if not self.is_empty_part:
122
- frames = self.video.get_audio_frames_at(t)
136
+ for frame in self.video.get_audio_frames_at(t):
137
+ yield AudioFrameWrapped(
138
+ frame = frame,
139
+ is_from_empty_part = False
140
+ )
123
141
  else:
124
- # TODO: Transform this below to a utils in
125
- # which I obtain the array directly
126
- # Check many full and partial silent frames we need
127
- number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
128
- video_fps = self._track.fps,
129
- sample_rate = self._track.audio_fps,
130
- number_of_samples_per_audio_frame = self._track.audio_samples_per_frame
142
+ frames = generate_silent_frames(
143
+ fps = self._track.fps,
144
+ audio_fps = self._track.audio_fps,
145
+ audio_samples_per_frame = self._track.audio_samples_per_frame,
146
+ # TODO: Where do these 2 formats come from (?)
147
+ layout = self._track.audio_layout,
148
+ format = self._track.audio_format
131
149
  )
132
150
 
133
- # TODO: I need to set the pts, but here (?)
134
- # The complete silent frames we need
135
- frames = (
136
- [
137
- self._audio_frame_generator.silent(
138
- sample_rate = self._track.audio_fps,
139
- # TODO: Check where do we get this value from
140
- layout = 'stereo',
141
- number_of_samples = self._track.audio_samples_per_frame,
142
- # TODO: Check where do we get this value from
143
- format = 'fltp',
144
- pts = None,
145
- time_base = None
146
- )
147
- ] * number_of_frames
148
- if number_of_frames > 0 else
149
- []
150
- )
151
+ for frame in frames:
152
+ yield frame
151
153
 
152
- # The remaining partial silent frames we need
153
- if number_of_remaining_samples > 0:
154
- frames.append(
155
- self._audio_frame_generator.silent(
156
- sample_rate = self._track.audio_fps,
157
- # TODO: Check where do we get this value from
158
- layout = 'stereo',
159
- number_of_samples = number_of_remaining_samples,
160
- # TODO: Check where do we get this value from
161
- format = 'fltp',
162
- pts = None,
163
- time_base = None
164
- )
165
- )
166
-
167
- for frame in frames:
168
- yield frame
169
154
 
170
155
  # TODO: I don't like using t as float,
171
156
  # we need to implement fractions.Fraction
@@ -228,27 +213,50 @@ class Track:
228
213
  from first to last.
229
214
  """
230
215
  return sorted(self._videos, key = lambda video: video.start)
216
+
217
+ @property
218
+ def is_muted(
219
+ self
220
+ ) -> bool:
221
+ """
222
+ Flag to indicate if the track is muted or
223
+ not. Being muted means that no audio frames
224
+ will be returned from this track.
225
+ """
226
+ return self._is_muted
231
227
 
232
228
  def __init__(
233
229
  self,
234
230
  # TODO: I need the general settings of the
235
231
  # project to be able to make audio also, not
236
232
  # only the empty frames
233
+ index: int,
237
234
  size: tuple[int, int],
238
235
  fps: float,
239
236
  audio_fps: float,
240
237
  # TODO: Where does it come from (?)
241
- audio_samples_per_frame: int
238
+ audio_samples_per_frame: int,
239
+ audio_layout: str = 'stereo',
240
+ audio_format: str = 'fltp'
242
241
  ):
243
242
  self._videos: list[VideoOnTrack] = []
244
243
  """
245
244
  The list of 'VideoOnTrack' instances that
246
245
  must play on this track.
247
246
  """
247
+ self._is_muted: bool = False
248
+ """
249
+ Internal flag to indicate if the track is
250
+ muted or not.
251
+ """
248
252
  self.size: tuple[int, int] = size
249
253
  """
250
254
  The size of the videos of this track.
251
255
  """
256
+ self.index: int = index
257
+ """
258
+ The index of the track within the timeline.
259
+ """
252
260
  self.fps: float = float(fps)
253
261
  """
254
262
  The fps of the track, needed to calculate
@@ -264,6 +272,16 @@ class Track:
264
272
  """
265
273
  The number of samples per audio frame.
266
274
  """
275
+ self.audio_layout: str = audio_layout
276
+ """
277
+ The layout of the audio, that can be 'mono'
278
+ or 'stereo'.
279
+ """
280
+ self.audio_format: str = audio_format
281
+ """
282
+ The format of the audio, that can be 's16',
283
+ 'flt', 'fltp', etc.
284
+ """
267
285
 
268
286
  def _is_free(
269
287
  self,
@@ -277,8 +295,8 @@ class Track:
277
295
  """
278
296
  return not any(
279
297
  (
280
- video.video.start < end and
281
- video.video.end > start
298
+ video.start < end and
299
+ video.end > start
282
300
  )
283
301
  for video in self.videos
284
302
  )
@@ -304,10 +322,28 @@ class Track:
304
322
  raise Exception('NON_LIMITED_EMPTY_PART_END exceeded.')
305
323
  return None
306
324
 
325
+ def mute(
326
+ self
327
+ ) -> 'Track':
328
+ """
329
+ Set the track as muted so no audio frame will
330
+ be played from this track.
331
+ """
332
+ self._is_muted = True
333
+
334
+ def unmute(
335
+ self
336
+ ) -> 'Track':
337
+ """
338
+ Set the track as unmuted so the audio frames
339
+ will be played as normal.
340
+ """
341
+ self._is_muted = False
342
+
307
343
  def get_frame_at(
308
344
  self,
309
345
  t: Union[int, float, Fraction]
310
- ) -> 'VideoFrame':
346
+ ) -> 'VideoFrameWrapped':
311
347
  """
312
348
  Get the frame that must be displayed at
313
349
  the 't' time moment provided, which is
@@ -347,7 +383,19 @@ class Track:
347
383
  (remember that a video frame is associated
348
384
  with more than 1 audio frame).
349
385
  """
350
- for frame in self._get_part_at_t(t).get_audio_frames_at(t):
386
+ frames = (
387
+ generate_silent_frames(
388
+ fps = self.fps,
389
+ audio_fps = self.audio_fps,
390
+ audio_samples_per_frame = self.audio_samples_per_frame,
391
+ layout = self.audio_layout,
392
+ format = self.audio_format
393
+ )
394
+ if self.is_muted else
395
+ self._get_part_at_t(t).get_audio_frames_at(t)
396
+ )
397
+
398
+ for frame in frames:
351
399
  yield frame
352
400
 
353
401
  def add_video(
@@ -376,7 +424,8 @@ class Track:
376
424
  # TODO: We can have many different strategies
377
425
  # that we could define in the '__init__' maybe
378
426
  t: T = T.from_fps(t, self.fps)
379
- if not self._is_free(t.truncated, t.next(1).truncated):
427
+ #if not self._is_free(t.truncated, t.next(1).truncated):
428
+ if not self._is_free(t.truncated, t.truncated + video.duration):
380
429
  raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
381
430
  t = t.truncated
382
431
  else:
@@ -436,4 +485,78 @@ class Track:
436
485
 
437
486
  self._parts = parts
438
487
 
439
- return self
488
+ return self
489
+
490
+ # TODO: Is this method here ok (?)
491
+ def generate_silent_frames(
492
+ fps: int,
493
+ audio_fps: int,
494
+ audio_samples_per_frame: int,
495
+ layout: str = 'stereo',
496
+ format: str = 'fltp'
497
+ ) -> list[AudioFrameWrapped]:
498
+ """
499
+ Get the audio silent frames we need for
500
+ a video with the given 'fps', 'audio_fps'
501
+ and 'audio_samples_per_frame', using the
502
+ also provided 'layout' and 'format' for
503
+ the audio frames.
504
+
505
+ This method is used when we have empty
506
+ parts on our tracks and we need to
507
+ provide the frames, that are passed as
508
+ AudioFrameWrapped instances and tagged as
509
+ coming from empty parts.
510
+ """
511
+ audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
512
+
513
+ # Check how many full and partial silent
514
+ # audio frames we need
515
+ number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
516
+ video_fps = fps,
517
+ sample_rate = audio_fps,
518
+ number_of_samples_per_audio_frame = audio_samples_per_frame
519
+ )
520
+
521
+ # The complete silent frames we need
522
+ silent_frame = audio_frame_generator.silent(
523
+ sample_rate = audio_fps,
524
+ layout = layout,
525
+ number_of_samples = audio_samples_per_frame,
526
+ format = format,
527
+ pts = None,
528
+ time_base = None
529
+ )
530
+
531
+ frames = (
532
+ [
533
+ AudioFrameWrapped(
534
+ frame = silent_frame,
535
+ is_from_empty_part = True
536
+ )
537
+ ] * number_of_frames
538
+ if number_of_frames > 0 else
539
+ []
540
+ )
541
+
542
+ # The remaining partial silent frames we need
543
+ if number_of_remaining_samples > 0:
544
+ silent_frame = audio_frame_generator.silent(
545
+ sample_rate = audio_fps,
546
+ # TODO: Check where do we get this value from
547
+ layout = layout,
548
+ number_of_samples = number_of_remaining_samples,
549
+ # TODO: Check where do we get this value from
550
+ format = format,
551
+ pts = None,
552
+ time_base = None
553
+ )
554
+
555
+ frames.append(
556
+ AudioFrameWrapped(
557
+ frame = silent_frame,
558
+ is_from_empty_part = True
559
+ )
560
+ )
561
+
562
+ return frames
@@ -127,7 +127,7 @@ class VideoOnTrack:
127
127
  def get_audio_frames_at(
128
128
  self,
129
129
  t: Union[int, float, Fraction]
130
- ) -> Union[any, None]:
130
+ ):
131
131
  """
132
132
  Get the audio frames that must be played at
133
133
  the 't' time moment provided, that could be
@@ -51,7 +51,10 @@ class VideoFrameCache(FrameCache):
51
51
  Get the video frame that is in the 't'
52
52
  time moment provided.
53
53
  """
54
+ print(f'Getting frame from {str(float(t))}')
55
+ print(f'FPS: {str(self.fps)}')
54
56
  t: T = T.from_fps(t, self.fps)
57
+ print(f'So the actual frame is from {str(float(t.truncated))} to {str(float(t.next(1).truncated))}')
55
58
  for frame in self.get_frames(t.truncated, t.next(1).truncated):
56
59
  return frame
57
60
 
yta_video_opengl/tests.py CHANGED
@@ -601,10 +601,25 @@ def video_modified_stored():
601
601
  # must be played at the same time
602
602
  video = Video(VIDEO_PATH, 0.25, 0.75)
603
603
  timeline = Timeline()
604
- timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75)
605
- timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0)
606
- timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.1, do_use_second_track = True)
607
- timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
604
+
605
+ transitions_30fps = 'C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4'
606
+ simpsons_60fps = 'C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4'
607
+
608
+ # Track 1
609
+ timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75, track_index = 0)
610
+ timeline.add_video(Video(simpsons_60fps, 1.5, 2.0), 3.0, track_index = 0)
611
+ timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0, track_index = 0)
612
+
613
+ timeline.tracks[0].mute()
614
+
615
+ # Track 2
616
+ timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.7, track_index = 1)
617
+ timeline.add_video(Video(simpsons_60fps, 5.8, 7.8), 0.6, track_index = 1)
618
+ # 30fps
619
+ # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
620
+ # 29.97fps
621
+ # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
622
+
608
623
  timeline.render(OUTPUT_PATH)
609
624
 
610
625
  return
yta_video_opengl/video.py CHANGED
@@ -167,8 +167,9 @@ class Video:
167
167
  actually the 1.5s of the video because of
168
168
  the subclipped time range.
169
169
  """
170
- t = self.start + t
170
+ t += self.start
171
171
 
172
+ print(f'Video real t is {str(float(t))}')
172
173
  if t >= self.end:
173
174
  raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
174
175
 
@@ -182,7 +183,9 @@ class Video:
182
183
  Get the video frame with the given 't' time
183
184
  moment, using the video cache system.
184
185
  """
185
- return self.reader.video_cache.get_frame(self._get_real_t(t))
186
+ print(f'Getting frame from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
187
+ return self.reader.get_frame(self._get_real_t(t))
188
+ #return self.reader.video_cache.get_frame(self._get_real_t(t))
186
189
 
187
190
  def get_audio_frame_from_t(
188
191
  self,
@@ -214,6 +217,7 @@ class Video:
214
217
  (remember that a video frame is associated
215
218
  with more than 1 audio frame).
216
219
  """
220
+ print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
217
221
  for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
218
222
  yield frame
219
223
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: yta-video-opengl
3
- Version: 0.0.17
3
+ Version: 0.0.19
4
4
  Summary: Youtube Autonomous Video OpenGL Module
5
5
  Author: danialcala94
6
6
  Author-email: danielalcalavalera@gmail.com
@@ -2,10 +2,11 @@ yta_video_opengl/__init__.py,sha256=ycAx_XYMVDfkuObSvtW6irQ0Wo-fgxEz3fjIRMe8PpY,
2
2
  yta_video_opengl/classes.py,sha256=t5-Tfc7ecvHl8JlVBp_FVzZT6ole6Ly5-FeBBH7wcxo,37742
3
3
  yta_video_opengl/complete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  yta_video_opengl/complete/frame_combinator.py,sha256=uYg7907knjBlmZUZCCzkxDcj0Nown0muvL5PNVS707A,9413
5
- yta_video_opengl/complete/frame_generator.py,sha256=mhtm22COVb_BmlLOIWsCPBz1a0ge_O9pN4UQXafCNXE,7625
6
- yta_video_opengl/complete/timeline.py,sha256=z9I6cFUvx2U-Xe18piZ3ZxH_ypGmmX3xnOHZwKiZ_gY,12647
7
- yta_video_opengl/complete/track.py,sha256=UiA8-vh-CRW3h29Pk3kXZArzAm0bmtireNkbuYQ5qMs,14741
8
- yta_video_opengl/complete/video_on_track.py,sha256=KROAI0bndnfcvKlHGsSEyWg9o1xozW0PI_Rhqp0r9kw,4844
5
+ yta_video_opengl/complete/frame_generator.py,sha256=VRcPgpqfxQWMeLzgEJObbM0xu7_85I1y_YyQVhcEswc,7853
6
+ yta_video_opengl/complete/frame_wrapper.py,sha256=g0aTcUVmF5uQtxs95_XsxlwL0QUj-fNOSRHvK4ENqg4,3347
7
+ yta_video_opengl/complete/timeline.py,sha256=d6_5Yd5n6TTjwo0ozKNDsd3GAfL12ATx0w-TVkIr0Eo,16767
8
+ yta_video_opengl/complete/track.py,sha256=NCfNLOjg_7AWX5g5moYgAjmgAJ22Hp92TLa5pdoW6RM,17910
9
+ yta_video_opengl/complete/video_on_track.py,sha256=laxDvMr1rrmnDfHpk825j42f4pj9I1X8vOFmzwteuM8,4824
9
10
  yta_video_opengl/nodes/__init__.py,sha256=TZ-ZO05PZ0_ABq675E22_PngLWOe-_w5s1cLlV3NbWM,3469
10
11
  yta_video_opengl/nodes/audio/__init__.py,sha256=4nKkC70k1UgLcCSPqFWm3cKdaJM0KUmQTwGWv1xFarQ,2926
11
12
  yta_video_opengl/nodes/video/__init__.py,sha256=gSoaoEmjdQmyRwH18mf5z3NAhap3S0RgbeBbfBXi4jc,132
@@ -14,13 +15,13 @@ yta_video_opengl/reader/__init__.py,sha256=kKvOAEeDjIwAaWtpDEQHdAd_Gwk3Ssz2tv6gp
14
15
  yta_video_opengl/reader/cache/__init__.py,sha256=PAfGM2J-8Vv6p6Cd9aAUvyBcw3rjx2gy_2pJO22VtDM,7020
15
16
  yta_video_opengl/reader/cache/audio.py,sha256=cm_1D5f5RnmJgaidA1pnEhTPF8DE0mU2MofmwjU_b5k,6781
16
17
  yta_video_opengl/reader/cache/utils.py,sha256=9aJ6qyUFRvoh2jRbIvtF_-1MOm_sgQtPiy0WXLCZYcA,1402
17
- yta_video_opengl/reader/cache/video.py,sha256=CSVgb3Sjqzk22sQkukoakVzms-wwZpXOT61Y6tirhjg,3292
18
+ yta_video_opengl/reader/cache/video.py,sha256=3sT9cE0sdTty5AE9yFAPJrJNxCX5vWVATK8OeJInr8I,3496
18
19
  yta_video_opengl/t.py,sha256=xOhT1xBEwChlXf-Tuy-WxA_08iRJWVlnL_Hyzr-9-sk,6633
19
- yta_video_opengl/tests.py,sha256=6QvJx9y4kCiq7b9-AKMetzGuJjd__pTBK5r4tJp3aso,27321
20
+ yta_video_opengl/tests.py,sha256=eyFnz7yBDJyIoti-cV7Dz1bMTeF61Z4_JrFkLXuZQl4,28019
20
21
  yta_video_opengl/utils.py,sha256=yUi17EjNR4SVpvdDUwUaKl4mBCb1uyFCSGoIX3Zr2F0,15586
21
- yta_video_opengl/video.py,sha256=JPIWDQcYlLi8eT2LOFQtS1jVu5xVmW4bz1VMtP0gMeA,8626
22
+ yta_video_opengl/video.py,sha256=Fgu_BzuDlMbfl1Hwjk8Yzo3ZxO73wPuyTUjTbf9OSLw,8951
22
23
  yta_video_opengl/writer.py,sha256=QwvjQcEkzn1WAVqVTFiI6tYIXJO67LKKUTJGO_eflFM,8893
23
- yta_video_opengl-0.0.17.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
24
- yta_video_opengl-0.0.17.dist-info/METADATA,sha256=joHFJiGnEqyDbZYS_L4igkLn6GSHOCCWQ9nxOYjZoOw,714
25
- yta_video_opengl-0.0.17.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
26
- yta_video_opengl-0.0.17.dist-info/RECORD,,
24
+ yta_video_opengl-0.0.19.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
25
+ yta_video_opengl-0.0.19.dist-info/METADATA,sha256=wLhgCKzFD4EJiPuuCnNv40dimACB-Zkg8A7otla6ins,714
26
+ yta_video_opengl-0.0.19.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
27
+ yta_video_opengl-0.0.19.dist-info/RECORD,,