yta-video-opengl 0.0.17.tar.gz → 0.0.18.tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in that registry.
Files changed (27)
  1. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/PKG-INFO +1 -1
  2. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/pyproject.toml +2 -2
  3. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/complete/frame_generator.py +9 -2
  4. yta_video_opengl-0.0.18/src/yta_video_opengl/complete/frame_wrapper.py +122 -0
  5. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/complete/timeline.py +146 -39
  6. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/complete/track.py +58 -30
  7. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/complete/video_on_track.py +1 -1
  8. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/reader/cache/video.py +3 -0
  9. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/tests.py +6 -1
  10. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/video.py +6 -2
  11. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/LICENSE +0 -0
  12. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/README.md +0 -0
  13. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/__init__.py +0 -0
  14. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/classes.py +0 -0
  15. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/complete/__init__.py +0 -0
  16. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/complete/frame_combinator.py +0 -0
  17. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/nodes/__init__.py +0 -0
  18. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
  19. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
  20. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
  21. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/reader/__init__.py +0 -0
  22. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/reader/cache/__init__.py +0 -0
  23. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/reader/cache/audio.py +0 -0
  24. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/reader/cache/utils.py +0 -0
  25. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/t.py +0 -0
  26. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/utils.py +0 -0
  27. {yta_video_opengl-0.0.17 → yta_video_opengl-0.0.18}/src/yta_video_opengl/writer.py +0 -0
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.17
+Version: 0.0.18
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "yta-video-opengl"
-version = "0.0.17"
+version = "0.0.18"
 description = "Youtube Autonomous Video OpenGL Module"
 authors = [
     {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -14,7 +14,7 @@ dependencies = [
     "av (>=0.0.1,<19.0.0)",
     "moderngl (>=0.0.1,<9.0.0)",
     "numpy (>=0.0.1,<9.0.0)",
-    "quicktions (>=0.0.1,<9.0.0)"
+    "quicktions (>=0.0.1,<9.0.0)",
 ]

 [tool.poetry]
--- a/src/yta_video_opengl/complete/frame_generator.py
+++ b/src/yta_video_opengl/complete/frame_generator.py
@@ -5,6 +5,13 @@ array that will be used for it. We will
 receive the values as (width, height) but
 we will invert them when needed.

+The frames that come from an empty part
+are flagged with the .metadata attribute
+'is_from_empty_part' so we can recognize
+them and ignore when combining on the
+timeline. We have that metadata in the
+wrapper class we created.
+
 TODO: Check because we have a similar
 module in other project or projects.
 """
@@ -170,7 +177,7 @@ class AudioFrameGenerator:
         number_of_samples: int = 1024,
         format = 's16',
         pts: Union[int, None] = None,
-        time_base: Union['Fraction', None] = None
+        time_base: Union['Fraction', None] = None
     ) -> AudioFrame:
         """
         Get an audio frame that is completely silent.
@@ -241,7 +248,7 @@ def numpy_to_audio_frame(

     if time_base is not None:
         frame.time_base = time_base
-
+
     return frame

 # TODO: Maybe transform into a Enum (?)
--- /dev/null
+++ b/src/yta_video_opengl/complete/frame_wrapper.py
@@ -0,0 +1,122 @@
+from yta_validation.parameter import ParameterValidator
+from av.video.frame import VideoFrame
+from av.audio.frame import AudioFrame
+from typing import Union
+
+
+IS_FROM_EMPTY_PART_METADATA = 'is_from_empty_part'
+
+class _FrameWrappedBase:
+    """
+    Class to wrap video and audio frames from
+    the pyav library but to support a metadata
+    field to inject some information we need
+    when processing and combining them.
+    """
+
+    @property
+    def is_from_empty_part(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the frame comes from
+        an empty part or not, that will be done
+        by checking the 'is_from_empty_part'
+        attribute in the metadata.
+        """
+        return IS_FROM_EMPTY_PART_METADATA in self.metadata
+
+    def __init__(
+        self,
+        frame,
+        metadata: dict = {}
+    ):
+        ParameterValidator.validate_mandatory_instance_of('frame', frame, [VideoFrame, AudioFrame])
+        ParameterValidator.validate_mandatory_dict('metadata', metadata)
+
+        self._frame: Union[VideoFrame, AudioFrame] = frame
+        self.metadata: dict = metadata or {}
+
+    def __getattr__(
+        self,
+        name
+    ):
+        return getattr(self._frame, name)
+
+    def __setattr__(
+        self,
+        name,
+        value
+    ):
+        super().__setattr__(name, value)
+        #setattr(self._frame, name, value)
+        # if name in ('_frame', 'metadata'):
+        #     super().__setattr__(name, value)
+        # else:
+        #     setattr(self._frame, name, value)
+
+    def __repr__(
+        self
+    ):
+        cname = self.__class__.__name__
+        return f'<{cname} metadata={self.metadata} frame={self._frame!r}>'
+
+    def set_as_from_empty_part(
+        self
+    ) -> None:
+        """
+        Add the metadata information to indicate
+        that this is a frame that comes from an
+        empty part.
+        """
+        self.metadata[IS_FROM_EMPTY_PART_METADATA] = 'True'
+
+    def unwrap(
+        self
+    ):
+        """
+        Get the original frame instance.
+        """
+        return self._frame
+
+class VideoFrameWrapped(_FrameWrappedBase):
+    """
+    Class to wrap video frames from the pyav
+    library but to support a metadata field
+    to inject some information we need when
+    processing and combining them.
+    """
+
+    def __init__(
+        self,
+        frame: VideoFrame,
+        metadata: dict = {},
+        is_from_empty_part: bool = False
+    ):
+        ParameterValidator.validate_mandatory_instance_of('frame', frame, VideoFrame)
+
+        super().__init__(frame, metadata)
+
+        if is_from_empty_part:
+            self.set_as_from_empty_part()
+
+class AudioFrameWrapped(_FrameWrappedBase):
+    """
+    Class to wrap audio frames from the pyav
+    library but to support a metadata field
+    to inject some information we need when
+    processing and combining them.
+    """
+
+    def __init__(
+        self,
+        frame: AudioFrame,
+        metadata: dict = {},
+        is_from_empty_part: bool = False
+    ):
+        ParameterValidator.validate_mandatory_instance_of('frame', frame, AudioFrame)
+
+        super().__init__(frame, metadata)
+
+        if is_from_empty_part:
+            self.set_as_from_empty_part()
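A minimal usage sketch of the new wrapper, assuming only what the file above defines (the silent AudioFrame is built here with plain pyav calls, purely for illustration):

    import numpy as np
    from av.audio.frame import AudioFrame
    from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped

    # A short silent stereo frame; planar 'fltp' expects a
    # (channels, samples) float32 array.
    silent = AudioFrame.from_ndarray(
        np.zeros((2, 1024), dtype = np.float32),
        format = 'fltp',
        layout = 'stereo'
    )
    silent.sample_rate = 44_100

    wrapped = AudioFrameWrapped(frame = silent, is_from_empty_part = True)

    print(wrapped.is_from_empty_part)   # True, read from .metadata
    print(wrapped.sample_rate)          # 44100, delegated to the inner
                                        # frame through __getattr__
    print(wrapped.unwrap() is silent)   # True, the raw pyav frame back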
--- a/src/yta_video_opengl/complete/timeline.py
+++ b/src/yta_video_opengl/complete/timeline.py
@@ -13,11 +13,14 @@ video written).
 from yta_video_opengl.complete.track import Track
 from yta_video_opengl.video import Video
 from yta_video_opengl.t import get_ts, fps_to_time_base, T
+from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped, AudioFrameWrapped
 from yta_video_opengl.complete.frame_combinator import AudioFrameCombinator
 from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
 from av.video.frame import VideoFrame
 from av.audio.frame import AudioFrame
 from quicktions import Fraction
+from functools import reduce
 from typing import Union

 import numpy as np
@@ -51,7 +54,10 @@ class Timeline:
         audio_fps: Union[int, Fraction] = 44_100.0, # 48_000.0 for aac
         # TODO: I don't like this name
         # TODO: Where does this come from (?)
-        audio_samples_per_frame: int = 1024
+        audio_samples_per_frame: int = 1024,
+        video_codec: str = 'h264',
+        video_pixel_format: str = 'yuv420p',
+        audio_codec: str = 'aac'
     ):
         # TODO: By now we are using just two video
         # tracks to test the composition
@@ -79,10 +85,36 @@ class Timeline:
         """
         All the video tracks we are handling.
         """
-        # TODO: Handle the other properties
-        self.size = size
-        self.fps = fps
-        self.audio_fps = audio_fps
+
+        self.size: tuple[int, int] = size
+        """
+        The size that the final video must have.
+        """
+        self.fps: Union[int, float, Fraction] = fps
+        """
+        The fps of the output video.
+        """
+        self.audio_fps: Union[int, Fraction] = audio_fps
+        """
+        The fps of the output audio.
+        """
+        self.audio_samples_per_frame: int = audio_samples_per_frame
+        """
+        The audio samples each audio frame must
+        have.
+        """
+        self.video_codec: str = video_codec
+        """
+        The video codec for the video exported.
+        """
+        self.video_pixel_format: str = video_pixel_format
+        """
+        The pixel format for the video exported.
+        """
+        self.audio_codec: str = audio_codec
+        """
+        The audio codec for the audio exported.
+        """

         # TODO: Create 'add_track' method, but by now
         # we hare handling only one
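The constructor now stores explicit output settings instead of the old "# TODO: Handle the other properties". A minimal construction sketch, assuming `size` and `fps` are the leading parameters as the attribute assignments suggest (those two values are illustrative; the last four are the defaults shown above):

    from yta_video_opengl.complete.timeline import Timeline

    timeline = Timeline(
        size = (1920, 1080),
        fps = 60,
        audio_fps = 44_100,
        audio_samples_per_frame = 1024,
        video_codec = 'h264',
        video_pixel_format = 'yuv420p',
        audio_codec = 'aac'
    )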
@@ -122,7 +154,7 @@ class Timeline:
         Get all the frames that are played at the
         't' time provided, but combined in one.
         """
-        frames = (
+        frames = list(
             track.get_frame_at(t)
             for track in self.tracks
         )
@@ -134,21 +166,45 @@ class Timeline:
         # other frame in other track, or to know if
         # I want them as transparent or something

-        # TODO: This is just a test function
-        from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
-
         # TODO: Combinate frames, we force them to
         # rgb24 to obtain them with the same shape,
         # but maybe we have to change this because
         # we also need to handle alphas
-        output_frame = next(frames).to_ndarray(format = 'rgb24')
+
+        # TODO: We need to ignore the ones that are
+        # tagged with
+        # .metadata['is_from_empty_part'] = 'True'
+
+        """
+        1. Only empty frames
+           -> Black background, keep one
+        2. Empty frames but other frames:
+           -> Skip all empty frames and apply
+              track orders
+        """
+
+        output_frame = frames[0]._frame.to_ndarray(format = 'rgb24')
+
         for frame in frames:
-            # Combine them
-            # TODO: We need to ignore the frames that
-            # are just empty black frames and use them
-            # not in the combination process
-            # TODO: What about the 'format' (?)
-            output_frame = VideoFrameCombinator.blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))
+            # We just need the first non-empty frame,
+            # that must be from the track with the
+            # bigger priority
+            # TODO: I assume, by now, that the frames
+            # come in order (bigger priority first)
+            if not frame.is_from_empty_part:
+                # TODO: By now I'm just returning the first
+                # one but we will need to check the alpha
+                # layer to combine if possible
+                output_frame = frame._frame.to_ndarray(format = 'rgb24')
+                break
+
+        # # TODO: This code below is to combine the
+        # # frames but merging all of them, that is
+        # # unexpected in a video editor but we have
+        # # the way to do it
+        # from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
+        # # TODO: What about the 'format' (?)
+        # output_frame = VideoFrameCombinator.blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))

         # TODO: How to build this VideoFrame correctly
         # and what about the 'format' (?)
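Both the removed call and the commented-out block rely on `VideoFrameCombinator.blend_add`, whose body this diff does not include. A sketch of what an additive blend over 'rgb24' arrays typically looks like, offered as an assumption rather than the package's actual implementation:

    import numpy as np

    def blend_add(base: np.ndarray, overlay: np.ndarray) -> np.ndarray:
        # Promote to uint16 so the per-pixel sum cannot wrap
        # around before being clipped back into the uint8 range.
        summed = base.astype(np.uint16) + overlay.astype(np.uint16)
        return np.clip(summed, 0, 255).astype(np.uint8)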
@@ -159,7 +215,7 @@ class Timeline:
         self,
         t: float
     ):
-        audio_frames = []
+        audio_frames: list[AudioFrameWrapped] = []
         """
         Matrix in which the rows are the different
         tracks we have, and the column includes all
@@ -179,18 +235,36 @@ class Timeline:
         # not in the combination process

         # We need only 1 single audio frame per column
-        collapsed = [
+        collapsed_frames = [
             concatenate_audio_frames(frames)
             for frames in audio_frames
         ]

+        # TODO: What about the lenghts and those
+        # things? They should be ok because they are
+        # based on our output but I'm not completely
+        # sure here..
+
+        # We keep only the non-silent frames because
+        # we will sum them after and keeping them
+        # will change the results.
+        collapsed_frames = [
+            frame._frame
+            for frame in collapsed_frames
+            if not frame.is_from_empty_part
+        ]
+
+        if len(collapsed_frames) == 0:
+            # If they were all silent, just keep one
+            collapsed_frames = [collapsed_frames[0]._frame]
+
         # Now, mix column by column (track by track)
         # TODO: I do this to have an iterator, but
         # maybe we need more than one single audio
         # frame because of the size at the original
         # video or something...
         frames = [
-            AudioFrameCombinator.sum_tracks_frames(collapsed, self.audio_fps)
+            AudioFrameCombinator.sum_tracks_frames(collapsed_frames, self.audio_fps)
         ]

         for audio_frame in frames:
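Note that the all-silent fallback indexes `collapsed_frames` after the filter has already emptied it, so as written it would raise IndexError. A sketch of the apparent intent, falling back to one of the original wrapped frames (a hypothetical correction, not what 0.0.18 ships):

    def collapse_non_silent(collapsed_frames: list) -> list:
        # Drop the silent (empty-part) frames before summing...
        non_silent = [
            frame._frame
            for frame in collapsed_frames
            if not frame.is_from_empty_part
        ]
        # ...but fall back to one ORIGINAL silent frame when every
        # track was silent, so the mixer still receives a frame
        # with the expected shape.
        if len(non_silent) == 0:
            non_silent = [collapsed_frames[0]._frame]
        return non_silent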
@@ -308,18 +382,36 @@ class Timeline:
         writer.mux_audio_frame(None)
         writer.output.close()

+def _is_empty_part_frame(
+    frame: Union['VideoFrameWrapped', 'AudioFrameWrapped']
+) -> bool:
+    """
+    Flag to indicate if the frame comes from
+    an empty part or not.
+
+    TODO: The 'metadata' is included in our
+    wrapper class, not in VideoFrame or
+    AudioFrame classes. I should be sending
+    the wrapper in all the code, but by now
+    I'm doing it just in specific cases.
+    """
+    return (
+        hasattr(frame, 'metadata') and
+        frame.is_from_empty_part
+    )

-# TODO: I don't know where to put this
-# method because if a bit special
 # TODO: Refactor and move please
+# TODO: This has to work for AudioFrame
+# also, but I need it working for Wrapped
 def concatenate_audio_frames(
-    frames: list[AudioFrame]
-) -> AudioFrame:
+    frames: list[AudioFrameWrapped]
+) -> AudioFrameWrapped:
     """
-    Combina varios AudioFrames consecutivos en uno solo.
-    - Convierte a float32
-    - Concatena muestras a lo largo del tiempo
-    - Devuelve un AudioFrame nuevo
+    Concatenate all the given 'frames' in one
+    single audio frame and return it.
+
+    The audio frames must have the same layout
+    and sample rate.
     """
     if not frames:
         # TODO: This should not happen
@@ -328,29 +420,44 @@ def concatenate_audio_frames(
     if len(frames) == 1:
         return frames[0]

-    # Verificamos consistencia básica
-    sample_rate = frames[0].sample_rate
-    layout = frames[0].layout.name
-    channels = frames[0].layout.channels
+    # We need to preserve the metadata
+    is_from_empty_part = all(
+        frame.is_from_empty_part
+        for frame in frames
+    )
+    metadata = reduce(lambda key_values, frame: {**key_values, **frame.metadata}, frames, {})
+
+    sample_rate = frames[0]._frame.sample_rate
+    layout = frames[0]._frame.layout.name

     arrays = []
-    for f in frames:
-        if f.sample_rate != sample_rate or f.layout.name != layout:
+    # TODO: What about 'metadata' (?)
+    for frame in frames:
+        if (
+            frame._frame.sample_rate != sample_rate or
+            frame._frame.layout.name != layout
+        ):
             raise ValueError("Los frames deben tener mismo sample_rate y layout")

-        # arr = f.to_ndarray() # (channels, samples)
+        # arr = frame.to_ndarray() # (channels, samples)
         # if arr.dtype == np.int16:
         #     arr = arr.astype(np.float32) / 32768.0
         # elif arr.dtype != np.float32:
         #     arr = arr.astype(np.float32)

-        arrays.append(f.to_ndarray())
+        arrays.append(frame._frame.to_ndarray())

-    # Concatenamos por eje de samples
     combined = np.concatenate(arrays, axis = 1)

-    # Creamos un frame nuevo
-    out = AudioFrame.from_ndarray(combined, format = frames[0].format, layout = layout)
+    out = AudioFrame.from_ndarray(
+        array = combined,
+        format = frames[0].format,
+        layout = layout
+    )
     out.sample_rate = sample_rate

-    return out
+    return AudioFrameWrapped(
+        frame = out,
+        metadata = metadata,
+        is_from_empty_part = is_from_empty_part
+    )
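The `reduce` above folds every frame's metadata into one dict, with later frames overriding earlier ones on key collisions. The same fold with plain dicts standing in for the wrappers' .metadata (the 'source' key is hypothetical):

    from functools import reduce

    metadatas = [
        {'is_from_empty_part': 'True'},
        {'is_from_empty_part': 'True', 'source': 'track_1'},
    ]

    # Merge left to right, so keys from later dicts win.
    merged = reduce(lambda acc, md: {**acc, **md}, metadatas, {})
    print(merged)   # {'is_from_empty_part': 'True', 'source': 'track_1'}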
--- a/src/yta_video_opengl/complete/track.py
+++ b/src/yta_video_opengl/complete/track.py
@@ -1,8 +1,10 @@
 from yta_video_opengl.complete.video_on_track import VideoOnTrack
 from yta_video_opengl.video import Video
 from yta_video_opengl.t import T
+from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
 from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
 from yta_video_opengl.t import fps_to_time_base
+from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
 from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
 from yta_validation.parameter import ParameterValidator
 from quicktions import Fraction
@@ -82,30 +84,38 @@ class _Part:
     def get_frame_at(
         self,
         t: Union[int, float, Fraction]
-    ) -> 'VideoFrame':
+    ) -> 'VideoFrameWrapped':
         """
         Get the frame that must be displayed at
         the given 't' time moment.
         """
-        if self.is_empty_part:
+        frame = (
             # TODO: What about the 'format' (?)
             # TODO: Maybe I shouldn't set the 'time_base'
             # here and do it just in the Timeline 'render'
             #return get_black_background_video_frame(self._track.size)
             # TODO: This 'time_base' maybe has to be related
             # to a Timeline general 'time_base' and not the fps
-            return self._video_frame_generator.background.full_black(
-                size = self._track.size,
-                time_base = fps_to_time_base(self._track.fps)
+            VideoFrameWrapped(
+                frame = self._video_frame_generator.background.full_black(
+                    size = self._track.size,
+                    time_base = fps_to_time_base(self._track.fps)
+                ),
+                is_from_empty_part = True
             )
-
-        frame = self.video.get_frame_at(t)
+            if self.is_empty_part else
+            VideoFrameWrapped(
+                frame = self.video.get_frame_at(t),
+                is_from_empty_part = False
+            )
+
+        )

         # TODO: This should not happen because of
         # the way we handle the videos here but the
         # video could send us a None frame here, so
         # do we raise exception (?)
-        if frame is None:
+        if frame._frame is None:
             #frame = get_black_background_video_frame(self._track.size)
             # TODO: By now I'm raising exception to check if
             # this happens or not because I think it would
@@ -118,8 +128,16 @@ class _Part:
         self,
         t: Union[int, float, Fraction]
     ):
+        """
+        Iterate over all the audio frames that
+        exist at the time moment 't' provided.
+        """
         if not self.is_empty_part:
-            frames = self.video.get_audio_frames_at(t)
+            for frame in self.video.get_audio_frames_at(t):
+                yield AudioFrameWrapped(
+                    frame = frame,
+                    is_from_empty_part = False
+                )
         else:
             # TODO: Transform this below to a utils in
             # which I obtain the array directly
@@ -132,17 +150,22 @@ class _Part:

             # TODO: I need to set the pts, but here (?)
             # The complete silent frames we need
+            silent_frame = self._audio_frame_generator.silent(
+                sample_rate = self._track.audio_fps,
+                # TODO: Check where do we get this value from
+                layout = 'stereo',
+                number_of_samples = self._track.audio_samples_per_frame,
+                # TODO: Check where do we get this value from
+                format = 'fltp',
+                pts = None,
+                time_base = None
+            )
+
             frames = (
                 [
-                    self._audio_frame_generator.silent(
-                        sample_rate = self._track.audio_fps,
-                        # TODO: Check where do we get this value from
-                        layout = 'stereo',
-                        number_of_samples = self._track.audio_samples_per_frame,
-                        # TODO: Check where do we get this value from
-                        format = 'fltp',
-                        pts = None,
-                        time_base = None
+                    AudioFrameWrapped(
+                        frame = silent_frame,
+                        is_from_empty_part = True
                     )
                 ] * number_of_frames
                 if number_of_frames > 0 else
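One detail worth knowing about `[AudioFrameWrapped(frame = silent_frame, ...)] * number_of_frames`: list multiplication repeats a reference to one wrapper (holding the single hoisted `silent_frame`), it does not copy it. A self-contained demonstration of that Python behavior:

    class Box:
        def __init__(self):
            self.pts = None

    shared = [Box()] * 3                       # one object, three slots
    independent = [Box() for _ in range(3)]    # three distinct objects

    shared[0].pts = 42
    print(shared[1].pts)        # 42: mutation visible in every slot
    independent[0].pts = 42
    print(independent[1].pts)   # None: the others are untouched

That is harmless while the silent frames stay untouched, but a later per-frame `pts` assignment would hit every entry at once.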
@@ -151,21 +174,26 @@ class _Part:

             # The remaining partial silent frames we need
             if number_of_remaining_samples > 0:
+                silent_frame = self._audio_frame_generator.silent(
+                    sample_rate = self._track.audio_fps,
+                    # TODO: Check where do we get this value from
+                    layout = 'stereo',
+                    number_of_samples = number_of_remaining_samples,
+                    # TODO: Check where do we get this value from
+                    format = 'fltp',
+                    pts = None,
+                    time_base = None
+                )
+
                 frames.append(
-                    self._audio_frame_generator.silent(
-                        sample_rate = self._track.audio_fps,
-                        # TODO: Check where do we get this value from
-                        layout = 'stereo',
-                        number_of_samples = number_of_remaining_samples,
-                        # TODO: Check where do we get this value from
-                        format = 'fltp',
-                        pts = None,
-                        time_base = None
+                    AudioFrameWrapped(
+                        frame = silent_frame,
+                        is_from_empty_part = True
                     )
                 )

-        for frame in frames:
-            yield frame
+            for frame in frames:
+                yield frame

     # TODO: I don't like using t as float,
     # we need to implement fractions.Fraction
@@ -307,7 +335,7 @@ class Track:
     def get_frame_at(
         self,
         t: Union[int, float, Fraction]
-    ) -> 'VideoFrame':
+    ) -> 'VideoFrameWrapped':
         """
         Get the frame that must be displayed at
         the 't' time moment provided, which is
--- a/src/yta_video_opengl/complete/video_on_track.py
+++ b/src/yta_video_opengl/complete/video_on_track.py
@@ -127,7 +127,7 @@ class VideoOnTrack:
     def get_audio_frames_at(
         self,
         t: Union[int, float, Fraction]
-    ) -> Union[any, None]:
+    ):
         """
         Get the audio frames that must be played at
         the 't' time moment provided, that could be
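The dropped annotation used the builtin `any` (a function), not `typing.Any`, so it never described this generator. A more precise signature would look like the sketch below (a suggestion only; 0.0.18 simply removes the annotation):

    from collections.abc import Iterator
    from typing import Union

    from av.audio.frame import AudioFrame
    from quicktions import Fraction

    class VideoOnTrack:
        def get_audio_frames_at(
            self,
            t: Union[int, float, Fraction]
        ) -> Iterator[AudioFrame]:
            ...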
--- a/src/yta_video_opengl/reader/cache/video.py
+++ b/src/yta_video_opengl/reader/cache/video.py
@@ -51,7 +51,10 @@ class VideoFrameCache(FrameCache):
         Get the video frame that is in the 't'
         time moment provided.
         """
+        print(f'Getting frame from {str(float(t))}')
+        print(f'FPS: {str(self.fps)}')
         t: T = T.from_fps(t, self.fps)
+        print(f'So the actual frame is from {str(float(t.truncated))} to {str(float(t.next(1).truncated))}')
         for frame in self.get_frames(t.truncated, t.next(1).truncated):
             return frame
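Judging by the names alone, `T.from_fps(t, fps)` snaps `t` onto the frame grid: `truncated` is the start of the frame containing `t` and `next(1).truncated` the start of the following one. The equivalent Fraction arithmetic, as an assumption about `T`'s semantics rather than its real implementation:

    from quicktions import Fraction

    def frame_window(t: Fraction, fps: Fraction) -> tuple:
        index = int(t * fps)              # index of the containing frame
        start = Fraction(index) / fps     # frame start, on the grid
        return start, start + Fraction(1) / fps

    # (Fraction(1, 2), Fraction(8, 15)): t = 1/2 at 30 fps falls on
    # frame 15, whose window is [1/2, 1/2 + 1/30).
    print(frame_window(Fraction(1, 2), Fraction(30)))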
--- a/src/yta_video_opengl/tests.py
+++ b/src/yta_video_opengl/tests.py
@@ -602,9 +602,14 @@ def video_modified_stored():
     video = Video(VIDEO_PATH, 0.25, 0.75)
     timeline = Timeline()
     timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75)
+    timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 1.5, 2.0), 3.0)
     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0)
     timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.1, do_use_second_track = True)
-    timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
+    # 30fps
+    # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
+    # 29.97fps
+    # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
+    timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4', 5.8, 7.8), 3.6, do_use_second_track = True)
     timeline.render(OUTPUT_PATH)

     return
--- a/src/yta_video_opengl/video.py
+++ b/src/yta_video_opengl/video.py
@@ -167,8 +167,9 @@ class Video:
         actually the 1.5s of the video because of
         the subclipped time range.
         """
-        t = self.start + t
+        t += self.start

+        print(f'Video real t is {str(float(t))}')
         if t >= self.end:
             raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
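The docstring above describes the subclip mapping this change touches: a clip-relative `t` is shifted by the clip's `start` before reaching the reader. A worked instance with illustrative values:

    # A clip subclipped to [0.25, 0.75) of the source file.
    start, end = 0.25, 0.75

    t = 0.4
    real_t = t + start    # 0.65, inside [start, end): frame is served

    t = 0.5
    real_t = t + start    # 0.75, equal to 'end': the guard above
                          # raises the out-of-range Exception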
@@ -182,7 +183,9 @@ class Video:
         Get the video frame with the given 't' time
         moment, using the video cache system.
         """
-        return self.reader.video_cache.get_frame(self._get_real_t(t))
+        print(f'Getting frame from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
+        return self.reader.get_frame(self._get_real_t(t))
+        #return self.reader.video_cache.get_frame(self._get_real_t(t))

     def get_audio_frame_from_t(
         self,
@@ -214,6 +217,7 @@ class Video:
         (remember that a video frame is associated
         with more than 1 audio frame).
         """
+        print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
         for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
             yield frame