yta-video-opengl 0.0.16__py3-none-any.whl → 0.0.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,11 +13,14 @@ video written).
  from yta_video_opengl.complete.track import Track
  from yta_video_opengl.video import Video
  from yta_video_opengl.t import get_ts, fps_to_time_base, T
+ from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped, AudioFrameWrapped
+ from yta_video_opengl.complete.frame_combinator import AudioFrameCombinator
  from yta_validation.parameter import ParameterValidator
+ from yta_validation import PythonValidator
  from av.video.frame import VideoFrame
  from av.audio.frame import AudioFrame
- from av.audio.resampler import AudioResampler
  from quicktions import Fraction
+ from functools import reduce
  from typing import Union

  import numpy as np
@@ -51,7 +54,10 @@ class Timeline:
          audio_fps: Union[int, Fraction] = 44_100.0, # 48_000.0 for aac
          # TODO: I don't like this name
          # TODO: Where does this come from (?)
-         audio_samples_per_frame: int = 1024
+         audio_samples_per_frame: int = 1024,
+         video_codec: str = 'h264',
+         video_pixel_format: str = 'yuv420p',
+         audio_codec: str = 'aac'
      ):
          # TODO: By now we are using just two video
          # tracks to test the composition
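
The three new constructor parameters make the export settings explicit instead of hardcoding them at render time. A minimal usage sketch, assuming `Timeline` lives in `yta_video_opengl.complete.timeline` (the import path is an assumption; the parameter names and defaults come from this hunk, and any other parameters keep whatever defaults the class defines):

```python
# A sketch only; the import path is assumed, the parameters are from this hunk.
from yta_video_opengl.complete.timeline import Timeline

timeline = Timeline(
    audio_samples_per_frame = 1024,  # samples carried by each audio frame
    video_codec = 'h264',            # codec of the exported video stream
    video_pixel_format = 'yuv420p',  # pixel format of the exported video
    audio_codec = 'aac'              # codec of the exported audio stream
)
```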
@@ -79,10 +85,36 @@ class Timeline:
          """
          All the video tracks we are handling.
          """
-         # TODO: Handle the other properties
-         self.size = size
-         self.fps = fps
-         self.audio_fps = audio_fps
+
+         self.size: tuple[int, int] = size
+         """
+         The size that the final video must have.
+         """
+         self.fps: Union[int, float, Fraction] = fps
+         """
+         The fps of the output video.
+         """
+         self.audio_fps: Union[int, Fraction] = audio_fps
+         """
+         The fps of the output audio.
+         """
+         self.audio_samples_per_frame: int = audio_samples_per_frame
+         """
+         The number of audio samples each audio
+         frame must have.
+         """
+         self.video_codec: str = video_codec
+         """
+         The video codec for the exported video.
+         """
+         self.video_pixel_format: str = video_pixel_format
+         """
+         The pixel format for the exported video.
+         """
+         self.audio_codec: str = audio_codec
+         """
+         The audio codec for the exported audio.
+         """

          # TODO: Create 'add_track' method, but by now
          # we are handling only one
@@ -122,7 +154,7 @@ class Timeline:
          Get all the frames that are played at the
          't' time provided, but combined in one.
          """
-         frames = (
+         frames = list(
              track.get_frame_at(t)
              for track in self.tracks
          )
@@ -134,20 +166,45 @@ class Timeline:
          # other frame in other track, or to know if
          # I want them as transparent or something

-         # TODO: This is just a test function
-         from yta_video_opengl.complete.blend import blend_add
-
          # TODO: Combine frames; we force them to
          # rgb24 to obtain them with the same shape,
          # but maybe we have to change this because
          # we also need to handle alphas
-         output_frame = next(frames).to_ndarray(format = 'rgb24')
+
+         # TODO: We need to ignore the ones that are
+         # tagged with
+         # .metadata['is_from_empty_part'] = 'True'
+
+         """
+         1. Only empty frames
+            -> Black background, keep one
+         2. Empty frames mixed with other frames:
+            -> Skip all the empty frames and apply
+            the track order
+         """
+
+         output_frame = frames[0]._frame.to_ndarray(format = 'rgb24')
+
          for frame in frames:
-             # Combine them
-             # TODO: We need to ignore the frames that
-             # are just empty black frames and use them
-             # not in the combination process
-             output_frame = blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))
+             # We just need the first non-empty frame,
+             # which must be from the track with the
+             # highest priority
+             # TODO: I assume, by now, that the frames
+             # come in order (highest priority first)
+             if not frame.is_from_empty_part:
+                 # TODO: By now I'm just returning the first
+                 # one but we will need to check the alpha
+                 # layer to combine if possible
+                 output_frame = frame._frame.to_ndarray(format = 'rgb24')
+                 break
+
+             # # TODO: This code below combines the
+             # # frames by merging all of them, which is
+             # # unexpected in a video editor, but we have
+             # # a way to do it
+             # from yta_video_opengl.complete.frame_combinator import VideoFrameCombinator
+             # # TODO: What about the 'format' (?)
+             # output_frame = VideoFrameCombinator.blend_add(output_frame, frame.to_ndarray(format = 'rgb24'))

          # TODO: How to build this VideoFrame correctly
          # and what about the 'format' (?)
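
The loop above boils down to one rule: use the frame from the highest-priority track that is not an empty-part filler, falling back to the black filler when every track is empty. A standalone sketch of that rule, with an illustrative stand-in for the package's `VideoFrameWrapped` (only the `is_from_empty_part` flag matters here):

```python
# Illustrative sketch only: WrappedFrame is a stand-in, not the package's class.
from dataclasses import dataclass

import numpy as np

@dataclass
class WrappedFrame:
    pixels: np.ndarray          # (H, W, 3) rgb24 data
    is_from_empty_part: bool    # True if this is a black filler frame

def pick_output_frame(frames: list[WrappedFrame]) -> np.ndarray:
    """
    Keep the first non-empty frame (tracks are assumed to be
    ordered by priority); fall back to the first frame (a black
    background) when every track is empty at this 't'.
    """
    for frame in frames:
        if not frame.is_from_empty_part:
            return frame.pixels
    return frames[0].pixels
```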
@@ -158,150 +215,57 @@ class Timeline:
          self,
          t: float
      ):
+         audio_frames: list[list[AudioFrameWrapped]] = []
+         """
+         Matrix in which the rows are the different
+         tracks we have and the columns include all
+         the audio frames for this 't' time moment
+         for the track of that row. We can have more
+         than one frame per row (track), but we need
+         a single frame per track to combine all the
+         tracks.
+         """
          # TODO: What if the different audio streams
          # also have different fps (?)
-         audio_frames = []
          for track in self.tracks:
              # TODO: Make this work properly
              audio_frames.append(list(track.get_audio_frames_at(t)))
-
-         # TODO: Combine them
          # TODO: We need to ignore the frames that
          # are just empty black frames and use them
          # not in the combination process

-         def mix_audio_frames_by_index(
-             tracks_frames,
-             layout = 'stereo'
-         ):
-             """
-             Combine all the columns of the given
-             matrix of audio frames 'tracks_frames'.
-             The rows are the different tracks and
-             the columns are the frame at that 't'
-             moment of each of those tracks.
-
-             The 'tracks_frames' matrix needs to be
-             pre-processed to have only 1 single
-             frame to combine, so we concatenate
-             all the frames if more than 1 per
-             column.
-             """
-             # TODO: Please, improve and clean all this,
-             # the code is so sh*tty, and make utils to
-             # combine and those things, not here...
-             # Also the formats, make them dynamic and
-             # based on the output that is defined here
-             # in the Timeline class.
-             mixed_frames = []
-
-             # Iterate by columns (each row is a track)
-             for frames_at_index in zip(*tracks_frames):
-                 arrays = []
-                 for f in frames_at_index:
-                     # Resample to the expected output values
-                     # TODO: This must be dynamic depending
-                     # on the track values
-                     resampler = AudioResampler(format = 'fltp', layout = 'stereo', rate = self.audio_fps)
-                     arr = resampler.resample(f)
-
-                     arr = f.to_ndarray()
-
-                     # TODO: This below must change depending
-                     # on the expected output; for us, now,
-                     # it is float32, fltp, stereo, 44_100
-                     # Same format
-                     if arr.dtype == np.int16:
-                         arr = arr.astype(np.float32) / 32768.0
-
-                     # Same layout (number of channels)
-                     if arr.shape[0] == 1:
-                         return np.repeat(arr, 2, axis = 0)
-                     # elif arr.dtype == np.float32:
-                     #     # Already in [-1, 1], leave it alone
-                     #     pass
-
-                     arrays.append(arr)
-
-                 # Align the lengths
-                 max_len = max(a.shape[1] for a in arrays)
-                 stacked = []
-                 for a in arrays:
-                     buf = np.zeros((a.shape[0], max_len), dtype = np.float32)
-                     buf[:, :a.shape[1]] = a
-                     stacked.append(buf)
-
-                 # Mix
-                 mix = np.sum(stacked, axis = 0) / len(stacked)
-                 #mix = np.sum(stacked, axis = 0)
-
-                 # Clamp to the [-1, 1] range
-                 mix = np.clip(mix, -1.0, 1.0)
-
-                 # Create the output frame
-                 # TODO: What about the 'format' if they
-                 # are all different (?)
-                 out = AudioFrame.from_ndarray(mix, format = 'fltp', layout = layout)
-                 out.sample_rate = self.audio_fps
-                 # TODO: This will be written later when
-                 # encoding
-                 # out.pts = frames_at_index[0].pts
-                 # out.time_base = frames_at_index[0].time_base
-
-                 print(mix.min(), mix.max())
-
-                 mixed_frames.append(out)
-
-             return mixed_frames
-
-         def combine_audio_frames(frames):
-             """
-             Combine several consecutive AudioFrames into a single one.
-             - Converts to float32
-             - Concatenates samples along the time axis
-             - Returns a new AudioFrame
-             """
-             if not frames:
-                 # TODO: This should not happen
-                 return None
-
-             if len(frames) == 1:
-                 return frames
-
-             # Check basic consistency
-             sample_rate = frames[0].sample_rate
-             layout = frames[0].layout.name
-             channels = frames[0].layout.channels
-
-             arrays = []
-             for f in frames:
-                 if f.sample_rate != sample_rate or f.layout.name != layout:
-                     raise ValueError('The frames must have the same sample_rate and layout')
-
-                 # arr = f.to_ndarray() # (channels, samples)
-                 # if arr.dtype == np.int16:
-                 #     arr = arr.astype(np.float32) / 32768.0
-                 # elif arr.dtype != np.float32:
-                 #     arr = arr.astype(np.float32)
-
-                 arrays.append(f.to_ndarray())
-
-             # Concatenate along the samples axis
-             combined = np.concatenate(arrays, axis = 1)
-
-             # Create a new frame
-             out = AudioFrame.from_ndarray(combined, format = frames[0].format, layout = layout)
-             out.sample_rate = sample_rate
+         # We need only 1 single audio frame per column
+         collapsed_frames = [
+             concatenate_audio_frames(frames)
+             for frames in audio_frames
+         ]

-             return [out]
+         # TODO: What about the lengths and those
+         # things? They should be ok because they are
+         # based on our output, but I'm not completely
+         # sure here..
+
+         # We keep only the non-silent frames because
+         # we will sum them later, and keeping the
+         # silent ones would change the result.
+         non_silent_frames = [
+             frame._frame
+             for frame in collapsed_frames
+             if not frame.is_from_empty_part
+         ]

-         # We need only 1 single audio frame per column
-         collapsed = []
-         for frames in audio_frames:
-             collapsed.append(combine_audio_frames(frames))
+         if len(non_silent_frames) == 0:
+             # If they were all silent, just keep one
+             non_silent_frames = [collapsed_frames[0]._frame]

          # Now, mix column by column (track by track)
-         frames = mix_audio_frames_by_index(collapsed)
+         # TODO: I do this to have an iterator, but
+         # maybe we need more than one single audio
+         # frame because of the size of the original
+         # video or something...
+         frames = [
+             AudioFrameCombinator.sum_tracks_frames(non_silent_frames, self.audio_fps)
+         ]

          for audio_frame in frames:
              yield audio_frame
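
The audio path is now collapse-then-mix: each track's frames at 't' are concatenated into one frame per track, silent filler frames are dropped, and the survivors are summed by `AudioFrameCombinator.sum_tracks_frames`. A minimal numpy sketch of the mixing math, following the removed `mix_audio_frames_by_index` body (zero-pad to a common length, average, clamp); it works on raw `(channels, samples)` float32 arrays rather than PyAV frames:

```python
import numpy as np

def mix_tracks(arrays: list[np.ndarray]) -> np.ndarray:
    """
    Mix one (channels, samples) float32 array per track into a
    single array: zero-pad to the longest track, average, and
    clamp so samples stay inside the valid [-1.0, 1.0] range.
    """
    max_len = max(a.shape[1] for a in arrays)
    stacked = []
    for a in arrays:
        buf = np.zeros((a.shape[0], max_len), dtype = np.float32)
        buf[:, :a.shape[1]] = a
        stacked.append(buf)
    mix = np.sum(stacked, axis = 0) / len(stacked)
    return np.clip(mix, -1.0, 1.0)
```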
@@ -416,4 +380,84 @@ class Timeline:

          writer.mux_video_frame(None)
          writer.mux_audio_frame(None)
-         writer.output.close()
+         writer.output.close()
+
+ def _is_empty_part_frame(
+     frame: Union['VideoFrameWrapped', 'AudioFrameWrapped']
+ ) -> bool:
+     """
+     Flag to indicate if the frame comes from
+     an empty part or not.
+
+     TODO: The 'metadata' is included in our
+     wrapper class, not in the VideoFrame or
+     AudioFrame classes. I should be sending
+     the wrapper in all the code, but by now
+     I'm doing it just in specific cases.
+     """
+     return (
+         hasattr(frame, 'metadata') and
+         frame.is_from_empty_part
+     )
+
+ # TODO: Refactor and move, please
+ # TODO: This has to work for AudioFrame
+ # too, but I need it working for Wrapped
+ def concatenate_audio_frames(
+     frames: list[AudioFrameWrapped]
+ ) -> AudioFrameWrapped:
+     """
+     Concatenate all the given 'frames' into one
+     single audio frame and return it.
+
+     The audio frames must have the same layout
+     and sample rate.
+     """
+     if not frames:
+         # TODO: This should not happen
+         return None
+
+     if len(frames) == 1:
+         return frames[0]
+
+     # We need to preserve the metadata
+     is_from_empty_part = all(
+         frame.is_from_empty_part
+         for frame in frames
+     )
+     metadata = reduce(lambda key_values, frame: {**key_values, **frame.metadata}, frames, {})
+
+     sample_rate = frames[0]._frame.sample_rate
+     layout = frames[0]._frame.layout.name
+
+     arrays = []
+     # TODO: What about 'metadata' (?)
+     for frame in frames:
+         if (
+             frame._frame.sample_rate != sample_rate or
+             frame._frame.layout.name != layout
+         ):
+             raise ValueError('The frames must have the same sample_rate and layout')
+
+         # arr = frame.to_ndarray() # (channels, samples)
+         # if arr.dtype == np.int16:
+         #     arr = arr.astype(np.float32) / 32768.0
+         # elif arr.dtype != np.float32:
+         #     arr = arr.astype(np.float32)
+
+         arrays.append(frame._frame.to_ndarray())
+
+     combined = np.concatenate(arrays, axis = 1)
+
+     out = AudioFrame.from_ndarray(
+         array = combined,
+         format = frames[0]._frame.format,
+         layout = layout
+     )
+     out.sample_rate = sample_rate
+
+     return AudioFrameWrapped(
+         frame = out,
+         metadata = metadata,
+         is_from_empty_part = is_from_empty_part
+     )
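
One detail worth noting in `concatenate_audio_frames`: the `reduce` call folds the per-frame metadata dicts left to right, so when two frames share a key the later frame wins. A tiny illustration of that merge rule (the dict contents are made up):

```python
from functools import reduce

metadatas = [
    {'is_from_empty_part': 'True', 'source': 'clip_a'},  # earlier frame
    {'source': 'clip_b'},                                # later frame wins on 'source'
]

# The same fold used in the diff, applied to plain dicts.
merged = reduce(lambda acc, m: {**acc, **m}, metadatas, {})
assert merged == {'is_from_empty_part': 'True', 'source': 'clip_b'}
```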
@@ -1,8 +1,11 @@
  from yta_video_opengl.complete.video_on_track import VideoOnTrack
  from yta_video_opengl.video import Video
  from yta_video_opengl.t import T
- from yta_video_opengl.utils import get_black_background_video_frame, get_silent_audio_frame, audio_frames_and_remainder_per_video_frame
+ from yta_video_opengl.complete.frame_wrapper import VideoFrameWrapped
+ from yta_video_opengl.utils import audio_frames_and_remainder_per_video_frame
  from yta_video_opengl.t import fps_to_time_base
+ from yta_video_opengl.complete.frame_wrapper import AudioFrameWrapped
+ from yta_video_opengl.complete.frame_generator import VideoFrameGenerator, AudioFrameGenerator
  from yta_validation.parameter import ParameterValidator
  from quicktions import Fraction
  from typing import Union
@@ -48,6 +51,20 @@ class _Part:
          The instance of the track this part belongs
          to.
          """
+         # TODO: I would like to avoid these 2 instances
+         # here; I think I've done it with static
+         # properties in another project but, as I don't
+         # remember how and where by now, here it is...
+         self._video_frame_generator: VideoFrameGenerator = VideoFrameGenerator()
+         """
+         Useful internal tool to generate background
+         frames for the empty parts.
+         """
+         self._audio_frame_generator: AudioFrameGenerator = AudioFrameGenerator()
+         """
+         Useful internal tool to generate silent
+         audio frames for the empty parts.
+         """
          self.start: Fraction = Fraction(start)
          """
          The start 't' time moment of the part.
@@ -67,27 +84,38 @@ class _Part:
      def get_frame_at(
          self,
          t: Union[int, float, Fraction]
-     ) -> 'VideoFrame':
+     ) -> 'VideoFrameWrapped':
          """
          Get the frame that must be displayed at
          the given 't' time moment.
          """
-         if self.is_empty_part:
+         frame = (
              # TODO: What about the 'format' (?)
              # TODO: Maybe I shouldn't set the 'time_base'
              # here and do it just in the Timeline 'render'
              #return get_black_background_video_frame(self._track.size)
              # TODO: This 'time_base' maybe has to be related
              # to a Timeline general 'time_base' and not the fps
-             return get_black_background_video_frame(self._track.size, time_base = fps_to_time_base(self._track.fps))
-
-         frame = self.video.get_frame_at(t)
+             VideoFrameWrapped(
+                 frame = self._video_frame_generator.background.full_black(
+                     size = self._track.size,
+                     time_base = fps_to_time_base(self._track.fps)
+                 ),
+                 is_from_empty_part = True
+             )
+             if self.is_empty_part else
+             VideoFrameWrapped(
+                 frame = self.video.get_frame_at(t),
+                 is_from_empty_part = False
+             )
+         )

          # TODO: This should not happen because of
          # the way we handle the videos here, but the
          # video could send us a None frame here, so
          # do we raise an exception (?)
-         if frame is None:
+         if frame._frame is None:
              #frame = get_black_background_video_frame(self._track.size)
              # TODO: By now I'm raising an exception to check if
              # this happens or not, because I think it would
@@ -96,13 +124,20 @@ class _Part:

          return frame

-     # TODO: I'm not sure if we need this
      def get_audio_frames_at(
          self,
          t: Union[int, float, Fraction]
      ):
+         """
+         Iterate over all the audio frames that
+         exist at the 't' time moment provided.
+         """
          if not self.is_empty_part:
-             frames = self.video.get_audio_frames_at(t)
+             for frame in self.video.get_audio_frames_at(t):
+                 yield AudioFrameWrapped(
+                     frame = frame,
+                     is_from_empty_part = False
+                 )
          else:
              # TODO: Turn this below into a util in
              # which I obtain the array directly
@@ -115,15 +150,22 @@ class _Part:

              # TODO: I need to set the pts, but here (?)
              # The complete silent frames we need
+             silent_frame = self._audio_frame_generator.silent(
+                 sample_rate = self._track.audio_fps,
+                 # TODO: Check where we get this value from
+                 layout = 'stereo',
+                 number_of_samples = self._track.audio_samples_per_frame,
+                 # TODO: Check where we get this value from
+                 format = 'fltp',
+                 pts = None,
+                 time_base = None
+             )
+
              frames = (
                  [
-                     get_silent_audio_frame(
-                         sample_rate = self._track.audio_fps,
-                         # TODO: Check where we get this value from
-                         layout = 'stereo',
-                         number_of_samples = self._track.audio_samples_per_frame,
-                         # TODO: Check where we get this value from
-                         format = 'fltp'
+                     AudioFrameWrapped(
+                         frame = silent_frame,
+                         is_from_empty_part = True
                      )
                  ] * number_of_frames
                  if number_of_frames > 0 else
@@ -132,21 +174,26 @@ class _Part:

              # The remaining partial silent frames we need
              if number_of_remaining_samples > 0:
+                 silent_frame = self._audio_frame_generator.silent(
+                     sample_rate = self._track.audio_fps,
+                     # TODO: Check where we get this value from
+                     layout = 'stereo',
+                     number_of_samples = number_of_remaining_samples,
+                     # TODO: Check where we get this value from
+                     format = 'fltp',
+                     pts = None,
+                     time_base = None
+                 )
+
                  frames.append(
-                     get_silent_audio_frame(
-                         sample_rate = self._track.audio_fps,
-                         # TODO: Check where we get this value from
-                         layout = 'stereo',
-                         number_of_samples = number_of_remaining_samples,
-                         # TODO: Check where we get this value from
-                         format = 'fltp'
+                     AudioFrameWrapped(
+                         frame = silent_frame,
+                         is_from_empty_part = True
                      )
                  )

-             # TODO: Return or yield (?)
-             for frame in frames:
-                 yield frame
-             #return frames
+             for frame in frames:
+                 yield frame

      # TODO: I don't like using t as float,
      # we need to implement fractions.Fraction
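
The `number_of_frames` / `number_of_remaining_samples` pair used above comes from splitting the samples spanned by one video frame into full audio frames plus a partial tail. A worked sketch of that arithmetic (the real helper is `audio_frames_and_remainder_per_video_frame`; this standalone version assumes an integer samples-per-video-frame count):

```python
def audio_frames_and_remainder(
    sample_rate: int,
    fps: int,
    samples_per_frame: int
) -> tuple[int, int]:
    """
    Split the samples spanned by one video frame into full
    audio frames plus the remaining partial-frame samples.
    """
    samples_per_video_frame = sample_rate // fps
    return divmod(samples_per_video_frame, samples_per_frame)

# 44_100 Hz audio at 30 fps spans 1_470 samples per video frame:
# one full 1_024-sample audio frame plus a 446-sample remainder.
assert audio_frames_and_remainder(44_100, 30, 1_024) == (1, 446)
```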
@@ -288,7 +335,7 @@ class Track:
      def get_frame_at(
          self,
          t: Union[int, float, Fraction]
-     ) -> 'VideoFrame':
+     ) -> 'VideoFrameWrapped':
          """
          Get the frame that must be displayed at
          the 't' time moment provided, which is
@@ -127,7 +127,7 @@ class VideoOnTrack:
      def get_audio_frames_at(
          self,
          t: Union[int, float, Fraction]
-     ) -> Union[any, None]:
+     ):
          """
          Get the audio frames that must be played at
          the 't' time moment provided, that could be
@@ -51,7 +51,10 @@ class VideoFrameCache(FrameCache):
          Get the video frame that is in the 't'
          time moment provided.
          """
+         print(f'Getting frame from {str(float(t))}')
+         print(f'FPS: {str(self.fps)}')
          t: T = T.from_fps(t, self.fps)
+         print(f'So the actual frame is from {str(float(t.truncated))} to {str(float(t.next(1).truncated))}')
          for frame in self.get_frames(t.truncated, t.next(1).truncated):
              return frame

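The prints added here make the cache's snapping visible: `T.from_fps` maps an arbitrary 't' onto the frame grid, and the returned frame covers `[t.truncated, t.next(1).truncated)`. A minimal sketch of that snapping with exact fractions (`T` is the package's own class; this only illustrates the rounding):

```python
from fractions import Fraction

def frame_interval(t: float, fps: int) -> tuple[Fraction, Fraction]:
    """
    Snap 't' down to the start of the frame that contains it and
    return the [start, end) interval that frame covers.
    """
    frame_duration = Fraction(1, fps)
    frame_index = int(Fraction(t).limit_denominator() / frame_duration)
    start = frame_index * frame_duration
    return start, start + frame_duration

# t = 0.34 s at 60 fps falls inside frame 20: [1/3 s, 21/60 s).
assert frame_interval(0.34, 60) == (Fraction(1, 3), Fraction(21, 60))
```
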
yta_video_opengl/tests.py CHANGED
@@ -602,9 +602,14 @@ def video_modified_stored():
      video = Video(VIDEO_PATH, 0.25, 0.75)
      timeline = Timeline()
      timeline.add_video(Video(VIDEO_PATH, 0.25, 1.0), 0.75)
+     timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-10 Smooth Transitions Green Screen Template For Kinemaster, Alight Motion, Filmora, premiere pro-(1080p).mp4', 1.5, 2.0), 3.0)
      timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.0)
      timeline.add_video(Video(VIDEO_PATH, 0.5, 1.0), 2.1, do_use_second_track = True)
-     timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
+     # 30fps
+     # timeline.add_video(Video('C:/Users/dania/Downloads/Y2meta.app-TOP 12 SIMPLE LIQUID TRANSITION _ GREEN SCREEN TRANSITION PACK-(1080p60).mp4', 0.25, 1.5), 0.25, do_use_second_track = True)
+     # 29.97fps
+     # timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano.mp4', 5.8, 6.8), 3.6, do_use_second_track = True)
+     timeline.add_video(Video('C:/Users/dania/Downloads/Y_una_porra_los_simpsons_castellano_60fps.mp4', 5.8, 7.8), 3.6, do_use_second_track = True)
      timeline.render(OUTPUT_PATH)

      return
yta_video_opengl/video.py CHANGED
@@ -167,8 +167,9 @@ class Video:
          actually the 1.5s of the video because of
          the subclipped time range.
          """
-         t = self.start + t
+         t += self.start

+         print(f'Video real t is {str(float(t))}')
          if t >= self.end:
              raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')

@@ -182,7 +183,9 @@ class Video:
          Get the video frame with the given 't' time
          moment, using the video cache system.
          """
-         return self.reader.video_cache.get_frame(self._get_real_t(t))
+         print(f'Getting frame from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
+         return self.reader.get_frame(self._get_real_t(t))
+         #return self.reader.video_cache.get_frame(self._get_real_t(t))

      def get_audio_frame_from_t(
          self,
@@ -214,6 +217,7 @@ class Video:
          (remember that a video frame is associated
          with more than 1 audio frame).
          """
+         print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_real_t(t)))}')
          for frame in self.reader.get_audio_frames_from_t(self._get_real_t(t)):
              yield frame

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: yta-video-opengl
- Version: 0.0.16
+ Version: 0.0.18
  Summary: Youtube Autonomous Video OpenGL Module
  Author: danialcala94
  Author-email: danielalcalavalera@gmail.com