yta-video-opengl 0.0.12__tar.gz → 0.0.14__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/PKG-INFO +2 -1
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/pyproject.toml +2 -1
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/timeline.py +41 -44
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/track.py +40 -32
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/video_on_track.py +27 -16
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/reader/__init__.py +27 -82
- yta_video_opengl-0.0.14/src/yta_video_opengl/reader/cache.py +512 -0
- yta_video_opengl-0.0.14/src/yta_video_opengl/t.py +233 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/tests.py +4 -2
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/utils.py +108 -86
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/video.py +90 -12
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/writer.py +13 -14
- yta_video_opengl-0.0.12/src/yta_video_opengl/reader/cache.py +0 -507
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/LICENSE +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/README.md +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/__init__.py +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/classes.py +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/__init__.py +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/nodes/__init__.py +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/nodes/audio/__init__.py +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/nodes/video/__init__.py +0 -0
- {yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/nodes/video/opengl.py +0 -0
{yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-video-opengl
-Version: 0.0.12
+Version: 0.0.14
 Summary: Youtube Autonomous Video OpenGL Module
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.9
 Requires-Dist: av (>=0.0.1,<19.0.0)
 Requires-Dist: moderngl (>=0.0.1,<9.0.0)
 Requires-Dist: numpy (>=0.0.1,<9.0.0)
+Requires-Dist: quicktions (>=0.0.1,<9.0.0)
 Requires-Dist: yta_timer (>0.0.1,<1.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
 Requires-Dist: yta_video_frame_time (>=0.0.1,<1.0.0)
{yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/pyproject.toml
RENAMED
@@ -1,6 +1,6 @@
 [project]
 name = "yta-video-opengl"
-version = "0.0.12"
+version = "0.0.14"
 description = "Youtube Autonomous Video OpenGL Module"
 authors = [
     {name = "danialcala94",email = "danielalcalavalera@gmail.com"}
@@ -14,6 +14,7 @@ dependencies = [
     "av (>=0.0.1,<19.0.0)",
     "moderngl (>=0.0.1,<9.0.0)",
     "numpy (>=0.0.1,<9.0.0)",
+    "quicktions (>=0.0.1,<9.0.0)"
 ]
 
 [tool.poetry]
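Both metadata files add `quicktions` as a new dependency. For context, quicktions is a Cython-compiled drop-in replacement for the standard library's `fractions.Fraction`, which is why the source files below can simply swap the import. The sketch below is illustrative only (it is not taken from the package) and shows why exact rational arithmetic is attractive for frame timing:

```python
# Illustrative sketch, not package code: quicktions.Fraction keeps frame
# times exact, where repeated float addition would slowly drift.
from quicktions import Fraction

fps = Fraction(60)
time_base = 1 / fps                    # exactly 1/60 of a second
t = sum(time_base for _ in range(90))  # 90 frames later...
print(t)                               # 3/2 -> exactly 1.5 s, no drift
print(0.1 + 0.2 == 0.3)                # False: the float error Fraction avoids
```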
{yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/timeline.py
RENAMED
@@ -12,9 +12,10 @@ video written).
 """
 from yta_video_opengl.complete.track import Track
 from yta_video_opengl.video import Video
+from yta_video_opengl.t import get_ts, fps_to_time_base, T
 from yta_validation.parameter import ParameterValidator
+from quicktions import Fraction
 from typing import Union
-from fractions import Fraction
 
 
 class Timeline:
@@ -27,7 +28,7 @@ class Timeline:
     @property
     def end(
         self
-    ) ->
+    ) -> Fraction:
         """
         The end of the last video of the track
         that lasts longer. This is the last time
@@ -41,11 +42,11 @@ class Timeline:
     def __init__(
         self,
         size: tuple[int, int] = (1_920, 1_080),
-        fps: float = 60.0,
-        audio_fps:
+        fps: Union[int, float, Fraction] = 60.0,
+        audio_fps: Union[int, Fraction] = 44_100.0, # 48_000.0 for aac
         # TODO: I don't like this name
         # TODO: Where does this come from (?)
-
+        audio_samples_per_frame: int = 1024
     ):
         # TODO: By now we are using just two video
         # tracks to test the composition
@@ -59,7 +60,7 @@ class Timeline:
                 audio_fps = audio_fps,
                 # TODO: I need more info about the audio
                 # I think
-
+                audio_samples_per_frame = audio_samples_per_frame
             ),
             Track(
                 size = size,
@@ -67,7 +68,7 @@ class Timeline:
                 audio_fps = audio_fps,
                 # TODO: I need more info about the audio
                 # I think
-
+                audio_samples_per_frame = audio_samples_per_frame
             )
         ]
         """
@@ -83,7 +84,7 @@ class Timeline:
     def add_video(
         self,
         video: Video,
-        t: float,
+        t: Union[int, float, Fraction],
         # TODO: This is for testing, it has to
         # disappear
         do_use_second_track: bool = False
@@ -110,7 +111,7 @@ class Timeline:
     # have consecutive elements
     def get_frame_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> 'VideoFrame':
         """
         Get all the frames that are played at the
@@ -155,8 +156,8 @@ class Timeline:
     def render(
         self,
         filename: str,
-        start: float = 0.0,
-        end: Union[float, None] = None
+        start: Union[int, float, Fraction] = 0.0,
+        end: Union[int, float, Fraction, None] = None
     ) -> 'Timeline':
         """
         Render the time range in between the given
@@ -167,8 +168,10 @@ class Timeline:
         project will be rendered.
         """
         ParameterValidator.validate_mandatory_string('filename', filename, do_accept_empty = False)
-
-        ParameterValidator.
+        # TODO: We need to accept Fraction as number
+        #ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+        # TODO: We need to accept Fraction as number
+        #ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
 
         # TODO: Limitate 'end' a bit...
         end = (
@@ -181,7 +184,6 @@ class Timeline:
             raise Exception('The provided "start" cannot be greater or equal to the "end" provided.')
 
         from yta_video_opengl.writer import VideoWriter
-        from yta_video_opengl.utils import get_black_background_video_frame, get_silent_audio_frame
 
         writer = VideoWriter('test_files/output_render.mp4')
         # TODO: This has to be dynamic according to the
@@ -197,17 +199,34 @@ class Timeline:
             codec_name = 'aac',
             fps = self.audio_fps
         )
+
+        time_base = fps_to_time_base(self.fps)
+        audio_time_base = fps_to_time_base(self.audio_fps)
+
+        """
+        We are trying to render this:
+        -----------------------------
+        [0 to 0.5) => Black frames
+        [0.5 to 1.25) => [0.25 to 1.0) of Video1
+        [1.25 to 1.75) => Black frames
+        [1.75 to 2.25) => [0.25 to 0.75) of Video1
+        [2.25 to 3.0) => Black frames
+        [3.0 to 3.75) => [2.25 to 3.0) of Video2
+        """
 
         audio_pts = 0
         for t in get_ts(start, end, self.fps):
             frame = self.get_frame_at(t)
 
+            print(f'Getting t:{str(float(t))}')
+            #print(frame)
+
             # We need to adjust our output elements to be
             # consecutive and with the right values
             # TODO: We are using int() for fps but its float...
-            frame.time_base =
+            frame.time_base = time_base
             #frame.pts = int(video_frame_index / frame.time_base)
-            frame.pts =
+            frame.pts = T(t, time_base).truncated_pts
 
             # TODO: We need to handle the audio
             writer.mux_video_frame(
@@ -216,6 +235,7 @@ class Timeline:
 
             #print(f' [VIDEO] Here in t:{str(t)} -> pts:{str(frame.pts)} - dts:{str(frame.dts)}')
 
+            # TODO: Uncomment all this below for the audio
             num_of_audio_frames = 0
             for audio_frame in self.get_audio_frames_at(t):
                 # TODO: The track gives us empty (black)
@@ -229,7 +249,7 @@ class Timeline:
                 # We need to adjust our output elements to be
                 # consecutive and with the right values
                 # TODO: We are using int() for fps but its float...
-                audio_frame.time_base =
+                audio_frame.time_base = audio_time_base
                 #audio_frame.pts = int(audio_frame_index / audio_frame.time_base)
                 audio_frame.pts = audio_pts
                 # We increment for the next iteration
@@ -238,34 +258,11 @@ class Timeline:
 
                 #print(f'[AUDIO] Here in t:{str(t)} -> pts:{str(audio_frame.pts)} - dts:{str(audio_frame.dts)}')
 
-                num_of_audio_frames += 1
-                print(audio_frame)
+                #num_of_audio_frames += 1
+                #print(audio_frame)
                 writer.mux_audio_frame(audio_frame)
-            print(f'Num of audio frames: {str(num_of_audio_frames)}')
+            #print(f'Num of audio frames: {str(num_of_audio_frames)}')
 
         writer.mux_video_frame(None)
         writer.mux_audio_frame(None)
-        writer.output.close()
-
-
-# TODO: I don't want to have this here
-def get_ts(
-    start: float,
-    end: float,
-    fps: int
-):
-    """
-    Obtain, without using a Progression class and
-    importing the library, a list of 't' time
-    moments from the provided 'start' to the also
-    given 'end', with the 'fps' given as parameter.
-    """
-    dt = 1.0 / fps
-    times = []
-
-    t = start
-    while t <= end:
-        times.append(t + 0.000001)
-        t += dt
-
-    return times
+        writer.output.close()
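The module-level `get_ts` removed above stepped time with floats (`t += dt`) and nudged each value by `0.000001` to dodge rounding; 0.0.14 instead imports `get_ts` from the new `yta_video_opengl/t.py`, whose body is not part of this diff. Purely as an illustration of the fraction-based approach it appears to adopt (names and exact semantics are assumed, not confirmed by the diff), such a helper could look like this:

```python
# Illustrative sketch only: the real get_ts lives in the new
# yta_video_opengl/t.py (added in 0.0.14) and is not shown in this diff.
from quicktions import Fraction

def get_ts(start, end, fps):
    """
    Yield every frame time in [start, end) as an exact Fraction,
    stepping by 1/fps so no float error accumulates and no epsilon
    nudge is needed.
    """
    step = Fraction(1) / Fraction(fps)
    t = Fraction(start)
    end = Fraction(end)
    while t < end:
        yield t
        t += step

# 60 fps over one second -> exactly 60 frame times, the last one being 59/60.
print(len(list(get_ts(0, 1, 60))))  # 60
```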
{yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/track.py
RENAMED
@@ -1,8 +1,10 @@
 from yta_video_opengl.complete.video_on_track import VideoOnTrack
 from yta_video_opengl.video import Video
-from
+from yta_video_opengl.t import T
 from yta_video_opengl.utils import get_black_background_video_frame, get_silent_audio_frame, audio_frames_and_remainder_per_video_frame
+from yta_video_opengl.t import fps_to_time_base
 from yta_validation.parameter import ParameterValidator
+from quicktions import Fraction
 from typing import Union
 
 
@@ -33,12 +35,14 @@ class _Part:
     def __init__(
         self,
         track: 'Track',
-        start: float,
-        end: float,
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction],
         video: Union[VideoOnTrack, None] = None
     ):
-
-        ParameterValidator.validate_mandatory_positive_number('
+        # TODO: We need to accept Fraction as number
+        # ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
+        # TODO: We need to accept Fraction as number
+        # ParameterValidator.validate_mandatory_positive_number('end', end, do_include_zero = False)
         ParameterValidator.validate_instance_of('video', video, VideoOnTrack)
 
         self._track: Track = track
@@ -46,11 +50,11 @@ class _Part:
         The instance of the track this part belongs
         to.
         """
-        self.start:
+        self.start: Fraction = Fraction(start)
         """
         The start 't' time moment of the part.
         """
-        self.end:
+        self.end: Fraction = Fraction(end)
         """
         The end 't' time moment of the part.
         """
@@ -64,7 +68,7 @@ class _Part:
 
     def get_frame_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> 'VideoFrame':
         """
         Get the frame that must be displayed at
@@ -72,7 +76,12 @@ class _Part:
         """
         if self.is_empty_part:
             # TODO: What about the 'format' (?)
-
+            # TODO: Maybe I shouldn't set the 'time_base'
+            # here and do it just in the Timeline 'render'
+            #return get_black_background_video_frame(self._track.size)
+            # TODO: This 'time_base' maybe has to be related
+            # to a Timeline general 'time_base' and not the fps
+            return get_black_background_video_frame(self._track.size, time_base = fps_to_time_base(self._track.fps))
 
         frame = self.video.get_frame_at(t)
 
@@ -85,14 +94,14 @@ class _Part:
             # TODO: By now I'm raising exception to check if
             # this happens or not because I think it would
             # be malfunctioning
-            raise Exception(f'Video is returning None frame at t={str(t)}.')
+            raise Exception(f'Video is returning None video frame at t={str(t)}.')
 
         return frame
 
     # TODO: I'm not sure if we need this
     def get_audio_frames_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ):
         if not self.is_empty_part:
             frames = self.video.get_audio_frames_at(t)
@@ -101,9 +110,9 @@ class _Part:
             # which I obtain the array directly
             # Check many full and partial silent frames we need
             number_of_frames, number_of_remaining_samples = audio_frames_and_remainder_per_video_frame(
-
+                video_fps = self._track.fps,
                 sample_rate = self._track.audio_fps,
-
+                number_of_samples_per_audio_frame = self._track.audio_samples_per_frame
             )
 
             # TODO: I need to set the pts, but here (?)
@@ -114,7 +123,7 @@ class _Part:
                 sample_rate = self._track.audio_fps,
                 # TODO: Check where do we get this value from
                 layout = 'stereo',
-
+                number_of_samples = self._track.audio_samples_per_frame,
                 # TODO: Check where do we get this value from
                 format = 'fltp'
             )
@@ -130,7 +139,7 @@ class _Part:
                 sample_rate = self._track.audio_fps,
                 # TODO: Check where do we get this value from
                 layout = 'stereo',
-
+                number_of_samples = number_of_remaining_samples,
                 # TODO: Check where do we get this value from
                 format = 'fltp'
             )
@@ -176,14 +185,14 @@ class Track:
     @property
     def end(
         self
-    ) ->
+    ) -> Fraction:
         """
         The end of the last video of this track,
         which is also the end of the track. This
         is the last time moment that has to be
         rendered.
         """
-        return (
+        return Fraction(
             0.0
             if len(self.videos) == 0 else
             max(
@@ -200,8 +209,8 @@ class Track:
         size: tuple[int, int],
         fps: float,
         audio_fps: float,
-        # TODO:
-
+        # TODO: Where does it come from (?)
+        audio_samples_per_frame: int
     ):
         self.videos: list[VideoOnTrack] = []
         """
@@ -212,26 +221,26 @@ class Track:
         """
         The size of the videos of this track.
         """
-        self.fps: float = fps
+        self.fps: float = float(fps)
         """
         The fps of the track, needed to calculate
         the base t time moments to be precise and
         to obtain or generate the frames.
         """
-        self.audio_fps: float = audio_fps
+        self.audio_fps: float = float(audio_fps)
         """
         The fps of the audio track, needed to
         generate silent audios for the empty parts.
         """
-        self.
+        self.audio_samples_per_frame: int = audio_samples_per_frame
         """
         The number of samples per audio frame.
         """
 
     def _is_free(
         self,
-        start: float,
-        end: float
+        start: Union[int, float, Fraction],
+        end: Union[int, float, Fraction]
     ) -> bool:
         """
         Check if the time range in between the
@@ -248,7 +257,7 @@ class Track:
 
     def _get_part_at_t(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> _Part:
         """
         Get the part at the given 't' time
@@ -269,7 +278,7 @@ class Track:
 
     def get_frame_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> 'VideoFrame':
         """
         Get the frame that must be displayed at
@@ -289,7 +298,7 @@ class Track:
     # TODO: This is not working well...
     def get_audio_frames_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ):
         """
         Get the sequence of audio frames that
@@ -316,7 +325,7 @@ class Track:
     def add_video(
         self,
         video: Video,
-        t: Union[float, None] = None
+        t: Union[int, float, Fraction, None] = None
     ) -> 'Track':
         """
         Add the 'video' provided to the track. If
@@ -338,11 +347,10 @@ class Track:
         if t is not None:
             # TODO: We can have many different strategies
             # that we could define in the '__init__' maybe
-
-
-            t = T.get_frame_time_base(float(t), self.fps)
-            if not self._is_free(t, (t + video.end)):
+            t: T = T.from_fps(t, self.fps)
+            if not self._is_free(t.truncated, t.next(1).truncated):
                 raise Exception('The video cannot be added at the "t" time moment, something blocks it.')
+            t = t.truncated
         else:
             t = self.end
 
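The `add_video` hunk above relies on the new `T` helper (`T.from_fps(t, self.fps)`, `.truncated`, `.next(1)`) to snap an arbitrary `t` onto the track's frame grid before checking for collisions. Since `t.py` is a new file whose body is not part of this diff, the following is only a guess at the kind of snapping it performs, using hypothetical helper names:

```python
# Illustrative only: the T class added in yta_video_opengl/t.py is not shown
# in this diff; this sketches the frame-grid truncation track.py seems to rely on.
from quicktions import Fraction

def truncate_to_frame_grid(t, fps):
    """Largest exact frame time that is <= t for the given fps."""
    time_base = Fraction(1) / Fraction(fps)
    return (Fraction(t) // time_base) * time_base

def next_frame_time(t, fps, n = 1):
    """The frame time n frames after the truncated t."""
    return truncate_to_frame_grid(t, fps) + n * (Fraction(1) / Fraction(fps))

print(truncate_to_frame_grid(Fraction(1, 7), 60))  # 2/15 (frame 8 at 60 fps)
print(next_frame_time(Fraction(1, 7), 60))         # 3/20 (frame 9 at 60 fps)
```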
{yta_video_opengl-0.0.12 → yta_video_opengl-0.0.14}/src/yta_video_opengl/complete/video_on_track.py
RENAMED
@@ -17,6 +17,7 @@ from yta_video_opengl.video import Video
 from yta_validation.parameter import ParameterValidator
 from av.video.frame import VideoFrame
 from av.audio.frame import AudioFrame
+from quicktions import Fraction
 from typing import Union
 
 
@@ -28,7 +29,7 @@ class VideoOnTrack:
     @property
     def end(
         self
-    ) ->
+    ) -> Fraction:
         """
         The end time moment 't' of the video once
         once its been placed on the track, which
@@ -42,17 +43,20 @@ class VideoOnTrack:
     def __init__(
         self,
         video: Video,
-        start: float = 0.0
+        start: Union[int, float, Fraction] = 0.0
     ):
         ParameterValidator.validate_mandatory_instance_of('video', video, Video)
-
+        # TODO: Now we need to accept 'Fraction',
+        # from 'fractions' or 'quicktions', as a
+        # number
+        #ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
 
         self.video: Video = video
         """
         The video source, with all its properties,
         that is placed in the timeline.
         """
-        self.start:
+        self.start: Fraction = Fraction(start)
         """
         The time moment in which the video should
         start playing, within the timeline.
@@ -64,7 +68,7 @@ class VideoOnTrack:
 
     def _get_video_t(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> float:
         """
         The video 't' time moment for the given
@@ -72,52 +76,60 @@ class VideoOnTrack:
         to use inside the video content to display
         its frame.
         """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
         return t - self.start
 
     def is_playing(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> bool:
         """
         Check if this video is playing at the general
         't' time moment, which is a global time moment
         for the whole project.
         """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
         return self.start <= t < self.end
 
     def get_frame_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> Union[VideoFrame, None]:
         """
         Get the frame for the 't' time moment provided,
         that could be None if the video is not playing
         in that moment.
         """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
         return (
-            self.video.
+            self.video.get_frame_from_t(self._get_video_t(t))
             if self.is_playing(t) else
             None
         )
 
     def get_audio_frame_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> Union[AudioFrame, None]:
         """
         Get the audio frame for the 't' time moment
         provided, that could be None if the video
         is not playing in that moment.
         """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
         return (
-            self.video.
+            self.video.get_audio_frame_from_t(self._get_video_t(t))
             if self.is_playing(t) else
             None
         )
 
     def get_audio_frames_at(
         self,
-        t: float
+        t: Union[int, float, Fraction]
     ) -> Union[any, None]:
         """
         Get the audio frames that must be played at
@@ -129,17 +141,16 @@ class VideoOnTrack:
         frames found in that 't' time moment, or an
         iterator if yes.
         """
+        # TODO: Use 'T' here to be precise or the
+        # argument itself must be precise (?)
         frames = (
-            self.video.
+            self.video.get_audio_frames_from_t(self._get_video_t(t))
             if self.is_playing(t) else
             []
         )
 
         for frame in frames:
-
-            # src\yta_video_opengl\reader\cache.py
-            # get_frames method... maybe remove it (?)
-            yield frame[0]
+            yield frame
 
         # # TODO: This was a simple return before
         # return (