yta-video-opengl 0.0.22__py3-none-any.whl → 0.0.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. yta_video_opengl/editor.py +333 -0
  2. yta_video_opengl/nodes/__init__.py +32 -28
  3. yta_video_opengl/nodes/audio/__init__.py +164 -55
  4. yta_video_opengl/nodes/video/__init__.py +27 -1
  5. yta_video_opengl/nodes/video/{opengl.py → opengl/__init__.py} +8 -4
  6. yta_video_opengl/nodes/video/opengl/experimental.py +760 -0
  7. yta_video_opengl/tests.py +236 -358
  8. yta_video_opengl/utils.py +9 -421
  9. {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/METADATA +2 -6
  10. yta_video_opengl-0.0.24.dist-info/RECORD +13 -0
  11. yta_video_opengl/audio.py +0 -219
  12. yta_video_opengl/classes.py +0 -1276
  13. yta_video_opengl/complete/__init__.py +0 -0
  14. yta_video_opengl/complete/frame_combinator.py +0 -204
  15. yta_video_opengl/complete/frame_generator.py +0 -319
  16. yta_video_opengl/complete/frame_wrapper.py +0 -135
  17. yta_video_opengl/complete/timeline.py +0 -571
  18. yta_video_opengl/complete/track/__init__.py +0 -500
  19. yta_video_opengl/complete/track/media/__init__.py +0 -222
  20. yta_video_opengl/complete/track/parts.py +0 -267
  21. yta_video_opengl/complete/track/utils.py +0 -78
  22. yta_video_opengl/media.py +0 -347
  23. yta_video_opengl/reader/__init__.py +0 -710
  24. yta_video_opengl/reader/cache/__init__.py +0 -253
  25. yta_video_opengl/reader/cache/audio.py +0 -195
  26. yta_video_opengl/reader/cache/utils.py +0 -48
  27. yta_video_opengl/reader/cache/video.py +0 -113
  28. yta_video_opengl/t.py +0 -233
  29. yta_video_opengl/video.py +0 -277
  30. yta_video_opengl/writer.py +0 -278
  31. yta_video_opengl-0.0.22.dist-info/RECORD +0 -31
  32. {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/LICENSE +0 -0
  33. {yta_video_opengl-0.0.22.dist-info → yta_video_opengl-0.0.24.dist-info}/WHEEL +0 -0
yta_video_opengl/t.py DELETED
@@ -1,233 +0,0 @@
- """
- This is an example of what a video has:
- - fps = 60
- - time_base = 1 / 15360
- - tick = (1 / fps) / time_base = 256
-
- So, the first pts is 0 and the second
- one is 256. The frame 16 will be 3840,
- that is 256 * 15 (because the first index
- is 0).
- """
- from yta_validation.parameter import ParameterValidator
- from yta_validation import PythonValidator
- from yta_validation.number import NumberValidator
- from quicktions import Fraction
- from typing import Union
-
-
- class T:
-     """
-     Class to simplify the way we work with a
-     't' time moment but using the fractions
-     library to be precise and avoid any issue
-     related to decimal precision.
-
-     This class must be used when trying to
-     apply a specific 't' time moment to a
-     video or audio frame, using the fps or
-     sample rate as time_base to be precise.
-     """
-
-     @property
-     def truncated(
-         self
-     ) -> Fraction:
-         """
-         The 't' but as a Fraction that is a multiple
-         of the given 'time_base' and truncated.
-         """
-         return round_t(self._t, self.time_base)
-
-     @property
-     def rounded(
-         self
-     ) -> Fraction:
-         """
-         The 't' but as a Fraction that is a multiple
-         of the given 'time_base' and rounded (the
-         value can be the same as 'truncated' if it
-         is closer to the previous multiple).
-         """
-         return round_t(self._t, self.time_base, do_truncate = False)
-
-     @property
-     def truncated_pts(
-         self
-     ) -> int:
-         """
-         The 'truncated' value but as a pts, which
-         is the int value to be set in audio and
-         video frames in the pyav library to be
-         displayed at that moment.
-         """
-         return int(self.truncated / self.time_base)
-
-     @property
-     def rounded_pts(
-         self
-     ) -> int:
-         """
-         The 'rounded' value but as a pts, which
-         is the int value to be set in audio and
-         video frames in the pyav library to be
-         displayed at that moment.
-         """
-         return int(self.rounded / self.time_base)
-
-     def __init__(
-         self,
-         t: Union[int, float, Fraction],
-         time_base: Fraction
-     ):
-         ParameterValidator.validate_mandatory_instance_of('t', t, [int, float, 'Fraction'])
-         ParameterValidator.validate_mandatory_instance_of('time_base', time_base, 'Fraction')
-
-         self._t: Union[int, float, Fraction] = t
-         """
-         The 't' time moment as it was passed as
-         a parameter.
-         """
-         self.time_base: Fraction = time_base
-         """
-         The time_base that will be used to round
-         the values to multiples of it.
-         """
-
-     def next(
-         self,
-         n: int = 1
-     ) -> 'T':
-         """
-         Get the value that is 'n' times ahead of
-         the 'truncated' property of this instance.
-
-         Useful when you need the next value for a
-         range in an iteration or similar.
-         """
-         return T(self.truncated + n * self.time_base, self.time_base)
-
-     def previous(
-         self,
-         n: int = 1
-     ) -> 'T':
-         """
-         Get the value that is 'n' times before the
-         'truncated' property of this instance.
-
-         Useful when you need the previous value to
-         check if the current one is the next one or
-         similar.
-
-         Be careful: if the 'truncated' value is 0
-         this will give you an unexpected negative
-         value.
-         """
-         return T(self.truncated - n * self.time_base, self.time_base)
-
-     @staticmethod
-     def from_fps(
-         t: Union[int, float, Fraction],
-         fps: Union[int, float, Fraction]
-     ) -> 'T':
-         """
-         Get the instance but providing the 'fps'
-         (or sample rate) value directly, which will
-         be turned into a time base.
-         """
-         return T(t, fps_to_time_base(fps))
-
-     @staticmethod
-     def from_pts(
-         pts: int,
-         time_base: Fraction
-     ) -> 'T':
-         """
-         Get the instance but providing the 'pts'
-         and the 'time_base'.
-         """
-         return T(pts * time_base, time_base)
-
-
- # TODO: Careful with this below
- """
- To obtain the pts step, or frame duration in
- ticks, you need to apply 2 formulas that are
- different depending on whether the frame is
- video or audio:
- - Audio: .samples
- - Video: int(round((1 / .fps) / .time_base))
- """
-
- def get_ts(
-     start: Union[int, float, Fraction],
-     end: Union[int, float, Fraction],
-     fps: Fraction
- ) -> list[Fraction]:
-     """
-     Get all the 't' time moments between the given
-     'start' and the given 'end', using the provided
-     'fps' for precision.
-
-     The 'end' is not included; we return the range
-     [start, end) because the last frame is the
-     start of another time range.
-     """
-     start = T.from_fps(start, fps).truncated
-     end = T.from_fps(end, fps).truncated
-
-     time_base = fps_to_time_base(fps)
-     return [
-         start + i * time_base
-         for i in range((end - start) // time_base)
-     ]
-
- def round_t(
-     t: Union[int, float, Fraction],
-     time_base = Fraction(1, 60),
-     do_truncate: bool = True
- ):
-     """
-     Round the given 't' time moment to the
-     nearest multiple of the given 'time_base' (or
-     the previous one if 'do_truncate' is True),
-     using the fractions module to be precise.
-
-     This method is very useful to truncate 't'
-     time moments in order to get the frames or
-     samples for the specific and exact time
-     moments according to their fps or sample
-     rate (whose inverse should be passed as the
-     'time_base' parameter).
-
-     Examples below, with `time_base = 1/5`:
-     - `t = 0.25` => `0.2` (truncated or rounded)
-     - `t = 0.35` => `0.2` (truncated)
-     - `t = 0.45` => `0.4` (truncated or rounded)
-     - `t = 0.55` => `0.6` (rounded)
-     """
-     t = Fraction(t).limit_denominator()
-     steps = t / time_base
-
-     snapped_steps = (
-         steps.numerator // steps.denominator
-         if do_truncate else
-         round(steps) # round(float(steps))
-     )
-
-     return snapped_steps * time_base
-
- def fps_to_time_base(
-     fps: Union[int, float, Fraction]
- ) -> Fraction:
-     """
-     Get the pyav time base from the given
-     'fps'.
-     """
-     return (
-         Fraction(1, fps)
-         if NumberValidator.is_int(fps) else
-         Fraction(1, 1) / fps
-         if PythonValidator.is_instance_of(fps, 'Fraction') else
-         Fraction(1, 1) / Fraction.from_float(fps).limit_denominator(1000000) # if float
-     )
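
The deleted module leans on one piece of arithmetic: the pts step (or "tick") per frame is (1 / fps) / time_base, and round_t snaps any 't' onto that grid. A minimal sketch of that arithmetic, using the standard library's fractions.Fraction (the deleted code uses quicktions, an API-compatible implementation), with the figures from the module docstring and the round_t examples:

from fractions import Fraction  # quicktions.Fraction is API-compatible

fps = 60
time_base = Fraction(1, 15360)

# pts step ("tick") per video frame: (1 / fps) / time_base = 256
tick = Fraction(1, fps) / time_base
assert tick == 256

# The frame with index 15 (the 16th frame) starts at pts = 15 * 256
assert 15 * tick == 3840

# Snapping t = 0.35 to a 1/5 grid, as in the round_t docstring
grid = Fraction(1, 5)
steps = Fraction(0.35).limit_denominator() / grid          # 7/4 steps
truncated = (steps.numerator // steps.denominator) * grid  # 1/5 (0.2)
rounded = round(steps) * grid                              # 2/5 (0.4)
assert (truncated, rounded) == (Fraction(1, 5), Fraction(2, 5))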
yta_video_opengl/video.py DELETED
@@ -1,277 +0,0 @@
- from yta_video_opengl.reader import VideoReader
- from yta_video_opengl.writer import VideoWriter
- from yta_video_opengl.t import T
- from yta_validation import PythonValidator
- from quicktions import Fraction
- from typing import Union
-
-
- # TODO: Where can I obtain this dynamically (?)
- PIXEL_FORMAT = 'yuv420p'
-
- # TODO: Maybe create a _Media(ABC) to put
- # some code shared with the Audio class
- class Video:
-     """
-     Class to wrap the functionality related to
-     handling and modifying a video.
-     """
-
-     @property
-     def start_pts(
-         self
-     ) -> int:
-         """
-         The start packet time stamp (pts), needed
-         to optimize the packet iteration process.
-
-         This timestamp is used to read the video
-         file source.
-         """
-         # TODO: What if 'time_base' is None (?)
-         return T(self.start, self.reader.time_base).truncated_pts
-
-     @property
-     def end_pts(
-         self
-     ) -> Union[int, None]:
-         """
-         The end packet time stamp (pts), needed to
-         optimize the packet iteration process.
-
-         This timestamp is used to read the video
-         file source.
-         """
-         return (
-             # TODO: What if 'time_base' is None (?)
-             T(self.end, self.reader.time_base).truncated_pts
-             # TODO: What do we do if no duration (?)
-             if self.duration is not None else
-             None
-         )
-
-     @property
-     def audio_start_pts(
-         self
-     ) -> int:
-         """
-         The start packet time stamp (pts), needed
-         to optimize the packet iteration process.
-         """
-         # TODO: What if 'audio_time_base' is None (?)
-         return T(self.start, self.reader.audio_time_base).truncated_pts
-
-     @property
-     def audio_end_pts(
-         self
-     ) -> Union[int, None]:
-         """
-         The end packet time stamp (pts), needed to
-         optimize the packet iteration process.
-         """
-         return (
-             # TODO: What if 'audio_time_base' is None (?)
-             T(self.end, self.reader.audio_time_base).truncated_pts
-             # TODO: What do we do if no duration (?)
-             if self.duration is not None else
-             None
-         )
-
-     @property
-     def duration(
-         self
-     ) -> Fraction:
-         """
-         The duration of the video.
-         """
-         return self.end - self.start
-
-     @property
-     def number_of_frames(
-         self
-     ) -> Union[int, None]:
-         """
-         The number of frames of the video.
-         """
-         return self.reader.number_of_frames
-
-     @property
-     def frames(
-         self
-     ):
-         """
-         Iterator to yield all the frames, one by
-         one, within the range defined by the
-         'start' and 'end' parameters provided when
-         instantiating it.
-
-         The iterator will iterate first over the
-         video frames, and once finished over the
-         audio frames.
-         """
-         for frame in self.reader.get_frames(self.start, self.end):
-             yield frame
-
-         for frame in self.reader.get_audio_frames(self.start, self.end):
-             yield frame
-
-     def __init__(
-         self,
-         filename: str,
-         start: Union[int, float, Fraction] = 0.0,
-         end: Union[int, float, Fraction, None] = None
-     ):
-         self.filename: str = filename
-         """
-         The filename of the original video.
-         """
-         # TODO: Detect the 'pixel_format' from the
-         # extension (?)
-         self.reader: VideoReader = VideoReader(self.filename)
-         """
-         The pyav video reader.
-         """
-         self.start: Fraction = Fraction(start)
-         """
-         The time moment 't' in which the video
-         should start.
-         """
-         self.end: Union[Fraction, None] = Fraction(
-             # TODO: Is this 'end' ok (?)
-             self.reader.duration
-             if end is None else
-             end
-         )
-         """
-         The time moment 't' in which the video
-         should end.
-         """
-
-     # TODO: We need to implement the 'get_frame'
-     # methods because this Video can be subclipped
-     # and have a 'start' and 'end' that are
-     # different from [0, end)
-     def _get_t(
-         self,
-         t: Union[int, float, Fraction]
-     ) -> Fraction:
-         """
-         Get the real 't' time moment based on the
-         video 'start' and 'end'. If t=0.5s was
-         requested but our video was subclipped to
-         [1.0, 2.0), the real moment must be the
-         t=1.5s of the video because of the
-         subclipped time range.
-         """
-         t += self.start
-
-         print(f'Video real t is {str(float(t))}')
-         if t >= self.end:
-             raise Exception(f'The "t" ({str(t)}) provided is out of range. This video lasts from [{str(self.start)}, {str(self.end)}).')
-
-         return t
-
-     def get_frame_from_t(
-         self,
-         t: Union[int, float, Fraction]
-     ) -> 'VideoFrame':
-         """
-         Get the video frame with the given 't' time
-         moment, using the video cache system.
-         """
-         print(f'Getting frame from {str(float(t))} that is actually {str(float(self._get_t(t)))}')
-         return self.reader.get_frame(self._get_t(t))
-         #return self.reader.video_cache.get_frame(self._get_t(t))
-
-     def get_audio_frame_from_t(
-         self,
-         t: Union[int, float, Fraction]
-     ) -> 'AudioFrame':
-         """
-         Get the audio frame with the given 't' time
-         moment, using the audio cache system. This
-         method is useful when we need to combine
-         many different frames so we can obtain them
-         one by one.
-
-         TODO: Is this actually necessary (?)
-         """
-         return self.reader.get_audio_frame_from_t(self._get_t(t))
-
-     def get_audio_frames_from_t(
-         self,
-         t: Union[int, float, Fraction]
-     ):
-         """
-         Get the sequence of audio frames for the
-         given video 't' time moment, using the
-         audio cache system.
-
-         This is useful when we want to write a
-         video frame with its audio, so we obtain
-         all the audio frames associated with it
-         (remember that a video frame is associated
-         with more than 1 audio frame).
-         """
-         print(f'Getting audio frames from {str(float(t))} that is actually {str(float(self._get_t(t)))}')
-         for frame in self.reader.get_audio_frames_from_t(self._get_t(t)):
-             yield frame
-
-     def save_as(
-         self,
-         filename: str
-     ) -> 'Video':
-         """
-         Save the video locally as the given 'filename'.
-
-         TODO: For now we are doing tests inside, so
-         the functionality is a manual test. Use it
-         carefully.
-         """
-         writer = VideoWriter(filename)
-         writer.set_video_stream_from_template(self.reader.video_stream)
-         writer.set_audio_stream_from_template(self.reader.audio_stream)
-
-         from yta_video_opengl.nodes.audio import VolumeAudioNode
-         # Audio from 0 to 1
-         # TODO: This effect 'fn' is shitty
-         def fade_in_fn(t, index, start=0.5, end=1.0):
-             if t < start or t > end:
-                 # outside the range: do not touch anything → original volume (1.0)
-                 progress = 1.0
-             else:
-                 # inside the range: interpolate linearly between 0 → 1
-                 progress = (t - start) / (end - start)
-
-             return progress
-
-         #fade_in = SetVolumeAudioNode(lambda t, i: min(1, t / self.duration))
-         fade_in = VolumeAudioNode(lambda t, i: fade_in_fn(t, i, 0.5, 1.0))
-
-         for frame, t, index in self.frames:
-             if PythonValidator.is_instance_of(frame, 'VideoFrame'):
-                 print(f'Saving video frame {str(index)}, with t = {str(t)}')
-
-                 # TODO: Process any video frame change
-
-                 writer.mux_video_frame(
-                     frame = frame
-                 )
-             else:
-                 print(f'Saving audio frame {str(index)} ({str(round(float(t * self.reader.fps), 2))}), with t = {str(t)}')
-
-                 # TODO: Process any audio frame change
-                 # Test setting audio
-                 frame = fade_in.process(frame, t)
-
-                 writer.mux_audio_frame(
-                     frame = frame
-                 )
-
-         # Flush the remaining frames to write
-         writer.mux_audio_frame(None)
-         writer.mux_video_frame(None)
-
-         # TODO: Maybe move this to the '__del__' (?)
-         writer.output.close()
-         self.reader.container.close()
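
The deleted save_as test relies on two small ideas: _get_t offsets a local 't' into the subclipped source range, and the fade_in_fn callback passed to VolumeAudioNode ramps the volume linearly inside a [start, end] window. A self-contained sketch of both, assuming nothing from the deleted reader/writer classes (the helper names below are illustrative, not part of the package):

from fractions import Fraction

def map_subclip_t(t, start, end):
    # Offset a local 't' into the source range, as Video._get_t did
    t = Fraction(t).limit_denominator() + Fraction(start).limit_denominator()
    if t >= Fraction(end).limit_denominator():
        raise ValueError(f'The "t" ({t}) provided is out of the [{start}, {end}) range.')
    return t

def fade_in_volume(t, start = 0.5, end = 1.0):
    # Linear 0 → 1 volume ramp inside [start, end]; original volume elsewhere
    if t < start or t > end:
        return 1.0
    return (t - start) / (end - start)

# A video subclipped to [1.0, 2.0): its local t=0.5 reads the source at t=1.5
assert map_subclip_t(0.5, 1.0, 2.0) == Fraction(3, 2)

# The volume ramps from 0 at t=0.5 to 1 at t=1.0 and stays at 1.0 outside
assert fade_in_volume(0.75) == 0.5 and fade_in_volume(0.25) == 1.0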