yta-video-opengl 0.0.23__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,10 +2,9 @@
 Module to include video handling with OpenGL.
 """
 def main():
-    # from yta_video_opengl.tests import video_modified_stored
+    from yta_video_opengl.tests import video_modified_stored
 
-    # video_modified_stored()
-    print('main')
+    video_modified_stored()
 
 
 if __name__ == '__main__':
@@ -0,0 +1,382 @@
+from yta_video_opengl.nodes.video.opengl import WavingNode
+from yta_video_opengl.nodes.audio import ChorusNode
+from yta_video_opengl.nodes import TimedNode
+from yta_validation.parameter import ParameterValidator
+from yta_programming.decorators import singleton_old
+from typing import Union
+
+import moderngl
+
+
+class _AudioEffects:
+    """
+    *For internal use only*
+
+    The audio effects that are made available
+    through our internal _Effects class, which
+    wraps and exposes all the audio effects
+    we want to be available.
+    """
+
+    def __init__(
+        self,
+        effects: 'Effects'
+    ):
+        self._effects: Effects = effects
+        """
+        The parent instance that includes this
+        class instance as a property.
+        """
+
+    """
+    Here below we expose all the effects
+    we want the users to have available to
+    be used.
+    """
+    def chorus(
+        self,
+        sample_rate: int,
+        depth: int = 0,
+        frequency: float = 0.25,
+        start: Union[int, float, 'Fraction'] = 0,
+        end: Union[int, float, 'Fraction', None] = None
+    ):
+        return _create_node(
+            ChorusNode(
+                sample_rate = sample_rate,
+                depth = depth,
+                frequency = frequency
+            ),
+            start = start,
+            end = end
+        )
+
+    # TODO: Include definitive and tested audio
+    # effects here below
+
+class _VideoEffects:
+    """
+    *For internal use only*
+
+    The video effects that are made available
+    through our internal _Effects class, which
+    wraps and exposes all the video effects
+    we want to be available.
+    """
+
+    def __init__(
+        self,
+        effects: 'Effects'
+    ):
+        self._effects: Effects = effects
+        """
+        The parent instance that includes this
+        class instance as a property.
+        """
+
+    """
+    Here below we expose all the effects
+    we want the users to have available to
+    be used.
+    """
+    def waving_node(
+        self,
+        # TODO: Maybe 'frame_size' (?)
+        size: tuple[int, int],
+        amplitude: float = 0.05,
+        frequency: float = 10.0,
+        speed: float = 2.0,
+        start: Union[int, float, 'Fraction'] = 0.0,
+        end: Union[int, float, 'Fraction', None] = None
+    ) -> 'TimedNode':
+        """
+        TODO: Explain this better.
+
+        The 'start' and 'end' time moments are the
+        limits of the time range in which the effect
+        has to be applied to the frames inside that
+        time range. Providing start=0 and end=None
+        will make the effect be applied to every
+        frame.
+        """
+        return _create_node(
+            WavingNode(
+                context = self._effects.opengl_context,
+                size = size,
+                amplitude = amplitude,
+                frequency = frequency,
+                speed = speed
+            ),
+            start = start,
+            end = end
+        )
+
+    # TODO: Include definitive and tested video
+    # effects here below
+
+@singleton_old
+class Effects:
+    """
+    Singleton instance.
+
+    It is a singleton so that there is a
+    unique context shared by all the
+    instances that need it and that
+    instantiate this class to obtain it.
+    Here we group all the nodes we have
+    available for the user.
+
+    This class simplifies the access to
+    the effect nodes and also keeps the
+    single context always available.
+
+    Even though we can have more effects,
+    this class is also the way we expose only
+    the ones we actually want to expose to
+    the user.
+
+    The GPU will do the calculations in
+    parallel by itself, so we can handle a
+    single context and make the nodes share
+    textures and buffers.
+    """
+
+    def __init__(
+        self
+    ):
+        self.opengl_context = moderngl.create_context(standalone = True)
+        """
+        The opengl context that will be shared
+        by all the opengl nodes.
+        """
+        self.audio: _AudioEffects = _AudioEffects(self)
+        """
+        Shortcut to the audio effects that are
+        available.
+        """
+        self.video: _VideoEffects = _VideoEffects(self)
+        """
+        Shortcut to the video effects that are
+        available.
+        """
+
+def _create_node(
+    node: Union['_AudioNode', '_VideoNode'],
+    start: Union[int, float, 'Fraction'],
+    end: Union[int, float, 'Fraction', None]
+):
+    # There can be other classes in between,
+    # because an OpenglNode inherits from
+    # another class
+    ParameterValidator.validate_mandatory_subclass_of('node', node, ['_AudioNode', '_VideoNode', 'OpenglNodeBase'])
+
+    # We have to create a node wrapper with the
+    # time range in which it has to be applied
+    # to all the frames.
+    return TimedNode(
+        node = node,
+        start = start,
+        end = end
+    )
+
+class _EffectStacked:
+    """
+    Class to wrap an effect that will be
+    stacked with a specific priority.
+
+    The lower the value, the higher the
+    priority.
+    """
+
+    @property
+    def is_audio_effect(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if it is an audio effect
+        or not.
+        """
+        return self.effect.is_audio_node
+
+    @property
+    def is_video_effect(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if it is a video effect
+        or not.
+        """
+        return self.effect.is_video_node
+
+    def __init__(
+        self,
+        effect: TimedNode,
+        priority: int
+    ):
+        self.effect: TimedNode = effect
+        """
+        The effect to be applied.
+        """
+        self.priority: int = priority
+        """
+        The priority this stacked effect has versus
+        the other stacked effects.
+        """
+
+# TODO: Move to another py file (?)
+class EffectsStack:
+    """
+    Class to hold a collection of effects
+    we want to apply to some entity, which
+    makes it easier to apply them.
+
+    You can use this stack to keep the effects
+    you want to apply on a Media or on the
+    Timeline of your video editor.
+    """
+
+    @property
+    def video_effects(
+        self
+    ) -> list[_EffectStacked]:
+        """
+        The video effects, ordered by 'priority'
+        and 'start' time moment.
+        """
+        return sorted(
+            [
+                effect
+                for effect in self._effects
+                if effect.is_video_effect
+            ],
+            key = lambda effect: (effect.priority, effect.effect.start)
+        )
+
+    @property
+    def audio_effects(
+        self
+    ) -> list[_EffectStacked]:
+        """
+        The audio effects, ordered by 'priority'
+        and 'start' time moment.
+        """
+        return sorted(
+            [
+                effect
+                for effect in self._effects
+                if effect.is_audio_effect
+            ],
+            key = lambda effect: (effect.priority, effect.effect.start)
+        )
+
+    @property
+    def lowest_audio_priority(
+        self
+    ) -> int:
+        """
+        The lowest priority value among the audio
+        effects, or 0 if there are no audio effects.
+        """
+        return min(
+            (effect.priority for effect in self.audio_effects),
+            # 'default' avoids an error when the list is empty
+            default = 0
+        )
+
+    @property
+    def lowest_video_priority(
+        self
+    ) -> int:
+        """
+        The lowest priority value among the video
+        effects, or 0 if there are no video effects.
+        """
+        return min(
+            (effect.priority for effect in self.video_effects),
+            # 'default' avoids an error when the list is empty
+            default = 0
+        )
+
+    def __init__(
+        self
+    ):
+        self._effects: list[_EffectStacked] = []
+        """
+        A list containing all the effects that
+        have been added to this stack, unordered.
+        """
+
+    def get_audio_effects_for_t(
+        self,
+        t: Union[int, float, 'Fraction']
+    ) -> list[TimedNode]:
+        """
+        Get the audio effects, ordered by priority
+        and by the 'start' field, that must be applied
+        at the 't' time moment provided because it
+        falls within their [start, end) time range.
+        """
+        return [
+            effect.effect
+            for effect in self.audio_effects
+            if effect.effect.is_within_time(t)
+        ]
+
+    def get_video_effects_for_t(
+        self,
+        t: Union[int, float, 'Fraction']
+    ) -> list[TimedNode]:
+        """
+        Get the video effects, ordered by priority
+        and by the 'start' field, that must be applied
+        at the 't' time moment provided because it
+        falls within their [start, end) time range.
+        """
+        return [
+            effect.effect
+            for effect in self.video_effects
+            if effect.effect.is_within_time(t)
+        ]
+
+    def add_effect(
+        self,
+        effect: TimedNode,
+        priority: Union[int, None] = None
+    ) -> 'EffectsStack':
+        """
+        Add the provided 'effect' to the stack.
+        """
+        ParameterValidator.validate_mandatory_instance_of('effect', effect, TimedNode)
+        ParameterValidator.validate_positive_int('priority', priority, do_include_zero = True)
+
+        # TODO: What about the same effect added
+        # twice during the same time range? Can we
+        # allow it? It would be applied twice for
+        # specific 't' time moments but with
+        # different attributes. Is it ok (?)
+
+        # TODO: What if the priority is already taken?
+        # Should we let some effects have the same
+        # priority (?)
+        priority = (
+            self.lowest_audio_priority + 1
+            if (
+                priority is None and
+                effect.is_audio_node
+            ) else
+            self.lowest_video_priority + 1
+            if (
+                priority is None and
+                effect.is_video_node
+            ) else
+            priority
+        )
+
+        self._effects.append(_EffectStacked(
+            effect = effect,
+            priority = priority
+        ))
+
+        return self
+
+    # TODO: Create 'remove_effect'
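
The new module above only defines the wiring: effect nodes are wrapped into TimedNode instances by `_create_node` and kept in an `EffectsStack` ordered by priority and start time. A minimal usage sketch follows. It is illustrative only: the import path `yta_video_opengl.effects` is an assumption (the diff does not show file names), the argument values are arbitrary, and it assumes a standalone OpenGL context can be created on the machine running it.

# Usage sketch (illustrative, not part of the package diff).
from yta_video_opengl.effects import Effects, EffectsStack  # assumed module path

effects = Effects()   # singleton: creates the shared standalone moderngl context

# Wrap a video effect in a TimedNode that is only applied in the [1, 3) range.
waving = effects.video.waving_node(
    size = (1920, 1080),
    amplitude = 0.05,
    start = 1,
    end = 3
)

stack = EffectsStack()
stack.add_effect(waving)   # priority is auto-assigned when not provided

# Only effects whose [start, end) range contains 't' are returned.
stack.get_video_effects_for_t(2)   # -> [waving]
stack.get_video_effects_for_t(5)   # -> []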
@@ -1,31 +1,12 @@
+from yta_video_opengl.nodes.video import _VideoNode
+from yta_video_opengl.nodes.audio import _AudioNode
 from yta_validation.parameter import ParameterValidator
+from yta_validation import PythonValidator
 from typing import Union
-from abc import ABC, abstractmethod
 
 import moderngl
 
 
-class Node(ABC):
-    """
-    Base class to represent a node, which
-    is an entity that processes frames
-    individually.
-
-    This class must be inherited by any
-    video or audio node class.
-    """
-
-    # TODO: What about the types?
-    # TODO: Should we expect pyav frames (?)
-    @abstractmethod
-    def process(
-        frame: Union['VideoFrame', 'AudioFrame', moderngl.Texture],
-        t: float
-        # TODO: Maybe we need 'fps' and 'number_of_frames'
-        # to calculate progressions or similar...
-    ) -> Union['VideoFrame', 'AudioFrame', moderngl.Texture]:
-        pass
-
 class TimedNode:
     """
     Class to represent a Node wrapper to
@@ -52,11 +33,35 @@ class TimedNode:
     The 'start' and 'end' values by default
     """
 
+    @property
+    def is_audio_node(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the node (or effect)
+        is an audio effect or not.
+        """
+        # TODO: Is this checking not only the
+        # first level (?)
+        return PythonValidator.is_subclass_of(self.node, _AudioNode)
+
+    @property
+    def is_video_node(
+        self
+    ) -> bool:
+        """
+        Flag to indicate if the node (or effect)
+        is a video effect or not.
+        """
+        # TODO: Is this checking not only the
+        # first level (?)
+        return PythonValidator.is_subclass_of(self.node, _VideoNode)
+
     def __init__(
         self,
-        node: Node,
-        start: float = 0.0,
-        end: Union[float, None] = None
+        node: Union[_VideoNode, _AudioNode],
+        start: Union[int, float, 'Fraction'] = 0.0,
+        end: Union[int, float, 'Fraction', None] = None
     ):
         ParameterValidator.validate_mandatory_positive_number('start', start, do_include_zero = True)
         ParameterValidator.validate_positive_number('end', end, do_include_zero = False)
@@ -67,7 +72,7 @@ class TimedNode:
        ):
            raise Exception('The "end" parameter provided must be greater or equal to the "start" parameter.')
 
-        self.node: Node = node
+        self.node: Union[_VideoNode, _AudioNode] = node
         """
         The node we are wrapping and we want to
         apply as a modification of the frame in
@@ -84,6 +89,28 @@ class TimedNode:
         stop being applied (excluding it).
         """
 
+    def _get_t(
+        self,
+        t: Union[int, float, 'Fraction']
+    ) -> float:
+        """
+        Obtain the 't' time moment relative to the
+        start of the effect.
+
+        Imagine `start=3` and `end=5`, and we receive
+        a `t=4`. It is inside the range, so we have
+        to apply the effect, but as the effect
+        lasts from second 3 to second 5 (`duration=2`),
+        the `t=4` is actually a `t=1` for the effect,
+        because that is the time elapsed since the
+        effect started being applied, which was at
+        second 3.
+
+        The formula:
+        - `t - self.start`
+        """
+        return t - self.start
+
     def is_within_time(
         self,
         t: float
@@ -91,7 +118,9 @@ class TimedNode:
         """
         Flag to indicate if the 't' time moment provided
         is in the range of this TimedNode instance,
-        which means that it fits this condition:
+        which means it is between the 'start' and the 'end'.
+
+        The formula:
         - `start <= t < end`
         """
         return (
@@ -113,7 +142,7 @@ class TimedNode:
         instance.
         """
         return (
-            self.node.process(frame, t)
+            self.node.process(frame, self._get_t(t))
             if self.is_within_time(t) else
             frame
         )
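
The TimedNode changes above add the `[start, end)` membership check and remap the global 't' to an effect-local time before calling the wrapped node. The small sketch below reproduces just that arithmetic with plain functions, so the behaviour can be seen without instantiating any real node; the function names are local to the sketch, not part of the package.

# Illustrative reproduction of the TimedNode time logic shown above.
start, end = 3, 5

def is_within_time(t) -> bool:
    # The effect applies while start <= t < end ('end' is excluded).
    return t >= start and (end is None or t < end)

def get_local_t(t):
    # Same idea as TimedNode._get_t(): time elapsed since the effect started.
    return t - start

for t in (2, 3, 4, 5):
    print(t, is_within_time(t), get_local_t(t) if is_within_time(t) else None)
# 2 -> not applied; 3 -> local t=0; 4 -> local t=1; 5 -> not applied (end excluded)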