videopython 0.1.2__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of videopython has been flagged as potentially problematic.

Files changed (39)
  1. {videopython-0.1.2 → videopython-0.1.4}/PKG-INFO +43 -35
  2. videopython-0.1.4/README.md +53 -0
  3. {videopython-0.1.2 → videopython-0.1.4}/pyproject.toml +15 -16
  4. videopython-0.1.4/requirements-dev.txt +7 -0
  5. videopython-0.1.4/requirements-generation.txt +4 -0
  6. videopython-0.1.4/requirements.txt +6 -0
  7. videopython-0.1.4/src/videopython/base/effects.py +183 -0
  8. videopython-0.1.4/src/videopython/base/transforms.py +178 -0
  9. {videopython-0.1.2 → videopython-0.1.4}/src/videopython/base/transitions.py +43 -5
  10. {videopython-0.1.2 → videopython-0.1.4}/src/videopython/base/video.py +39 -60
  11. videopython-0.1.4/src/videopython/generation/__init__.py +10 -0
  12. videopython-0.1.4/src/videopython/generation/audio.py +22 -0
  13. videopython-0.1.4/src/videopython/generation/image.py +22 -0
  14. videopython-0.1.4/src/videopython/generation/video.py +45 -0
  15. videopython-0.1.4/src/videopython/utils/common.py +31 -0
  16. videopython-0.1.4/src/videopython/utils/image.py +275 -0
  17. {videopython-0.1.2 → videopython-0.1.4}/src/videopython.egg-info/PKG-INFO +43 -35
  18. {videopython-0.1.2 → videopython-0.1.4}/src/videopython.egg-info/SOURCES.txt +11 -3
  19. videopython-0.1.4/src/videopython.egg-info/requires.txt +21 -0
  20. videopython-0.1.4/tests/test_effects.py +71 -0
  21. {videopython-0.1.2 → videopython-0.1.4}/tests/test_transforms.py +1 -1
  22. {videopython-0.1.2 → videopython-0.1.4}/tests/test_transitions.py +11 -1
  23. videopython-0.1.4/tests/test_utils.py +11 -0
  24. videopython-0.1.2/README.md +0 -54
  25. videopython-0.1.2/src/videopython/base/transforms.py +0 -94
  26. videopython-0.1.2/src/videopython/generation/openai/text_to_speech.py +0 -31
  27. videopython-0.1.2/src/videopython/generation/stability/text_to_image.py +0 -77
  28. videopython-0.1.2/src/videopython/utils/__init__.py +0 -0
  29. videopython-0.1.2/src/videopython/utils/common.py +0 -20
  30. videopython-0.1.2/src/videopython.egg-info/requires.txt +0 -10
  31. {videopython-0.1.2 → videopython-0.1.4}/LICENSE +0 -0
  32. {videopython-0.1.2 → videopython-0.1.4}/setup.cfg +0 -0
  33. {videopython-0.1.2/src/videopython → videopython-0.1.4/src/videopython/base}/__init__.py +0 -0
  34. {videopython-0.1.2 → videopython-0.1.4}/src/videopython/base/compose.py +0 -0
  35. {videopython-0.1.2/src/videopython/base → videopython-0.1.4/src/videopython/utils}/__init__.py +0 -0
  36. {videopython-0.1.2 → videopython-0.1.4}/src/videopython.egg-info/dependency_links.txt +0 -0
  37. {videopython-0.1.2 → videopython-0.1.4}/src/videopython.egg-info/top_level.txt +0 -0
  38. {videopython-0.1.2 → videopython-0.1.4}/tests/test_compose.py +0 -0
  39. {videopython-0.1.2 → videopython-0.1.4}/tests/test_video.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: videopython
- Version: 0.1.2
+ Version: 0.1.4
  Summary: Minimal video generation and processing library.
  Author-email: Bartosz Wójtowicz <bartoszwojtowicz@outlook.com>, Bartosz Rudnikowicz <bartoszrudnikowicz840@gmail.com>, Piotr Pukisz <piotr.pukisz@gmail.com>
  License: Apache License
@@ -199,7 +199,7 @@ License: Apache License
  Project-URL: Homepage, https://github.com/bartwojtowicz/videopython/
  Project-URL: Bug Reports, https://github.com/bartwojtowicz/videopython/issues
  Project-URL: Source, https://github.com/bartwojtowicz/videopython/
- Keywords: videopython,video,movie,opencv,generation,editing
+ Keywords: python,videopython,video,movie,opencv,generation,editing
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
@@ -210,14 +210,23 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: click>=8.1.7
  Requires-Dist: numpy>=1.25.2
- Requires-Dist: opencv-python>=4.7.0.68
- Requires-Dist: pytest>=7.4.0
- Requires-Dist: transformers>=4.36.0
- Requires-Dist: diffusers>=0.21.4
- Requires-Dist: torch>=2.1.0
- Requires-Dist: stability-sdk>=0.8.4
- Requires-Dist: openai==1.3.5
+ Requires-Dist: opencv-python>=4.9.0.80
+ Requires-Dist: pillow>=10.3.0
  Requires-Dist: pydub>=0.25.1
+ Requires-Dist: tqdm>=4.66.3
+ Provides-Extra: dev
+ Requires-Dist: black==24.3.0; extra == "dev"
+ Requires-Dist: isort==5.12.0; extra == "dev"
+ Requires-Dist: mypy==1.8.0; extra == "dev"
+ Requires-Dist: pytest==7.4.0; extra == "dev"
+ Requires-Dist: types-Pillow==10.2.0.20240213; extra == "dev"
+ Requires-Dist: types-tqdm==4.66.0.20240106; extra == "dev"
+ Requires-Dist: pydub-stubs==0.25.1.1; extra == "dev"
+ Provides-Extra: generation
+ Requires-Dist: accelerate>=0.29.2; extra == "generation"
+ Requires-Dist: diffusers>=0.26.3; extra == "generation"
+ Requires-Dist: torch>=2.1.0; extra == "generation"
+ Requires-Dist: transformers>=4.38.1; extra == "generation"

  # About

@@ -235,41 +244,40 @@ sudo apt-get install ffmpeg

  ### Install with pip
  ```bash
- pip install videopython
+ pip install videopython[generation]
  ```
+ > You can install without `[generation]` dependencies for basic video handling and processing.
+ > The functionalities found in `videopython.generation` won't work.

  ## Basic Usage
+ > Using Nvidia A40 or better is recommended for the `videopython.generation` module.

  ```python
- from videopython.base.video import Video
- from videopython.base.transitions import FadeTransition
+ # Generate image and animate it
+ from videopython.generation import ImageToVideo
+ from videopython.generation import TextToImage

- # Load video
- video = Video.from_path("tests/test_data/fast_benchmark.mp4")
- print(video.metadata)
- print(video.frames.shape) # Video is based on numpy representation of frames
+ image = TextToImage().generate_image(prompt="Golden Retriever playing in the park")
+ video = ImageToVideo().generate_video(image=image, fps=24)

- # Generate videos
- video1 = Video.from_prompt("Dogs playing in the snow.")
- video2 = Video.from_prompt("Dogs going back home.")
+ # Video generation directly from prompt
+ from videopython.generation import TextToVideo
+ video_gen = TextToVideo()
+ video = video_gen.generate_video("Dogs playing in the snow")
+ for _ in range(10):
+     video += video_gen.generate_video("Dogs playing in the snow")

- # Add videos
- combined_video = video1 + video2
- print(combined_video.metadata)
+ # Cut the first 2 seconds
+ from videopython.base.transforms import CutSeconds
+ transformed_video = CutSeconds(start_second=0, end_second=2).apply(video.copy())

- # Apply fade transition between videos
- fade = FadeTransition(0.5) # 0.5s effect time
- faded_video = fade.apply(videos=(video1, video2))
- print(faded_video.metadata)
+ # Upsample to 30 FPS
+ from videopython.base.transforms import ResampleFPS
+ transformed_video = ResampleFPS(new_fps=30).apply(transformed_video)

- # Add audio from file
- faded_video.add_audio_from_file("tests/test_data/test_audio.mp3")
+ # Resize to 1000x1000
+ from videopython.base.transforms import Resize
+ transformed_video = Resize(width=1000, height=1000).apply(transformed_video)

- # Save to a file
- faded_video.save("my_video.mp4")
- ```
-
- ### Running Unit Tests
- ```bash
- PYTHONPATH=./src/ pytest
+ filepath = transformed_video.save()
  ```
@@ -0,0 +1,53 @@
+ # About
+
+ Minimal video generation and processing library.
+
+ ## Setup
+
+ ### Install ffmpeg
+ ```bash
+ # Install with brew for MacOS:
+ brew install ffmpeg
+ # Install with apt-get for Ubuntu:
+ sudo apt-get install ffmpeg
+ ```
+
+ ### Install with pip
+ ```bash
+ pip install videopython[generation]
+ ```
+ > You can install without `[generation]` dependencies for basic video handling and processing.
+ > The functionalities found in `videopython.generation` won't work.
+
+ ## Basic Usage
+ > Using Nvidia A40 or better is recommended for the `videopython.generation` module.
+
+ ```python
+ # Generate image and animate it
+ from videopython.generation import ImageToVideo
+ from videopython.generation import TextToImage
+
+ image = TextToImage().generate_image(prompt="Golden Retriever playing in the park")
+ video = ImageToVideo().generate_video(image=image, fps=24)
+
+ # Video generation directly from prompt
+ from videopython.generation import TextToVideo
+ video_gen = TextToVideo()
+ video = video_gen.generate_video("Dogs playing in the snow")
+ for _ in range(10):
+     video += video_gen.generate_video("Dogs playing in the snow")
+
+ # Cut the first 2 seconds
+ from videopython.base.transforms import CutSeconds
+ transformed_video = CutSeconds(start_second=0, end_second=2).apply(video.copy())
+
+ # Upsample to 30 FPS
+ from videopython.base.transforms import ResampleFPS
+ transformed_video = ResampleFPS(new_fps=30).apply(transformed_video)
+
+ # Resize to 1000x1000
+ from videopython.base.transforms import Resize
+ transformed_video = Resize(width=1000, height=1000).apply(transformed_video)
+
+ filepath = transformed_video.save()
+ ```
@@ -1,15 +1,23 @@
  [build-system]
- requires = ["setuptools>=61.0"]
+ requires = ["setuptools>=66.1"]
  build-backend = "setuptools.build_meta"

+ [tool.setuptools.packages.find]
+ where = ["src"]
+ include = ["videopython.*"]
+
+ [tool.setuptools.package-data]
+ "videopython" = ["py.typed"]
+
  [project]
  name = "videopython"
- version = "0.1.2"
+ version = "0.1.4"
  description = "Minimal video generation and processing library."
  readme = "README.md"
  requires-python = ">=3.10"
  license = {file = "LICENSE"}
- keywords = ["videopython", "video", "movie", "opencv", "generation", "editing"]
+ keywords = ["python", "videopython", "video", "movie", "opencv", "generation", "editing"]
+ dynamic = ["dependencies", "optional-dependencies"]

  authors = [
    {name = "Bartosz Wójtowicz", email = "bartoszwojtowicz@outlook.com" },
@@ -25,20 +33,11 @@ classifiers = [
  "Operating System :: OS Independent",
  ]

- dependencies = [
-     "click>=8.1.7",
-     "numpy>=1.25.2",
-     "opencv-python>=4.7.0.68",
-     "pytest>=7.4.0",
-     "transformers>=4.36.0",
-     "diffusers>=0.21.4",
-     "torch>=2.1.0",
-     "stability-sdk>=0.8.4",
-     "openai==1.3.5",
-     "pydub>=0.25.1"
- ]
+ [tool.setuptools.dynamic]
+ dependencies = {file = ["requirements.txt"]}
+ optional-dependencies = { dev = {file = ["requirements-dev.txt"]}, generation = {file = ["requirements-generation.txt"]} }

- [project.urls] # Optional
+ [project.urls]
  "Homepage" = "https://github.com/bartwojtowicz/videopython/"
  "Bug Reports" = "https://github.com/bartwojtowicz/videopython/issues"
  "Source" = "https://github.com/bartwojtowicz/videopython/"
@@ -0,0 +1,7 @@
+ black==24.3.0
+ isort==5.12.0
+ mypy==1.8.0
+ pytest==7.4.0
+ types-Pillow==10.2.0.20240213
+ types-tqdm==4.66.0.20240106
+ pydub-stubs==0.25.1.1
@@ -0,0 +1,4 @@
+ accelerate>=0.29.2
+ diffusers>=0.26.3
+ torch>=2.1.0
+ transformers>=4.38.1
@@ -0,0 +1,6 @@
+ click>=8.1.7
+ numpy>=1.25.2
+ opencv-python>=4.9.0.80
+ pillow>=10.3.0
+ pydub>=0.25.1
+ tqdm>=4.66.3
@@ -0,0 +1,183 @@
+ from abc import ABC, abstractmethod
+ from typing import Literal, final
+
+ import cv2
+ import numpy as np
+ from PIL import Image
+ from tqdm import tqdm
+
+ from videopython.base.video import Video
+
+
+ class Effect(ABC):
+     """Abstract class for effect on frames of video.
+
+     The effect must not change the number of frames and the shape of the frames.
+     """
+
+     @final
+     def apply(self, video: Video, start: float | None = None, stop: float | None = None) -> Video:
+         original_shape = video.video_shape
+         start = start if start is not None else 0
+         stop = stop if stop is not None else video.total_seconds
+         # Check for start and stop correctness
+         if not 0 <= start <= video.total_seconds:
+             raise ValueError(f"Video is only {video.total_seconds} long, but passed start: {start}!")
+         elif not start <= stop <= video.total_seconds:
+             raise ValueError(f"Video is only {video.total_seconds} long, but passed stop: {stop}!")
+         # Apply effect on video slice
+         effect_start_frame = round(start * video.fps)
+         effect_end_frame = round(stop * video.fps)
+         video_with_effect = self._apply(video[effect_start_frame:effect_end_frame])
+         old_audio = video.audio
+         video = Video.from_frames(
+             np.r_[
+                 "0,2",
+                 video.frames[:effect_start_frame],
+                 video_with_effect.frames,
+                 video.frames[effect_end_frame:],
+             ],
+             fps=video.fps,
+         )
+         video.audio = old_audio
+         # Check if dimensions didn't change
+         if not video.video_shape == original_shape:
+             raise RuntimeError("The effect must not change the number of frames and the shape of the frames!")
+
+         return video
+
+     @abstractmethod
+     def _apply(self, video: Video) -> Video:
+         pass
+
+
+ class FullImageOverlay(Effect):
+     def __init__(self, overlay_image: np.ndarray, alpha: float | None = None, fade_time: float = 0.0):
+         if alpha is not None and not 0 <= alpha <= 1:
+             raise ValueError("Alpha must be in range [0, 1]!")
+         elif not (overlay_image.ndim == 3 and overlay_image.shape[-1] in [3, 4]):
+             raise ValueError("Only RGB and RGBA images are supported as an overlay!")
+         elif alpha is None:
+             alpha = 1.0
+
+         if overlay_image.shape[-1] == 3:
+             overlay_image = np.dstack([overlay_image, np.full(overlay_image.shape[:2], 255, dtype=np.uint8)])
+
+         self.alpha = alpha
+         self.overlay = overlay_image.astype(np.uint8)
+         self.fade_time = fade_time
+
+     def _overlay(self, img: np.ndarray, alpha: float = 1.0) -> np.ndarray:
+         img_pil = Image.fromarray(img)
+         overlay = self.overlay.copy()
+         overlay[:, :, 3] = overlay[:, :, 3] * (self.alpha * alpha)
+         overlay_pil = Image.fromarray(overlay)
+         img_pil.paste(overlay_pil, (0, 0), overlay_pil)
+         return np.array(img_pil)
+
+     def _apply(self, video: Video) -> Video:
+         if not video.frame_shape == self.overlay[:, :, :3].shape:
+             raise ValueError(
+                 f"Mismatch of overlay shape `{self.overlay.shape}` with video shape: `{video.frame_shape}`!"
+             )
+         elif not (0 <= 2 * self.fade_time <= video.total_seconds):
+             raise ValueError(f"Video is only {video.total_seconds}s long, but fade time is {self.fade_time}s!")
+
+         print("Overlaying video...")
+         if self.fade_time == 0:
+             video.frames = np.array([self._overlay(frame) for frame in tqdm(video.frames)], dtype=np.uint8)
+         else:
+             num_video_frames = len(video.frames)
+             num_fade_frames = round(self.fade_time * video.fps)
+             new_frames = []
+             for i, frame in enumerate(tqdm(video.frames)):
+                 frames_dist_from_end = min(i, num_video_frames - i)
+                 if frames_dist_from_end >= num_fade_frames:
+                     fade_alpha = 1.0
+                 else:
+                     fade_alpha = frames_dist_from_end / num_fade_frames
+                 new_frames.append(self._overlay(frame, fade_alpha))
+             video.frames = np.array(new_frames, dtype=np.uint8)
+         return video
+
+
+ class Blur(Effect):
+     def __init__(
+         self,
+         mode: Literal["constant", "ascending", "descending"],
+         iterations: int,
+         kernel_size: tuple[int, int] = (5, 5),
+     ):
+         if iterations < 1:
+             raise ValueError("Iterations must be at least 1!")
+         self.mode = mode
+         self.iterations = iterations
+         self.kernel_size = kernel_size
+
+     def _apply(self, video: Video) -> Video:
+         n_frames = len(video.frames)
+         new_frames = []
+         if self.mode == "constant":
+             for frame in video.frames:
+                 blurred_frame = frame
+                 for _ in range(self.iterations):
+                     blurred_frame = cv2.GaussianBlur(blurred_frame, self.kernel_size, 0)
+                 new_frames.append(blurred_frame)
+         elif self.mode == "ascending":
+             for i, frame in tqdm(enumerate(video.frames)):
+                 frame_iterations = max(1, round((i / n_frames) * self.iterations))
+                 blurred_frame = frame
+                 for _ in range(frame_iterations):
+                     blurred_frame = cv2.GaussianBlur(blurred_frame, self.kernel_size, 0)
+                 new_frames.append(blurred_frame)
+         elif self.mode == "descending":
+             for i, frame in tqdm(enumerate(video.frames)):
+                 frame_iterations = max(round(((n_frames - i) / n_frames) * self.iterations), 1)
+                 blurred_frame = frame
+                 for _ in range(frame_iterations):
+                     blurred_frame = cv2.GaussianBlur(blurred_frame, self.kernel_size, 0)
+                 new_frames.append(blurred_frame)
+         else:
+             raise ValueError(f"Unknown mode: `{self.mode}`.")
+         video.frames = np.asarray(new_frames)
+         return video
+
+
+ class Zoom(Effect):
+     def __init__(self, zoom_factor: float, mode: Literal["in", "out"]):
+         if zoom_factor <= 1:
+             raise ValueError("Zoom factor must be greater than 1!")
+         self.zoom_factor = zoom_factor
+         self.mode = mode
+
+     def _apply(self, video: Video) -> Video:
+         n_frames = len(video.frames)
+         new_frames = []
+
+         width = video.metadata.width
+         height = video.metadata.height
+         crop_sizes_w, crop_sizes_h = np.linspace(width // self.zoom_factor, width, n_frames), np.linspace(
+             height // self.zoom_factor, height, n_frames
+         )
+
+         if self.mode == "in":
+             for frame, w, h in tqdm(zip(video.frames, reversed(crop_sizes_w), reversed(crop_sizes_h))):
+
+                 x = width / 2 - w / 2
+                 y = height / 2 - h / 2
+
+                 cropped_frame = frame[round(y) : round(y + h), round(x) : round(x + w)]
+                 zoomed_frame = cv2.resize(cropped_frame, (width, height))
+                 new_frames.append(zoomed_frame)
+         elif self.mode == "out":
+             for frame, w, h in tqdm(zip(video.frames, crop_sizes_w, crop_sizes_h)):
+                 x = width / 2 - w / 2
+                 y = height / 2 - h / 2
+
+                 cropped_frame = frame[round(y) : round(y + h), round(x) : round(x + w)]
+                 zoomed_frame = cv2.resize(cropped_frame, (width, height))
+                 new_frames.append(zoomed_frame)
+         else:
+             raise ValueError(f"Unknown mode: `{self.mode}`.")
+         video.frames = np.asarray(new_frames)
+         return video
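
The new `effects.py` centers on `Effect.apply(video, start, stop)`: it slices the video by seconds, runs the concrete effect on that span, and splices the untouched frames back around it while preserving the audio. A minimal usage sketch, assuming the package's own test asset; all paths are placeholders, and signatures follow the diff above:

```python
# Sketch only: paths are placeholders; signatures follow the diff above.
import numpy as np

from videopython.base.effects import Blur, FullImageOverlay, Zoom
from videopython.base.video import Video

video = Video.from_path("tests/test_data/fast_benchmark.mp4")  # placeholder asset

# Blur only the first second; frames outside [start, stop] are kept as-is.
blurred = Blur(mode="constant", iterations=10).apply(video.copy(), start=0, stop=1)

# Steady zoom-in over the whole clip (zoom_factor must be > 1).
zoomed = Zoom(zoom_factor=1.5, mode="in").apply(video.copy())

# Overlay a full-frame RGBA image at 80% opacity with a 0.5 s fade in and out.
overlay = np.zeros((*video.frame_shape[:2], 4), dtype=np.uint8)  # blank RGBA canvas
overlaid = FullImageOverlay(overlay, alpha=0.8, fade_time=0.5).apply(video.copy())
```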
@@ -0,0 +1,178 @@
+ from abc import ABC, abstractmethod
+ from enum import Enum
+ from multiprocessing import Pool
+ from typing import Literal
+
+ import cv2
+ import numpy as np
+ from tqdm import tqdm
+
+ from videopython.base.video import Video
+
+
+ class Transformation(ABC):
+     """Abstract class for transformation on frames of video."""
+
+     @abstractmethod
+     def apply(self, video: Video) -> Video:
+         pass
+
+
+ class TransformationPipeline:
+     def __init__(self, transformations: list[Transformation] | None):
+         """Initializes pipeline."""
+         self.transformations = transformations if transformations else []
+
+     def add(self, transformation: Transformation):
+         """Adds transformation to the pipeline.
+
+         Args:
+             transformation: Transformation to add.
+
+         Returns:
+             Pipeline with added transformation.
+         """
+         self.transformations.append(transformation)
+         return self
+
+     def run(self, video: Video) -> Video:
+         """Applies pipeline to the video.
+
+         Args:
+             video: Video to transform.
+
+         Returns:
+             Transformed video.
+         """
+         for transformation in self.transformations:
+             video = transformation.apply(video)
+         return video
+
+     def __call__(self, video: Video) -> Video:
+         return self.run(video)
+
+
+ class CutFrames(Transformation):
+     def __init__(self, start_frame: int, end_frame: int):
+         self.start_frame = start_frame
+         self.end_frame = end_frame
+
+     def apply(self, video: Video) -> Video:
+         video = video[self.start_frame : self.end_frame]
+         return video
+
+
+ class CutSeconds(Transformation):
+     def __init__(self, start_second: float | int, end_second: float | int):
+         self.start_second = start_second
+         self.end_second = end_second
+
+     def apply(self, video: Video) -> Video:
+         video = video[round(self.start_second * video.fps) : round(self.end_second * video.fps)]
+         return video
+
+
+ class Resize(Transformation):
+     def __init__(self, width: int | None = None, height: int | None = None):
+         self.width = width
+         self.height = height
+         if width is None and height is None:
+             raise ValueError("You must provide either `width` or `height`!")
+
+     def _resize_frame(self, frame: np.ndarray, new_width: int, new_height: int) -> np.ndarray:
+         return cv2.resize(
+             frame,
+             (new_width, new_height),
+             interpolation=cv2.INTER_AREA,
+         )
+
+     def apply(self, video: Video) -> Video:
+         if self.width and self.height:
+             new_height = self.height
+             new_width = self.width
+         elif self.height is None and self.width:
+             video_height = video.video_shape[1]
+             video_width = video.video_shape[2]
+             new_height = round(video_height * (self.width / video_width))
+             new_width = self.width
+         elif self.width is None and self.height:
+             video_height = video.video_shape[1]
+             video_width = video.video_shape[2]
+             new_width = round(video_width * (self.height / video_height))
+             new_height = self.height
+
+         print(f"Resizing video to: {new_width}x{new_height}!")
+         with Pool() as pool:
+             frames_copy = pool.starmap(
+                 self._resize_frame,
+                 [(frame, new_width, new_height) for frame in video.frames],
+             )
+         video.frames = np.array(frames_copy)
+         return video
+
+
+ class ResampleFPS(Transformation):
+     def __init__(self, new_fps: int | float):
+         self.new_fps = float(new_fps)
+
+     def _downsample(self, video: Video) -> Video:
+         target_frame_count = int(len(video.frames) * (self.new_fps / video.fps))
+         new_frame_indices = np.round(np.linspace(0, len(video.frames) - 1, target_frame_count)).astype(int)
+         video.frames = video.frames[new_frame_indices]
+         video.fps = self.new_fps
+         return video
+
+     def _upsample(self, video: Video) -> Video:
+         target_frame_count = int(len(video.frames) * (self.new_fps / video.fps))
+         new_frame_indices = np.linspace(0, len(video.frames) - 1, target_frame_count)
+         new_frames = []
+         for i in tqdm(range(len(new_frame_indices) - 1)):
+             # Interpolate between the two nearest frames
+             ratio = new_frame_indices[i] % 1
+             new_frame = (1 - ratio) * video.frames[int(new_frame_indices[i])] + ratio * video.frames[
+                 int(np.ceil(new_frame_indices[i]))
+             ]
+             new_frames.append(new_frame.astype(np.uint8))
+         video.frames = np.array(new_frames, dtype=np.uint8)
+         video.fps = self.new_fps
+         return video
+
+     def apply(self, video: Video) -> Video:
+         if video.fps == self.new_fps:
+             return video
+         elif video.fps > self.new_fps:
+             print(f"Downsampling video from {video.fps} to {self.new_fps} FPS.")
+             video = self._downsample(video)
+         else:
+             print(f"Upsampling video from {video.fps} to {self.new_fps} FPS.")
+             video = self._upsample(video)
+         return video
+
+
+ class CropMode(Enum):
+     CENTER = "center"
+
+
+ class Crop(Transformation):
+
+     def __init__(self, width: int, height: int, mode: CropMode = CropMode.CENTER):
+         self.width = width
+         self.height = height
+         self.mode = mode
+
+     def apply(self, video: Video) -> Video:
+         if self.mode == CropMode.CENTER:
+             current_shape = video.frame_shape[:2]
+             center_height = current_shape[0] // 2
+             center_width = current_shape[1] // 2
+             width_offset = self.width // 2
+             height_offset = self.height // 2
+             video.frames = video.frames[
+                 :,
+                 center_height - height_offset : center_height + height_offset,
+                 center_width - width_offset : center_width + width_offset,
+                 :,
+             ]
+         else:
+             raise ValueError(f"Unknown mode: {self.mode}")
+         return video
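
Beyond the individual transformations, `transforms.py` ships a `TransformationPipeline` that chains them and is callable. A minimal sketch, assuming the same placeholder test asset; since `add` returns `self`, calls chain:

```python
# Sketch only: path is a placeholder; classes come from the diff above.
from videopython.base.transforms import CutSeconds, ResampleFPS, Resize, TransformationPipeline
from videopython.base.video import Video

pipeline = TransformationPipeline([CutSeconds(start_second=0, end_second=2)])
pipeline.add(ResampleFPS(new_fps=30)).add(Resize(width=1000, height=1000))

video = Video.from_path("tests/test_data/fast_benchmark.mp4")  # placeholder asset
processed = pipeline(video)  # __call__ delegates to run()
```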
@@ -4,6 +4,7 @@ from typing import final

  import numpy as np

+ from videopython.base.effects import Blur
  from videopython.base.video import Video


@@ -15,19 +16,19 @@ class Transition(ABC):
      """

      @final
-     def apply(self, videos: tuple[Video, Video], **kwargs) -> Video:
+     def apply(self, videos: tuple[Video, Video]) -> Video:
          assert videos[0].metadata.can_be_merged_with(videos[1].metadata)
-         return self._apply(videos, **kwargs)
+         return self._apply(videos)

      @abstractmethod
-     def _apply(self, videos: tuple[Video, Video], **kwargs) -> Video:
+     def _apply(self, videos: tuple[Video, Video]) -> Video:
          pass


  class InstantTransition(Transition):
      """Instant cut without any transition."""

-     def _apply(self, videos: list[Video] | tuple[Video]) -> Video:
+     def _apply(self, videos: tuple[Video, Video]) -> Video:
          return videos[0] + videos[1]


@@ -57,7 +58,7 @@ class FadeTransition(Transition):
          effect_time_fps = math.floor(self.effect_time_seconds * video_fps)
          transition = self.fade(videos[0].frames[-effect_time_fps:], videos[1].frames[:effect_time_fps])

-         return Video.from_frames(
+         faded_videos = Video.from_frames(
              np.r_[
                  "0,2",
                  videos[0].frames[:-effect_time_fps],
@@ -66,3 +67,40 @@ class FadeTransition(Transition):
              ],
              fps=video_fps,
          )
+         faded_videos.audio = videos[0].audio.append(videos[1].audio, crossfade=(effect_time_fps / video_fps) * 1000)
+         return faded_videos
+
+
+ class BlurTransition(Transition):
+     def __init__(
+         self, effect_time_seconds: float = 1.5, blur_iterations: int = 400, blur_kernel_size: tuple[int, int] = (11, 11)
+     ):
+         self.effect_time_seconds = effect_time_seconds
+         self.blur_iterations = blur_iterations
+         self.blur_kernel_size = blur_kernel_size
+
+     def _apply(self, videos: tuple[Video, Video]) -> Video:
+         video_fps = videos[0].fps
+         for video in videos:
+             if video.total_seconds < self.effect_time_seconds:
+                 raise RuntimeError("Not enough space to make transition!")
+
+         effect_time_fps = math.floor(self.effect_time_seconds * video_fps)
+
+         ascending_blur = Blur("ascending", self.blur_iterations, self.blur_kernel_size)
+         descending_blur = Blur("descending", self.blur_iterations, self.blur_kernel_size)
+         transition = ascending_blur.apply(videos[0][-effect_time_fps:]) + descending_blur.apply(
+             videos[1][:effect_time_fps]
+         )
+
+         blurred_videos = Video.from_frames(
+             np.r_[
+                 "0,2",
+                 videos[0].frames[:-effect_time_fps],
+                 transition.frames,
+                 videos[1].frames[effect_time_fps:],
+             ],
+             fps=video_fps,
+         )
+         blurred_videos.audio = videos[0].audio.append(videos[1].audio)
+         return blurred_videos
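
A minimal usage sketch for the new `BlurTransition`; the second clip path is a hypothetical stand-in, and both clips must have mergeable metadata for the assertion in `Transition.apply` to pass:

```python
# Sketch only: both paths are placeholders with matching metadata.
from videopython.base.transitions import BlurTransition, FadeTransition
from videopython.base.video import Video

clip_a = Video.from_path("tests/test_data/fast_benchmark.mp4")  # placeholder asset
clip_b = Video.from_path("tests/test_data/other_clip.mp4")      # hypothetical asset

# Blur out of clip_a and back into clip_b over 1.5 seconds; audio is appended.
joined = BlurTransition(effect_time_seconds=1.5).apply((clip_a, clip_b))

# FadeTransition now also crossfades the audio of the two clips.
faded = FadeTransition(0.5).apply((clip_a, clip_b))
```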