videopython 0.1.4__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of videopython might be problematic.

Files changed (34):
  1. {videopython-0.1.4 → videopython-0.2.0}/PKG-INFO +35 -2
  2. {videopython-0.1.4 → videopython-0.2.0}/README.md +34 -1
  3. {videopython-0.1.4 → videopython-0.2.0}/pyproject.toml +1 -1
  4. videopython-0.2.0/src/videopython/base/exceptions.py +2 -0
  5. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/transforms.py +18 -18
  6. videopython-0.2.0/src/videopython/generation/pipeline.py +32 -0
  7. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/generation/video.py +1 -1
  8. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/utils/image.py +2 -2
  9. {videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/PKG-INFO +35 -2
  10. {videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/SOURCES.txt +2 -0
  11. {videopython-0.1.4 → videopython-0.2.0}/tests/test_transforms.py +2 -2
  12. {videopython-0.1.4 → videopython-0.2.0}/tests/test_utils.py +1 -1
  13. {videopython-0.1.4 → videopython-0.2.0}/LICENSE +0 -0
  14. {videopython-0.1.4 → videopython-0.2.0}/requirements-dev.txt +0 -0
  15. {videopython-0.1.4 → videopython-0.2.0}/requirements-generation.txt +0 -0
  16. {videopython-0.1.4 → videopython-0.2.0}/requirements.txt +0 -0
  17. {videopython-0.1.4 → videopython-0.2.0}/setup.cfg +0 -0
  18. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/__init__.py +0 -0
  19. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/compose.py +0 -0
  20. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/effects.py +0 -0
  21. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/transitions.py +0 -0
  22. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/video.py +0 -0
  23. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/generation/__init__.py +0 -0
  24. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/generation/audio.py +0 -0
  25. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/generation/image.py +0 -0
  26. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/utils/__init__.py +0 -0
  27. {videopython-0.1.4 → videopython-0.2.0}/src/videopython/utils/common.py +0 -0
  28. {videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/dependency_links.txt +0 -0
  29. {videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/requires.txt +0 -0
  30. {videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/top_level.txt +0 -0
  31. {videopython-0.1.4 → videopython-0.2.0}/tests/test_compose.py +0 -0
  32. {videopython-0.1.4 → videopython-0.2.0}/tests/test_effects.py +0 -0
  33. {videopython-0.1.4 → videopython-0.2.0}/tests/test_transitions.py +0 -0
  34. {videopython-0.1.4 → videopython-0.2.0}/tests/test_video.py +0 -0

{videopython-0.1.4 → videopython-0.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: videopython
- Version: 0.1.4
+ Version: 0.2.0
  Summary: Minimal video generation and processing library.
  Author-email: Bartosz Wójtowicz <bartoszwojtowicz@outlook.com>, Bartosz Rudnikowicz <bartoszrudnikowicz840@gmail.com>, Piotr Pukisz <piotr.pukisz@gmail.com>
  License: Apache License

@@ -250,8 +250,41 @@ pip install videopython[generation]
  > The funcionalities found in `videopython.generation` won't work.

  ## Basic Usage
- > Using Nvidia A40 or better is recommended for the `videopython.generation` module.

+ ### Video handling
+
+ ```python
+ from videopython.base.video import Video
+
+ # Load videos and print metadata
+ video1 = Video.from_path("tests/test_data/fast_benchmark.mp4")
+ print(video1)
+
+ video2 = Video.from_path("tests/test_data/slow_benchmark.mp4")
+ print(video2)
+
+ # Define the transformations
+ from videopython.base.transforms import CutSeconds, ResampleFPS, Resize, TransformationPipeline
+
+ pipeline = TransformationPipeline(
+     [CutSeconds(start=1.5, end=6.5), ResampleFPS(fps=30), Resize(width=1000, height=1000)]
+ )
+ video1 = pipeline.run(video1)
+ video2 = pipeline.run(video2)
+
+ # Combine videos, add audio and save
+ from videopython.base.transitions import FadeTransition
+
+ fade = FadeTransition(effect_time_seconds=3.0)
+ video = fade.apply(videos=(video1, video2))
+ video.add_audio_from_file("tests/test_data/test_audio.mp3")
+
+ savepath = video.save()
+ ```
+
+ ### Video Generation
+
+ > Using Nvidia A40 or better is recommended for the `videopython.generation` module.
  ```python
  # Generate image and animate it
  from videopython.generation import ImageToVideo

{videopython-0.1.4 → videopython-0.2.0}/README.md

@@ -20,8 +20,41 @@ pip install videopython[generation]
  > The funcionalities found in `videopython.generation` won't work.

  ## Basic Usage
- > Using Nvidia A40 or better is recommended for the `videopython.generation` module.

+ ### Video handling
+
+ ```python
+ from videopython.base.video import Video
+
+ # Load videos and print metadata
+ video1 = Video.from_path("tests/test_data/fast_benchmark.mp4")
+ print(video1)
+
+ video2 = Video.from_path("tests/test_data/slow_benchmark.mp4")
+ print(video2)
+
+ # Define the transformations
+ from videopython.base.transforms import CutSeconds, ResampleFPS, Resize, TransformationPipeline
+
+ pipeline = TransformationPipeline(
+     [CutSeconds(start=1.5, end=6.5), ResampleFPS(fps=30), Resize(width=1000, height=1000)]
+ )
+ video1 = pipeline.run(video1)
+ video2 = pipeline.run(video2)
+
+ # Combine videos, add audio and save
+ from videopython.base.transitions import FadeTransition
+
+ fade = FadeTransition(effect_time_seconds=3.0)
+ video = fade.apply(videos=(video1, video2))
+ video.add_audio_from_file("tests/test_data/test_audio.mp3")
+
+ savepath = video.save()
+ ```
+
+ ### Video Generation
+
+ > Using Nvidia A40 or better is recommended for the `videopython.generation` module.
  ```python
  # Generate image and animate it
  from videopython.generation import ImageToVideo

{videopython-0.1.4 → videopython-0.2.0}/pyproject.toml

@@ -11,7 +11,7 @@ include = ["videopython.*"]

  [project]
  name = "videopython"
- version = "0.1.4"
+ version = "0.2.0"
  description = "Minimal video generation and processing library."
  readme = "README.md"
  requires-python = ">=3.10"

videopython-0.2.0/src/videopython/base/exceptions.py (new file)

@@ -0,0 +1,2 @@
+ class OutOfBoundsError(Exception):
+     pass
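
The new module only defines the exception; the hunk for `src/videopython/utils/image.py` further below switches its import from `videopython.exceptions` to this new `videopython.base.exceptions` location. A minimal sketch of how calling code might handle it; `draw_fn` is a hypothetical stand-in, since the diff does not show which helpers actually raise it:

```python
from videopython.base.exceptions import OutOfBoundsError  # new location in 0.2.0


def draw_or_skip(draw_fn, *args, **kwargs):
    """Run a drawing helper and skip placements that fall outside the image.

    `draw_fn` is a placeholder for whichever videopython helper raises
    OutOfBoundsError (e.g. text placement in videopython.utils.image).
    """
    try:
        return draw_fn(*args, **kwargs)
    except OutOfBoundsError:
        print("Placement out of bounds, skipping.")
        return None
```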

{videopython-0.1.4 → videopython-0.2.0}/src/videopython/base/transforms.py

@@ -53,22 +53,22 @@ class TransformationPipeline:


  class CutFrames(Transformation):
-     def __init__(self, start_frame: int, end_frame: int):
-         self.start_frame = start_frame
-         self.end_frame = end_frame
+     def __init__(self, start: int, end: int):
+         self.start = start
+         self.end = end

      def apply(self, video: Video) -> Video:
-         video = video[self.start_frame : self.end_frame]
+         video = video[self.start : self.end]
          return video


  class CutSeconds(Transformation):
-     def __init__(self, start_second: float | int, end_second: float | int):
-         self.start_second = start_second
-         self.end_second = end_second
+     def __init__(self, start: float | int, end: float | int):
+         self.start = start
+         self.end = end

      def apply(self, video: Video) -> Video:
-         video = video[round(self.start_second * video.fps) : round(self.end_second * video.fps)]
+         video = video[round(self.start * video.fps) : round(self.end * video.fps)]
          return video


@@ -112,18 +112,18 @@ class Resize(Transformation):


  class ResampleFPS(Transformation):
-     def __init__(self, new_fps: int | float):
-         self.new_fps = float(new_fps)
+     def __init__(self, fps: int | float):
+         self.fps = float(fps)

      def _downsample(self, video: Video) -> Video:
-         target_frame_count = int(len(video.frames) * (self.new_fps / video.fps))
+         target_frame_count = int(len(video.frames) * (self.fps / video.fps))
          new_frame_indices = np.round(np.linspace(0, len(video.frames) - 1, target_frame_count)).astype(int)
          video.frames = video.frames[new_frame_indices]
-         video.fps = self.new_fps
+         video.fps = self.fps
          return video

      def _upsample(self, video: Video) -> Video:
-         target_frame_count = int(len(video.frames) * (self.new_fps / video.fps))
+         target_frame_count = int(len(video.frames) * (self.fps / video.fps))
          new_frame_indices = np.linspace(0, len(video.frames) - 1, target_frame_count)
          new_frames = []
          for i in tqdm(range(len(new_frame_indices) - 1)):

@@ -134,17 +134,17 @@ class ResampleFPS(Transformation):
              ]
              new_frames.append(new_frame.astype(np.uint8))
          video.frames = np.array(new_frames, dtype=np.uint8)
-         video.fps = self.new_fps
+         video.fps = self.fps
          return video

      def apply(self, video: Video) -> Video:
-         if video.fps == self.new_fps:
+         if video.fps == self.fps:
              return video
-         elif video.fps > self.new_fps:
-             print(f"Downsampling video from {video.fps} to {self.new_fps} FPS.")
+         elif video.fps > self.fps:
+             print(f"Downsampling video from {video.fps} to {self.fps} FPS.")
              video = self._downsample(video)
          else:
-             print(f"Upsampling video from {video.fps} to {self.new_fps} FPS.")
+             print(f"Upsampling video from {video.fps} to {self.fps} FPS.")
              video = self._upsample(video)
          return video
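
These renames are breaking for keyword callers: `CutFrames(start_frame=, end_frame=)` and `CutSeconds(start_second=, end_second=)` become `start`/`end`, and `ResampleFPS(new_fps=)` becomes `fps`. A short sketch of the 0.2.0 keyword style, mirroring the constructors in these hunks and the README example above; the input path is a placeholder:

```python
from videopython.base.video import Video
from videopython.base.transforms import (
    CutFrames,
    CutSeconds,
    ResampleFPS,
    Resize,
    TransformationPipeline,
)

video = Video.from_path("my_clip.mp4")  # placeholder path

# 0.1.4: CutSeconds(start_second=1.5, end_second=6.5), ResampleFPS(new_fps=30)
pipeline = TransformationPipeline(
    [
        CutSeconds(start=1.5, end=6.5),   # trims by seconds, converted via video.fps
        ResampleFPS(fps=30),              # down- or upsamples to the target FPS
        Resize(width=1000, height=1000),
    ]
)
video = pipeline.run(video)

# CutFrames follows the same pattern when you want frame indices instead:
first_hundred = CutFrames(start=0, end=100).apply(video)
```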

videopython-0.2.0/src/videopython/generation/pipeline.py (new file)

@@ -0,0 +1,32 @@
+ import cv2
+ import numpy as np
+ from PIL import Image
+
+ from videopython.base.transforms import Resize
+ from videopython.generation import ImageToVideo, TextToImage
+
+ N_ITERATIONS = 11
+ PRMOPT = "Sunset at the sea, cimenatic view"
+
+
+ def main():
+     text_to_image = TextToImage()
+     image_to_video = ImageToVideo()
+
+     target_height = 576
+     target_width = 1024
+
+     base_image = text_to_image.generate_image(PRMOPT)
+     image = cv2.resize(np.asarray(base_image), (target_width, target_height))
+
+     video = image_to_video.generate_video(image)
+
+     for i in range(N_ITERATIONS - 1):
+         print(f"Generating {i+2}/{N_ITERATIONS}...")
+         video += image_to_video.generate_video(Image.fromarray(video.frames[-1]))
+
+     video.save()
+
+
+ if __name__ == "__main__":
+     main()
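
The new `pipeline.py` is a runnable demo: it generates one image from the prompt, animates it, then repeatedly re-seeds `ImageToVideo` with the last frame of the growing clip, concatenating the pieces with `+=`. Because `main()` is guarded by `__main__`, it can also be driven programmatically; a sketch, assuming the `generation` extra is installed and a suitable GPU is available:

```python
# Runs the bundled text -> image -> video demo end to end.
# Heavy workload: the README recommends an Nvidia A40 or better for videopython.generation.
from videopython.generation.pipeline import main

if __name__ == "__main__":
    main()
```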

{videopython-0.1.4 → videopython-0.2.0}/src/videopython/generation/video.py

@@ -6,7 +6,7 @@ from PIL.Image import Image
  from videopython.base.video import Video

  TEXT_TO_VIDEO_MODEL = "cerspense/zeroscope_v2_576w"
- IMAGE_TO_VIDEO_MODEL = "stabilityai/stable-video-diffusion-img2vid-xt"
+ IMAGE_TO_VIDEO_MODEL = "stabilityai/stable-video-diffusion-img2vid-xt-1-1"


  class TextToVideo:

{videopython-0.1.4 → videopython-0.2.0}/src/videopython/utils/image.py

@@ -4,8 +4,8 @@ import cv2
  import numpy as np
  from PIL import Image, ImageDraw, ImageFont

+ from videopython.base.exceptions import OutOfBoundsError
  from videopython.base.video import Video
- from videopython.exceptions import OutOfBoundsError


  class ImageText:

@@ -247,7 +247,7 @@ class SlideOverImage:
          self.fps = fps
          self.length_seconds = length_seconds

-     def slide(self, image: np.ndarray) -> Video:
+     def apply(self, image: np.ndarray) -> Video:
          image = self._resize(image)
          max_offset = image.shape[1] - self.video_width
          frame_count = round(self.fps * self.length_seconds)
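
Renaming `SlideOverImage.slide` to `apply` aligns it with the `apply` methods used by the transforms; the updated test further below makes the same change. A hedged sketch of the 0.2.0 call, with a dummy image and constructor values borrowed from that test (the `(width, height)` reading of `video_shape` is inferred from the test, not documented in this diff):

```python
import numpy as np

from videopython.utils.image import SlideOverImage

# Dummy wide image: 300 px tall, 900 px wide, RGB.
image = np.zeros((300, 900, 3), dtype=np.uint8)

slider = SlideOverImage(
    direction="left",
    video_shape=(150, 300),  # mirrors the test's (150, image.shape[0])
    fps=24.0,
    length_seconds=1.0,
)

video = slider.apply(image)  # 0.1.4 spelled this slider.slide(image)
# Per the updated test: 24 frames (fps * length_seconds), frame height equal to the image height.
print(video.frames.shape)
```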

{videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: videopython
- Version: 0.1.4
+ Version: 0.2.0
  Summary: Minimal video generation and processing library.
  Author-email: Bartosz Wójtowicz <bartoszwojtowicz@outlook.com>, Bartosz Rudnikowicz <bartoszrudnikowicz840@gmail.com>, Piotr Pukisz <piotr.pukisz@gmail.com>
  License: Apache License

@@ -250,8 +250,41 @@ pip install videopython[generation]
  > The funcionalities found in `videopython.generation` won't work.

  ## Basic Usage
- > Using Nvidia A40 or better is recommended for the `videopython.generation` module.

+ ### Video handling
+
+ ```python
+ from videopython.base.video import Video
+
+ # Load videos and print metadata
+ video1 = Video.from_path("tests/test_data/fast_benchmark.mp4")
+ print(video1)
+
+ video2 = Video.from_path("tests/test_data/slow_benchmark.mp4")
+ print(video2)
+
+ # Define the transformations
+ from videopython.base.transforms import CutSeconds, ResampleFPS, Resize, TransformationPipeline
+
+ pipeline = TransformationPipeline(
+     [CutSeconds(start=1.5, end=6.5), ResampleFPS(fps=30), Resize(width=1000, height=1000)]
+ )
+ video1 = pipeline.run(video1)
+ video2 = pipeline.run(video2)
+
+ # Combine videos, add audio and save
+ from videopython.base.transitions import FadeTransition
+
+ fade = FadeTransition(effect_time_seconds=3.0)
+ video = fade.apply(videos=(video1, video2))
+ video.add_audio_from_file("tests/test_data/test_audio.mp3")
+
+ savepath = video.save()
+ ```
+
+ ### Video Generation
+
+ > Using Nvidia A40 or better is recommended for the `videopython.generation` module.
  ```python
  # Generate image and animate it
  from videopython.generation import ImageToVideo

{videopython-0.1.4 → videopython-0.2.0}/src/videopython.egg-info/SOURCES.txt

@@ -12,12 +12,14 @@ src/videopython.egg-info/top_level.txt
  src/videopython/base/__init__.py
  src/videopython/base/compose.py
  src/videopython/base/effects.py
+ src/videopython/base/exceptions.py
  src/videopython/base/transforms.py
  src/videopython/base/transitions.py
  src/videopython/base/video.py
  src/videopython/generation/__init__.py
  src/videopython/generation/audio.py
  src/videopython/generation/image.py
+ src/videopython/generation/pipeline.py
  src/videopython/generation/video.py
  src/videopython/utils/__init__.py
  src/videopython/utils/common.py

{videopython-0.1.4 → videopython-0.2.0}/tests/test_transforms.py

@@ -7,7 +7,7 @@ from videopython.base.transforms import CutFrames, CutSeconds, Resize

  @pytest.mark.parametrize("start, end", [(0, 100), (100, 101), (100, 120)])
  def test_cut_frames(start, end, small_video):
-     cut_frames = CutFrames(start_frame=start, end_frame=end)
+     cut_frames = CutFrames(start=start, end=end)
      start_frame = small_video.frames[start].copy()
      transformed = cut_frames.apply(small_video)
      assert len(transformed.frames) == (end - start)

@@ -16,7 +16,7 @@ def test_cut_frames(start, end, small_video):

  @pytest.mark.parametrize("start, end", [(0, 0.5), (0, 1), (0.5, 1.5)])
  def test_cut_seconds(start, end, small_video):
-     cut_seconds = CutSeconds(start_second=start, end_second=end)
+     cut_seconds = CutSeconds(start=start, end=end)
      start_frame = small_video.frames[round(start * small_video.fps)].copy()
      transformed = cut_seconds.apply(small_video)
      assert len(transformed.frames) == round((end - start) * small_video.fps)

{videopython-0.1.4 → videopython-0.2.0}/tests/test_utils.py

@@ -3,7 +3,7 @@ from videopython.utils.image import SlideOverImage

  def test_slide_over_image(small_image):
      slide = SlideOverImage(direction="left", video_shape=(150, small_image.shape[0]), fps=24.0, length_seconds=1.0)
-     video = slide.slide(small_image)
+     video = slide.apply(small_image)

      assert video.frames.shape[0] == 24
      assert video.frames.shape[1] == small_image.shape[0]