videopython 0.2.1__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of videopython might be problematic.
- videopython/ai/__init__.py +0 -0
- videopython/{generation → ai/generation}/audio.py +25 -13
- videopython/{generation → ai/generation}/image.py +0 -3
- videopython/ai/understanding/__init__.py +0 -0
- videopython/ai/understanding/transcribe.py +37 -0
- videopython/base/effects.py +3 -3
- videopython/base/transcription.py +13 -0
- videopython/base/transforms.py +0 -2
- videopython/base/transitions.py +2 -2
- videopython/base/video.py +269 -187
- videopython/utils/__init__.py +3 -0
- videopython/utils/image.py +0 -228
- videopython/utils/text.py +727 -0
- {videopython-0.2.1.dist-info → videopython-0.4.0.dist-info}/METADATA +13 -25
- videopython-0.4.0.dist-info/RECORD +25 -0
- {videopython-0.2.1.dist-info → videopython-0.4.0.dist-info}/WHEEL +1 -1
- videopython-0.2.1.dist-info/RECORD +0 -20
- /videopython/{generation → ai/generation}/__init__.py +0 -0
- /videopython/{generation → ai/generation}/video.py +0 -0
- {videopython-0.2.1.dist-info → videopython-0.4.0.dist-info}/licenses/LICENSE +0 -0
File without changes

videopython/{generation → ai/generation}/audio.py
CHANGED

@@ -1,6 +1,5 @@
-import numpy as np
 import torch
-from
+from soundpython import Audio, AudioMetadata
 from transformers import (
     AutoProcessor,
     AutoTokenizer,
@@ -17,15 +16,24 @@ class TextToSpeech:
         self.pipeline = VitsModel.from_pretrained(TEXT_TO_SPEECH_MODEL)
         self.tokenizer = AutoTokenizer.from_pretrained(TEXT_TO_SPEECH_MODEL)

-    def generate_audio(self, text: str) ->
+    def generate_audio(self, text: str) -> Audio:
         tokenized = self.tokenizer(text, return_tensors="pt")

         with torch.no_grad():
             output = self.pipeline(**tokenized).waveform

-
-
-
+        # Convert to float32 and normalize to [-1, 1]
+        audio_data = output.T.float().numpy()
+
+        metadata = AudioMetadata(
+            sample_rate=self.pipeline.config.sampling_rate,
+            channels=1,
+            sample_width=4,
+            duration_seconds=len(audio_data) / self.pipeline.config.sampling_rate,
+            frame_count=len(audio_data),
+        )
+
+        return Audio(audio_data, metadata)


 class TextToMusic:
@@ -37,7 +45,7 @@ class TextToMusic:
         self.processor = AutoProcessor.from_pretrained(MUSIC_GENERATION_MODEL_SMALL)
         self.model = MusicgenForConditionalGeneration.from_pretrained(MUSIC_GENERATION_MODEL_SMALL)

-    def generate_audio(self, text: str, max_new_tokens: int) ->
+    def generate_audio(self, text: str, max_new_tokens: int) -> Audio:
         inputs = self.processor(
             text=[text],
             padding=True,
@@ -45,12 +53,16 @@ class TextToMusic:
         )
         audio_values = self.model.generate(**inputs, max_new_tokens=max_new_tokens)
         sampling_rate = self.model.config.audio_encoder.sampling_rate
-        output = (audio_values[0, 0].float().numpy() * (2**31 - 1)).astype(np.int32)

-
-
-
-
+        # Convert to float32 and normalize to [-1, 1]
+        audio_data = audio_values[0, 0].float().numpy()
+
+        metadata = AudioMetadata(
+            sample_rate=sampling_rate,
             channels=1,
+            sample_width=4,
+            duration_seconds=len(audio_data) / sampling_rate,
+            frame_count=len(audio_data),
         )
-
+
+        return Audio(audio_data, metadata)
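Taken together, these hunks change both generators to return a soundpython Audio (float32 samples plus an AudioMetadata record) instead of a raw waveform array. A minimal usage sketch, assuming the import path implied by the new ai/generation layout and only the Audio/AudioMetadata attributes that appear in the hunks above:

# Sketch only, not part of the diff; import path inferred from the new package layout.
from videopython.ai.generation.audio import TextToMusic, TextToSpeech

speech = TextToSpeech().generate_audio("Hello from videopython.")
music = TextToMusic().generate_audio("calm lo-fi beat", max_new_tokens=256)

# Both results are soundpython Audio objects with metadata filled in by the generator.
print(speech.metadata.sample_rate, speech.metadata.duration_seconds)
print(music.metadata.sample_rate, music.metadata.duration_seconds)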
File without changes

videopython/ai/understanding/transcribe.py
ADDED

@@ -0,0 +1,37 @@
+from typing import Literal
+
+import whisper
+
+from videopython.base.transcription import Transcription, TranscriptionSegment
+from videopython.base.video import Video
+
+
+class VideoTranscription:
+    def __init__(self, model_name: Literal["tiny", "base", "small", "medium", "large", "turbo"] = "small") -> None:
+        self.model = whisper.load_model(name=model_name)
+
+    def transcribe_video(self, video: Video) -> Transcription:
+        """Transcribes video to text.
+
+        Args:
+            video: Video to transcribe.
+
+        Returns:
+            List of dictionaries with segments of text and their start and end times.
+        """
+        if video.audio.is_silent:
+            return Transcription(segments=[])
+
+        audio = video.audio.to_mono()
+        audio = audio.resample(whisper.audio.SAMPLE_RATE)
+        audio_data = audio.data
+
+        transcription = self.model.transcribe(audio=audio_data, word_timestamps=True)
+
+        transcription_segments = [
+            TranscriptionSegment(start=segment["start"], end=segment["end"], text=segment["text"])
+            for segment in transcription["segments"]
+        ]
+        result = Transcription(segments=transcription_segments)
+
+        return result
videopython/base/effects.py
CHANGED
@@ -156,13 +156,13 @@ class Zoom(Effect):

         width = video.metadata.width
         height = video.metadata.height
-        crop_sizes_w, crop_sizes_h =
-
+        crop_sizes_w, crop_sizes_h = (
+            np.linspace(width // self.zoom_factor, width, n_frames),
+            np.linspace(height // self.zoom_factor, height, n_frames),
         )

         if self.mode == "in":
             for frame, w, h in tqdm(zip(video.frames, reversed(crop_sizes_w), reversed(crop_sizes_h))):
-
                 x = width / 2 - w / 2
                 y = height / 2 - h / 2

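The fix above completes the previously truncated assignment: crop sizes are now two np.linspace ramps from the zoomed-in size up to the full frame, and each frame is cropped around the center. A self-contained sketch of the resulting numbers (illustrative values only, not part of the diff):

# Sketch only: how the corrected Zoom ramp behaves for a 1920x1080 clip.
import numpy as np

width, height, zoom_factor, n_frames = 1920, 1080, 2, 5
crop_sizes_w = np.linspace(width // zoom_factor, width, n_frames)    # [ 960. 1200. 1440. 1680. 1920.]
crop_sizes_h = np.linspace(height // zoom_factor, height, n_frames)  # [ 540.  675.  810.  945. 1080.]

# For mode == "in" the ramps are traversed in reverse, so the crop window
# shrinks over time and the apparent zoom increases.
for w, h in zip(reversed(crop_sizes_w), reversed(crop_sizes_h)):
    x = width / 2 - w / 2   # top-left corner of the centered crop
    y = height / 2 - h / 2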
videopython/base/transforms.py
CHANGED
@@ -1,7 +1,6 @@
 from abc import ABC, abstractmethod
 from enum import Enum
 from multiprocessing import Pool
-from typing import Literal

 import cv2
 import numpy as np
@@ -154,7 +153,6 @@ class CropMode(Enum):


 class Crop(Transformation):
-
     def __init__(self, width: int, height: int, mode: CropMode = CropMode.CENTER):
         self.width = width
         self.height = height
videopython/base/transitions.py
CHANGED
@@ -67,7 +67,7 @@ class FadeTransition(Transition):
             ],
             fps=video_fps,
         )
-        faded_videos.audio = videos[0].audio.
+        faded_videos.audio = videos[0].audio.concat(videos[1].audio, crossfade=(effect_time_fps / video_fps))
         return faded_videos


@@ -102,5 +102,5 @@ class BlurTransition(Transition):
             ],
             fps=video_fps,
         )
-        blurred_videos.audio = videos[0].audio.
+        blurred_videos.audio = videos[0].audio.concat(videos[1].audio)
         return blurred_videos
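Both transitions now finish the previously truncated audio assignment with soundpython's Audio.concat; FadeTransition also passes a crossfade, presumably in seconds, so the audio overlap matches the visual fade. A small sketch of that unit conversion (illustrative numbers, not part of the diff):

# Sketch only: the crossfade value is fade frames divided by fps, i.e. seconds.
effect_time_fps = 12   # frames spent on the fade (illustrative)
video_fps = 24.0
crossfade = effect_time_fps / video_fps
print(crossfade)  # 0.5 -> half a second of overlapping audio during the fade
# BlurTransition concatenates the two audio tracks without any crossfade.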