videopython 0.2.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.

@@ -1,6 +1,5 @@
-import numpy as np
 import torch
-from pydub import AudioSegment
+from soundpython import Audio, AudioMetadata
 from transformers import (
     AutoProcessor,
     AutoTokenizer,
@@ -17,15 +16,24 @@ class TextToSpeech:
         self.pipeline = VitsModel.from_pretrained(TEXT_TO_SPEECH_MODEL)
         self.tokenizer = AutoTokenizer.from_pretrained(TEXT_TO_SPEECH_MODEL)
 
-    def generate_audio(self, text: str) -> AudioSegment:
+    def generate_audio(self, text: str) -> Audio:
         tokenized = self.tokenizer(text, return_tensors="pt")
 
         with torch.no_grad():
             output = self.pipeline(**tokenized).waveform
 
-        output = (output.T.float().numpy() * (2**31 - 1)).astype(np.int32)
-        audio = AudioSegment(data=output, frame_rate=self.pipeline.config.sampling_rate, sample_width=4, channels=1)
-        return audio
+        # Convert to float32 and normalize to [-1, 1]
+        audio_data = output.T.float().numpy()
+
+        metadata = AudioMetadata(
+            sample_rate=self.pipeline.config.sampling_rate,
+            channels=1,
+            sample_width=4,
+            duration_seconds=len(audio_data) / self.pipeline.config.sampling_rate,
+            frame_count=len(audio_data),
+        )
+
+        return Audio(audio_data, metadata)
 
 
 class TextToMusic:
@@ -37,7 +45,7 @@ class TextToMusic:
         self.processor = AutoProcessor.from_pretrained(MUSIC_GENERATION_MODEL_SMALL)
         self.model = MusicgenForConditionalGeneration.from_pretrained(MUSIC_GENERATION_MODEL_SMALL)
 
-    def generate_audio(self, text: str, max_new_tokens: int) -> AudioSegment:
+    def generate_audio(self, text: str, max_new_tokens: int) -> Audio:
         inputs = self.processor(
             text=[text],
             padding=True,
@@ -45,12 +53,16 @@
         )
         audio_values = self.model.generate(**inputs, max_new_tokens=max_new_tokens)
         sampling_rate = self.model.config.audio_encoder.sampling_rate
-        output = (audio_values[0, 0].float().numpy() * (2**31 - 1)).astype(np.int32)
 
-        audio = AudioSegment(
-            data=output.tobytes(),
-            frame_rate=sampling_rate,
-            sample_width=4,
+        # Convert to float32 and normalize to [-1, 1]
+        audio_data = audio_values[0, 0].float().numpy()
+
+        metadata = AudioMetadata(
+            sample_rate=sampling_rate,
             channels=1,
+            sample_width=4,
+            duration_seconds=len(audio_data) / sampling_rate,
+            frame_count=len(audio_data),
        )
-        return audio
+
+        return Audio(audio_data, metadata)
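
For orientation, here is a minimal usage sketch of the reworked generation API, which now returns soundpython Audio objects instead of pydub AudioSegment. Only the classes and fields visible in the hunks above come from the package; the import path and the .metadata attribute access are assumptions, not documented API.

# Hedged usage sketch of the Audio-returning generation methods shown above.
from videopython.generation import TextToSpeech, TextToMusic  # import path assumed

tts = TextToSpeech()
speech = tts.generate_audio("Hello from videopython!")

music_gen = TextToMusic()
jingle = music_gen.generate_audio("upbeat ukulele jingle", max_new_tokens=256)

# AudioMetadata fields populated above: sample_rate, channels, sample_width,
# duration_seconds, frame_count. Reading them via .metadata is an assumption.
print(speech.metadata.duration_seconds, jingle.metadata.sample_rate)

# soundpython Audio objects are joined with concat() elsewhere in this release.
combined = speech.concat(speech)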
@@ -1,6 +1,3 @@
-import io
-import os
-
 import torch
 from diffusers import DiffusionPipeline
 from PIL import Image
@@ -0,0 +1,37 @@
+from typing import Literal
+
+import whisper
+
+from videopython.base.transcription import Transcription, TranscriptionSegment
+from videopython.base.video import Video
+
+
+class VideoTranscription:
+    def __init__(self, model_name: Literal["tiny", "base", "small", "medium", "large", "turbo"] = "small") -> None:
+        self.model = whisper.load_model(name=model_name)
+
+    def transcribe_video(self, video: Video) -> Transcription:
+        """Transcribes video to text.
+
+        Args:
+            video: Video to transcribe.
+
+        Returns:
+            Transcription with segments of text and their start and end times.
+        """
+        if video.audio.is_silent:
+            return Transcription(segments=[])
+
+        audio = video.audio.to_mono()
+        audio = audio.resample(whisper.audio.SAMPLE_RATE)
+        audio_data = audio.data
+
+        transcription = self.model.transcribe(audio=audio_data, word_timestamps=True)
+
+        transcription_segments = [
+            TranscriptionSegment(start=segment["start"], end=segment["end"], text=segment["text"])
+            for segment in transcription["segments"]
+        ]
+        result = Transcription(segments=transcription_segments)
+
+        return result
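
The new VideoTranscription class wires a videopython Video into openai-whisper. A minimal sketch of how it might be used; the import location of VideoTranscription and the Video.from_path loader are assumptions not shown in this diff.

# Hedged sketch of the transcription flow added above.
from videopython.base.video import Video

video = Video.from_path("clip.mp4")            # hypothetical loader, name assumed
transcriber = VideoTranscription(model_name="base")
transcription = transcriber.transcribe_video(video)

for segment in transcription.segments:
    print(f"[{segment.start:6.2f}s - {segment.end:6.2f}s] {segment.text}")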
@@ -156,13 +156,13 @@ class Zoom(Effect):
 
         width = video.metadata.width
         height = video.metadata.height
-        crop_sizes_w, crop_sizes_h = np.linspace(width // self.zoom_factor, width, n_frames), np.linspace(
-            height // self.zoom_factor, height, n_frames
+        crop_sizes_w, crop_sizes_h = (
+            np.linspace(width // self.zoom_factor, width, n_frames),
+            np.linspace(height // self.zoom_factor, height, n_frames),
         )
 
         if self.mode == "in":
             for frame, w, h in tqdm(zip(video.frames, reversed(crop_sizes_w), reversed(crop_sizes_h))):
-
                 x = width / 2 - w / 2
                 y = height / 2 - h / 2
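
The restructured assignment computes the same per-frame crop-size schedule as before. A standalone illustration with arbitrary example values:

import numpy as np

# Crop sizes interpolate from (width // zoom_factor) up to the full frame size;
# the width, height, zoom_factor and n_frames values here are arbitrary.
width, height, zoom_factor, n_frames = 1920, 1080, 2, 5

crop_sizes_w = np.linspace(width // zoom_factor, width, n_frames)    # 960 ... 1920
crop_sizes_h = np.linspace(height // zoom_factor, height, n_frames)  # 540 ... 1080

# For mode="in" the sizes are consumed in reverse, so the crop window shrinks
# from the full frame down to 1/zoom_factor of it, i.e. the view zooms in.
print(crop_sizes_w[::-1])  # 1920, 1680, 1440, 1200, 960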
 
@@ -0,0 +1,13 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class TranscriptionSegment:
+    start: float
+    end: float
+    text: str
+
+
+@dataclass
+class Transcription:
+    segments: list[TranscriptionSegment]
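
These dataclasses are what transcribe_video returns. A small example of composing and consuming them; the to_plain_text helper is illustrative, not part of videopython.

# Example built only from the dataclasses defined above.
def to_plain_text(transcription: Transcription) -> str:
    return " ".join(segment.text.strip() for segment in transcription.segments)

example = Transcription(
    segments=[
        TranscriptionSegment(start=0.0, end=1.5, text="Hello"),
        TranscriptionSegment(start=1.5, end=3.0, text="world."),
    ]
)
print(to_plain_text(example))  # Hello world.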
@@ -1,7 +1,6 @@
 from abc import ABC, abstractmethod
 from enum import Enum
 from multiprocessing import Pool
-from typing import Literal
 
 import cv2
 import numpy as np
@@ -154,7 +153,6 @@ class CropMode(Enum):
 
 
 class Crop(Transformation):
-
     def __init__(self, width: int, height: int, mode: CropMode = CropMode.CENTER):
         self.width = width
         self.height = height
@@ -67,7 +67,7 @@ class FadeTransition(Transition):
             ],
             fps=video_fps,
         )
-        faded_videos.audio = videos[0].audio.append(videos[1].audio, crossfade=(effect_time_fps / video_fps) * 1000)
+        faded_videos.audio = videos[0].audio.concat(videos[1].audio, crossfade=(effect_time_fps / video_fps))
         return faded_videos
 
 
@@ -102,5 +102,5 @@ class BlurTransition(Transition):
             ],
             fps=video_fps,
         )
-        blurred_videos.audio = videos[0].audio.append(videos[1].audio)
+        blurred_videos.audio = videos[0].audio.concat(videos[1].audio)
         return blurred_videos
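
The transitions now join audio with soundpython's concat instead of pydub's append. Dropping the "* 1000" factor suggests the crossfade argument is expressed in seconds rather than milliseconds; that is an inference from this diff, not a documented guarantee. A hedged sketch of the call:

# Sketch of joining two clips' audio with the new concat call. Video.from_path
# and the metadata.fps attribute are assumptions; the seconds-based crossfade
# unit is inferred from the removal of "* 1000" above.
from videopython.base.video import Video

videos = [Video.from_path("a.mp4"), Video.from_path("b.mp4")]  # hypothetical loader
video_fps = videos[0].metadata.fps                             # attribute name assumed
effect_time_fps = 12

joined = videos[0].audio.concat(videos[1].audio, crossfade=effect_time_fps / video_fps)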